Mirror of https://github.com/rclone/rclone.git, synced 2025-12-22 19:23:40 +00:00

Comparing fix-8383-b ... fix-auth-p: 1 commit (69ae5f2aaf)
.github/workflows/build_publish_beta_docker_image.yml (vendored, new file, 77 lines)
@@ -0,0 +1,77 @@
name: Docker beta build

on:
  push:
    branches:
      - master

jobs:
  build:
    if: github.repository == 'rclone/rclone'
    runs-on: ubuntu-latest
    name: Build image job
    steps:
      - name: Free some space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ghcr.io/${{ github.repository }}
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          # This is the user that triggered the Workflow. In this case, it will
          # either be the user whom created the Release or manually triggered
          # the workflow_dispatch.
          username: ${{ github.actor }}
          # `secrets.GITHUB_TOKEN` is a secret that's automatically generated by
          # GitHub Actions at the start of a workflow run to identify the job.
          # This is used to authenticate against GitHub Container Registry.
          # See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
          # for more detailed information.
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Show disk usage
        shell: bash
        run: |
          df -h .
      - name: Build and publish image
        uses: docker/build-push-action@v6
        with:
          file: Dockerfile
          context: .
          push: true # push the image to ghcr
          tags: |
            ghcr.io/rclone/rclone:beta
            rclone/rclone:beta
          labels: ${{ steps.meta.outputs.labels }}
          platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
          cache-from: type=gha, scope=${{ github.workflow }}
          cache-to: type=gha, mode=max, scope=${{ github.workflow }}
          provenance: false
          # Eventually cache will need to be cleared if builds more frequent than once a week
          # https://github.com/docker/build-push-action/issues/252
      - name: Show disk usage
        shell: bash
        run: |
          df -h .
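Once this workflow has run on master, the multi-arch beta image should be pullable from either registry; a quick smoke test (commands illustrative, not part of the diff):

    docker pull ghcr.io/rclone/rclone:beta
    docker run --rm ghcr.io/rclone/rclone:beta version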
.github/workflows/build_publish_docker_image.yml (vendored, deleted file, 294 lines)
@@ -1,294 +0,0 @@
---
# Github Actions release for rclone
# -*- compile-command: "yamllint -f parsable build_publish_docker_image.yml" -*-

name: Build & Push Docker Images

# Trigger the workflow on push or pull request
on:
  push:
    branches:
      - '**'
    tags:
      - '**'
  workflow_dispatch:
    inputs:
      manual:
        description: Manual run (bypass default conditions)
        type: boolean
        default: true

jobs:
  build-image:
    if: inputs.manual || (github.repository == 'rclone/rclone' && github.event_name != 'pull_request')
    timeout-minutes: 60
    strategy:
      fail-fast: false
      matrix:
        include:
          - platform: linux/amd64
            runs-on: ubuntu-24.04
          - platform: linux/386
            runs-on: ubuntu-24.04
          - platform: linux/arm64
            runs-on: ubuntu-24.04-arm
          - platform: linux/arm/v7
            runs-on: ubuntu-24.04-arm
          - platform: linux/arm/v6
            runs-on: ubuntu-24.04-arm

    name: Build Docker Image for ${{ matrix.platform }}
    runs-on: ${{ matrix.runs-on }}

    steps:
      - name: Free Space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .

      - name: Checkout Repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set REPO_NAME Variable
        run: |
          echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}

      - name: Set PLATFORM Variable
        run: |
          platform=${{ matrix.platform }}
          echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV

      - name: Set CACHE_NAME Variable
        shell: python
        run: |
          import os, re

          def slugify(input_string, max_length=63):
              slug = input_string.lower()
              slug = re.sub(r'[^a-z0-9 -]', ' ', slug)
              slug = slug.strip()
              slug = re.sub(r'\s+', '-', slug)
              slug = re.sub(r'-+', '-', slug)
              slug = slug[:max_length]
              slug = re.sub(r'[-]+$', '', slug)
              return slug

          ref_name_slug = "cache"

          if os.environ.get("GITHUB_REF_NAME") and os.environ['GITHUB_EVENT_NAME'] == "pull_request":
              ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME'])

          with open(os.environ['GITHUB_ENV'], 'a') as env:
              env.write(f"CACHE_NAME={ref_name_slug}\n")

      - name: Get ImageOS
        # There's no way around this, because "ImageOS" is only available to
        # processes, but the setup-go action uses it in its key.
        id: imageos
        uses: actions/github-script@v7
        with:
          result-encoding: string
          script: |
            return process.env.ImageOS

      - name: Extract Metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
        env:
          DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,manifest-descriptor # Important for digest annotation (used by Github packages)
        with:
          images: |
            ghcr.io/${{ env.REPO_NAME }}
          labels: |
            org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
            org.opencontainers.image.vendor=${{ github.repository_owner }}
            org.opencontainers.image.authors=rclone <https://github.com/rclone>
            org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
            org.opencontainers.image.revision=${{ github.sha }}
          tags: |
            type=sha
            type=ref,event=pr
            type=ref,event=branch
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}
            type=semver,pattern={{major}}.{{minor}}
            type=raw,value=beta,enable={{is_default_branch}}

      - name: Setup QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Load Go Build Cache for Docker
        id: go-cache
        uses: actions/cache@v4
        with:
          key: ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
          # Cache only the go builds, the module download is cached via the docker layer caching
          path: |
            go-build-cache

      - name: Inject Go Build Cache into Docker
        uses: reproducible-containers/buildkit-cache-dance@v3
        with:
          cache-map: |
            {
              "go-build-cache": "/root/.cache/go-build"
            }
          skip-extraction: ${{ steps.go-cache.outputs.cache-hit }}

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          # This is the user that triggered the Workflow. In this case, it will
          # either be the user whom created the Release or manually triggered
          # the workflow_dispatch.
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and Publish Image Digest
        id: build
        uses: docker/build-push-action@v6
        with:
          file: Dockerfile
          context: .
          provenance: false
          # don't specify 'tags' here (error "get can't push tagged ref by digest")
          # tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          annotations: ${{ steps.meta.outputs.annotations }}
          platforms: ${{ matrix.platform }}
          outputs: |
            type=image,name=ghcr.io/${{ env.REPO_NAME }},push-by-digest=true,name-canonical=true,push=true
          cache-from: |
            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
          cache-to: |
            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }},image-manifest=true,mode=max,compression=zstd

      - name: Export Image Digest
        run: |
          mkdir -p /tmp/digests
          digest="${{ steps.build.outputs.digest }}"
          touch "/tmp/digests/${digest#sha256:}"

      - name: Upload Image Digest
        uses: actions/upload-artifact@v4
        with:
          name: digests-${{ env.PLATFORM }}
          path: /tmp/digests/*
          retention-days: 1
          if-no-files-found: error

  merge-image:
    name: Merge & Push Final Docker Image
    runs-on: ubuntu-24.04
    needs:
      - build-image

    steps:
      - name: Download Image Digests
        uses: actions/download-artifact@v4
        with:
          path: /tmp/digests
          pattern: digests-*
          merge-multiple: true

      - name: Set REPO_NAME Variable
        run: |
          echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}

      - name: Extract Metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
        env:
          DOCKER_METADATA_ANNOTATIONS_LEVELS: index
        with:
          images: |
            ${{ env.REPO_NAME }}
            ghcr.io/${{ env.REPO_NAME }}
          labels: |
            org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
            org.opencontainers.image.vendor=${{ github.repository_owner }}
            org.opencontainers.image.authors=rclone <https://github.com/rclone>
            org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
            org.opencontainers.image.revision=${{ github.sha }}
          tags: |
            type=sha
            type=ref,event=pr
            type=ref,event=branch
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}
            type=semver,pattern={{major}}.{{minor}}
            type=raw,value=beta,enable={{is_default_branch}}

      - name: Extract Tags
        shell: python
        run: |
          import json, os

          metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
          metadata = json.loads(metadata_json)

          tags = [f"--tag '{tag}'" for tag in metadata["tags"]]
          tags_string = " ".join(tags)

          with open(os.environ['GITHUB_ENV'], 'a') as env:
              env.write(f"TAGS={tags_string}\n")

      - name: Extract Annotations
        shell: python
        run: |
          import json, os

          metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
          metadata = json.loads(metadata_json)

          annotations = [f"--annotation '{annotation}'" for annotation in metadata["annotations"]]
          annotations_string = " ".join(annotations)

          with open(os.environ['GITHUB_ENV'], 'a') as env:
              env.write(f"ANNOTATIONS={annotations_string}\n")

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          # This is the user that triggered the Workflow. In this case, it will
          # either be the user whom created the Release or manually triggered
          # the workflow_dispatch.
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Create & Push Manifest List
        working-directory: /tmp/digests
        run: |
          docker buildx imagetools create \
            ${{ env.TAGS }} \
            ${{ env.ANNOTATIONS }} \
            $(printf 'ghcr.io/${{ env.REPO_NAME }}@sha256:%s ' *)

      - name: Inspect and Run Multi-Platform Image
        run: |
          docker buildx imagetools inspect --raw ${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
          docker buildx imagetools inspect --raw ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
          docker run --rm ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }} version
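The "Create & Push Manifest List" step above stitches the per-platform digests (stashed as filenames in /tmp/digests) into one multi-arch manifest. With made-up tags and digests, the expanded command would look roughly like:

    docker buildx imagetools create \
      --tag 'rclone/rclone:beta' --tag 'ghcr.io/rclone/rclone:beta' \
      ghcr.io/rclone/rclone@sha256:aaaa... ghcr.io/rclone/rclone@sha256:bbbb...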
@@ -1,45 +0,0 @@ (deleted file; build_publish_docker_plugin.yml, per its compile-command comment)
---
# Github Actions release for rclone
# -*- compile-command: "yamllint -f parsable build_publish_docker_plugin.yml" -*-

name: Release Build for Docker Plugin

on:
  release:
    types: [published]

jobs:

  build_docker_volume_plugin:
    if: github.repository == 'rclone/rclone'
    needs: build
    runs-on: ubuntu-latest
    name: Build docker plugin job
    steps:
      - name: Free some space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Build and publish docker plugin
        shell: bash
        run: |
          VER=${GITHUB_REF#refs/tags/}
          PLUGIN_USER=rclone
          docker login --username ${{ secrets.DOCKER_HUB_USER }} \
            --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
          for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
            export PLUGIN_USER PLUGIN_ARCH
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
          done
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}
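The plugin loop leans on two bash parameter expansions; a quick illustration with example values:

    PLUGIN_ARCH=arm/v7
    VER=v1.69.0
    echo "${PLUGIN_ARCH/\//-}"           # arm-v7  (first "/" replaced with "-")
    echo "${PLUGIN_ARCH/\//-}-${VER#v}"  # arm-v7-1.69.0  (leading "v" stripped from VER)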
.github/workflows/build_publish_release_docker_image.yml (vendored, new file, 89 lines)
@@ -0,0 +1,89 @@
name: Docker release build

on:
  release:
    types: [published]

jobs:
  build:
    if: github.repository == 'rclone/rclone'
    runs-on: ubuntu-latest
    name: Build image job
    steps:
      - name: Free some space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Get actual patch version
        id: actual_patch_version
        run: echo ::set-output name=ACTUAL_PATCH_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g')
      - name: Get actual minor version
        id: actual_minor_version
        run: echo ::set-output name=ACTUAL_MINOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1,2)
      - name: Get actual major version
        id: actual_major_version
        run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKER_HUB_USER }}
          password: ${{ secrets.DOCKER_HUB_PASSWORD }}
      - name: Build and publish image
        uses: docker/build-push-action@v6
        with:
          file: Dockerfile
          context: .
          platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
          push: true
          tags: |
            rclone/rclone:latest
            rclone/rclone:${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }}
            rclone/rclone:${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }}
            rclone/rclone:${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}

  build_docker_volume_plugin:
    if: github.repository == 'rclone/rclone'
    needs: build
    runs-on: ubuntu-latest
    name: Build docker plugin job
    steps:
      - name: Free some space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Build and publish docker plugin
        shell: bash
        run: |
          VER=${GITHUB_REF#refs/tags/}
          PLUGIN_USER=rclone
          docker login --username ${{ secrets.DOCKER_HUB_USER }} \
            --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
          for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
            export PLUGIN_USER PLUGIN_ARCH
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
          done
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}
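The three version steps slice the semver out of GITHUB_REF with cut and sed; for an illustrative tag:

    GITHUB_REF=refs/tags/v1.69.1
    echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g'                      # 1.69.1
    echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1,2  # 1.69
    echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1    # 1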
Dockerfile (46 changed lines)
@@ -1,47 +1,21 @@
FROM golang:alpine AS builder

ARG CGO_ENABLED=0

COPY . /go/src/github.com/rclone/rclone/
WORKDIR /go/src/github.com/rclone/rclone/

RUN echo "**** Set Go Environment Variables ****" && \
    go env -w GOCACHE=/root/.cache/go-build

RUN echo "**** Install Dependencies ****" && \
    apk add --no-cache \
        make \
        bash \
        gawk \
        git

COPY go.mod .
COPY go.sum .

RUN echo "**** Download Go Dependencies ****" && \
    go mod download -x

RUN echo "**** Verify Go Dependencies ****" && \
    go mod verify

COPY . .

RUN --mount=type=cache,target=/root/.cache/go-build,sharing=locked \
    echo "**** Build Binary ****" && \
    make

RUN echo "**** Print Version Binary ****" && \
    ./rclone version
RUN apk add --no-cache make bash gawk git
RUN \
    CGO_ENABLED=0 \
    make
RUN ./rclone version

# Begin final image
FROM alpine:latest

RUN echo "**** Install Dependencies ****" && \
    apk add --no-cache \
        ca-certificates \
        fuse3 \
        tzdata && \
    echo "Enable user_allow_other in fuse" && \
    echo "user_allow_other" >> /etc/fuse.conf
LABEL org.opencontainers.image.source="https://github.com/rclone/rclone"

RUN apk --no-cache add ca-certificates fuse3 tzdata && \
    echo "user_allow_other" >> /etc/fuse.conf

COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
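The builder stage's --mount=type=cache line requires BuildKit, so a local build of this Dockerfile would look something like this (tag name illustrative, not part of the diff):

    DOCKER_BUILDKIT=1 docker build -t rclone:local .
    docker run --rm rclone:local version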
@@ -300,13 +300,14 @@ type Fs struct {

// Object describes a b2 object
type Object struct {
	fs       *Fs       // what this object is part of
	remote   string    // The remote path
	id       string    // b2 id of the file
	modTime  time.Time // The modified time of the object if known
	sha1     string    // SHA-1 hash if known
	size     int64     // Size of the object
	mimeType string    // Content-Type of the object
	fs       *Fs               // what this object is part of
	remote   string            // The remote path
	id       string            // b2 id of the file
	modTime  time.Time         // The modified time of the object if known
	sha1     string            // SHA-1 hash if known
	size     int64             // Size of the object
	mimeType string            // Content-Type of the object
	meta     map[string]string // The object metadata if known - may be nil - with lower case keys
}

// ------------------------------------------------------------

@@ -1604,6 +1605,9 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
	if err != nil {
		return err
	}
	// For now, just set "mtime" in metadata
	o.meta = make(map[string]string, 1)
	o.meta["mtime"] = o.modTime.Format(time.RFC3339Nano)
	return nil
}

@@ -1883,6 +1887,13 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
		Info: Info,
	}

	// Embryonic metadata support - just mtime
	o.meta = make(map[string]string, 1)
	modTime, err := parseTimeStringHelper(info.Info[timeKey])
	if err == nil {
		o.meta["mtime"] = modTime.Format(time.RFC3339Nano)
	}

	// When reading files from B2 via cloudflare using
	// --b2-download-url cloudflare strips the Content-Length
	// headers (presumably so it can inject stuff) so use the old

@@ -258,6 +258,12 @@ func (f *Fs) internalTestMetadata(t *testing.T, size string, uploadCutoff string
		assert.Equal(t, v, got, k)
	}

	// mtime
	for k, v := range metadata {
		got := o.meta[k]
		assert.Equal(t, v, got, k)
	}

	assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")

	// Modification time from the x-bz-info-src_last_modified_millis header
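The b2 metadata plumbing above only carries "mtime" for now; assuming a configured b2 remote (remote and path names illustrative), it should surface from the CLI via:

    rclone lsjson --metadata b2:bucket/path/file.txt
    # expect something like: "Metadata": {"mtime": "2006-01-02T15:04:05.999999999Z"}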
@@ -2480,7 +2480,7 @@ func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte)
	if len(data) > maxMetadataSizeWritten {
		return nil, false, ErrMetaTooBig
	}
	if len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
	if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
		return nil, false, errors.New("invalid json")
	}
	var metadata metaSimpleJSON
@@ -203,6 +203,7 @@ func driveScopesContainsAppFolder(scopes []string) bool {
		if scope == scopePrefix+"drive.appfolder" {
			return true
		}

	}
	return false
}
@@ -1211,7 +1212,6 @@ func fixMimeType(mimeTypeIn string) string {
	}
	return mimeTypeOut
}

func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
	out = make(map[string][]string, len(in))
	for k, v := range in {
@@ -1222,11 +1222,9 @@ func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
	}
	return out
}

func isInternalMimeType(mimeType string) bool {
	return strings.HasPrefix(mimeType, "application/vnd.google-apps.")
}

func isLinkMimeType(mimeType string) bool {
	return strings.HasPrefix(mimeType, "application/x-link-")
}
@@ -1659,8 +1657,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.F
// When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithExportInfo(
	ctx context.Context, remote string, info *drive.File,
	extension, exportName, exportMimeType string, isDocument bool,
) (o fs.Object, err error) {
	extension, exportName, exportMimeType string, isDocument bool) (o fs.Object, err error) {
	// Note that resolveShortcut will have been called already if
	// we are being called from a listing. However the drive.Item
	// will have been resolved so this will do nothing.
@@ -1851,7 +1848,6 @@ func linkTemplate(mt string) *template.Template {
	})
	return _linkTemplates[mt]
}

func (f *Fs) fetchFormats(ctx context.Context) {
	fetchFormatsOnce.Do(func() {
		var about *drive.About
@@ -1897,8 +1893,7 @@ func (f *Fs) importFormats(ctx context.Context) map[string][]string {
// Look through the exportExtensions and find the first format that can be
// converted. If none found then return ("", "", false)
func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string) (
	extension, mimeType string, isDocument bool,
) {
	extension, mimeType string, isDocument bool) {
	exportMimeTypes, isDocument := f.exportFormats(ctx)[itemMimeType]
	if isDocument {
		for _, _extension := range f.exportExtensions {
@@ -2694,7 +2689,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
	if shortcutID != "" {
		return f.delete(ctx, shortcutID, f.opt.UseTrash)
	}
	trashedFiles := false
	var trashedFiles = false
	if check {
		found, err := f.list(ctx, []string{directoryID}, "", false, false, f.opt.TrashedOnly, true, func(item *drive.File) bool {
			if !item.Trashed {
@@ -2931,6 +2926,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
		err := f.svc.Files.EmptyTrash().Context(ctx).Do()
		return f.shouldRetry(ctx, err)
	})

	if err != nil {
		return err
	}
@@ -3191,7 +3187,6 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
		}
	}()
}

func (f *Fs) changeNotifyStartPageToken(ctx context.Context) (pageToken string, err error) {
	var startPageToken *drive.StartPageToken
	err = f.pacer.Call(func() (bool, error) {
@@ -4023,13 +4018,14 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
	case "query":
		if len(arg) == 1 {
			query := arg[0]
			results, err := f.query(ctx, query)
			var results, err = f.query(ctx, query)
			if err != nil {
				return nil, fmt.Errorf("failed to execute query: %q, error: %w", query, err)
			}
			return results, nil
		} else {
			return nil, errors.New("need a query argument")
		}
		return nil, errors.New("need a query argument")
	case "rescue":
		dirID := ""
		_, delete := opt["delete"]
@@ -4089,7 +4085,6 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	}
	return "", hash.ErrUnsupported
}

func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 && t != hash.SHA1 && t != hash.SHA256 {
		return "", hash.ErrUnsupported
@@ -4104,8 +4099,7 @@ func (o *baseObject) Size() int64 {

// getRemoteInfoWithExport returns a drive.File and the export settings for the remote
func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
	info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error,
) {
	info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) {
	leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
	if err != nil {
		if err == fs.ErrorDirNotFound {
@@ -4318,13 +4312,12 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
	}
	return o.baseObject.open(ctx, o.url, options...)
}

func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	// Update the size with what we are reading as it can change from
	// the HEAD in the listing to this GET. This stops rclone marking
	// the transfer as corrupted.
	var offset, end int64 = 0, -1
	newOptions := options[:0]
	var newOptions = options[:0]
	for _, o := range options {
		// Note that Range requests don't work on Google docs:
		// https://developers.google.com/drive/v3/web/manage-downloads#partial_download
@@ -4351,10 +4344,9 @@ func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in
	}
	return
}

func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	var offset, limit int64 = 0, -1
	data := o.content
	var data = o.content
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
@@ -4379,8 +4371,7 @@ func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.
}

func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
	src fs.ObjectInfo,
) (info *drive.File, err error) {
	src fs.ObjectInfo) (info *drive.File, err error) {
	// Make the API request to upload metadata and file data.
	size := src.Size()
	if size >= 0 && size < int64(o.fs.opt.UploadCutoff) {
@@ -4458,7 +4449,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

	return nil
}

func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	srcMimeType := fs.MimeType(ctx, src)
	importMimeType := ""
@@ -4554,7 +4544,6 @@ func (o *baseObject) Metadata(ctx context.Context) (metadata fs.Metadata, err er
func (o *documentObject) ext() string {
	return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
}

func (o *linkObject) ext() string {
	return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
}
@@ -92,21 +92,6 @@ Note that these chunks are buffered in memory so increasing them will
increase memory use.`,
			Default:  10 * fs.Mebi,
			Advanced: true,
		}, {
			Name:     "access",
			Help:     "Files and folders will be uploaded with this access permission (default private)",
			Default:  "private",
			Advanced: true,
			Examples: []fs.OptionExample{{
				Value: "private",
				Help:  "The file or folder access can be granted in a way that will allow select users to view, read or write what is absolutely essential for them.",
			}, {
				Value: "public",
				Help:  "The file or folder can be downloaded by anyone from a web browser. The link can be shared in any way,",
			}, {
				Value: "hidden",
				Help:  "The file or folder can be accessed has the same restrictions as Public if the user knows the URL of the file or folder link in order to access the contents",
			}},
		}},
	})
}
@@ -117,7 +102,6 @@ type Options struct {
	Password  string               `config:"password"`
	Enc       encoder.MultiEncoder `config:"encoding"`
	ChunkSize fs.SizeSuffix        `config:"chunk_size"`
	Access    string               `config:"access"`
}

// Fs represents a remote server
@@ -751,23 +735,6 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// getAccessLevel is a helper function to determine access level integer
func getAccessLevel(access string) int64 {
	var accessLevel int64
	switch access {
	case "private":
		accessLevel = 0
	case "public":
		accessLevel = 1
	case "hidden":
		accessLevel = 2
	default:
		accessLevel = 0
		fs.Errorf(nil, "Invalid access: %s, defaulting to private", access)
	}
	return accessLevel
}

// DirCacher methods

// CreateDir makes a directory with pathID as parent and name leaf
@@ -780,7 +747,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
		SessionID:           f.session.SessionID,
		FolderName:          f.opt.Enc.FromStandardName(leaf),
		FolderSubParent:     pathID,
		FolderIsPublic:      getAccessLevel(f.opt.Access),
		FolderIsPublic:      0,
		FolderPublicUpl:     0,
		FolderPublicDisplay: 0,
		FolderPublicDnl:     0,
@@ -1113,7 +1080,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

	// Set permissions
	err = o.fs.pacer.Call(func() (bool, error) {
		update := permissions{SessionID: o.fs.session.SessionID, FileID: o.id, FileIsPublic: getAccessLevel(o.fs.opt.Access)}
		update := permissions{SessionID: o.fs.session.SessionID, FileID: o.id, FileIsPublic: 0}
		// fs.Debugf(nil, "Permissions : %#v", update)
		opts := rest.Opts{
			Method: "POST",
@@ -1,59 +0,0 @@
package s3

import (
	"context"
	"net/http"
	"time"

	"github.com/IBM/go-sdk-core/v5/core"
	"github.com/aws/aws-sdk-go-v2/aws"
	v4signer "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
)

// Authenticator defines an interface for obtaining an IAM token.
type Authenticator interface {
	GetToken() (string, error)
}

// IbmIamSigner is a structure for signing requests using IBM IAM.
// Requeres APIKey and Resource InstanceID
type IbmIamSigner struct {
	APIKey     string
	InstanceID string
	Auth       Authenticator
}

// SignHTTP signs requests using IBM IAM token.
func (signer *IbmIamSigner) SignHTTP(ctx context.Context, credentials aws.Credentials, req *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4signer.SignerOptions)) error {
	var authenticator Authenticator
	if signer.Auth != nil {
		authenticator = signer.Auth
	} else {
		authenticator = &core.IamAuthenticator{ApiKey: signer.APIKey}
	}
	token, err := authenticator.GetToken()
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+token)
	req.Header.Set("ibm-service-instance-id", signer.InstanceID)
	return nil
}

// NoOpCredentialsProvider is needed since S3 SDK requires having credentials, eventhough authentication is happening via IBM IAM.
type NoOpCredentialsProvider struct{}

// Retrieve returns mock credentials for the NoOpCredentialsProvider.
func (n *NoOpCredentialsProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {
	return aws.Credentials{
		AccessKeyID:     "NoOpAccessKey",
		SecretAccessKey: "NoOpSecretKey",
		SessionToken:    "",
		Source:          "NoOpCredentialsProvider",
	}, nil
}

// IsExpired always returns false
func (n *NoOpCredentialsProvider) IsExpired() bool {
	return false
}
@@ -1,47 +0,0 @@
package s3

import (
	"context"
	"net/http"
	"testing"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/stretchr/testify/assert"
)

type MockAuthenticator struct {
	Token string
	Error error
}

func (m *MockAuthenticator) GetToken() (string, error) {
	return m.Token, m.Error
}

func TestSignHTTP(t *testing.T) {
	apiKey := "mock-api-key"
	instanceID := "mock-instance-id"
	token := "mock-iam-token"
	mockAuth := &MockAuthenticator{
		Token: token,
		Error: nil,
	}
	signer := &IbmIamSigner{
		APIKey:     apiKey,
		InstanceID: instanceID,
		Auth:       mockAuth,
	}
	req, err := http.NewRequest("GET", "https://example.com", nil)
	if err != nil {
		t.Fatalf("Failed to create HTTP request: %v", err)
	}
	credentials := aws.Credentials{
		AccessKeyID:     "mock-access-key",
		SecretAccessKey: "mock-secret-key",
	}
	err = signer.SignHTTP(context.TODO(), credentials, req, "payload-hash", "service", "region", time.Now())
	assert.NoError(t, err, "Expected no error")
	assert.Equal(t, "Bearer "+token, req.Header.Get("Authorization"), "Authorization header should be set correctly")
	assert.Equal(t, instanceID, req.Header.Get("ibm-service-instance-id"), "ibm-service-instance-id header should be set correctly")
}
backend/s3/s3.go (190 changed lines)
@@ -36,8 +36,8 @@ import (
	"github.com/aws/smithy-go/logging"
	"github.com/aws/smithy-go/middleware"
	smithyhttp "github.com/aws/smithy-go/transport/http"
	"github.com/ncw/swift/v2"

	"github.com/ncw/swift/v2"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/chunksize"
@@ -934,67 +934,34 @@ func init() {
			Help:  "The default endpoint\nIran",
		}},
	}, {
		// Linode endpoints: https://techdocs.akamai.com/cloud-computing/docs/object-storage-product-limits#supported-endpoint-types-by-region
		// Linode endpoints: https://www.linode.com/docs/products/storage/object-storage/guides/urls/#cluster-url-s3-endpoint
		Name:     "endpoint",
		Help:     "Endpoint for Linode Object Storage API.",
		Provider: "Linode",
		Examples: []fs.OptionExample{{
			Value: "nl-ams-1.linodeobjects.com",
			Help:  "Amsterdam (Netherlands), nl-ams-1",
		}, {
			Value: "us-southeast-1.linodeobjects.com",
			Help:  "Atlanta, GA (USA), us-southeast-1",
		}, {
			Value: "in-maa-1.linodeobjects.com",
			Help:  "Chennai (India), in-maa-1",
		}, {
			Value: "us-ord-1.linodeobjects.com",
			Help:  "Chicago, IL (USA), us-ord-1",
		}, {
			Value: "eu-central-1.linodeobjects.com",
			Help:  "Frankfurt (Germany), eu-central-1",
		}, {
			Value: "id-cgk-1.linodeobjects.com",
			Help:  "Jakarta (Indonesia), id-cgk-1",
		}, {
			Value: "gb-lon-1.linodeobjects.com",
			Help:  "London 2 (Great Britain), gb-lon-1",
		}, {
			Value: "us-lax-1.linodeobjects.com",
			Help:  "Los Angeles, CA (USA), us-lax-1",
		}, {
			Value: "es-mad-1.linodeobjects.com",
			Help:  "Madrid (Spain), es-mad-1",
		}, {
			Value: "au-mel-1.linodeobjects.com",
			Help:  "Melbourne (Australia), au-mel-1",
		}, {
			Value: "us-mia-1.linodeobjects.com",
			Help:  "Miami, FL (USA), us-mia-1",
		}, {
			Value: "it-mil-1.linodeobjects.com",
			Help:  "Milan (Italy), it-mil-1",
		}, {
			Value: "us-east-1.linodeobjects.com",
			Help:  "Newark, NJ (USA), us-east-1",
		}, {
			Value: "jp-osa-1.linodeobjects.com",
			Help:  "Osaka (Japan), jp-osa-1",
		}, {
			Value: "fr-par-1.linodeobjects.com",
			Help:  "Paris (France), fr-par-1",
		}, {
			Value: "br-gru-1.linodeobjects.com",
			Help:  "São Paulo (Brazil), br-gru-1",
		}, {
			Value: "us-sea-1.linodeobjects.com",
			Help:  "Seattle, WA (USA), us-sea-1",
		}, {
			Value: "ap-south-1.linodeobjects.com",
			Help:  "Singapore, ap-south-1",
		}, {
			Value: "sg-sin-1.linodeobjects.com",
			Help:  "Singapore 2, sg-sin-1",
			Help:  "Singapore ap-south-1",
		}, {
			Value: "se-sto-1.linodeobjects.com",
			Help:  "Stockholm (Sweden), se-sto-1",
@@ -1389,10 +1356,6 @@ func init() {
			Value:    "sfo3.digitaloceanspaces.com",
			Help:     "DigitalOcean Spaces San Francisco 3",
			Provider: "DigitalOcean",
		}, {
			Value:    "sfo2.digitaloceanspaces.com",
			Help:     "DigitalOcean Spaces San Francisco 2",
			Provider: "DigitalOcean",
		}, {
			Value:    "fra1.digitaloceanspaces.com",
			Help:     "DigitalOcean Spaces Frankfurt 1",
@@ -1409,18 +1372,6 @@ func init() {
			Value:    "sgp1.digitaloceanspaces.com",
			Help:     "DigitalOcean Spaces Singapore 1",
			Provider: "DigitalOcean",
		}, {
			Value:    "lon1.digitaloceanspaces.com",
			Help:     "DigitalOcean Spaces London 1",
			Provider: "DigitalOcean",
		}, {
			Value:    "tor1.digitaloceanspaces.com",
			Help:     "DigitalOcean Spaces Toronto 1",
			Provider: "DigitalOcean",
		}, {
			Value:    "blr1.digitaloceanspaces.com",
			Help:     "DigitalOcean Spaces Bangalore 1",
			Provider: "DigitalOcean",
		}, {
			Value: "localhost:8333",
			Help:  "SeaweedFS S3 localhost",
@@ -2713,34 +2664,6 @@ knows about - please make a bug report if not.
You can change this if you want to disable the use of multipart uploads.
This shouldn't be necessary in normal operation.

This should be automatically set correctly for all providers rclone
knows about - please make a bug report if not.
`,
			Default:  fs.Tristate{},
			Advanced: true,
		}, {
			Name: "use_x_id",
			Help: `Set if rclone should add x-id URL parameters.

You can change this if you want to disable the AWS SDK from
adding x-id URL parameters.

This shouldn't be necessary in normal operation.

This should be automatically set correctly for all providers rclone
knows about - please make a bug report if not.
`,
			Default:  fs.Tristate{},
			Advanced: true,
		}, {
			Name: "sign_accept_encoding",
			Help: `Set if rclone should include Accept-Encoding as part of the signature.

You can change this if you want to stop rclone including
Accept-Encoding as part of the signature.

This shouldn't be necessary in normal operation.

This should be automatically set correctly for all providers rclone
knows about - please make a bug report if not.
`,
@@ -2797,16 +2720,6 @@ use |-vv| to see the debug level logs.
			Default:  sdkLogMode(0),
			Advanced: true,
		},
		{
			Name:     "ibm_api_key",
			Help:     "IBM API Key to be used to obtain IAM token",
			Provider: "IBMCOS",
		},
		{
			Name:     "ibm_resource_instance_id",
			Help:     "IBM service instance id",
			Provider: "IBMCOS",
		},
	}})
}

@@ -2960,10 +2873,6 @@ type Options struct {
	UseUnsignedPayload fs.Tristate `config:"use_unsigned_payload"`
	SDKLogMode         sdkLogMode  `config:"sdk_log_mode"`
	DirectoryBucket    bool        `config:"directory_bucket"`
	IBMAPIKey          string      `config:"ibm_api_key"`
	IBMInstanceID      string      `config:"ibm_resource_instance_id"`
	UseXID             fs.Tristate `config:"use_x_id"`
	SignAcceptEncoding fs.Tristate `config:"sign_accept_encoding"`
}

// Fs represents a remote s3 server
@@ -3148,67 +3057,59 @@ func getClient(ctx context.Context, opt *Options) *http.Client {
	}
}

// Fixup the request if needed.
//
// Google Cloud Storage alters the Accept-Encoding header, which
// breaks the v2 request signature. This is set with opt.SignAcceptEncoding.
// breaks the v2 request signature
//
// It also doesn't like the x-id URL parameter SDKv2 puts in so we
// remove that too. This is set with opt.UseXID.Value.
// remove that too.
//
// See https://github.com/aws/aws-sdk-go-v2/issues/1816.
// Adapted from: https://github.com/aws/aws-sdk-go-v2/issues/1816#issuecomment-1927281540
func fixupRequest(o *s3.Options, opt *Options) {
func fixupGCS(o *s3.Options) {
	type ignoredHeadersKey struct{}
	headers := []string{"Accept-Encoding"}

	fixup := middleware.FinalizeMiddlewareFunc(
		"FixupRequest",
		"FixupGCS",
		func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (out middleware.FinalizeOutput, metadata middleware.Metadata, err error) {
			req, ok := in.Request.(*smithyhttp.Request)
			if !ok {
				return out, metadata, fmt.Errorf("fixupRequest: unexpected request middleware type %T", in.Request)
				return out, metadata, fmt.Errorf("fixupGCS: unexpected request middleware type %T", in.Request)
			}

			if !opt.SignAcceptEncoding.Value {
				// Delete headers from being signed - will restore later
				ignored := make(map[string]string, len(headers))
				for _, h := range headers {
					ignored[h] = req.Header.Get(h)
					req.Header.Del(h)
				}

				// Store ignored on context
				ctx = middleware.WithStackValue(ctx, ignoredHeadersKey{}, ignored)
			// Delete headers from being signed - will restore later
			ignored := make(map[string]string, len(headers))
			for _, h := range headers {
				ignored[h] = req.Header.Get(h)
				req.Header.Del(h)
			}

			if !opt.UseXID.Value {
				// Remove x-id
				if query := req.URL.Query(); query.Has("x-id") {
					query.Del("x-id")
					req.URL.RawQuery = query.Encode()
				}
			// Remove x-id because Google doesn't like them
			if query := req.URL.Query(); query.Has("x-id") {
				query.Del("x-id")
				req.URL.RawQuery = query.Encode()
			}

			// Store ignored on context
			ctx = middleware.WithStackValue(ctx, ignoredHeadersKey{}, ignored)

			return next.HandleFinalize(ctx, in)
		},
	)

	// Restore headers if necessary
	restore := middleware.FinalizeMiddlewareFunc(
		"FixupRequestRestoreHeaders",
		"FixupGCSRestoreHeaders",
		func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (out middleware.FinalizeOutput, metadata middleware.Metadata, err error) {
			req, ok := in.Request.(*smithyhttp.Request)
			if !ok {
				return out, metadata, fmt.Errorf("fixupRequest: unexpected request middleware type %T", in.Request)
				return out, metadata, fmt.Errorf("fixupGCS: unexpected request middleware type %T", in.Request)
			}

			if !opt.SignAcceptEncoding.Value {
				// Restore ignored from ctx
				ignored, _ := middleware.GetStackValue(ctx, ignoredHeadersKey{}).(map[string]string)
				for k, v := range ignored {
					req.Header.Set(k, v)
				}
			// Restore ignored from ctx
			ignored, _ := middleware.GetStackValue(ctx, ignoredHeadersKey{}).(map[string]string)
			for k, v := range ignored {
				req.Header.Set(k, v)
			}

			return next.HandleFinalize(ctx, in)
@@ -3254,7 +3155,6 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Cli

	// Try to fill in the config from the environment if env_auth=true
	if opt.EnvAuth && opt.AccessKeyID == "" && opt.SecretAccessKey == "" {

		configOpts := []func(*awsconfig.LoadOptions) error{}
		// Set the name of the profile if supplied
		if opt.Profile != "" {
@@ -3268,12 +3168,8 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Cli
		if err != nil {
			return nil, fmt.Errorf("couldn't load configuration with env_auth=true: %w", err)
		}

	} else {
		switch {
		case opt.Provider == "IBMCOS" && opt.V2Auth:
			awsConfig.Credentials = &NoOpCredentialsProvider{}
			fs.Debugf(nil, "Using IBM IAM")
		case opt.AccessKeyID == "" && opt.SecretAccessKey == "":
			// if no access key/secret and iam is explicitly disabled then fall back to anon interaction
			awsConfig.Credentials = aws.AnonymousCredentials{}
@@ -3327,21 +3223,14 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Cli

	if opt.V2Auth || opt.Region == "other-v2-signature" {
		fs.Debugf(nil, "Using v2 auth")
		if opt.Provider == "IBMCOS" && opt.IBMAPIKey != "" && opt.IBMInstanceID != "" {
			options = append(options, func(s3Opt *s3.Options) {
				s3Opt.HTTPSignerV4 = &IbmIamSigner{APIKey: opt.IBMAPIKey, InstanceID: opt.IBMInstanceID}
			})
		} else {
			options = append(options, func(s3Opt *s3.Options) {
				s3Opt.HTTPSignerV4 = &v2Signer{opt: opt}
			})
		}
		options = append(options, func(s3Opt *s3.Options) {
			s3Opt.HTTPSignerV4 = &v2Signer{opt: opt}
		})
	}

	// Fixup the request if needed
	if !opt.UseXID.Value || !opt.SignAcceptEncoding.Value {
	if opt.Provider == "GCS" {
		options = append(options, func(o *s3.Options) {
			fixupRequest(o, opt)
			fixupGCS(o)
		})
	}

@@ -3456,8 +3345,6 @@ func setQuirks(opt *Options) {
		useAlreadyExists    = true // Set if provider returns AlreadyOwnedByYou or no error if you try to remake your own bucket
		useMultipartUploads = true // Set if provider supports multipart uploads
		useUnsignedPayload  = true // Do we need to use unsigned payloads to avoid seeking in PutObject
		useXID              = true // Add x-id URL parameter into requests
		signAcceptEncoding  = true // If we should include AcceptEncoding in the signature
	)
	switch opt.Provider {
	case "AWS":
@@ -3602,14 +3489,11 @@ func setQuirks(opt *Options) {
		// Google break request Signature by mutating accept-encoding HTTP header
		// https://github.com/rclone/rclone/issues/6670
		useAcceptEncodingGzip = false
		signAcceptEncoding = false
		useAlreadyExists = true // returns BucketNameUnavailable instead of BucketAlreadyExists but good enough!
		// GCS S3 doesn't support multi-part server side copy:
		// See: https://issuetracker.google.com/issues/323465186
		// So make cutoff very large which it does seem to support
		opt.CopyCutoff = math.MaxInt64
		// GCS doesn't like the x-id URL parameter the SDKv2 inserts
		useXID = false
	default: //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid defaultCaseOrder: consider to make `default` case as first or as last case
		fs.Logf("s3", "s3 provider %q not known - please set correctly", opt.Provider)
		fallthrough
@@ -3679,18 +3563,6 @@ func setQuirks(opt *Options) {
		opt.UseUnsignedPayload.Valid = true
		opt.UseUnsignedPayload.Value = useUnsignedPayload
	}

	// Set the correct use UseXID if not manually set
	if !opt.UseXID.Valid {
		opt.UseXID.Valid = true
		opt.UseXID.Value = useXID
	}

	// Set the correct SignAcceptEncoding if not manually set
	if !opt.SignAcceptEncoding.Valid {
		opt.SignAcceptEncoding.Valid = true
		opt.SignAcceptEncoding.Value = signAcceptEncoding
	}
}

// setRoot changes the root of the Fs
@@ -2,10 +2,8 @@ package smb

import (
	"context"
	"errors"
	"fmt"
	"net"
	"os"
	"time"

	smb2 "github.com/cloudsoda/go-smb2"
@@ -13,17 +11,14 @@ import (
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/fshttp"
	"golang.org/x/sync/errgroup"
)

// dial starts a client connection to the given SMB server. It is a
// convenience function that connects to the given network address,
// initiates the SMB handshake, and then sets up a Client.
//
// The context is only used for establishing the connection, not after.
func (f *Fs) dial(ctx context.Context, network, addr string) (*conn, error) {
	dialer := fshttp.NewDialer(ctx)
	tconn, err := dialer.DialContext(ctx, network, addr)
	tconn, err := dialer.Dial(network, addr)
	if err != nil {
		return nil, err
	}
@@ -94,7 +89,15 @@ func (c *conn) close() (err error) {

// True if it's closed
func (c *conn) closed() bool {
	return c.smbSession.Echo() != nil
	var nopErr error
	if c.smbShare != nil {
		// stat the current directory
		_, nopErr = c.smbShare.Stat(".")
	} else {
		// list the shares
		_, nopErr = c.smbSession.ListSharenames()
	}
	return nopErr != nil
}

// Show that we are using a SMB session
@@ -115,20 +118,23 @@ func (f *Fs) getSessions() int32 {
}

// Open a new connection to the SMB server.
//
// The context is only used for establishing the connection, not after.
func (f *Fs) newConnection(ctx context.Context, share string) (c *conn, err error) {
	c, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port)
	// As we are pooling these connections we need to decouple
	// them from the current context
	bgCtx := context.Background()

	c, err = f.dial(bgCtx, "tcp", f.opt.Host+":"+f.opt.Port)
	if err != nil {
		return nil, fmt.Errorf("couldn't connect SMB: %w", err)
	}
	if share != "" {
		// mount the specified share as well if user requested
		err = c.mountShare(share)
		c.smbShare, err = c.smbSession.Mount(share)
		if err != nil {
			_ = c.smbSession.Logoff()
			return nil, fmt.Errorf("couldn't initialize SMB: %w", err)
		}
		c.smbShare = c.smbShare.WithContext(bgCtx)
	}
	return c, nil
}
@@ -186,29 +192,22 @@ func (f *Fs) getConnection(ctx context.Context, share string) (c *conn, err erro
// Return a SMB connection to the pool
//
// It nils the pointed to connection out so it can't be reused
//
// if err is not nil then it checks the connection is alive using an
// ECHO request
func (f *Fs) putConnection(pc **conn, err error) {
	if pc == nil {
		return
	}
func (f *Fs) putConnection(pc **conn) {
	c := *pc
	if c == nil {
		return
	}
	*pc = nil
	if err != nil {
		// If not a regular SMB error then check the connection
		if !(errors.Is(err, os.ErrNotExist) || errors.Is(err, os.ErrExist) || errors.Is(err, os.ErrPermission)) {
			echoErr := c.smbSession.Echo()
			if echoErr != nil {
				fs.Debugf(f, "Connection failed, closing: %v", echoErr)
				_ = c.close()
				return
			}
			fs.Debugf(f, "Connection OK after error: %v", err)
		}

	var nopErr error
	if c.smbShare != nil {
		// stat the current directory
		_, nopErr = c.smbShare.Stat(".")
	} else {
		// list the shares
		_, nopErr = c.smbSession.ListSharenames()
	}
	if nopErr != nil {
		fs.Debugf(f, "Connection failed, closing: %v", nopErr)
		_ = c.close()
		return
	}

	f.poolMu.Lock()
@@ -236,18 +235,15 @@ func (f *Fs) drainPool(ctx context.Context) (err error) {
	if len(f.pool) != 0 {
		fs.Debugf(f, "Closing %d unused connections", len(f.pool))
	}

	g, _ := errgroup.WithContext(ctx)
	for i, c := range f.pool {
		g.Go(func() (err error) {
			if !c.closed() {
				err = c.close()
		if !c.closed() {
			cErr := c.close()
			if cErr != nil {
				err = cErr
			}
			f.pool[i] = nil
			return err
		})
	}
		f.pool[i] = nil
	}
	err = g.Wait()
	f.pool = nil
	return err
}
@@ -25,7 +25,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
minSleep = 10 * time.Millisecond
|
||||
minSleep = 100 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
)
|
||||
@@ -207,7 +207,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
        return nil, err
    }
    stat, err := cn.smbShare.Stat(f.toSambaPath(dir))
    f.putConnection(&cn, err)
    f.putConnection(&cn)
    if err != nil {
        // ignore stat error here
        return f, nil
@@ -268,7 +268,7 @@ func (f *Fs) findObjectSeparate(ctx context.Context, share, path string) (fs.Obj
        return nil, err
    }
    stat, err := cn.smbShare.Stat(f.toSambaPath(path))
    f.putConnection(&cn, err)
    f.putConnection(&cn)
    if err != nil {
        return nil, translateError(err, false)
    }
@@ -290,7 +290,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
        return err
    }
    err = cn.smbShare.MkdirAll(f.toSambaPath(path), 0o755)
    f.putConnection(&cn, err)
    f.putConnection(&cn)
    return err
}

@@ -305,7 +305,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
        return err
    }
    err = cn.smbShare.Remove(f.toSambaPath(path))
    f.putConnection(&cn, err)
    f.putConnection(&cn)
    return err
}

@@ -375,7 +375,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (_ fs.Objec
        return nil, err
    }
    err = cn.smbShare.Rename(f.toSambaPath(srcPath), f.toSambaPath(dstPath))
    f.putConnection(&cn, err)
    f.putConnection(&cn)
    if err != nil {
        return nil, translateError(err, false)
    }
@@ -412,7 +412,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
    if err != nil {
        return err
    }
    defer f.putConnection(&cn, err)
    defer f.putConnection(&cn)

    _, err = cn.smbShare.Stat(dstPath)
    if os.IsNotExist(err) {
@@ -430,7 +430,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
    if err != nil {
        return nil, err
    }
    defer f.putConnection(&cn, err)
    defer f.putConnection(&cn)

    if share == "" {
        shares, err := cn.smbSession.ListSharenames()
@@ -474,7 +474,7 @@ func (f *Fs) About(ctx context.Context) (_ *fs.Usage, err error) {
        return nil, err
    }
    stat, err := cn.smbShare.Statfs(dir)
    f.putConnection(&cn, err)
    f.putConnection(&cn)
    if err != nil {
        return nil, err
    }
@@ -556,7 +556,7 @@ func (f *Fs) ensureDirectory(ctx context.Context, share, _path string) error {
        return err
    }
    err = cn.smbShare.MkdirAll(f.toSambaPath(dir), 0o755)
    f.putConnection(&cn, err)
    f.putConnection(&cn)
    return err
}

@@ -604,7 +604,7 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) (err error) {
    if err != nil {
        return err
    }
    defer o.fs.putConnection(&cn, err)
    defer o.fs.putConnection(&cn)

    err = cn.smbShare.Chtimes(reqDir, t, t)
    if err != nil {
@@ -650,25 +650,24 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
    }
    fl, err := cn.smbShare.OpenFile(filename, os.O_RDONLY, 0)
    if err != nil {
        o.fs.putConnection(&cn, err)
        o.fs.putConnection(&cn)
        return nil, fmt.Errorf("failed to open: %w", err)
    }
    pos, err := fl.Seek(offset, io.SeekStart)
    if err != nil {
        o.fs.putConnection(&cn, err)
        o.fs.putConnection(&cn)
        return nil, fmt.Errorf("failed to seek: %w", err)
    }
    if pos != offset {
        err = fmt.Errorf("failed to seek: wrong position (expected=%d, reported=%d)", offset, pos)
        o.fs.putConnection(&cn, err)
        return nil, err
        o.fs.putConnection(&cn)
        return nil, fmt.Errorf("failed to seek: wrong position (expected=%d, reported=%d)", offset, pos)
    }

    in = readers.NewLimitedReadCloser(fl, limit)
    in = &boundReadCloser{
        rc: in,
        close: func() error {
            o.fs.putConnection(&cn, nil)
            o.fs.putConnection(&cn)
            return nil
        },
    }
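Open hands the caller a reader that owns a pooled connection, so the boundReadCloser wrapper makes Close both close the underlying reader and return the connection. A generic sketch of such a close-hook wrapper (hookReadCloser is a made-up name, not rclone's type):

```
package main

import (
    "fmt"
    "io"
    "strings"
)

// hookReadCloser wraps an io.ReadCloser and runs an extra hook
// (e.g. returning a pooled connection) when it is closed.
type hookReadCloser struct {
    rc      io.ReadCloser
    onClose func() error
}

func (h *hookReadCloser) Read(p []byte) (int, error) { return h.rc.Read(p) }

func (h *hookReadCloser) Close() error {
    err := h.rc.Close()
    if hookErr := h.onClose(); err == nil {
        err = hookErr
    }
    return err
}

func main() {
    rc := io.NopCloser(strings.NewReader("hello"))
    wrapped := &hookReadCloser{rc: rc, onClose: func() error {
        fmt.Println("connection returned to pool")
        return nil
    }}
    data, _ := io.ReadAll(wrapped)
    _ = wrapped.Close()
    fmt.Println(string(data))
}
```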
@@ -698,7 +697,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
        return err
    }
    defer func() {
        o.fs.putConnection(&cn, err)
        o.fs.putConnection(&cn)
    }()

    fl, err := cn.smbShare.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644)
@@ -758,7 +757,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
    }

    err = cn.smbShare.Remove(filename)
    o.fs.putConnection(&cn, err)
    o.fs.putConnection(&cn)

    return err
}
@@ -120,7 +120,7 @@ func init() {
    srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(rootURL) // FIXME

    // FIXME
    // err = f.pacer.Call(func() (bool, error) {
    //err = f.pacer.Call(func() (bool, error) {
    resp, err = srv.CallXML(context.Background(), &opts, &authRequest, nil)
    // return shouldRetry(ctx, resp, err)
    //})
@@ -327,7 +327,7 @@ func (f *Fs) readMetaDataForID(ctx context.Context, ID string) (info *api.File,
func (f *Fs) getAuthToken(ctx context.Context) error {
    fs.Debugf(f, "Renewing token")

    authRequest := api.TokenAuthRequest{
    var authRequest = api.TokenAuthRequest{
        AccessKeyID:      withDefault(f.opt.AccessKeyID, accessKeyID),
        PrivateAccessKey: withDefault(f.opt.PrivateAccessKey, obscure.MustReveal(encryptedPrivateAccessKey)),
        RefreshToken:     f.opt.RefreshToken,
@@ -509,7 +509,7 @@ func errorHandler(resp *http.Response) (err error) {
        return fmt.Errorf("error reading error out of body: %w", err)
    }
    match := findError.FindSubmatch(body)
    if len(match) < 2 || len(match[1]) == 0 {
    if match == nil || len(match) < 2 || len(match[1]) == 0 {
        return fmt.Errorf("HTTP error %v (%v) returned body: %q", resp.StatusCode, resp.Status, body)
    }
    return fmt.Errorf("HTTP error %v (%v): %s", resp.StatusCode, resp.Status, match[1])
@@ -552,7 +552,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {

// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
    // fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
    //fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
    // Find the leaf in pathID
    found, err = f.listAll(ctx, pathID, nil, func(item *api.Collection) bool {
        if strings.EqualFold(item.Name, leaf) {
@@ -108,8 +108,6 @@ var logReplacements = []string{
    `^.*?Can't compare hashes, so using check --download.*?$`, dropMe,
    // ignore timestamps in directory time updates
    `^(INFO : .*?: (Made directory with|Set directory) (metadata|modification time)).*$`, dropMe,
    // ignore equivalent log for backends lacking dir modtime support
    `^(INFO : .*?: Making directory).*$`, dropMe,
    // ignore sizes in directory time updates
    `^(NOTICE: .*?: Skipped set directory modification time as --dry-run is set).*$`, dropMe,
    // ignore sizes in directory metadata updates
@@ -748,16 +746,6 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
    case "test-func":
        b.TestFn = testFunc
        return
    case "concurrent-func":
        b.TestFn = func() {
            src := filepath.Join(b.dataDir, "file7.txt")
            dst := "file1.txt"
            err := b.copyFile(ctx, src, b.replaceHex(b.path2), dst)
            if err != nil {
                fs.Errorf(src, "error copying file: %v", err)
            }
        }
        return
    case "fix-names":
        // in case the local os converted any filenames
        ci.NoUnicodeNormalization = true
@@ -883,9 +871,10 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
        if !ok || err != nil {
            fs.Logf(remotePath, "Can't find expected file %s (was it renamed by the os?) %v", args[1], err)
            return
        } else {
            // include hash of filename to make unicode form differences easier to see in logs
            fs.Debugf(remotePath, "verified file exists at correct path. filename hash: %s", stringToHash(leaf))
        }
        // include hash of filename to make unicode form differences easier to see in logs
        fs.Debugf(remotePath, "verified file exists at correct path. filename hash: %s", stringToHash(leaf))
        return
    default:
        return fmt.Errorf("unknown command: %q", args[0])
@@ -161,7 +161,9 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string,
        return
    }

    err = b.checkListing(now, newListing, "current "+msg)
    if err == nil {
        err = b.checkListing(now, newListing, "current "+msg)
    }
    if err != nil {
        return
    }
@@ -284,7 +286,7 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string,
}

// applyDeltas
func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (results2to1, results1to2 []Results, queues queues, err error) {
func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (changes1, changes2 bool, results2to1, results1to2 []Results, queues queues, err error) {
    path1 := bilib.FsPath(b.fs1)
    path2 := bilib.FsPath(b.fs2)

@@ -365,7 +367,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (result
        }
    }

    // if there are potential conflicts to check, check them all here (outside the loop) in one fell swoop
    //if there are potential conflicts to check, check them all here (outside the loop) in one fell swoop
    matches, err := b.checkconflicts(ctxCheck, filterCheck, b.fs1, b.fs2)

    for _, file := range ds1.sort() {
@@ -390,7 +392,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (result
        } else if d2.is(deltaOther) {
            b.indent("!WARNING", file, "New or changed in both paths")

            // if files are identical, leave them alone instead of renaming
            //if files are identical, leave them alone instead of renaming
            if (dirs1.has(file) || dirs1.has(alias)) && (dirs2.has(file) || dirs2.has(alias)) {
                fs.Infof(nil, "This is a directory, not a file. Skipping equality check and will not rename: %s", file)
                ls1.getPut(file, skippedDirs1)
@@ -484,6 +486,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (result

    // Do the batch operation
    if copy2to1.NotEmpty() && !b.InGracefulShutdown {
        changes1 = true
        b.indent("Path2", "Path1", "Do queued copies to")
        ctx = b.setBackupDir(ctx, 1)
        results2to1, err = b.fastCopy(ctx, b.fs2, b.fs1, copy2to1, "copy2to1")
@@ -495,11 +498,12 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (result
            return
        }

        // copy empty dirs from path2 to path1 (if --create-empty-src-dirs)
        //copy empty dirs from path2 to path1 (if --create-empty-src-dirs)
        b.syncEmptyDirs(ctx, b.fs1, copy2to1, dirs2, &results2to1, "make")
    }

    if copy1to2.NotEmpty() && !b.InGracefulShutdown {
        changes2 = true
        b.indent("Path1", "Path2", "Do queued copies to")
        ctx = b.setBackupDir(ctx, 2)
        results1to2, err = b.fastCopy(ctx, b.fs1, b.fs2, copy1to2, "copy1to2")
@@ -511,7 +515,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (result
            return
        }

        // copy empty dirs from path1 to path2 (if --create-empty-src-dirs)
        //copy empty dirs from path1 to path2 (if --create-empty-src-dirs)
        b.syncEmptyDirs(ctx, b.fs2, copy1to2, dirs1, &results1to2, "make")
    }

@@ -519,7 +523,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (result
        if err = b.saveQueue(delete1, "delete1"); err != nil {
            return
        }
        // propagate deletions of empty dirs from path2 to path1 (if --create-empty-src-dirs)
        //propagate deletions of empty dirs from path2 to path1 (if --create-empty-src-dirs)
        b.syncEmptyDirs(ctx, b.fs1, delete1, dirs1, &results2to1, "remove")
    }

@@ -527,7 +531,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (result
        if err = b.saveQueue(delete2, "delete2"); err != nil {
            return
        }
        // propagate deletions of empty dirs from path1 to path2 (if --create-empty-src-dirs)
        //propagate deletions of empty dirs from path1 to path2 (if --create-empty-src-dirs)
        b.syncEmptyDirs(ctx, b.fs2, delete2, dirs2, &results1to2, "remove")
    }
@@ -359,6 +359,8 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {

    // Determine and apply changes to Path1 and Path2
    noChanges := ds1.empty() && ds2.empty()
    changes1 := false // 2to1
    changes2 := false // 1to2
    results2to1 := []Results{}
    results1to2 := []Results{}

@@ -368,7 +370,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
        fs.Infof(nil, "No changes found")
    } else {
        fs.Infof(nil, "Applying changes")
        results2to1, results1to2, queues, err = b.applyDeltas(octx, ds1, ds2)
        changes1, changes2, results2to1, results1to2, queues, err = b.applyDeltas(octx, ds1, ds2)
        if err != nil {
            if b.InGracefulShutdown && (err == context.Canceled || err == accounting.ErrorMaxTransferLimitReachedGraceful || strings.Contains(err.Error(), "context canceled")) {
                fs.Infof(nil, "Ignoring sync error due to Graceful Shutdown: %v", err)
@@ -393,11 +395,21 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
    }
    b.saveOldListings()
    // save new listings
    // NOTE: "changes" in this case does not mean this run vs. last run, it means start of this run vs. end of this run.
    // i.e. whether we can use the March lst-new as this side's lst without modifying it.
    if noChanges {
        b.replaceCurrentListings()
    } else {
        err1 = b.modifyListing(fctx, b.fs2, b.fs1, results2to1, queues, false) // 2to1
        err2 = b.modifyListing(fctx, b.fs1, b.fs2, results1to2, queues, true) // 1to2
        if changes1 || b.InGracefulShutdown { // 2to1
            err1 = b.modifyListing(fctx, b.fs2, b.fs1, results2to1, queues, false)
        } else {
            err1 = bilib.CopyFileIfExists(b.newListing1, b.listing1)
        }
        if changes2 || b.InGracefulShutdown { // 1to2
            err2 = b.modifyListing(fctx, b.fs1, b.fs2, results1to2, queues, true)
        } else {
            err2 = bilib.CopyFileIfExists(b.newListing2, b.listing2)
        }
    }
    if b.DebugName != "" {
        l1, _ := b.loadListing(b.listing1)
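The effect of the new bookkeeping: a side's listing is rebuilt via modifyListing only when that side actually changed during this run (or a graceful shutdown makes its state uncertain); otherwise the fresh lst-new from the March pass can be promoted as-is. A toy sketch of the per-side decision (function name hypothetical):

```
package main

import "fmt"

// chooseListingUpdate mirrors the per-side decision: reuse the fresh
// listing verbatim when nothing changed on that side, otherwise
// reconstruct it from the sync results. Purely illustrative.
func chooseListingUpdate(changed, gracefulShutdown bool) string {
    if changed || gracefulShutdown {
        return "modifyListing (rebuild from results)"
    }
    return "copy lst-new over lst (use March listing as-is)"
}

func main() {
    for _, c := range []struct{ changed, shutdown bool }{
        {false, false}, {true, false}, {false, true},
    } {
        fmt.Printf("changed=%v shutdown=%v -> %s\n",
            c.changed, c.shutdown, chooseListingUpdate(c.changed, c.shutdown))
    }
}
```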
@@ -1 +0,0 @@
"file1.txt"
@@ -1,10 +0,0 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"
@@ -1,10 +0,0 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"
@@ -1,10 +0,0 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"
@@ -1,10 +0,0 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"
@@ -1,10 +0,0 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"
@@ -1,10 +0,0 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"
@@ -1,73 +0,0 @@
[36m(01) :[0m [34mtest concurrent[0m

[36m(02) :[0m [34mtest initial bisync[0m
[36m(03) :[0m [34mbisync resync[0m
INFO : [2mSetting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.[0m
INFO : Bisyncing with Comparison Settings:
{
"Modtime": true,
"Size": true,
"Checksum": false,
"NoSlowHash": false,
"SlowHashSyncOnly": false,
"DownloadHash": false
}
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

[36m(04) :[0m [34mtest changed on one path - file1[0m
[36m(05) :[0m [34mtouch-glob 2001-01-02 {datadir/} file5R.txt[0m
[36m(06) :[0m [34mtouch-glob 2023-08-26 {datadir/} file7.txt[0m
[36m(07) :[0m [34mcopy-as {datadir/}file5R.txt {path2/} file1.txt[0m

[36m(08) :[0m [34mtest bisync with file changed during[0m
[36m(09) :[0m [34mconcurrent-func[0m
[36m(10) :[0m [34mbisync[0m
INFO : [2mSetting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.[0m
INFO : Bisyncing with Comparison Settings:
{
"Modtime": true,
"Size": true,
"Checksum": false,
"NoSlowHash": false,
"SlowHashSyncOnly": false,
"DownloadHash": false
}
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Building Path1 and Path2 listings
INFO : Path1 checking for diffs
INFO : Path2 checking for diffs
INFO : - [34mPath2[0m [35m[33mFile changed: [35msize (larger)[0m, [35mtime (newer)[0m[0m[0m - [36mfile1.txt[0m
INFO : Path2: 1 changes: [32m 0 new[0m, [33m 1 modified[0m, [31m 0 deleted[0m
INFO : ([33mModified[0m: [36m 1 newer[0m, [34m 0 older[0m, [36m 1 larger[0m, [34m 0 smaller[0m)
INFO : Applying changes
INFO : - [34mPath2[0m [35m[32mQueue copy to[0m Path1[0m - [36m{path1/}file1.txt[0m
INFO : - [34mPath2[0m [35mDo queued copies to[0m - [36mPath1[0m
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

[36m(11) :[0m [34mbisync[0m
INFO : [2mSetting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.[0m
INFO : Bisyncing with Comparison Settings:
{
"Modtime": true,
"Size": true,
"Checksum": false,
"NoSlowHash": false,
"SlowHashSyncOnly": false,
"DownloadHash": false
}
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Building Path1 and Path2 listings
INFO : Path1 checking for diffs
INFO : Path2 checking for diffs
INFO : No changes found
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -1 +0,0 @@
This file is used for testing the health of rclone accesses to the local/remote file system. Do not delete.
@@ -1 +0,0 @@
This file is newer
@@ -1 +0,0 @@
This file is newer
@@ -1 +0,0 @@
This file is newer
@@ -1 +0,0 @@
Newer version
@@ -1 +0,0 @@
This file is newer and not equal to 5R
@@ -1 +0,0 @@
This file is newer and not equal to 5L
@@ -1 +0,0 @@
This file is newer
@@ -1 +0,0 @@
This file is newer
cmd/bisync/testdata/test_concurrent/scenario.txt
@@ -1,15 +0,0 @@
test concurrent

test initial bisync
bisync resync

test changed on one path - file1
touch-glob 2001-01-02 {datadir/} file5R.txt
touch-glob 2023-08-26 {datadir/} file7.txt
copy-as {datadir/}file5R.txt {path2/} file1.txt

test bisync with file changed during
concurrent-func
bisync

bisync
@@ -430,7 +430,7 @@ func initConfig() {
    }

    // Start the metrics server if configured and not running the "rc" command
    if len(os.Args) >= 2 && os.Args[1] != "rc" {
    if os.Args[1] != "rc" {
        _, err = rcserver.MetricsStart(ctx, &rc.Opt)
        if err != nil {
            fs.Fatalf(nil, "Failed to start metrics server: %v", err)
@@ -303,9 +303,6 @@ func doConfig(name string, in rc.Params, do func(config.UpdateRemoteOpt) (*fs.Co
    if err != nil {
        return err
    }
    if updateRemoteOpt.NoOutput {
        return nil
    }
    if !(updateRemoteOpt.NonInteractive || updateRemoteOpt.Continue) {
        config.ShowRemote(name)
    } else {
@@ -326,7 +323,6 @@ func init() {
    for _, cmdFlags := range []*pflag.FlagSet{configCreateCommand.Flags(), configUpdateCommand.Flags()} {
        flags.BoolVarP(cmdFlags, &updateRemoteOpt.Obscure, "obscure", "", false, "Force any passwords to be obscured", "Config")
        flags.BoolVarP(cmdFlags, &updateRemoteOpt.NoObscure, "no-obscure", "", false, "Force any passwords not to be obscured", "Config")
        flags.BoolVarP(cmdFlags, &updateRemoteOpt.NoOutput, "no-output", "", false, "Don't provide any output", "Config")
        flags.BoolVarP(cmdFlags, &updateRemoteOpt.NonInteractive, "non-interactive", "", false, "Don't interact with user and return questions", "Config")
        flags.BoolVarP(cmdFlags, &updateRemoteOpt.Continue, "continue", "", false, "Continue the configuration process with an answer", "Config")
        flags.BoolVarP(cmdFlags, &updateRemoteOpt.All, "all", "", false, "Ask the full set of config questions", "Config")
@@ -22,9 +22,6 @@ include/exclude filters - everything will be removed. Use the
delete files. To delete empty directories only, use command
[rmdir](/commands/rclone_rmdir/) or [rmdirs](/commands/rclone_rmdirs/).

The concurrency of this operation is controlled by the ` + "`--checkers`" + ` global flag. However, some backends will
implement this command directly, in which case ` + "`--checkers`" + ` will be ignored.

**Important**: Since this can cause data loss, test first with the
` + "`--dry-run` or the `--interactive`/`-i`" + ` flag.
`,
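As a concrete illustration of the flags discussed in that help text, a cautious first run might look like this (`remote:path` is a placeholder):

```
rclone delete remote:path --dry-run --checkers 16
rclone delete remote:path --interactive
```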
@@ -4,8 +4,10 @@ package proxy
import (
    "bytes"
    "context"
    "crypto/md5"
    "crypto/sha256"
    "crypto/subtle"
    "encoding/base64"
    "encoding/json"
    "errors"
    "fmt"
@@ -217,8 +219,13 @@ func (p *Proxy) call(user, auth string, isPublicKey bool) (value interface{}, er
        return nil, fmt.Errorf("proxy: couldn't find backend for %q: %w", fsName, err)
    }

    // Add a config hash to ensure configs with different values have different names.
    // 5 characters length is 5*6 = 30 bits of base64
    md5sumBinary := md5.Sum([]byte(config.String()))
    configHash := base64.RawURLEncoding.EncodeToString(md5sumBinary[:])[:5]

    // base name of config on user name. This may appear in logs
    name := "proxy-" + user
    name := "proxy-" + user + "-" + configHash
    fsString := name + ":" + root

    // Look for fs in the VFS cache
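The five-character slice works because RawURLEncoding packs 6 bits per character, so 5 characters carry 30 bits of the MD5 digest: short enough for log-friendly names, long enough to separate differing configs. The same derivation as a standalone snippet:

```
package main

import (
    "crypto/md5"
    "encoding/base64"
    "fmt"
)

// shortConfigHash derives a 5-character, URL-safe tag from a config
// string: 5 base64 characters * 6 bits = 30 bits of the MD5 digest.
func shortConfigHash(config string) string {
    sum := md5.Sum([]byte(config))
    return base64.RawURLEncoding.EncodeToString(sum[:])[:5]
}

func main() {
    fmt.Println(shortConfigHash("user=alice,pass=x")) // differs from...
    fmt.Println(shortConfigHash("user=alice,pass=y")) // ...this one
}
```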
@@ -90,7 +90,8 @@ func TestRun(t *testing.T) {
    require.NotNil(t, entry.vfs)
    f := entry.vfs.Fs()
    require.NotNil(t, f)
    assert.Equal(t, "proxy-"+testUser, f.Name())
    assert.True(t, strings.HasPrefix(f.Name(), "proxy-"+testUser+"-"))
    assert.Equal(t, len("proxy-"+testUser+"-")+5, len(f.Name()))
    assert.True(t, strings.HasPrefix(f.String(), "Local file system"))

    // check it is in the cache
@@ -108,7 +109,7 @@ func TestRun(t *testing.T) {
    vfs, vfsKey, err := p.Call(testUser, testPass, false)
    require.NoError(t, err)
    require.NotNil(t, vfs)
    assert.Equal(t, "proxy-"+testUser, vfs.Fs().Name())
    assert.True(t, strings.HasPrefix(vfs.Fs().Name(), "proxy-"+testUser+"-"))
    assert.Equal(t, testUser, vfsKey)

    // check it is in the cache
@@ -129,7 +130,7 @@ func TestRun(t *testing.T) {
    vfs, vfsKey, err = p.Call(testUser, testPass, false)
    require.NoError(t, err)
    require.NotNil(t, vfs)
    assert.Equal(t, "proxy-"+testUser, vfs.Fs().Name())
    assert.True(t, strings.HasPrefix(vfs.Fs().Name(), "proxy-"+testUser+"-"))
    assert.Equal(t, testUser, vfsKey)

    // check cache is at the same level
@@ -173,7 +174,7 @@ func TestRun(t *testing.T) {
    require.NotNil(t, entry.vfs)
    f := entry.vfs.Fs()
    require.NotNil(t, f)
    assert.Equal(t, "proxy-"+testUser, f.Name())
    assert.True(t, strings.HasPrefix(f.Name(), "proxy-"+testUser+"-"))
    assert.True(t, strings.HasPrefix(f.String(), "Local file system"))

    // check it is in the cache
@@ -195,7 +196,7 @@ func TestRun(t *testing.T) {
    )
    require.NoError(t, err)
    require.NotNil(t, vfs)
    assert.Equal(t, "proxy-"+testUser, vfs.Fs().Name())
    assert.True(t, strings.HasPrefix(vfs.Fs().Name(), "proxy-"+testUser+"-"))
    assert.Equal(t, testUser, vfsKey)

    // check it is in the cache
@@ -216,7 +217,7 @@ func TestRun(t *testing.T) {
    vfs, vfsKey, err = p.Call(testUser, publicKeyString, true)
    require.NoError(t, err)
    require.NotNil(t, vfs)
    assert.Equal(t, "proxy-"+testUser, vfs.Fs().Name())
    assert.True(t, strings.HasPrefix(vfs.Fs().Name(), "proxy-"+testUser+"-"))
    assert.Equal(t, testUser, vfsKey)

    // check cache is at the same level
@@ -938,9 +938,3 @@ put them back in again.` >}}
* hiddenmarten <hiddenmarten@gmail.com>
* Trevor Starick <trevor.starick@gmail.com>
* b-wimmer <132347192+b-wimmer@users.noreply.github.com>
* Jess <jess@jessie.cafe>
* Zachary Vorhies <zachvorhies@protonmail.com>
* Alexander Minbaev <minbaev@gmail.com>
* Joel K Biju <joelkbiju18@gmail.com>
* ll3006 <doublel3006@gmail.com>
* jbagwell-akamai <113531113+jbagwell-akamai@users.noreply.github.com>
@@ -1815,9 +1815,6 @@ about _Unison_ and synchronization in general.

## Changelog

### `v1.69.1`
* Fixed an issue causing listings to not capture concurrent modifications under certain conditions

### `v1.68`
* Fixed an issue affecting backends that round modtimes to a lower precision.
@@ -7,8 +7,6 @@ versionIntroduced: v1.65
---
# rclone serve nfs

*Not available in Windows.*

Serve the remote as an NFS mount

## Synopsis
@@ -2958,7 +2958,7 @@ Choose a number from below, or type in your own value
location_constraint>1
```

8. Specify a canned ACL. IBM Cloud (Storage) supports "public-read" and "private". IBM Cloud(Infra) supports all the canned ACLs. On-Premise COS supports all the canned ACLs.
9. Specify a canned ACL. IBM Cloud (Storage) supports "public-read" and "private". IBM Cloud(Infra) supports all the canned ACLs. On-Premise COS supports all the canned ACLs.
```
Canned ACL used when creating buckets and/or storing objects in S3.
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
@@ -2974,7 +2974,8 @@ Choose a number from below, or type in your own value
acl> 1
```

9. Review the displayed configuration and accept to save the "remote" then quit. The config file should look like this
12. Review the displayed configuration and accept to save the "remote" then quit. The config file should look like this
```
[xxx]
type = s3
@@ -2986,7 +2987,7 @@
acl = private
```

10. Execute rclone commands
13. Execute rclone commands
```
1) Create a bucket.
rclone mkdir IBM-COS-XREGION:newbucket
@@ -3005,35 +3006,6 @@ acl> 1
rclone delete IBM-COS-XREGION:newbucket/file.txt
```

#### IBM IAM authentication
If using IBM IAM authentication with IBM API KEY you need to fill in these additional parameters
1. Select false for env_auth
2. Leave `access_key_id` and `secret_access_key` blank
3. Paste your `ibm_api_key`
```
Option ibm_api_key.
IBM API Key to be used to obtain IAM token
Enter a value of type string. Press Enter for the default (1).
ibm_api_key>
```
4. Paste your `ibm_resource_instance_id`
```
Option ibm_resource_instance_id.
IBM service instance id
Enter a value of type string. Press Enter for the default (2).
ibm_resource_instance_id>
```
5. In advanced settings type true for `v2_auth`
```
Option v2_auth.
If true use v2 authentication.
If this is false (the default) then rclone will use v4 authentication.
If it is set then rclone will use v2 authentication.
Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH.
Enter a boolean value (true or false). Press Enter for the default (true).
v2_auth>
```

### IDrive e2 {#idrive-e2}

Here is an example of making an [IDrive e2](https://www.idrive.com/e2/)
@@ -4873,49 +4845,27 @@ Option endpoint.
Endpoint for Linode Object Storage API.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
1 / Amsterdam (Netherlands), nl-ams-1
\ (nl-ams-1.linodeobjects.com)
2 / Atlanta, GA (USA), us-southeast-1
1 / Atlanta, GA (USA), us-southeast-1
\ (us-southeast-1.linodeobjects.com)
3 / Chennai (India), in-maa-1
\ (in-maa-1.linodeobjects.com)
4 / Chicago, IL (USA), us-ord-1
2 / Chicago, IL (USA), us-ord-1
\ (us-ord-1.linodeobjects.com)
5 / Frankfurt (Germany), eu-central-1
3 / Frankfurt (Germany), eu-central-1
\ (eu-central-1.linodeobjects.com)
6 / Jakarta (Indonesia), id-cgk-1
\ (id-cgk-1.linodeobjects.com)
7 / London 2 (Great Britain), gb-lon-1
\ (gb-lon-1.linodeobjects.com)
8 / Los Angeles, CA (USA), us-lax-1
\ (us-lax-1.linodeobjects.com)
9 / Madrid (Spain), es-mad-1
\ (es-mad-1.linodeobjects.com)
10 / Melbourne (Australia), au-mel-1
\ (au-mel-1.linodeobjects.com)
11 / Miami, FL (USA), us-mia-1
\ (us-mia-1.linodeobjects.com)
12 / Milan (Italy), it-mil-1
4 / Milan (Italy), it-mil-1
\ (it-mil-1.linodeobjects.com)
13 / Newark, NJ (USA), us-east-1
5 / Newark, NJ (USA), us-east-1
\ (us-east-1.linodeobjects.com)
14 / Osaka (Japan), jp-osa-1
\ (jp-osa-1.linodeobjects.com)
15 / Paris (France), fr-par-1
6 / Paris (France), fr-par-1
\ (fr-par-1.linodeobjects.com)
16 / São Paulo (Brazil), br-gru-1
\ (br-gru-1.linodeobjects.com)
17 / Seattle, WA (USA), us-sea-1
7 / Seattle, WA (USA), us-sea-1
\ (us-sea-1.linodeobjects.com)
18 / Singapore, ap-south-1
8 / Singapore ap-south-1
\ (ap-south-1.linodeobjects.com)
19 / Singapore 2, sg-sin-1
\ (sg-sin-1.linodeobjects.com)
20 / Stockholm (Sweden), se-sto-1
9 / Stockholm (Sweden), se-sto-1
\ (se-sto-1.linodeobjects.com)
21 / Washington, DC, (USA), us-iad-1
10 / Washington, DC, (USA), us-iad-1
\ (us-iad-1.linodeobjects.com)
endpoint> 5
endpoint> 3

Option acl.
Canned ACL used when creating buckets and storing or copying objects.
@@ -57,8 +57,7 @@ off donation.
Thank you very much to our sponsors:

{{< sponsor src="/img/logos/idrive_e2.svg" width="300" height="200" title="Visit our sponsor IDrive e2" link="https://www.idrive.com/e2/?refer=rclone">}}
{{< sponsor src="/img/logos/warp.svg" width="285" height="200" title="Visit our sponsor warp.dev" link="https://www.warp.dev/?utm_source=rclone&utm_medium=referral&utm_campaign=rclone_20231103">}}
{{< sponsor src="/img/logos/warp.svg" width="300" height="200" title="Visit our sponsor warp.dev" link="https://www.warp.dev/?utm_source=rclone&utm_medium=referral&utm_campaign=rclone_20231103">}}
{{< sponsor src="/img/logos/sia.svg" width="200" height="200" title="Visit our sponsor sia" link="https://sia.tech">}}
{{< sponsor src="/img/logos/route4me.svg" width="400" height="200" title="Visit our sponsor Route4Me" link="https://route4me.com/">}}
{{< sponsor src="/img/logos/rcloneview.svg" width="300" height="200" title="Visit our sponsor RcloneView" link="https://rcloneview.com/">}}
{{< sponsor src="/img/logos/filelu-rclone.svg" width="330" height="200" title="Visit our sponsor FileLu" link="https://filelu.com/">}}
@@ -1,3 +1,3 @@
<a href="{{ .Get "link" }}" target="_blank" >
<img width="{{ .Get "width" }}" src="{{ .Get "src" }}" title="{{ .Get "title" }}" styleX="{{ .Get "style" | safeCSS }}" style="margin: 2px; padding: 1px; border: 1px solid #ddd; border-radius: 4px;">
<img width="{{ .Get "width" }}" src="{{ .Get "src" }}" title="{{ .Get "title" }}" style="{{ .Get "style" | safeCSS }}">
</a>
@@ -65,29 +65,28 @@ type StatsInfo struct {
}

type averageValues struct {
    mu      sync.Mutex
    lpBytes int64
    lpTime  time.Time
    speed   float64
    stop    chan bool
    stopped sync.WaitGroup
    started bool
    mu        sync.Mutex
    lpBytes   int64
    lpTime    time.Time
    speed     float64
    stop      chan bool
    stopped   sync.WaitGroup
    startOnce sync.Once
    stopOnce  sync.Once
}

// NewStats creates an initialised StatsInfo
func NewStats(ctx context.Context) *StatsInfo {
    ci := fs.GetConfig(ctx)
    s := &StatsInfo{
    return &StatsInfo{
        ctx:          ctx,
        ci:           ci,
        checking:     newTransferMap(ci.Checkers, "checking"),
        transferring: newTransferMap(ci.Transfers, "transferring"),
        inProgress:   newInProgress(ctx),
        startTime:    time.Now(),
        average:      averageValues{},
        average:      averageValues{stop: make(chan bool)},
    }
    s.startAverageLoop()
    return s
}

// RemoteStats returns stats for rc
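Swapping the started flag for startOnce/stopOnce makes the loop's start and stop idempotent: repeated callers collapse into the first, and the stop channel can only be closed once. A minimal sketch of this once-guarded goroutine lifecycle (the loop type is a stand-in, not rclone's):

```
package main

import (
    "fmt"
    "sync"
    "time"
)

type loop struct {
    stop      chan bool
    stopped   sync.WaitGroup
    startOnce sync.Once
    stopOnce  sync.Once
}

func newLoop() *loop { return &loop{stop: make(chan bool)} }

func (l *loop) run() {
    defer l.stopped.Done()
    for {
        select {
        case <-time.After(10 * time.Millisecond):
            // periodic work would go here
        case <-l.stop:
            return
        }
    }
}

// Start is safe to call many times; only the first launches the goroutine.
func (l *loop) Start() {
    l.startOnce.Do(func() {
        l.stopped.Add(1)
        go l.run()
    })
}

// Stop is likewise idempotent: the channel is closed exactly once.
func (l *loop) Stop() {
    l.stopOnce.Do(func() {
        close(l.stop)
        l.stopped.Wait()
    })
}

func main() {
    l := newLoop()
    l.Start()
    l.Start() // no-op
    l.Stop()
    l.Stop() // no-op
    fmt.Println("loop stopped cleanly")
}
```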
@@ -329,96 +328,61 @@ func (s *StatsInfo) averageLoop() {
    ticker := time.NewTicker(averagePeriodLength)
    defer ticker.Stop()

    startTime := time.Now()
    a := &s.average
    defer a.stopped.Done()

    shouldRun := false

    for {
        select {
        case now := <-ticker.C:
            a.mu.Lock()

            if !shouldRun {
                a.mu.Unlock()
                continue
            var elapsed float64
            if a.lpTime.IsZero() {
                elapsed = now.Sub(startTime).Seconds()
            } else {
                elapsed = now.Sub(a.lpTime).Seconds()
            }

            avg := 0.0
            elapsed := now.Sub(a.lpTime).Seconds()
            if elapsed > 0 {
                avg = float64(a.lpBytes) / elapsed
            }

            if period < averagePeriod {
                period++
            }

            a.speed = (avg + a.speed*(period-1)) / period
            a.lpBytes = 0
            a.lpTime = now

            a.mu.Unlock()

        case stop, ok := <-a.stop:
            if !ok {
                return // Channel closed, exit the loop
            }

            a.mu.Lock()

            // If we are resuming, store the current time
            if !shouldRun && !stop {
                a.lpTime = time.Now()
            }
            shouldRun = !stop

            a.mu.Unlock()
        case <-a.stop:
            return
        }
    }
}
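The speed calculation in this loop is a moving average with a growing window: each tick folds the latest per-period rate into the running value as speed = (sample + speed*(period-1)) / period, so new samples weigh more heavily until period reaches averagePeriod. A small worked example of that update rule (maxPeriod stands in for averagePeriod):

```
package main

import "fmt"

// Reproduces the update rule from averageLoop:
// speed = (sample + speed*(period-1)) / period, with period
// growing by one each tick until it hits the cap.
func main() {
    const maxPeriod = 4.0
    speed, period := 0.0, 0.0
    for _, sample := range []float64{100, 100, 40, 40, 40} {
        if period < maxPeriod {
            period++
        }
        speed = (sample + speed*(period-1)) / period
        fmt.Printf("sample=%5.1f period=%.0f speed=%6.2f\n", sample, period, speed)
    }
}
```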
// Resume the average loop
func (s *StatsInfo) resumeAverageLoop() {
    s.mu.Lock()
    defer s.mu.Unlock()
    s.average.stop <- false
}

// Pause the average loop
func (s *StatsInfo) pauseAverageLoop() {
    s.mu.Lock()
    defer s.mu.Unlock()
    s.average.stop <- true
}

// Start the average loop
//
// Call with the mutex held
func (s *StatsInfo) _startAverageLoop() {
    if !s.average.started {
        s.average.stop = make(chan bool)
        s.average.started = true
        s.average.stopped.Add(1)
        go s.averageLoop()
    }
}

// Start the average loop
func (s *StatsInfo) startAverageLoop() {
    s.mu.Lock()
    defer s.mu.Unlock()
    s._startAverageLoop()
    s.average.startOnce.Do(func() {
        s.average.stopped.Add(1)
        go s.averageLoop()
    })
}

// Stop the average loop
//
// Call with the mutex held
func (s *StatsInfo) _stopAverageLoop() {
    if s.average.started {
    s.average.stopOnce.Do(func() {
        close(s.average.stop)
        s.average.stopped.Wait()
    }
    })
}

// Stop the average loop
func (s *StatsInfo) stopAverageLoop() {
    s.mu.Lock()
    defer s.mu.Unlock()
    s._stopAverageLoop()
}
// String convert the StatsInfo to a string for printing
@@ -600,9 +564,9 @@ func (s *StatsInfo) GetBytesWithPending() int64 {
    pending := int64(0)
    for _, tr := range s.startedTransfers {
        if tr.acc != nil {
            bytesRead, size := tr.acc.progress()
            if bytesRead < size {
                pending += size - bytesRead
            bytes, size := tr.acc.progress()
            if bytes < size {
                pending += size - bytes
            }
        }
    }
@@ -735,8 +699,7 @@ func (s *StatsInfo) ResetCounters() {
    s.oldDuration = 0

    s._stopAverageLoop()
    s.average = averageValues{}
    s._startAverageLoop()
    s.average = averageValues{stop: make(chan bool)}
}

// ResetErrors sets the errors count to 0 and resets lastError, fatalError and retryError
@@ -825,7 +788,7 @@ func (s *StatsInfo) NewTransfer(obj fs.DirEntry, dstFs fs.Fs) *Transfer {
    }
    tr := newTransfer(s, obj, srcFs, dstFs)
    s.transferring.add(tr)
    s.resumeAverageLoop()
    s.startAverageLoop()
    return tr
}

@@ -833,7 +796,7 @@ func (s *StatsInfo) NewTransfer(obj fs.DirEntry, dstFs fs.Fs) *Transfer {
func (s *StatsInfo) NewTransferRemoteSize(remote string, size int64, srcFs, dstFs fs.Fs) *Transfer {
    tr := newTransferRemoteSize(s, remote, size, false, "", srcFs, dstFs)
    s.transferring.add(tr)
    s.resumeAverageLoop()
    s.startAverageLoop()
    return tr
}

@@ -848,7 +811,7 @@ func (s *StatsInfo) DoneTransferring(remote string, ok bool) {
        s.mu.Unlock()
    }
    if s.transferring.empty() && s.checking.empty() {
        s.pauseAverageLoop()
        time.AfterFunc(averageStopAfter, s.stopAverageLoop)
    }
}
@@ -514,8 +514,6 @@ type UpdateRemoteOpt struct {
    Obscure bool `json:"obscure"`
    // Treat all passwords as obscured
    NoObscure bool `json:"noObscure"`
    // Don't provide any output
    NoOutput bool `json:"noOutput"`
    // Don't interact with the user - return questions
    NonInteractive bool `json:"nonInteractive"`
    // If set then supply state and result parameters to continue the process

@@ -120,7 +120,6 @@ func init() {
    extraHelp += `- opt - a dictionary of options to control the configuration
- obscure - declare passwords are plain and need obscuring
- noObscure - declare passwords are already obscured and don't need obscuring
- noOutput - don't print anything to stdout
- nonInteractive - don't interact with a user, return questions
- continue - continue the config process with an answer
- all - ask all the config questions not just the post config ones
@@ -1109,17 +1109,23 @@ func (s *syncCopyMove) copyDirMetadata(ctx context.Context, f fs.Fs, dst fs.Dire
    if !s.setDirModTimeAfter && equal {
        return nil
    }
    newDst = dst
    if !equal {
        if s.setDirMetadata && s.copyEmptySrcDirs {
    if s.setDirModTimeAfter && equal {
        newDst = dst
    } else if s.copyEmptySrcDirs {
        if s.setDirMetadata {
            newDst, err = operations.CopyDirMetadata(ctx, f, dst, dir, src)
        } else if dst == nil && s.setDirModTime && s.copyEmptySrcDirs {
            newDst, err = operations.MkdirModTime(ctx, f, dir, src.ModTime(ctx))
        } else if dst == nil && s.copyEmptySrcDirs {
        } else if s.setDirModTime {
            if dst == nil {
                newDst, err = operations.MkdirModTime(ctx, f, dir, src.ModTime(ctx))
            } else {
                newDst, err = operations.SetDirModTime(ctx, f, dst, dir, src.ModTime(ctx))
            }
        } else if dst == nil {
            // Create the directory if it doesn't exist
            err = operations.Mkdir(ctx, f, dir)
        } else if dst != nil && s.setDirModTime {
            newDst, err = operations.SetDirModTime(ctx, f, dst, dir, src.ModTime(ctx))
        }
    } else {
        newDst = dst
    }
    // If we need to set modtime after and we created a dir, then save it for later
    if s.setDirModTime && s.setDirModTimeAfter && err == nil {
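The restructured branch makes the precedence explicit: for unequal directories, metadata copy wins when enabled, then modtime handling (create vs. set depending on whether dst exists), then a bare Mkdir; equal directories pass through only to have modtimes set afterwards. A condensed, purely illustrative decision table:

```
package main

import "fmt"

// dirAction condenses the branch order of the rewritten
// copyDirMetadata: which operation runs for a given combination of
// settings. Illustrative only, not rclone code.
func dirAction(equal, setAfter, copyEmpty, setMeta, setModTime, dstExists bool) string {
    switch {
    case equal && !setAfter:
        return "nothing to do"
    case equal && setAfter:
        return "keep dst, set modtime later"
    case !copyEmpty:
        return "keep dst as-is"
    case setMeta:
        return "CopyDirMetadata"
    case setModTime && !dstExists:
        return "MkdirModTime"
    case setModTime && dstExists:
        return "SetDirModTime"
    case !dstExists:
        return "Mkdir"
    default:
        return "keep dst as-is"
    }
}

func main() {
    fmt.Println(dirAction(false, false, true, false, true, false)) // MkdirModTime
    fmt.Println(dirAction(false, false, true, true, true, true))   // CopyDirMetadata
}
```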
@@ -2762,81 +2762,6 @@ func TestSyncConcurrentTruncate(t *testing.T) {
    testSyncConcurrent(t, "truncate")
}

// Test that sync replaces dir modtimes in dst if they've changed
func testSyncReplaceDirModTime(t *testing.T, copyEmptySrcDirs bool) {
    accounting.GlobalStats().ResetCounters()
    ctx, _ := fs.AddConfig(context.Background())
    r := fstest.NewRun(t)

    file1 := r.WriteFile("file1", "file1", t2)
    file2 := r.WriteFile("test_dir1/file2", "file2", t2)
    file3 := r.WriteFile("test_dir2/sub_dir/file3", "file3", t2)
    r.CheckLocalItems(t, file1, file2, file3)

    _, err := operations.MkdirModTime(ctx, r.Flocal, "empty_dir", t2)
    require.NoError(t, err)

    // A directory that's empty on both src and dst
    _, err = operations.MkdirModTime(ctx, r.Flocal, "empty_on_remote", t2)
    require.NoError(t, err)
    _, err = operations.MkdirModTime(ctx, r.Fremote, "empty_on_remote", t2)
    require.NoError(t, err)

    // set logging
    // (this checks log output as DirModtime operations do not yet have stats, and r.CheckDirectoryModTimes also does not tell us what actions were taken)
    oldLogLevel := fs.GetConfig(context.Background()).LogLevel
    defer func() { fs.GetConfig(context.Background()).LogLevel = oldLogLevel }() // reset to old val after test
    // need to do this as fs.Infof only respects the globalConfig
    fs.GetConfig(context.Background()).LogLevel = fs.LogLevelInfo

    // First run
    accounting.GlobalStats().ResetCounters()
    ctx = predictDstFromLogger(ctx)
    output := bilib.CaptureOutput(func() {
        err := CopyDir(ctx, r.Fremote, r.Flocal, copyEmptySrcDirs)
        require.NoError(t, err)
    })
    require.NotNil(t, output)
    testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)

    // Save all dirs
    dirs := []string{"test_dir1", "test_dir2", "test_dir2/sub_dir", "empty_on_remote"}
    if copyEmptySrcDirs {
        dirs = append(dirs, "empty_dir")
    }

    // Change dir modtimes
    for _, dir := range dirs {
        _, err := operations.SetDirModTime(ctx, r.Flocal, nil, dir, t1)
        if err != nil && !errors.Is(err, fs.ErrorNotImplemented) {
            require.NoError(t, err)
        }
    }

    // Run again
    accounting.GlobalStats().ResetCounters()
    ctx = predictDstFromLogger(ctx)
    output = bilib.CaptureOutput(func() {
        err := CopyDir(ctx, r.Fremote, r.Flocal, copyEmptySrcDirs)
        require.NoError(t, err)
    })
    require.NotNil(t, output)
    testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
    r.CheckLocalItems(t, file1, file2, file3)
    r.CheckRemoteItems(t, file1, file2, file3)

    // Check that the modtimes of the directories are as expected
    r.CheckDirectoryModTimes(t, dirs...)
}

func TestSyncReplaceDirModTime(t *testing.T) {
    testSyncReplaceDirModTime(t, false)
}

func TestSyncReplaceDirModTimeWithEmptyDirs(t *testing.T) {
    testSyncReplaceDirModTime(t, true)
}

// Tests that nothing is transferred when src and dst already match
// Run the same sync twice, ensure no action is taken the second time
func testNothingToTransfer(t *testing.T, copyEmptySrcDirs bool) {
go.mod
@@ -25,7 +25,7 @@ require (
    github.com/aws/smithy-go v1.22.1
    github.com/buengese/sgzip v0.1.1
    github.com/cloudinary/cloudinary-go/v2 v2.9.0
    github.com/cloudsoda/go-smb2 v0.0.0-20250124173933-e6bbeea507ed
    github.com/cloudsoda/go-smb2 v0.0.0-20241223203758-52b943b88fd6
    github.com/colinmarc/hdfs/v2 v2.4.0
    github.com/coreos/go-semver v0.3.1
    github.com/coreos/go-systemd/v22 v22.5.0
@@ -91,7 +91,6 @@ require (
    gopkg.in/validator.v2 v2.0.1
    gopkg.in/yaml.v3 v3.0.1
    storj.io/uplink v1.13.1

)

require (
@@ -110,7 +109,6 @@ require (
    github.com/anacrolix/generics v0.0.1 // indirect
    github.com/andybalholm/cascadia v1.3.2 // indirect
    github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc // indirect
    github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
    github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect
    github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.23 // indirect
    github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.27 // indirect
@@ -148,11 +146,6 @@ require (
    github.com/go-logr/logr v1.4.2 // indirect
    github.com/go-logr/stdr v1.2.2 // indirect
    github.com/go-ole/go-ole v1.3.0 // indirect
    github.com/go-openapi/errors v0.21.0 // indirect
    github.com/go-openapi/strfmt v0.22.1 // indirect
    github.com/go-playground/locales v0.14.1 // indirect
    github.com/go-playground/universal-translator v0.18.1 // indirect
    github.com/go-playground/validator/v10 v10.20.0 // indirect
    github.com/go-resty/resty/v2 v2.11.0 // indirect
    github.com/goccy/go-json v0.10.4 // indirect
    github.com/gofrs/flock v0.8.1 // indirect
@@ -179,16 +172,14 @@ require (
    github.com/klauspost/cpuid/v2 v2.2.9 // indirect
    github.com/kr/fs v0.1.0 // indirect
    github.com/kylelemons/godebug v1.1.0 // indirect
    github.com/leodido/go-urn v1.4.0 // indirect
    github.com/lpar/date v1.0.0 // indirect
    github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
    github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed // indirect
    github.com/mattn/go-isatty v0.0.20 // indirect
    github.com/minio/md5-simd v1.1.2 // indirect
    github.com/minio/xxml v0.0.3 // indirect
    github.com/mitchellh/mapstructure v1.5.0 // indirect
    github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
    github.com/oklog/ulid v1.3.1 // indirect
    github.com/onsi/ginkgo v1.16.5 // indirect
    github.com/panjf2000/ants/v2 v2.9.1 // indirect
    github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 // indirect
    github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
@@ -212,7 +203,6 @@ require (
    github.com/willscott/go-nfs-client v0.0.0-20240104095149-b44639837b00 // indirect
    github.com/yusufpapurcu/wmi v1.2.4 // indirect
    github.com/zeebo/errs v1.3.0 // indirect
    go.mongodb.org/mongo-driver v1.14.0 // indirect
    go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
    go.opentelemetry.io/otel v1.31.0 // indirect
    go.opentelemetry.io/otel/metric v1.31.0 // indirect
@@ -233,7 +223,6 @@ require (
)

require (
    github.com/IBM/go-sdk-core/v5 v5.17.5
    github.com/Microsoft/go-winio v0.6.1 // indirect
    github.com/ProtonMail/go-crypto v1.1.4
    github.com/golang-jwt/jwt/v4 v4.5.1
go.sum
@@ -63,8 +63,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Files-com/files-sdk-go/v3 v3.2.107 h1:TRDGQGYANuuMxX8JU++oQKEGitJpKWC1EB0SbNXRVqI=
github.com/Files-com/files-sdk-go/v3 v3.2.107/go.mod h1:Y/bCHoPJNPKz2hw1ADXjQXJP378HODwK+g/5SR2gqfU=
github.com/IBM/go-sdk-core/v5 v5.17.5 h1:AjGC7xNee5tgDIjndekBDW5AbypdERHSgib3EZ1KNsA=
github.com/IBM/go-sdk-core/v5 v5.17.5/go.mod h1:KsAAI7eStAWwQa4F96MLy+whYSh39JzNjklZRbN/8ns=
github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g=
github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE=
@@ -108,8 +106,6 @@ github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsVi
github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU=
github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc h1:LoL75er+LKDHDUfU5tRvFwxH0LjPpZN8OoG8Ll+liGU=
github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc/go.mod h1:w648aMHEgFYS6xb0KVMMtZ2uMeemhiKCuD2vj6gY52A=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
github.com/aws/aws-sdk-go-v2 v1.32.8 h1:cZV+NUS/eGxKXMtmyhtYPJ7Z4YLoI/V8bkTdRZfYhGo=
@@ -181,8 +177,8 @@ github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vc
github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
github.com/cloudinary/cloudinary-go/v2 v2.9.0 h1:8C76QklmuV4qmKAC7cUnu9D68X9kCkFMuLspPikECCo=
github.com/cloudinary/cloudinary-go/v2 v2.9.0/go.mod h1:ireC4gqVetsjVhYlwjUJwKTbZuWjEIynbR9zQTlqsvo=
github.com/cloudsoda/go-smb2 v0.0.0-20250124173933-e6bbeea507ed h1:KrdJUJWhJ1UWhvaP6SBsvG356KjqfdDjcS/4xTswAU4=
github.com/cloudsoda/go-smb2 v0.0.0-20250124173933-e6bbeea507ed/go.mod h1:0aLYPsmguHbok591y6hI5yAqU0drbUzrPEO10ZpgTTw=
github.com/cloudsoda/go-smb2 v0.0.0-20241223203758-52b943b88fd6 h1:mLY/79N73URZ2J/oRKTxmfhCgxThzBmjQ6XOjX5tYjI=
github.com/cloudsoda/go-smb2 v0.0.0-20241223203758-52b943b88fd6/go.mod h1:0aLYPsmguHbok591y6hI5yAqU0drbUzrPEO10ZpgTTw=
github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y=
github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg=
@@ -237,6 +233,8 @@ github.com/flynn/noise v1.0.1 h1:vPp/jdQLXC6ppsXSj/pM3W1BIJ5FEHE2TulSJBpb43Y=
github.com/flynn/noise v1.0.1/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM=
@@ -270,12 +268,6 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/go-openapi/errors v0.21.0 h1:FhChC/duCnfoLj1gZ0BgaBmzhJC2SL/sJr8a2vAobSY=
github.com/go-openapi/errors v0.21.0/go.mod h1:jxNTMUxRCKj65yb/okJGEtahVd7uvWnuWfj53bse4ho=
github.com/go-openapi/strfmt v0.22.1 h1:5Ky8cybT4576C6Ffc+8gYji/wRXCo6Ozm8RaWjPI6jc=
github.com/go-openapi/strfmt v0.22.1/go.mod h1:OfVoytIXJasDkkGvkb1Cceb3BPyMOwk1FgmyyEw7NYg=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
@@ -284,6 +276,7 @@ github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBEx
github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/go-resty/resty/v2 v2.11.0 h1:i7jMfNOJYMp69lq7qozJP+bjgzfAzeOhuGlyDrqxT/8=
github.com/go-resty/resty/v2 v2.11.0/go.mod h1:iiP/OpA0CkcL3IGt1O0+/SIItFUbkkyw5BGXiVdTu+A=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
@@ -390,6 +383,7 @@ github.com/henrybear327/Proton-API-Bridge v1.0.0 h1:gjKAaWfKu++77WsZTHg6FUyPC5W0
github.com/henrybear327/Proton-API-Bridge v1.0.0/go.mod h1:gunH16hf6U74W2b9CGDaWRadiLICsoJ6KRkSt53zLts=
github.com/henrybear327/go-proton-api v1.0.0 h1:zYi/IbjLwFAW7ltCeqXneUGJey0TN//Xo851a/BgLXw=
github.com/henrybear327/go-proton-api v1.0.0/go.mod h1:w63MZuzufKcIZ93pwRgiOtxMXYafI8H74D77AxytOBc=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
@@ -470,8 +464,6 @@ github.com/minio/xxml v0.0.3 h1:ZIpPQpfyG5uZQnqqC0LZuWtPk/WT8G/qkxvO6jb7zMU=
github.com/minio/xxml v0.0.3/go.mod h1:wcXErosl6IezQIMEWSK/LYC2VS7LJ1dAkgvuyIN3aH4=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
@@ -482,14 +474,17 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/ncw/swift/v2 v2.0.3 h1:8R9dmgFIWs+RiVlisCEfiQiik1hjuR0JnOkLxaP9ihg=
github.com/ncw/swift/v2 v2.0.3/go.mod h1:cbAO76/ZwcFrFlHdXPjaqWZ9R7Hdar7HpjRXBfbjigk=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.17.3 h1:oJcvKpIb7/8uLpDDtnQuf18xVnwKp8DTD7DQ6gTd/MU=
github.com/onsi/ginkgo/v2 v2.17.3 h1:oJcvKpIb7/8uLpDDtnQuf18xVnwKp8DTD7DQ6gTd/MU=
|
||||
github.com/onsi/ginkgo/v2 v2.17.3/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
|
||||
github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY=
|
||||
github.com/oracle/oci-go-sdk/v65 v65.81.1 h1:JYc47bk8n/MUchA2KHu1ggsCQzlJZQLJ+tTKfOho00E=
|
||||
@@ -595,6 +590,7 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
@@ -648,8 +644,6 @@ github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo=
|
||||
github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
|
||||
go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
|
||||
go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
|
||||
go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=
|
||||
go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
@@ -740,6 +734,7 @@ golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
|
||||
golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
@@ -761,6 +756,7 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/
|
||||
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
@@ -807,6 +803,7 @@ golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
|
||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -816,9 +813,12 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -837,6 +837,7 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@@ -943,6 +944,7 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc
|
||||
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20201211185031-d93e913c1a58/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
@@ -1043,12 +1045,15 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/validator.v2 v2.0.1 h1:xF0KWyGWXm/LM2G1TrEjqOu4pa6coO9AlWSf3msVfDY=
|
||||
gopkg.in/validator.v2 v2.0.1/go.mod h1:lIUZBlB3Im4s/eYp39Ry/wkR02yOPhZ9IwIRBjuPuG8=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
||||
@@ -108,7 +108,7 @@ func (conf *Config) MakeOauth2Config() *oauth2.Config {
 	return &oauth2.Config{
 		ClientID:     conf.ClientID,
 		ClientSecret: conf.ClientSecret,
-		RedirectURL:  conf.RedirectURL,
+		RedirectURL:  RedirectLocalhostURL,
 		Scopes:       conf.Scopes,
 		Endpoint: oauth2.Endpoint{
 			AuthURL: conf.AuthURL,
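
The hunk above pins the OAuth2 redirect to the localhost URL rather than the per-config value. As a minimal standalone sketch of the flow the resulting config drives — the client credentials, provider endpoints, and the value of RedirectLocalhostURL below are assumptions for illustration, not taken from this patch:

package main

import (
	"fmt"

	"golang.org/x/oauth2"
)

// RedirectLocalhostURL stands in for the constant the diff references; the
// exact value is an assumption here (rclone conventionally listens on a
// loopback port for the OAuth callback).
const RedirectLocalhostURL = "http://localhost:53682/"

func main() {
	cfg := &oauth2.Config{
		ClientID:     "example-client-id",     // assumed value
		ClientSecret: "example-client-secret", // assumed value
		RedirectURL:  RedirectLocalhostURL,    // fixed localhost redirect after this change
		Scopes:       []string{"read"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://provider.example.com/oauth2/auth",  // assumed endpoint
			TokenURL: "https://provider.example.com/oauth2/token", // assumed endpoint
		},
	}
	// The user visits this URL; the provider then redirects the browser
	// back to the localhost listener with the authorization code.
	fmt.Println(cfg.AuthCodeURL("state-token", oauth2.AccessTypeOffline))
}
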
@@ -17,7 +17,6 @@ import (
 	"sync"
 
 	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/lib/readers"
 )
 
@@ -190,13 +189,7 @@ func checkDrainAndClose(r io.ReadCloser, err *error) {
 func DecodeJSON(resp *http.Response, result interface{}) (err error) {
 	defer checkDrainAndClose(resp.Body, &err)
 	decoder := json.NewDecoder(resp.Body)
-	err = decoder.Decode(result)
-	if err != nil {
-		// Retry this as it is likely some overloaded server sending HTML instead of JSON
-		contentType := resp.Header.Get("Content-Type")
-		err = fserrors.RetryError(fmt.Errorf("failed to decode JSON from Content-Type: %q: %v", contentType, err))
-	}
-	return err
+	return decoder.Decode(result)
 }
 
 // DecodeXML decodes resp.Body into result
 
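
The hunk above reverts DecodeJSON to returning the raw decode error, dropping the branch that classified HTML-instead-of-JSON responses as retryable (which is why the fserrors import goes away). A minimal standalone sketch of the behavioural difference — the helper below imitates the simplified function and is not rclone's actual implementation:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
)

// decodeJSON imitates the simplified helper from the diff: it decodes the
// body and returns the json error as-is, with no retry classification.
func decodeJSON(resp *http.Response, result interface{}) error {
	defer resp.Body.Close()
	return json.NewDecoder(resp.Body).Decode(result)
}

func main() {
	// Simulate an overloaded server answering with HTML instead of JSON --
	// the case the removed branch used to mark as retryable.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "text/html")
		fmt.Fprint(w, "<html>503 Service Unavailable</html>")
	}))
	defer srv.Close()

	resp, err := http.Get(srv.URL)
	if err != nil {
		panic(err)
	}
	var out map[string]interface{}
	// With the reverted code the caller now sees the plain decoder error
	// ("invalid character '<' ...") instead of a RetryError wrapper.
	fmt.Println(decodeJSON(resp, &out))
}
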
@@ -227,10 +227,7 @@ func (c *Cache) createItemDir(name string) (string, error) {
 
 // getBackend gets a backend for a cache root dir
 func getBackend(ctx context.Context, parentPath string, name string, relativeDirPath string) (fs.Fs, error) {
-	// Make sure we turn off the global links flag as it overrides the backend specific one
-	ctx, ci := fs.AddConfig(ctx)
-	ci.Links = false
-	path := fmt.Sprintf(":local,encoding='%v',links=false:%s/%s/%s", encoder.OS, parentPath, name, relativeDirPath)
+	path := fmt.Sprintf(":local,encoding='%v':%s/%s/%s", encoder.OS, parentPath, name, relativeDirPath)
 	return fscache.Get(ctx, path)
 }
 
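
The hunk above drops both the links=false option from the cache backend's connection string and the fs.AddConfig/ci.Links override around it, so the backend inherits whatever links setting is in effect. A sketch of the connection string being built — the argument values and the encoding name are made up for illustration, and encoder.OS is platform dependent:

package main

import "fmt"

func main() {
	// Hypothetical stand-ins for getBackend's arguments.
	parentPath := "/home/user/.cache/rclone/vfs"
	name := "remote"
	relativeDirPath := "bucket/dir"
	encoding := "Slash,Dot" // placeholder for encoder.OS

	// After the change the connection string no longer carries links=false.
	path := fmt.Sprintf(":local,encoding='%v':%s/%s/%s", encoding, parentPath, name, relativeDirPath)
	fmt.Println(path)
	// Output: :local,encoding='Slash,Dot':/home/user/.cache/rclone/vfs/remote/bucket/dir
}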