mirror of https://github.com/rclone/rclone.git synced 2026-01-08 11:33:33 +00:00

Compare commits


1 Commit

Author SHA1 Message Date
Nick Craig-Wood
bd33b0e144 local: fix permission and ownership on symlinks with --links and --metadata
Before this change, if writing to a local backend with --metadata and
--links, if the incoming metadata contained mode or ownership
information then rclone would apply the mode/ownership to the
destination of the link not the link itself.

This fixes the problem by using the link-safe syscall variants
lchown/fchmodat when --links and --metadata are in use. Note that Linux
does not support setting permissions on symlinks, so rclone emits a
debug message in this case.

This also fixes setting times on symlinks on Windows, which wasn't
implemented for atime and mtime, and which incorrectly set the times on
the target of the symlink for btime.

See: https://github.com/rclone/rclone/security/advisories/GHSA-hrxh-9w67-g4cv
2024-11-14 12:51:18 +00:00
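
To illustrate what the fix describes, here is a minimal Go sketch (not rclone's actual code; the path, uid/gid and mode are hypothetical example values) of applying ownership and permissions to the link itself via the link-safe variants lchown and fchmodat with AT_SYMLINK_NOFOLLOW. On Linux, fchmodat rejects AT_SYMLINK_NOFOLLOW with EOPNOTSUPP, which is why only a debug message can be emitted there.

// Illustrative sketch only — unix-only (uses golang.org/x/sys/unix).
package main

import (
	"errors"
	"log"

	"golang.org/x/sys/unix"
)

// setSymlinkMetadata applies ownership and mode to the symlink itself,
// never to the file it points to.
func setSymlinkMetadata(path string, uid, gid int, mode uint32) error {
	// Lchown changes ownership of the link, not its target.
	if err := unix.Lchown(path, uid, gid); err != nil {
		return err
	}
	// Fchmodat with AT_SYMLINK_NOFOLLOW asks to chmod the link itself.
	// Linux does not support this and returns EOPNOTSUPP, so just log,
	// mirroring the debug message behaviour the commit describes.
	err := unix.Fchmodat(unix.AT_FDCWD, path, mode, unix.AT_SYMLINK_NOFOLLOW)
	if errors.Is(err, unix.EOPNOTSUPP) {
		log.Printf("DEBUG: cannot set permissions on symlink %q on this OS", path)
		return nil
	}
	return err
}

func main() {
	// Hypothetical values for demonstration.
	if err := setSymlinkMetadata("/tmp/example-link", 1000, 1000, 0o644); err != nil {
		log.Fatal(err)
	}
}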
280 changed files with 11750 additions and 17495 deletions

View File

@@ -26,7 +26,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os']
+        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.21', 'go1.22']
         include:
           - job_name: linux
@@ -80,6 +80,18 @@
             compile_all: true
             deploy: true
+          - job_name: go1.21
+            os: ubuntu-latest
+            go: '1.21'
+            quicktest: true
+            racequicktest: true
+
+          - job_name: go1.22
+            os: ubuntu-latest
+            go: '1.22'
+            quicktest: true
+            racequicktest: true
+
     name: ${{ matrix.job_name }}
     runs-on: ${{ matrix.os }}
@@ -111,8 +123,7 @@
           sudo modprobe fuse
           sudo chmod 666 /dev/fuse
          sudo chown root:$USER /etc/fuse.conf
-          sudo apt-get update
-          sudo apt-get install -y fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
+          sudo apt-get install fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
         if: matrix.os == 'ubuntu-latest'
       - name: Install Libraries on macOS

View File

@@ -0,0 +1,77 @@
name: Docker beta build
on:
push:
branches:
- master
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Free some space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
with:
images: ghcr.io/${{ github.repository }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
# This is the user that triggered the Workflow. In this case, it will
# either be the user whom created the Release or manually triggered
# the workflow_dispatch.
username: ${{ github.actor }}
# `secrets.GITHUB_TOKEN` is a secret that's automatically generated by
# GitHub Actions at the start of a workflow run to identify the job.
# This is used to authenticate against GitHub Container Registry.
# See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
# for more detailed information.
password: ${{ secrets.GITHUB_TOKEN }}
- name: Show disk usage
shell: bash
run: |
df -h .
- name: Build and publish image
uses: docker/build-push-action@v6
with:
file: Dockerfile
context: .
push: true # push the image to ghcr
tags: |
ghcr.io/rclone/rclone:beta
rclone/rclone:beta
labels: ${{ steps.meta.outputs.labels }}
platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
cache-from: type=gha, scope=${{ github.workflow }}
cache-to: type=gha, mode=max, scope=${{ github.workflow }}
provenance: false
# Eventually cache will need to be cleared if builds more frequent than once a week
# https://github.com/docker/build-push-action/issues/252
- name: Show disk usage
shell: bash
run: |
df -h .

View File

@@ -1,294 +0,0 @@
---
# Github Actions release for rclone
# -*- compile-command: "yamllint -f parsable build_publish_docker_image.yml" -*-
name: Build & Push Docker Images
# Trigger the workflow on push or pull request
on:
push:
branches:
- '**'
tags:
- '**'
workflow_dispatch:
inputs:
manual:
description: Manual run (bypass default conditions)
type: boolean
default: true
jobs:
build-image:
if: inputs.manual || (github.repository == 'rclone/rclone' && github.event_name != 'pull_request')
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
include:
- platform: linux/amd64
runs-on: ubuntu-24.04
- platform: linux/386
runs-on: ubuntu-24.04
- platform: linux/arm64
runs-on: ubuntu-24.04-arm
- platform: linux/arm/v7
runs-on: ubuntu-24.04-arm
- platform: linux/arm/v6
runs-on: ubuntu-24.04-arm
name: Build Docker Image for ${{ matrix.platform }}
runs-on: ${{ matrix.runs-on }}
steps:
- name: Free Space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout Repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set REPO_NAME Variable
run: |
echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}
- name: Set PLATFORM Variable
run: |
platform=${{ matrix.platform }}
echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV
- name: Set CACHE_NAME Variable
shell: python
run: |
import os, re
def slugify(input_string, max_length=63):
slug = input_string.lower()
slug = re.sub(r'[^a-z0-9 -]', ' ', slug)
slug = slug.strip()
slug = re.sub(r'\s+', '-', slug)
slug = re.sub(r'-+', '-', slug)
slug = slug[:max_length]
slug = re.sub(r'[-]+$', '', slug)
return slug
ref_name_slug = "cache"
if os.environ.get("GITHUB_REF_NAME") and os.environ['GITHUB_EVENT_NAME'] == "pull_request":
ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME'])
with open(os.environ['GITHUB_ENV'], 'a') as env:
env.write(f"CACHE_NAME={ref_name_slug}\n")
- name: Get ImageOS
# There's no way around this, because "ImageOS" is only available to
# processes, but the setup-go action uses it in its key.
id: imageos
uses: actions/github-script@v7
with:
result-encoding: string
script: |
return process.env.ImageOS
- name: Extract Metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
env:
DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,manifest-descriptor # Important for digest annotation (used by Github packages)
with:
images: |
ghcr.io/${{ env.REPO_NAME }}
labels: |
org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
org.opencontainers.image.vendor=${{ github.repository_owner }}
org.opencontainers.image.authors=rclone <https://github.com/rclone>
org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
org.opencontainers.image.revision=${{ github.sha }}
tags: |
type=sha
type=ref,event=pr
type=ref,event=branch
type=semver,pattern={{version}}
type=semver,pattern={{major}}
type=semver,pattern={{major}}.{{minor}}
type=raw,value=beta,enable={{is_default_branch}}
- name: Setup QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Load Go Build Cache for Docker
id: go-cache
uses: actions/cache@v4
with:
key: ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
# Cache only the go builds, the module download is cached via the docker layer caching
path: |
go-build-cache
- name: Inject Go Build Cache into Docker
uses: reproducible-containers/buildkit-cache-dance@v3
with:
cache-map: |
{
"go-build-cache": "/root/.cache/go-build"
}
skip-extraction: ${{ steps.go-cache.outputs.cache-hit }}
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
# This is the user that triggered the Workflow. In this case, it will
# either be the user whom created the Release or manually triggered
# the workflow_dispatch.
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and Publish Image Digest
id: build
uses: docker/build-push-action@v6
with:
file: Dockerfile
context: .
provenance: false
# don't specify 'tags' here (error "get can't push tagged ref by digest")
# tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
annotations: ${{ steps.meta.outputs.annotations }}
platforms: ${{ matrix.platform }}
outputs: |
type=image,name=ghcr.io/${{ env.REPO_NAME }},push-by-digest=true,name-canonical=true,push=true
cache-from: |
type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
cache-to: |
type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }},image-manifest=true,mode=max,compression=zstd
- name: Export Image Digest
run: |
mkdir -p /tmp/digests
digest="${{ steps.build.outputs.digest }}"
touch "/tmp/digests/${digest#sha256:}"
- name: Upload Image Digest
uses: actions/upload-artifact@v4
with:
name: digests-${{ env.PLATFORM }}
path: /tmp/digests/*
retention-days: 1
if-no-files-found: error
merge-image:
name: Merge & Push Final Docker Image
runs-on: ubuntu-24.04
needs:
- build-image
steps:
- name: Download Image Digests
uses: actions/download-artifact@v4
with:
path: /tmp/digests
pattern: digests-*
merge-multiple: true
- name: Set REPO_NAME Variable
run: |
echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}
- name: Extract Metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
env:
DOCKER_METADATA_ANNOTATIONS_LEVELS: index
with:
images: |
${{ env.REPO_NAME }}
ghcr.io/${{ env.REPO_NAME }}
labels: |
org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
org.opencontainers.image.vendor=${{ github.repository_owner }}
org.opencontainers.image.authors=rclone <https://github.com/rclone>
org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
org.opencontainers.image.revision=${{ github.sha }}
tags: |
type=sha
type=ref,event=pr
type=ref,event=branch
type=semver,pattern={{version}}
type=semver,pattern={{major}}
type=semver,pattern={{major}}.{{minor}}
type=raw,value=beta,enable={{is_default_branch}}
- name: Extract Tags
shell: python
run: |
import json, os
metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
metadata = json.loads(metadata_json)
tags = [f"--tag '{tag}'" for tag in metadata["tags"]]
tags_string = " ".join(tags)
with open(os.environ['GITHUB_ENV'], 'a') as env:
env.write(f"TAGS={tags_string}\n")
- name: Extract Annotations
shell: python
run: |
import json, os
metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
metadata = json.loads(metadata_json)
annotations = [f"--annotation '{annotation}'" for annotation in metadata["annotations"]]
annotations_string = " ".join(annotations)
with open(os.environ['GITHUB_ENV'], 'a') as env:
env.write(f"ANNOTATIONS={annotations_string}\n")
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
# This is the user that triggered the Workflow. In this case, it will
# either be the user whom created the Release or manually triggered
# the workflow_dispatch.
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Create & Push Manifest List
working-directory: /tmp/digests
run: |
docker buildx imagetools create \
${{ env.TAGS }} \
${{ env.ANNOTATIONS }} \
$(printf 'ghcr.io/${{ env.REPO_NAME }}@sha256:%s ' *)
- name: Inspect and Run Multi-Platform Image
run: |
docker buildx imagetools inspect --raw ${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
docker buildx imagetools inspect --raw ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
docker run --rm ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }} version

View File

@@ -1,49 +0,0 @@
---
# Github Actions release for rclone
# -*- compile-command: "yamllint -f parsable build_publish_docker_plugin.yml" -*-
name: Release Build for Docker Plugin
on:
release:
types: [published]
workflow_dispatch:
inputs:
manual:
description: Manual run (bypass default conditions)
type: boolean
default: true
jobs:
build_docker_volume_plugin:
if: inputs.manual || github.repository == 'rclone/rclone'
name: Build docker plugin job
runs-on: ubuntu-latest
steps:
- name: Free some space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build and publish docker plugin
shell: bash
run: |
VER=${GITHUB_REF#refs/tags/}
PLUGIN_USER=rclone
docker login --username ${{ secrets.DOCKER_HUB_USER }} \
--password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
export PLUGIN_USER PLUGIN_ARCH
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
done
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}

View File

@@ -0,0 +1,89 @@
name: Docker release build
on:
release:
types: [published]
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Free some space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Get actual patch version
id: actual_patch_version
run: echo ::set-output name=ACTUAL_PATCH_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g')
- name: Get actual minor version
id: actual_minor_version
run: echo ::set-output name=ACTUAL_MINOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1,2)
- name: Get actual major version
id: actual_major_version
run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_HUB_USER }}
password: ${{ secrets.DOCKER_HUB_PASSWORD }}
- name: Build and publish image
uses: docker/build-push-action@v6
with:
file: Dockerfile
context: .
platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
push: true
tags: |
rclone/rclone:latest
rclone/rclone:${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }}
rclone/rclone:${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }}
rclone/rclone:${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
build_docker_volume_plugin:
if: github.repository == 'rclone/rclone'
needs: build
runs-on: ubuntu-latest
name: Build docker plugin job
steps:
- name: Free some space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build and publish docker plugin
shell: bash
run: |
VER=${GITHUB_REF#refs/tags/}
PLUGIN_USER=rclone
docker login --username ${{ secrets.DOCKER_HUB_USER }} \
--password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
export PLUGIN_USER PLUGIN_ARCH
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
done
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}

View File

@@ -1,47 +1,19 @@
 FROM golang:alpine AS builder
-ARG CGO_ENABLED=0
+COPY . /go/src/github.com/rclone/rclone/
 WORKDIR /go/src/github.com/rclone/rclone/
-RUN echo "**** Set Go Environment Variables ****" && \
-    go env -w GOCACHE=/root/.cache/go-build
-RUN echo "**** Install Dependencies ****" && \
-    apk add --no-cache \
-        make \
-        bash \
-        gawk \
-        git
-COPY go.mod .
-COPY go.sum .
-RUN echo "**** Download Go Dependencies ****" && \
-    go mod download -x
-RUN echo "**** Verify Go Dependencies ****" && \
-    go mod verify
-COPY . .
-RUN --mount=type=cache,target=/root/.cache/go-build,sharing=locked \
-    echo "**** Build Binary ****" && \
-    make
-RUN echo "**** Print Version Binary ****" && \
-    ./rclone version
+RUN apk add --no-cache make bash gawk git
+RUN \
+  CGO_ENABLED=0 \
+  make
+RUN ./rclone version
 # Begin final image
 FROM alpine:latest
-RUN echo "**** Install Dependencies ****" && \
-    apk add --no-cache \
-        ca-certificates \
-        fuse3 \
-        tzdata && \
-    echo "Enable user_allow_other in fuse" && \
-    echo "user_allow_other" >> /etc/fuse.conf
+RUN apk --no-cache add ca-certificates fuse3 tzdata && \
+    echo "user_allow_other" >> /etc/fuse.conf
 COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/

MANUAL.html (generated) — 2950 changes
File diff suppressed because it is too large

MANUAL.md (generated) — 2810 changes
File diff suppressed because it is too large

MANUAL.txt (generated) — 2925 changes
File diff suppressed because it is too large

View File

@@ -1,4 +1,20 @@
+<div align="center">
+    <sup>Special thanks to our sponsor:</sup>
+    <br>
+    <br>
+    <a href="https://www.warp.dev/?utm_source=github&utm_medium=referral&utm_campaign=rclone_20231103">
+        <div>
+            <img src="https://rclone.org/img/logos/warp-github.svg" width="300" alt="Warp">
+        </div>
+        <b>Warp is a modern, Rust-based terminal with AI built in so you and your team can build great software, faster.</b>
+        <div>
+            <sup>Visit warp.dev to learn more.</sup>
+        </div>
+    </a>
+    <br>
+    <hr>
+</div>
+<br>
 [<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
 [<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)

View File

@@ -86,16 +86,6 @@ build.
 Once it compiles locally, push it on a test branch and commit fixes
 until the tests pass.
 
-### Major versions
-
-The above procedure will not upgrade major versions, so v2 to v3.
-However this tool can show which major versions might need to be
-upgraded:
-
-    go run github.com/icholy/gomajor@latest list -major
-
-Expect API breakage when updating major versions.
-
 ## Tidy beta
 
 At some point after the release run
@@ -124,8 +114,8 @@ Now
   * git co ${BASE_TAG}-stable
   * git cherry-pick any fixes
-  * make startstable
   * Do the steps as above
+  * make startstable
   * git co master
   * `#` cherry pick the changes to the changelog - check the diff to make sure it is correct
   * git checkout ${BASE_TAG}-stable docs/content/changelog.md

View File

@@ -1 +1 @@
-v1.69.2
+v1.69.0

View File

@@ -10,7 +10,6 @@ import (
 	_ "github.com/rclone/rclone/backend/box"
 	_ "github.com/rclone/rclone/backend/cache"
 	_ "github.com/rclone/rclone/backend/chunker"
-	_ "github.com/rclone/rclone/backend/cloudinary"
 	_ "github.com/rclone/rclone/backend/combine"
 	_ "github.com/rclone/rclone/backend/compress"
 	_ "github.com/rclone/rclone/backend/crypt"

View File

@@ -2162,9 +2162,6 @@ func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
 	if chunkNumber <= 8 {
 		return w.f.shouldRetry(ctx, err)
 	}
-	if fserrors.ContextError(ctx, &err) {
-		return false, err
-	}
 	// retry all chunks once have done the first few
 	return true, err
 }

View File

@@ -393,10 +393,8 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
 	policyClientOptions := policy.ClientOptions{
 		Transport: newTransporter(ctx),
 	}
-	backup := service.ShareTokenIntentBackup
 	clientOpt := service.ClientOptions{
 		ClientOptions: policyClientOptions,
-		FileRequestIntent: &backup,
 	}
 
 	// Here we auth by setting one of cred, sharedKeyCred or f.client
@@ -899,7 +897,7 @@ func (o *Object) getMetadata(ctx context.Context) error {
 // Hash returns the MD5 of an object returning a lowercase hex string
 //
-// May make a network request because the [fs.List] method does not
+// May make a network request becaue the [fs.List] method does not
 // return MD5 hashes for DirEntry
 func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
 	if ty != hash.MD5 {
View File

@@ -42,10 +42,9 @@ type Bucket struct {
 // LifecycleRule is a single lifecycle rule
 type LifecycleRule struct {
 	DaysFromHidingToDeleting  *int   `json:"daysFromHidingToDeleting"`
 	DaysFromUploadingToHiding *int   `json:"daysFromUploadingToHiding"`
-	DaysFromStartingToCancelingUnfinishedLargeFiles *int `json:"daysFromStartingToCancelingUnfinishedLargeFiles"`
 	FileNamePrefix            string `json:"fileNamePrefix"`
 }
 
 // Timestamp is a UTC time when this file was uploaded. It is a base

View File

@@ -299,13 +299,14 @@ type Fs struct {
 // Object describes a b2 object
 type Object struct {
 	fs       *Fs       // what this object is part of
 	remote   string    // The remote path
 	id       string    // b2 id of the file
 	modTime  time.Time // The modified time of the object if known
 	sha1     string    // SHA-1 hash if known
 	size     int64     // Size of the object
 	mimeType string    // Content-Type of the object
+	meta     map[string]string // The object metadata if known - may be nil - with lower case keys
 }
 
 // ------------------------------------------------------------
@@ -1597,6 +1598,9 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
 	if err != nil {
 		return err
 	}
+	// For now, just set "mtime" in metadata
+	o.meta = make(map[string]string, 1)
+	o.meta["mtime"] = o.modTime.Format(time.RFC3339Nano)
 	return nil
 }
@@ -1876,6 +1880,13 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
 		Info:    Info,
 	}
+	// Embryonic metadata support - just mtime
+	o.meta = make(map[string]string, 1)
+	modTime, err := parseTimeStringHelper(info.Info[timeKey])
+	if err == nil {
+		o.meta["mtime"] = modTime.Format(time.RFC3339Nano)
+	}
 	// When reading files from B2 via cloudflare using
 	// --b2-download-url cloudflare strips the Content-Length
 	// headers (presumably so it can inject stuff) so use the old
@@ -2220,7 +2231,6 @@ This will dump something like this showing the lifecycle rules.
     {
         "daysFromHidingToDeleting": 1,
         "daysFromUploadingToHiding": null,
-        "daysFromStartingToCancelingUnfinishedLargeFiles": null,
         "fileNamePrefix": ""
     }
 ]
@@ -2247,9 +2257,8 @@ overwrites will still cause versions to be made.
 See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
 `,
 	Opts: map[string]string{
 		"daysFromHidingToDeleting":  "After a file has been hidden for this many days it is deleted. 0 is off.",
 		"daysFromUploadingToHiding": "This many days after uploading a file is hidden",
-		"daysFromStartingToCancelingUnfinishedLargeFiles": "Cancels any unfinished large file versions after this many days",
 	},
 }
@@ -2269,13 +2278,6 @@ func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, op
 		}
 		newRule.DaysFromUploadingToHiding = &days
 	}
-	if daysStr := opt["daysFromStartingToCancelingUnfinishedLargeFiles"]; daysStr != "" {
-		days, err := strconv.Atoi(daysStr)
-		if err != nil {
-			return nil, fmt.Errorf("bad daysFromStartingToCancelingUnfinishedLargeFiles: %w", err)
-		}
-		newRule.DaysFromStartingToCancelingUnfinishedLargeFiles = &days
-	}
 	bucketName, _ := f.split("")
 	if bucketName == "" {
 		return nil, errors.New("bucket required")
@@ -2283,7 +2285,7 @@ func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, op
 	}
 	var bucket *api.Bucket
-	if newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil || newRule.DaysFromStartingToCancelingUnfinishedLargeFiles != nil {
+	if newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil {
 		bucketID, err := f.getBucketID(ctx, bucketName)
 		if err != nil {
 			return nil, err
View File

@@ -256,6 +256,12 @@ func (f *Fs) internalTestMetadata(t *testing.T, size string, uploadCutoff string
 		assert.Equal(t, v, got, k)
 	}
 
+	// mtime
+	for k, v := range metadata {
+		got := o.meta[k]
+		assert.Equal(t, v, got, k)
+	}
+
 	assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")
 
 	// Modification time from the x-bz-info-src_last_modified_millis header

View File

@@ -46,6 +46,7 @@ import (
 	"github.com/rclone/rclone/lib/random"
 	"github.com/rclone/rclone/lib/rest"
 	"github.com/youmark/pkcs8"
+	"golang.org/x/oauth2"
 )
 
 const (
@@ -64,10 +65,12 @@ const (
 // Globals
 var (
 	// Description of how to auth for this app
-	oauthConfig = &oauthutil.Config{
+	oauthConfig = &oauth2.Config{
 		Scopes: nil,
-		AuthURL:  "https://app.box.com/api/oauth2/authorize",
-		TokenURL: "https://app.box.com/api/oauth2/token",
+		Endpoint: oauth2.Endpoint{
+			AuthURL:  "https://app.box.com/api/oauth2/authorize",
+			TokenURL: "https://app.box.com/api/oauth2/token",
+		},
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectURL,
@@ -255,9 +258,6 @@ func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
 func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
 	block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
-	if block == nil {
-		return nil, errors.New("box: failed to PEM decode private key")
-	}
 	if len(rest) > 0 {
 		return nil, fmt.Errorf("box: extra data included in private key: %w", err)
 	}

View File

@@ -2480,7 +2480,7 @@ func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte)
 	if len(data) > maxMetadataSizeWritten {
 		return nil, false, ErrMetaTooBig
 	}
-	if len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
+	if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
 		return nil, false, errors.New("invalid json")
 	}
 	var metadata metaSimpleJSON

View File

@@ -1,48 +0,0 @@
// Package api has type definitions for cloudinary
package api
import (
"fmt"
)
// CloudinaryEncoder extends the built-in encoder
type CloudinaryEncoder interface {
// FromStandardPath takes a / separated path in Standard encoding
// and converts it to a / separated path in this encoding.
FromStandardPath(string) string
// FromStandardName takes name in Standard encoding and converts
// it in this encoding.
FromStandardName(string) string
// ToStandardPath takes a / separated path in this encoding
// and converts it to a / separated path in Standard encoding.
ToStandardPath(string) string
// ToStandardName takes name in this encoding and converts
// it in Standard encoding.
ToStandardName(string) string
// Encoded root of the remote (as passed into NewFs)
FromStandardFullPath(string) string
}
// UpdateOptions was created to pass options from Update to Put
type UpdateOptions struct {
PublicID string
ResourceType string
DeliveryType string
AssetFolder string
DisplayName string
}
// Header formats the option as a string
func (o *UpdateOptions) Header() (string, string) {
return "UpdateOption", fmt.Sprintf("%s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
}
// Mandatory returns whether the option must be parsed or can be ignored
func (o *UpdateOptions) Mandatory() bool {
return false
}
// String formats the option into human-readable form
func (o *UpdateOptions) String() string {
return fmt.Sprintf("Fully qualified Public ID: %s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
}

View File

@@ -1,711 +0,0 @@
// Package cloudinary provides an interface to the Cloudinary DAM
package cloudinary
import (
"context"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"path"
"strconv"
"strings"
"time"
"github.com/cloudinary/cloudinary-go/v2"
SDKApi "github.com/cloudinary/cloudinary-go/v2/api"
"github.com/cloudinary/cloudinary-go/v2/api/admin"
"github.com/cloudinary/cloudinary-go/v2/api/admin/search"
"github.com/cloudinary/cloudinary-go/v2/api/uploader"
"github.com/rclone/rclone/backend/cloudinary/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"github.com/zeebo/blake3"
)
// Cloudinary shouldn't have a trailing dot if there is no path
func cldPathDir(somePath string) string {
if somePath == "" || somePath == "." {
return somePath
}
dir := path.Dir(somePath)
if dir == "." {
return ""
}
return dir
}
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "cloudinary",
Description: "Cloudinary",
NewFs: NewFs,
Options: []fs.Option{
{
Name: "cloud_name",
Help: "Cloudinary Environment Name",
Required: true,
Sensitive: true,
},
{
Name: "api_key",
Help: "Cloudinary API Key",
Required: true,
Sensitive: true,
},
{
Name: "api_secret",
Help: "Cloudinary API Secret",
Required: true,
Sensitive: true,
},
{
Name: "upload_prefix",
Help: "Specify the API endpoint for environments out of the US",
},
{
Name: "upload_preset",
Help: "Upload Preset to select asset manipulation on upload",
},
{
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.Base | // Slash,LtGt,DoubleQuote,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot
encoder.EncodeSlash |
encoder.EncodeLtGt |
encoder.EncodeDoubleQuote |
encoder.EncodeQuestion |
encoder.EncodeAsterisk |
encoder.EncodePipe |
encoder.EncodeHash |
encoder.EncodePercent |
encoder.EncodeBackSlash |
encoder.EncodeDel |
encoder.EncodeCtl |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8 |
encoder.EncodeDot),
},
{
Name: "eventually_consistent_delay",
Default: fs.Duration(0),
Advanced: true,
Help: "Wait N seconds for eventual consistency of the databases that support the backend operation",
},
},
})
}
// Options defines the configuration for this backend
type Options struct {
CloudName string `config:"cloud_name"`
APIKey string `config:"api_key"`
APISecret string `config:"api_secret"`
UploadPrefix string `config:"upload_prefix"`
UploadPreset string `config:"upload_preset"`
Enc encoder.MultiEncoder `config:"encoding"`
EventuallyConsistentDelay fs.Duration `config:"eventually_consistent_delay"`
}
// Fs represents a remote cloudinary server
type Fs struct {
name string
root string
opt Options
features *fs.Features
pacer *fs.Pacer
srv *rest.Client // For downloading assets via the Cloudinary CDN
cld *cloudinary.Cloudinary // API calls are going through the Cloudinary SDK
lastCRUD time.Time
}
// Object describes a cloudinary object
type Object struct {
fs *Fs
remote string
size int64
modTime time.Time
url string
md5sum string
publicID string
resourceType string
deliveryType string
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// Initialize the Cloudinary client
cld, err := cloudinary.NewFromParams(opt.CloudName, opt.APIKey, opt.APISecret)
if err != nil {
return nil, fmt.Errorf("failed to create Cloudinary client: %w", err)
}
cld.Admin.Client = *fshttp.NewClient(ctx)
cld.Upload.Client = *fshttp.NewClient(ctx)
if opt.UploadPrefix != "" {
cld.Config.API.UploadPrefix = opt.UploadPrefix
}
client := fshttp.NewClient(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
cld: cld,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(1000), pacer.MaxSleep(10000), pacer.DecayConstant(2))),
srv: rest.NewClient(client),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
if root != "" {
// Check to see if the root actually an existing file
remote := path.Base(root)
f.root = cldPathDir(root)
_, err := f.NewObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound || errors.Is(err, fs.ErrorNotAFile) {
// File doesn't exist so return the previous root
f.root = root
return f, nil
}
return nil, err
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
// ------------------------------------------------------------
// FromStandardPath implementation of the api.CloudinaryEncoder
func (f *Fs) FromStandardPath(s string) string {
return strings.ReplaceAll(f.opt.Enc.FromStandardPath(s), "&", "\uFF06")
}
// FromStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) FromStandardName(s string) string {
return strings.ReplaceAll(f.opt.Enc.FromStandardName(s), "&", "\uFF06")
}
// ToStandardPath implementation of the api.CloudinaryEncoder
func (f *Fs) ToStandardPath(s string) string {
return strings.ReplaceAll(f.opt.Enc.ToStandardPath(s), "\uFF06", "&")
}
// ToStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) ToStandardName(s string) string {
return strings.ReplaceAll(f.opt.Enc.ToStandardName(s), "\uFF06", "&")
}
// FromStandardFullPath encodes a full path to Cloudinary standard
func (f *Fs) FromStandardFullPath(dir string) string {
return path.Join(api.CloudinaryEncoder.FromStandardPath(f, f.root), api.CloudinaryEncoder.FromStandardPath(f, dir))
}
// ToAssetFolderAPI encodes folders as expected by the Cloudinary SDK
func (f *Fs) ToAssetFolderAPI(dir string) string {
return strings.ReplaceAll(dir, "%", "%25")
}
// ToDisplayNameElastic encodes a special case of elasticsearch
func (f *Fs) ToDisplayNameElastic(dir string) string {
return strings.ReplaceAll(dir, "!", "\\!")
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// WaitEventuallyConsistent waits till the FS is eventually consistent
func (f *Fs) WaitEventuallyConsistent() {
if f.opt.EventuallyConsistentDelay == fs.Duration(0) {
return
}
delay := time.Duration(f.opt.EventuallyConsistentDelay)
timeSinceLastCRUD := time.Since(f.lastCRUD)
if timeSinceLastCRUD < delay {
time.Sleep(delay - timeSinceLastCRUD)
}
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("Cloudinary root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// List the objects and directories in dir into entries
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
remotePrefix := f.FromStandardFullPath(dir)
if remotePrefix != "" && !strings.HasSuffix(remotePrefix, "/") {
remotePrefix += "/"
}
var entries fs.DirEntries
dirs := make(map[string]struct{})
nextCursor := ""
f.WaitEventuallyConsistent()
for {
// user the folders api to list folders.
folderParams := admin.SubFoldersParams{
Folder: f.ToAssetFolderAPI(remotePrefix),
MaxResults: 500,
}
if nextCursor != "" {
folderParams.NextCursor = nextCursor
}
results, err := f.cld.Admin.SubFolders(ctx, folderParams)
if err != nil {
return nil, fmt.Errorf("failed to list sub-folders: %w", err)
}
if results.Error.Message != "" {
if strings.HasPrefix(results.Error.Message, "Can't find folder with path") {
return nil, fs.ErrorDirNotFound
}
return nil, fmt.Errorf("failed to list sub-folders: %s", results.Error.Message)
}
for _, folder := range results.Folders {
relativePath := api.CloudinaryEncoder.ToStandardPath(f, strings.TrimPrefix(folder.Path, remotePrefix))
parts := strings.Split(relativePath, "/")
// It's a directory
dirName := parts[len(parts)-1]
if _, found := dirs[dirName]; !found {
d := fs.NewDir(path.Join(dir, dirName), time.Time{})
entries = append(entries, d)
dirs[dirName] = struct{}{}
}
}
// Break if there are no more results
if results.NextCursor == "" {
break
}
nextCursor = results.NextCursor
}
for {
// Use the assets.AssetsByAssetFolder API to list assets
assetsParams := admin.AssetsByAssetFolderParams{
AssetFolder: remotePrefix,
MaxResults: 500,
}
if nextCursor != "" {
assetsParams.NextCursor = nextCursor
}
results, err := f.cld.Admin.AssetsByAssetFolder(ctx, assetsParams)
if err != nil {
return nil, fmt.Errorf("failed to list assets: %w", err)
}
for _, asset := range results.Assets {
remote := api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName)
if dir != "" {
remote = path.Join(dir, api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName))
}
o := &Object{
fs: f,
remote: remote,
size: int64(asset.Bytes),
modTime: asset.CreatedAt,
url: asset.SecureURL,
publicID: asset.PublicID,
resourceType: asset.AssetType,
deliveryType: asset.Type,
}
entries = append(entries, o)
}
// Break if there are no more results
if results.NextCursor == "" {
break
}
nextCursor = results.NextCursor
}
return entries, nil
}
// NewObject finds the Object at remote. If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
searchParams := search.Query{
Expression: fmt.Sprintf("asset_folder:\"%s\" AND display_name:\"%s\"",
f.FromStandardFullPath(cldPathDir(remote)),
f.ToDisplayNameElastic(api.CloudinaryEncoder.FromStandardName(f, path.Base(remote)))),
SortBy: []search.SortByField{{"uploaded_at": "desc"}},
MaxResults: 2,
}
var results *admin.SearchResult
f.WaitEventuallyConsistent()
err := f.pacer.Call(func() (bool, error) {
var err1 error
results, err1 = f.cld.Admin.Search(ctx, searchParams)
if err1 == nil && results.TotalCount != len(results.Assets) {
err1 = errors.New("partial response so waiting for eventual consistency")
}
return shouldRetry(ctx, nil, err1)
})
if err != nil {
return nil, fs.ErrorObjectNotFound
}
if results.TotalCount == 0 || len(results.Assets) == 0 {
return nil, fs.ErrorObjectNotFound
}
asset := results.Assets[0]
o := &Object{
fs: f,
remote: remote,
size: int64(asset.Bytes),
modTime: asset.UploadedAt,
url: asset.SecureURL,
md5sum: asset.Etag,
publicID: asset.PublicID,
resourceType: asset.ResourceType,
deliveryType: asset.Type,
}
return o, nil
}
func (f *Fs) getSuggestedPublicID(assetFolder string, displayName string, modTime time.Time) string {
payload := []byte(path.Join(assetFolder, displayName))
hash := blake3.Sum256(payload)
return hex.EncodeToString(hash[:])
}
// Put uploads content to Cloudinary
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if src.Size() == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
params := uploader.UploadParams{
UploadPreset: f.opt.UploadPreset,
}
updateObject := false
var modTime time.Time
for _, option := range options {
if updateOptions, ok := option.(*api.UpdateOptions); ok {
if updateOptions.PublicID != "" {
updateObject = true
params.Overwrite = SDKApi.Bool(true)
params.Invalidate = SDKApi.Bool(true)
params.PublicID = updateOptions.PublicID
params.ResourceType = updateOptions.ResourceType
params.Type = SDKApi.DeliveryType(updateOptions.DeliveryType)
params.AssetFolder = updateOptions.AssetFolder
params.DisplayName = updateOptions.DisplayName
modTime = src.ModTime(ctx)
}
}
}
if !updateObject {
params.AssetFolder = f.FromStandardFullPath(cldPathDir(src.Remote()))
params.DisplayName = api.CloudinaryEncoder.FromStandardName(f, path.Base(src.Remote()))
// We want to conform to the unique asset ID of rclone, which is (asset_folder,display_name,last_modified).
// We also want to enable customers to choose their own public_id, in case duplicate names are not a crucial use case.
// Upload_presets that apply randomness to the public ID would not work well with rclone duplicate assets support.
params.FilenameOverride = f.getSuggestedPublicID(params.AssetFolder, params.DisplayName, src.ModTime(ctx))
}
uploadResult, err := f.cld.Upload.Upload(ctx, in, params)
f.lastCRUD = time.Now()
if err != nil {
return nil, fmt.Errorf("failed to upload to Cloudinary: %w", err)
}
if !updateObject {
modTime = uploadResult.CreatedAt
}
if uploadResult.Error.Message != "" {
return nil, errors.New(uploadResult.Error.Message)
}
o := &Object{
fs: f,
remote: src.Remote(),
size: int64(uploadResult.Bytes),
modTime: modTime,
url: uploadResult.SecureURL,
md5sum: uploadResult.Etag,
publicID: uploadResult.PublicID,
resourceType: uploadResult.ResourceType,
deliveryType: uploadResult.Type,
}
return o, nil
}
// Precision of the remote
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Hashes returns the supported hash sets
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// Mkdir creates empty folders
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
params := admin.CreateFolderParams{Folder: f.ToAssetFolderAPI(f.FromStandardFullPath(dir))}
res, err := f.cld.Admin.CreateFolder(ctx, params)
f.lastCRUD = time.Now()
if err != nil {
return err
}
if res.Error.Message != "" {
return errors.New(res.Error.Message)
}
return nil
}
// Rmdir deletes empty folders
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// Additional test because Cloudinary will delete folders without
// assets, regardless of empty sub-folders
folder := f.ToAssetFolderAPI(f.FromStandardFullPath(dir))
folderParams := admin.SubFoldersParams{
Folder: folder,
MaxResults: 1,
}
results, err := f.cld.Admin.SubFolders(ctx, folderParams)
if err != nil {
return err
}
if results.TotalCount > 0 {
return fs.ErrorDirectoryNotEmpty
}
params := admin.DeleteFolderParams{Folder: folder}
res, err := f.cld.Admin.DeleteFolder(ctx, params)
f.lastCRUD = time.Now()
if err != nil {
return err
}
if res.Error.Message != "" {
if strings.HasPrefix(res.Error.Message, "Can't find folder with path") {
return fs.ErrorDirNotFound
}
return errors.New(res.Error.Message)
}
return nil
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
420, // Too Many Requests (legacy)
429, // Too Many Requests
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
if err != nil {
tryAgain := "Try again on "
if idx := strings.Index(err.Error(), tryAgain); idx != -1 {
layout := "2006-01-02 15:04:05 UTC"
dateStr := err.Error()[idx+len(tryAgain) : idx+len(tryAgain)+len(layout)]
timestamp, err2 := time.Parse(layout, dateStr)
if err2 == nil {
return true, fserrors.NewErrorRetryAfter(time.Until(timestamp))
}
}
fs.Debugf(nil, "Retrying API error %v", err)
return true, err
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// ------------------------------------------------------------
// Hash returns the MD5 of an object
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
if ty != hash.MD5 {
return "", hash.ErrUnsupported
}
return o.md5sum, nil
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// Size of object in bytes
func (o *Object) Size() int64 {
return o.size
}
// Storable returns if this object is storable
func (o *Object) Storable() bool {
return true
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return fs.ErrorCantSetModTime
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
var resp *http.Response
opts := rest.Opts{
Method: "GET",
RootURL: o.url,
Options: options,
}
var offset int64
var count int64
var key string
var value string
fs.FixRangeOption(options, o.size)
for _, option := range options {
switch x := option.(type) {
case *fs.RangeOption:
offset, count = x.Decode(o.size)
if count < 0 {
count = o.size - offset
}
key, value = option.Header()
case *fs.SeekOption:
offset = x.Offset
count = o.size - offset
key, value = option.Header()
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
if key != "" && value != "" {
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders[key] = value
}
// Make sure that the asset is fully available
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
if err == nil {
cl, clErr := strconv.Atoi(resp.Header.Get("content-length"))
if clErr == nil && count == int64(cl) {
return false, nil
}
}
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed download of \"%s\": %w", o.url, err)
}
return resp.Body, err
}
// Update the object with the contents of the io.Reader
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
options = append(options, &api.UpdateOptions{
PublicID: o.publicID,
ResourceType: o.resourceType,
DeliveryType: o.deliveryType,
DisplayName: api.CloudinaryEncoder.FromStandardName(o.fs, path.Base(o.Remote())),
AssetFolder: o.fs.FromStandardFullPath(cldPathDir(o.Remote())),
})
updatedObj, err := o.fs.Put(ctx, in, src, options...)
if err != nil {
return err
}
if uo, ok := updatedObj.(*Object); ok {
o.size = uo.size
o.modTime = time.Now() // Skipping uo.modTime because the API returns the create time
o.url = uo.url
o.md5sum = uo.md5sum
o.publicID = uo.publicID
o.resourceType = uo.resourceType
o.deliveryType = uo.deliveryType
}
return nil
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
params := uploader.DestroyParams{
PublicID: o.publicID,
ResourceType: o.resourceType,
Type: o.deliveryType,
}
res, dErr := o.fs.cld.Upload.Destroy(ctx, params)
o.fs.lastCRUD = time.Now()
if dErr != nil {
return dErr
}
if res.Error.Message != "" {
return errors.New(res.Error.Message)
}
if res.Result != "ok" {
return errors.New(res.Result)
}
return nil
}

View File

@@ -1,23 +0,0 @@
// Test Cloudinary filesystem interface
package cloudinary_test
import (
"testing"
"github.com/rclone/rclone/backend/cloudinary"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
name := "TestCloudinary"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*cloudinary.Object)(nil),
SkipInvalidUTF8: true,
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "eventually_consistent_delay", Value: "7"},
},
})
}

View File

@@ -80,10 +80,9 @@ const (
 // Globals
 var (
 	// Description of how to auth for this app
-	driveConfig = &oauthutil.Config{
+	driveConfig = &oauth2.Config{
 		Scopes:       []string{scopePrefix + "drive"},
-		AuthURL:      google.Endpoint.AuthURL,
-		TokenURL:     google.Endpoint.TokenURL,
+		Endpoint:     google.Endpoint,
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectURL,
@@ -203,6 +202,7 @@ func driveScopesContainsAppFolder(scopes []string) bool {
 		if scope == scopePrefix+"drive.appfolder" {
 			return true
 		}
+
 	}
 	return false
 }
@@ -1211,7 +1211,6 @@ func fixMimeType(mimeTypeIn string) string {
 	}
 	return mimeTypeOut
 }
-
 func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
 	out = make(map[string][]string, len(in))
 	for k, v := range in {
@@ -1222,11 +1221,9 @@ func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
 	}
 	return out
 }
-
 func isInternalMimeType(mimeType string) bool {
 	return strings.HasPrefix(mimeType, "application/vnd.google-apps.")
 }
-
 func isLinkMimeType(mimeType string) bool {
 	return strings.HasPrefix(mimeType, "application/x-link-")
 }
@@ -1659,8 +1656,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.F
 // When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
 func (f *Fs) newObjectWithExportInfo(
 	ctx context.Context, remote string, info *drive.File,
-	extension, exportName, exportMimeType string, isDocument bool,
-) (o fs.Object, err error) {
+	extension, exportName, exportMimeType string, isDocument bool) (o fs.Object, err error) {
 	// Note that resolveShortcut will have been called already if
 	// we are being called from a listing. However the drive.Item
 	// will have been resolved so this will do nothing.
@@ -1763,7 +1759,7 @@ func (f *Fs) createDir(ctx context.Context, pathID, leaf string, metadata fs.Met
 	}
 	var updateMetadata updateMetadataFn
 	if len(metadata) > 0 {
-		updateMetadata, err = f.updateMetadata(ctx, createInfo, metadata, true, true)
+		updateMetadata, err = f.updateMetadata(ctx, createInfo, metadata, true)
 		if err != nil {
 			return nil, fmt.Errorf("create dir: failed to update metadata: %w", err)
 		}
@@ -1794,7 +1790,7 @@ func (f *Fs) updateDir(ctx context.Context, dirID string, metadata fs.Metadata)
 	}
 	dirID = actualID(dirID)
 	updateInfo := &drive.File{}
-	updateMetadata, err := f.updateMetadata(ctx, updateInfo, metadata, true, true)
+	updateMetadata, err := f.updateMetadata(ctx, updateInfo, metadata, true)
 	if err != nil {
 		return nil, fmt.Errorf("update dir: failed to update metadata from source object: %w", err)
 	}
@@ -1851,7 +1847,6 @@ func linkTemplate(mt string) *template.Template {
 	})
 	return _linkTemplates[mt]
 }
-
 func (f *Fs) fetchFormats(ctx context.Context) {
 	fetchFormatsOnce.Do(func() {
 		var about *drive.About
@@ -1897,8 +1892,7 @@ func (f *Fs) importFormats(ctx context.Context) map[string][]string {
 // Look through the exportExtensions and find the first format that can be
 // converted. If none found then return ("", "", false)
 func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string) (
-	extension, mimeType string, isDocument bool,
-) {
+	extension, mimeType string, isDocument bool) {
 	exportMimeTypes, isDocument := f.exportFormats(ctx)[itemMimeType]
 	if isDocument {
 		for _, _extension := range f.exportExtensions {
@@ -2694,7 +2688,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 	if shortcutID != "" {
 		return f.delete(ctx, shortcutID, f.opt.UseTrash)
 	}
-	trashedFiles := false
+	var trashedFiles = false
 	if check {
 		found, err := f.list(ctx, []string{directoryID}, "", false, false, f.opt.TrashedOnly, true, func(item *drive.File) bool {
 			if !item.Trashed {
@@ -2931,6 +2925,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 		err := f.svc.Files.EmptyTrash().Context(ctx).Do()
 		return f.shouldRetry(ctx, err)
 	})
+
 	if err != nil {
 		return err
 	}
@@ -3191,7 +3186,6 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
 		}
 	}()
 }
-
 func (f *Fs) changeNotifyStartPageToken(ctx context.Context) (pageToken string, err error) {
 	var startPageToken *drive.StartPageToken
 	err = f.pacer.Call(func() (bool, error) {
@@ -3995,13 +3989,14 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
 	case "query":
 		if len(arg) == 1 {
 			query := arg[0]
-			results, err := f.query(ctx, query)
+			var results, err = f.query(ctx, query)
 			if err != nil {
 				return nil, fmt.Errorf("failed to execute query: %q, error: %w", query, err)
 			}
 			return results, nil
+		} else {
+			return nil, errors.New("need a query argument")
 		}
-		return nil, errors.New("need a query argument")
 	case "rescue":
 		dirID := ""
 		_, delete := opt["delete"]
@@ -4061,7 +4056,6 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 	}
 	return "", hash.ErrUnsupported
 }
-
 func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
 	if t != hash.MD5 && t != hash.SHA1 && t != hash.SHA256 {
 		return "", hash.ErrUnsupported
@@ -4076,8 +4070,7 @@ func (o *baseObject) Size() int64 {
 // getRemoteInfoWithExport returns a drive.File and the export settings for the remote
 func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
-	info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error,
-) {
+	info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) {
 	leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
 	if err != nil {
 		if err == fs.ErrorDirNotFound {
@@ -4290,13 +4283,12 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	}
 	return o.baseObject.open(ctx, o.url, options...)
 }
-
 func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	// Update the size with what we are reading as it can change from
 	// the HEAD in the listing to this GET. This stops rclone marking
 	// the transfer as corrupted.
var offset, end int64 = 0, -1 var offset, end int64 = 0, -1
newOptions := options[:0] var newOptions = options[:0]
for _, o := range options { for _, o := range options {
// Note that Range requests don't work on Google docs: // Note that Range requests don't work on Google docs:
// https://developers.google.com/drive/v3/web/manage-downloads#partial_download // https://developers.google.com/drive/v3/web/manage-downloads#partial_download
@@ -4323,10 +4315,9 @@ func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in
} }
return return
} }
func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
var offset, limit int64 = 0, -1 var offset, limit int64 = 0, -1
data := o.content var data = o.content
for _, option := range options { for _, option := range options {
switch x := option.(type) { switch x := option.(type) {
case *fs.SeekOption: case *fs.SeekOption:
@@ -4351,8 +4342,7 @@ func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.
} }
func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader, func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
src fs.ObjectInfo, src fs.ObjectInfo) (info *drive.File, err error) {
) (info *drive.File, err error) {
// Make the API request to upload metadata and file data. // Make the API request to upload metadata and file data.
size := src.Size() size := src.Size()
if size >= 0 && size < int64(o.fs.opt.UploadCutoff) { if size >= 0 && size < int64(o.fs.opt.UploadCutoff) {
@@ -4430,7 +4420,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return nil return nil
} }
func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
srcMimeType := fs.MimeType(ctx, src) srcMimeType := fs.MimeType(ctx, src)
importMimeType := "" importMimeType := ""
@@ -4526,7 +4515,6 @@ func (o *baseObject) Metadata(ctx context.Context) (metadata fs.Metadata, err er
func (o *documentObject) ext() string { func (o *documentObject) ext() string {
return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:] return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
} }
func (o *linkObject) ext() string { func (o *linkObject) ext() string {
return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:] return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
} }

@@ -508,7 +508,7 @@ type updateMetadataFn func(context.Context, *drive.File) error
 //
 // It returns a callback which should be called to finish the updates
 // after the data is uploaded.
-func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update, isFolder bool) (callback updateMetadataFn, err error) {
+func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update bool) (callback updateMetadataFn, err error) {
 	callbackFns := []updateMetadataFn{}
 	callback = func(ctx context.Context, info *drive.File) error {
 		for _, fn := range callbackFns {
@@ -533,9 +533,7 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
 		}
 		switch k {
 		case "copy-requires-writer-permission":
-			if isFolder {
-				fs.Debugf(f, "Ignoring %s=%s as can't set on folders", k, v)
-			} else if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
+			if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
 				return nil, err
 			}
 		case "writers-can-share":
@@ -632,7 +630,7 @@ func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, opti
 	if err != nil {
 		return nil, fmt.Errorf("failed to read metadata from source object: %w", err)
 	}
-	callback, err = f.updateMetadata(ctx, updateInfo, meta, update, false)
+	callback, err = f.updateMetadata(ctx, updateInfo, meta, update)
 	if err != nil {
 		return nil, fmt.Errorf("failed to update metadata from source object: %w", err)
 	}

@@ -11,6 +11,7 @@ import (
 	"fmt"

 	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
+	"github.com/rclone/rclone/fs/fserrors"
 )

 // finishBatch commits the batch, returning a batch status to poll or maybe complete
@@ -20,10 +21,14 @@ func (f *Fs) finishBatch(ctx context.Context, items []*files.UploadSessionFinish
 	}
 	err = f.pacer.Call(func() (bool, error) {
 		complete, err = f.srv.UploadSessionFinishBatchV2(arg)
-		if retry, err := shouldRetryExclude(ctx, err); !retry {
-			return retry, err
+		// If error is insufficient space then don't retry
+		if e, ok := err.(files.UploadSessionFinishAPIError); ok {
+			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
+				err = fserrors.NoRetryError(err)
+				return false, err
+			}
 		}
-		// after the first chunk is uploaded, we retry everything except the excluded errors
+		// after the first chunk is uploaded, we retry everything
 		return err != nil, err
 	})
 	if err != nil {

@@ -94,7 +94,7 @@ const (
 var (
 	// Description of how to auth for this app
-	dropboxConfig = &oauthutil.Config{
+	dropboxConfig = &oauth2.Config{
 		Scopes: []string{
 			"files.metadata.write",
 			"files.content.write",
@@ -109,8 +109,7 @@ var (
 		// AuthURL:  "https://www.dropbox.com/1/oauth2/authorize",
 		// TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
 		// },
-		AuthURL:      dropbox.OAuthEndpoint("").AuthURL,
-		TokenURL:     dropbox.OAuthEndpoint("").TokenURL,
+		Endpoint:     dropbox.OAuthEndpoint(""),
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectLocalhostURL,
@@ -135,7 +134,7 @@ var (
 )

 // Gets an oauth config with the right scopes
-func getOauthConfig(m configmap.Mapper) *oauthutil.Config {
+func getOauthConfig(m configmap.Mapper) *oauth2.Config {
 	// If not impersonating, use standard scopes
 	if impersonate, _ := m.Get("impersonate"); impersonate == "" {
 		return dropboxConfig
@@ -318,46 +317,32 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }

-// Some specific errors which should be excluded from retries
-func shouldRetryExclude(ctx context.Context, err error) (bool, error) {
-	if err == nil {
-		return false, err
-	}
+// shouldRetry returns a boolean as to whether this err deserves to be
+// retried. It returns the err as a convenience
+func shouldRetry(ctx context.Context, err error) (bool, error) {
 	if fserrors.ContextError(ctx, &err) {
 		return false, err
 	}
-	// First check for specific errors
-	//
-	// These come back from the SDK in a whole host of different
-	// error types, but there doesn't seem to be a consistent way
-	// of reading the error cause, so here we just check using the
-	// error string which isn't perfect but does the job.
+	if err == nil {
+		return false, err
+	}
 	errString := err.Error()
+	// First check for specific errors
 	if strings.Contains(errString, "insufficient_space") {
 		return false, fserrors.FatalError(err)
 	} else if strings.Contains(errString, "malformed_path") {
 		return false, fserrors.NoRetryError(err)
 	}
-	return true, err
-}
-
-// shouldRetry returns a boolean as to whether this err deserves to be
-// retried. It returns the err as a convenience
-func shouldRetry(ctx context.Context, err error) (bool, error) {
-	if retry, err := shouldRetryExclude(ctx, err); !retry {
-		return retry, err
-	}
 	// Then handle any official Retry-After header from Dropbox's SDK
 	switch e := err.(type) {
 	case auth.RateLimitAPIError:
 		if e.RateLimitError.RetryAfter > 0 {
-			fs.Logf(nil, "Error %v. Too many requests or write operations. Trying again in %d seconds.", err, e.RateLimitError.RetryAfter)
+			fs.Logf(errString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
 			err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
 		}
 		return true, err
 	}
 	// Keep old behavior for backward compatibility
-	errString := err.Error()
 	if strings.Contains(errString, "too_many_write_operations") || strings.Contains(errString, "too_many_requests") || errString == "" {
 		return true, err
 	}
@@ -1174,16 +1159,6 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 		return shouldRetry(ctx, err)
 	})
-	if err != nil && createArg.Settings.Expires != nil && strings.Contains(err.Error(), sharing.SharedLinkSettingsErrorNotAuthorized) {
-		// Some plans can't create links with expiry
-		fs.Debugf(absPath, "can't create link with expiry, trying without")
-		createArg.Settings.Expires = nil
-		err = f.pacer.Call(func() (bool, error) {
-			linkRes, err = f.sharing.CreateSharedLinkWithSettings(&createArg)
-			return shouldRetry(ctx, err)
-		})
-	}
-
 	if err != nil && strings.Contains(err.Error(),
 		sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
 		fs.Debugf(absPath, "has a public link already, attempting to retrieve it")
@@ -1724,10 +1699,14 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
 	err = o.fs.pacer.Call(func() (bool, error) {
 		entry, err = o.fs.srv.UploadSessionFinish(args, nil)
-		if retry, err := shouldRetryExclude(ctx, err); !retry {
-			return retry, err
+		// If error is insufficient space then don't retry
+		if e, ok := err.(files.UploadSessionFinishAPIError); ok {
+			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
+				err = fserrors.NoRetryError(err)
+				return false, err
+			}
 		}
-		// after the first chunk is uploaded, we retry everything except the excluded errors
+		// after the first chunk is uploaded, we retry everything
 		return err != nil, err
 	})
 	if err != nil {

@@ -60,17 +60,14 @@ const (
 	minSleep = 10 * time.Millisecond
 )

-var (
-	// Description of how to auth for this app
-	storageConfig = &oauthutil.Config{
-		Scopes:       []string{storage.DevstorageReadWriteScope},
-		AuthURL:      google.Endpoint.AuthURL,
-		TokenURL:     google.Endpoint.TokenURL,
-		ClientID:     rcloneClientID,
-		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
-		RedirectURL:  oauthutil.RedirectURL,
-	}
-)
+// Description of how to auth for this app
+var storageConfig = &oauth2.Config{
+	Scopes:       []string{storage.DevstorageReadWriteScope},
+	Endpoint:     google.Endpoint,
+	ClientID:     rcloneClientID,
+	ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
+	RedirectURL:  oauthutil.RedirectURL,
+}

 // Register with Fs
 func init() {

@@ -33,6 +33,7 @@ import (
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/rest"
+	"golang.org/x/oauth2"
 	"golang.org/x/oauth2/google"
 )

@@ -59,14 +60,13 @@ const (
 var (
 	// Description of how to auth for this app
-	oauthConfig = &oauthutil.Config{
+	oauthConfig = &oauth2.Config{
 		Scopes: []string{
 			"openid",
 			"profile",
 			scopeReadWrite, // this must be at position scopeAccess
 		},
-		AuthURL:      google.Endpoint.AuthURL,
-		TokenURL:     google.Endpoint.TokenURL,
+		Endpoint:     google.Endpoint,
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectURL,
@@ -1168,7 +1168,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	errors := make([]error, 1)
 	results := make([]*api.MediaItem, 1)
 	err = o.fs.commitBatch(ctx, []uploadedItem{uploaded}, results, errors)
-	if err == nil {
+	if err != nil {
 		err = errors[0]
 		info = results[0]
 	}

@@ -2,7 +2,6 @@ package googlephotos

 import (
 	"context"
-	"errors"
 	"fmt"
 	"io"
 	"net/http"
@@ -36,7 +35,7 @@ func TestIntegration(t *testing.T) {
 		*fstest.RemoteName = "TestGooglePhotos:"
 	}
 	f, err := fs.NewFs(ctx, *fstest.RemoteName)
-	if errors.Is(err, fs.ErrorNotFoundInConfigFile) {
+	if err == fs.ErrorNotFoundInConfigFile {
 		t.Skipf("Couldn't create google photos backend - skipping tests: %v", err)
 	}
 	require.NoError(t, err)

@@ -31,6 +31,7 @@ import (
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/rest"
+	"golang.org/x/oauth2"
 )

 const (
@@ -47,9 +48,11 @@ const (
 // Globals
 var (
 	// Description of how to auth for this app.
-	oauthConfig = &oauthutil.Config{
-		AuthURL:      "https://my.hidrive.com/client/authorize",
-		TokenURL:     "https://my.hidrive.com/oauth2/token",
+	oauthConfig = &oauth2.Config{
+		Endpoint: oauth2.Endpoint{
+			AuthURL:  "https://my.hidrive.com/client/authorize",
+			TokenURL: "https://my.hidrive.com/oauth2/token",
+		},
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.TitleBarRedirectURL,

@@ -180,6 +180,7 @@ func getFsEndpoint(ctx context.Context, client *http.Client, url string, opt *Op
 	}
 	addHeaders(req, opt)
 	res, err := noRedir.Do(req)
+
 	if err != nil {
 		fs.Debugf(nil, "Assuming path is a file as HEAD request could not be sent: %v", err)
 		return createFileResult()
@@ -248,14 +249,6 @@ func (f *Fs) httpConnection(ctx context.Context, opt *Options) (isFile bool, err
 	f.httpClient = client
 	f.endpoint = u
 	f.endpointURL = u.String()

-	if isFile {
-		// Correct root if definitely pointing to a file
-		f.root = path.Dir(f.root)
-		if f.root == "." || f.root == "/" {
-			f.root = ""
-		}
-	}
-
 	return isFile, nil
 }

@@ -338,13 +331,12 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {

 // Join's the remote onto the base URL
 func (f *Fs) url(remote string) string {
-	trimmedRemote := strings.TrimLeft(remote, "/") // remove leading "/" since we always have it in f.endpointURL
 	if f.opt.NoEscape {
 		// Directly concatenate without escaping, no_escape behavior
-		return f.endpointURL + trimmedRemote
+		return f.endpointURL + remote
 	}
 	// Default behavior
-	return f.endpointURL + rest.URLPathEscape(trimmedRemote)
+	return f.endpointURL + rest.URLPathEscape(remote)
 }

 // Errors returned by parseName

@@ -191,33 +191,6 @@ func TestNewObject(t *testing.T) {
 	assert.Equal(t, fs.ErrorObjectNotFound, err)
 }

-func TestNewObjectWithLeadingSlash(t *testing.T) {
-	f := prepare(t)
-
-	o, err := f.NewObject(context.Background(), "/four/under four.txt")
-	require.NoError(t, err)
-	assert.Equal(t, "/four/under four.txt", o.Remote())
-	assert.Equal(t, int64(8+lineEndSize), o.Size())
-	_, ok := o.(*Object)
-	assert.True(t, ok)
-
-	// Test the time is correct on the object
-	tObj := o.ModTime(context.Background())
-	fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
-	require.NoError(t, err)
-	tFile := fi.ModTime()
-	fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)
-
-	// check object not found
-	o, err = f.NewObject(context.Background(), "/not found.txt")
-	assert.Nil(t, o)
-	assert.Equal(t, fs.ErrorObjectNotFound, err)
-}
-
 func TestOpen(t *testing.T) {
 	m := prepareServer(t)

@@ -631,7 +631,7 @@ func NewUpdateFileInfo() UpdateFileInfo {
 		FileFlags: FileFlags{
 			IsExecutable: true,
 			IsHidden:     false,
-			IsWritable:   true,
+			IsWritable:   false,
 		},
 	}
 }

@@ -445,7 +445,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}

 	// build request
-	// can't use normal rename as file needs to be "activated" first
+	// cant use normal rename as file needs to be "activated" first
 	r := api.NewUpdateFileInfo()
 	r.DocumentID = doc.DocumentID

@@ -75,7 +75,7 @@ type MoveFolderParam struct {
 	DestinationPath string `validate:"nonzero" json:"destinationPath"`
 }

-// JobIDResponse represents response struct with JobID for folder operations
+// JobIDResponse respresents response struct with JobID for folder operations
 type JobIDResponse struct {
 	JobID string `json:"jobId"`
 }

@@ -277,9 +277,11 @@ machines.`)
 		m.Set(configClientID, teliaseCloudClientID)
 		m.Set(configTokenURL, teliaseCloudTokenURL)
 		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-			OAuth2Config: &oauthutil.Config{
-				AuthURL:     teliaseCloudAuthURL,
-				TokenURL:    teliaseCloudTokenURL,
+			OAuth2Config: &oauth2.Config{
+				Endpoint: oauth2.Endpoint{
+					AuthURL:  teliaseCloudAuthURL,
+					TokenURL: teliaseCloudTokenURL,
+				},
 				ClientID:    teliaseCloudClientID,
 				Scopes:      []string{"openid", "jotta-default", "offline_access"},
 				RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -290,9 +292,11 @@ machines.`)
 		m.Set(configClientID, telianoCloudClientID)
 		m.Set(configTokenURL, telianoCloudTokenURL)
 		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-			OAuth2Config: &oauthutil.Config{
-				AuthURL:     telianoCloudAuthURL,
-				TokenURL:    telianoCloudTokenURL,
+			OAuth2Config: &oauth2.Config{
+				Endpoint: oauth2.Endpoint{
+					AuthURL:  telianoCloudAuthURL,
+					TokenURL: telianoCloudTokenURL,
+				},
 				ClientID:    telianoCloudClientID,
 				Scopes:      []string{"openid", "jotta-default", "offline_access"},
 				RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -303,9 +307,11 @@ machines.`)
 		m.Set(configClientID, tele2CloudClientID)
 		m.Set(configTokenURL, tele2CloudTokenURL)
 		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-			OAuth2Config: &oauthutil.Config{
-				AuthURL:     tele2CloudAuthURL,
-				TokenURL:    tele2CloudTokenURL,
+			OAuth2Config: &oauth2.Config{
+				Endpoint: oauth2.Endpoint{
+					AuthURL:  tele2CloudAuthURL,
+					TokenURL: tele2CloudTokenURL,
+				},
 				ClientID:    tele2CloudClientID,
 				Scopes:      []string{"openid", "jotta-default", "offline_access"},
 				RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -316,9 +322,11 @@ machines.`)
 		m.Set(configClientID, onlimeCloudClientID)
 		m.Set(configTokenURL, onlimeCloudTokenURL)
 		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-			OAuth2Config: &oauthutil.Config{
-				AuthURL:     onlimeCloudAuthURL,
-				TokenURL:    onlimeCloudTokenURL,
+			OAuth2Config: &oauth2.Config{
+				Endpoint: oauth2.Endpoint{
+					AuthURL:  onlimeCloudAuthURL,
+					TokenURL: onlimeCloudTokenURL,
+				},
 				ClientID:    onlimeCloudClientID,
 				Scopes:      []string{"openid", "jotta-default", "offline_access"},
 				RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -916,17 +924,19 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
 	}
 	baseClient := fshttp.NewClient(ctx)
-	oauthConfig := &oauthutil.Config{
-		AuthURL:  defaultTokenURL,
-		TokenURL: defaultTokenURL,
+	oauthConfig := &oauth2.Config{
+		Endpoint: oauth2.Endpoint{
+			AuthURL:  defaultTokenURL,
+			TokenURL: defaultTokenURL,
+		},
 	}
 	if ver == configVersion {
 		oauthConfig.ClientID = defaultClientID
 		// if custom endpoints are set use them else stick with defaults
 		if tokenURL, ok := m.Get(configTokenURL); ok {
-			oauthConfig.TokenURL = tokenURL
+			oauthConfig.Endpoint.TokenURL = tokenURL
 			// jottacloud is weird. we need to use the tokenURL as authURL
-			oauthConfig.AuthURL = tokenURL
+			oauthConfig.Endpoint.AuthURL = tokenURL
 		}
 	} else if ver == legacyConfigVersion {
 		clientID, ok := m.Get(configClientID)
@@ -940,8 +950,8 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
 		oauthConfig.ClientID = clientID
 		oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
-		oauthConfig.TokenURL = legacyTokenURL
-		oauthConfig.AuthURL = legacyTokenURL
+		oauthConfig.Endpoint.TokenURL = legacyTokenURL
+		oauthConfig.Endpoint.AuthURL = legacyTokenURL

 		// add the request filter to fix token refresh
 		if do, ok := baseClient.Transport.(interface {

@@ -5,18 +5,18 @@ package local
 import (
 	"context"
 	"fmt"
+	"syscall"
 	"unsafe"

 	"github.com/rclone/rclone/fs"
-	"golang.org/x/sys/windows"
 )

-var getFreeDiskSpace = windows.NewLazySystemDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")
+var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")

 // About gets quota information
 func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	var available, total, free int64
-	root, e := windows.UTF16PtrFromString(f.root)
+	root, e := syscall.UTF16PtrFromString(f.root)
 	if e != nil {
 		return nil, fmt.Errorf("failed to read disk usage: %w", e)
 	}
@@ -26,7 +26,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 		uintptr(unsafe.Pointer(&total)), // lpTotalNumberOfBytes
 		uintptr(unsafe.Pointer(&free)), // lpTotalNumberOfFreeBytes
 	)
-	if e1 != windows.Errno(0) {
+	if e1 != syscall.Errno(0) {
 		return nil, fmt.Errorf("failed to read disk usage: %w", e1)
 	}
 	usage := &fs.Usage{

@@ -34,6 +34,7 @@ import (
 // Constants
 const (
 	devUnset   = 0xdeadbeefcafebabe // a device id meaning it is unset
+	linkSuffix = ".rclonelink"      // The suffix added to a translated symbolic link
 	useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly
 )
@@ -100,8 +101,10 @@ Metadata is supported on files and directories.
 		},
 		{
 			Name: "links",
-			Help: "Translate symlinks to/from regular files with a '" + fs.LinkSuffix + "' extension for the local backend.",
+			Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
 			Default:  false,
+			NoPrefix: true,
+			ShortOpt: "l",
 			Advanced: true,
 		},
 		{
@@ -376,22 +379,17 @@ type Directory struct {
 var (
 	errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
-	errLinksNeedsSuffix  = errors.New("need \"" + fs.LinkSuffix + "\" suffix to refer to symlink when using -l/--links")
+	errLinksNeedsSuffix  = errors.New("need \"" + linkSuffix + "\" suffix to refer to symlink when using -l/--links")
 )

 // NewFs constructs an Fs from the path
 func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
-	ci := fs.GetConfig(ctx)
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
 	if err != nil {
 		return nil, err
 	}
-	// Override --local-links with --links if set
-	if ci.Links {
-		opt.TranslateSymlinks = true
-	}
 	if opt.TranslateSymlinks && opt.FollowSymlinks {
 		return nil, errLinksAndCopyLinks
 	}
@@ -437,9 +435,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		f.dev = readDevice(fi, f.opt.OneFileSystem)
 	}
 	// Check to see if this is a .rclonelink if not found
-	hasLinkSuffix := strings.HasSuffix(f.root, fs.LinkSuffix)
+	hasLinkSuffix := strings.HasSuffix(f.root, linkSuffix)
 	if hasLinkSuffix && opt.TranslateSymlinks && os.IsNotExist(err) {
-		fi, err = f.lstat(strings.TrimSuffix(f.root, fs.LinkSuffix))
+		fi, err = f.lstat(strings.TrimSuffix(f.root, linkSuffix))
 	}
 	if err == nil && f.isRegular(fi.Mode()) {
 		// Handle the odd case, that a symlink was specified by name without the link suffix
@@ -510,8 +508,8 @@ func (f *Fs) caseInsensitive() bool {
 //
 // for regular files, localPath is returned unchanged
 func translateLink(remote, localPath string) (newLocalPath string, isTranslatedLink bool) {
-	isTranslatedLink = strings.HasSuffix(remote, fs.LinkSuffix)
-	newLocalPath = strings.TrimSuffix(localPath, fs.LinkSuffix)
+	isTranslatedLink = strings.HasSuffix(remote, linkSuffix)
+	newLocalPath = strings.TrimSuffix(localPath, linkSuffix)
 	return newLocalPath, isTranslatedLink
 }
@@ -694,7 +692,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		} else {
 			// Check whether this link should be translated
 			if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
-				newRemote += fs.LinkSuffix
+				newRemote += linkSuffix
 			}
 			// Don't include non directory if not included
 			// we leave directory filtering to the layer above

@@ -110,7 +110,7 @@ func TestSymlink(t *testing.T) {
 	require.NoError(t, lChtimes(symlinkPath, modTime2, modTime2))

 	// Object viewed as symlink
-	file2 := fstest.NewItem("symlink.txt"+fs.LinkSuffix, "file.txt", modTime2)
+	file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)

 	// Object viewed as destination
 	file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
@@ -139,7 +139,7 @@ func TestSymlink(t *testing.T) {
 	// Create a symlink
 	modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
-	file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+fs.LinkSuffix, "file.txt", modTime3, false)
+	file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
 	fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
 	if haveLChtimes {
 		r.CheckLocalItems(t, file1, file2, file3)
@@ -155,9 +155,9 @@ func TestSymlink(t *testing.T) {
 	assert.Equal(t, "file.txt", linkText)

 	// Check that NewObject gets the correct object
-	o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+fs.LinkSuffix)
+	o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
 	require.NoError(t, err)
-	assert.Equal(t, "symlink2.txt"+fs.LinkSuffix, o.Remote())
+	assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
 	assert.Equal(t, int64(8), o.Size())

 	// Check that NewObject doesn't see the non suffixed version
@@ -165,7 +165,7 @@ func TestSymlink(t *testing.T) {
 	require.Equal(t, fs.ErrorObjectNotFound, err)

 	// Check that NewFs works with the suffixed version and --links
-	f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+fs.LinkSuffix), configmap.Simple{
+	f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+linkSuffix), configmap.Simple{
 		"links": "true",
 	})
 	require.Equal(t, fs.ErrorIsFile, err)
@@ -277,7 +277,7 @@ func TestMetadata(t *testing.T) {
 	// Write a symlink to the file
 	symlinkPath := "metafile-link.txt"
 	osSymlinkPath := filepath.Join(f.root, symlinkPath)
-	symlinkPath += fs.LinkSuffix
+	symlinkPath += linkSuffix
 	require.NoError(t, os.Symlink(filePath, osSymlinkPath))
 	symlinkModTime := fstest.Time("2002-02-03T04:05:10.123123123Z")
 	require.NoError(t, lChtimes(osSymlinkPath, symlinkModTime, symlinkModTime))

@@ -68,12 +68,14 @@ var (
 )

 // Description of how to authorize
-var oauthConfig = &oauthutil.Config{
+var oauthConfig = &oauth2.Config{
 	ClientID:     api.OAuthClientID,
 	ClientSecret: "",
-	AuthURL:      api.OAuthURL,
-	TokenURL:     api.OAuthURL,
-	AuthStyle:    oauth2.AuthStyleInParams,
+	Endpoint: oauth2.Endpoint{
+		AuthURL:   api.OAuthURL,
+		TokenURL:  api.OAuthURL,
+		AuthStyle: oauth2.AuthStyleInParams,
+	},
 }

 // Register with Fs
@@ -436,9 +438,7 @@ func (f *Fs) authorize(ctx context.Context, force bool) (err error) {
 	if err != nil || !tokenIsValid(t) {
 		fs.Infof(f, "Valid token not found, authorizing.")
 		ctx := oauthutil.Context(ctx, f.cli)
-
-		oauth2Conf := oauthConfig.MakeOauth2Config()
-		t, err = oauth2Conf.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password)
+		t, err = oauthConfig.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password)
 	}
 	if err == nil && !tokenIsValid(t) {
 		err = errors.New("invalid token")

@@ -396,57 +396,10 @@ func (m *Metadata) WritePermissions(ctx context.Context) (err error) {
 	return nil
 }

-// Order the permissions so that any with users come first.
-//
-// This is to work around a quirk with Graph:
-//
-// 1. You are adding permissions for both a group and a user.
-// 2. The user is a member of the group.
-// 3. The permissions for the group and user are the same.
-// 4. You are adding the group permission before the user permission.
-//
-// When all of the above are true, Graph indicates it has added the
-// user permission, but it immediately drops it
-//
-// See: https://github.com/rclone/rclone/issues/8465
-func (m *Metadata) orderPermissions(xs []*api.PermissionsType) {
-	// Return true if identity has any user permissions
-	hasUserIdentity := func(identity *api.IdentitySet) bool {
-		if identity == nil {
-			return false
-		}
-		return identity.User.ID != "" || identity.User.DisplayName != "" || identity.User.Email != "" || identity.User.LoginName != ""
-	}
-	// Return true if p has any user permissions
-	hasUser := func(p *api.PermissionsType) bool {
-		if hasUserIdentity(p.GetGrantedTo(m.fs.driveType)) {
-			return true
-		}
-		for _, identity := range p.GetGrantedToIdentities(m.fs.driveType) {
-			if hasUserIdentity(identity) {
-				return true
-			}
-		}
-		return false
-	}
-	// Put Permissions with a user first, leaving unsorted otherwise
-	slices.SortStableFunc(xs, func(a, b *api.PermissionsType) int {
-		aHasUser := hasUser(a)
-		bHasUser := hasUser(b)
-		if aHasUser && !bHasUser {
-			return -1
-		} else if !aHasUser && bHasUser {
-			return 1
-		}
-		return 0
-	})
-}
-
 // sortPermissions sorts the permissions (to be written) into add, update, and remove queues
 func (m *Metadata) sortPermissions() (add, update, remove []*api.PermissionsType) {
 	new, old := m.queuedPermissions, m.permissions
 	if len(old) == 0 || m.permsAddOnly {
-		m.orderPermissions(new)
 		return new, nil, nil // they must all be "add"
 	}
@@ -494,9 +447,6 @@ func (m *Metadata) sortPermissions() (add, update, remove []*api.PermissionsType
 			remove = append(remove, o)
 		}
 	}
-	m.orderPermissions(add)
-	m.orderPermissions(update)
-	m.orderPermissions(remove)
 	return add, update, remove
 }

@@ -1,125 +0,0 @@
-package onedrive
-
-import (
-	"encoding/json"
-	"testing"
-
-	"github.com/rclone/rclone/backend/onedrive/api"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-)
-
-func TestOrderPermissions(t *testing.T) {
-	tests := []struct {
-		name     string
-		input    []*api.PermissionsType
-		expected []string
-	}{
-		{
-			name:     "empty",
-			input:    []*api.PermissionsType{},
-			expected: []string(nil),
-		},
-		{
-			name: "users first, then group, then none",
-			input: []*api.PermissionsType{
-				{ID: "1", GrantedTo: &api.IdentitySet{Group: api.Identity{DisplayName: "Group1"}}},
-				{ID: "2", GrantedToIdentities: []*api.IdentitySet{{User: api.Identity{DisplayName: "Alice"}}}},
-				{ID: "3", GrantedTo: &api.IdentitySet{User: api.Identity{DisplayName: "Alice"}}},
-				{ID: "4"},
-			},
-			expected: []string{"2", "3", "1", "4"},
-		},
-		{
-			name: "same type unsorted",
-			input: []*api.PermissionsType{
-				{ID: "b", GrantedTo: &api.IdentitySet{Group: api.Identity{DisplayName: "Group B"}}},
-				{ID: "a", GrantedTo: &api.IdentitySet{Group: api.Identity{DisplayName: "Group A"}}},
-				{ID: "c", GrantedToIdentities: []*api.IdentitySet{{Group: api.Identity{DisplayName: "Group A"}}, {User: api.Identity{DisplayName: "Alice"}}}},
-			},
-			expected: []string{"c", "b", "a"},
-		},
-		{
-			name: "all user identities",
-			input: []*api.PermissionsType{
-				{ID: "c", GrantedTo: &api.IdentitySet{User: api.Identity{DisplayName: "Bob"}}},
-				{ID: "a", GrantedTo: &api.IdentitySet{User: api.Identity{Email: "alice@example.com"}}},
-				{ID: "b", GrantedToIdentities: []*api.IdentitySet{{User: api.Identity{LoginName: "user3"}}}},
-			},
-			expected: []string{"c", "a", "b"},
-		},
-		{
-			name: "no user or group info",
-			input: []*api.PermissionsType{
-				{ID: "z"},
-				{ID: "x"},
-				{ID: "y"},
-			},
-			expected: []string{"z", "x", "y"},
-		},
-	}
-
-	for _, driveType := range []string{driveTypePersonal, driveTypeBusiness} {
-		t.Run(driveType, func(t *testing.T) {
-			for _, tt := range tests {
-				m := &Metadata{fs: &Fs{driveType: driveType}}
-				t.Run(tt.name, func(t *testing.T) {
-					if driveType == driveTypeBusiness {
-						for i := range tt.input {
-							tt.input[i].GrantedToV2 = tt.input[i].GrantedTo
-							tt.input[i].GrantedTo = nil
-							tt.input[i].GrantedToIdentitiesV2 = tt.input[i].GrantedToIdentities
-							tt.input[i].GrantedToIdentities = nil
-						}
-					}
-					m.orderPermissions(tt.input)
-
-					var gotIDs []string
-					for _, p := range tt.input {
-						gotIDs = append(gotIDs, p.ID)
-					}
-					assert.Equal(t, tt.expected, gotIDs)
-				})
-			}
-		})
-	}
-}
-
-func TestOrderPermissionsJSON(t *testing.T) {
-	testJSON := `[
-	{
-		"id": "1",
-		"grantedToV2": {
-			"group": {
-				"id": "group@example.com"
-			}
-		},
-		"roles": [
-			"write"
-		]
-	},
-	{
-		"id": "2",
-		"grantedToV2": {
-			"user": {
-				"id": "user@example.com"
-			}
-		},
-		"roles": [
-			"write"
-		]
-	}
-]`
-
-	var testPerms []*api.PermissionsType
-	err := json.Unmarshal([]byte(testJSON), &testPerms)
-	require.NoError(t, err)
-
-	m := &Metadata{fs: &Fs{driveType: driveTypeBusiness}}
-	m.orderPermissions(testPerms)
-
-	var gotIDs []string
-	for _, p := range testPerms {
-		gotIDs = append(gotIDs, p.ID)
-	}
-	assert.Equal(t, []string{"2", "1"}, gotIDs)
-}

@@ -40,6 +40,7 @@ import (
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/readers"
 	"github.com/rclone/rclone/lib/rest"
+	"golang.org/x/oauth2"
 )

 const (
@@ -64,21 +65,14 @@ const (
 // Globals
 var (
-	// Define the paths used for token operations
-	commonPathPrefix = "/common" // prefix for the paths if tenant isn't known
-	authPath         = "/oauth2/v2.0/authorize"
-	tokenPath        = "/oauth2/v2.0/token"
+	authPath  = "/common/oauth2/v2.0/authorize"
+	tokenPath = "/common/oauth2/v2.0/token"

 	scopeAccess             = fs.SpaceSepList{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "Sites.Read.All", "offline_access"}
 	scopeAccessWithoutSites = fs.SpaceSepList{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access"}

-	// When using client credential OAuth flow, scope of .default is required in order
-	// to use the permissions configured for the application within the tenant
-	scopeAccessClientCred = fs.SpaceSepList{".default"}
-
-	// Base config for how to auth
-	oauthConfig = &oauthutil.Config{
+	// Description of how to auth for this app for a business account
+	oauthConfig = &oauth2.Config{
 		Scopes:       scopeAccess,
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
@@ -131,7 +125,7 @@ func init() {
 			Help: "Microsoft Cloud for US Government",
 		}, {
 			Value: regionDE,
-			Help:  "Microsoft Cloud Germany (deprecated - try " + regionGlobal + " region first).",
+			Help:  "Microsoft Cloud Germany",
 		}, {
 			Value: regionCN,
 			Help:  "Azure and Office 365 operated by Vnet Group in China",
@@ -189,14 +183,6 @@ Choose or manually enter a custom space separated list with all scopes, that rcl
 			Help: "Read and write access to all resources, without the ability to browse SharePoint sites. \nSame as if disable_site_permission was set to true",
 		},
 	},
-	}, {
-		Name: "tenant",
-		Help: `ID of the service principal's tenant. Also called its directory ID.
-
-Set this if using
-- Client Credential flow
-`,
-		Sensitive: true,
 	}, {
 		Name: "disable_site_permission",
 		Help: `Disable the request for Sites.Read.All permission.
@@ -541,54 +527,28 @@ func chooseDrive(ctx context.Context, name string, m configmap.Mapper, srv *rest
 	})
 }

-// Make the oauth config for the backend
-func makeOauthConfig(ctx context.Context, opt *Options) (*oauthutil.Config, error) {
-	// Copy the default oauthConfig
-	oauthConfig := *oauthConfig
-
-	// Set the scopes
-	oauthConfig.Scopes = opt.AccessScopes
-	if opt.DisableSitePermission {
-		oauthConfig.Scopes = scopeAccessWithoutSites
-	}
-
-	// Construct the auth URLs
-	prefix := commonPathPrefix
-	if opt.Tenant != "" {
-		prefix = "/" + opt.Tenant
-	}
-	oauthConfig.TokenURL = authEndpoint[opt.Region] + prefix + tokenPath
-	oauthConfig.AuthURL = authEndpoint[opt.Region] + prefix + authPath
-
-	// Check to see if we are using client credentials flow
-	if opt.ClientCredentials {
-		// Override scope to .default
-		oauthConfig.Scopes = scopeAccessClientCred
-		if opt.Tenant == "" {
-			return nil, fmt.Errorf("tenant parameter must be set when using %s", config.ConfigClientCredentials)
-		}
-	}
-
-	return &oauthConfig, nil
-}
-
 // Config the backend
-func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.ConfigIn) (*fs.ConfigOut, error) {
-	opt := new(Options)
-	err := configstruct.Set(m, opt)
-	if err != nil {
-		return nil, err
-	}
-	_, graphURL := getRegionURL(m)
-	// Check to see if this is the start of the state machine execution
-	if conf.State == "" {
-		conf, err := makeOauthConfig(ctx, opt)
+func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
+	region, graphURL := getRegionURL(m)
+	if config.State == "" {
+		var accessScopes fs.SpaceSepList
+		accessScopesString, _ := m.Get("access_scopes")
+		err := accessScopes.Set(accessScopesString)
 		if err != nil {
-			return nil, err
+			return nil, fmt.Errorf("failed to parse access_scopes: %w", err)
+		}
+		oauthConfig.Scopes = []string(accessScopes)
+		disableSitePermission, _ := m.Get("disable_site_permission")
+		if disableSitePermission == "true" {
+			oauthConfig.Scopes = scopeAccessWithoutSites
+		}
+		oauthConfig.Endpoint = oauth2.Endpoint{
+			AuthURL:  authEndpoint[region] + authPath,
+			TokenURL: authEndpoint[region] + tokenPath,
 		}
 		return oauthutil.ConfigOut("choose_type", &oauthutil.Options{
-			OAuth2Config: conf,
+			OAuth2Config: oauthConfig,
 		})
 	}
@@ -596,11 +556,9 @@ func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.Config
 	if err != nil {
 		return nil, fmt.Errorf("failed to configure OneDrive: %w", err)
 	}
-	// Create a REST client, build on the OAuth client created above
 	srv := rest.NewClient(oAuthClient)

-	switch conf.State {
+	switch config.State {
 	case "choose_type":
 		return fs.ConfigChooseExclusiveFixed("choose_type_done", "config_type", "Type of connection", []fs.OptionExample{{
 			Value: "onedrive",
@@ -626,7 +584,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.Config
 		}})
 	case "choose_type_done":
 		// Jump to next state according to config chosen
-		return fs.ConfigGoto(conf.Result)
+		return fs.ConfigGoto(config.Result)
 	case "onedrive":
 		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
 			opts: rest.Opts{
@@ -644,22 +602,16 @@ func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.Config
 			},
 		})
 	case "driveid":
-		out, err := fs.ConfigInput("driveid_end", "config_driveid_fixed", "Drive ID")
-		if err != nil {
-			return out, err
-		}
-		// Default the drive_id to the previous version in the config
-		out.Option.Default, _ = m.Get("drive_id")
-		return out, nil
+		return fs.ConfigInput("driveid_end", "config_driveid_fixed", "Drive ID")
 	case "driveid_end":
 		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
-			finalDriveID: conf.Result,
+			finalDriveID: config.Result,
 		})
 	case "siteid":
 		return fs.ConfigInput("siteid_end", "config_siteid", "Site ID")
 	case "siteid_end":
 		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
-			siteID: conf.Result,
+			siteID: config.Result,
 		})
 	case "url":
 		return fs.ConfigInput("url_end", "config_site_url", `Site URL
@@ -670,7 +622,7 @@ Examples:
 - "https://XXX.sharepoint.com/teams/ID"
 `)
 	case "url_end":
-		siteURL := conf.Result
+		siteURL := config.Result
 		re := regexp.MustCompile(`https://.*\.sharepoint\.com(/.*)`)
 		match := re.FindStringSubmatch(siteURL)
 		if len(match) == 2 {
@@ -685,12 +637,12 @@ Examples:
 		return fs.ConfigInput("path_end", "config_sharepoint_url", `Server-relative URL`)
 	case "path_end":
 		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
-			relativePath: conf.Result,
+			relativePath: config.Result,
 		})
 	case "search":
 		return fs.ConfigInput("search_end", "config_search_term", `Search term`)
 	case "search_end":
-		searchTerm := conf.Result
+		searchTerm := config.Result
 		opts := rest.Opts{
 			Method:  "GET",
 			RootURL: graphURL,
@@ -712,10 +664,10 @@ Examples:
 		})
 	case "search_sites":
 		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
-			siteID: conf.Result,
+			siteID: config.Result,
 		})
 	case "driveid_final":
-		finalDriveID := conf.Result
+		finalDriveID := config.Result
 		// Test the driveID and get drive type
 		opts := rest.Opts{
@@ -734,12 +686,12 @@ Examples:
 		return fs.ConfigConfirm("driveid_final_end", true, "config_drive_ok", fmt.Sprintf("Drive OK?\n\nFound drive %q of type %q\nURL: %s\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL))
 	case "driveid_final_end":
-		if conf.Result == "true" {
+		if config.Result == "true" {
 			return nil, nil
 		}
 		return fs.ConfigGoto("choose_type")
 	}
-	return nil, fmt.Errorf("unknown state %q", conf.State)
+	return nil, fmt.Errorf("unknown state %q", config.State)
 }

 // Options defines the configuration for this backend
@@ -750,9 +702,7 @@ type Options struct {
 	DriveType               string          `config:"drive_type"`
 	RootFolderID            string          `config:"root_folder_id"`
 	DisableSitePermission   bool            `config:"disable_site_permission"`
-	ClientCredentials       bool            `config:"client_credentials"`
 	AccessScopes            fs.SpaceSepList `config:"access_scopes"`
-	Tenant                  string          `config:"tenant"`
 	ExposeOneNoteFiles      bool            `config:"expose_onenote_files"`
 	ServerSideAcrossConfigs bool            `config:"server_side_across_configs"`
 	ListChunk               int64           `config:"list_chunk"`
@@ -1040,10 +990,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}
 	rootURL := graphAPIEndpoint[opt.Region] + "/v1.0" + "/drives/" + opt.DriveID

-	oauthConfig, err := makeOauthConfig(ctx, opt)
-	if err != nil {
-		return nil, err
+	oauthConfig.Scopes = opt.AccessScopes
+	if opt.DisableSitePermission {
+		oauthConfig.Scopes = scopeAccessWithoutSites
+	}
+	oauthConfig.Endpoint = oauth2.Endpoint{
+		AuthURL:  authEndpoint[opt.Region] + authPath,
+		TokenURL: authEndpoint[opt.Region] + tokenPath,
 	}

 	client := fshttp.NewClient(ctx)
@@ -2610,11 +2563,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return errors.New("can't upload content to a OneNote file")
 	}

-	// Only start the renewer if we have a valid one
-	if o.fs.tokenRenewer != nil {
-		o.fs.tokenRenewer.Start()
-		defer o.fs.tokenRenewer.Stop()
-	}
+	o.fs.tokenRenewer.Start()
+	defer o.fs.tokenRenewer.Stop()

 	size := src.Size()

@@ -22,7 +22,6 @@ import (
 	"github.com/oracle/oci-go-sdk/v65/objectstorage"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/chunksize"
-	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/hash"
 )
@@ -184,9 +183,6 @@ func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, rea
 		if ossPartNumber <= 8 {
 			return shouldRetry(ctx, resp.HTTPResponse(), err)
 		}
-		if fserrors.ContextError(ctx, &err) {
-			return false, err
-		}
 		// retry all chunks once have done the first few
 		return true, err
 	}
@@ -106,9 +106,9 @@ func newOptions() []fs.Option {
         Sensitive: true,
     }, {
         Name:      "compartment",
-        Help:      "Specify compartment OCID, if you need to list buckets.\n\nList objects works without compartment OCID.",
+        Help:      "Object storage compartment OCID",
         Provider:  "!no_auth",
-        Required:  false,
+        Required:  true,
         Sensitive: true,
     }, {
         Name: "region",
@@ -48,10 +48,12 @@ const (
 // Globals
 var (
     // Description of how to auth for this app
-    oauthConfig = &oauthutil.Config{
+    oauthConfig = &oauth2.Config{
         Scopes: nil,
-        AuthURL: "https://my.pcloud.com/oauth2/authorize",
-        // TokenURL: "https://api.pcloud.com/oauth2_token", set by updateTokenURL
+        Endpoint: oauth2.Endpoint{
+            AuthURL: "https://my.pcloud.com/oauth2/authorize",
+            // TokenURL: "https://api.pcloud.com/oauth2_token", set by updateTokenURL
+        },
         ClientID:     rcloneClientID,
         ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
         RedirectURL:  oauthutil.RedirectLocalhostURL,
@@ -59,8 +61,8 @@ var (
 )

 // Update the TokenURL with the actual hostname
-func updateTokenURL(oauthConfig *oauthutil.Config, hostname string) {
-    oauthConfig.TokenURL = "https://" + hostname + "/oauth2_token"
+func updateTokenURL(oauthConfig *oauth2.Config, hostname string) {
+    oauthConfig.Endpoint.TokenURL = "https://" + hostname + "/oauth2_token"
 }

 // Register with Fs
@@ -77,7 +79,7 @@ func init() {
         fs.Errorf(nil, "Failed to read config: %v", err)
     }
     updateTokenURL(oauthConfig, optc.Hostname)
-    checkAuth := func(oauthConfig *oauthutil.Config, auth *oauthutil.AuthResult) error {
+    checkAuth := func(oauthConfig *oauth2.Config, auth *oauthutil.AuthResult) error {
         if auth == nil || auth.Form == nil {
             return errors.New("form not found in response")
         }
@@ -397,15 +399,14 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
     if err != nil {
         return nil, fmt.Errorf("open file: %w", err)
     }
-    if _, err := fileClose(ctx, client, f.pacer, openResult.FileDescriptor); err != nil {
-        return nil, fmt.Errorf("close file: %w", err)
-    }
     writer := &writerAt{
         ctx:    ctx,
+        client: client,
         fs:     f,
         size:   size,
         remote: remote,
+        fd:     openResult.FileDescriptor,
         fileID: openResult.Fileid,
     }
@@ -424,7 +425,7 @@ func (f *Fs) newSingleConnClient(ctx context.Context) (*rest.Client, error) {
     })
     // Set our own http client in the context
     ctx = oauthutil.Context(ctx, baseClient)
-    // create a new oauth client, reuse the token source
+    // create a new oauth client, re-use the token source
     oAuthClient := oauth2.NewClient(ctx, f.ts)
     return rest.NewClient(oAuthClient).SetRoot("https://" + f.opt.Hostname), nil
 }
@@ -18,14 +18,21 @@ import (
 // writerAt implements fs.WriterAtCloser, adding the OpenWrtierAt feature to pcloud.
 type writerAt struct {
     ctx    context.Context
+    client *rest.Client
     fs     *Fs
     size   int64
     remote string
+    fd     int64
     fileID int64
 }

 // Close implements WriterAt.Close.
 func (c *writerAt) Close() error {
+    // close fd
+    if _, err := c.fileClose(c.ctx); err != nil {
+        return fmt.Errorf("close fd: %w", err)
+    }
     // Avoiding race conditions: Depending on the tcp connection, there might be
     // caching issues when checking the size immediately after write.
     // Hence we try avoiding them by checking the resulting size on a different connection.
@@ -65,18 +72,8 @@ func (c *writerAt) WriteAt(buffer []byte, offset int64) (n int, err error) {
     inSHA1Bytes := sha1.Sum(buffer)
     inSHA1 := hex.EncodeToString(inSHA1Bytes[:])

-    client, err := c.fs.newSingleConnClient(c.ctx)
-    if err != nil {
-        return 0, fmt.Errorf("create client: %w", err)
-    }
-
-    openResult, err := fileOpen(c.ctx, client, c.fs, c.fileID)
-    if err != nil {
-        return 0, fmt.Errorf("open file: %w", err)
-    }
-
     // get target hash
-    outChecksum, err := fileChecksum(c.ctx, client, c.fs.pacer, openResult.FileDescriptor, offset, int64(contentLength))
+    outChecksum, err := c.fileChecksum(c.ctx, offset, int64(contentLength))
     if err != nil {
         return 0, err
     }
@@ -92,15 +89,10 @@ func (c *writerAt) WriteAt(buffer []byte, offset int64) (n int, err error) {
     }

     // upload buffer with offset if necessary
-    if _, err := filePWrite(c.ctx, client, c.fs.pacer, openResult.FileDescriptor, offset, buffer); err != nil {
+    if _, err := c.filePWrite(c.ctx, offset, buffer); err != nil {
         return 0, err
     }

-    // close fd
-    if _, err := fileClose(c.ctx, client, c.fs.pacer, openResult.FileDescriptor); err != nil {
-        return contentLength, fmt.Errorf("close fd: %w", err)
-    }
-
     return contentLength, nil
 }
@@ -133,40 +125,11 @@ func fileOpenNew(ctx context.Context, c *rest.Client, srcFs *Fs, directoryID, fi
     return result, nil
 }

-// Call pcloud file_open using fileid with O_WRITE flags, see [API Doc.]
-// [API Doc]: https://docs.pcloud.com/methods/fileops/file_open.html
-func fileOpen(ctx context.Context, c *rest.Client, srcFs *Fs, fileID int64) (*api.FileOpenResponse, error) {
-    opts := rest.Opts{
-        Method:           "PUT",
-        Path:             "/file_open",
-        Parameters:       url.Values{},
-        TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
-        ExtraHeaders: map[string]string{
-            "Connection": "keep-alive",
-        },
-    }
-    opts.Parameters.Set("fileid", strconv.FormatInt(fileID, 10))
-    opts.Parameters.Set("flags", "0x0002") // O_WRITE
-
-    result := &api.FileOpenResponse{}
-    err := srcFs.pacer.CallNoRetry(func() (bool, error) {
-        resp, err := c.CallJSON(ctx, &opts, nil, result)
-        err = result.Error.Update(err)
-        return shouldRetry(ctx, resp, err)
-    })
-    if err != nil {
-        return nil, fmt.Errorf("open new file descriptor: %w", err)
-    }
-    return result, nil
-}
-
 // Call pcloud file_checksum, see [API Doc.]
 // [API Doc]: https://docs.pcloud.com/methods/fileops/file_checksum.html
-func fileChecksum(
+func (c *writerAt) fileChecksum(
     ctx context.Context,
-    client *rest.Client,
-    pacer *fs.Pacer,
-    fd, offset, count int64,
+    offset, count int64,
 ) (*api.FileChecksumResponse, error) {
     opts := rest.Opts{
         Method: "PUT",
@@ -177,29 +140,26 @@ func fileChecksum(
             "Connection": "keep-alive",
         },
     }
-    opts.Parameters.Set("fd", strconv.FormatInt(fd, 10))
+    opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
     opts.Parameters.Set("offset", strconv.FormatInt(offset, 10))
     opts.Parameters.Set("count", strconv.FormatInt(count, 10))
     result := &api.FileChecksumResponse{}
-    err := pacer.CallNoRetry(func() (bool, error) {
-        resp, err := client.CallJSON(ctx, &opts, nil, result)
+    err := c.fs.pacer.CallNoRetry(func() (bool, error) {
+        resp, err := c.client.CallJSON(ctx, &opts, nil, result)
         err = result.Error.Update(err)
         return shouldRetry(ctx, resp, err)
     })
     if err != nil {
-        return nil, fmt.Errorf("checksum of fd %d with offset %d and size %d: %w", fd, offset, count, err)
+        return nil, fmt.Errorf("checksum of fd %d with offset %d and size %d: %w", c.fd, offset, count, err)
     }
     return result, nil
 }

 // Call pcloud file_pwrite, see [API Doc.]
 // [API Doc]: https://docs.pcloud.com/methods/fileops/file_pwrite.html
-func filePWrite(
+func (c *writerAt) filePWrite(
     ctx context.Context,
-    client *rest.Client,
-    pacer *fs.Pacer,
-    fd int64,
     offset int64,
     buf []byte,
 ) (*api.FilePWriteResponse, error) {
@@ -216,29 +176,24 @@ func filePWrite(
             "Connection": "keep-alive",
         },
     }
-    opts.Parameters.Set("fd", strconv.FormatInt(fd, 10))
+    opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
     opts.Parameters.Set("offset", strconv.FormatInt(offset, 10))
     result := &api.FilePWriteResponse{}
-    err := pacer.CallNoRetry(func() (bool, error) {
-        resp, err := client.CallJSON(ctx, &opts, nil, result)
+    err := c.fs.pacer.CallNoRetry(func() (bool, error) {
+        resp, err := c.client.CallJSON(ctx, &opts, nil, result)
         err = result.Error.Update(err)
         return shouldRetry(ctx, resp, err)
     })
     if err != nil {
-        return nil, fmt.Errorf("write %d bytes to fd %d with offset %d: %w", contentLength, fd, offset, err)
+        return nil, fmt.Errorf("write %d bytes to fd %d with offset %d: %w", contentLength, c.fd, offset, err)
     }
     return result, nil
 }

 // Call pcloud file_close, see [API Doc.]
 // [API Doc]: https://docs.pcloud.com/methods/fileops/file_close.html
-func fileClose(
-    ctx context.Context,
-    client *rest.Client,
-    pacer *fs.Pacer,
-    fd int64,
-) (*api.FileCloseResponse, error) {
+func (c *writerAt) fileClose(ctx context.Context) (*api.FileCloseResponse, error) {
     opts := rest.Opts{
         Method: "PUT",
         Path:   "/file_close",
@@ -246,11 +201,11 @@ func fileClose(
         TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
         Close:            true,
     }
-    opts.Parameters.Set("fd", strconv.FormatInt(fd, 10))
+    opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
     result := &api.FileCloseResponse{}
-    err := pacer.CallNoRetry(func() (bool, error) {
-        resp, err := client.CallJSON(ctx, &opts, nil, result)
+    err := c.fs.pacer.CallNoRetry(func() (bool, error) {
+        resp, err := c.client.CallJSON(ctx, &opts, nil, result)
         err = result.Error.Update(err)
         return shouldRetry(ctx, resp, err)
     })
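Aside: read together, the minus side of the pcloud hunks above replaces the long-lived file descriptor held on writerAt with a per-call open/write/close cycle over a dedicated connection. As a reading aid only, here is a minimal sketch of one such WriteAt assembled from the functions quoted in this diff; it assumes the surrounding pcloud backend package, the method name writeAtSketch is hypothetical, and error handling is abbreviated.

    // Sketch of one WriteAt on the minus side: open an fd, write at the offset, close it again.
    func (c *writerAt) writeAtSketch(buffer []byte, offset int64) (int, error) {
        client, err := c.fs.newSingleConnClient(c.ctx) // dedicated connection so the fd stays valid
        if err != nil {
            return 0, fmt.Errorf("create client: %w", err)
        }
        openResult, err := fileOpen(c.ctx, client, c.fs, c.fileID) // opens with O_WRITE
        if err != nil {
            return 0, fmt.Errorf("open file: %w", err)
        }
        if _, err := filePWrite(c.ctx, client, c.fs.pacer, openResult.FileDescriptor, offset, buffer); err != nil {
            return 0, err
        }
        // closing immediately means no fd state has to live on the writerAt struct
        if _, err := fileClose(c.ctx, client, c.fs.pacer, openResult.FileDescriptor); err != nil {
            return len(buffer), fmt.Errorf("close fd: %w", err)
        }
        return len(buffer), nil
    }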
@@ -82,11 +82,13 @@ const (
 // Globals
 var (
     // Description of how to auth for this app
-    oauthConfig = &oauthutil.Config{
+    oauthConfig = &oauth2.Config{
         Scopes: nil,
-        AuthURL:   "https://user.mypikpak.com/v1/auth/signin",
-        TokenURL:  "https://user.mypikpak.com/v1/auth/token",
-        AuthStyle: oauth2.AuthStyleInParams,
+        Endpoint: oauth2.Endpoint{
+            AuthURL:   "https://user.mypikpak.com/v1/auth/signin",
+            TokenURL:  "https://user.mypikpak.com/v1/auth/token",
+            AuthStyle: oauth2.AuthStyleInParams,
+        },
         ClientID:    clientID,
         RedirectURL: oauthutil.RedirectURL,
     }
@@ -213,11 +215,6 @@ Fill in for rclone to use a non root folder as its starting point.
         Default:  false,
         Help:     "Only show files that are in the trash.\n\nThis will show trashed files in their original directory structure.",
         Advanced: true,
-    }, {
-        Name:     "no_media_link",
-        Default:  false,
-        Help:     "Use original file links instead of media links.\n\nThis avoids issues caused by invalid media links, but may reduce download speeds.",
-        Advanced: true,
     }, {
         Name:     "hash_memory_limit",
         Help:     "Files bigger than this will be cached on disk to calculate hash if required.",
@@ -291,7 +288,6 @@ type Options struct {
     RootFolderID        string        `config:"root_folder_id"`
     UseTrash            bool          `config:"use_trash"`
    TrashedOnly         bool          `config:"trashed_only"`
-    NoMediaLink         bool          `config:"no_media_link"`
     HashMemoryThreshold fs.SizeSuffix `config:"hash_memory_limit"`
     ChunkSize           fs.SizeSuffix `config:"chunk_size"`
     UploadConcurrency   int           `config:"upload_concurrency"`
@@ -1581,14 +1577,15 @@ func (o *Object) setMetaData(info *api.File) (err error) {
     o.md5sum = info.Md5Checksum
     if info.Links.ApplicationOctetStream != nil {
         o.link = info.Links.ApplicationOctetStream
-        if !o.fs.opt.NoMediaLink {
-            if fid := parseFileID(o.link.URL); fid != "" {
-                for _, media := range info.Medias {
-                    if media.Link != nil && parseFileID(media.Link.URL) == fid {
-                        fs.Debugf(o, "Using a media link")
-                        o.link = media.Link
-                        break
-                    }
+        if fid := parseFileID(o.link.URL); fid != "" {
+            for mid, media := range info.Medias {
+                if media.Link == nil {
+                    continue
+                }
+                if mfid := parseFileID(media.Link.URL); fid == mfid {
+                    fs.Debugf(o, "Using a media link from Medias[%d]", mid)
+                    o.link = media.Link
+                    break
                 }
             }
         }
@@ -43,6 +43,7 @@ import (
     "github.com/rclone/rclone/lib/pacer"
     "github.com/rclone/rclone/lib/random"
     "github.com/rclone/rclone/lib/rest"
+    "golang.org/x/oauth2"
 )

 const (
@@ -58,10 +59,12 @@ const (
 // Globals
 var (
     // Description of how to auth for this app
-    oauthConfig = &oauthutil.Config{
+    oauthConfig = &oauth2.Config{
         Scopes: nil,
-        AuthURL:  "https://www.premiumize.me/authorize",
-        TokenURL: "https://www.premiumize.me/token",
+        Endpoint: oauth2.Endpoint{
+            AuthURL:  "https://www.premiumize.me/authorize",
+            TokenURL: "https://www.premiumize.me/token",
+        },
         ClientID:     rcloneClientID,
         ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
         RedirectURL:  oauthutil.RedirectURL,
@@ -13,6 +13,7 @@ import (
     "github.com/rclone/rclone/lib/dircache"
     "github.com/rclone/rclone/lib/encoder"
     "github.com/rclone/rclone/lib/oauthutil"
+    "golang.org/x/oauth2"
 )

 /*
@@ -40,10 +41,12 @@ const (
 var (
     // Description of how to auth for this app
-    putioConfig = &oauthutil.Config{
+    putioConfig = &oauth2.Config{
         Scopes: []string{},
-        AuthURL:  "https://api.put.io/v2/oauth2/authenticate",
-        TokenURL: "https://api.put.io/v2/oauth2/access_token",
+        Endpoint: oauth2.Endpoint{
+            AuthURL:  "https://api.put.io/v2/oauth2/authenticate",
+            TokenURL: "https://api.put.io/v2/oauth2/access_token",
+        },
         ClientID:     rcloneClientID,
         ClientSecret: obscure.MustReveal(rcloneObscuredClientSecret),
         RedirectURL:  oauthutil.RedirectLocalhostURL,
@@ -934,67 +934,34 @@ func init() {
             Help: "The default endpoint\nIran",
         }},
     }, {
-        // Linode endpoints: https://techdocs.akamai.com/cloud-computing/docs/object-storage-product-limits#supported-endpoint-types-by-region
+        // Linode endpoints: https://www.linode.com/docs/products/storage/object-storage/guides/urls/#cluster-url-s3-endpoint
         Name:     "endpoint",
         Help:     "Endpoint for Linode Object Storage API.",
         Provider: "Linode",
         Examples: []fs.OptionExample{{
-            Value: "nl-ams-1.linodeobjects.com",
-            Help:  "Amsterdam (Netherlands), nl-ams-1",
-        }, {
             Value: "us-southeast-1.linodeobjects.com",
             Help:  "Atlanta, GA (USA), us-southeast-1",
-        }, {
-            Value: "in-maa-1.linodeobjects.com",
-            Help:  "Chennai (India), in-maa-1",
         }, {
             Value: "us-ord-1.linodeobjects.com",
             Help:  "Chicago, IL (USA), us-ord-1",
         }, {
             Value: "eu-central-1.linodeobjects.com",
             Help:  "Frankfurt (Germany), eu-central-1",
-        }, {
-            Value: "id-cgk-1.linodeobjects.com",
-            Help:  "Jakarta (Indonesia), id-cgk-1",
-        }, {
-            Value: "gb-lon-1.linodeobjects.com",
-            Help:  "London 2 (Great Britain), gb-lon-1",
-        }, {
-            Value: "us-lax-1.linodeobjects.com",
-            Help:  "Los Angeles, CA (USA), us-lax-1",
-        }, {
-            Value: "es-mad-1.linodeobjects.com",
-            Help:  "Madrid (Spain), es-mad-1",
-        }, {
-            Value: "au-mel-1.linodeobjects.com",
-            Help:  "Melbourne (Australia), au-mel-1",
-        }, {
-            Value: "us-mia-1.linodeobjects.com",
-            Help:  "Miami, FL (USA), us-mia-1",
         }, {
             Value: "it-mil-1.linodeobjects.com",
             Help:  "Milan (Italy), it-mil-1",
         }, {
             Value: "us-east-1.linodeobjects.com",
             Help:  "Newark, NJ (USA), us-east-1",
-        }, {
-            Value: "jp-osa-1.linodeobjects.com",
-            Help:  "Osaka (Japan), jp-osa-1",
         }, {
             Value: "fr-par-1.linodeobjects.com",
             Help:  "Paris (France), fr-par-1",
-        }, {
-            Value: "br-gru-1.linodeobjects.com",
-            Help:  "São Paulo (Brazil), br-gru-1",
         }, {
             Value: "us-sea-1.linodeobjects.com",
             Help:  "Seattle, WA (USA), us-sea-1",
         }, {
             Value: "ap-south-1.linodeobjects.com",
-            Help:  "Singapore, ap-south-1",
-        }, {
-            Value: "sg-sin-1.linodeobjects.com",
-            Help:  "Singapore 2, sg-sin-1",
+            Help:  "Singapore ap-south-1",
         }, {
             Value: "se-sto-1.linodeobjects.com",
             Help:  "Stockholm (Sweden), se-sto-1",
@@ -1376,7 +1343,7 @@ func init() {
     }, {
         Name: "endpoint",
         Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
-        Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,Magalu,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
+        Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,MagaluCloud,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
         Examples: []fs.OptionExample{{
             Value: "objects-us-east-1.dream.io",
             Help:  "Dream Objects endpoint",
@@ -1389,10 +1356,6 @@ func init() {
             Value:    "sfo3.digitaloceanspaces.com",
             Help:     "DigitalOcean Spaces San Francisco 3",
             Provider: "DigitalOcean",
-        }, {
-            Value:    "sfo2.digitaloceanspaces.com",
-            Help:     "DigitalOcean Spaces San Francisco 2",
-            Provider: "DigitalOcean",
         }, {
             Value:    "fra1.digitaloceanspaces.com",
             Help:     "DigitalOcean Spaces Frankfurt 1",
@@ -1409,18 +1372,6 @@ func init() {
             Value:    "sgp1.digitaloceanspaces.com",
             Help:     "DigitalOcean Spaces Singapore 1",
             Provider: "DigitalOcean",
-        }, {
-            Value:    "lon1.digitaloceanspaces.com",
-            Help:     "DigitalOcean Spaces London 1",
-            Provider: "DigitalOcean",
-        }, {
-            Value:    "tor1.digitaloceanspaces.com",
-            Help:     "DigitalOcean Spaces Toronto 1",
-            Provider: "DigitalOcean",
-        }, {
-            Value:    "blr1.digitaloceanspaces.com",
-            Help:     "DigitalOcean Spaces Bangalore 1",
-            Provider: "DigitalOcean",
         }, {
             Value: "localhost:8333",
             Help:  "SeaweedFS S3 localhost",
@@ -1525,6 +1476,14 @@ func init() {
             Value:    "s3.ir-tbz-sh1.arvanstorage.ir",
             Help:     "ArvanCloud Tabriz Iran (Shahriar) endpoint",
             Provider: "ArvanCloud",
+        }, {
+            Value:    "br-se1.magaluobjects.com",
+            Help:     "Magalu BR Southeast 1 endpoint",
+            Provider: "Magalu",
+        }, {
+            Value:    "br-ne1.magaluobjects.com",
+            Help:     "Magalu BR Northeast 1 endpoint",
+            Provider: "Magalu",
         }},
     }, {
         Name: "location_constraint",
@@ -2097,7 +2056,7 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
             Help: "One Zone Infrequent Access storage class",
         }, {
             Value: "GLACIER",
-            Help:  "Glacier Flexible Retrieval storage class",
+            Help:  "Glacier storage class",
         }, {
             Value: "DEEP_ARCHIVE",
             Help:  "Glacier Deep Archive storage class",
@@ -2163,16 +2122,13 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
             Help: "Standard storage class",
         }},
     }, {
-        // Mapping from here: https://docs.magalu.cloud/docs/storage/object-storage/Classes-de-Armazenamento/standard
+        // Mapping from here: #todo
         Name:     "storage_class",
         Help:     "The storage class to use when storing new objects in Magalu.",
         Provider: "Magalu",
         Examples: []fs.OptionExample{{
             Value: "STANDARD",
             Help:  "Standard storage class",
-        }, {
-            Value: "GLACIER_IR",
-            Help:  "Glacier Instant Retrieval storage class",
         }},
     }, {
         // Mapping from here: https://intl.cloud.tencent.com/document/product/436/30925
@@ -3388,7 +3344,7 @@ func setQuirks(opt *Options) {
     listObjectsV2 = true // Always use ListObjectsV2 instead of ListObjects
     virtualHostStyle = true // Use bucket.provider.com instead of putting the bucket in the URL
     urlEncodeListings = true // URL encode the listings to help with control characters
-    useMultipartEtag = true // Set if Etags for multipart uploads are compatible with AWS
+    useMultipartEtag = true // Set if Etags for multpart uploads are compatible with AWS
     useAcceptEncodingGzip = true // Set Accept-Encoding: gzip
     mightGzip = true // assume all providers might use content encoding gzip until proven otherwise
     useAlreadyExists = true // Set if provider returns AlreadyOwnedByYou or no error if you try to remake your own bucket
@@ -4976,7 +4932,7 @@ or from INTELLIGENT-TIERING Archive Access / Deep Archive Access tier to the Fre
 Usage Examples:

-    rclone backend restore s3:bucket/path/to/ --include /object -o priority=PRIORITY -o lifetime=DAYS
+    rclone backend restore s3:bucket/path/to/object -o priority=PRIORITY -o lifetime=DAYS
     rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY -o lifetime=DAYS
     rclone backend restore s3:bucket -o priority=PRIORITY -o lifetime=DAYS
     rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY
@@ -5910,25 +5866,6 @@ func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options
     return resp.Body, err
 }

-// middleware to stop the SDK adding `Accept-Encoding: identity`
-func removeDisableGzip() func(*middleware.Stack) error {
-    return func(stack *middleware.Stack) error {
-        _, err := stack.Finalize.Remove("DisableAcceptEncodingGzip")
-        return err
-    }
-}
-
-// middleware to set Accept-Encoding to how we want it
-//
-// This make sure we download compressed files as-is from all platforms
-func (f *Fs) acceptEncoding() (APIOptions []func(*middleware.Stack) error) {
-    APIOptions = append(APIOptions, removeDisableGzip())
-    if f.opt.UseAcceptEncodingGzip.Value {
-        APIOptions = append(APIOptions, smithyhttp.AddHeaderValue("Accept-Encoding", "gzip"))
-    }
-    return APIOptions
-}
-
 // Open an object for read
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
     bucket, bucketPath := o.split()
@@ -5962,8 +5899,11 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
     var APIOptions []func(*middleware.Stack) error

-    // Set the SDK to always download compressed files as-is
-    APIOptions = append(APIOptions, o.fs.acceptEncoding()...)
+    // Override the automatic decompression in the transport to
+    // download compressed files as-is
+    if o.fs.opt.UseAcceptEncodingGzip.Value {
+        APIOptions = append(APIOptions, smithyhttp.AddHeaderValue("Accept-Encoding", "gzip"))
+    }

     for _, option := range options {
         switch option.(type) {
@@ -6101,7 +6041,7 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
         if mOut == nil {
             err = fserrors.RetryErrorf("internal error: no info from multipart upload")
         } else if mOut.UploadId == nil {
-            err = fserrors.RetryErrorf("internal error: no UploadId in multipart upload: %#v", *mOut)
+            err = fserrors.RetryErrorf("internal error: no UploadId in multpart upload: %#v", *mOut)
         }
     }
     return f.shouldRetry(ctx, err)
@@ -6114,8 +6054,8 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
        chunkSize: int64(chunkSize),
         size:      size,
         f:         f,
-        bucket:    ui.req.Bucket,
-        key:       ui.req.Key,
+        bucket:    mOut.Bucket,
+        key:       mOut.Key,
         uploadID:  mOut.UploadId,
         multiPartUploadInput: &mReq,
         completedParts: make([]types.CompletedPart, 0),
@@ -6219,9 +6159,6 @@ func (w *s3ChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
         if chunkNumber <= 8 {
             return w.f.shouldRetry(ctx, err)
         }
-        if fserrors.ContextError(ctx, &err) {
-            return false, err
-        }
         // retry all chunks once have done the first few
         return true, err
     }
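For orientation: the two Accept-Encoding hunks above differ in whether the SDK's DisableAcceptEncodingGzip middleware is removed before the desired header is set. A minimal sketch of the minus-side approach, reassembled from the code quoted in this diff (the standalone function name is hypothetical; it assumes the smithy middleware and smithyhttp imports already used by this backend):

    // Sketch: build API options that make the AWS SDK download compressed objects as-is.
    func acceptEncodingOptions(useGzip bool) (apiOptions []func(*middleware.Stack) error) {
        // drop the SDK middleware that forces `Accept-Encoding: identity`
        apiOptions = append(apiOptions, func(stack *middleware.Stack) error {
            _, err := stack.Finalize.Remove("DisableAcceptEncodingGzip")
            return err
        })
        if useGzip {
            // then ask for gzip explicitly so compressed files arrive unmodified
            apiOptions = append(apiOptions, smithyhttp.AddHeaderValue("Accept-Encoding", "gzip"))
        }
        return apiOptions
    }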
@@ -23,20 +23,14 @@ func SetupS3Test(t *testing.T) (context.Context, *Options, *http.Client) {
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
-    opt := &fstests.Opt{
+    fstests.Run(t, &fstests.Opt{
         RemoteName: "TestS3:",
         NilObject:  (*Object)(nil),
-        TiersToTest: []string{"STANDARD"},
+        TiersToTest: []string{"STANDARD", "STANDARD_IA"},
         ChunkedUpload: fstests.ChunkedUploadConfig{
             MinChunkSize: minChunkSize,
         },
-    }
-    // Test wider range of tiers on AWS
-    if *fstest.RemoteName == "" || *fstest.RemoteName == "TestS3:" {
-        opt.TiersToTest = []string{"STANDARD", "STANDARD_IA"}
-    }
-    fstests.Run(t, opt)
+    })
 }

 func TestIntegration2(t *testing.T) {
@@ -97,6 +97,7 @@ import (
     "github.com/rclone/rclone/lib/pacer"
     "github.com/rclone/rclone/lib/random"
     "github.com/rclone/rclone/lib/rest"
+    "golang.org/x/oauth2"
 )

 const (
@@ -114,11 +115,13 @@ const (
 )

 // Generate a new oauth2 config which we will update when we know the TokenURL
-func newOauthConfig(tokenURL string) *oauthutil.Config {
-    return &oauthutil.Config{
+func newOauthConfig(tokenURL string) *oauth2.Config {
+    return &oauth2.Config{
         Scopes: nil,
-        AuthURL:  "https://secure.sharefile.com/oauth/authorize",
-        TokenURL: tokenURL,
+        Endpoint: oauth2.Endpoint{
+            AuthURL:  "https://secure.sharefile.com/oauth/authorize",
+            TokenURL: tokenURL,
+        },
         ClientID:     rcloneClientID,
         ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
         RedirectURL:  oauthutil.RedirectPublicSecureURL,
@@ -133,7 +136,7 @@ func init() {
         NewFs: NewFs,
         Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
             oauthConfig := newOauthConfig("")
-            checkAuth := func(oauthConfig *oauthutil.Config, auth *oauthutil.AuthResult) error {
+            checkAuth := func(oauthConfig *oauth2.Config, auth *oauthutil.AuthResult) error {
                 if auth == nil || auth.Form == nil {
                     return errors.New("endpoint not found in response")
                 }
@@ -144,7 +147,7 @@ func init() {
                 }
                 endpoint := "https://" + subdomain + "." + apicp
                 m.Set("endpoint", endpoint)
-                oauthConfig.TokenURL = endpoint + tokenPath
+                oauthConfig.Endpoint.TokenURL = endpoint + tokenPath
                 return nil
             }
             return oauthutil.ConfigOut("", &oauthutil.Options{
@@ -601,10 +601,9 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) (err error) {
     }
     fi, err := cn.smbShare.Stat(reqDir)
-    if err != nil {
-        return fmt.Errorf("SetModTime: stat: %w", err)
+    if err == nil {
+        o.statResult = fi
     }
-    o.statResult = fi
     return err
 }
@@ -686,6 +685,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
         return err
     }
     defer func() {
+        o.statResult, _ = cn.smbShare.Stat(filename)
         o.fs.putConnection(&cn)
     }()
@@ -723,7 +723,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
         return fmt.Errorf("Update Close failed: %w", err)
     }

-    // Set the modified time and also o.statResult
+    // Set the modified time
     err = o.SetModTime(ctx, src.ModTime(ctx))
     if err != nil {
         return fmt.Errorf("Update SetModTime failed: %w", err)
@@ -120,7 +120,7 @@ func init() {
     srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(rootURL) // FIXME

     // FIXME
-    // err = f.pacer.Call(func() (bool, error) {
+    //err = f.pacer.Call(func() (bool, error) {
     resp, err = srv.CallXML(context.Background(), &opts, &authRequest, nil)
     // return shouldRetry(ctx, resp, err)
     //})
@@ -327,7 +327,7 @@ func (f *Fs) readMetaDataForID(ctx context.Context, ID string) (info *api.File,
 func (f *Fs) getAuthToken(ctx context.Context) error {
     fs.Debugf(f, "Renewing token")

-    authRequest := api.TokenAuthRequest{
+    var authRequest = api.TokenAuthRequest{
         AccessKeyID:      withDefault(f.opt.AccessKeyID, accessKeyID),
         PrivateAccessKey: withDefault(f.opt.PrivateAccessKey, obscure.MustReveal(encryptedPrivateAccessKey)),
         RefreshToken:     f.opt.RefreshToken,
@@ -509,7 +509,7 @@ func errorHandler(resp *http.Response) (err error) {
         return fmt.Errorf("error reading error out of body: %w", err)
     }
     match := findError.FindSubmatch(body)
-    if len(match) < 2 || len(match[1]) == 0 {
+    if match == nil || len(match) < 2 || len(match[1]) == 0 {
         return fmt.Errorf("HTTP error %v (%v) returned body: %q", resp.StatusCode, resp.Status, body)
     }
     return fmt.Errorf("HTTP error %v (%v): %s", resp.StatusCode, resp.Status, match[1])
@@ -552,7 +552,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 // FindLeaf finds a directory of name leaf in the folder with ID pathID
 func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
-    // fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
+    //fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
     // Find the leaf in pathID
     found, err = f.listAll(ctx, pathID, nil, func(item *api.Collection) bool {
         if strings.EqualFold(item.Name, leaf) {
Default: false, Default: false,
}, },
fshttp.UnixSocketConfig, fshttp.UnixSocketConfig,
{ },
Name: "auth_redirect",
Help: `Preserve authentication on redirect.
If the server redirects rclone to a new domain when it is trying to
read a file then normally rclone will drop the Authorization: header
from the request.
This is standard security practice to avoid sending your credentials
to an unknown webserver.
However this is desirable in some circumstances. If you are getting
an error like "401 Unauthorized" when rclone is attempting to read
files from the webdav server then you can try this option.
`,
Advanced: true,
Default: false,
}},
}) })
} }
@@ -197,7 +180,6 @@ type Options struct {
ExcludeShares bool `config:"owncloud_exclude_shares"` ExcludeShares bool `config:"owncloud_exclude_shares"`
ExcludeMounts bool `config:"owncloud_exclude_mounts"` ExcludeMounts bool `config:"owncloud_exclude_mounts"`
UnixSocket string `config:"unix_socket"` UnixSocket string `config:"unix_socket"`
AuthRedirect bool `config:"auth_redirect"`
} }
// Fs represents a remote webdav // Fs represents a remote webdav
@@ -1474,7 +1456,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
ExtraHeaders: map[string]string{ ExtraHeaders: map[string]string{
"Depth": "0", "Depth": "0",
}, },
AuthRedirect: o.fs.opt.AuthRedirect, // allow redirects to preserve Auth
} }
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts) resp, err = o.fs.srv.Call(ctx, &opts)
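The help text removed in the hunk above describes the behaviour in prose. Purely as an illustration of what "preserve authentication on redirect" means in net/http terms (this is not rclone's implementation, just a self-contained sketch of the standard pattern):

    package main

    import "net/http"

    // authRedirectClient returns a client that re-attaches the Authorization
    // header which Go would otherwise drop when a redirect leaves the
    // original domain - the trade-off the deleted help text warns about.
    func authRedirectClient() *http.Client {
        return &http.Client{
            CheckRedirect: func(req *http.Request, via []*http.Request) error {
                req.Header.Set("Authorization", via[0].Header.Get("Authorization"))
                return nil
            },
        }
    }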
@@ -29,6 +29,7 @@ import (
     "github.com/rclone/rclone/lib/random"
     "github.com/rclone/rclone/lib/readers"
     "github.com/rclone/rclone/lib/rest"
+    "golang.org/x/oauth2"
 )

 // oAuth
@@ -46,9 +47,11 @@ const (
 // Globals
 var (
     // Description of how to auth for this app
-    oauthConfig = &oauthutil.Config{
-        AuthURL:  "https://oauth.yandex.com/authorize", //same as https://oauth.yandex.ru/authorize
-        TokenURL: "https://oauth.yandex.com/token",     //same as https://oauth.yandex.ru/token
+    oauthConfig = &oauth2.Config{
+        Endpoint: oauth2.Endpoint{
+            AuthURL:  "https://oauth.yandex.com/authorize", //same as https://oauth.yandex.ru/authorize
+            TokenURL: "https://oauth.yandex.com/token",     //same as https://oauth.yandex.ru/token
+        },
         ClientID:     rcloneClientID,
         ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
         RedirectURL:  oauthutil.RedirectURL,
@@ -47,7 +47,7 @@ const (
 // Globals
 var (
     // Description of how to auth for this app
-    oauthConfig = &oauthutil.Config{
+    oauthConfig = &oauth2.Config{
         Scopes: []string{
             "aaaserver.profile.read",
             "WorkDrive.team.READ",
@@ -55,10 +55,11 @@ var (
             "WorkDrive.files.ALL",
             "ZohoFiles.files.ALL",
         },
+        Endpoint: oauth2.Endpoint{
             AuthURL:   "https://accounts.zoho.eu/oauth/v2/auth",
             TokenURL:  "https://accounts.zoho.eu/oauth/v2/token",
             AuthStyle: oauth2.AuthStyleInParams,
+        },
         ClientID:     rcloneClientID,
         ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
         RedirectURL:  oauthutil.RedirectLocalhostURL,
@@ -275,8 +276,8 @@ func setupRegion(m configmap.Mapper) error {
     downloadURL = fmt.Sprintf("https://download.zoho.%s/v1/workdrive", region)
     uploadURL = fmt.Sprintf("https://upload.zoho.%s/workdrive-api/v1", region)
     accountsURL = fmt.Sprintf("https://accounts.zoho.%s", region)
-    oauthConfig.AuthURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/auth", region)
-    oauthConfig.TokenURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/token", region)
+    oauthConfig.Endpoint.AuthURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/auth", region)
+    oauthConfig.Endpoint.TokenURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/token", region)
     return nil
 }
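The oauth hunks in this compare (pcloud, pikpak, premiumizeme, putio, sharefile, yandex, zoho) all make the same structural move: rclone's own oauthutil.Config keeps AuthURL, TokenURL and AuthStyle at the top level, while the older golang.org/x/oauth2 form nests them in an Endpoint. A minimal sketch of the two shapes, using only values quoted in the zoho hunk above; the function and variable names are illustrative, not from the source.

    func oauthShapes() {
        // minus side: oauthutil.Config with flat URLs
        newShape := &oauthutil.Config{
            AuthURL:  "https://accounts.zoho.eu/oauth/v2/auth",
            TokenURL: "https://accounts.zoho.eu/oauth/v2/token",
        }
        // plus side: oauth2.Config with a nested oauth2.Endpoint
        oldShape := &oauth2.Config{
            Endpoint: oauth2.Endpoint{
                AuthURL:  "https://accounts.zoho.eu/oauth/v2/auth",
                TokenURL: "https://accounts.zoho.eu/oauth/v2/token",
            },
        }
        _, _ = newShape, oldShape
    }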
@@ -11,5 +11,4 @@
 <services+github@simjo.st>
 <seb•ɑƬ•chezwam•ɖɵʈ•org>
 <allllaboutyou@gmail.com>
 <psycho@feltzv.fr>
-<afw5059@gmail.com>
@@ -7,11 +7,11 @@ for backend in $( find backend -maxdepth 1 -type d ); do
         continue
     fi
-    commit=$(git log --oneline -- $backend | tail -n 1 | cut -d' ' -f1)
+    commit=$(git log --oneline -- $backend | tail -1 | cut -d' ' -f1)
     if [ "$commit" == "" ]; then
-        commit=$(git log --oneline -- backend/$backend | tail -n 1 | cut -d' ' -f1)
+        commit=$(git log --oneline -- backend/$backend | tail -1 | cut -d' ' -f1)
     fi
-    version=$(git tag --contains $commit | grep ^v | sort -n | head -n 1)
+    version=$(git tag --contains $commit | grep ^v | sort -n | head -1)
     echo $backend $version
     sed -i~ "4i versionIntroduced: \"$version\"" docs/content/${backend}.md
 done
@@ -7,7 +7,6 @@ conversion into man pages etc.
 import os
 import re
 import time
-import subprocess
 from datetime import datetime

 docpath = "docs/content"
@@ -36,7 +35,6 @@ docs = [
     "box.md",
     "cache.md",
     "chunker.md",
-    "cloudinary.md",
     "sharefile.md",
     "crypt.md",
     "compress.md",
@@ -193,23 +191,13 @@ def main():
     command_docs = read_commands(docpath).replace("\\", "\\\\") # escape \ so we can use command_docs in re.sub
     build_date = datetime.utcfromtimestamp(
         int(os.environ.get('SOURCE_DATE_EPOCH', time.time())))
-    help_output = subprocess.check_output(["rclone", "help"]).decode("utf-8")
     with open(outfile, "w") as out:
         out.write("""\
 %% rclone(1) User Manual
 %% Nick Craig-Wood
 %% %s
-
-# NAME
-
-rclone - manage files on cloud storage
-
-# SYNOPSIS
-
-```
-%s
-```
-
-""" % (build_date.strftime("%b %d, %Y"), help_output))
+""" % build_date.strftime("%b %d, %Y"))
     for doc in docs:
         contents = read_doc(doc)
         # Substitute the commands into doc.md
@@ -29,7 +29,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
     cmd := exec.Command("git", "log", "--oneline", from+".."+to)
     out, err := cmd.Output()
     if err != nil {
-        log.Fatalf("failed to run git log %s: %v", from+".."+to, err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
+        log.Fatalf("failed to run git log %s: %v", from+".."+to, err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
     }
     logMap = map[string]string{}
     logs = []string{}
@@ -39,7 +39,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
     }
     match := logRe.FindSubmatch(line)
     if match == nil {
-        log.Fatalf("failed to parse line: %q", line) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
+        log.Fatalf("failed to parse line: %q", line) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
     }
     var hash, logMessage = string(match[1]), string(match[2])
     logMap[logMessage] = hash
@@ -52,12 +52,12 @@ func main() {
     flag.Parse()
     args := flag.Args()
     if len(args) != 0 {
-        log.Fatalf("Syntax: %s", os.Args[0]) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
+        log.Fatalf("Syntax: %s", os.Args[0]) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
     }
     // v1.54.0
     versionBytes, err := os.ReadFile("VERSION")
     if err != nil {
-        log.Fatalf("Failed to read version: %v", err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
+        log.Fatalf("Failed to read version: %v", err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
     }
     if versionBytes[0] == 'v' {
         versionBytes = versionBytes[1:]
@@ -65,7 +65,7 @@ func main() {
     versionBytes = bytes.TrimSpace(versionBytes)
     semver := semver.New(string(versionBytes))
     stable := fmt.Sprintf("v%d.%d", semver.Major, semver.Minor-1)
-    log.Printf("Finding commits in %v not in stable %s", semver, stable) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
+    log.Printf("Finding commits in %v not in stable %s", semver, stable) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
     masterMap, masterLogs := readCommits(stable+".0", "master")
     stableMap, _ := readCommits(stable+".0", stable+"-stable")
     for _, logMessage := range masterLogs {
@@ -7,18 +7,15 @@ Run with no arguments to test all backends or a supply a list of
 backends to test.
 """

-import os
-import re
-import sys
-import subprocess
-
 all_backends = "backend/all/all.go"

 # compile command which is more or less like the production builds
 compile_command = ["go", "build", "--ldflags", "-s", "-trimpath"]

-# disable CGO as that makes a lot of difference to binary size
-os.environ["CGO_ENABLED"]="0"
+import os
+import re
+import sys
+import subprocess

 match_backend = re.compile(r'"github.com/rclone/rclone/backend/(.*?)"')
@@ -46,9 +43,6 @@ def write_all(orig_all, backend):
         # Comment out line matching backend
         if match and match.group(1) == backend:
             line = "// " + line
-        # s3 and pikpak depend on each other
-        if backend == "s3" and "pikpak" in line:
-            line = "// " + line
         fd.write(line+"\n")

 def compile():
@@ -13,7 +13,7 @@ if [ "$1" == "" ]; then
     exit 1
 fi
 VERSION="$1"
-ANCHOR=$(grep '^## v' docs/content/changelog.md | head -n 1 | sed 's/^## //; s/[^A-Za-z0-9-]/-/g; s/--*/-/g')
+ANCHOR=$(grep '^## v' docs/content/changelog.md | head -1 | sed 's/^## //; s/[^A-Za-z0-9-]/-/g; s/--*/-/g')

 cat > "/tmp/${VERSION}-release-notes" <<EOF
 This is the ${VERSION} release of rclone.
@@ -23,23 +23,19 @@ func init() {
 }

 var commandDefinition = &cobra.Command{
-    Use:   "authorize <fs name> [base64_json_blob | client_id client_secret]",
+    Use:   "authorize",
     Short: `Remote authorization.`,
     Long: `Remote authorization. Used to authorize a remote or headless
 rclone from a machine with a browser - use as instructed by
 rclone config.

-The command requires 1-3 arguments:
-  - fs name (e.g., "drive", "s3", etc.)
-  - Either a base64 encoded JSON blob obtained from a previous rclone config session
-  - Or a client_id and client_secret pair obtained from the remote service
-
 Use --auth-no-open-browser to prevent rclone to open auth
 link in default browser automatically.

 Use --template to generate HTML output via a custom Go template. If a blank string is provided as an argument to this flag, the default template is used.`,
     Annotations: map[string]string{
         "versionIntroduced": "v1.27",
-        // "groups": "",
     },
     RunE: func(command *cobra.Command, args []string) error {
         cmd.CheckArgs(1, 3, command, args)
@@ -1,32 +0,0 @@
-package authorize
-
-import (
-    "bytes"
-    "strings"
-    "testing"
-
-    "github.com/spf13/cobra"
-)
-
-func TestAuthorizeCommand(t *testing.T) {
-    // Test that the Use string is correctly formatted
-    if commandDefinition.Use != "authorize <fs name> [base64_json_blob | client_id client_secret]" {
-        t.Errorf("Command Use string doesn't match expected format: %s", commandDefinition.Use)
-    }
-
-    // Test that help output contains the argument information
-    buf := &bytes.Buffer{}
-    cmd := &cobra.Command{}
-    cmd.AddCommand(commandDefinition)
-    cmd.SetOut(buf)
-    cmd.SetArgs([]string{"authorize", "--help"})
-
-    err := cmd.Execute()
-    if err != nil {
-        t.Fatalf("Failed to execute help command: %v", err)
-    }
-
-    helpOutput := buf.String()
-    if !strings.Contains(helpOutput, "authorize <fs name>") {
-        t.Errorf("Help output doesn't contain correct usage information")
-    }
-}
@@ -746,16 +746,6 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
     case "test-func":
         b.TestFn = testFunc
         return
-    case "concurrent-func":
-        b.TestFn = func() {
-            src := filepath.Join(b.dataDir, "file7.txt")
-            dst := "file1.txt"
-            err := b.copyFile(ctx, src, b.replaceHex(b.path2), dst)
-            if err != nil {
-                fs.Errorf(src, "error copying file: %v", err)
-            }
-        }
-        return
     case "fix-names":
         // in case the local os converted any filenames
         ci.NoUnicodeNormalization = true
@@ -881,9 +871,10 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
         if !ok || err != nil {
             fs.Logf(remotePath, "Can't find expected file %s (was it renamed by the os?) %v", args[1], err)
             return
+        } else {
+            // include hash of filename to make unicode form differences easier to see in logs
+            fs.Debugf(remotePath, "verified file exists at correct path. filename hash: %s", stringToHash(leaf))
         }
-        // include hash of filename to make unicode form differences easier to see in logs
-        fs.Debugf(remotePath, "verified file exists at correct path. filename hash: %s", stringToHash(leaf))
         return
     default:
         return fmt.Errorf("unknown command: %q", args[0])
@@ -218,7 +218,7 @@ func (b *bisyncRun) setFromCompareFlag(ctx context.Context) error {
     if b.opt.CompareFlag == "" {
         return nil
     }
-    var CompareFlag CompareOpt // for exclusions
+    var CompareFlag CompareOpt // for exlcusions
     opts := strings.Split(b.opt.CompareFlag, ",")
     for _, opt := range opts {
         switch strings.ToLower(strings.TrimSpace(opt)) {
@@ -161,7 +161,9 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string,
         return
     }
-    err = b.checkListing(now, newListing, "current "+msg)
+    if err == nil {
+        err = b.checkListing(now, newListing, "current "+msg)
+    }
     if err != nil {
         return
     }
@@ -284,7 +286,7 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string,
 }

 // applyDeltas
-func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (results2to1, results1to2 []Results, queues queues, err error) {
+func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (changes1, changes2 bool, results2to1, results1to2 []Results, queues queues, err error) {
     path1 := bilib.FsPath(b.fs1)
     path2 := bilib.FsPath(b.fs2)
@@ -365,7 +367,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (result
         }
     }

-    // if there are potential conflicts to check, check them all here (outside the loop) in one fell swoop
+    //if there are potential conflicts to check, check them all here (outside the loop) in one fell swoop
     matches, err := b.checkconflicts(ctxCheck, filterCheck, b.fs1, b.fs2)

     for _, file := range ds1.sort() {
@@ -390,7 +392,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (result
         } else if d2.is(deltaOther) {
             b.indent("!WARNING", file, "New or changed in both paths")

-            // if files are identical, leave them alone instead of renaming
+            //if files are identical, leave them alone instead of renaming
             if (dirs1.has(file) || dirs1.has(alias)) && (dirs2.has(file) || dirs2.has(alias)) {
                 fs.Infof(nil, "This is a directory, not a file. Skipping equality check and will not rename: %s", file)
                 ls1.getPut(file, skippedDirs1)
@@ -484,6 +486,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (result
     // Do the batch operation
     if copy2to1.NotEmpty() && !b.InGracefulShutdown {
+        changes1 = true
         b.indent("Path2", "Path1", "Do queued copies to")
         ctx = b.setBackupDir(ctx, 1)
         results2to1, err = b.fastCopy(ctx, b.fs2, b.fs1, copy2to1, "copy2to1")
@@ -495,11 +498,12 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (result
             return
         }
-        // copy empty dirs from path2 to path1 (if --create-empty-src-dirs)
+        //copy empty dirs from path2 to path1 (if --create-empty-src-dirs)
         b.syncEmptyDirs(ctx, b.fs1, copy2to1, dirs2, &results2to1, "make")
     }

     if copy1to2.NotEmpty() && !b.InGracefulShutdown {
+        changes2 = true
         b.indent("Path1", "Path2", "Do queued copies to")
         ctx = b.setBackupDir(ctx, 2)
         results1to2, err = b.fastCopy(ctx, b.fs1, b.fs2, copy1to2, "copy1to2")
@@ -511,7 +515,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (result
             return
         }
-        // copy empty dirs from path1 to path2 (if --create-empty-src-dirs)
+        //copy empty dirs from path1 to path2 (if --create-empty-src-dirs)
         b.syncEmptyDirs(ctx, b.fs2, copy1to2, dirs1, &results1to2, "make")
     }
@@ -519,7 +523,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (result
         if err = b.saveQueue(delete1, "delete1"); err != nil {
             return
         }
-        // propagate deletions of empty dirs from path2 to path1 (if --create-empty-src-dirs)
+        //propagate deletions of empty dirs from path2 to path1 (if --create-empty-src-dirs)
         b.syncEmptyDirs(ctx, b.fs1, delete1, dirs1, &results2to1, "remove")
     }
@@ -527,7 +531,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (result
         if err = b.saveQueue(delete2, "delete2"); err != nil {
             return
         }
-        // propagate deletions of empty dirs from path1 to path2 (if --create-empty-src-dirs)
+        //propagate deletions of empty dirs from path1 to path2 (if --create-empty-src-dirs)
         b.syncEmptyDirs(ctx, b.fs2, delete2, dirs2, &results1to2, "remove")
     }

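Taken together, the applyDeltas hunks add two dirty flags to the function's results: changes1 and changes2 are set only when a queued copy batch actually runs against the corresponding side. A rough sketch of that pattern (the queue and batch names here are illustrative, not the bisync API):

    // applyBatches marks a side as changed only when work is actually
    // performed against it, so callers can tell untouched sides apart.
    func applyBatches(copy2to1, copy1to2 []string, run func([]string) error) (changes1, changes2 bool, err error) {
    	if len(copy2to1) > 0 {
    		changes1 = true // Path1 is about to be modified
    		if err = run(copy2to1); err != nil {
    			return
    		}
    	}
    	if len(copy1to2) > 0 {
    		changes2 = true // Path2 is about to be modified
    		err = run(copy1to2)
    	}
    	return
    }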

@@ -394,7 +394,7 @@ func parseHash(str string) (string, string, error) {
 	return "", "", fmt.Errorf("invalid hash %q", str)
 }
 
-// checkListing verifies that listing is not empty (unless resyncing)
+// checkListing verifies that listing is not empty (unless resynching)
 func (b *bisyncRun) checkListing(ls *fileList, listing, msg string) error {
 	if b.opt.Resync || !ls.empty() {
 		return nil

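checkListing is bisync's safety net against treating an accidentally empty listing as a mass deletion: it passes when resyncing or when the listing has entries. Only the early return is visible in the hunk, so the error branch in the sketch below is an assumption about the hidden remainder, not the actual implementation:

    // Hedged sketch of checkListing's shape (assumes "fmt" is imported).
    // The nil cases match the diff; the error message is a guess.
    func checkListing(resync bool, entries int, listing, msg string) error {
    	if resync || entries > 0 {
    		return nil
    	}
    	return fmt.Errorf("empty %s listing: %s", msg, listing)
    }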

@@ -359,6 +359,8 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
 
 	// Determine and apply changes to Path1 and Path2
 	noChanges := ds1.empty() && ds2.empty()
+	changes1 := false // 2to1
+	changes2 := false // 1to2
 
 	results2to1 := []Results{}
 	results1to2 := []Results{}

@@ -368,7 +370,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
 		fs.Infof(nil, "No changes found")
 	} else {
 		fs.Infof(nil, "Applying changes")
-		results2to1, results1to2, queues, err = b.applyDeltas(octx, ds1, ds2)
+		changes1, changes2, results2to1, results1to2, queues, err = b.applyDeltas(octx, ds1, ds2)
 		if err != nil {
 			if b.InGracefulShutdown && (err == context.Canceled || err == accounting.ErrorMaxTransferLimitReachedGraceful || strings.Contains(err.Error(), "context canceled")) {
 				fs.Infof(nil, "Ignoring sync error due to Graceful Shutdown: %v", err)

@@ -393,11 +395,21 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
 	}
 
 	b.saveOldListings()
 	// save new listings
+	// NOTE: "changes" in this case does not mean this run vs. last run, it means start of this run vs. end of this run.
+	// i.e. whether we can use the March lst-new as this side's lst without modifying it.
 	if noChanges {
 		b.replaceCurrentListings()
 	} else {
-		err1 = b.modifyListing(fctx, b.fs2, b.fs1, results2to1, queues, false) // 2to1
-		err2 = b.modifyListing(fctx, b.fs1, b.fs2, results1to2, queues, true) // 1to2
+		if changes1 || b.InGracefulShutdown { // 2to1
+			err1 = b.modifyListing(fctx, b.fs2, b.fs1, results2to1, queues, false)
+		} else {
+			err1 = bilib.CopyFileIfExists(b.newListing1, b.listing1)
+		}
+		if changes2 || b.InGracefulShutdown { // 1to2
+			err2 = b.modifyListing(fctx, b.fs1, b.fs2, results1to2, queues, true)
+		} else {
+			err2 = bilib.CopyFileIfExists(b.newListing2, b.listing2)
+		}
 	}
 	if b.DebugName != "" {
 		l1, _ := b.loadListing(b.listing1)

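The runLocked hunk above is the payoff for those flags: when a side saw no changes during this run (and no graceful shutdown is in progress), its freshly built March listing can be copied into place verbatim via bilib.CopyFileIfExists, skipping the more expensive modifyListing reconciliation. The decision reduces to something like this sketch; any name not shown in the diff is illustrative:

    // saveListing chooses between reconciling a listing against sync
    // results and adopting the fresh listing as-is, per the hunk above.
    func saveListing(changed, inGracefulShutdown bool, reconcile, adopt func() error) error {
    	if changed || inGracefulShutdown {
    		return reconcile() // b.modifyListing(...) in the diff
    	}
    	return adopt() // bilib.CopyFileIfExists(newListing, listing) in the diff
    }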

@@ -1,10 +0,0 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"


@@ -1,10 +0,0 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"


@@ -1,10 +0,0 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"


@@ -1,10 +0,0 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"


@@ -1,10 +0,0 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"


@@ -1,10 +0,0 @@
# bisync listing v1 from test
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
- 19 - - 2023-08-26T00:00:00.000000000+0000 "file1.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file5.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file6.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file7.txt"
- 0 - - 2000-01-01T00:00:00.000000000+0000 "file8.txt"


@@ -1,73 +0,0 @@
(01) : test concurrent
(02) : test initial bisync
(03) : bisync resync
INFO : Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.
INFO : Bisyncing with Comparison Settings:
{
"Modtime": true,
"Size": true,
"Checksum": false,
"NoSlowHash": false,
"SlowHashSyncOnly": false,
"DownloadHash": false
}
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : - Path1 Resync is copying files to - Path2
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
(04) : test changed on one path - file1
(05) : touch-glob 2001-01-02 {datadir/} file5R.txt
(06) : touch-glob 2023-08-26 {datadir/} file7.txt
(07) : copy-as {datadir/}file5R.txt {path2/} file1.txt
(08) : test bisync with file changed during
(09) : concurrent-func
(10) : bisync
INFO : Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.
INFO : Bisyncing with Comparison Settings:
{
"Modtime": true,
"Size": true,
"Checksum": false,
"NoSlowHash": false,
"SlowHashSyncOnly": false,
"DownloadHash": false
}
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Building Path1 and Path2 listings
INFO : Path1 checking for diffs
INFO : Path2 checking for diffs
INFO : - Path2 File changed: size (larger), time (newer) - file1.txt
INFO : Path2: 1 changes: 0 new, 1 modified, 0 deleted
INFO : (Modified: 1 newer, 0 older, 1 larger, 0 smaller)
INFO : Applying changes
INFO : - Path2 Queue copy to Path1 - {path1/}file1.txt
INFO : - Path2 Do queued copies to - Path1
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
(11) : bisync
INFO : Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.
INFO : Bisyncing with Comparison Settings:
{
"Modtime": true,
"Size": true,
"Checksum": false,
"NoSlowHash": false,
"SlowHashSyncOnly": false,
"DownloadHash": false
}
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Building Path1 and Path2 listings
INFO : Path1 checking for diffs
INFO : Path2 checking for diffs
INFO : No changes found
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful


@@ -1 +0,0 @@
This file is used for testing the health of rclone accesses to the local/remote file system. Do not delete.


@@ -1 +0,0 @@
This file is newer


@@ -1 +0,0 @@
This file is newer


@@ -1 +0,0 @@
This file is newer


@@ -1 +0,0 @@
Newer version


@@ -1 +0,0 @@
This file is newer and not equal to 5R


@@ -1 +0,0 @@
This file is newer and not equal to 5L


@@ -1 +0,0 @@
This file is newer


@@ -1 +0,0 @@
This file is newer


@@ -1,15 +0,0 @@
test concurrent
test initial bisync
bisync resync
test changed on one path - file1
touch-glob 2001-01-02 {datadir/} file5R.txt
touch-glob 2023-08-26 {datadir/} file7.txt
copy-as {datadir/}file5R.txt {path2/} file1.txt
test bisync with file changed during
concurrent-func
bisync
bisync


@@ -80,7 +80,6 @@ INFO : Path2 checking for diffs
 INFO : Applying changes
 INFO : - Path1 Queue copy to Path2 - {path2/}subdir
 INFO : - Path1 Do queued copies to - Path2
-INFO : subdir: Making directory
 INFO : Updating listings
 INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
 INFO : Bisync successful

Some files were not shown because too many files have changed in this diff.