mirror of https://github.com/rclone/rclone.git synced 2026-01-05 01:53:14 +00:00

Compare commits

34 Commits

Author SHA1 Message Date
Nick Craig-Wood
f2d16ab4c5 Version v1.68.2 2024-11-15 12:20:50 +00:00
Nick Craig-Wood
c0fc4fe0ca s3: fix multitenant multipart uploads with CEPH
CEPH uses a special bucket form `tenant:bucket` for multitenant
access using S3 as documented here:

https://docs.ceph.com/en/reef/radosgw/multitenancy/#s3

However when doing multipart uploads, in the reply from
`CreateMultipart` the `tenant:` was missing from the `Bucket` response
rclone was using to build the `UploadPart` request. This caused a 404
failure return. This may be a CEPH bug, but it is easy to work around.

This changes the code to use the `Bucket` and `Key` that we used in
`CreateMultipart` in `UploadPart` rather than the one returned from
`CreateMultipart` which fixes the problem.

See: https://forum.rclone.org/t/rclone-zcat-does-not-work-with-a-multitenant-ceph-backend/48618
2024-11-14 16:50:19 +00:00
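A minimal sketch of the shape of this workaround using aws-sdk-go-v2 (the SDK rclone's s3 backend uses); the helper below is illustrative, not rclone's actual code:

    package sketch

    import (
        "context"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    // startUpload begins a multipart upload and uploads the first part,
    // reusing the bucket/key we sent rather than the ones echoed back by
    // CreateMultipartUpload, which CEPH returns without the "tenant:" prefix.
    func startUpload(ctx context.Context, client *s3.Client, bucket, key string) error {
        mpu, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
            Bucket: aws.String(bucket), // e.g. "tenant:bucket" on CEPH
            Key:    aws.String(key),
        })
        if err != nil {
            return err
        }
        _, err = client.UploadPart(ctx, &s3.UploadPartInput{
            Bucket:     aws.String(bucket), // use our values, not mpu's echoed Bucket/Key
            Key:        aws.String(key),
            UploadId:   mpu.UploadId,
            PartNumber: aws.Int32(1),
            // Body: reader for the part data goes here
        })
        return err
    }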
Nick Craig-Wood
669b2f2669 local: fix permission and ownership on symlinks with --links and --metadata
Before this change, if writing to a local backend with --metadata and
--links, if the incoming metadata contained mode or ownership
information then rclone would apply the mode/ownership to the
destination of the link not the link itself.

This fixes the problem by using the link-safe syscall variants
lchown/fchmodat when --links and --metadata are in use. Note that Linux
does not support setting permissions on symlinks, so rclone emits a
debug message in this case.

This also fixes setting times on symlinks on Windows, which wasn't
implemented for atime and mtime, and which incorrectly set the target of
the symlink for btime.

See: https://github.com/rclone/rclone/security/advisories/GHSA-hrxh-9w67-g4cv
2024-11-14 16:36:22 +00:00
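A hedged sketch of the link-safe variants involved (not rclone's actual code): os.Lchown operates on the link itself, and fchmodat with AT_SYMLINK_NOFOLLOW is the permissions analogue, which Linux rejects with EOPNOTSUPP - hence the debug message rather than a hard error:

    package sketch

    import (
        "errors"
        "log"
        "os"

        "golang.org/x/sys/unix"
    )

    // setLinkMetadata applies ownership and mode to the symlink itself,
    // never to its target.
    func setLinkMetadata(path string, uid, gid int, mode os.FileMode) error {
        if err := os.Lchown(path, uid, gid); err != nil { // lchown: acts on the link
            return err
        }
        err := unix.Fchmodat(unix.AT_FDCWD, path, uint32(mode.Perm()), unix.AT_SYMLINK_NOFOLLOW)
        if errors.Is(err, unix.EOPNOTSUPP) {
            // Linux cannot set permissions on symlinks; log and carry on.
            log.Printf("skipping mode on symlink %q: not supported", path)
            return nil
        }
        return err
    }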
Dimitrios Slamaris
e1ba10a86e bisync: fix output capture restoring the wrong output for logrus
Before this change, if rclone was used as a library and logrus was used
after a call to rc `sync/bisync`, logging no longer worked and ended up
writing to a closed pipe.

This change restores the output correctly.

Fixes #8158
2024-11-14 16:36:22 +00:00
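A rough sketch of the capture-and-restore pattern at issue, assuming output is redirected through an os.Pipe; restoring anything other than the previously active writer leaves later loggers (such as logrus) pointing at a closed pipe:

    package sketch

    import (
        "io"
        "os"
    )

    // captureStderr redirects os.Stderr into a pipe while run executes,
    // then puts back the exact writer that was active before.
    func captureStderr(run func()) (string, error) {
        saved := os.Stderr // what we must restore, whatever it was
        r, w, err := os.Pipe()
        if err != nil {
            return "", err
        }
        os.Stderr = w
        run()
        os.Stderr = saved // restore the original, not a stale copy
        _ = w.Close()     // unblock the reader
        out, err := io.ReadAll(r)
        return string(out), err
    }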
Nick Craig-Wood
022442cf58 build: fix comments after golangci-lint upgrade 2024-11-14 16:36:22 +00:00
dependabot[bot]
5cc4488294 build(deps): bump github.com/golang-jwt/jwt/v4 from 4.5.0 to 4.5.1
Bumps [github.com/golang-jwt/jwt/v4](https://github.com/golang-jwt/jwt) from 4.5.0 to 4.5.1.
- [Release notes](https://github.com/golang-jwt/jwt/releases)
- [Changelog](https://github.com/golang-jwt/jwt/blob/main/VERSION_HISTORY.md)
- [Commits](https://github.com/golang-jwt/jwt/compare/v4.5.0...v4.5.1)

---
updated-dependencies:
- dependency-name: github.com/golang-jwt/jwt/v4
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-11-14 16:36:22 +00:00
Nick Craig-Wood
ec9566c5c3 pikpak: fix fatal crash on startup with token that can't be refreshed 2024-11-14 16:36:22 +00:00
Nick Craig-Wood
f6976eb4c4 serve s3: fix excess locking which was making serve s3 single threaded
The fix for this was made in the upstream library, narrowing the
locking window.

See: https://forum.rclone.org/t/can-rclone-serve-s3-handle-more-than-one-client/48329/
2024-11-14 16:36:22 +00:00
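An illustrative (not the upstream library's actual) example of what narrowing a lock window means: the mutex guards only the shared map access, so slow response writes no longer serialise every client:

    package sketch

    import (
        "net/http"
        "sync"
    )

    type backend struct {
        mu      sync.Mutex
        objects map[string][]byte
    }

    func (b *backend) serve(w http.ResponseWriter, r *http.Request) {
        b.mu.Lock()
        data := b.objects[r.URL.Path] // short critical section
        b.mu.Unlock()

        _, _ = w.Write(data) // slow network I/O runs outside the lock
    }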
Nick Craig-Wood
c242c00799 onedrive: fix Retry-After handling to look at 503 errors also
According to the Microsoft docs a Retry-After header can be returned
on 429 errors and 503 errors, but before this change we were only
checking for it on 429 errors.

See: https://forum.rclone.org/t/onedrive-503-response-retry-after-not-used/48045
2024-11-14 16:36:22 +00:00
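A hedged sketch of the widened check (illustrative, not rclone's retry code): consult Retry-After on 503 as well as 429, handling the integer-seconds form of the header:

    package sketch

    import (
        "net/http"
        "strconv"
        "time"
    )

    // retryAfter reports how long to wait, honouring Retry-After on both
    // 429 (Too Many Requests) and 503 (Service Unavailable) responses.
    func retryAfter(resp *http.Response) (time.Duration, bool) {
        if resp == nil {
            return 0, false
        }
        switch resp.StatusCode {
        case http.StatusTooManyRequests, http.StatusServiceUnavailable:
            if s := resp.Header.Get("Retry-After"); s != "" {
                if secs, err := strconv.Atoi(s); err == nil {
                    return time.Duration(secs) * time.Second, true
                }
            }
        }
        return 0, false
    }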
Kaloyan Raev
bf954b74ff s3: Storj provider: fix server-side copy of files bigger than 5GB
Like some other S3-compatible providers, Storj does not currently
implement UploadPartCopy and returns NotImplemented errors for
multi-part server-side copies.

This patch works around the problem by raising --s3-copy-cutoff for
Storj to the maximum. This means that rclone will never use
multi-part copies for files in Storj. This includes files larger than
5GB which (according to AWS documentation) must be copied with
multi-part copy. This works fine for Storj.

See https://github.com/storj/roadmap/issues/40
2024-11-14 16:36:22 +00:00
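Conceptually the workaround looks like the sketch below; the option names only loosely mirror rclone's s3 backend, and math.MaxInt64 standing in for "the maximum" is an assumption:

    package main

    import (
        "fmt"
        "math"
    )

    // Options loosely mirrors rclone's s3 backend options (assumed names).
    type Options struct {
        Provider   string
        CopyCutoff int64 // objects larger than this use multipart server-side copy
    }

    func applyProviderQuirks(opt *Options) {
        if opt.Provider == "Storj" {
            // Storj returns NotImplemented for UploadPartCopy, so disable
            // multipart copies entirely by raising the cutoff to the maximum.
            opt.CopyCutoff = math.MaxInt64
        }
    }

    func main() {
        opt := Options{Provider: "Storj", CopyCutoff: 5 << 30}
        applyProviderQuirks(&opt)
        fmt.Println(opt.CopyCutoff) // 9223372036854775807
    }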
tgfisher
88f0770d0a docs: mention that inline comments are not supported in a filter-file 2024-11-14 16:36:22 +00:00
Randy Bush
41d905c9b0 docs: fix forward refs in step 9 of using your own client id 2024-11-14 16:36:22 +00:00
Alexandre Hamez
300a063b5e docs: fix Scaleway Glacier website URL 2024-11-14 16:36:21 +00:00
Simon Bos
61bf29ed5e dlna: fix loggingResponseWriter disregarding log level 2024-11-14 16:36:21 +00:00
Nick Craig-Wood
3191717572 s3: fix crash when using --s3-download-url after migration to SDKv2
Before this change rclone was crashing when the download URL did not
supply an X-Amz-Storage-Class header.

This change allows the header to be missing.

See: https://forum.rclone.org/t/sigsegv-on-ubuntu-24-04/48047
2024-11-14 16:36:21 +00:00
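The shape of the fix, sketched: after the SDKv2 migration the storage class arrives as a *string which is nil when the response omits X-Amz-Storage-Class, so it must be guarded before dereferencing (illustrative code, not rclone's):

    package sketch

    // storageClassOrEmpty treats a missing X-Amz-Storage-Class header as
    // "unknown" instead of dereferencing a nil pointer and crashing.
    func storageClassOrEmpty(sc *string) string {
        if sc == nil {
            return ""
        }
        return *sc
    }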
Nick Craig-Wood
961dfe97b5 docs: update overview to show pcloud can set modtime
See 258092f9c6 and #7896
2024-11-14 16:36:21 +00:00
Nick Craig-Wood
22612b4b38 Add RcloneView as a sponsor 2024-11-14 16:36:21 +00:00
Nick Craig-Wood
b9927461c3 accounting: fix wrong message on SIGUSR2 to enable/disable bwlimit
This was caused by the message code only looking at one of the
bandwidth filters, not all of them.

Fixes #8104
2024-11-14 16:36:21 +00:00
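Schematically, the corrected message logic consults every bandwidth filter rather than just the first one (a hand-rolled illustration, not rclone's accounting code):

    package sketch

    type bwFilter struct{ limitEnabled bool }

    // bwLimitEnabled reports whether any bandwidth filter is active, so the
    // SIGUSR2 toggle message reflects the real state.
    func bwLimitEnabled(filters []*bwFilter) bool {
        for _, f := range filters {
            if f.limitEnabled {
                return true
            }
        }
        return false
    }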
wiserain
6d04be99f2 pikpak: fix cid/gcid calculations for fs.OverrideRemote
Previously, cid/gcid (custom hash for pikpak) calculations failed when 
attempting to unwrap object info from `fs.OverrideRemote`. 

This commit introduces a new function that can correctly unwrap
object info from both regular objects and `fs.OverrideRemote` types,
ensuring accurate cid/gcid calculations for uploads in all scenarios.
2024-11-14 16:36:21 +00:00
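The unwrapping idea, sketched against rclone's fs package; the UnWrap-style interface below is an assumption about the wrapper's shape, not rclone's exact helper:

    package sketch

    import "github.com/rclone/rclone/fs"

    // unwrapper is the assumed shape of wrappers such as fs.OverrideRemote.
    type unwrapper interface {
        UnWrap() fs.ObjectInfo
    }

    // baseObjectInfo walks down through any wrappers to the concrete object
    // info that carries the native cid/gcid hashes.
    func baseObjectInfo(src fs.ObjectInfo) fs.ObjectInfo {
        for {
            u, ok := src.(unwrapper)
            if !ok {
                return src
            }
            src = u.UnWrap()
        }
    }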
nielash
06ae0dfa54 local: fix --copy-links on macOS when cloning
Before this change, --copy-links erroneously behaved like --links when using cloning
on macOS, and cloning was not supported at all when using --links.

After this change, --copy-links does what it's supposed to, and takes advantage of
cloning when possible, by copying the file being linked to instead of the link
itself.

Cloning is now also supported in --links mode for regular files (which benefit
most from cloning). Symlinks in --links mode continue to be tossed back to be
handled by rclone's special translation logic.

See https://forum.rclone.org/t/macos-local-to-local-copy-with-copy-links-causes-error/47671/5?u=nielash
2024-11-14 16:36:21 +00:00
Nick Craig-Wood
912f29b5b8 Start v1.68.2-DEV development 2024-09-24 17:25:53 +01:00
Nick Craig-Wood
8d78768aaa Version v1.68.1 2024-09-24 15:47:01 +01:00
Nick Craig-Wood
6aa924f28d docs: document that fusermount3 may be needed when mounting/unmounting
See: https://forum.rclone.org/t/documentation-fusermount-vs-fusermount3/47816/
2024-09-23 17:33:09 +01:00
wiserain
48f2c2db70 pikpak: fix login issue where token retrieval fails
This addresses the login issue caused by pikpak's recent cancellation
of existing login methods and its requirement for additional verification.

To resolve this, we've made the following changes:

1. Similar to lib/oauthutil, we've integrated a mechanism to handle 
captcha tokens.

2. A new pikpakClient has been introduced to wrap the existing 
rest.Client and incorporate the necessary headers including 
x-captcha-token for each request.

3. Several options have been added/removed to support persistent 
user/client identification.

* client_id: No longer configurable.
* client_secret: Deprecated as it's no longer used.
* user_agent: A new option that defaults to PC/Firefox's user agent 
but can be overridden using the --pikpak-user-agent flag.
* device_id: A new option that is randomly generated if invalid. 
It is recommended not to delete or change it frequently.
* captcha_token: A new option that is automatically managed 
by rclone, similar to the OAuth token.

Fixes #7950 #8005
2024-09-23 17:33:09 +01:00
Nick Craig-Wood
a88066aff3 s3: fix rclone ignoring static credentials when env_auth=true
The SDKv2 conversion introduced a regression to do with setting
credentials with env_auth=true. The rclone documentation explicitly
states that env_auth only applies if secret_access_key and
access_key_id are blank, and users had been relying on that.

However after the SDKv2 conversion we were ignoring static credentials
if env_auth=true.

This fixes the problem by ignoring env_auth=true if secret_access_key
and access_key_id are both provided. This brings rclone back into line
with the documentation and users' expectations.

Fixes #8067
2024-09-23 17:33:09 +01:00
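A hedged sketch of the documented precedence with aws-sdk-go-v2 (names and structure illustrative): static keys win whenever both are supplied, and only blank keys fall through to the env_auth chain:

    package sketch

    import (
        "context"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/credentials"
    )

    func resolveCredentials(ctx context.Context, envAuth bool, accessKeyID, secretAccessKey string) (aws.CredentialsProvider, error) {
        if accessKeyID != "" && secretAccessKey != "" {
            // Static credentials take priority regardless of env_auth.
            return credentials.NewStaticCredentialsProvider(accessKeyID, secretAccessKey, ""), nil
        }
        if envAuth {
            // Fall back to the default chain: env vars, shared config, IAM role.
            cfg, err := config.LoadDefaultConfig(ctx)
            if err != nil {
                return nil, err
            }
            return cfg.Credentials, nil
        }
        return aws.AnonymousCredentials{}, nil
    }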
Nick Craig-Wood
75f5b06ff7 fs: fix setting stringArray config values from environment variables
After the config re-organisation, the setting of stringArray config
values (eg `--exclude` set with `RCLONE_EXCLUDE`) was broken and gave
a message like this for `RCLONE_EXCLUDE=*.jpg`:

    Failed to load "filter" default values: failed to initialise "filter" options:
    couldn't parse config item "exclude" = "*.jpg" as []string: parsing "*.jpg" as []string failed:
    invalid character '/' looking for beginning of value

This was caused by the parser trying to parse the input string as a
JSON value.

When the config was re-organised it was thought that the internal
representation of stringArray values was not important as it was never
visible externally, however this turned out not to be true.

A defined representation was chosen - a comma-separated string - and
this was documented and tests were introduced in this patch.

This potentially introduces a very small backwards incompatibility. In
rclone v1.67.0

    RCLONE_EXCLUDE=a,b

Would be interpreted as

    --exclude "a,b"

Whereas this new code will interpret it as

    --exclude "a" --exclude "b"

The benefit of being able to set multiple values with an environment
variable was deemed to outweigh the very small backwards compatibility
risk.

If a value with a `,` is needed, then use CSV escaping, eg

    RCLONE_EXCLUDE="a,b"

Note this needs to include the quotes, so at the unix shell that would be:

    RCLONE_EXCLUDE='"a,b"'

Fixes #8063
2024-09-23 17:33:09 +01:00
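The chosen representation can be illustrated with Go's encoding/csv, parsing the variable as a single CSV record (a sketch, not rclone's parser):

    package main

    import (
        "encoding/csv"
        "fmt"
        "strings"
    )

    // parseStringArray splits a comma-separated value with CSV escaping.
    func parseStringArray(val string) ([]string, error) {
        r := csv.NewReader(strings.NewReader(val))
        return r.Read() // one line, one record
    }

    func main() {
        a, _ := parseStringArray(`a,b`)
        b, _ := parseStringArray(`"a,b"`)
        fmt.Println(a) // [a b]   - two exclude patterns
        fmt.Println(b) // [a,b]   - one pattern containing a comma
    }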
Nick Craig-Wood
daeeb7c145 rc: fix default value of --metrics-addr
Before this fix it was the empty string, which isn't a good default for
a stringArray.
2024-09-23 17:33:09 +01:00
Nick Craig-Wood
d6a5fc6ffa fs: fix --dump filters not always appearing
Before this fix, we initialised the options blocks in a random order.
This meant that there was a 50/50 chance whether --dump filters would
show the filters or not, as it depended on the "main" block having
been read first to set the Dump flags.

This initialises the options blocks in a defined order - alphabetical,
but with "main" first - which fixes the problem.
2024-09-23 17:33:09 +01:00
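A small sketch of one way to get such a defined order - alphabetical with "main" first - assuming blocks are looked up by name:

    package sketch

    import "sort"

    // initOrder returns block names sorted alphabetically, with "main"
    // moved to the front so its flags are set before other blocks read them.
    func initOrder(names []string) []string {
        sort.Strings(names)
        ordered := make([]string, 0, len(names))
        for _, n := range names {
            if n == "main" {
                ordered = append(ordered, n)
            }
        }
        for _, n := range names {
            if n != "main" {
                ordered = append(ordered, n)
            }
        }
        return ordered
    }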
Nick Craig-Wood
c0bfedf99c docs: correct notes on docker manual build 2024-09-23 17:33:09 +01:00
ttionya
76b76c30bf build: fix docker release build - fixes #8062
This updates the action to use `docker/build-push-action` instead of `ilteoood/docker_buildx`
which fixes the build problem in testing.
2024-09-23 17:33:09 +01:00
Pawel Palucha
737fcc804f docs: add section for improving performance for s3 2024-09-23 17:33:09 +01:00
Nick Craig-Wood
70f3965354 onedrive: fix spurious "Couldn't decode error response: EOF" DEBUG
This DEBUG was being generated on redirects, which don't have a JSON
body, so it was irrelevant.
2024-09-23 17:33:09 +01:00
Divyam
d5c100edaf serve docker: add missing vfs-read-chunk-streams option in docker volume driver 2024-09-23 17:33:09 +01:00
Nick Craig-Wood
dc7458cea0 Start v1.68.1-DEV development 2024-09-23 17:29:48 +01:00
547 changed files with 14459 additions and 28871 deletions

View File

@@ -17,21 +17,22 @@ on:
       manual:
         description: Manual run (bypass default conditions)
         type: boolean
+        required: true
         default: true

 jobs:
   build:
-    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
+    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
     timeout-minutes: 60
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.23']
+        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.21', 'go1.22']

         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '>=1.24.0-rc.1'
+            go: '>=1.23.0-rc.1'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -42,14 +43,14 @@ jobs:
           - job_name: linux_386
             os: ubuntu-latest
-            go: '>=1.24.0-rc.1'
+            go: '>=1.23.0-rc.1'
             goarch: 386
             gotags: cmount
             quicktest: true

           - job_name: mac_amd64
             os: macos-latest
-            go: '>=1.24.0-rc.1'
+            go: '>=1.23.0-rc.1'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -58,14 +59,14 @@ jobs:
           - job_name: mac_arm64
             os: macos-latest
-            go: '>=1.24.0-rc.1'
+            go: '>=1.23.0-rc.1'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true

           - job_name: windows
             os: windows-latest
-            go: '>=1.24.0-rc.1'
+            go: '>=1.23.0-rc.1'
             gotags: cmount
             cgo: '0'
             build_flags: '-include "^windows/"'
@@ -75,14 +76,20 @@ jobs:
           - job_name: other_os
             os: ubuntu-latest
-            go: '>=1.24.0-rc.1'
+            go: '>=1.23.0-rc.1'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true

-          - job_name: go1.23
+          - job_name: go1.21
             os: ubuntu-latest
-            go: '1.23'
+            go: '1.21'
+            quicktest: true
+            racequicktest: true
+
+          - job_name: go1.22
+            os: ubuntu-latest
+            go: '1.22'
             quicktest: true
             racequicktest: true
@@ -117,8 +124,7 @@ jobs:
           sudo modprobe fuse
           sudo chmod 666 /dev/fuse
           sudo chown root:$USER /etc/fuse.conf
-          sudo apt-get update
-          sudo apt-get install -y fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
+          sudo apt-get install fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
         if: matrix.os == 'ubuntu-latest'

       - name: Install Libraries on macOS
@@ -211,7 +217,7 @@ jobs:
         if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'

   lint:
-    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
+    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
     timeout-minutes: 30
     name: "lint"
     runs-on: ubuntu-latest
@@ -226,8 +232,6 @@ jobs:
       - name: Checkout
         uses: actions/checkout@v4
-        with:
-          fetch-depth: 0

       - name: Install Go
         id: setup-go
@@ -291,12 +295,8 @@ jobs:
       - name: Scan for vulnerabilities
         run: govulncheck ./...

-      - name: Scan edits of autogenerated files
-        run: bin/check_autogenerated_edits.py
-        if: github.event_name == 'pull_request'

   android:
-    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
+    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
     timeout-minutes: 30
     name: "android-all"
     runs-on: ubuntu-latest
@@ -311,7 +311,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v5
         with:
-          go-version: '>=1.24.0-rc.1'
+          go-version: '>=1.23.0-rc.1'

       - name: Set global environment variables
         shell: bash

View File

@@ -0,0 +1,77 @@
name: Docker beta build

on:
  push:
    branches:
      - master

jobs:
  build:
    if: github.repository == 'rclone/rclone'
    runs-on: ubuntu-latest
    name: Build image job
    steps:
      - name: Free some space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ghcr.io/${{ github.repository }}
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          # This is the user that triggered the Workflow. In this case, it will
          # either be the user whom created the Release or manually triggered
          # the workflow_dispatch.
          username: ${{ github.actor }}
          # `secrets.GITHUB_TOKEN` is a secret that's automatically generated by
          # GitHub Actions at the start of a workflow run to identify the job.
          # This is used to authenticate against GitHub Container Registry.
          # See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
          # for more detailed information.
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Show disk usage
        shell: bash
        run: |
          df -h .
      - name: Build and publish image
        uses: docker/build-push-action@v6
        with:
          file: Dockerfile
          context: .
          push: true # push the image to ghcr
          tags: |
            ghcr.io/rclone/rclone:beta
            rclone/rclone:beta
          labels: ${{ steps.meta.outputs.labels }}
          platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
          cache-from: type=gha, scope=${{ github.workflow }}
          cache-to: type=gha, mode=max, scope=${{ github.workflow }}
          provenance: false
          # Eventually cache will need to be cleared if builds more frequent than once a week
          # https://github.com/docker/build-push-action/issues/252
      - name: Show disk usage
        shell: bash
        run: |
          df -h .

View File

@@ -1,294 +0,0 @@
---
# Github Actions release for rclone
# -*- compile-command: "yamllint -f parsable build_publish_docker_image.yml" -*-

name: Build & Push Docker Images

# Trigger the workflow on push or pull request
on:
  push:
    branches:
      - '**'
    tags:
      - '**'
  workflow_dispatch:
    inputs:
      manual:
        description: Manual run (bypass default conditions)
        type: boolean
        default: true

jobs:
  build-image:
    if: inputs.manual || (github.repository == 'rclone/rclone' && github.event_name != 'pull_request')
    timeout-minutes: 60
    strategy:
      fail-fast: false
      matrix:
        include:
          - platform: linux/amd64
            runs-on: ubuntu-24.04
          - platform: linux/386
            runs-on: ubuntu-24.04
          - platform: linux/arm64
            runs-on: ubuntu-24.04-arm
          - platform: linux/arm/v7
            runs-on: ubuntu-24.04-arm
          - platform: linux/arm/v6
            runs-on: ubuntu-24.04-arm

    name: Build Docker Image for ${{ matrix.platform }}
    runs-on: ${{ matrix.runs-on }}

    steps:
      - name: Free Space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .

      - name: Checkout Repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set REPO_NAME Variable
        run: |
          echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}

      - name: Set PLATFORM Variable
        run: |
          platform=${{ matrix.platform }}
          echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV

      - name: Set CACHE_NAME Variable
        shell: python
        run: |
          import os, re

          def slugify(input_string, max_length=63):
              slug = input_string.lower()
              slug = re.sub(r'[^a-z0-9 -]', ' ', slug)
              slug = slug.strip()
              slug = re.sub(r'\s+', '-', slug)
              slug = re.sub(r'-+', '-', slug)
              slug = slug[:max_length]
              slug = re.sub(r'[-]+$', '', slug)
              return slug

          ref_name_slug = "cache"

          if os.environ.get("GITHUB_REF_NAME") and os.environ['GITHUB_EVENT_NAME'] == "pull_request":
              ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME'])

          with open(os.environ['GITHUB_ENV'], 'a') as env:
              env.write(f"CACHE_NAME={ref_name_slug}\n")

      - name: Get ImageOS
        # There's no way around this, because "ImageOS" is only available to
        # processes, but the setup-go action uses it in its key.
        id: imageos
        uses: actions/github-script@v7
        with:
          result-encoding: string
          script: |
            return process.env.ImageOS

      - name: Extract Metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
        env:
          DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,manifest-descriptor # Important for digest annotation (used by Github packages)
        with:
          images: |
            ghcr.io/${{ env.REPO_NAME }}
          labels: |
            org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
            org.opencontainers.image.vendor=${{ github.repository_owner }}
            org.opencontainers.image.authors=rclone <https://github.com/rclone>
            org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
            org.opencontainers.image.revision=${{ github.sha }}
          tags: |
            type=sha
            type=ref,event=pr
            type=ref,event=branch
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}
            type=semver,pattern={{major}}.{{minor}}
            type=raw,value=beta,enable={{is_default_branch}}

      - name: Setup QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Load Go Build Cache for Docker
        id: go-cache
        uses: actions/cache@v4
        with:
          key: ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
          # Cache only the go builds, the module download is cached via the docker layer caching
          path: |
            go-build-cache

      - name: Inject Go Build Cache into Docker
        uses: reproducible-containers/buildkit-cache-dance@v3
        with:
          cache-map: |
            {
              "go-build-cache": "/root/.cache/go-build"
            }
          skip-extraction: ${{ steps.go-cache.outputs.cache-hit }}

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          # This is the user that triggered the Workflow. In this case, it will
          # either be the user whom created the Release or manually triggered
          # the workflow_dispatch.
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and Publish Image Digest
        id: build
        uses: docker/build-push-action@v6
        with:
          file: Dockerfile
          context: .
          provenance: false
          # don't specify 'tags' here (error "get can't push tagged ref by digest")
          # tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          annotations: ${{ steps.meta.outputs.annotations }}
          platforms: ${{ matrix.platform }}
          outputs: |
            type=image,name=ghcr.io/${{ env.REPO_NAME }},push-by-digest=true,name-canonical=true,push=true
          cache-from: |
            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}
          cache-to: |
            type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }},image-manifest=true,mode=max,compression=zstd

      - name: Export Image Digest
        run: |
          mkdir -p /tmp/digests
          digest="${{ steps.build.outputs.digest }}"
          touch "/tmp/digests/${digest#sha256:}"

      - name: Upload Image Digest
        uses: actions/upload-artifact@v4
        with:
          name: digests-${{ env.PLATFORM }}
          path: /tmp/digests/*
          retention-days: 1
          if-no-files-found: error

  merge-image:
    name: Merge & Push Final Docker Image
    runs-on: ubuntu-24.04
    needs:
      - build-image

    steps:
      - name: Download Image Digests
        uses: actions/download-artifact@v4
        with:
          path: /tmp/digests
          pattern: digests-*
          merge-multiple: true

      - name: Set REPO_NAME Variable
        run: |
          echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV}

      - name: Extract Metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v5
        env:
          DOCKER_METADATA_ANNOTATIONS_LEVELS: index
        with:
          images: |
            ${{ env.REPO_NAME }}
            ghcr.io/${{ env.REPO_NAME }}
          labels: |
            org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone
            org.opencontainers.image.vendor=${{ github.repository_owner }}
            org.opencontainers.image.authors=rclone <https://github.com/rclone>
            org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
            org.opencontainers.image.revision=${{ github.sha }}
          tags: |
            type=sha
            type=ref,event=pr
            type=ref,event=branch
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}
            type=semver,pattern={{major}}.{{minor}}
            type=raw,value=beta,enable={{is_default_branch}}

      - name: Extract Tags
        shell: python
        run: |
          import json, os

          metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
          metadata = json.loads(metadata_json)

          tags = [f"--tag '{tag}'" for tag in metadata["tags"]]
          tags_string = " ".join(tags)

          with open(os.environ['GITHUB_ENV'], 'a') as env:
              env.write(f"TAGS={tags_string}\n")

      - name: Extract Annotations
        shell: python
        run: |
          import json, os

          metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON']
          metadata = json.loads(metadata_json)

          annotations = [f"--annotation '{annotation}'" for annotation in metadata["annotations"]]
          annotations_string = " ".join(annotations)

          with open(os.environ['GITHUB_ENV'], 'a') as env:
              env.write(f"ANNOTATIONS={annotations_string}\n")

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          # This is the user that triggered the Workflow. In this case, it will
          # either be the user whom created the Release or manually triggered
          # the workflow_dispatch.
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Create & Push Manifest List
        working-directory: /tmp/digests
        run: |
          docker buildx imagetools create \
            ${{ env.TAGS }} \
            ${{ env.ANNOTATIONS }} \
            $(printf 'ghcr.io/${{ env.REPO_NAME }}@sha256:%s ' *)

      - name: Inspect and Run Multi-Platform Image
        run: |
          docker buildx imagetools inspect --raw ${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
          docker buildx imagetools inspect --raw ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }}
          docker run --rm ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }} version

View File

@@ -1,49 +0,0 @@
---
# Github Actions release for rclone
# -*- compile-command: "yamllint -f parsable build_publish_docker_plugin.yml" -*-

name: Release Build for Docker Plugin

on:
  release:
    types: [published]
  workflow_dispatch:
    inputs:
      manual:
        description: Manual run (bypass default conditions)
        type: boolean
        default: true

jobs:
  build_docker_volume_plugin:
    if: inputs.manual || github.repository == 'rclone/rclone'
    name: Build docker plugin job
    runs-on: ubuntu-latest
    steps:
      - name: Free some space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Build and publish docker plugin
        shell: bash
        run: |
          VER=${GITHUB_REF#refs/tags/}
          PLUGIN_USER=rclone
          docker login --username ${{ secrets.DOCKER_HUB_USER }} \
            --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
          for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
            export PLUGIN_USER PLUGIN_ARCH
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
          done
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}

View File

@@ -0,0 +1,89 @@
name: Docker release build

on:
  release:
    types: [published]

jobs:
  build:
    if: github.repository == 'rclone/rclone'
    runs-on: ubuntu-latest
    name: Build image job
    steps:
      - name: Free some space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Get actual patch version
        id: actual_patch_version
        run: echo ::set-output name=ACTUAL_PATCH_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g')
      - name: Get actual minor version
        id: actual_minor_version
        run: echo ::set-output name=ACTUAL_MINOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1,2)
      - name: Get actual major version
        id: actual_major_version
        run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKER_HUB_USER }}
          password: ${{ secrets.DOCKER_HUB_PASSWORD }}
      - name: Build and publish image
        uses: docker/build-push-action@v6
        with:
          file: Dockerfile
          context: .
          platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
          push: true
          tags: |
            rclone/rclone:latest
            rclone/rclone:${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }}
            rclone/rclone:${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }}
            rclone/rclone:${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}

  build_docker_volume_plugin:
    if: github.repository == 'rclone/rclone'
    needs: build
    runs-on: ubuntu-latest
    name: Build docker plugin job
    steps:
      - name: Free some space
        shell: bash
        run: |
          df -h .
          # Remove android SDK
          sudo rm -rf /usr/local/lib/android || true
          # Remove .net runtime
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Build and publish docker plugin
        shell: bash
        run: |
          VER=${GITHUB_REF#refs/tags/}
          PLUGIN_USER=rclone
          docker login --username ${{ secrets.DOCKER_HUB_USER }} \
            --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
          for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
            export PLUGIN_USER PLUGIN_ARCH
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
            make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
          done
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
          make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}

View File

@@ -490,7 +490,7 @@ alphabetical order of full name of remote (e.g. `drive` is ordered as
     - `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
       - make sure this has the `autogenerated options` comments in (see your reference backend docs)
       - update them in your backend with `bin/make_backend_docs.py remote`
-    - `docs/content/overview.md` - overview docs - add an entry into the Features table and the Optional Features table.
+    - `docs/content/overview.md` - overview docs
     - `docs/content/docs.md` - list of remotes in config section
     - `docs/content/_index.md` - front page of rclone.org
     - `docs/layouts/chrome/navbar.html` - add it to the website navigation

View File

@@ -1,47 +1,19 @@
 FROM golang:alpine AS builder

-ARG CGO_ENABLED=0
+COPY . /go/src/github.com/rclone/rclone/

 WORKDIR /go/src/github.com/rclone/rclone/

-RUN echo "**** Set Go Environment Variables ****" && \
-    go env -w GOCACHE=/root/.cache/go-build
-
-RUN echo "**** Install Dependencies ****" && \
-    apk add --no-cache \
-        make \
-        bash \
-        gawk \
-        git
-
-COPY go.mod .
-COPY go.sum .
-
-RUN echo "**** Download Go Dependencies ****" && \
-    go mod download -x
-
-RUN echo "**** Verify Go Dependencies ****" && \
-    go mod verify
-
-COPY . .
-
-RUN --mount=type=cache,target=/root/.cache/go-build,sharing=locked \
-    echo "**** Build Binary ****" && \
-    make
-
-RUN echo "**** Print Version Binary ****" && \
-    ./rclone version
+RUN apk add --no-cache make bash gawk git
+RUN \
+  CGO_ENABLED=0 \
+  make
+RUN ./rclone version

 # Begin final image
 FROM alpine:latest

-RUN echo "**** Install Dependencies ****" && \
-    apk add --no-cache \
-        ca-certificates \
-        fuse3 \
-        tzdata && \
-    echo "Enable user_allow_other in fuse" && \
-    echo "user_allow_other" >> /etc/fuse.conf
+RUN apk --no-cache add ca-certificates fuse3 tzdata && \
+  echo "user_allow_other" >> /etc/fuse.conf

 COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/

MANUAL.html generated

File diff suppressed because it is too large

MANUAL.md generated

File diff suppressed because it is too large

MANUAL.txt generated

File diff suppressed because it is too large

View File

@@ -144,14 +144,10 @@ MANUAL.txt: MANUAL.md
 	pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt

 commanddocs: rclone
-	-@rmdir -p '$$HOME/.config/rclone'
-	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
-	@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
+	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/

 backenddocs: rclone bin/make_backend_docs.py
-	-@rmdir -p '$$HOME/.config/rclone'
 	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py
-	@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)

 rcdocs: rclone
 	bin/make_rc_docs.sh

View File

@@ -66,7 +66,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
 * HTTP [:page_facing_up:](https://rclone.org/http/)
 * Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
-* iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/)
 * ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
 * Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
 * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
@@ -93,7 +92,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
 * Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
 * Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
-* Outscale [:page_facing_up:](https://rclone.org/s3/#outscale)
 * ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
 * pCloud [:page_facing_up:](https://rclone.org/pcloud/)
 * Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
@@ -111,7 +109,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
 * Seafile [:page_facing_up:](https://rclone.org/seafile/)
 * SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
-* Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
 * SFTP [:page_facing_up:](https://rclone.org/sftp/)
 * SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
 * StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)

View File

@@ -47,20 +47,13 @@ Early in the next release cycle update the dependencies.
 * `git commit -a -v -m "build: update all dependencies"`

 If the `make updatedirect` upgrades the version of go in the `go.mod`
-
-    go 1.22.0
-
-then go to manual mode. `go1.22` here is the lowest supported version
+then go to manual mode. `go1.20` here is the lowest supported version
 in the `go.mod`.

-If `make updatedirect` added a `toolchain` directive then remove it.
-We don't want to force a toolchain on our users. Linux packagers are
-often using a version of Go that is a few versions out of date.

 ```
 go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
 go get -d $(cat /tmp/potential-upgrades)
-go mod tidy -go=1.22 -compat=1.22
+go mod tidy -go=1.20 -compat=1.20
 ```

 If the `go mod tidy` fails use the output from it to remove the
@@ -93,16 +86,6 @@ build.
 Once it compiles locally, push it on a test branch and commit fixes
 until the tests pass.

-### Major versions
-
-The above procedure will not upgrade major versions, so v2 to v3.
-However this tool can show which major versions might need to be
-upgraded:
-
-    go run github.com/icholy/gomajor@latest list -major
-
-Expect API breakage when updating major versions.

 ## Tidy beta

 At some point after the release run
@@ -131,8 +114,8 @@ Now
 * git co ${BASE_TAG}-stable
 * git cherry-pick any fixes
-* make startstable
 * Do the steps as above
+* make startstable
 * git co master
 * `#` cherry pick the changes to the changelog - check the diff to make sure it is correct
 * git checkout ${BASE_TAG}-stable docs/content/changelog.md

View File

@@ -1 +1 @@
-v1.70.0
+v1.68.2

View File

@@ -10,7 +10,6 @@ import (
_ "github.com/rclone/rclone/backend/box" _ "github.com/rclone/rclone/backend/box"
_ "github.com/rclone/rclone/backend/cache" _ "github.com/rclone/rclone/backend/cache"
_ "github.com/rclone/rclone/backend/chunker" _ "github.com/rclone/rclone/backend/chunker"
_ "github.com/rclone/rclone/backend/cloudinary"
_ "github.com/rclone/rclone/backend/combine" _ "github.com/rclone/rclone/backend/combine"
_ "github.com/rclone/rclone/backend/compress" _ "github.com/rclone/rclone/backend/compress"
_ "github.com/rclone/rclone/backend/crypt" _ "github.com/rclone/rclone/backend/crypt"
@@ -27,7 +26,6 @@ import (
_ "github.com/rclone/rclone/backend/hdfs" _ "github.com/rclone/rclone/backend/hdfs"
_ "github.com/rclone/rclone/backend/hidrive" _ "github.com/rclone/rclone/backend/hidrive"
_ "github.com/rclone/rclone/backend/http" _ "github.com/rclone/rclone/backend/http"
_ "github.com/rclone/rclone/backend/iclouddrive"
_ "github.com/rclone/rclone/backend/imagekit" _ "github.com/rclone/rclone/backend/imagekit"
_ "github.com/rclone/rclone/backend/internetarchive" _ "github.com/rclone/rclone/backend/internetarchive"
_ "github.com/rclone/rclone/backend/jottacloud" _ "github.com/rclone/rclone/backend/jottacloud"

File diff suppressed because it is too large

View File

@@ -3,149 +3,16 @@
 package azureblob

 import (
-	"context"
-	"encoding/base64"
-	"strings"
 	"testing"

-	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fstest"
-	"github.com/rclone/rclone/fstest/fstests"
-	"github.com/rclone/rclone/lib/random"
 	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
 )

-func TestBlockIDCreator(t *testing.T) {
-	// Check creation and random number
-	bic, err := newBlockIDCreator()
-	require.NoError(t, err)
-	bic2, err := newBlockIDCreator()
-	require.NoError(t, err)
-	assert.NotEqual(t, bic.random, bic2.random)
-	assert.NotEqual(t, bic.random, [8]byte{})
-
-	// Set random to known value for tests
-	bic.random = [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
-	chunkNumber := uint64(0xFEDCBA9876543210)
-
-	// Check creation of ID
-	want := base64.StdEncoding.EncodeToString([]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10, 1, 2, 3, 4, 5, 6, 7, 8})
-	assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", want)
-	got := bic.newBlockID(chunkNumber)
-	assert.Equal(t, want, got)
-	assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", got)
-
-	// Test checkID is working
-	assert.NoError(t, bic.checkID(chunkNumber, got))
-	assert.ErrorContains(t, bic.checkID(chunkNumber, "$"+got), "illegal base64")
-	assert.ErrorContains(t, bic.checkID(chunkNumber, "AAAA"+got), "bad block ID length")
-	assert.ErrorContains(t, bic.checkID(chunkNumber+1, got), "expecting decoded")
-	assert.ErrorContains(t, bic2.checkID(chunkNumber, got), "random bytes")
-}
-
-func (f *Fs) testFeatures(t *testing.T) {
-	// Check first feature flags are set on this remote
+func (f *Fs) InternalTest(t *testing.T) {
+	// Check first feature flags are set on this
+	// remote
 	enabled := f.Features().SetTier
 	assert.True(t, enabled)
 	enabled = f.Features().GetTier
 	assert.True(t, enabled)
 }
-
-type ReadSeekCloser struct {
-	*strings.Reader
-}
-
-func (r *ReadSeekCloser) Close() error {
-	return nil
-}
-
-// Stage a block at remote but don't commit it
-func (f *Fs) stageBlockWithoutCommit(ctx context.Context, t *testing.T, remote string) {
-	var (
-		containerName, blobPath = f.split(remote)
-		containerClient         = f.cntSVC(containerName)
-		blobClient              = containerClient.NewBlockBlobClient(blobPath)
-		data                    = "uncommitted data"
-		blockID                 = "1"
-		blockIDBase64           = base64.StdEncoding.EncodeToString([]byte(blockID))
-	)
-	r := &ReadSeekCloser{strings.NewReader(data)}
-	_, err := blobClient.StageBlock(ctx, blockIDBase64, r, nil)
-	require.NoError(t, err)
-
-	// Verify the block is staged but not committed
-	blockList, err := blobClient.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
-	require.NoError(t, err)
-	found := false
-	for _, block := range blockList.UncommittedBlocks {
-		if *block.Name == blockIDBase64 {
-			found = true
-			break
-		}
-	}
-	require.True(t, found, "Block ID not found in uncommitted blocks")
-}
-
-// This tests uploading a blob where it has uncommitted blocks with a different ID size.
-//
-// https://gauravmantri.com/2013/05/18/windows-azure-blob-storage-dealing-with-the-specified-blob-or-block-content-is-invalid-error/
-//
-// TestIntegration/FsMkdir/FsPutFiles/Internal/WriteUncommittedBlocks
-func (f *Fs) testWriteUncommittedBlocks(t *testing.T) {
-	var (
-		ctx    = context.Background()
-		remote = "testBlob"
-	)
-
-	// Multipart copy the blob please
-	oldUseCopyBlob, oldCopyCutoff := f.opt.UseCopyBlob, f.opt.CopyCutoff
-	f.opt.UseCopyBlob = false
-	f.opt.CopyCutoff = f.opt.ChunkSize
-	defer func() {
-		f.opt.UseCopyBlob, f.opt.CopyCutoff = oldUseCopyBlob, oldCopyCutoff
-	}()
-
-	// Create a blob with uncommitted blocks
-	f.stageBlockWithoutCommit(ctx, t, remote)
-
-	// Now attempt to overwrite the block with a different sized block ID to provoke this error
-
-	// Check the object does not exist
-	_, err := f.NewObject(ctx, remote)
-	require.Equal(t, fs.ErrorObjectNotFound, err)
-
-	// Upload a multipart file over the block with uncommitted chunks of a different ID size
-	size := 4*int(f.opt.ChunkSize) - 1
-	contents := random.String(size)
-	item := fstest.NewItem(remote, contents, fstest.Time("2001-05-06T04:05:06.499Z"))
-	o := fstests.PutTestContents(ctx, t, f, &item, contents, true)
-
-	// Check size
-	assert.Equal(t, int64(size), o.Size())
-
-	// Create a new blob with uncommitted blocks
-	newRemote := "testBlob2"
-	f.stageBlockWithoutCommit(ctx, t, newRemote)
-
-	// Copy over that block
-	dst, err := f.Copy(ctx, o, newRemote)
-	require.NoError(t, err)
-
-	// Check basics
-	assert.Equal(t, int64(size), dst.Size())
-	assert.Equal(t, newRemote, dst.Remote())
-
-	// Check contents
-	gotContents := fstests.ReadObject(ctx, t, dst, -1)
-	assert.Equal(t, contents, gotContents)
-
-	// Remove the object
-	require.NoError(t, dst.Remove(ctx))
-}
-
-func (f *Fs) InternalTest(t *testing.T) {
-	t.Run("Features", f.testFeatures)
-	t.Run("WriteUncommittedBlocks", f.testWriteUncommittedBlocks)
-}

View File

@@ -15,17 +15,13 @@ import (
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
-	name := "TestAzureBlob"
 	fstests.Run(t, &fstests.Opt{
-		RemoteName: name + ":",
+		RemoteName: "TestAzureBlob:",
 		NilObject:  (*Object)(nil),
 		TiersToTest: []string{"Hot", "Cool", "Cold"},
 		ChunkedUpload: fstests.ChunkedUploadConfig{
 			MinChunkSize: defaultChunkSize,
 		},
-		ExtraConfig: []fstests.ExtraConfigItem{
-			{Name: name, Key: "use_copy_blob", Value: "false"},
-		},
 	})
 }
@@ -44,7 +40,6 @@ func TestIntegration2(t *testing.T) {
 		},
 		ExtraConfig: []fstests.ExtraConfigItem{
 			{Name: name, Key: "directory_markers", Value: "true"},
-			{Name: name, Key: "use_copy_blob", Value: "false"},
 		},
 	})
 }
@@ -53,13 +48,8 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
 	return f.setUploadChunkSize(cs)
 }

-func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
-	return f.setCopyCutoff(cs)
-}
-
 var (
 	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
-	_ fstests.SetCopyCutoffer     = (*Fs)(nil)
 )

 func TestValidateAccessTier(t *testing.T) {

View File

@@ -237,30 +237,6 @@ msi_client_id, or msi_mi_res_id parameters.`,
Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.", Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
Advanced: true, Advanced: true,
Sensitive: true, Sensitive: true,
}, {
Name: "disable_instance_discovery",
Help: `Skip requesting Microsoft Entra instance metadata
This should be set true only by applications authenticating in
disconnected clouds, or private clouds such as Azure Stack.
It determines whether rclone requests Microsoft Entra instance
metadata from ` + "`https://login.microsoft.com/`" + ` before
authenticating.
Setting this to true will skip this request, making you responsible
for ensuring the configured authority is valid and trustworthy.
`,
Default: false,
Advanced: true,
}, {
Name: "use_az",
Help: `Use Azure CLI tool az for authentication
Set to use the [Azure CLI tool az](https://learn.microsoft.com/en-us/cli/azure/)
as the sole means of authentication.
Setting this can be useful if you wish to use the az CLI on a host with
a System Managed Identity that you do not want to use.
Don't set env_auth at the same time.
`,
Default: false,
Advanced: true,
}, { }, {
Name: "endpoint", Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.", Help: "Endpoint for the service.\n\nLeave blank normally.",
@@ -343,12 +319,10 @@ type Options struct {
Username string `config:"username"` Username string `config:"username"`
Password string `config:"password"` Password string `config:"password"`
ServicePrincipalFile string `config:"service_principal_file"` ServicePrincipalFile string `config:"service_principal_file"`
DisableInstanceDiscovery bool `config:"disable_instance_discovery"`
UseMSI bool `config:"use_msi"` UseMSI bool `config:"use_msi"`
MSIObjectID string `config:"msi_object_id"` MSIObjectID string `config:"msi_object_id"`
MSIClientID string `config:"msi_client_id"` MSIClientID string `config:"msi_client_id"`
MSIResourceID string `config:"msi_mi_res_id"` MSIResourceID string `config:"msi_mi_res_id"`
UseAZ bool `config:"use_az"`
Endpoint string `config:"endpoint"` Endpoint string `config:"endpoint"`
ChunkSize fs.SizeSuffix `config:"chunk_size"` ChunkSize fs.SizeSuffix `config:"chunk_size"`
MaxStreamSize fs.SizeSuffix `config:"max_stream_size"` MaxStreamSize fs.SizeSuffix `config:"max_stream_size"`
@@ -419,10 +393,8 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
policyClientOptions := policy.ClientOptions{ policyClientOptions := policy.ClientOptions{
Transport: newTransporter(ctx), Transport: newTransporter(ctx),
} }
backup := service.ShareTokenIntentBackup
clientOpt := service.ClientOptions{ clientOpt := service.ClientOptions{
ClientOptions: policyClientOptions, ClientOptions: policyClientOptions,
FileRequestIntent: &backup,
} }
// Here we auth by setting one of cred, sharedKeyCred or f.client // Here we auth by setting one of cred, sharedKeyCred or f.client
@@ -440,8 +412,7 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
} }
// Read credentials from the environment // Read credentials from the environment
options := azidentity.DefaultAzureCredentialOptions{ options := azidentity.DefaultAzureCredentialOptions{
ClientOptions: policyClientOptions, ClientOptions: policyClientOptions,
DisableInstanceDiscovery: opt.DisableInstanceDiscovery,
} }
cred, err = azidentity.NewDefaultAzureCredential(&options) cred, err = azidentity.NewDefaultAzureCredential(&options)
if err != nil { if err != nil {
@@ -452,13 +423,6 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
if err != nil { if err != nil {
return nil, fmt.Errorf("create new shared key credential failed: %w", err) return nil, fmt.Errorf("create new shared key credential failed: %w", err)
} }
case opt.UseAZ:
var options = azidentity.AzureCLICredentialOptions{}
cred, err = azidentity.NewAzureCLICredential(&options)
fmt.Println(cred)
if err != nil {
return nil, fmt.Errorf("failed to create Azure CLI credentials: %w", err)
}
case opt.SASURL != "": case opt.SASURL != "":
client, err = service.NewClientWithNoCredential(opt.SASURL, &clientOpt) client, err = service.NewClientWithNoCredential(opt.SASURL, &clientOpt)
if err != nil { if err != nil {
@@ -933,7 +897,7 @@ func (o *Object) getMetadata(ctx context.Context) error {
// Hash returns the MD5 of an object returning a lowercase hex string // Hash returns the MD5 of an object returning a lowercase hex string
// //
// May make a network request because the [fs.List] method does not // May make a network request becaue the [fs.List] method does not
// return MD5 hashes for DirEntry // return MD5 hashes for DirEntry
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) { func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
if ty != hash.MD5 { if ty != hash.MD5 {

View File

@@ -61,7 +61,7 @@ const chars = "abcdefghijklmnopqrstuvwzyxABCDEFGHIJKLMNOPQRSTUVWZYX"
 func randomString(charCount int) string {
 	strBldr := strings.Builder{}
-	for range charCount {
+	for i := 0; i < charCount; i++ {
 		randPos := rand.Int63n(52)
 		strBldr.WriteByte(chars[randPos])
 	}

View File

@@ -42,10 +42,9 @@ type Bucket struct {
 // LifecycleRule is a single lifecycle rule
 type LifecycleRule struct {
 	DaysFromHidingToDeleting  *int   `json:"daysFromHidingToDeleting"`
 	DaysFromUploadingToHiding *int   `json:"daysFromUploadingToHiding"`
-	DaysFromStartingToCancelingUnfinishedLargeFiles *int `json:"daysFromStartingToCancelingUnfinishedLargeFiles"`
 	FileNamePrefix            string `json:"fileNamePrefix"`
 }

 // Timestamp is a UTC time when this file was uploaded. It is a base
@@ -130,10 +129,10 @@ type AuthorizeAccountResponse struct {
 	AbsoluteMinimumPartSize int    `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
 	AccountID               string `json:"accountId"`               // The identifier for the account.
 	Allowed                 struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
 		BucketID     string   `json:"bucketId"`     // When present, access is restricted to one bucket.
 		BucketName   string   `json:"bucketName"`   // When present, name of bucket - may be empty
 		Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
-		NamePrefix   any      `json:"namePrefix"`   // When present, access is restricted to files whose names start with the prefix
+		NamePrefix   interface{} `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
 	} `json:"allowed"`
 	APIURL             string `json:"apiUrl"`             // The base URL to use for all API calls except for uploading and downloading files.
 	AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.

@@ -16,7 +16,6 @@ import (
"io" "io"
"net/http" "net/http"
"path" "path"
"slices"
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
@@ -31,8 +30,7 @@ import (
"github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list" "github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/bucket" "github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/multipart" "github.com/rclone/rclone/lib/multipart"
@@ -301,13 +299,14 @@ type Fs struct {
 // Object describes a b2 object
 type Object struct {
 	fs       *Fs               // what this object is part of
 	remote   string            // The remote path
 	id       string            // b2 id of the file
 	modTime  time.Time         // The modified time of the object if known
 	sha1     string            // SHA-1 hash if known
 	size     int64             // Size of the object
 	mimeType string            // Content-Type of the object
+	meta     map[string]string // The object metadata if known - may be nil - with lower case keys
 }

 // ------------------------------------------------------------
@@ -590,7 +589,12 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
 // hasPermission returns if the current AuthorizationToken has the selected permission
 func (f *Fs) hasPermission(permission string) bool {
-	return slices.Contains(f.info.Allowed.Capabilities, permission)
+	for _, capability := range f.info.Allowed.Capabilities {
+		if capability == permission {
+			return true
+		}
+	}
+	return false
 }

 // getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
@@ -918,7 +922,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 // of listing recursively that doing a directory traversal.
 func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
 	bucket, directory := f.split(dir)
-	list := list.NewHelper(callback)
+	list := walk.NewListRHelper(callback)
 	listR := func(bucket, directory, prefix string, addBucket bool) error {
 		last := ""
 		return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
@@ -1271,7 +1275,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
 	toBeDeleted := make(chan *api.File, f.ci.Transfers)
 	var wg sync.WaitGroup
 	wg.Add(f.ci.Transfers)
-	for range f.ci.Transfers {
+	for i := 0; i < f.ci.Transfers; i++ {
 		go func() {
 			defer wg.Done()
 			for object := range toBeDeleted {
@@ -1314,22 +1318,16 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
 			// Check current version of the file
 			if deleteHidden && object.Action == "hide" {
 				fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
-				if !operations.SkipDestructive(ctx, object.Name, "remove hide marker") {
-					toBeDeleted <- object
-				}
+				toBeDeleted <- object
 			} else if deleteUnfinished && object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
 				fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
-				if !operations.SkipDestructive(ctx, object.Name, "remove pending upload") {
-					toBeDeleted <- object
-				}
+				toBeDeleted <- object
 			} else {
 				fs.Debugf(remote, "Not deleting current version (id %q) %q dated %v (%v ago)", object.ID, object.Action, time.Time(object.UploadTimestamp).Local(), time.Since(time.Time(object.UploadTimestamp)))
 			}
 		} else {
 			fs.Debugf(remote, "Deleting (id %q)", object.ID)
-			if !operations.SkipDestructive(ctx, object.Name, "delete") {
-				toBeDeleted <- object
-			}
+			toBeDeleted <- object
 		}
 		last = remote
 		tr.Done(ctx, nil)
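
Both purge hunks feed the same bounded worker pool: wg.Add(f.ci.Transfers) goroutines drain the toBeDeleted channel until it is closed, so the lister never runs more than one channel buffer ahead of the deleters. A stripped-down sketch of the pattern (the deleteOne callback is hypothetical, not the rclone API):

package main

import "sync"

func purgeAll(items []string, transfers int, deleteOne func(string)) {
	toBeDeleted := make(chan string, transfers)
	var wg sync.WaitGroup
	wg.Add(transfers)
	for i := 0; i < transfers; i++ {
		go func() {
			defer wg.Done()
			for item := range toBeDeleted {
				deleteOne(item) // each worker drains the shared channel
			}
		}()
	}
	for _, item := range items {
		toBeDeleted <- item // blocks once the buffer fills and workers are busy
	}
	close(toBeDeleted) // lets the workers' range loops terminate
	wg.Wait()
}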
@@ -1600,6 +1598,9 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
 	if err != nil {
 		return err
 	}
+	// For now, just set "mtime" in metadata
+	o.meta = make(map[string]string, 1)
+	o.meta["mtime"] = o.modTime.Format(time.RFC3339Nano)
 	return nil
 }
@@ -1879,6 +1880,13 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
 		Info:     Info,
 	}

+	// Embryonic metadata support - just mtime
+	o.meta = make(map[string]string, 1)
+	modTime, err := parseTimeStringHelper(info.Info[timeKey])
+	if err == nil {
+		o.meta["mtime"] = modTime.Format(time.RFC3339Nano)
+	}

 	// When reading files from B2 via cloudflare using
 	// --b2-download-url cloudflare strips the Content-Length
 	// headers (presumably so it can inject stuff) so use the old
@@ -1935,7 +1943,7 @@ func init() {
 // urlEncode encodes in with % encoding
 func urlEncode(in string) string {
 	var out bytes.Buffer
-	for i := range len(in) {
+	for i := 0; i < len(in); i++ {
 		c := in[i]
 		if noNeedToEncode[c] {
 			_ = out.WriteByte(c)
@@ -2223,7 +2231,6 @@ This will dump something like this showing the lifecycle rules.
     {
         "daysFromHidingToDeleting": 1,
         "daysFromUploadingToHiding": null,
-        "daysFromStartingToCancelingUnfinishedLargeFiles": null,
         "fileNamePrefix": ""
     }
 ]
@@ -2250,13 +2257,12 @@ overwrites will still cause versions to be made.
 See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
 `,
 	Opts: map[string]string{
 		"daysFromHidingToDeleting":  "After a file has been hidden for this many days it is deleted. 0 is off.",
 		"daysFromUploadingToHiding": "This many days after uploading a file is hidden",
-		"daysFromStartingToCancelingUnfinishedLargeFiles": "Cancels any unfinished large file versions after this many days",
 	},
 }

-func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
+func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
 	var newRule api.LifecycleRule
 	if daysStr := opt["daysFromHidingToDeleting"]; daysStr != "" {
 		days, err := strconv.Atoi(daysStr)
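
For orientation, this command is reached through rclone's generic backend command, so reading the rules and setting them looks roughly like the following (bucket name illustrative; the -o keys are the Opts documented above):

rclone backend lifecycle b2:bucket
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30
rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=40 -o daysFromHidingToDeleting=30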
@@ -2272,23 +2278,14 @@ func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, op
 		}
 		newRule.DaysFromUploadingToHiding = &days
 	}
-	if daysStr := opt["daysFromStartingToCancelingUnfinishedLargeFiles"]; daysStr != "" {
-		days, err := strconv.Atoi(daysStr)
-		if err != nil {
-			return nil, fmt.Errorf("bad daysFromStartingToCancelingUnfinishedLargeFiles: %w", err)
-		}
-		newRule.DaysFromStartingToCancelingUnfinishedLargeFiles = &days
-	}
 	bucketName, _ := f.split("")
 	if bucketName == "" {
 		return nil, errors.New("bucket required")
 	}
-	skip := operations.SkipDestructive(ctx, name, "update lifecycle rules")
 	var bucket *api.Bucket
-	if !skip && (newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil || newRule.DaysFromStartingToCancelingUnfinishedLargeFiles != nil) {
+	if newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil {
 		bucketID, err := f.getBucketID(ctx, bucketName)
 		if err != nil {
 			return nil, err
@@ -2345,7 +2342,7 @@ Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
 	},
 }

-func (f *Fs) cleanupCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
+func (f *Fs) cleanupCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
 	maxAge := defaultMaxAge
 	if opt["max-age"] != "" {
 		maxAge, err = fs.ParseDuration(opt["max-age"])
@@ -2368,7 +2365,7 @@ it would do.
 `,
 }

-func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
+func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
 	return nil, f.cleanUp(ctx, true, false, 0)
 }
@@ -2387,7 +2384,7 @@ var commandHelp = []fs.CommandHelp{
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
 	switch name {
 	case "lifecycle":
 		return f.lifecycleCommand(ctx, name, arg, opt)

@@ -5,7 +5,6 @@ import (
"crypto/sha1" "crypto/sha1"
"fmt" "fmt"
"path" "path"
"sort"
"strings" "strings"
"testing" "testing"
"time" "time"
@@ -14,7 +13,6 @@ import (
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache" "github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests" "github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/bucket" "github.com/rclone/rclone/lib/bucket"
@@ -258,6 +256,12 @@ func (f *Fs) internalTestMetadata(t *testing.T, size string, uploadCutoff string
 		assert.Equal(t, v, got, k)
 	}

+	// mtime
+	for k, v := range metadata {
+		got := o.meta[k]
+		assert.Equal(t, v, got, k)
+	}

 	assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")

 	// Modification time from the x-bz-info-src_last_modified_millis header
@@ -459,161 +463,24 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
 	})

 	t.Run("Cleanup", func(t *testing.T) {
-		t.Run("DryRun", func(t *testing.T) {
-			f.opt.Versions = true
-			defer func() {
-				f.opt.Versions = false
-			}()
-			// Listing should be unchanged after dry run
-			before := listAllFiles(ctx, t, f, dirName)
-			ctx, ci := fs.AddConfig(ctx)
-			ci.DryRun = true
-			require.NoError(t, f.cleanUp(ctx, true, false, 0))
-			after := listAllFiles(ctx, t, f, dirName)
-			assert.Equal(t, before, after)
-		})
-		t.Run("RealThing", func(t *testing.T) {
-			f.opt.Versions = true
-			defer func() {
-				f.opt.Versions = false
-			}()
-			// Listing should reflect current state after cleanup
-			require.NoError(t, f.cleanUp(ctx, true, false, 0))
-			items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
-			fstest.CheckListing(t, f, items)
-		})
+		require.NoError(t, f.cleanUp(ctx, true, false, 0))
+		items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
+		fstest.CheckListing(t, f, items)
+		// Set --b2-versions for this test
+		f.opt.Versions = true
+		defer func() {
+			f.opt.Versions = false
+		}()
+		fstest.CheckListing(t, f, items)
 	})
 	// Purge gets tested later
 }

-func (f *Fs) InternalTestCleanupUnfinished(t *testing.T) {
-	ctx := context.Background()
-	// B2CleanupHidden tests cleaning up hidden files
-	t.Run("CleanupUnfinished", func(t *testing.T) {
-		dirName := "unfinished"
-		fileCount := 5
-		expectedFiles := []string{}
-		for i := 1; i < fileCount; i++ {
-			fileName := fmt.Sprintf("%s/unfinished-%d", dirName, i)
-			expectedFiles = append(expectedFiles, fileName)
-			obj := &Object{
-				fs:     f,
-				remote: fileName,
-			}
-			objInfo := object.NewStaticObjectInfo(fileName, fstest.Time("2002-02-03T04:05:06.499999999Z"), -1, true, nil, nil)
-			_, err := f.newLargeUpload(ctx, obj, nil, objInfo, f.opt.ChunkSize, false, nil)
-			require.NoError(t, err)
-		}
-		checkListing(ctx, t, f, dirName, expectedFiles)
-		t.Run("DryRun", func(t *testing.T) {
-			// Listing should not change after dry run
-			ctx, ci := fs.AddConfig(ctx)
-			ci.DryRun = true
-			require.NoError(t, f.cleanUp(ctx, false, true, 0))
-			checkListing(ctx, t, f, dirName, expectedFiles)
-		})
-		t.Run("RealThing", func(t *testing.T) {
-			// Listing should be empty after real cleanup
-			require.NoError(t, f.cleanUp(ctx, false, true, 0))
-			checkListing(ctx, t, f, dirName, []string{})
-		})
-	})
-}

-func listAllFiles(ctx context.Context, t *testing.T, f *Fs, dirName string) []string {
-	bucket, directory := f.split(dirName)
-	foundFiles := []string{}
-	require.NoError(t, f.list(ctx, bucket, directory, "", false, true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
-		if !isDirectory {
-			foundFiles = append(foundFiles, object.Name)
-		}
-		return nil
-	}))
-	sort.Strings(foundFiles)
-	return foundFiles
-}

-func checkListing(ctx context.Context, t *testing.T, f *Fs, dirName string, expectedFiles []string) {
-	foundFiles := listAllFiles(ctx, t, f, dirName)
-	sort.Strings(expectedFiles)
-	assert.Equal(t, expectedFiles, foundFiles)
-}

-func (f *Fs) InternalTestLifecycleRules(t *testing.T) {
-	ctx := context.Background()
-	opt := map[string]string{}
-	t.Run("InitState", func(t *testing.T) {
-		// There should be no lifecycle rules at the outset
-		lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
-		lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
-		require.NoError(t, err)
-		assert.Equal(t, 0, len(lifecycleRules))
-	})
-	t.Run("DryRun", func(t *testing.T) {
-		// There should still be no lifecycle rules after each dry run operation
-		ctx, ci := fs.AddConfig(ctx)
-		ci.DryRun = true
-		opt["daysFromHidingToDeleting"] = "30"
-		lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
-		lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
-		require.NoError(t, err)
-		assert.Equal(t, 0, len(lifecycleRules))
-		delete(opt, "daysFromHidingToDeleting")
-		opt["daysFromUploadingToHiding"] = "40"
-		lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
-		lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
-		require.NoError(t, err)
-		assert.Equal(t, 0, len(lifecycleRules))
-		opt["daysFromHidingToDeleting"] = "30"
-		lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
-		lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
-		require.NoError(t, err)
-		assert.Equal(t, 0, len(lifecycleRules))
-	})
-	t.Run("RealThing", func(t *testing.T) {
-		opt["daysFromHidingToDeleting"] = "30"
-		lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
-		lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
-		require.NoError(t, err)
-		assert.Equal(t, 1, len(lifecycleRules))
-		assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
-		delete(opt, "daysFromHidingToDeleting")
-		opt["daysFromUploadingToHiding"] = "40"
-		lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
-		lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
-		require.NoError(t, err)
-		assert.Equal(t, 1, len(lifecycleRules))
-		assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
-		opt["daysFromHidingToDeleting"] = "30"
-		lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
-		lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
-		require.NoError(t, err)
-		assert.Equal(t, 1, len(lifecycleRules))
-		assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
-		assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
-	})
-}

 // -run TestIntegration/FsMkdir/FsPutFiles/Internal
 func (f *Fs) InternalTest(t *testing.T) {
 	t.Run("Metadata", f.InternalTestMetadata)
 	t.Run("Versions", f.InternalTestVersions)
-	t.Run("CleanupUnfinished", f.InternalTestCleanupUnfinished)
-	t.Run("LifecycleRules", f.InternalTestLifecycleRules)
 }

 var _ fstests.InternalTester = (*Fs)(nil)

@@ -478,14 +478,17 @@ func (up *largeUpload) Copy(ctx context.Context) (err error) {
 		remaining = up.size
 	)
 	g.SetLimit(up.f.opt.UploadConcurrency)
-	for part := range up.parts {
+	for part := 0; part < up.parts; part++ {
 		// Fail fast, in case an errgroup managed function returns an error
 		// gCtx is cancelled. There is no point in copying all the other parts.
 		if gCtx.Err() != nil {
 			break
 		}
-		reqSize := min(remaining, up.chunkSize)
+		reqSize := remaining
+		if reqSize >= up.chunkSize {
+			reqSize = up.chunkSize
+		}
 		part := part // for the closure
 		g.Go(func() (err error) {

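This function leans on golang.org/x/sync/errgroup for bounded concurrency with fail-fast cancellation: SetLimit caps the number of in-flight parts, and the gCtx.Err() check stops scheduling new parts once any part has failed. A self-contained sketch of that shape (simplified, not the rclone code):

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, gCtx := errgroup.WithContext(context.Background())
	g.SetLimit(4) // at most 4 parts copy concurrently
	for part := 0; part < 16; part++ {
		if gCtx.Err() != nil {
			break // a part already failed; don't schedule the rest
		}
		part := part // capture for the closure (pre-Go 1.22 semantics)
		g.Go(func() error {
			fmt.Println("copying part", part)
			return nil // a non-nil error here cancels gCtx
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("copy failed:", err)
	}
}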
@@ -43,9 +43,9 @@ import (
"github.com/rclone/rclone/lib/jwtutil" "github.com/rclone/rclone/lib/jwtutil"
"github.com/rclone/rclone/lib/oauthutil" "github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/rest" "github.com/rclone/rclone/lib/rest"
"github.com/youmark/pkcs8" "github.com/youmark/pkcs8"
"golang.org/x/oauth2"
) )
const ( const (
@@ -64,10 +64,12 @@ const (
 // Globals
 var (
 	// Description of how to auth for this app
-	oauthConfig = &oauthutil.Config{
+	oauthConfig = &oauth2.Config{
 		Scopes: nil,
-		AuthURL:  "https://app.box.com/api/oauth2/authorize",
-		TokenURL: "https://app.box.com/api/oauth2/token",
+		Endpoint: oauth2.Endpoint{
+			AuthURL:  "https://app.box.com/api/oauth2/authorize",
+			TokenURL: "https://app.box.com/api/oauth2/token",
+		},
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectURL,
@@ -237,8 +239,8 @@ func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomC
 	return claims, nil
 }

-func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]any {
-	signingHeaders := map[string]any{
+func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]interface{} {
+	signingHeaders := map[string]interface{}{
 		"kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
 	}
 	return signingHeaders
@@ -254,10 +256,8 @@ func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
 }

 func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
 	block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
-	if block == nil {
-		return nil, errors.New("box: failed to PEM decode private key")
-	}
 	if len(rest) > 0 {
 		return nil, fmt.Errorf("box: extra data included in private key: %w", err)
 	}
@@ -619,7 +619,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		// fmt.Printf("...Error %v\n", err)
+		//fmt.Printf("...Error %v\n", err)
 		return "", err
 	}
 	// fmt.Printf("...Id %q\n", *info.Id)
@@ -966,26 +966,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		return nil, err
 	}

-	// check if dest already exists
-	item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
-	if err != nil {
-		return nil, err
-	}
-	if item != nil { // dest already exists, need to copy to temp name and then move
-		tempSuffix := "-rclone-copy-" + random.String(8)
-		fs.Debugf(remote, "dst already exists, copying to temp name %v", remote+tempSuffix)
-		tempObj, err := f.Copy(ctx, src, remote+tempSuffix)
-		if err != nil {
-			return nil, err
-		}
-		fs.Debugf(remote+tempSuffix, "moving to real name %v", remote)
-		err = f.deleteObject(ctx, item.ID)
-		if err != nil {
-			return nil, err
-		}
-		return f.Move(ctx, tempObj, remote)
-	}

 	// Copy the object
 	opts := rest.Opts{
 		Method: "POST",
@@ -1343,8 +1323,12 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
 	nextStreamPosition = streamPosition

 	for {
+		limit := f.opt.ListChunk
 		// box only allows a max of 500 events
-		limit := min(f.opt.ListChunk, 500)
+		if limit > 500 {
+			limit = 500
+		}
 		opts := rest.Opts{
 			Method: "GET",

@@ -105,7 +105,7 @@ func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api
 	const defaultDelay = 10
 	var tries int
 outer:
-	for tries = range maxTries {
+	for tries = 0; tries < maxTries; tries++ {
 		err = o.fs.pacer.Call(func() (bool, error) {
 			resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
 			if err != nil {
@@ -203,7 +203,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
 	errs := make(chan error, 1)
 	var wg sync.WaitGroup
 outer:
-	for part := range session.TotalParts {
+	for part := 0; part < session.TotalParts; part++ {
 		// Check any errors
 		select {
 		case err = <-errs:
@@ -211,7 +211,10 @@ outer:
 		default:
 		}

-		reqSize := min(remaining, chunkSize)
+		reqSize := remaining
+		if reqSize >= chunkSize {
+			reqSize = chunkSize
+		}

 		// Make a block of memory
 		buf := make([]byte, reqSize)

@@ -29,7 +29,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/rc" "github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fs/walk" "github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/atexit" "github.com/rclone/rclone/lib/atexit"
@@ -1087,13 +1086,13 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	return cachedEntries, nil
 }

-func (f *Fs) recurse(ctx context.Context, dir string, list *list.Helper) error {
+func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) error {
 	entries, err := f.List(ctx, dir)
 	if err != nil {
 		return err
 	}

-	for i := range entries {
+	for i := 0; i < len(entries); i++ {
 		innerDir, ok := entries[i].(fs.Directory)
 		if ok {
 			err := f.recurse(ctx, innerDir.Remote(), list)
@@ -1139,7 +1138,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 	}

 	// if we're here, we're gonna do a standard recursive traversal and cache everything
-	list := list.NewHelper(callback)
+	list := walk.NewListRHelper(callback)
 	err = f.recurse(ctx, dir, list)
 	if err != nil {
 		return err
@@ -1429,7 +1428,7 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i
 	}()

 	// wait until both are done
-	for range 2 {
+	for c := 0; c < 2; c++ {
 		<-done
 	}
 }
@@ -1754,7 +1753,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 }

 // Stats returns stats about the cache storage
-func (f *Fs) Stats() (map[string]map[string]any, error) {
+func (f *Fs) Stats() (map[string]map[string]interface{}, error) {
 	return f.cache.Stats()
 }
@@ -1934,7 +1933,7 @@ var commandHelp = []fs.CommandHelp{
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (any, error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
 	switch name {
 	case "stats":
 		return f.Stats()

@@ -360,7 +360,7 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, int64(len(checkSample)), o.Size())

-	for i := range checkSample {
+	for i := 0; i < len(checkSample); i++ {
 		require.Equal(t, testData[i], checkSample[i])
 	}
 }
@@ -387,7 +387,7 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {
 	readData, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false)
 	require.NoError(t, err)

-	for i := range readData {
+	for i := 0; i < len(readData); i++ {
 		require.Equalf(t, testData[i], readData[i], "at byte %v", i)
 	}
 }
@@ -688,7 +688,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
 	co, ok := o.(*cache.Object)
 	require.True(t, ok)

-	for i := range 4 { // read first 4
+	for i := 0; i < 4; i++ { // read first 4
 		_ = runInstance.readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false)
 	}
 	cfs.CleanUpCache(true)
@@ -971,7 +971,7 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
 	f, err := os.CreateTemp("", "rclonecache-tempfile")
 	require.NoError(t, err)

-	for range int(cnt) {
+	for i := 0; i < int(cnt); i++ {
 		data := randStringBytes(int(chunk))
 		_, _ = f.Write(data)
 	}
@@ -1085,9 +1085,9 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
 	return err
 }

-func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]any, error) {
+func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error) {
 	var err error
-	var l []any
+	var l []interface{}
 	var list fs.DirEntries
 	list, err = f.List(context.Background(), remote)
 	for _, ll := range list {
@@ -1215,7 +1215,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
 	var err error
 	var state cache.BackgroundUploadState

-	for range 2 {
+	for i := 0; i < 2; i++ {
 		select {
 		case state = <-buCh:
 			// continue
@@ -1293,7 +1293,7 @@ func (r *run) completeAllBackgroundUploads(t *testing.T, f fs.Fs, lastRemote str
 func (r *run) retryBlock(block func() error, maxRetries int, rate time.Duration) error {
 	var err error
-	for range maxRetries {
+	for i := 0; i < maxRetries; i++ {
 		err = block()
 		if err == nil {
 			return nil

@@ -17,7 +17,7 @@ func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: "TestCache:",
 		NilObject:  (*cache.Object)(nil),
-		UnimplementableFsMethods:        []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata", "ListP"},
+		UnimplementableFsMethods:        []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata"},
 		UnimplementableObjectMethods:    []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata", "SetMetadata"},
 		UnimplementableDirectoryMethods: []string{"Metadata", "SetMetadata", "SetModTime"},
 		SkipInvalidUTF8:                 true, // invalid UTF-8 confuses the cache

@@ -162,7 +162,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
 	randInstance := rand.New(rand.NewSource(time.Now().Unix()))

 	lastFile := ""
-	for i := range totalFiles {
+	for i := 0; i < totalFiles; i++ {
 		size := int64(randInstance.Intn(maxSize-minSize) + minSize)
 		testReader := runInstance.randomReader(t, size)
 		remote := "test/" + strconv.Itoa(i) + ".bin"

@@ -182,7 +182,7 @@ func (r *Handle) queueOffset(offset int64) {
 		}
 	}

-	for i := range r.workers {
+	for i := 0; i < r.workers; i++ {
 		o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
 		if o < 0 || o >= r.cachedObject.Size() {
 			continue
@@ -222,7 +222,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
 	if !found {
 		// we're gonna give the workers a chance to pickup the chunk
 		// and retry a couple of times
-		for i := range r.cacheFs().opt.ReadRetries * 8 {
+		for i := 0; i < r.cacheFs().opt.ReadRetries*8; i++ {
 			data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
 			if err == nil {
 				found = true

@@ -209,7 +209,7 @@ func (p *plexConnector) authenticate() error {
 	if err != nil {
 		return err
 	}

-	var data map[string]any
+	var data map[string]interface{}
 	err = json.NewDecoder(resp.Body).Decode(&data)
 	if err != nil {
 		return fmt.Errorf("failed to obtain token: %w", err)
@@ -273,11 +273,11 @@ func (p *plexConnector) isPlaying(co *Object) bool {
 }

 // adapted from: https://stackoverflow.com/a/28878037 (credit)
-func get(m any, path ...any) (any, bool) {
+func get(m interface{}, path ...interface{}) (interface{}, bool) {
 	for _, p := range path {
 		switch idx := p.(type) {
 		case string:
-			if mm, ok := m.(map[string]any); ok {
+			if mm, ok := m.(map[string]interface{}); ok {
 				if val, found := mm[idx]; found {
 					m = val
 					continue
@@ -285,7 +285,7 @@ func get(m any, path ...any) (any, bool) {
 			}
 			return nil, false
 		case int:
-			if mm, ok := m.([]any); ok {
+			if mm, ok := m.([]interface{}); ok {
 				if len(mm) > idx {
 					m = mm[idx]
 					continue

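The get helper walks a decoded JSON value by alternating map keys and slice indexes, returning false as soon as a step does not match. A hypothetical usage sketch (the field names are invented for illustration, not Plex's actual schema):

	// data is a map[string]interface{} produced by the json.Decoder above
	if token, ok := get(data, "user", "authToken"); ok {
		fmt.Println("token:", token)
	}
	// an int in the path indexes into a JSON array
	if title, ok := get(data, "sessions", 0, "title"); ok {
		fmt.Println("playing:", title)
	}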
@@ -18,7 +18,6 @@ import (
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/walk" "github.com/rclone/rclone/fs/walk"
bolt "go.etcd.io/bbolt" bolt "go.etcd.io/bbolt"
"go.etcd.io/bbolt/errors"
) )
// Constants // Constants
@@ -598,7 +597,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
 	})
 	if err != nil {
-		if err == errors.ErrDatabaseNotOpen {
+		if err == bolt.ErrDatabaseNotOpen {
 			// we're likely a late janitor and we need to end quietly as there's no guarantee of what exists anymore
 			return
 		}
@@ -607,16 +606,16 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
 }

 // Stats returns a go map with the stats key values
-func (b *Persistent) Stats() (map[string]map[string]any, error) {
-	r := make(map[string]map[string]any)
-	r["data"] = make(map[string]any)
+func (b *Persistent) Stats() (map[string]map[string]interface{}, error) {
+	r := make(map[string]map[string]interface{})
+	r["data"] = make(map[string]interface{})
 	r["data"]["oldest-ts"] = time.Now()
 	r["data"]["oldest-file"] = ""
 	r["data"]["newest-ts"] = time.Now()
 	r["data"]["newest-file"] = ""
 	r["data"]["total-chunks"] = 0
 	r["data"]["total-size"] = int64(0)
-	r["files"] = make(map[string]any)
+	r["files"] = make(map[string]interface{})
 	r["files"]["oldest-ts"] = time.Now()
 	r["files"]["oldest-name"] = ""
 	r["files"]["newest-ts"] = time.Now()

@@ -356,8 +356,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 		DirModTimeUpdatesOnWrite: true,
 	}).Fill(ctx, f).Mask(ctx, baseFs).WrapsFs(f, baseFs)

-	f.features.ListR = nil // Recursive listing may cause chunker skip files
-	f.features.ListP = nil // ListP not supported yet
+	f.features.Disable("ListR") // Recursive listing may cause chunker skip files

 	return f, err
 }
@@ -633,7 +632,7 @@ func (f *Fs) parseChunkName(filePath string) (parentPath string, chunkNo int, ct
 // forbidChunk prints error message or raises error if file is chunk.
 // First argument sets log prefix, use `false` to suppress message.
-func (f *Fs) forbidChunk(o any, filePath string) error {
+func (f *Fs) forbidChunk(o interface{}, filePath string) error {
 	if parentPath, _, _, _ := f.parseChunkName(filePath); parentPath != "" {
 		if f.opt.FailHard {
 			return fmt.Errorf("chunk overlap with %q", parentPath)
@@ -681,7 +680,7 @@ func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err
 	circleSec := unixSec % closestPrimeZzzzSeconds
 	first4chars := strconv.FormatInt(circleSec, 36)

-	for range maxTransactionProbes {
+	for tries := 0; tries < maxTransactionProbes; tries++ {
 		f.xactIDMutex.Lock()
 		randomness := f.xactIDRand.Int63n(maxTwoBase36Digits + 1)
 		f.xactIDMutex.Unlock()
@@ -1190,7 +1189,10 @@ func (f *Fs) put(
 		}
 		tempRemote := f.makeChunkName(baseRemote, c.chunkNo, "", xactID)
-		size := min(c.sizeLeft, c.chunkSize)
+		size := c.sizeLeft
+		if size > c.chunkSize {
+			size = c.chunkSize
+		}
 		savedReadCount := c.readCount

 		// If a single chunk is expected, avoid the extra rename operation
@@ -1475,7 +1477,10 @@ func (c *chunkingReader) dummyRead(in io.Reader, size int64) error {
 	const bufLen = 1048576 // 1 MiB
 	buf := make([]byte, bufLen)
 	for size > 0 {
-		n := min(size, bufLen)
+		n := size
+		if n > bufLen {
+			n = bufLen
+		}
 		if _, err := io.ReadFull(in, buf[0:n]); err != nil {
 			return err
 		}
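
Both sides of this hunk read and discard exactly size bytes, 1 MiB at a time. When a plain drain is all that is needed, the standard library expresses the same thing in one call; a rough equivalent, not a drop-in replacement for the method above (the error on a truncated stream also differs: io.ReadFull reports io.ErrUnexpectedEOF where io.CopyN returns io.EOF):

	// drain and discard exactly size bytes from in
	if _, err := io.CopyN(io.Discard, in, size); err != nil {
		return err
	}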
@@ -2475,7 +2480,7 @@ func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte)
 	if len(data) > maxMetadataSizeWritten {
 		return nil, false, ErrMetaTooBig
 	}
-	if len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
+	if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
 		return nil, false, errors.New("invalid json")
 	}
 	var metadata metaSimpleJSON

@@ -40,7 +40,7 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
 	})
 }

-type settings map[string]any
+type settings map[string]interface{}

 func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, path string, opts settings) fs.Fs {
 	fsName := strings.Split(f.Name(), "{")[0] // strip off hash

@@ -46,7 +46,6 @@ func TestIntegration(t *testing.T) {
"DirCacheFlush", "DirCacheFlush",
"UserInfo", "UserInfo",
"Disconnect", "Disconnect",
"ListP",
}, },
} }
if *fstest.RemoteName == "" { if *fstest.RemoteName == "" {

@@ -1,48 +0,0 @@
// Package api has type definitions for cloudinary
package api
import (
"fmt"
)
// CloudinaryEncoder extends the built-in encoder
type CloudinaryEncoder interface {
// FromStandardPath takes a / separated path in Standard encoding
// and converts it to a / separated path in this encoding.
FromStandardPath(string) string
// FromStandardName takes name in Standard encoding and converts
// it in this encoding.
FromStandardName(string) string
// ToStandardPath takes a / separated path in this encoding
// and converts it to a / separated path in Standard encoding.
ToStandardPath(string) string
// ToStandardName takes name in this encoding and converts
// it in Standard encoding.
ToStandardName(string, string) string
// Encoded root of the remote (as passed into NewFs)
FromStandardFullPath(string) string
}
// UpdateOptions was created to pass options from Update to Put
type UpdateOptions struct {
PublicID string
ResourceType string
DeliveryType string
AssetFolder string
DisplayName string
}
// Header formats the option as a string
func (o *UpdateOptions) Header() (string, string) {
return "UpdateOption", fmt.Sprintf("%s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
}
// Mandatory returns whether the option must be parsed or can be ignored
func (o *UpdateOptions) Mandatory() bool {
return false
}
// String formats the option into human-readable form
func (o *UpdateOptions) String() string {
return fmt.Sprintf("Fully qualified Public ID: %s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
}

@@ -1,754 +0,0 @@
// Package cloudinary provides an interface to the Cloudinary DAM
package cloudinary
import (
"context"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"path"
"slices"
"strconv"
"strings"
"time"
"github.com/cloudinary/cloudinary-go/v2"
SDKApi "github.com/cloudinary/cloudinary-go/v2/api"
"github.com/cloudinary/cloudinary-go/v2/api/admin"
"github.com/cloudinary/cloudinary-go/v2/api/admin/search"
"github.com/cloudinary/cloudinary-go/v2/api/uploader"
"github.com/rclone/rclone/backend/cloudinary/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"github.com/zeebo/blake3"
)
// Cloudinary shouldn't have a trailing dot if there is no path
func cldPathDir(somePath string) string {
if somePath == "" || somePath == "." {
return somePath
}
dir := path.Dir(somePath)
if dir == "." {
return ""
}
return dir
}
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "cloudinary",
Description: "Cloudinary",
NewFs: NewFs,
Options: []fs.Option{
{
Name: "cloud_name",
Help: "Cloudinary Environment Name",
Required: true,
Sensitive: true,
},
{
Name: "api_key",
Help: "Cloudinary API Key",
Required: true,
Sensitive: true,
},
{
Name: "api_secret",
Help: "Cloudinary API Secret",
Required: true,
Sensitive: true,
},
{
Name: "upload_prefix",
Help: "Specify the API endpoint for environments out of the US",
},
{
Name: "upload_preset",
Help: "Upload Preset to select asset manipulation on upload",
},
{
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.Base | // Slash,LtGt,DoubleQuote,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot
encoder.EncodeSlash |
encoder.EncodeLtGt |
encoder.EncodeDoubleQuote |
encoder.EncodeQuestion |
encoder.EncodeAsterisk |
encoder.EncodePipe |
encoder.EncodeHash |
encoder.EncodePercent |
encoder.EncodeBackSlash |
encoder.EncodeDel |
encoder.EncodeCtl |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8 |
encoder.EncodeDot),
},
{
Name: "eventually_consistent_delay",
Default: fs.Duration(0),
Advanced: true,
Help: "Wait N seconds for eventual consistency of the databases that support the backend operation",
},
{
Name: "adjust_media_files_extensions",
Default: true,
Advanced: true,
Help: "Cloudinary handles media formats as a file attribute and strips it from the name, which is unlike most other file systems",
},
{
Name: "media_extensions",
Default: []string{
"3ds", "3g2", "3gp", "ai", "arw", "avi", "avif", "bmp", "bw",
"cr2", "cr3", "djvu", "dng", "eps3", "fbx", "flif", "flv", "gif",
"glb", "gltf", "hdp", "heic", "heif", "ico", "indd", "jp2", "jpe",
"jpeg", "jpg", "jxl", "jxr", "m2ts", "mov", "mp4", "mpeg", "mts",
"mxf", "obj", "ogv", "pdf", "ply", "png", "psd", "svg", "tga",
"tif", "tiff", "ts", "u3ma", "usdz", "wdp", "webm", "webp", "wmv"},
Advanced: true,
Help: "Cloudinary supported media extensions",
},
},
})
}
// Options defines the configuration for this backend
type Options struct {
CloudName string `config:"cloud_name"`
APIKey string `config:"api_key"`
APISecret string `config:"api_secret"`
UploadPrefix string `config:"upload_prefix"`
UploadPreset string `config:"upload_preset"`
Enc encoder.MultiEncoder `config:"encoding"`
EventuallyConsistentDelay fs.Duration `config:"eventually_consistent_delay"`
MediaExtensions []string `config:"media_extensions"`
AdjustMediaFilesExtensions bool `config:"adjust_media_files_extensions"`
}
// Fs represents a remote cloudinary server
type Fs struct {
name string
root string
opt Options
features *fs.Features
pacer *fs.Pacer
srv *rest.Client // For downloading assets via the Cloudinary CDN
cld *cloudinary.Cloudinary // API calls are going through the Cloudinary SDK
lastCRUD time.Time
}
// Object describes a cloudinary object
type Object struct {
fs *Fs
remote string
size int64
modTime time.Time
url string
md5sum string
publicID string
resourceType string
deliveryType string
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// Initialize the Cloudinary client
cld, err := cloudinary.NewFromParams(opt.CloudName, opt.APIKey, opt.APISecret)
if err != nil {
return nil, fmt.Errorf("failed to create Cloudinary client: %w", err)
}
cld.Admin.Client = *fshttp.NewClient(ctx)
cld.Upload.Client = *fshttp.NewClient(ctx)
if opt.UploadPrefix != "" {
cld.Config.API.UploadPrefix = opt.UploadPrefix
}
client := fshttp.NewClient(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
cld: cld,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(1000), pacer.MaxSleep(10000), pacer.DecayConstant(2))),
srv: rest.NewClient(client),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
if root != "" {
// Check to see if the root actually an existing file
remote := path.Base(root)
f.root = cldPathDir(root)
_, err := f.NewObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound || errors.Is(err, fs.ErrorNotAFile) {
// File doesn't exist so return the previous root
f.root = root
return f, nil
}
return nil, err
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
// ------------------------------------------------------------
// FromStandardPath implementation of the api.CloudinaryEncoder
func (f *Fs) FromStandardPath(s string) string {
return strings.ReplaceAll(f.opt.Enc.FromStandardPath(s), "&", "\uFF06")
}
// FromStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) FromStandardName(s string) string {
if f.opt.AdjustMediaFilesExtensions {
parsedURL, err := url.Parse(s)
ext := ""
if err != nil {
fs.Logf(nil, "Error parsing URL: %v", err)
} else {
ext = path.Ext(parsedURL.Path)
if slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
s = strings.TrimSuffix(parsedURL.Path, ext)
}
}
}
return strings.ReplaceAll(f.opt.Enc.FromStandardName(s), "&", "\uFF06")
}
// ToStandardPath implementation of the api.CloudinaryEncoder
func (f *Fs) ToStandardPath(s string) string {
return strings.ReplaceAll(f.opt.Enc.ToStandardPath(s), "\uFF06", "&")
}
// ToStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) ToStandardName(s string, assetURL string) string {
ext := ""
if f.opt.AdjustMediaFilesExtensions {
parsedURL, err := url.Parse(assetURL)
if err != nil {
fs.Logf(nil, "Error parsing URL: %v", err)
} else {
ext = path.Ext(parsedURL.Path)
if !slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
ext = ""
}
}
}
return strings.ReplaceAll(f.opt.Enc.ToStandardName(s), "\uFF06", "&") + ext
}
// FromStandardFullPath encodes a full path to Cloudinary standard
func (f *Fs) FromStandardFullPath(dir string) string {
return path.Join(api.CloudinaryEncoder.FromStandardPath(f, f.root), api.CloudinaryEncoder.FromStandardPath(f, dir))
}
// ToAssetFolderAPI encodes folders as expected by the Cloudinary SDK
func (f *Fs) ToAssetFolderAPI(dir string) string {
return strings.ReplaceAll(dir, "%", "%25")
}
// ToDisplayNameElastic encodes a special case of elasticsearch
func (f *Fs) ToDisplayNameElastic(dir string) string {
return strings.ReplaceAll(dir, "!", "\\!")
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// WaitEventuallyConsistent waits till the FS is eventually consistent
func (f *Fs) WaitEventuallyConsistent() {
if f.opt.EventuallyConsistentDelay == fs.Duration(0) {
return
}
delay := time.Duration(f.opt.EventuallyConsistentDelay)
timeSinceLastCRUD := time.Since(f.lastCRUD)
if timeSinceLastCRUD < delay {
time.Sleep(delay - timeSinceLastCRUD)
}
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("Cloudinary root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// List the objects and directories in dir into entries
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
remotePrefix := f.FromStandardFullPath(dir)
if remotePrefix != "" && !strings.HasSuffix(remotePrefix, "/") {
remotePrefix += "/"
}
var entries fs.DirEntries
dirs := make(map[string]struct{})
nextCursor := ""
f.WaitEventuallyConsistent()
for {
// use the folders API to list folders.
folderParams := admin.SubFoldersParams{
Folder: f.ToAssetFolderAPI(remotePrefix),
MaxResults: 500,
}
if nextCursor != "" {
folderParams.NextCursor = nextCursor
}
results, err := f.cld.Admin.SubFolders(ctx, folderParams)
if err != nil {
return nil, fmt.Errorf("failed to list sub-folders: %w", err)
}
if results.Error.Message != "" {
if strings.HasPrefix(results.Error.Message, "Can't find folder with path") {
return nil, fs.ErrorDirNotFound
}
return nil, fmt.Errorf("failed to list sub-folders: %s", results.Error.Message)
}
for _, folder := range results.Folders {
relativePath := api.CloudinaryEncoder.ToStandardPath(f, strings.TrimPrefix(folder.Path, remotePrefix))
parts := strings.Split(relativePath, "/")
// It's a directory
dirName := parts[len(parts)-1]
if _, found := dirs[dirName]; !found {
d := fs.NewDir(path.Join(dir, dirName), time.Time{})
entries = append(entries, d)
dirs[dirName] = struct{}{}
}
}
// Break if there are no more results
if results.NextCursor == "" {
break
}
nextCursor = results.NextCursor
}
for {
// Use the assets.AssetsByAssetFolder API to list assets
assetsParams := admin.AssetsByAssetFolderParams{
AssetFolder: remotePrefix,
MaxResults: 500,
}
if nextCursor != "" {
assetsParams.NextCursor = nextCursor
}
results, err := f.cld.Admin.AssetsByAssetFolder(ctx, assetsParams)
if err != nil {
return nil, fmt.Errorf("failed to list assets: %w", err)
}
for _, asset := range results.Assets {
remote := path.Join(dir, api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName, asset.SecureURL))
o := &Object{
fs: f,
remote: remote,
size: int64(asset.Bytes),
modTime: asset.CreatedAt,
url: asset.SecureURL,
publicID: asset.PublicID,
resourceType: asset.AssetType,
deliveryType: asset.Type,
}
entries = append(entries, o)
}
// Break if there are no more results
if results.NextCursor == "" {
break
}
nextCursor = results.NextCursor
}
return entries, nil
}
// NewObject finds the Object at remote. If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
searchParams := search.Query{
Expression: fmt.Sprintf("asset_folder:\"%s\" AND display_name:\"%s\"",
f.FromStandardFullPath(cldPathDir(remote)),
f.ToDisplayNameElastic(api.CloudinaryEncoder.FromStandardName(f, path.Base(remote)))),
SortBy: []search.SortByField{{"uploaded_at": "desc"}},
MaxResults: 2,
}
var results *admin.SearchResult
f.WaitEventuallyConsistent()
err := f.pacer.Call(func() (bool, error) {
var err1 error
results, err1 = f.cld.Admin.Search(ctx, searchParams)
if err1 == nil && results.TotalCount != len(results.Assets) {
err1 = errors.New("partial response so waiting for eventual consistency")
}
return shouldRetry(ctx, nil, err1)
})
if err != nil {
return nil, fs.ErrorObjectNotFound
}
if results.TotalCount == 0 || len(results.Assets) == 0 {
return nil, fs.ErrorObjectNotFound
}
asset := results.Assets[0]
o := &Object{
fs: f,
remote: remote,
size: int64(asset.Bytes),
modTime: asset.UploadedAt,
url: asset.SecureURL,
md5sum: asset.Etag,
publicID: asset.PublicID,
resourceType: asset.ResourceType,
deliveryType: asset.Type,
}
return o, nil
}
func (f *Fs) getSuggestedPublicID(assetFolder string, displayName string, modTime time.Time) string {
payload := []byte(path.Join(assetFolder, displayName))
hash := blake3.Sum256(payload)
return hex.EncodeToString(hash[:])
}
// Put uploads content to Cloudinary
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if src.Size() == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
params := uploader.UploadParams{
UploadPreset: f.opt.UploadPreset,
}
updateObject := false
var modTime time.Time
for _, option := range options {
if updateOptions, ok := option.(*api.UpdateOptions); ok {
if updateOptions.PublicID != "" {
updateObject = true
params.Overwrite = SDKApi.Bool(true)
params.Invalidate = SDKApi.Bool(true)
params.PublicID = updateOptions.PublicID
params.ResourceType = updateOptions.ResourceType
params.Type = SDKApi.DeliveryType(updateOptions.DeliveryType)
params.AssetFolder = updateOptions.AssetFolder
params.DisplayName = updateOptions.DisplayName
modTime = src.ModTime(ctx)
}
}
}
if !updateObject {
params.AssetFolder = f.FromStandardFullPath(cldPathDir(src.Remote()))
params.DisplayName = api.CloudinaryEncoder.FromStandardName(f, path.Base(src.Remote()))
// We want to conform to the unique asset ID of rclone, which is (asset_folder,display_name,last_modified).
// We also want to enable customers to choose their own public_id, in case duplicate names are not a crucial use case.
// Upload_presets that apply randomness to the public ID would not work well with rclone duplicate assets support.
params.FilenameOverride = f.getSuggestedPublicID(params.AssetFolder, params.DisplayName, src.ModTime(ctx))
}
uploadResult, err := f.cld.Upload.Upload(ctx, in, params)
f.lastCRUD = time.Now()
if err != nil {
return nil, fmt.Errorf("failed to upload to Cloudinary: %w", err)
}
if !updateObject {
modTime = uploadResult.CreatedAt
}
if uploadResult.Error.Message != "" {
return nil, errors.New(uploadResult.Error.Message)
}
o := &Object{
fs: f,
remote: src.Remote(),
size: int64(uploadResult.Bytes),
modTime: modTime,
url: uploadResult.SecureURL,
md5sum: uploadResult.Etag,
publicID: uploadResult.PublicID,
resourceType: uploadResult.ResourceType,
deliveryType: uploadResult.Type,
}
return o, nil
}
// Precision of the remote
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Hashes returns the supported hash sets
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// Mkdir creates empty folders
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
params := admin.CreateFolderParams{Folder: f.ToAssetFolderAPI(f.FromStandardFullPath(dir))}
res, err := f.cld.Admin.CreateFolder(ctx, params)
f.lastCRUD = time.Now()
if err != nil {
return err
}
if res.Error.Message != "" {
return errors.New(res.Error.Message)
}
return nil
}
// Rmdir deletes empty folders
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// Cloudinary deletes any folder that contains no assets, even if it still has
// sub-folders, so check for sub-folders ourselves before deleting
folder := f.ToAssetFolderAPI(f.FromStandardFullPath(dir))
folderParams := admin.SubFoldersParams{
Folder: folder,
MaxResults: 1,
}
results, err := f.cld.Admin.SubFolders(ctx, folderParams)
if err != nil {
return err
}
if results.TotalCount > 0 {
return fs.ErrorDirectoryNotEmpty
}
params := admin.DeleteFolderParams{Folder: folder}
res, err := f.cld.Admin.DeleteFolder(ctx, params)
f.lastCRUD = time.Now()
if err != nil {
return err
}
if res.Error.Message != "" {
if strings.HasPrefix(res.Error.Message, "Can't find folder with path") {
return fs.ErrorDirNotFound
}
return errors.New(res.Error.Message)
}
return nil
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
420, // Too Many Requests (legacy)
429, // Too Many Requests
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
if err != nil {
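// Cloudinary rate-limit errors embed the time at which the limit resets,
// e.g. an error ending in "Try again on 2006-01-02 15:04:05 UTC" (illustrative
// timestamp in the layout used below); parse it so the pacer sleeps until then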
tryAgain := "Try again on "
if idx := strings.Index(err.Error(), tryAgain); idx != -1 {
layout := "2006-01-02 15:04:05 UTC"
dateStr := err.Error()[idx+len(tryAgain) : idx+len(tryAgain)+len(layout)]
timestamp, err2 := time.Parse(layout, dateStr)
if err2 == nil {
return true, fserrors.NewErrorRetryAfter(time.Until(timestamp))
}
}
fs.Debugf(nil, "Retrying API error %v", err)
return true, err
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// ------------------------------------------------------------
// Hash returns the MD5 of an object
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
if ty != hash.MD5 {
return "", hash.ErrUnsupported
}
return o.md5sum, nil
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// Size of object in bytes
func (o *Object) Size() int64 {
return o.size
}
// Storable returns if this object is storable
func (o *Object) Storable() bool {
return true
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return fs.ErrorCantSetModTime
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
var resp *http.Response
opts := rest.Opts{
Method: "GET",
RootURL: o.url,
Options: options,
}
var offset int64
var count int64
var key string
var value string
fs.FixRangeOption(options, o.size)
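// Turn any Range/Seek options into an offset/count and an HTTP Range header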
for _, option := range options {
switch x := option.(type) {
case *fs.RangeOption:
offset, count = x.Decode(o.size)
if count < 0 {
count = o.size - offset
}
key, value = option.Header()
case *fs.SeekOption:
offset = x.Offset
count = o.size - offset
key, value = option.Header()
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
if key != "" && value != "" {
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders[key] = value
}
// Make sure that the asset is fully available: Cloudinary is eventually
// consistent, so retry until the response looks complete (the returned
// Content-Length matches the requested byte count)
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
if err == nil {
cl, clErr := strconv.Atoi(resp.Header.Get("content-length"))
if clErr == nil && count == int64(cl) {
return false, nil
}
}
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed download of \"%s\": %w", o.url, err)
}
return resp.Body, err
}
// Update the object with the contents of the io.Reader
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
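// Pass the existing object's identity (public ID, types, folder and name)
// through to Put so the asset is overwritten in place rather than duplicated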
options = append(options, &api.UpdateOptions{
PublicID: o.publicID,
ResourceType: o.resourceType,
DeliveryType: o.deliveryType,
DisplayName: api.CloudinaryEncoder.FromStandardName(o.fs, path.Base(o.Remote())),
AssetFolder: o.fs.FromStandardFullPath(cldPathDir(o.Remote())),
})
updatedObj, err := o.fs.Put(ctx, in, src, options...)
if err != nil {
return err
}
if uo, ok := updatedObj.(*Object); ok {
o.size = uo.size
o.modTime = time.Now() // Skipping uo.modTime because the API returns the create time
o.url = uo.url
o.md5sum = uo.md5sum
o.publicID = uo.publicID
o.resourceType = uo.resourceType
o.deliveryType = uo.deliveryType
}
return nil
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
params := uploader.DestroyParams{
PublicID: o.publicID,
ResourceType: o.resourceType,
Type: o.deliveryType,
}
res, dErr := o.fs.cld.Upload.Destroy(ctx, params)
o.fs.lastCRUD = time.Now()
if dErr != nil {
return dErr
}
if res.Error.Message != "" {
return errors.New(res.Error.Message)
}
if res.Result != "ok" {
return errors.New(res.Result)
}
return nil
}
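
For illustration only (not part of the backend above): a minimal standalone sketch of the deterministic public ID derivation that getSuggestedPublicID performs. The folder and file names are hypothetical, and it assumes the lukechampine.com/blake3 package, which provides Sum256.

package main

import (
    "encoding/hex"
    "fmt"
    "path"

    "lukechampine.com/blake3" // assumed BLAKE3 package providing Sum256
)

func main() {
    assetFolder := "backup/photos" // hypothetical asset folder
    displayName := "cat.jpg"       // hypothetical display name
    sum := blake3.Sum256([]byte(path.Join(assetFolder, displayName)))
    // The same folder/name pair always yields the same 64-character hex ID.
    fmt.Println(hex.EncodeToString(sum[:]))
}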


@@ -1,23 +0,0 @@
-// Test Cloudinary filesystem interface
-package cloudinary_test
-
-import (
-    "testing"
-
-    "github.com/rclone/rclone/backend/cloudinary"
-    "github.com/rclone/rclone/fstest/fstests"
-)
-
-// TestIntegration runs integration tests against the remote
-func TestIntegration(t *testing.T) {
-    name := "TestCloudinary"
-    fstests.Run(t, &fstests.Opt{
-        RemoteName:      name + ":",
-        NilObject:       (*cloudinary.Object)(nil),
-        SkipInvalidUTF8: true,
-        ExtraConfig: []fstests.ExtraConfigItem{
-            {Name: name, Key: "eventually_consistent_delay", Value: "7"},
-        },
-    })
-}


@@ -20,7 +20,6 @@ import (
     "github.com/rclone/rclone/fs/config/configmap"
     "github.com/rclone/rclone/fs/config/configstruct"
     "github.com/rclone/rclone/fs/hash"
-    "github.com/rclone/rclone/fs/list"
     "github.com/rclone/rclone/fs/operations"
     "github.com/rclone/rclone/fs/walk"
     "golang.org/x/sync/errgroup"
@@ -266,9 +265,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
         }
     }
 
-    // Enable ListP always
-    features.ListP = f.ListP
-
     // Enable Purge when any upstreams support it
     if features.Purge == nil {
         for _, u := range f.upstreams {
@@ -813,52 +809,24 @@ func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.D
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-    return list.WithListP(ctx, dir, f)
-}
-
-// ListP lists the objects and directories of the Fs starting
-// from dir non recursively into out.
-//
-// dir should be "" to start from the root, and should not
-// have trailing slashes.
-//
-// This should return ErrDirNotFound if the directory isn't
-// found.
-//
-// It should call callback for each tranche of entries read.
-// These need not be returned in any particular order. If
-// callback returns an error then the listing will stop
-// immediately.
-func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
     // defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
     if f.root == "" && dir == "" {
-        entries := make(fs.DirEntries, 0, len(f.upstreams))
+        entries = make(fs.DirEntries, 0, len(f.upstreams))
         for combineDir := range f.upstreams {
             d := fs.NewLimitedDirWrapper(combineDir, fs.NewDir(combineDir, f.when))
             entries = append(entries, d)
         }
-        return callback(entries)
+        return entries, nil
     }
     u, uRemote, err := f.findUpstream(dir)
     if err != nil {
-        return err
+        return nil, err
     }
-    wrappedCallback := func(entries fs.DirEntries) error {
-        entries, err := u.wrapEntries(ctx, entries)
-        if err != nil {
-            return err
-        }
-        return callback(entries)
-    }
-    listP := u.f.Features().ListP
-    if listP == nil {
-        entries, err := u.f.List(ctx, uRemote)
-        if err != nil {
-            return err
-        }
-        return wrappedCallback(entries)
-    }
-    return listP(ctx, dir, wrappedCallback)
+    entries, err = u.f.List(ctx, uRemote)
+    if err != nil {
+        return nil, err
+    }
+    return u.wrapEntries(ctx, entries)
 }
 
 // ListR lists the objects and directories of the Fs starting


@@ -29,7 +29,6 @@ import (
     "github.com/rclone/rclone/fs/config/configstruct"
     "github.com/rclone/rclone/fs/fspath"
     "github.com/rclone/rclone/fs/hash"
-    "github.com/rclone/rclone/fs/list"
     "github.com/rclone/rclone/fs/log"
     "github.com/rclone/rclone/fs/object"
     "github.com/rclone/rclone/fs/operations"
@@ -209,8 +208,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
     if !operations.CanServerSideMove(wrappedFs) {
         f.features.Disable("PutStream")
     }
-    // Enable ListP always
-    f.features.ListP = f.ListP
 
     return f, err
 }
@@ -355,39 +352,11 @@ func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
 // found.
 // List entries and process them
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-    return list.WithListP(ctx, dir, f)
-}
-
-// ListP lists the objects and directories of the Fs starting
-// from dir non recursively into out.
-//
-// dir should be "" to start from the root, and should not
-// have trailing slashes.
-//
-// This should return ErrDirNotFound if the directory isn't
-// found.
-//
-// It should call callback for each tranche of entries read.
-// These need not be returned in any particular order. If
-// callback returns an error then the listing will stop
-// immediately.
-func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
-    wrappedCallback := func(entries fs.DirEntries) error {
-        entries, err := f.processEntries(entries)
-        if err != nil {
-            return err
-        }
-        return callback(entries)
-    }
-    listP := f.Fs.Features().ListP
-    if listP == nil {
-        entries, err := f.Fs.List(ctx, dir)
-        if err != nil {
-            return err
-        }
-        return wrappedCallback(entries)
-    }
-    return listP(ctx, dir, wrappedCallback)
+    entries, err = f.Fs.List(ctx, dir)
+    if err != nil {
+        return nil, err
+    }
+    return f.processEntries(entries)
 }
 
 // ListR lists the objects and directories of the Fs starting


@@ -192,7 +192,7 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
         dirNameEncrypt:  dirNameEncrypt,
         encryptedSuffix: ".bin",
     }
-    c.buffers.New = func() any {
+    c.buffers.New = func() interface{} {
         return new([blockSize]byte)
     }
     err := c.Key(password, salt)
@@ -336,7 +336,7 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
     _, _ = result.WriteString(strconv.Itoa(dir) + ".")
 
     // but we'll augment it with the nameKey for real calculation
-    for i := range len(c.nameKey) {
+    for i := 0; i < len(c.nameKey); i++ {
         dir += int(c.nameKey[i])
     }
@@ -418,7 +418,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
     }
 
     // add the nameKey to get the real rotate distance
-    for i := range len(c.nameKey) {
+    for i := 0; i < len(c.nameKey); i++ {
         dir += int(c.nameKey[i])
     }
@@ -664,7 +664,7 @@ func (n *nonce) increment() {
 // add a uint64 to the nonce
 func (n *nonce) add(x uint64) {
     carry := uint16(0)
-    for i := range 8 {
+    for i := 0; i < 8; i++ {
         digit := (*n)[i]
         xDigit := byte(x)
         x >>= 8


@@ -1307,7 +1307,10 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
     open := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
         end := len(ciphertext)
         if underlyingLimit >= 0 {
-            end = min(int(underlyingOffset+underlyingLimit), len(ciphertext))
+            end = int(underlyingOffset + underlyingLimit)
+            if end > len(ciphertext) {
+                end = len(ciphertext)
+            }
         }
         reader = io.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
         return reader, nil
@@ -1487,7 +1490,7 @@ func TestDecrypterRead(t *testing.T) {
     assert.NoError(t, err)
 
     // Test truncating the file at each possible point
-    for i := range len(file16) - 1 {
+    for i := 0; i < len(file16)-1; i++ {
         what := fmt.Sprintf("truncating to %d/%d", i, len(file16))
         cd := newCloseDetector(bytes.NewBuffer(file16[:i]))
         fh, err := c.newDecrypter(cd)


@@ -18,7 +18,6 @@ import (
     "github.com/rclone/rclone/fs/config/obscure"
     "github.com/rclone/rclone/fs/fspath"
     "github.com/rclone/rclone/fs/hash"
-    "github.com/rclone/rclone/fs/list"
 )
 
 // Globals
@@ -294,9 +293,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
         PartialUploads:          true,
     }).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
 
-    // Enable ListP always
-    f.features.ListP = f.ListP
-
     return f, err
 }
@@ -420,40 +416,11 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-    return list.WithListP(ctx, dir, f)
-}
-
-// ListP lists the objects and directories of the Fs starting
-// from dir non recursively into out.
-//
-// dir should be "" to start from the root, and should not
-// have trailing slashes.
-//
-// This should return ErrDirNotFound if the directory isn't
-// found.
-//
-// It should call callback for each tranche of entries read.
-// These need not be returned in any particular order. If
-// callback returns an error then the listing will stop
-// immediately.
-func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
-    wrappedCallback := func(entries fs.DirEntries) error {
-        entries, err := f.encryptEntries(ctx, entries)
-        if err != nil {
-            return err
-        }
-        return callback(entries)
-    }
-    listP := f.Fs.Features().ListP
-    encryptedDir := f.cipher.EncryptDirName(dir)
-    if listP == nil {
-        entries, err := f.Fs.List(ctx, encryptedDir)
-        if err != nil {
-            return err
-        }
-        return wrappedCallback(entries)
-    }
-    return listP(ctx, encryptedDir, wrappedCallback)
+    entries, err = f.Fs.List(ctx, f.cipher.EncryptDirName(dir))
+    if err != nil {
+        return nil, err
+    }
+    return f.encryptEntries(ctx, entries)
 }
 
 // ListR lists the objects and directories of the Fs starting
@@ -957,7 +924,7 @@ Usage Example:
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
     switch name {
     case "decode":
         out := make([]string, 0, len(arg))


@@ -25,7 +25,7 @@ func Pad(n int, buf []byte) []byte {
     }
     length := len(buf)
     padding := n - (length % n)
-    for range padding {
+    for i := 0; i < padding; i++ {
         buf = append(buf, byte(padding))
     }
     if (len(buf) % n) != 0 {
@@ -54,7 +54,7 @@ func Unpad(n int, buf []byte) ([]byte, error) {
     if padding == 0 {
         return nil, ErrorPaddingTooShort
     }
-    for i := range padding {
+    for i := 0; i < padding; i++ {
         if buf[length-1-i] != byte(padding) {
             return nil, ErrorPaddingNotAllTheSame
         }
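
For reference, a minimal standalone sketch (not rclone code) of the PKCS#7 rule that the Pad/Unpad pair above implements: an input of length L is extended with n - (L mod n) bytes, each equal to that count, so the padding is always at least one byte long and self-describing.

package main

import "fmt"

// pad appends PKCS#7 padding for block size n, mirroring Pad above.
func pad(n int, buf []byte) []byte {
    padding := n - len(buf)%n
    for i := 0; i < padding; i++ {
        buf = append(buf, byte(padding))
    }
    return buf
}

func main() {
    fmt.Println(pad(8, []byte("hello"))) // prints [104 101 108 108 111 3 3 3]
}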


@@ -18,7 +18,6 @@ import (
     "net/http"
     "os"
     "path"
-    "slices"
     "sort"
     "strconv"
     "strings"
@@ -38,8 +37,8 @@ import (
     "github.com/rclone/rclone/fs/fshttp"
     "github.com/rclone/rclone/fs/fspath"
     "github.com/rclone/rclone/fs/hash"
-    "github.com/rclone/rclone/fs/list"
     "github.com/rclone/rclone/fs/operations"
+    "github.com/rclone/rclone/fs/walk"
     "github.com/rclone/rclone/lib/dircache"
     "github.com/rclone/rclone/lib/encoder"
     "github.com/rclone/rclone/lib/env"
@@ -81,10 +80,9 @@ const (
 // Globals
 var (
     // Description of how to auth for this app
-    driveConfig = &oauthutil.Config{
+    driveConfig = &oauth2.Config{
         Scopes:       []string{scopePrefix + "drive"},
-        AuthURL:      google.Endpoint.AuthURL,
-        TokenURL:     google.Endpoint.TokenURL,
+        Endpoint:     google.Endpoint,
         ClientID:     rcloneClientID,
         ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
         RedirectURL:  oauthutil.RedirectURL,
@@ -122,7 +120,6 @@ var (
     "text/html":                 ".html",
     "text/plain":                ".txt",
     "text/tab-separated-values": ".tsv",
-    "text/markdown":             ".md",
     }
     _mimeTypeToExtensionLinks = map[string]string{
         "application/x-link-desktop": ".desktop",
@@ -200,7 +197,13 @@ func driveScopes(scopesString string) (scopes []string) {
 // Returns true if one of the scopes was "drive.appfolder"
 func driveScopesContainsAppFolder(scopes []string) bool {
-    return slices.Contains(scopes, scopePrefix+"drive.appfolder")
+    for _, scope := range scopes {
+        if scope == scopePrefix+"drive.appfolder" {
+            return true
+        }
+    }
+    return false
 }
 
 func driveOAuthOptions() []fs.Option {
@@ -954,7 +957,12 @@ func parseDrivePath(path string) (root string, err error) {
 type listFn func(*drive.File) bool
 
 func containsString(slice []string, s string) bool {
-    return slices.Contains(slice, s)
+    for _, e := range slice {
+        if e == s {
+            return true
+        }
+    }
+    return false
 }
 
 // getFile returns drive.File for the ID passed and fields passed in
@@ -1143,7 +1151,13 @@ OUTER:
             // Check the case of items is correct since
             // the `=` operator is case insensitive.
             if title != "" && title != item.Name {
-                found := slices.Contains(stems, item.Name)
+                found := false
+                for _, stem := range stems {
+                    if stem == item.Name {
+                        found = true
+                        break
+                    }
+                }
                 if !found {
                     continue
                 }
@@ -1196,7 +1210,6 @@ func fixMimeType(mimeTypeIn string) string {
     }
     return mimeTypeOut
 }
-
 func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
     out = make(map[string][]string, len(in))
     for k, v := range in {
@@ -1207,11 +1220,9 @@
     }
     return out
 }
-
 func isInternalMimeType(mimeType string) bool {
     return strings.HasPrefix(mimeType, "application/vnd.google-apps.")
 }
-
 func isLinkMimeType(mimeType string) bool {
     return strings.HasPrefix(mimeType, "application/x-link-")
 }
@@ -1546,10 +1557,13 @@ func (f *Fs) getFileFields(ctx context.Context) (fields googleapi.Field) {
 func (f *Fs) newRegularObject(ctx context.Context, remote string, info *drive.File) (obj fs.Object, err error) {
     // wipe checksum if SkipChecksumGphotos and file is type Photo or Video
     if f.opt.SkipChecksumGphotos {
-        if slices.Contains(info.Spaces, "photos") {
-            info.Md5Checksum = ""
-            info.Sha1Checksum = ""
-            info.Sha256Checksum = ""
+        for _, space := range info.Spaces {
+            if space == "photos" {
+                info.Md5Checksum = ""
+                info.Sha1Checksum = ""
+                info.Sha256Checksum = ""
+                break
+            }
         }
     }
     o := &Object{
@@ -1641,8 +1655,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.F
 // When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
 func (f *Fs) newObjectWithExportInfo(
     ctx context.Context, remote string, info *drive.File,
-    extension, exportName, exportMimeType string, isDocument bool,
-) (o fs.Object, err error) {
+    extension, exportName, exportMimeType string, isDocument bool) (o fs.Object, err error) {
     // Note that resolveShortcut will have been called already if
     // we are being called from a listing. However the drive.Item
     // will have been resolved so this will do nothing.
@@ -1833,7 +1846,6 @@ func linkTemplate(mt string) *template.Template {
     })
     return _linkTemplates[mt]
 }
-
 func (f *Fs) fetchFormats(ctx context.Context) {
     fetchFormatsOnce.Do(func() {
         var about *drive.About
@@ -1879,8 +1891,7 @@ func (f *Fs) importFormats(ctx context.Context) map[string][]string {
 // Look through the exportExtensions and find the first format that can be
 // converted. If none found then return ("", "", false)
 func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string) (
-    extension, mimeType string, isDocument bool,
-) {
+    extension, mimeType string, isDocument bool) {
     exportMimeTypes, isDocument := f.exportFormats(ctx)[itemMimeType]
     if isDocument {
         for _, _extension := range f.exportExtensions {
@@ -2189,7 +2200,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
     wg := sync.WaitGroup{}
     in := make(chan listREntry, listRInputBuffer)
     out := make(chan error, f.ci.Checkers)
-    list := list.NewHelper(callback)
+    list := walk.NewListRHelper(callback)
     overflow := []listREntry{}
     listed := 0
@@ -2227,7 +2238,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
     wg.Add(1)
     in <- listREntry{directoryID, dir}
-    for range f.ci.Checkers {
+    for i := 0; i < f.ci.Checkers; i++ {
         go f.listRRunner(ctx, &wg, in, out, cb, sendJob)
     }
     go func() {
@@ -2236,8 +2247,11 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
         // if the input channel overflowed add the collected entries to the channel now
         for len(overflow) > 0 {
             mu.Lock()
+            l := len(overflow)
             // only fill half of the channel to prevent entries being put into overflow again
-            l := min(len(overflow), listRInputBuffer/2)
+            if l > listRInputBuffer/2 {
+                l = listRInputBuffer / 2
+            }
             wg.Add(l)
             for _, d := range overflow[:l] {
                 in <- d
@@ -2257,7 +2271,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
         mu.Unlock()
     }()
     // wait until the all workers to finish
-    for range f.ci.Checkers {
+    for i := 0; i < f.ci.Checkers; i++ {
         e := <-out
         mu.Lock()
         // if one worker returns an error early, close the input so all other workers exit
@@ -2673,7 +2687,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
     if shortcutID != "" {
         return f.delete(ctx, shortcutID, f.opt.UseTrash)
     }
-    trashedFiles := false
+    var trashedFiles = false
     if check {
         found, err := f.list(ctx, []string{directoryID}, "", false, false, f.opt.TrashedOnly, true, func(item *drive.File) bool {
             if !item.Trashed {
@@ -2910,6 +2924,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
         err := f.svc.Files.EmptyTrash().Context(ctx).Do()
         return f.shouldRetry(ctx, err)
     })
+
     if err != nil {
         return err
     }
@@ -3170,7 +3185,6 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
         }
     }()
 }
-
 func (f *Fs) changeNotifyStartPageToken(ctx context.Context) (pageToken string, err error) {
     var startPageToken *drive.StartPageToken
     err = f.pacer.Call(func() (bool, error) {
@@ -3509,14 +3523,14 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras
     return f.unTrash(ctx, dir, directoryID, true)
 }
 
-// copy or move file with id to dest
-func (f *Fs) copyOrMoveID(ctx context.Context, operation string, id, dest string) (err error) {
+// copy file with id to dest
+func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
     info, err := f.getFile(ctx, id, f.getFileFields(ctx))
     if err != nil {
         return fmt.Errorf("couldn't find id: %w", err)
     }
     if info.MimeType == driveFolderType {
-        return fmt.Errorf("can't %s directory use: rclone %s --drive-root-folder-id %s %s %s", operation, operation, id, fs.ConfigString(f), dest)
+        return fmt.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
     }
     info.Name = f.opt.Enc.ToStandardName(info.Name)
     o, err := f.newObjectWithInfo(ctx, info.Name, info)
@@ -3537,21 +3551,14 @@ func (f *Fs) copyOrMoveID(ctx context.Context, operation string, id, dest string
     if err != nil {
         return err
     }
-
-    var opErr error
-    if operation == "moveid" {
-        _, opErr = operations.Move(ctx, dstFs, nil, destLeaf, o)
-    } else {
-        _, opErr = operations.Copy(ctx, dstFs, nil, destLeaf, o)
-    }
-    if opErr != nil {
-        return fmt.Errorf("%s failed: %w", operation, opErr)
+    _, err = operations.Copy(ctx, dstFs, nil, destLeaf, o)
+    if err != nil {
+        return fmt.Errorf("copy failed: %w", err)
     }
     return nil
 }
 
-// Run the drive query calling fn on each entry found
-func (f *Fs) queryFn(ctx context.Context, query string, fn func(*drive.File)) (err error) {
+func (f *Fs) query(ctx context.Context, query string) (entries []*drive.File, err error) {
     list := f.svc.Files.List()
     if query != "" {
         list.Q(query)
@@ -3570,7 +3577,10 @@ func (f *Fs) queryFn(ctx context.Context, query string, fn func(*drive.File)) (e
     if f.rootFolderID == "appDataFolder" {
         list.Spaces("appDataFolder")
     }
+
     fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.getFileFields(ctx))
+    var results []*drive.File
+
     for {
         var files *drive.FileList
         err = f.pacer.Call(func() (bool, error) {
@@ -3578,66 +3588,20 @@
             return f.shouldRetry(ctx, err)
         })
         if err != nil {
-            return fmt.Errorf("failed to execute query: %w", err)
+            return nil, fmt.Errorf("failed to execute query: %w", err)
         }
         if files.IncompleteSearch {
             fs.Errorf(f, "search result INCOMPLETE")
         }
-        for _, item := range files.Files {
-            fn(item)
-        }
+        results = append(results, files.Files...)
         if files.NextPageToken == "" {
             break
         }
         list.PageToken(files.NextPageToken)
     }
-    return nil
-}
-
-// Run the drive query returning the entries found
-func (f *Fs) query(ctx context.Context, query string) (entries []*drive.File, err error) {
-    var results []*drive.File
-    err = f.queryFn(ctx, query, func(item *drive.File) {
-        results = append(results, item)
-    })
-    if err != nil {
-        return nil, err
-    }
     return results, nil
 }
-
-// Rescue, list or delete orphaned files
-func (f *Fs) rescue(ctx context.Context, dirID string, delete bool) (err error) {
-    return f.queryFn(ctx, "'me' in owners and trashed=false", func(item *drive.File) {
-        if len(item.Parents) != 0 {
-            return
-        }
-        // Have found an orphaned entry
-        if delete {
-            fs.Infof(item.Name, "Deleting orphan %q into trash", item.Id)
-            err = f.delete(ctx, item.Id, true)
-            if err != nil {
-                fs.Errorf(item.Name, "Failed to delete orphan %q: %v", item.Id, err)
-            }
-        } else if dirID == "" {
-            operations.SyncPrintf("%q, %q\n", item.Name, item.Id)
-        } else {
-            fs.Infof(item.Name, "Rescuing orphan %q", item.Id)
-            err = f.pacer.Call(func() (bool, error) {
-                _, err = f.svc.Files.Update(item.Id, nil).
-                    AddParents(dirID).
-                    Fields(f.getFileFields(ctx)).
-                    SupportsAllDrives(true).
-                    Context(ctx).Do()
-                return f.shouldRetry(ctx, err)
-            })
-            if err != nil {
-                fs.Errorf(item.Name, "Failed to rescue orphan %q: %v", item.Id, err)
-            }
-        }
-    })
-}
 
 var commandHelp = []fs.CommandHelp{{
     Name:  "get",
     Short: "Get command for fetching the drive config parameters",
@@ -3782,28 +3746,6 @@ attempted if possible.
 Use the --interactive/-i or --dry-run flag to see what would be copied before copying.
 `,
-}, {
-    Name:  "moveid",
-    Short: "Move files by ID",
-    Long: `This command moves files by ID
-
-Usage:
-
-    rclone backend moveid drive: ID path
-    rclone backend moveid drive: ID1 path1 ID2 path2
-
-It moves the drive file with ID given to the path (an rclone path which
-will be passed internally to rclone moveto).
-
-The path should end with a / to indicate move the file as named to
-this directory. If it doesn't end with a / then the last path
-component will be used as the file name.
-
-If the destination is a drive backend then server-side moving will be
-attempted if possible.
-
-Use the --interactive/-i or --dry-run flag to see what would be moved beforehand.
-`,
 }, {
     Name:  "exportformats",
     Short: "Dump the export formats for debug purposes",
@@ -3851,37 +3793,6 @@ The result is a JSON array of matches, for example:
         "webViewLink": "https://drive.google.com/file/d/0AxBe_CDEF4zkGHI4d0FjYko2QkD/view?usp=drivesdk\u0026resourcekey=0-ABCDEFGHIXJQpIGqBJq3MC"
     }
 ]`,
-}, {
-    Name:  "rescue",
-    Short: "Rescue or delete any orphaned files",
-    Long: `This command rescues or deletes any orphaned files or directories.
-
-Sometimes files can get orphaned in Google Drive. This means that they
-are no longer in any folder in Google Drive.
-
-This command finds those files and either rescues them to a directory
-you specify or deletes them.
-
-Usage:
-
-This can be used in 3 ways.
-
-First, list all orphaned files
-
-    rclone backend rescue drive:
-
-Second rescue all orphaned files to the directory indicated
-
-    rclone backend rescue drive: "relative/path/to/rescue/directory"
-
-e.g. To rescue all orphans to a directory called "Orphans" in the top level
-
-    rclone backend rescue drive: Orphans
-
-Third delete all orphaned files to the trash
-
-    rclone backend rescue drive: -o delete
-`,
 }}
 
 // Command the backend to run a named command
@@ -3893,7 +3804,7 @@ Third delete all orphaned files to the trash
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
     switch name {
     case "get":
         out := make(map[string]string)
@@ -3982,16 +3893,16 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
             dir = arg[0]
         }
         return f.unTrashDir(ctx, dir, true)
-    case "copyid", "moveid":
+    case "copyid":
         if len(arg)%2 != 0 {
             return nil, errors.New("need an even number of arguments")
         }
         for len(arg) > 0 {
             id, dest := arg[0], arg[1]
             arg = arg[2:]
-            err = f.copyOrMoveID(ctx, name, id, dest)
+            err = f.copyID(ctx, id, dest)
             if err != nil {
-                return nil, fmt.Errorf("failed %s %q to %q: %w", name, id, dest, err)
+                return nil, fmt.Errorf("failed copying %q to %q: %w", id, dest, err)
             }
         }
         return nil, nil
@@ -4002,29 +3913,14 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
     case "query":
         if len(arg) == 1 {
             query := arg[0]
-            results, err := f.query(ctx, query)
+            var results, err = f.query(ctx, query)
             if err != nil {
                 return nil, fmt.Errorf("failed to execute query: %q, error: %w", query, err)
             }
             return results, nil
-        }
-        return nil, errors.New("need a query argument")
-    case "rescue":
-        dirID := ""
-        _, delete := opt["delete"]
-        if len(arg) == 0 {
-            // no arguments - list only
-        } else if !delete && len(arg) == 1 {
-            dir := arg[0]
-            dirID, err = f.dirCache.FindDir(ctx, dir, true)
-            if err != nil {
-                return nil, fmt.Errorf("failed to find or create rescue directory %q: %w", dir, err)
-            }
-            fs.Infof(f, "Rescuing orphans into %q", dir)
         } else {
-            return nil, errors.New("syntax error: need 0 or 1 args or -o delete")
+            return nil, errors.New("need a query argument")
         }
-        return nil, f.rescue(ctx, dirID, delete)
     default:
         return nil, fs.ErrorCommandNotFound
     }
@@ -4068,7 +3964,6 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
     }
     return "", hash.ErrUnsupported
 }
-
 func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
     if t != hash.MD5 && t != hash.SHA1 && t != hash.SHA256 {
         return "", hash.ErrUnsupported
@@ -4083,8 +3978,7 @@ func (o *baseObject) Size() int64 {
 // getRemoteInfoWithExport returns a drive.File and the export settings for the remote
 func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
-    info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error,
-) {
+    info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) {
     leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
     if err != nil {
         if err == fs.ErrorDirNotFound {
@@ -4297,13 +4191,12 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
     }
     return o.baseObject.open(ctx, o.url, options...)
 }
-
 func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
     // Update the size with what we are reading as it can change from
     // the HEAD in the listing to this GET. This stops rclone marking
     // the transfer as corrupted.
     var offset, end int64 = 0, -1
-    newOptions := options[:0]
+    var newOptions = options[:0]
     for _, o := range options {
         // Note that Range requests don't work on Google docs:
         // https://developers.google.com/drive/v3/web/manage-downloads#partial_download
@@ -4330,10 +4223,9 @@ func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in
     }
     return
 }
-
 func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
     var offset, limit int64 = 0, -1
-    data := o.content
+    var data = o.content
     for _, option := range options {
         switch x := option.(type) {
         case *fs.SeekOption:
@@ -4358,8 +4250,7 @@ func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.
 }
 
 func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
-    src fs.ObjectInfo,
-) (info *drive.File, err error) {
+    src fs.ObjectInfo) (info *drive.File, err error) {
     // Make the API request to upload metadata and file data.
     size := src.Size()
     if size >= 0 && size < int64(o.fs.opt.UploadCutoff) {
@@ -4437,7 +4328,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
     return nil
 }
-
 func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
     srcMimeType := fs.MimeType(ctx, src)
     importMimeType := ""
@@ -4533,7 +4423,6 @@ func (o *baseObject) Metadata(ctx context.Context) (metadata fs.Metadata, err er
 func (o *documentObject) ext() string {
     return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
 }
-
 func (o *linkObject) ext() string {
     return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
 }


@@ -95,7 +95,7 @@ func TestInternalParseExtensions(t *testing.T) {
         wantErr error
     }{
         {"doc", []string{".doc"}, nil},
-        {" docx ,XLSX, pptx,svg,md", []string{".docx", ".xlsx", ".pptx", ".svg", ".md"}, nil},
+        {" docx ,XLSX, pptx,svg", []string{".docx", ".xlsx", ".pptx", ".svg"}, nil},
         {"docx,svg,Docx", []string{".docx", ".svg"}, nil},
        {"docx,potato,docx", []string{".docx"}, errors.New(`couldn't find MIME type for extension ".potato"`)},
     } {
@@ -479,8 +479,8 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
     require.NoError(t, f.Purge(ctx, "trashDir"))
 }
 
-// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyOrMoveID
-func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
+// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyID
+func (f *Fs) InternalTestCopyID(t *testing.T) {
     ctx := context.Background()
     obj, err := f.NewObject(ctx, existingFile)
     require.NoError(t, err)
@@ -498,7 +498,7 @@ func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
     }
 
     t.Run("BadID", func(t *testing.T) {
-        err = f.copyOrMoveID(ctx, "moveid", "ID-NOT-FOUND", dir+"/")
+        err = f.copyID(ctx, "ID-NOT-FOUND", dir+"/")
         require.Error(t, err)
         assert.Contains(t, err.Error(), "couldn't find id")
     })
@@ -506,31 +506,19 @@ func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
     t.Run("Directory", func(t *testing.T) {
         rootID, err := f.dirCache.RootID(ctx, false)
         require.NoError(t, err)
-        err = f.copyOrMoveID(ctx, "moveid", rootID, dir+"/")
+        err = f.copyID(ctx, rootID, dir+"/")
         require.Error(t, err)
-        assert.Contains(t, err.Error(), "can't moveid directory")
+        assert.Contains(t, err.Error(), "can't copy directory")
     })
 
-    t.Run("MoveWithoutDestName", func(t *testing.T) {
-        err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/")
-        require.NoError(t, err)
-        checkFile(path.Base(existingFile))
-    })
-    t.Run("CopyWithoutDestName", func(t *testing.T) {
-        err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/")
+    t.Run("WithoutDestName", func(t *testing.T) {
+        err = f.copyID(ctx, o.id, dir+"/")
         require.NoError(t, err)
         checkFile(path.Base(existingFile))
     })
-    t.Run("MoveWithDestName", func(t *testing.T) {
-        err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/potato.txt")
-        require.NoError(t, err)
-        checkFile("potato.txt")
-    })
-    t.Run("CopyWithDestName", func(t *testing.T) {
-        err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/potato.txt")
+    t.Run("WithDestName", func(t *testing.T) {
+        err = f.copyID(ctx, o.id, dir+"/potato.txt")
         require.NoError(t, err)
         checkFile("potato.txt")
     })
@@ -659,7 +647,7 @@ func (f *Fs) InternalTest(t *testing.T) {
     })
     t.Run("Shortcuts", f.InternalTestShortcuts)
     t.Run("UnTrash", f.InternalTestUnTrash)
-    t.Run("CopyOrMoveID", f.InternalTestCopyOrMoveID)
+    t.Run("CopyID", f.InternalTestCopyID)
     t.Run("Query", f.InternalTestQuery)
     t.Run("AgeQuery", f.InternalTestAgeQuery)
     t.Run("ShouldRetry", f.InternalTestShouldRetry)


@@ -4,7 +4,6 @@ import (
     "context"
     "encoding/json"
     "fmt"
-    "maps"
     "strconv"
     "strings"
     "sync"
@@ -325,7 +324,9 @@ func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err e
     metadata := make(fs.Metadata, 16)
 
     // Dump user metadata first as it overrides system metadata
-    maps.Copy(metadata, info.Properties)
+    for k, v := range info.Properties {
+        metadata[k] = v
+    }
 
     // System metadata
     metadata["copy-requires-writer-permission"] = fmt.Sprint(info.CopyRequiresWriterPermission)


@@ -177,7 +177,10 @@ func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
         if start >= rx.ContentLength {
             break
         }
-        reqSize = min(rx.ContentLength-start, int64(rx.f.opt.ChunkSize))
+        reqSize = rx.ContentLength - start
+        if reqSize >= int64(rx.f.opt.ChunkSize) {
+            reqSize = int64(rx.f.opt.ChunkSize)
+        }
         chunk = readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
     } else {
         // If size unknown read into buffer


@@ -11,6 +11,7 @@ import (
     "fmt"
 
     "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
+    "github.com/rclone/rclone/fs/fserrors"
 )
 
 // finishBatch commits the batch, returning a batch status to poll or maybe complete
@@ -20,10 +21,14 @@ func (f *Fs) finishBatch(ctx context.Context, items []*files.UploadSessionFinish
     }
     err = f.pacer.Call(func() (bool, error) {
         complete, err = f.srv.UploadSessionFinishBatchV2(arg)
-        if retry, err := shouldRetryExclude(ctx, err); !retry {
-            return retry, err
+        // If error is insufficient space then don't retry
+        if e, ok := err.(files.UploadSessionFinishAPIError); ok {
+            if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
+                err = fserrors.NoRetryError(err)
+                return false, err
+            }
         }
-        // after the first chunk is uploaded, we retry everything except the excluded errors
+        // after the first chunk is uploaded, we retry everything
         return err != nil, err
     })
     if err != nil {


@@ -55,7 +55,10 @@ func (d *digest) Write(p []byte) (n int, err error) {
     n = len(p)
     for len(p) > 0 {
         d.writtenMore = true
-        toWrite := min(bytesPerBlock-d.n, len(p))
+        toWrite := bytesPerBlock - d.n
+        if toWrite > len(p) {
+            toWrite = len(p)
+        }
         _, err = d.blockHash.Write(p[:toWrite])
         if err != nil {
             panic(hashReturnedError)


@@ -11,7 +11,7 @@ import (
 func testChunk(t *testing.T, chunk int) {
     data := make([]byte, chunk)
-    for i := range chunk {
+    for i := 0; i < chunk; i++ {
         data[i] = 'A'
     }
     for _, test := range []struct {


@@ -47,7 +47,6 @@ import (
     "github.com/rclone/rclone/fs/config/obscure"
     "github.com/rclone/rclone/fs/fserrors"
     "github.com/rclone/rclone/fs/hash"
-    "github.com/rclone/rclone/fs/operations"
     "github.com/rclone/rclone/lib/batcher"
     "github.com/rclone/rclone/lib/encoder"
     "github.com/rclone/rclone/lib/oauthutil"
@@ -92,12 +91,9 @@ const (
     maxFileNameLength = 255
 )
 
-type exportAPIFormat string
-type exportExtension string // dotless
-
 var (
     // Description of how to auth for this app
-    dropboxConfig = &oauthutil.Config{
+    dropboxConfig = &oauth2.Config{
         Scopes: []string{
             "files.metadata.write",
             "files.content.write",
@@ -112,8 +108,7 @@ var (
         // AuthURL:  "https://www.dropbox.com/1/oauth2/authorize",
         // TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
         // },
-        AuthURL:      dropbox.OAuthEndpoint("").AuthURL,
-        TokenURL:     dropbox.OAuthEndpoint("").TokenURL,
+        Endpoint:     dropbox.OAuthEndpoint(""),
         ClientID:     rcloneClientID,
         ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
         RedirectURL:  oauthutil.RedirectLocalhostURL,
@@ -135,20 +130,10 @@ var (
         DefaultTimeoutAsync:   10 * time.Second,
         DefaultBatchSizeAsync: 100,
     }
-
-    exportKnownAPIFormats = map[exportAPIFormat]exportExtension{
-        "markdown": "md",
-        "html":     "html",
-    }
-    // Populated based on exportKnownAPIFormats
-    exportKnownExtensions = map[exportExtension]exportAPIFormat{}
-
-    paperExtension         = ".paper"
-    paperTemplateExtension = ".papert"
 )
 
 // Gets an oauth config with the right scopes
-func getOauthConfig(m configmap.Mapper) *oauthutil.Config {
+func getOauthConfig(m configmap.Mapper) *oauth2.Config {
     // If not impersonating, use standard scopes
     if impersonate, _ := m.Get("impersonate"); impersonate == "" {
         return dropboxConfig
@@ -260,61 +245,23 @@ folders.`,
         Help:     "Specify a different Dropbox namespace ID to use as the root for all paths.",
         Default:  "",
         Advanced: true,
-    }, {
-        Name: "export_formats",
-        Help: `Comma separated list of preferred formats for exporting files
-
-Certain Dropbox files can only be accessed by exporting them to another format.
-These include Dropbox Paper documents.
-
-For each such file, rclone will choose the first format on this list that Dropbox
-considers valid. If none is valid, it will choose Dropbox's default format.
-
-Known formats include: "html", "md" (markdown)`,
-        Default:  fs.CommaSepList{"html", "md"},
-        Advanced: true,
-    }, {
-        Name:     "skip_exports",
-        Help:     "Skip exportable files in all listings.\n\nIf given, exportable files practically become invisible to rclone.",
-        Default:  false,
-        Advanced: true,
-    }, {
-        Name:    "show_all_exports",
-        Default: false,
-        Help: `Show all exportable files in listings.
-
-Adding this flag will allow all exportable files to be server side copied.
-Note that rclone doesn't add extensions to the exportable file names in this mode.
-
-Do **not** use this flag when trying to download exportable files - rclone
-will fail to download them.
-`,
-        Advanced: true,
-    },
-    }...), defaultBatcherOptions.FsOptions("For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)\n\n")...),
+    }}...), defaultBatcherOptions.FsOptions("For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)\n\n")...),
     })
-
-    for apiFormat, ext := range exportKnownAPIFormats {
-        exportKnownExtensions[ext] = apiFormat
-    }
 }
 
 // Options defines the configuration for this backend
 type Options struct {
     ChunkSize     fs.SizeSuffix        `config:"chunk_size"`
     Impersonate   string               `config:"impersonate"`
     SharedFiles   bool                 `config:"shared_files"`
     SharedFolders bool                 `config:"shared_folders"`
     BatchMode     string               `config:"batch_mode"`
     BatchSize     int                  `config:"batch_size"`
     BatchTimeout  fs.Duration          `config:"batch_timeout"`
     AsyncBatch    bool                 `config:"async_batch"`
     PacerMinSleep fs.Duration          `config:"pacer_min_sleep"`
     Enc           encoder.MultiEncoder `config:"encoding"`
     RootNsid      string               `config:"root_namespace"`
-    ExportFormats  fs.CommaSepList     `config:"export_formats"`
-    SkipExports    bool                `config:"skip_exports"`
-    ShowAllExports bool                `config:"show_all_exports"`
 }
 
 // Fs represents a remote dropbox server
@@ -334,18 +281,8 @@ type Fs struct {
pacer *fs.Pacer // To pace the API calls pacer *fs.Pacer // To pace the API calls
ns string // The namespace we are using or "" for none ns string // The namespace we are using or "" for none
batcher *batcher.Batcher[*files.UploadSessionFinishArg, *files.FileMetadata] batcher *batcher.Batcher[*files.UploadSessionFinishArg, *files.FileMetadata]
exportExts []exportExtension
} }
type exportType int
const (
notExport exportType = iota // a regular file
exportHide // should be hidden
exportListOnly // listable, but can't export
exportExportable // can export
)
// Object describes a dropbox object // Object describes a dropbox object
// //
// Dropbox Objects always have full metadata // Dropbox Objects always have full metadata
@@ -357,9 +294,6 @@ type Object struct {
bytes int64 // size of the object bytes int64 // size of the object
modTime time.Time // time it was last modified modTime time.Time // time it was last modified
hash string // content_hash of the object hash string // content_hash of the object
exportType exportType
exportAPIFormat exportAPIFormat
} }
 // Name of the remote (as passed into NewFs)
@@ -382,46 +316,32 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }
-// Some specific errors which should be excluded from retries
-func shouldRetryExclude(ctx context.Context, err error) (bool, error) {
-	if err == nil {
-		return false, err
-	}
+// shouldRetry returns a boolean as to whether this err deserves to be
+// retried. It returns the err as a convenience
+func shouldRetry(ctx context.Context, err error) (bool, error) {
 	if fserrors.ContextError(ctx, &err) {
 		return false, err
 	}
-	// First check for specific errors
-	//
-	// These come back from the SDK in a whole host of different
-	// error types, but there doesn't seem to be a consistent way
-	// of reading the error cause, so here we just check using the
-	// error string which isn't perfect but does the job.
+	if err == nil {
+		return false, err
+	}
 	errString := err.Error()
+	// First check for specific errors
 	if strings.Contains(errString, "insufficient_space") {
 		return false, fserrors.FatalError(err)
 	} else if strings.Contains(errString, "malformed_path") {
 		return false, fserrors.NoRetryError(err)
 	}
-	return true, err
-}
-// shouldRetry returns a boolean as to whether this err deserves to be
-// retried. It returns the err as a convenience
-func shouldRetry(ctx context.Context, err error) (bool, error) {
-	if retry, err := shouldRetryExclude(ctx, err); !retry {
-		return retry, err
-	}
 	// Then handle any official Retry-After header from Dropbox's SDK
 	switch e := err.(type) {
 	case auth.RateLimitAPIError:
 		if e.RateLimitError.RetryAfter > 0 {
-			fs.Logf(nil, "Error %v. Too many requests or write operations. Trying again in %d seconds.", err, e.RateLimitError.RetryAfter)
+			fs.Logf(errString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
 			err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
 		}
 		return true, err
 	}
 	// Keep old behavior for backward compatibility
-	errString := err.Error()
 	if strings.Contains(errString, "too_many_write_operations") || strings.Contains(errString, "too_many_requests") || errString == "" {
 		return true, err
 	}
@@ -500,14 +420,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		HeaderGenerator: f.headerGenerator,
 	}
-	for _, e := range opt.ExportFormats {
-		ext := exportExtension(e)
-		if exportKnownExtensions[ext] == "" {
-			return nil, fmt.Errorf("dropbox: unknown export format '%s'", e)
-		}
-		f.exportExts = append(f.exportExts, ext)
-	}
 	// unauthorized config for endpoints that fail with auth
 	ucfg := dropbox.Config{
 		LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
@@ -660,126 +572,38 @@ func (f *Fs) setRoot(root string) {
 	}
 }
-type getMetadataResult struct {
-	entry    files.IsMetadata
-	notFound bool
-	err      error
-}
 // getMetadata gets the metadata for a file or directory
-func (f *Fs) getMetadata(ctx context.Context, objPath string) (res getMetadataResult) {
-	res.err = f.pacer.Call(func() (bool, error) {
-		res.entry, res.err = f.srv.GetMetadata(&files.GetMetadataArg{
+func (f *Fs) getMetadata(ctx context.Context, objPath string) (entry files.IsMetadata, notFound bool, err error) {
+	err = f.pacer.Call(func() (bool, error) {
+		entry, err = f.srv.GetMetadata(&files.GetMetadataArg{
 			Path: f.opt.Enc.FromStandardPath(objPath),
 		})
-		return shouldRetry(ctx, res.err)
+		return shouldRetry(ctx, err)
 	})
-	if res.err != nil {
-		switch e := res.err.(type) {
+	if err != nil {
+		switch e := err.(type) {
 		case files.GetMetadataAPIError:
 			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
-				res.notFound = true
-				res.err = nil
+				notFound = true
+				err = nil
 			}
 		}
 	}
 	return
 }
-// Get metadata such that the result would be exported with the given extension
-// Return a channel that will eventually receive the metadata
-func (f *Fs) getMetadataForExt(ctx context.Context, filePath string, wantExportExtension exportExtension) chan getMetadataResult {
-	ch := make(chan getMetadataResult, 1)
-	wantDownloadable := (wantExportExtension == "")
-	go func() {
-		defer close(ch)
-		res := f.getMetadata(ctx, filePath)
-		info, ok := res.entry.(*files.FileMetadata)
-		if !ok { // Can't check anything about file, just return what we have
-			ch <- res
-			return
-		}
-		// Return notFound if downloadability or extension doesn't match
-		if wantDownloadable != info.IsDownloadable {
-			ch <- getMetadataResult{notFound: true}
-			return
-		}
-		if !info.IsDownloadable {
-			_, ext := f.chooseExportFormat(info)
-			if ext != wantExportExtension {
-				ch <- getMetadataResult{notFound: true}
-				return
-			}
-		}
-		// Return our real result or error
-		ch <- res
-	}()
-	return ch
-}
-// For a given rclone-path, figure out what the Dropbox-path may be, in order of preference.
-// Multiple paths might be plausible, due to export path munging.
-func (f *Fs) possibleMetadatas(ctx context.Context, filePath string) (ret []<-chan getMetadataResult) {
-	ret = []<-chan getMetadataResult{}
-	// Prefer an exact match
-	ret = append(ret, f.getMetadataForExt(ctx, filePath, ""))
-	// Check if we're plausibly an export path, otherwise we're done
-	if f.opt.SkipExports || f.opt.ShowAllExports {
-		return
-	}
-	dotted := path.Ext(filePath)
-	if dotted == "" {
-		return
-	}
-	ext := exportExtension(dotted[1:])
-	if exportKnownExtensions[ext] == "" {
-		return
-	}
-	// We might be an export path! Try all possibilities
-	base := strings.TrimSuffix(filePath, dotted)
-	// `foo.papert.md` will only come from `foo.papert`. Never check something like `foo.papert.paper`
-	if strings.HasSuffix(base, paperTemplateExtension) {
-		ret = append(ret, f.getMetadataForExt(ctx, base, ext))
-		return
-	}
-	// Otherwise, try both `foo.md` coming from `foo`, or from `foo.paper`
-	ret = append(ret, f.getMetadataForExt(ctx, base, ext))
-	ret = append(ret, f.getMetadataForExt(ctx, base+paperExtension, ext))
-	return
-}
 // getFileMetadata gets the metadata for a file
-func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (*files.FileMetadata, error) {
-	var res getMetadataResult
-	// Try all possible metadatas
-	possibleMetadatas := f.possibleMetadatas(ctx, filePath)
-	for _, ch := range possibleMetadatas {
-		res = <-ch
-		if res.err != nil {
-			return nil, res.err
-		}
-		if !res.notFound {
-			break
-		}
-	}
-	if res.notFound {
+func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (fileInfo *files.FileMetadata, err error) {
+	entry, notFound, err := f.getMetadata(ctx, filePath)
+	if err != nil {
+		return nil, err
+	}
+	if notFound {
 		return nil, fs.ErrorObjectNotFound
 	}
-	fileInfo, ok := res.entry.(*files.FileMetadata)
+	fileInfo, ok := entry.(*files.FileMetadata)
 	if !ok {
-		if _, ok = res.entry.(*files.FolderMetadata); ok {
+		if _, ok = entry.(*files.FolderMetadata); ok {
 			return nil, fs.ErrorIsDir
 		}
 		return nil, fs.ErrorNotAFile
@@ -788,15 +612,15 @@ func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (*files.FileM
 	}
 }
 // getDirMetadata gets the metadata for a directory
-func (f *Fs) getDirMetadata(ctx context.Context, dirPath string) (*files.FolderMetadata, error) {
-	res := f.getMetadata(ctx, dirPath)
-	if res.err != nil {
-		return nil, res.err
+func (f *Fs) getDirMetadata(ctx context.Context, dirPath string) (dirInfo *files.FolderMetadata, err error) {
+	entry, notFound, err := f.getMetadata(ctx, dirPath)
+	if err != nil {
+		return nil, err
 	}
-	if res.notFound {
+	if notFound {
 		return nil, fs.ErrorDirNotFound
 	}
-	dirInfo, ok := res.entry.(*files.FolderMetadata)
+	dirInfo, ok := entry.(*files.FolderMetadata)
 	if !ok {
 		return nil, fs.ErrorIsFile
 	}
@@ -996,15 +820,16 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	var res *files.ListFolderResult
 	for {
 		if !started {
-			arg := files.NewListFolderArg(f.opt.Enc.FromStandardPath(root))
-			arg.Recursive = false
-			arg.Limit = 1000
+			arg := files.ListFolderArg{
+				Path:      f.opt.Enc.FromStandardPath(root),
+				Recursive: false,
+				Limit:     1000,
+			}
 			if root == "/" {
 				arg.Path = "" // Specify root folder as empty string
 			}
 			err = f.pacer.Call(func() (bool, error) {
-				res, err = f.srv.ListFolder(arg)
+				res, err = f.srv.ListFolder(&arg)
 				return shouldRetry(ctx, err)
 			})
 			if err != nil {
@@ -1057,9 +882,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 			if err != nil {
 				return nil, err
 			}
-			if o.(*Object).exportType.listable() {
-				entries = append(entries, o)
-			}
+			entries = append(entries, o)
 		}
 	}
 	if !res.HasMore {
@@ -1145,14 +968,16 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
 	}
 	// check directory empty
-	arg := files.NewListFolderArg(encRoot)
-	arg.Recursive = false
+	arg := files.ListFolderArg{
+		Path:      encRoot,
+		Recursive: false,
+	}
 	if root == "/" {
 		arg.Path = "" // Specify root folder as empty string
 	}
 	var res *files.ListFolderResult
 	err = f.pacer.Call(func() (bool, error) {
-		res, err = f.srv.ListFolder(arg)
+		res, err = f.srv.ListFolder(&arg)
 		return shouldRetry(ctx, err)
 	})
 	if err != nil {
@@ -1195,20 +1020,13 @@ func (f *Fs) Precision() time.Duration {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't copy - not same remote type")
 		return nil, fs.ErrorCantCopy
 	}
-	// Find and remove existing object
-	cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
-	if err != nil {
-		return nil, err
-	}
-	defer cleanup(&err)
 	// Temporary Object under construction
 	dstObj := &Object{
 		fs: f,
@@ -1222,6 +1040,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
 			ToPath:   f.opt.Enc.FromStandardPath(dstObj.remotePath()),
 		},
 	}
+	var err error
 	var result *files.RelocationResult
 	err = f.pacer.Call(func() (bool, error) {
 		result, err = f.srv.CopyV2(&arg)
@@ -1333,16 +1152,6 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 		return shouldRetry(ctx, err)
 	})
-	if err != nil && createArg.Settings.Expires != nil && strings.Contains(err.Error(), sharing.SharedLinkSettingsErrorNotAuthorized) {
-		// Some plans can't create links with expiry
-		fs.Debugf(absPath, "can't create link with expiry, trying without")
-		createArg.Settings.Expires = nil
-		err = f.pacer.Call(func() (bool, error) {
-			linkRes, err = f.sharing.CreateSharedLinkWithSettings(&createArg)
-			return shouldRetry(ctx, err)
-		})
-	}
 	if err != nil && strings.Contains(err.Error(),
 		sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
 		fs.Debugf(absPath, "has a public link already, attempting to retrieve it")
@@ -1507,14 +1316,16 @@ func (f *Fs) changeNotifyCursor(ctx context.Context) (cursor string, err error)
 	var startCursor *files.ListFolderGetLatestCursorResult
 	err = f.pacer.Call(func() (bool, error) {
-		arg := files.NewListFolderArg(f.opt.Enc.FromStandardPath(f.slashRoot))
-		arg.Recursive = true
+		arg := files.ListFolderArg{
+			Path:      f.opt.Enc.FromStandardPath(f.slashRoot),
+			Recursive: true,
+		}
 		if arg.Path == "/" {
 			arg.Path = ""
 		}
-		startCursor, err = f.srv.ListFolderGetLatestCursor(arg)
+		startCursor, err = f.srv.ListFolderGetLatestCursor(&arg)
 		return shouldRetry(ctx, err)
 	})
@@ -1618,50 +1429,8 @@ func (f *Fs) Shutdown(ctx context.Context) error {
 	return nil
 }
-func (f *Fs) chooseExportFormat(info *files.FileMetadata) (exportAPIFormat, exportExtension) {
-	// Find API export formats Dropbox supports for this file
-	// Sometimes Dropbox lists a format in ExportAs but not ExportOptions, so check both
-	ei := info.ExportInfo
-	dropboxFormatStrings := append([]string{ei.ExportAs}, ei.ExportOptions...)
-	// Find which extensions these correspond to
-	exportExtensions := map[exportExtension]exportAPIFormat{}
-	var dropboxPreferredAPIFormat exportAPIFormat
-	var dropboxPreferredExtension exportExtension
-	for _, format := range dropboxFormatStrings {
-		apiFormat := exportAPIFormat(format)
-		// Only consider formats we know about
-		if ext, ok := exportKnownAPIFormats[apiFormat]; ok {
-			if dropboxPreferredAPIFormat == "" {
-				dropboxPreferredAPIFormat = apiFormat
-				dropboxPreferredExtension = ext
-			}
-			exportExtensions[ext] = apiFormat
-		}
-	}
-	// See if the user picked a valid extension
-	for _, ext := range f.exportExts {
-		if apiFormat, ok := exportExtensions[ext]; ok {
-			return apiFormat, ext
-		}
-	}
-	// If no matches, prefer the first valid format Dropbox lists
-	return dropboxPreferredAPIFormat, dropboxPreferredExtension
-}
 // ------------------------------------------------------------
-func (et exportType) listable() bool {
-	return et != exportHide
-}
-// something we should _try_ to export
-func (et exportType) exportable() bool {
-	return et == exportExportable || et == exportListOnly
-}
 // Fs returns the parent Fs
 func (o *Object) Fs() fs.Info {
 	return o.fs
@@ -1705,32 +1474,6 @@ func (o *Object) Size() int64 {
 	return o.bytes
 }
-func (o *Object) setMetadataForExport(info *files.FileMetadata) {
-	o.bytes = -1
-	o.hash = ""
-	if o.fs.opt.SkipExports {
-		o.exportType = exportHide
-		return
-	}
-	if o.fs.opt.ShowAllExports {
-		o.exportType = exportListOnly
-		return
-	}
-	var exportExt exportExtension
-	o.exportAPIFormat, exportExt = o.fs.chooseExportFormat(info)
-	if o.exportAPIFormat == "" {
-		o.exportType = exportHide
-	} else {
-		o.exportType = exportExportable
-		// get rid of any paper extension, if present
-		o.remote = strings.TrimSuffix(o.remote, paperExtension)
-		// add the export extension
-		o.remote += "." + string(exportExt)
-	}
-}
 // setMetadataFromEntry sets the fs data from a files.FileMetadata
 //
 // This isn't a complete set of metadata and has an inaccurate date
@@ -1739,10 +1482,6 @@ func (o *Object) setMetadataFromEntry(info *files.FileMetadata) error {
 	o.bytes = int64(info.Size)
 	o.modTime = info.ClientModified
 	o.hash = info.ContentHash
-	if !info.IsDownloadable {
-		o.setMetadataForExport(info)
-	}
 	return nil
 }
@@ -1806,27 +1545,6 @@ func (o *Object) Storable() bool {
 	return true
 }
-func (o *Object) export(ctx context.Context) (in io.ReadCloser, err error) {
-	if o.exportType == exportListOnly || o.exportAPIFormat == "" {
-		fs.Debugf(o.remote, "No export format found")
-		return nil, fs.ErrorObjectNotFound
-	}
-	arg := files.ExportArg{Path: o.id, ExportFormat: string(o.exportAPIFormat)}
-	var exportResult *files.ExportResult
-	err = o.fs.pacer.Call(func() (bool, error) {
-		exportResult, in, err = o.fs.srv.Export(&arg)
-		return shouldRetry(ctx, err)
-	})
-	if err != nil {
-		return nil, err
-	}
-	o.bytes = int64(exportResult.ExportMetadata.Size)
-	o.hash = exportResult.ExportMetadata.ExportHash
-	return
-}
 // Open an object for read
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	if o.fs.opt.SharedFiles {
@@ -1846,10 +1564,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		return
 	}
-	if o.exportType.exportable() {
-		return o.export(ctx)
-	}
 	fs.FixRangeOption(options, o.bytes)
 	headers := fs.OpenOptionHeaders(options)
 	arg := files.DownloadArg{
@@ -1978,10 +1692,14 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
 	err = o.fs.pacer.Call(func() (bool, error) {
 		entry, err = o.fs.srv.UploadSessionFinish(args, nil)
-		if retry, err := shouldRetryExclude(ctx, err); !retry {
-			return retry, err
+		// If error is insufficient space then don't retry
+		if e, ok := err.(files.UploadSessionFinishAPIError); ok {
+			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
+				err = fserrors.NoRetryError(err)
+				return false, err
+			}
 		}
-		// after the first chunk is uploaded, we retry everything except the excluded errors
+		// after the first chunk is uploaded, we retry everything
 		return err != nil, err
 	})
 	if err != nil {

View File

@@ -1,16 +1,9 @@
 package dropbox
 import (
-	"context"
-	"io"
-	"strings"
 	"testing"
-	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox"
-	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
-	"github.com/rclone/rclone/fstest/fstests"
 	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
 )
 func TestInternalCheckPathLength(t *testing.T) {
@@ -49,54 +42,3 @@ func TestInternalCheckPathLength(t *testing.T) {
 		assert.Equal(t, test.ok, err == nil, test.in)
 	}
 }
-func (f *Fs) importPaperForTest(t *testing.T) {
-	content := `# test doc
-Lorem ipsum __dolor__ sit amet
-[link](http://google.com)
-`
-	arg := files.PaperCreateArg{
-		Path:         f.slashRootSlash + "export.paper",
-		ImportFormat: &files.ImportFormat{Tagged: dropbox.Tagged{Tag: files.ImportFormatMarkdown}},
-	}
-	var err error
-	err = f.pacer.Call(func() (bool, error) {
-		reader := strings.NewReader(content)
-		_, err = f.srv.PaperCreate(&arg, reader)
-		return shouldRetry(context.Background(), err)
-	})
-	require.NoError(t, err)
-}
-func (f *Fs) InternalTestPaperExport(t *testing.T) {
-	ctx := context.Background()
-	f.importPaperForTest(t)
-	f.exportExts = []exportExtension{"html"}
-	obj, err := f.NewObject(ctx, "export.html")
-	require.NoError(t, err)
-	rc, err := obj.Open(ctx)
-	require.NoError(t, err)
-	defer func() { require.NoError(t, rc.Close()) }()
-	buf, err := io.ReadAll(rc)
-	require.NoError(t, err)
-	text := string(buf)
-	for _, excerpt := range []string{
-		"Lorem ipsum",
-		"<b>dolor</b>",
-		`href="http://google.com"`,
-	} {
-		require.Contains(t, text, excerpt)
-	}
-}
-func (f *Fs) InternalTest(t *testing.T) {
-	t.Run("PaperExport", f.InternalTestPaperExport)
-}
-var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -216,11 +216,11 @@ var ItemFields = mustFields(Item{})
 // fields returns the JSON fields in use by opt as a | separated
 // string.
-func fields(opt any) (pipeTags string, err error) {
+func fields(opt interface{}) (pipeTags string, err error) {
 	var tags []string
 	def := reflect.ValueOf(opt)
 	defType := def.Type()
-	for i := range def.NumField() {
+	for i := 0; i < def.NumField(); i++ {
 		field := defType.Field(i)
 		tag, ok := field.Tag.Lookup("json")
 		if !ok {
@@ -239,7 +239,7 @@ func fields(opt any) (pipeTags string, err error) {
 // mustFields returns the JSON fields in use by opt as a | separated
 // string. It panics on failure.
-func mustFields(opt any) string {
+func mustFields(opt interface{}) string {
 	tags, err := fields(opt)
 	if err != nil {
 		panic(err)
@@ -351,12 +351,12 @@ type SpaceInfo struct {
 // DeleteResponse is returned from doDeleteFile
 type DeleteResponse struct {
 	Status
-	Deleted        []string `json:"deleted"`
-	Errors         []any    `json:"errors"`
-	ID             string   `json:"fi_id"`
-	BackgroundTask int      `json:"backgroundtask"`
-	UsSize         string   `json:"us_size"`
-	PaSize         string   `json:"pa_size"`
+	Deleted        []string      `json:"deleted"`
+	Errors         []interface{} `json:"errors"`
+	ID             string        `json:"fi_id"`
+	BackgroundTask int           `json:"backgroundtask"`
+	UsSize         string        `json:"us_size"`
+	PaSize         string        `json:"pa_size"`
 	//SpaceInfo SpaceInfo `json:"spaceinfo"`
 }

View File

@@ -371,7 +371,7 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
 }
 // params for rpc
-type params map[string]any
+type params map[string]interface{}
 // rpc calls the rpc.php method of the SME file fabric
 //

View File

@@ -10,7 +10,6 @@ import (
 	"net/http"
 	"net/url"
 	"path"
-	"slices"
 	"strings"
 	"time"
@@ -170,9 +169,11 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
 	}
 	if apiErr, ok := err.(files_sdk.ResponseError); ok {
-		if slices.Contains(retryErrorCodes, apiErr.HttpCode) {
-			fs.Debugf(nil, "Retrying API error %v", err)
-			return true, err
+		for _, e := range retryErrorCodes {
+			if apiErr.HttpCode == e {
+				fs.Debugf(nil, "Retrying API error %v", err)
+				return true, err
+			}
 		}
 	}
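This hunk swaps the Go 1.21 standard-library `slices.Contains` for a hand-rolled loop, presumably so the stable branch builds with an older toolchain. A small sketch contrasting the two spellings (the error-code list is illustrative):

```go
package main

import (
	"fmt"
	"slices" // standard library since Go 1.21
)

var retryErrorCodes = []int{429, 500, 502, 503, 504}

// containsLoop is the pre-1.21 spelling the stable branch falls back to.
func containsLoop(codes []int, code int) bool {
	for _, c := range codes {
		if c == code {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(slices.Contains(retryErrorCodes, 503)) // true
	fmt.Println(containsLoop(retryErrorCodes, 404))    // false
}
```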

View File

@@ -180,28 +180,12 @@ If this is set and no password is supplied then rclone will ask for a password
 			Default: "",
 			Help: `Socks 5 proxy host.
 Supports the format user:pass@host:port, user@host:port, host:port.
 Example:
 	myUser:myPass@localhost:9005
 `,
-			Advanced: true,
-		}, {
-			Name:    "no_check_upload",
-			Default: false,
-			Help: `Don't check the upload is OK
-Normally rclone will try to check the upload exists after it has
-uploaded a file to make sure the size and modification time are as
-expected.
-This flag stops rclone doing these checks. This enables uploading to
-folders which are write only.
-You will likely need to use the --inplace flag also if uploading to
-a write only folder.
-`,
 			Advanced: true,
 		}, {
 			Name:     config.ConfigEncoding,
@@ -248,7 +232,6 @@ type Options struct {
 	AskPassword       bool                 `config:"ask_password"`
 	Enc               encoder.MultiEncoder `config:"encoding"`
 	SocksProxy        string               `config:"socks_proxy"`
-	NoCheckUpload     bool                 `config:"no_check_upload"`
 }
 // Fs represents a remote FTP server
@@ -1320,16 +1303,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return fmt.Errorf("update stor: %w", err)
 	}
 	o.fs.putFtpConnection(&c, nil)
-	if o.fs.opt.NoCheckUpload {
-		o.info = &FileInfo{
-			Name:    o.remote,
-			Size:    uint64(src.Size()),
-			ModTime: src.ModTime(ctx),
-			precise: true,
-			IsDir:   false,
-		}
-		return nil
-	}
 	if err = o.SetModTime(ctx, src.ModTime(ctx)); err != nil {
 		return fmt.Errorf("SetModTime: %w", err)
 	}

View File

@@ -17,7 +17,7 @@ import (
 	"github.com/stretchr/testify/require"
 )
-type settings map[string]any
+type settings map[string]interface{}
 func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
 	fsName := strings.Split(f.Name(), "{")[0] // strip off hash

View File

@@ -25,7 +25,7 @@ import (
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/fs/list"
+	"github.com/rclone/rclone/fs/walk"
 	"github.com/rclone/rclone/lib/dircache"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/pacer"
@@ -734,7 +734,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 }
 // implementation of ListR
-func (f *Fs) listR(ctx context.Context, dir string, list *list.Helper) (err error) {
+func (f *Fs) listR(ctx context.Context, dir string, list *walk.ListRHelper) (err error) {
 	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
 	if err != nil {
 		return err
@@ -820,7 +820,7 @@ func (f *Fs) listR(ctx context.Context, dir string, list *list.Helper) (err erro
 // Don't implement this unless you have a more efficient way
 // of listing recursively than doing a directory traversal.
 func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
-	list := list.NewHelper(callback)
+	list := walk.NewListRHelper(callback)
 	err = f.listR(ctx, dir, list)
 	if err != nil {
 		return err
@@ -1214,7 +1214,7 @@ func (f *Fs) copyTo(ctx context.Context, srcID, srcLeaf, dstLeaf, dstDirectoryID
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't copy - not same remote type")
@@ -1228,19 +1228,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Obj
 		return nil, fmt.Errorf("can't copy %q -> %q as are same name", srcPath, dstPath)
 	}
-	// Find existing object
-	existingObj, err := f.NewObject(ctx, remote)
-	if err == nil {
-		defer func() {
-			// Don't remove existing object if returning an error
-			if err != nil {
-				return
-			}
-			fs.Debugf(existingObj, "Server side copy: removing existing object after successful copy")
-			err = existingObj.Remove(ctx)
-		}()
-	}
 	// Create temporary object
 	dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
 	if err != nil {

View File

@@ -35,7 +35,7 @@ import (
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/fs/list"
+	"github.com/rclone/rclone/fs/walk"
 	"github.com/rclone/rclone/lib/bucket"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/env"
@@ -62,10 +62,9 @@ const (
 var (
 	// Description of how to auth for this app
-	storageConfig = &oauthutil.Config{
+	storageConfig = &oauth2.Config{
 		Scopes:       []string{storage.DevstorageReadWriteScope},
-		AuthURL:      google.Endpoint.AuthURL,
-		TokenURL:     google.Endpoint.TokenURL,
+		Endpoint:     google.Endpoint,
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectURL,
@@ -107,12 +106,6 @@ func init() {
 			Help:      "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
 			Hide:      fs.OptionHideBoth,
 			Sensitive: true,
-		}, {
-			Name:      "access_token",
-			Help:      "Short-lived access token.\n\nLeave blank normally.\nNeeded only if you want use short-lived access token instead of interactive login.",
-			Hide:      fs.OptionHideConfigurator,
-			Sensitive: true,
-			Advanced:  true,
 		}, {
 			Name: "anonymous",
 			Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
@@ -386,7 +379,6 @@ type Options struct {
 	Enc              encoder.MultiEncoder `config:"encoding"`
 	EnvAuth          bool                 `config:"env_auth"`
 	DirectoryMarkers bool                 `config:"directory_markers"`
-	AccessToken      string               `config:"access_token"`
 }
 // Fs represents a remote storage server
@@ -543,9 +535,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		if err != nil {
 			return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
 		}
-	} else if opt.AccessToken != "" {
-		ts := oauth2.Token{AccessToken: opt.AccessToken}
-		oAuthClient = oauth2.NewClient(ctx, oauth2.StaticTokenSource(&ts))
 	} else {
 		oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
 		if err != nil {
@@ -845,7 +834,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 // of listing recursively that doing a directory traversal.
 func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
 	bucket, directory := f.split(dir)
-	list := list.NewHelper(callback)
+	list := walk.NewListRHelper(callback)
 	listR := func(bucket, directory, prefix string, addBucket bool) error {
 		return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *storage.Object, isDirectory bool) error {
 			entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
@@ -955,6 +944,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
 		return e
 	}
 	return f.createDirectoryMarker(ctx, bucket, dir)
 }
 // mkdirParent creates the parent bucket/directory if it doesn't exist

View File

@@ -4,7 +4,6 @@ package googlephotos
 import (
 	"path"
-	"slices"
 	"strings"
 	"sync"
@@ -120,7 +119,7 @@ func (as *albums) _del(album *api.Album) {
 		dirs := as.path[dir]
 		for i, dir := range dirs {
 			if dir == leaf {
-				dirs = slices.Delete(dirs, i, i+1)
+				dirs = append(dirs[:i], dirs[i+1:]...)
 				break
 			}
 		}
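Same toolchain story here: `slices.Delete` (Go 1.21) is replaced with the classic `append` splice. A minimal sketch showing the two are equivalent for removing one element:

```go
package main

import (
	"fmt"
	"slices" // Go 1.21+
)

func main() {
	a := []string{"a", "b", "c", "d"}
	b := []string{"a", "b", "c", "d"}

	// Go 1.21+: remove the element at index 2.
	a = slices.Delete(a, 2, 3)

	// Pre-1.21 equivalent used on the stable branch.
	i := 2
	b = append(b[:i], b[i+1:]...)

	fmt.Println(a) // [a b d]
	fmt.Println(b) // [a b d]
}
```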

View File

@@ -28,11 +28,13 @@ import (
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/log"
 	"github.com/rclone/rclone/lib/batcher"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/rest"
+	"golang.org/x/oauth2"
 	"golang.org/x/oauth2/google"
 )
@@ -59,14 +61,13 @@ const (
 var (
 	// Description of how to auth for this app
-	oauthConfig = &oauthutil.Config{
+	oauthConfig = &oauth2.Config{
 		Scopes: []string{
 			"openid",
 			"profile",
 			scopeReadWrite, // this must be at position scopeAccess
 		},
-		AuthURL:      google.Endpoint.AuthURL,
-		TokenURL:     google.Endpoint.TokenURL,
+		Endpoint:     google.Endpoint,
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectURL,
@@ -159,34 +160,6 @@ listings and transferred.
 Without this flag, archived media will not be visible in directory
 listings and won't be transferred.`,
 			Advanced: true,
-		}, {
-			Name:    "proxy",
-			Default: "",
-			Help: strings.ReplaceAll(`Use the gphotosdl proxy for downloading the full resolution images
-The Google API will deliver images and video which aren't full
-resolution, and/or have EXIF data missing.
-However if you use the gphotosdl proxy then you can download original,
-unchanged images.
-This runs a headless browser in the background.
-Download the software from [gphotosdl](https://github.com/rclone/gphotosdl)
-First run with
-    gphotosdl -login
-Then once you have logged into google photos close the browser window
-and run
-    gphotosdl
-Then supply the parameter |--gphotos-proxy "http://localhost:8282"| to make
-rclone use the proxy.
-`, "|", "`"),
-			Advanced: true,
 		}, {
 			Name:     config.ConfigEncoding,
 			Help:     config.ConfigEncodingHelp,
@@ -208,7 +181,6 @@ type Options struct {
 	BatchMode       string      `config:"batch_mode"`
 	BatchSize       int         `config:"batch_size"`
 	BatchTimeout    fs.Duration `config:"batch_timeout"`
-	Proxy           string      `config:"proxy"`
 }
 // Fs represents a remote storage server
@@ -388,7 +360,7 @@ func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, e
 		Method:  "GET",
 		RootURL: "https://accounts.google.com/.well-known/openid-configuration",
 	}
-	var openIDconfig map[string]any
+	var openIDconfig map[string]interface{}
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err := f.unAuth.CallJSON(ctx, &opts, nil, &openIDconfig)
 		return shouldRetry(ctx, resp, err)
@@ -448,7 +420,7 @@ func (f *Fs) Disconnect(ctx context.Context) (err error) {
 			"token_type_hint": []string{"access_token"},
 		},
 	}
-	var res any
+	var res interface{}
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err := f.srv.CallJSON(ctx, &opts, nil, &res)
 		return shouldRetry(ctx, resp, err)
@@ -482,7 +454,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Med
 // NewObject finds the Object at remote.  If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
 func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
-	// defer log.Trace(f, "remote=%q", remote)("")
+	defer log.Trace(f, "remote=%q", remote)("")
 	return f.newObjectWithInfo(ctx, remote, nil)
 }
@@ -695,7 +667,7 @@ func (f *Fs) listUploads(ctx context.Context, dir string) (entries fs.DirEntries
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	// defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
+	defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
 	match, prefix, pattern := patterns.match(f.root, dir, false)
 	if pattern == nil || pattern.isFile {
 		return nil, fs.ErrorDirNotFound
@@ -712,7 +684,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	// defer log.Trace(f, "src=%+v", src)("")
+	defer log.Trace(f, "src=%+v", src)("")
 	// Temporary Object under construction
 	o := &Object{
 		fs:     f,
@@ -765,7 +737,7 @@ func (f *Fs) getOrCreateAlbum(ctx context.Context, albumTitle string) (album *ap
 // Mkdir creates the album if it doesn't exist
 func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
-	// defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
+	defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
 	match, prefix, pattern := patterns.match(f.root, dir, false)
 	if pattern == nil {
 		return fs.ErrorDirNotFound
@@ -789,7 +761,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
 //
 // Returns an error if it isn't empty
 func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
-	// defer log.Trace(f, "dir=%q")("err=%v", &err)
+	defer log.Trace(f, "dir=%q")("err=%v", &err)
 	match, _, pattern := patterns.match(f.root, dir, false)
 	if pattern == nil {
 		return fs.ErrorDirNotFound
@@ -862,7 +834,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 // Size returns the size of an object in bytes
 func (o *Object) Size() int64 {
-	// defer log.Trace(o, "")("")
+	defer log.Trace(o, "")("")
 	if !o.fs.opt.ReadSize || o.bytes >= 0 {
 		return o.bytes
 	}
@@ -963,7 +935,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
 func (o *Object) ModTime(ctx context.Context) time.Time {
-	// defer log.Trace(o, "")("")
+	defer log.Trace(o, "")("")
 	err := o.readMetaData(ctx)
 	if err != nil {
 		fs.Debugf(o, "ModTime: Failed to read metadata: %v", err)
@@ -993,20 +965,16 @@ func (o *Object) downloadURL() string {
 // Open an object for read
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
-	// defer log.Trace(o, "")("")
+	defer log.Trace(o, "")("")
 	err = o.readMetaData(ctx)
 	if err != nil {
 		fs.Debugf(o, "Open: Failed to read metadata: %v", err)
 		return nil, err
 	}
-	url := o.downloadURL()
-	if o.fs.opt.Proxy != "" {
-		url = strings.TrimRight(o.fs.opt.Proxy, "/") + "/id/" + o.id
-	}
 	var resp *http.Response
 	opts := rest.Opts{
 		Method:  "GET",
-		RootURL: url,
+		RootURL: o.downloadURL(),
 		Options: options,
 	}
 	err = o.fs.pacer.Call(func() (bool, error) {
@@ -1099,7 +1067,7 @@ func (f *Fs) commitBatch(ctx context.Context, items []uploadedItem, results []*a
 //
 // The new object may have been created if an error is returned
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
-	// defer log.Trace(o, "src=%+v", src)("err=%v", &err)
+	defer log.Trace(o, "src=%+v", src)("err=%v", &err)
 	match, _, pattern := patterns.match(o.fs.root, o.remote, true)
 	if pattern == nil || !pattern.isFile || !pattern.canUpload {
 		return errCantUpload
@@ -1168,7 +1136,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	errors := make([]error, 1)
 	results := make([]*api.MediaItem, 1)
 	err = o.fs.commitBatch(ctx, []uploadedItem{uploaded}, results, errors)
-	if err == nil {
+	if err != nil {
 		err = errors[0]
 		info = results[0]
 	}

View File

@@ -2,7 +2,6 @@ package googlephotos
 import (
 	"context"
-	"errors"
 	"fmt"
 	"io"
 	"net/http"
@@ -36,7 +35,7 @@ func TestIntegration(t *testing.T) {
 		*fstest.RemoteName = "TestGooglePhotos:"
 	}
 	f, err := fs.NewFs(ctx, *fstest.RemoteName)
-	if errors.Is(err, fs.ErrorNotFoundInConfigFile) {
+	if err == fs.ErrorNotFoundInConfigFile {
 		t.Skipf("Couldn't create google photos backend - skipping tests: %v", err)
 	}
 	require.NoError(t, err)

View File

@@ -24,7 +24,7 @@ import (
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
 	switch name {
 	case "drop":
 		return nil, f.db.Stop(true)

View File

@@ -18,7 +18,6 @@ import (
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/fspath"
 	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/fs/list"
 	"github.com/rclone/rclone/lib/kv"
 )
@@ -183,9 +182,6 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
 	}
 	f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)
-	// Enable ListP always
-	f.features.ListP = f.ListP
 	cache.PinUntilFinalized(f.Fs, f)
 	return f, err
 }
@@ -241,39 +237,10 @@ func (f *Fs) wrapEntries(baseEntries fs.DirEntries) (hashEntries fs.DirEntries,
 // List the objects and directories in dir into entries.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	return list.WithListP(ctx, dir, f)
-}
-// ListP lists the objects and directories of the Fs starting
-// from dir non recursively into out.
-//
-// dir should be "" to start from the root, and should not
-// have trailing slashes.
-//
-// This should return ErrDirNotFound if the directory isn't
-// found.
-//
-// It should call callback for each tranche of entries read.
-// These need not be returned in any particular order. If
-// callback returns an error then the listing will stop
-// immediately.
-func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
-	wrappedCallback := func(entries fs.DirEntries) error {
-		entries, err := f.wrapEntries(entries)
-		if err != nil {
-			return err
-		}
-		return callback(entries)
+	if entries, err = f.Fs.List(ctx, dir); err != nil {
+		return nil, err
 	}
-	listP := f.Fs.Features().ListP
-	if listP == nil {
-		entries, err := f.Fs.List(ctx, dir)
-		if err != nil {
-			return err
-		}
-		return wrappedCallback(entries)
-	}
-	return listP(ctx, dir, wrappedCallback)
+	return f.wrapEntries(entries)
 }
 // ListR lists the objects and directories recursively into out.
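The deleted `ListP` shows the callback-wrapping pattern: decorate each tranche of entries before forwarding it, and fall back to a one-shot `List` when the wrapped backend has no paged lister. A minimal sketch of that shape, with invented types (this is not rclone's `fs` API):

```go
package main

import "fmt"

// Callback receives one tranche of listing entries at a time.
type Callback func(entries []string) error

// backend is a stand-in for a wrapped Fs; pagedList is nil when the backend
// has no ListP equivalent. All names here are illustrative.
type backend struct {
	list      func(dir string) ([]string, error)
	pagedList func(dir string, cb Callback) error
}

// listP decorates every tranche before passing it on, falling back to a
// single list call when paged listing isn't available - the same shape as
// the hasher ListP removed above.
func listP(b backend, dir string, cb Callback, decorate func(string) string) error {
	wrapped := func(entries []string) error {
		for i := range entries {
			entries[i] = decorate(entries[i])
		}
		return cb(entries)
	}
	if b.pagedList == nil {
		entries, err := b.list(dir)
		if err != nil {
			return err
		}
		return wrapped(entries)
	}
	return b.pagedList(dir, wrapped)
}

func main() {
	b := backend{
		pagedList: func(dir string, cb Callback) error {
			if err := cb([]string{"a"}); err != nil { // first tranche
				return err
			}
			return cb([]string{"b"}) // second tranche
		},
	}
	err := listP(b, "", func(e []string) error { fmt.Println(e); return nil },
		func(s string) string { return s + ".hash" })
	fmt.Println("err:", err)
}
```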

View File

@@ -6,7 +6,6 @@ import (
 	"encoding/gob"
 	"errors"
 	"fmt"
-	"maps"
 	"strings"
 	"time"
@@ -196,7 +195,9 @@ func (op *kvPut) Do(ctx context.Context, b kv.Bucket) (err error) {
 		r.Fp = op.fp
 	}
-	maps.Copy(r.Hashes, op.hashes)
+	for hashType, hashVal := range op.hashes {
+		r.Hashes[hashType] = hashVal
+	}
 	if data, err = r.encode(op.key); err != nil {
 		return fmt.Errorf("marshal failed: %w", err)
 	}
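Another Go 1.21 rollback: `maps.Copy` versus the manual range loop. A short sketch showing they do the same thing:

```go
package main

import (
	"fmt"
	"maps" // standard library since Go 1.21
)

func main() {
	dst := map[string]string{"md5": "old"}
	src := map[string]string{"md5": "new", "sha1": "abc"}

	// Go 1.21+: copy all key/value pairs from src into dst,
	// overwriting existing keys.
	maps.Copy(dst, src)

	// Pre-1.21 equivalent used on the stable branch:
	dst2 := map[string]string{"md5": "old"}
	for k, v := range src {
		dst2[k] = v
	}

	fmt.Println(dst["md5"], dst["sha1"])   // new abc
	fmt.Println(dst2["md5"], dst2["sha1"]) // new abc
}
```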

View File

@@ -31,6 +31,7 @@ import (
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/rest"
+	"golang.org/x/oauth2"
 )
 const (
@@ -47,9 +48,11 @@ const (
 // Globals
 var (
 	// Description of how to auth for this app.
-	oauthConfig = &oauthutil.Config{
-		AuthURL:      "https://my.hidrive.com/client/authorize",
-		TokenURL:     "https://my.hidrive.com/oauth2/token",
+	oauthConfig = &oauth2.Config{
+		Endpoint: oauth2.Endpoint{
+			AuthURL:  "https://my.hidrive.com/client/authorize",
+			TokenURL: "https://my.hidrive.com/oauth2/token",
+		},
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.TitleBarRedirectURL,

View File

@@ -52,7 +52,10 @@ func writeByBlock(p []byte, writer io.Writer, blockSize uint32, bytesInBlock *ui
 	total := len(p)
 	nullBytes := make([]byte, blockSize)
 	for len(p) > 0 {
-		toWrite := min(int(blockSize-*bytesInBlock), len(p))
+		toWrite := int(blockSize - *bytesInBlock)
+		if toWrite > len(p) {
+			toWrite = len(p)
+		}
 		c, err := writer.Write(p[:toWrite])
 		*bytesInBlock += uint32(c)
 		*onlyNullBytesInBlock = *onlyNullBytesInBlock && bytes.Equal(nullBytes[:toWrite], p[:toWrite])
@@ -273,7 +276,7 @@ func (h *hidriveHash) Sum(b []byte) []byte {
 	}
 	checksum := zeroSum
-	for i := range h.levels {
+	for i := 0; i < len(h.levels); i++ {
 		level := h.levels[i]
 		if i < len(h.levels)-1 {
 			// Aggregate non-empty non-final levels.
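The first hunk above replaces the Go 1.21 `min` builtin with an explicit clamp. A tiny sketch of the two spellings of the "write at most what fits in the block" computation, with made-up numbers:

```go
package main

import "fmt"

// clamp is the pre-1.21 spelling of the chunk-size computation shown above.
func clamp(want, have int) int {
	if want > have {
		return have
	}
	return want
}

func main() {
	blockSize, bytesInBlock, pending := 4096, 1000, 200

	// Go 1.21+: min is a builtin, so the write size is a one-liner.
	toWrite := min(blockSize-bytesInBlock, pending)

	fmt.Println(toWrite)                                // 200
	fmt.Println(clamp(blockSize-bytesInBlock, pending)) // 200
}
```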

View File

@@ -216,7 +216,7 @@ func TestLevelWrite(t *testing.T) {
 func TestLevelIsFull(t *testing.T) {
 	content := [hidrivehash.Size]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}
 	l := hidrivehash.NewLevel()
-	for range 256 {
+	for i := 0; i < 256; i++ {
 		assert.False(t, l.(internal.LevelHash).IsFull())
 		written, err := l.Write(content[:])
 		assert.Equal(t, len(content), written)

View File

@@ -180,6 +180,7 @@ func getFsEndpoint(ctx context.Context, client *http.Client, url string, opt *Op
 	}
 	addHeaders(req, opt)
 	res, err := noRedir.Do(req)
+
 	if err != nil {
 		fs.Debugf(nil, "Assuming path is a file as HEAD request could not be sent: %v", err)
 		return createFileResult()
@@ -248,14 +249,6 @@ func (f *Fs) httpConnection(ctx context.Context, opt *Options) (isFile bool, err
 	f.httpClient = client
 	f.endpoint = u
 	f.endpointURL = u.String()
-	if isFile {
-		// Correct root if definitely pointing to a file
-		f.root = path.Dir(f.root)
-		if f.root == "." || f.root == "/" {
-			f.root = ""
-		}
-	}
 	return isFile, nil
 }
@@ -338,13 +331,12 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 // Join's the remote onto the base URL
 func (f *Fs) url(remote string) string {
-	trimmedRemote := strings.TrimLeft(remote, "/") // remove leading "/" since we always have it in f.endpointURL
 	if f.opt.NoEscape {
 		// Directly concatenate without escaping, no_escape behavior
-		return f.endpointURL + trimmedRemote
+		return f.endpointURL + remote
 	}
 	// Default behavior
-	return f.endpointURL + rest.URLPathEscape(trimmedRemote)
+	return f.endpointURL + rest.URLPathEscape(remote)
 }
 // Errors returned by parseName
@@ -512,7 +504,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		entries = append(entries, entry)
 		entriesMu.Unlock()
 	}
-	for range checkers {
+	for i := 0; i < checkers; i++ {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
@@ -747,7 +739,7 @@ It doesn't return anything.
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
 	switch name {
 	case "set":
 		newOpt := f.opt
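The `url()` hunk above is about joining a remote path onto an endpoint that already ends in "/": the newer side trims any leading "/" from the remote, then escapes the path while keeping segment separators. A rough sketch of that joining rule, where `pathEscape` approximates `rest.URLPathEscape` using only the standard library:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// pathEscape escapes a path while keeping "/" separators - an approximation
// of rest.URLPathEscape used above, built from net/url.
func pathEscape(p string) string {
	return (&url.URL{Path: p}).EscapedPath()
}

// join mirrors the url() method in the hunk above: the endpoint always ends
// with "/", so a leading "/" on remote must be trimmed or it doubles up.
func join(endpointURL, remote string, noEscape bool) string {
	remote = strings.TrimLeft(remote, "/")
	if noEscape {
		return endpointURL + remote
	}
	return endpointURL + pathEscape(remote)
}

func main() {
	fmt.Println(join("http://example.com/base/", "/four/under four.txt", false))
	// http://example.com/base/four/under%20four.txt
}
```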

View File

@@ -191,33 +191,6 @@ func TestNewObject(t *testing.T) {
 	assert.Equal(t, fs.ErrorObjectNotFound, err)
 }
-func TestNewObjectWithLeadingSlash(t *testing.T) {
-	f := prepare(t)
-	o, err := f.NewObject(context.Background(), "/four/under four.txt")
-	require.NoError(t, err)
-	assert.Equal(t, "/four/under four.txt", o.Remote())
-	assert.Equal(t, int64(8+lineEndSize), o.Size())
-	_, ok := o.(*Object)
-	assert.True(t, ok)
-	// Test the time is correct on the object
-	tObj := o.ModTime(context.Background())
-	fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
-	require.NoError(t, err)
-	tFile := fi.ModTime()
-	fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)
-	// check object not found
-	o, err = f.NewObject(context.Background(), "/not found.txt")
-	assert.Nil(t, o)
-	assert.Equal(t, fs.ErrorObjectNotFound, err)
-}
 func TestOpen(t *testing.T) {
 	m := prepareServer(t)


@@ -1,166 +0,0 @@
// Package api provides functionality for interacting with the iCloud API.
package api
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/lib/rest"
)
const (
baseEndpoint = "https://www.icloud.com"
homeEndpoint = "https://www.icloud.com"
setupEndpoint = "https://setup.icloud.com/setup/ws/1"
authEndpoint = "https://idmsa.apple.com/appleauth/auth"
)
type sessionSave func(*Session)
// Client defines the client configuration
type Client struct {
appleID string
password string
srv *rest.Client
Session *Session
sessionSaveCallback sessionSave
drive *DriveService
}
// New creates a new Client instance with the provided Apple ID, password, trust token, cookies, and session save callback.
//
// Parameters:
// - appleID: the Apple ID of the user.
// - password: the password of the user.
// - trustToken: the trust token for the session.
// - clientID: the client id for the session.
// - cookies: the cookies for the session.
// - sessionSaveCallback: the callback function to save the session.
func New(appleID, password, trustToken string, clientID string, cookies []*http.Cookie, sessionSaveCallback sessionSave) (*Client, error) {
icloud := &Client{
appleID: appleID,
password: password,
srv: rest.NewClient(fshttp.NewClient(context.Background())),
Session: NewSession(),
sessionSaveCallback: sessionSaveCallback,
}
icloud.Session.TrustToken = trustToken
icloud.Session.Cookies = cookies
icloud.Session.ClientID = clientID
return icloud, nil
}
// DriveService returns the DriveService instance associated with the Client.
func (c *Client) DriveService() (*DriveService, error) {
var err error
if c.drive == nil {
c.drive, err = NewDriveService(c)
if err != nil {
return nil, err
}
}
return c.drive, nil
}
// Request makes a request and retries it if the session is invalid.
//
// This function is the main entry point for making requests to the iCloud
// API. If the initial request returns a 401 (Unauthorized), it will try to
// reauthenticate and retry the request.
func (c *Client) Request(ctx context.Context, opts rest.Opts, request any, response any) (resp *http.Response, err error) {
resp, err = c.Session.Request(ctx, opts, request, response)
if err != nil && resp != nil {
// try to reauth
if resp.StatusCode == 401 || resp.StatusCode == 421 {
err = c.Authenticate(ctx)
if err != nil {
return nil, err
}
if c.Session.Requires2FA() {
return nil, errors.New("trust token expired, please reauth")
}
return c.RequestNoReAuth(ctx, opts, request, response)
}
}
return resp, err
}
// RequestNoReAuth makes a request without re-authenticating.
//
// This function is useful when you have a session that is already
// authenticated, but you need to make a request without triggering
// a re-authentication.
func (c *Client) RequestNoReAuth(ctx context.Context, opts rest.Opts, request any, response any) (resp *http.Response, err error) {
// Make the request without re-authenticating
resp, err = c.Session.Request(ctx, opts, request, response)
return resp, err
}
// Authenticate authenticates the client with the iCloud API.
func (c *Client) Authenticate(ctx context.Context) error {
if c.Session.Cookies != nil {
if err := c.Session.ValidateSession(ctx); err == nil {
fs.Debugf("icloud", "Valid session, no need to reauth")
return nil
}
c.Session.Cookies = nil
}
fs.Debugf("icloud", "Authenticating as %s\n", c.appleID)
err := c.Session.SignIn(ctx, c.appleID, c.password)
if err == nil {
err = c.Session.AuthWithToken(ctx)
if err == nil && c.sessionSaveCallback != nil {
c.sessionSaveCallback(c.Session)
}
}
return err
}
// SignIn signs in the client using the provided context and credentials.
func (c *Client) SignIn(ctx context.Context) error {
return c.Session.SignIn(ctx, c.appleID, c.password)
}
// IntoReader marshals the provided values into a JSON encoded reader
func IntoReader(values any) (*bytes.Reader, error) {
m, err := json.Marshal(values)
if err != nil {
return nil, err
}
return bytes.NewReader(m), nil
}
// RequestError holds info on a result state; iCloud can return a 200 while the result is actually an error
type RequestError struct {
Status string
Text string
}
// Error satisfies the error interface.
func (e *RequestError) Error() string {
return fmt.Sprintf("%s: %s", e.Text, e.Status)
}
func newRequestError(Status string, Text string) *RequestError {
return &RequestError{
Status: strings.ToLower(Status),
Text: Text,
}
}
// newRequestErrorf makes a new error from sprintf parameters.
func newRequestErrorf(Status string, Text string, Parameters ...any) *RequestError {
return newRequestError(strings.ToLower(Status), fmt.Sprintf(Text, Parameters...))
}
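For orientation, a hedged sketch (not part of the diff) of how the removed client composes: New, then Authenticate, then DriveService. The Apple ID, password and client ID are hypothetical placeholders:

func exampleAuth(ctx context.Context) (*DriveService, error) {
	client, err := New("user@example.com", "app-specific-password", "", "example-client-id", nil, func(s *Session) {
		// A real caller would persist s here so later runs can reuse the
		// trust token and skip 2FA.
	})
	if err != nil {
		return nil, err
	}
	if err := client.Authenticate(ctx); err != nil {
		return nil, err
	}
	if client.Session.Requires2FA() {
		// Validate2FACode/TrustSession (see session.go below) would be
		// driven by user input at this point.
		return nil, errors.New("2FA required")
	}
	return client.DriveService()
}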


@@ -1,913 +0,0 @@
package api
import (
"bytes"
"context"
"io"
"mime"
"net/http"
"net/url"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/google/uuid"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/rest"
)
const (
defaultZone = "com.apple.CloudDocs"
statusOk = "OK"
statusEtagConflict = "ETAG_CONFLICT"
)
// DriveService represents an iCloud Drive service.
type DriveService struct {
icloud *Client
RootID string
endpoint string
docsEndpoint string
}
// NewDriveService creates a new DriveService instance.
func NewDriveService(icloud *Client) (*DriveService, error) {
return &DriveService{icloud: icloud, RootID: "FOLDER::com.apple.CloudDocs::root", endpoint: icloud.Session.AccountInfo.Webservices["drivews"].URL, docsEndpoint: icloud.Session.AccountInfo.Webservices["docws"].URL}, nil
}
// GetItemByDriveID retrieves a DriveItem by its Drive ID.
func (d *DriveService) GetItemByDriveID(ctx context.Context, id string, includeChildren bool) (*DriveItem, *http.Response, error) {
items, resp, err := d.GetItemsByDriveID(ctx, []string{id}, includeChildren)
if err != nil {
return nil, resp, err
}
return items[0], resp, err
}
// GetItemsByDriveID retrieves DriveItems by their Drive IDs.
func (d *DriveService) GetItemsByDriveID(ctx context.Context, ids []string, includeChildren bool) ([]*DriveItem, *http.Response, error) {
var err error
_items := []map[string]any{}
for _, id := range ids {
_items = append(_items, map[string]any{
"drivewsid": id,
"partialData": false,
"includeHierarchy": false,
})
}
var body *bytes.Reader
var path string
if !includeChildren {
values := []map[string]any{{
"items": _items,
}}
body, err = IntoReader(values)
if err != nil {
return nil, nil, err
}
path = "/retrieveItemDetails"
} else {
values := _items
body, err = IntoReader(values)
if err != nil {
return nil, nil, err
}
path = "/retrieveItemDetailsInFolders"
}
opts := rest.Opts{
Method: "POST",
Path: path,
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.endpoint,
Body: body,
}
var items []*DriveItem
resp, err := d.icloud.Request(ctx, opts, nil, &items)
if err != nil {
return nil, resp, err
}
return items, resp, err
}
// GetDocByPath retrieves a document by its path.
func (d *DriveService) GetDocByPath(ctx context.Context, path string) (*Document, *http.Response, error) {
values := url.Values{}
values.Set("unified_format", "false")
body, err := IntoReader(path)
if err != nil {
return nil, nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/ws/" + defaultZone + "/list/lookup_by_path",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.docsEndpoint,
Parameters: values,
Body: body,
}
var item []*Document
resp, err := d.icloud.Request(ctx, opts, nil, &item)
if err != nil {
return nil, resp, err
}
return item[0], resp, err
}
// GetItemByPath retrieves a DriveItem by its path.
func (d *DriveService) GetItemByPath(ctx context.Context, path string) (*DriveItem, *http.Response, error) {
values := url.Values{}
values.Set("unified_format", "true")
body, err := IntoReader(path)
if err != nil {
return nil, nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/ws/" + defaultZone + "/list/lookup_by_path",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.docsEndpoint,
Parameters: values,
Body: body,
}
var item []*DriveItem
resp, err := d.icloud.Request(ctx, opts, nil, &item)
if err != nil {
return nil, resp, err
}
return item[0], resp, err
}
// GetDocByItemID retrieves a document by its item ID.
func (d *DriveService) GetDocByItemID(ctx context.Context, id string) (*Document, *http.Response, error) {
values := url.Values{}
values.Set("document_id", id)
values.Set("unified_format", "false") // important
opts := rest.Opts{
Method: "GET",
Path: "/ws/" + defaultZone + "/list/lookup_by_id",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.docsEndpoint,
Parameters: values,
}
var item *Document
resp, err := d.icloud.Request(ctx, opts, nil, &item)
if err != nil {
return nil, resp, err
}
return item, resp, err
}
// GetItemRawByItemID retrieves a DriveItemRaw by its item ID.
func (d *DriveService) GetItemRawByItemID(ctx context.Context, id string) (*DriveItemRaw, *http.Response, error) {
opts := rest.Opts{
Method: "GET",
Path: "/v1/item/" + id,
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.docsEndpoint,
}
var item *DriveItemRaw
resp, err := d.icloud.Request(ctx, opts, nil, &item)
if err != nil {
return nil, resp, err
}
return item, resp, err
}
// GetItemsInFolder retrieves a list of DriveItemRaw objects in a folder with the given ID.
func (d *DriveService) GetItemsInFolder(ctx context.Context, id string, limit int64) ([]*DriveItemRaw, *http.Response, error) {
values := url.Values{}
values.Set("limit", strconv.FormatInt(limit, 10))
opts := rest.Opts{
Method: "GET",
Path: "/v1/enumerate/" + id,
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.docsEndpoint,
Parameters: values,
}
items := struct {
Items []*DriveItemRaw `json:"drive_item"`
}{}
resp, err := d.icloud.Request(ctx, opts, nil, &items)
if err != nil {
return nil, resp, err
}
return items.Items, resp, err
}
// GetDownloadURLByDriveID retrieves the download URL for a file in the DriveService.
func (d *DriveService) GetDownloadURLByDriveID(ctx context.Context, id string) (string, *http.Response, error) {
_, zone, docid := DeconstructDriveID(id)
values := url.Values{}
values.Set("document_id", docid)
if zone == "" {
zone = defaultZone
}
opts := rest.Opts{
Method: "GET",
Path: "/ws/" + zone + "/download/by_id",
Parameters: values,
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.docsEndpoint,
}
var filer *FileRequest
resp, err := d.icloud.Request(ctx, opts, nil, &filer)
if err != nil {
return "", resp, err
}
var url string
if filer.DataToken != nil {
url = filer.DataToken.URL
} else {
url = filer.PackageToken.URL
}
return url, resp, err
}
// DownloadFile downloads a file from the given URL using the provided options.
func (d *DriveService) DownloadFile(ctx context.Context, url string, opt []fs.OpenOption) (*http.Response, error) {
opts := &rest.Opts{
Method: "GET",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: url,
Options: opt,
}
resp, err := d.icloud.srv.Call(ctx, opts)
if err != nil {
// icloud has some weird http codes
if resp.StatusCode == 330 {
loc, err := resp.Location()
if err == nil {
return d.DownloadFile(ctx, loc.String(), opt)
}
}
return resp, err
}
return d.icloud.srv.Call(ctx, opts)
}
// MoveItemToTrashByItemID moves an item to the trash based on the item ID.
func (d *DriveService) MoveItemToTrashByItemID(ctx context.Context, id, etag string, force bool) (*DriveItem, *http.Response, error) {
doc, resp, err := d.GetDocByItemID(ctx, id)
if err != nil {
return nil, resp, err
}
return d.MoveItemToTrashByID(ctx, doc.DriveID(), etag, force)
}
// MoveItemToTrashByID moves an item to the trash based on the item ID.
func (d *DriveService) MoveItemToTrashByID(ctx context.Context, drivewsid, etag string, force bool) (*DriveItem, *http.Response, error) {
values := map[string]any{
"items": []map[string]any{{
"drivewsid": drivewsid,
"etag": etag,
"clientId": drivewsid,
}}}
body, err := IntoReader(values)
if err != nil {
return nil, nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/moveItemsToTrash",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.endpoint,
Body: body,
}
item := struct {
Items []*DriveItem `json:"items"`
}{}
resp, err := d.icloud.Request(ctx, opts, nil, &item)
if err != nil {
return nil, resp, err
}
if item.Items[0].Status != statusOk {
// rerun with latest etag
if force && item.Items[0].Status == "ETAG_CONFLICT" {
return d.MoveItemToTrashByID(ctx, drivewsid, item.Items[0].Etag, false)
}
err = newRequestError(item.Items[0].Status, "unknown request status")
}
return item.Items[0], resp, err
}
// CreateNewFolderByItemID creates a new folder by item ID.
func (d *DriveService) CreateNewFolderByItemID(ctx context.Context, id, name string) (*DriveItem, *http.Response, error) {
doc, resp, err := d.GetDocByItemID(ctx, id)
if err != nil {
return nil, resp, err
}
return d.CreateNewFolderByDriveID(ctx, doc.DriveID(), name)
}
// CreateNewFolderByDriveID creates a new folder by its Drive ID.
func (d *DriveService) CreateNewFolderByDriveID(ctx context.Context, drivewsid, name string) (*DriveItem, *http.Response, error) {
values := map[string]any{
"destinationDrivewsId": drivewsid,
"folders": []map[string]any{{
"clientId": "FOLDER::UNKNOWN_ZONE::TempId-" + uuid.New().String(),
"name": name,
}},
}
body, err := IntoReader(values)
if err != nil {
return nil, nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/createFolders",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.endpoint,
Body: body,
}
var fResp *CreateFoldersResponse
resp, err := d.icloud.Request(ctx, opts, nil, &fResp)
if err != nil {
return nil, resp, err
}
status := fResp.Folders[0].Status
if status != statusOk {
err = newRequestError(status, "unknown request status")
}
return fResp.Folders[0], resp, err
}
// RenameItemByItemID renames a DriveItem by its item ID.
func (d *DriveService) RenameItemByItemID(ctx context.Context, id, etag, name string, force bool) (*DriveItem, *http.Response, error) {
doc, resp, err := d.GetDocByItemID(ctx, id)
if err != nil {
return nil, resp, err
}
return d.RenameItemByDriveID(ctx, doc.DriveID(), doc.Etag, name, force)
}
// RenameItemByDriveID renames a DriveItem by its drive ID.
func (d *DriveService) RenameItemByDriveID(ctx context.Context, id, etag, name string, force bool) (*DriveItem, *http.Response, error) {
values := map[string]any{
"items": []map[string]any{{
"drivewsid": id,
"name": name,
"etag": etag,
// "extension": split[1],
}},
}
body, err := IntoReader(values)
if err != nil {
return nil, nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/renameItems",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.endpoint,
Body: body,
}
var items *DriveItem
resp, err := d.icloud.Request(ctx, opts, nil, &items)
if err != nil {
return nil, resp, err
}
status := items.Items[0].Status
if status != statusOk {
// rerun with latest etag
if force && status == "ETAG_CONFLICT" {
return d.RenameItemByDriveID(ctx, id, items.Items[0].Etag, name, false)
}
err = newRequestErrorf(status, "unknown inner status for: %s %s", opts.Method, resp.Request.URL)
}
return items.Items[0], resp, err
}
// MoveItemByItemID moves an item by its item ID to a destination item ID.
func (d *DriveService) MoveItemByItemID(ctx context.Context, id, etag, dstID string, force bool) (*DriveItem, *http.Response, error) {
docSrc, resp, err := d.GetDocByItemID(ctx, id)
if err != nil {
return nil, resp, err
}
docDst, resp, err := d.GetDocByItemID(ctx, dstID)
if err != nil {
return nil, resp, err
}
return d.MoveItemByDriveID(ctx, docSrc.DriveID(), docSrc.Etag, docDst.DriveID(), force)
}
// MoveItemByDocID moves an item by its doc ID.
// func (d *DriveService) MoveItemByDocID(ctx context.Context, srcDocID, srcEtag, dstDocID string, force bool) (*DriveItem, *http.Response, error) {
// return d.MoveItemByDriveID(ctx, srcDocID, srcEtag, docDst.DriveID(), force)
// }
// MoveItemByDriveID moves an item by its drive ID.
func (d *DriveService) MoveItemByDriveID(ctx context.Context, id, etag, dstID string, force bool) (*DriveItem, *http.Response, error) {
values := map[string]any{
"destinationDrivewsId": dstID,
"items": []map[string]any{{
"drivewsid": id,
"etag": etag,
"clientId": id,
}},
}
body, err := IntoReader(values)
if err != nil {
return nil, nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/moveItems",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.endpoint,
Body: body,
}
var items *DriveItem
resp, err := d.icloud.Request(ctx, opts, nil, &items)
if err != nil {
return nil, resp, err
}
status := items.Items[0].Status
if status != statusOk {
// rerun with latest etag
if force && status == "ETAG_CONFLICT" {
return d.MoveItemByDriveID(ctx, id, items.Items[0].Etag, dstID, false)
}
err = newRequestErrorf(status, "unknown inner status for: %s %s", opts.Method, resp.Request.URL)
}
return items.Items[0], resp, err
}
// CopyDocByItemID copies a document by its item ID.
func (d *DriveService) CopyDocByItemID(ctx context.Context, itemID string) (*DriveItemRaw, *http.Response, error) {
// Putting the name in info doesn't work; extension does, so assume this is a bug in the endpoint
values := map[string]any{
"info_to_update": map[string]any{},
}
body, err := IntoReader(values)
if err != nil {
return nil, nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/v1/item/copy/" + itemID,
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.docsEndpoint,
Body: body,
}
var info *DriveItemRaw
resp, err := d.icloud.Request(ctx, opts, nil, &info)
if err != nil {
return nil, resp, err
}
return info, resp, err
}
// CreateUpload creates a URL for an upload.
func (d *DriveService) CreateUpload(ctx context.Context, size int64, name string) (*UploadResponse, *http.Response, error) {
// first we need to request an upload url
values := map[string]any{
"filename": name,
"type": "FILE",
"size": strconv.FormatInt(size, 10),
"content_type": GetContentTypeForFile(name),
}
body, err := IntoReader(values)
if err != nil {
return nil, nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/ws/" + defaultZone + "/upload/web",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.docsEndpoint,
Body: body,
}
var responseInfo []*UploadResponse
resp, err := d.icloud.Request(ctx, opts, nil, &responseInfo)
if err != nil {
return nil, resp, err
}
return responseInfo[0], resp, err
}
// Upload uploads a file to the given url
func (d *DriveService) Upload(ctx context.Context, in io.Reader, size int64, name, uploadURL string) (*SingleFileResponse, *http.Response, error) {
// TODO: implement multipart upload
opts := rest.Opts{
Method: "POST",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: uploadURL,
Body: in,
ContentLength: &size,
ContentType: GetContentTypeForFile(name),
// MultipartContentName: "files",
MultipartFileName: name,
}
var singleFileResponse *SingleFileResponse
resp, err := d.icloud.Request(ctx, opts, nil, &singleFileResponse)
if err != nil {
return nil, resp, err
}
return singleFileResponse, resp, err
}
// UpdateFile updates a file in the DriveService.
//
// ctx: the context.Context object for the request.
// r: a pointer to the UpdateFileInfo struct containing the information for the file update.
// Returns a pointer to the DriveItem struct representing the updated file, the http.Response object, and an error if any.
func (d *DriveService) UpdateFile(ctx context.Context, r *UpdateFileInfo) (*DriveItem, *http.Response, error) {
body, err := IntoReader(r)
if err != nil {
return nil, nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/ws/" + defaultZone + "/update/documents",
ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
RootURL: d.docsEndpoint,
Body: body,
}
var responseInfo *DocumentUpdateResponse
resp, err := d.icloud.Request(ctx, opts, nil, &responseInfo)
if err != nil {
return nil, resp, err
}
doc := responseInfo.Results[0].Document
item := DriveItem{
Drivewsid: "FILE::com.apple.CloudDocs::" + doc.DocumentID,
Docwsid: doc.DocumentID,
Itemid: doc.ItemID,
Etag: doc.Etag,
ParentID: doc.ParentID,
DateModified: time.Unix(r.Mtime, 0),
DateCreated: time.Unix(r.Mtime, 0),
Type: doc.Type,
Name: doc.Name,
Size: doc.Size,
}
return &item, resp, err
}
// UpdateFileInfo represents the information for an update to a file in the DriveService.
type UpdateFileInfo struct {
AllowConflict bool `json:"allow_conflict"`
Btime int64 `json:"btime"`
Command string `json:"command"`
CreateShortGUID bool `json:"create_short_guid"`
Data struct {
Receipt string `json:"receipt,omitempty"`
ReferenceSignature string `json:"reference_signature,omitempty"`
Signature string `json:"signature,omitempty"`
Size int64 `json:"size,omitempty"`
WrappingKey string `json:"wrapping_key,omitempty"`
} `json:"data,omitempty"`
DocumentID string `json:"document_id"`
FileFlags FileFlags `json:"file_flags"`
Mtime int64 `json:"mtime"`
Path struct {
Path string `json:"path"`
StartingDocumentID string `json:"starting_document_id"`
} `json:"path"`
}
// FileFlags defines the file flags for a document.
type FileFlags struct {
IsExecutable bool `json:"is_executable"`
IsHidden bool `json:"is_hidden"`
IsWritable bool `json:"is_writable"`
}
// NewUpdateFileInfo creates a new UpdateFileInfo object with default values.
//
// Returns an UpdateFileInfo object.
func NewUpdateFileInfo() UpdateFileInfo {
return UpdateFileInfo{
Command: "add_file",
CreateShortGUID: true,
AllowConflict: true,
FileFlags: FileFlags{
IsExecutable: true,
IsHidden: false,
IsWritable: true,
},
}
}
// DriveItemRaw is a raw drive item.
// Not sure what to call this, but there seems to be a "unified" and a non-"unified" drive item response. This is the non-unified one.
type DriveItemRaw struct {
ItemID string `json:"item_id"`
ItemInfo *DriveItemRawInfo `json:"item_info"`
}
// SplitName splits the name of a DriveItemRaw into its name and extension.
//
// It returns the name and extension as separate strings. If the name ends with a dot,
// it means there is no extension, so an empty string is returned for the extension.
// If the name does not contain a dot, there is no extension either.
func (d *DriveItemRaw) SplitName() (string, string) {
name := d.ItemInfo.Name
// ends with a dot, no extension
if strings.HasSuffix(name, ".") {
return name, ""
}
lastInd := strings.LastIndex(name, ".")
if lastInd == -1 {
return name, ""
}
return name[:lastInd], name[lastInd+1:]
}
// ModTime returns the modification time of the DriveItemRaw.
//
// It parses the ModifiedAt field of the ItemInfo struct and converts it to a time.Time value.
// If the parsing fails, it returns the zero value of time.Time.
// The returned time.Time value represents the modification time of the DriveItemRaw.
func (d *DriveItemRaw) ModTime() time.Time {
i, err := strconv.ParseInt(d.ItemInfo.ModifiedAt, 10, 64)
if err != nil {
return time.Time{}
}
return time.UnixMilli(i)
}
// CreatedTime returns the creation time of the DriveItemRaw.
//
// It parses the CreatedAt field of the ItemInfo struct and converts it to a time.Time value.
// If the parsing fails, it returns the zero value of time.Time.
// The returned time.Time
func (d *DriveItemRaw) CreatedTime() time.Time {
i, err := strconv.ParseInt(d.ItemInfo.CreatedAt, 10, 64)
if err != nil {
return time.Time{}
}
return time.UnixMilli(i)
}
// DriveItemRawInfo is the raw information about a drive item.
type DriveItemRawInfo struct {
Name string `json:"name"`
// Extension is absolutely borked on endpoints so don't use it.
Extension string `json:"extension"`
Size int64 `json:"size,string"`
Type string `json:"type"`
Version string `json:"version"`
ModifiedAt string `json:"modified_at"`
CreatedAt string `json:"created_at"`
Urls struct {
URLDownload string `json:"url_download"`
} `json:"urls"`
}
// IntoDriveItem converts a DriveItemRaw into a DriveItem.
//
// It takes no parameters.
// It returns a pointer to a DriveItem.
func (d *DriveItemRaw) IntoDriveItem() *DriveItem {
name, extension := d.SplitName()
return &DriveItem{
Itemid: d.ItemID,
Name: name,
Extension: extension,
Type: d.ItemInfo.Type,
Etag: d.ItemInfo.Version,
DateModified: d.ModTime(),
DateCreated: d.CreatedTime(),
Size: d.ItemInfo.Size,
Urls: d.ItemInfo.Urls,
}
}
// DocumentUpdateResponse is the response of a document update request.
type DocumentUpdateResponse struct {
Status struct {
StatusCode int `json:"status_code"`
ErrorMessage string `json:"error_message"`
} `json:"status"`
Results []struct {
Status struct {
StatusCode int `json:"status_code"`
ErrorMessage string `json:"error_message"`
} `json:"status"`
OperationID any `json:"operation_id"`
Document *Document `json:"document"`
} `json:"results"`
}
// Document represents a document on iCloud.
type Document struct {
Status struct {
StatusCode int `json:"status_code"`
ErrorMessage string `json:"error_message"`
} `json:"status"`
DocumentID string `json:"document_id"`
ItemID string `json:"item_id"`
Urls struct {
URLDownload string `json:"url_download"`
} `json:"urls"`
Etag string `json:"etag"`
ParentID string `json:"parent_id"`
Name string `json:"name"`
Type string `json:"type"`
Deleted bool `json:"deleted"`
Mtime int64 `json:"mtime"`
LastEditorName string `json:"last_editor_name"`
Data DocumentData `json:"data"`
Size int64 `json:"size"`
Btime int64 `json:"btime"`
Zone string `json:"zone"`
FileFlags struct {
IsExecutable bool `json:"is_executable"`
IsWritable bool `json:"is_writable"`
IsHidden bool `json:"is_hidden"`
} `json:"file_flags"`
LastOpenedTime int64 `json:"lastOpenedTime"`
RestorePath any `json:"restorePath"`
HasChainedParent bool `json:"hasChainedParent"`
}
// DriveID returns the drive ID of the Document.
func (d *Document) DriveID() string {
if d.Zone == "" {
d.Zone = defaultZone
}
return d.Type + "::" + d.Zone + "::" + d.DocumentID
}
// DocumentData represents the data of a document.
type DocumentData struct {
Signature string `json:"signature"`
Owner string `json:"owner"`
Size int64 `json:"size"`
ReferenceSignature string `json:"reference_signature"`
WrappingKey string `json:"wrapping_key"`
PcsInfo string `json:"pcsInfo"`
}
// SingleFileResponse is the response of a single file request.
type SingleFileResponse struct {
SingleFile *SingleFileInfo `json:"singleFile"`
}
// SingleFileInfo represents the information of a single file.
type SingleFileInfo struct {
ReferenceSignature string `json:"referenceChecksum"`
Size int64 `json:"size"`
Signature string `json:"fileChecksum"`
WrappingKey string `json:"wrappingKey"`
Receipt string `json:"receipt"`
}
// UploadResponse is the response of an upload request.
type UploadResponse struct {
URL string `json:"url"`
DocumentID string `json:"document_id"`
}
// FileRequestToken represents the token of a file request.
type FileRequestToken struct {
URL string `json:"url"`
Token string `json:"token"`
Signature string `json:"signature"`
WrappingKey string `json:"wrapping_key"`
ReferenceSignature string `json:"reference_signature"`
}
// FileRequest represents the request of a file.
type FileRequest struct {
DocumentID string `json:"document_id"`
ItemID string `json:"item_id"`
OwnerDsid int64 `json:"owner_dsid"`
DataToken *FileRequestToken `json:"data_token,omitempty"`
PackageToken *FileRequestToken `json:"package_token,omitempty"`
DoubleEtag string `json:"double_etag"`
}
// CreateFoldersResponse is the response of a create folders request.
type CreateFoldersResponse struct {
Folders []*DriveItem `json:"folders"`
}
// DriveItem represents an item on iCloud.
type DriveItem struct {
DateCreated time.Time `json:"dateCreated"`
Drivewsid string `json:"drivewsid"`
Docwsid string `json:"docwsid"`
Itemid string `json:"item_id"`
Zone string `json:"zone"`
Name string `json:"name"`
ParentID string `json:"parentId"`
Hierarchy []DriveItem `json:"hierarchy"`
Etag string `json:"etag"`
Type string `json:"type"`
AssetQuota int64 `json:"assetQuota"`
FileCount int64 `json:"fileCount"`
ShareCount int64 `json:"shareCount"`
ShareAliasCount int64 `json:"shareAliasCount"`
DirectChildrenCount int64 `json:"directChildrenCount"`
Items []*DriveItem `json:"items"`
NumberOfItems int64 `json:"numberOfItems"`
Status string `json:"status"`
Extension string `json:"extension,omitempty"`
DateModified time.Time `json:"dateModified,omitempty"`
DateChanged time.Time `json:"dateChanged,omitempty"`
Size int64 `json:"size,omitempty"`
LastOpenTime time.Time `json:"lastOpenTime,omitempty"`
Urls struct {
URLDownload string `json:"url_download"`
} `json:"urls"`
}
// IsFolder returns true if the item is a folder.
func (d *DriveItem) IsFolder() bool {
return d.Type == "FOLDER" || d.Type == "APP_CONTAINER" || d.Type == "APP_LIBRARY"
}
// DownloadURL returns the download URL of the item.
func (d *DriveItem) DownloadURL() string {
return d.Urls.URLDownload
}
// FullName returns the full name of the item.
// name + extension
func (d *DriveItem) FullName() string {
if d.Extension != "" {
return d.Name + "." + d.Extension
}
return d.Name
}
// GetDocIDFromDriveID returns the DocumentID from the drive ID.
func GetDocIDFromDriveID(id string) string {
split := strings.Split(id, "::")
return split[len(split)-1]
}
// DeconstructDriveID returns the document type, zone, and document ID from the drive ID.
func DeconstructDriveID(id string) (docType, zone, docid string) {
split := strings.Split(id, "::")
if len(split) < 3 {
return "", "", id
}
return split[0], split[1], split[2]
}
// ConstructDriveID constructs a drive ID from the given components.
func ConstructDriveID(id string, zone string, t string) string {
return strings.Join([]string{t, zone, id}, "::")
}
// GetContentTypeForFile detects content type for given file name.
func GetContentTypeForFile(name string) string {
// detect MIME type by looking at the filename only
mimeType := mime.TypeByExtension(filepath.Ext(name))
if mimeType == "" {
// api requires a mime type passed in
mimeType = "text/plain"
}
return strings.Split(mimeType, ";")[0]
}
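A small sketch (not part of the diff) exercising the pure ID and MIME helpers above; assumes a fmt import:

func exampleDriveIDs() {
	id := ConstructDriveID("ABC123", defaultZone, "FILE")
	docType, zone, docid := DeconstructDriveID(id)
	fmt.Println(id)                                  // FILE::com.apple.CloudDocs::ABC123
	fmt.Println(docType, zone, docid)                // FILE com.apple.CloudDocs ABC123
	fmt.Println(GetDocIDFromDriveID(id))             // ABC123
	fmt.Println(GetContentTypeForFile("report.pdf")) // application/pdf
}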


@@ -1,406 +0,0 @@
package api
import (
"context"
"fmt"
"maps"
"net/http"
"net/url"
"slices"
"strings"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/lib/rest"
)
// Session represents an iCloud session
type Session struct {
SessionToken string `json:"session_token"`
Scnt string `json:"scnt"`
SessionID string `json:"session_id"`
AccountCountry string `json:"account_country"`
TrustToken string `json:"trust_token"`
ClientID string `json:"client_id"`
Cookies []*http.Cookie `json:"cookies"`
AccountInfo AccountInfo `json:"account_info"`
srv *rest.Client `json:"-"`
}
// String returns the session as a string
// func (s *Session) String() string {
// jsession, _ := json.Marshal(s)
// return string(jsession)
// }
// Request makes a request
func (s *Session) Request(ctx context.Context, opts rest.Opts, request any, response any) (*http.Response, error) {
resp, err := s.srv.CallJSON(ctx, &opts, &request, &response)
if err != nil {
return resp, err
}
if val := resp.Header.Get("X-Apple-ID-Account-Country"); val != "" {
s.AccountCountry = val
}
if val := resp.Header.Get("X-Apple-ID-Session-Id"); val != "" {
s.SessionID = val
}
if val := resp.Header.Get("X-Apple-Session-Token"); val != "" {
s.SessionToken = val
}
if val := resp.Header.Get("X-Apple-TwoSV-Trust-Token"); val != "" {
s.TrustToken = val
}
if val := resp.Header.Get("scnt"); val != "" {
s.Scnt = val
}
return resp, nil
}
// Requires2FA returns true if the session requires 2FA
func (s *Session) Requires2FA() bool {
return s.AccountInfo.DsInfo.HsaVersion == 2 && s.AccountInfo.HsaChallengeRequired
}
// SignIn signs in the session
func (s *Session) SignIn(ctx context.Context, appleID, password string) error {
trustTokens := []string{}
if s.TrustToken != "" {
trustTokens = []string{s.TrustToken}
}
values := map[string]any{
"accountName": appleID,
"password": password,
"rememberMe": true,
"trustTokens": trustTokens,
}
body, err := IntoReader(values)
if err != nil {
return err
}
opts := rest.Opts{
Method: "POST",
Path: "/signin",
Parameters: url.Values{},
ExtraHeaders: s.GetAuthHeaders(map[string]string{}),
RootURL: authEndpoint,
IgnoreStatus: true, // need to handle 409 for hsa2
NoResponse: true,
Body: body,
}
opts.Parameters.Set("isRememberMeEnabled", "true")
_, err = s.Request(ctx, opts, nil, nil)
return err
}
// AuthWithToken authenticates the session
func (s *Session) AuthWithToken(ctx context.Context) error {
values := map[string]any{
"accountCountryCode": s.AccountCountry,
"dsWebAuthToken": s.SessionToken,
"extended_login": true,
"trustToken": s.TrustToken,
}
body, err := IntoReader(values)
if err != nil {
return err
}
opts := rest.Opts{
Method: "POST",
Path: "/accountLogin",
ExtraHeaders: GetCommonHeaders(map[string]string{}),
RootURL: setupEndpoint,
Body: body,
}
resp, err := s.Request(ctx, opts, nil, &s.AccountInfo)
if err == nil {
s.Cookies = resp.Cookies()
}
return err
}
// Validate2FACode validates the 2FA code
func (s *Session) Validate2FACode(ctx context.Context, code string) error {
values := map[string]any{"securityCode": map[string]string{"code": code}}
body, err := IntoReader(values)
if err != nil {
return err
}
headers := s.GetAuthHeaders(map[string]string{})
headers["scnt"] = s.Scnt
headers["X-Apple-ID-Session-Id"] = s.SessionID
opts := rest.Opts{
Method: "POST",
Path: "/verify/trusteddevice/securitycode",
ExtraHeaders: headers,
RootURL: authEndpoint,
Body: body,
NoResponse: true,
}
_, err = s.Request(ctx, opts, nil, nil)
if err == nil {
if err := s.TrustSession(ctx); err != nil {
return err
}
return nil
}
return fmt.Errorf("validate2FACode failed: %w", err)
}
// TrustSession trusts the session
func (s *Session) TrustSession(ctx context.Context) error {
headers := s.GetAuthHeaders(map[string]string{})
headers["scnt"] = s.Scnt
headers["X-Apple-ID-Session-Id"] = s.SessionID
opts := rest.Opts{
Method: "GET",
Path: "/2sv/trust",
ExtraHeaders: headers,
RootURL: authEndpoint,
NoResponse: true,
ContentLength: common.Int64(0),
}
_, err := s.Request(ctx, opts, nil, nil)
if err != nil {
return fmt.Errorf("trustSession failed: %w", err)
}
return s.AuthWithToken(ctx)
}
// ValidateSession validates the session
func (s *Session) ValidateSession(ctx context.Context) error {
opts := rest.Opts{
Method: "POST",
Path: "/validate",
ExtraHeaders: s.GetHeaders(map[string]string{}),
RootURL: setupEndpoint,
ContentLength: common.Int64(0),
}
_, err := s.Request(ctx, opts, nil, &s.AccountInfo)
if err != nil {
return fmt.Errorf("validateSession failed: %w", err)
}
return nil
}
// GetAuthHeaders returns the authentication headers for the session.
//
// It takes an `overwrite` map[string]string parameter which allows
// overwriting the default headers. It returns a map[string]string.
func (s *Session) GetAuthHeaders(overwrite map[string]string) map[string]string {
headers := map[string]string{
"Accept": "application/json",
"Content-Type": "application/json",
"X-Apple-OAuth-Client-Id": s.ClientID,
"X-Apple-OAuth-Client-Type": "firstPartyAuth",
"X-Apple-OAuth-Redirect-URI": "https://www.icloud.com",
"X-Apple-OAuth-Require-Grant-Code": "true",
"X-Apple-OAuth-Response-Mode": "web_message",
"X-Apple-OAuth-Response-Type": "code",
"X-Apple-OAuth-State": s.ClientID,
"X-Apple-Widget-Key": s.ClientID,
"Origin": homeEndpoint,
"Referer": fmt.Sprintf("%s/", homeEndpoint),
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0",
}
maps.Copy(headers, overwrite)
return headers
}
// GetHeaders Gets the authentication headers required for a request
func (s *Session) GetHeaders(overwrite map[string]string) map[string]string {
headers := GetCommonHeaders(map[string]string{})
headers["Cookie"] = s.GetCookieString()
maps.Copy(headers, overwrite)
return headers
}
// GetCookieString returns the cookie header string for the session.
func (s *Session) GetCookieString() string {
cookieHeader := ""
// we only care about name and value.
for _, cookie := range s.Cookies {
cookieHeader = cookieHeader + cookie.Name + "=" + cookie.Value + ";"
}
return cookieHeader
}
// GetCommonHeaders generates common HTTP headers with optional overwrite.
func GetCommonHeaders(overwrite map[string]string) map[string]string {
headers := map[string]string{
"Content-Type": "application/json",
"Origin": baseEndpoint,
"Referer": fmt.Sprintf("%s/", baseEndpoint),
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0",
}
maps.Copy(headers, overwrite)
return headers
}
// MergeCookies merges two slices of http.Cookies, ensuring no duplicates are added.
func MergeCookies(left []*http.Cookie, right []*http.Cookie) ([]*http.Cookie, error) {
var hashes []string
for _, cookie := range right {
hashes = append(hashes, cookie.Raw)
}
for _, cookie := range left {
if !slices.Contains(hashes, cookie.Raw) {
right = append(right, cookie)
}
}
return right, nil
}
// GetCookiesForDomain filters the provided cookies based on the domain of the given URL.
func GetCookiesForDomain(url *url.URL, cookies []*http.Cookie) ([]*http.Cookie, error) {
var domainCookies []*http.Cookie
for _, cookie := range cookies {
if strings.HasSuffix(url.Host, cookie.Domain) {
domainCookies = append(domainCookies, cookie)
}
}
return domainCookies, nil
}
// NewSession creates a new Session instance with default values.
func NewSession() *Session {
session := &Session{}
session.srv = rest.NewClient(fshttp.NewClient(context.Background())).SetRoot(baseEndpoint)
//session.ClientID = "auth-" + uuid.New().String()
return session
}
// AccountInfo represents account info
type AccountInfo struct {
DsInfo *ValidateDataDsInfo `json:"dsInfo"`
HasMinimumDeviceForPhotosWeb bool `json:"hasMinimumDeviceForPhotosWeb"`
ICDPEnabled bool `json:"iCDPEnabled"`
Webservices map[string]*webService `json:"webservices"`
PcsEnabled bool `json:"pcsEnabled"`
TermsUpdateNeeded bool `json:"termsUpdateNeeded"`
ConfigBag struct {
Urls struct {
AccountCreateUI string `json:"accountCreateUI"`
AccountLoginUI string `json:"accountLoginUI"`
AccountLogin string `json:"accountLogin"`
AccountRepairUI string `json:"accountRepairUI"`
DownloadICloudTerms string `json:"downloadICloudTerms"`
RepairDone string `json:"repairDone"`
AccountAuthorizeUI string `json:"accountAuthorizeUI"`
VettingURLForEmail string `json:"vettingUrlForEmail"`
AccountCreate string `json:"accountCreate"`
GetICloudTerms string `json:"getICloudTerms"`
VettingURLForPhone string `json:"vettingUrlForPhone"`
} `json:"urls"`
AccountCreateEnabled bool `json:"accountCreateEnabled"`
} `json:"configBag"`
HsaTrustedBrowser bool `json:"hsaTrustedBrowser"`
AppsOrder []string `json:"appsOrder"`
Version int `json:"version"`
IsExtendedLogin bool `json:"isExtendedLogin"`
PcsServiceIdentitiesIncluded bool `json:"pcsServiceIdentitiesIncluded"`
IsRepairNeeded bool `json:"isRepairNeeded"`
HsaChallengeRequired bool `json:"hsaChallengeRequired"`
RequestInfo struct {
Country string `json:"country"`
TimeZone string `json:"timeZone"`
Region string `json:"region"`
} `json:"requestInfo"`
PcsDeleted bool `json:"pcsDeleted"`
ICloudInfo struct {
SafariBookmarksHasMigratedToCloudKit bool `json:"SafariBookmarksHasMigratedToCloudKit"`
} `json:"iCloudInfo"`
Apps map[string]*ValidateDataApp `json:"apps"`
}
// ValidateDataDsInfo represents validation info
type ValidateDataDsInfo struct {
HsaVersion int `json:"hsaVersion"`
LastName string `json:"lastName"`
ICDPEnabled bool `json:"iCDPEnabled"`
TantorMigrated bool `json:"tantorMigrated"`
Dsid string `json:"dsid"`
HsaEnabled bool `json:"hsaEnabled"`
IsHideMyEmailSubscriptionActive bool `json:"isHideMyEmailSubscriptionActive"`
IroncadeMigrated bool `json:"ironcadeMigrated"`
Locale string `json:"locale"`
BrZoneConsolidated bool `json:"brZoneConsolidated"`
ICDRSCapableDeviceList string `json:"ICDRSCapableDeviceList"`
IsManagedAppleID bool `json:"isManagedAppleID"`
IsCustomDomainsFeatureAvailable bool `json:"isCustomDomainsFeatureAvailable"`
IsHideMyEmailFeatureAvailable bool `json:"isHideMyEmailFeatureAvailable"`
ContinueOnDeviceEligibleDeviceInfo []string `json:"ContinueOnDeviceEligibleDeviceInfo"`
Gilligvited bool `json:"gilligvited"`
AppleIDAliases []any `json:"appleIdAliases"`
UbiquityEOLEnabled bool `json:"ubiquityEOLEnabled"`
IsPaidDeveloper bool `json:"isPaidDeveloper"`
CountryCode string `json:"countryCode"`
NotificationID string `json:"notificationId"`
PrimaryEmailVerified bool `json:"primaryEmailVerified"`
ADsID string `json:"aDsID"`
Locked bool `json:"locked"`
ICDRSCapableDeviceCount int `json:"ICDRSCapableDeviceCount"`
HasICloudQualifyingDevice bool `json:"hasICloudQualifyingDevice"`
PrimaryEmail string `json:"primaryEmail"`
AppleIDEntries []struct {
IsPrimary bool `json:"isPrimary"`
Type string `json:"type"`
Value string `json:"value"`
} `json:"appleIdEntries"`
GilliganEnabled bool `json:"gilligan-enabled"`
IsWebAccessAllowed bool `json:"isWebAccessAllowed"`
FullName string `json:"fullName"`
MailFlags struct {
IsThreadingAvailable bool `json:"isThreadingAvailable"`
IsSearchV2Provisioned bool `json:"isSearchV2Provisioned"`
SCKMail bool `json:"sCKMail"`
IsMppSupportedInCurrentCountry bool `json:"isMppSupportedInCurrentCountry"`
} `json:"mailFlags"`
LanguageCode string `json:"languageCode"`
AppleID string `json:"appleId"`
HasUnreleasedOS bool `json:"hasUnreleasedOS"`
AnalyticsOptInStatus bool `json:"analyticsOptInStatus"`
FirstName string `json:"firstName"`
ICloudAppleIDAlias string `json:"iCloudAppleIdAlias"`
NotesMigrated bool `json:"notesMigrated"`
BeneficiaryInfo struct {
IsBeneficiary bool `json:"isBeneficiary"`
} `json:"beneficiaryInfo"`
HasPaymentInfo bool `json:"hasPaymentInfo"`
PcsDelet bool `json:"pcsDelet"`
AppleIDAlias string `json:"appleIdAlias"`
BrMigrated bool `json:"brMigrated"`
StatusCode int `json:"statusCode"`
FamilyEligible bool `json:"familyEligible"`
}
// ValidateDataApp represents an app
type ValidateDataApp struct {
CanLaunchWithOneFactor bool `json:"canLaunchWithOneFactor"`
IsQualifiedForBeta bool `json:"isQualifiedForBeta"`
}
// WebService represents a web service
type webService struct {
PcsRequired bool `json:"pcsRequired"`
URL string `json:"url"`
UploadURL string `json:"uploadUrl"`
Status string `json:"status"`
}
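A sketch (not part of the diff) of the cookie helpers above; the domains are illustrative:

func exampleCookies() ([]*http.Cookie, error) {
	u, err := url.Parse("https://www.icloud.com/")
	if err != nil {
		return nil, err
	}
	stored := []*http.Cookie{
		{Name: "X-APPLE-WEBAUTH-TOKEN", Domain: "icloud.com", Raw: "X-APPLE-WEBAUTH-TOKEN=a"},
		{Name: "unrelated", Domain: "example.com", Raw: "unrelated=b"},
	}
	// Keep only cookies whose Domain is a suffix of the URL host ...
	matching, err := GetCookiesForDomain(u, stored)
	if err != nil {
		return nil, err
	}
	// ... then merge the stored set into the filtered one, de-duplicating on Raw.
	return MergeCookies(stored, matching)
}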

File diff suppressed because it is too large


@@ -1,18 +0,0 @@
//go:build !plan9 && !solaris
package iclouddrive_test
import (
"testing"
"github.com/rclone/rclone/backend/iclouddrive"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestICloudDrive:",
NilObject: (*iclouddrive.Object)(nil),
})
}


@@ -1,7 +0,0 @@
// Build for iclouddrive for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || solaris
// Package iclouddrive implements the iCloud Drive backend
package iclouddrive


@@ -75,7 +75,7 @@ type MoveFolderParam struct {
DestinationPath string `validate:"nonzero" json:"destinationPath"`
}
-// JobIDResponse represents response struct with JobID for folder operations
+// JobIDResponse respresents response struct with JobID for folder operations
type JobIDResponse struct {
JobID string `json:"jobId"`
}


@@ -4,7 +4,6 @@ import (
"context" "context"
"fmt" "fmt"
"net/http" "net/http"
"slices"
"strconv" "strconv"
"time" "time"
@@ -143,7 +142,12 @@ func shouldRetryHTTP(resp *http.Response, retryErrorCodes []int) bool {
if resp == nil {
return false
}
-return slices.Contains(retryErrorCodes, resp.StatusCode)
+for _, e := range retryErrorCodes {
+if resp.StatusCode == e {
+return true
+}
+}
+return false
}
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
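On the master side this hunk (like the two internetarchive hunks below) collapses the manual scan into slices.Contains from the standard library (Go 1.21); behavior is identical. A sketch with an illustrative retry-code list (not part of the diff):

package main

import (
	"fmt"
	"slices"
)

func main() {
	retryErrorCodes := []int{429, 500, 502, 503, 504} // illustrative values
	code := 503
	// The hand-rolled loop is what v1.68.2 keeps ...
	found := false
	for _, e := range retryErrorCodes {
		if e == code {
			found = true
			break
		}
	}
	// ... and slices.Contains packages the same linear scan.
	fmt.Println(found, slices.Contains(retryErrorCodes, code)) // true true
}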


@@ -13,7 +13,6 @@ import (
"net/url" "net/url"
"path" "path"
"regexp" "regexp"
"slices"
"strconv" "strconv"
"strings" "strings"
"time" "time"
@@ -152,19 +151,6 @@ Owner is able to add custom keys. Metadata feature grabs all the keys including
Help: "Host of InternetArchive Frontend.\n\nLeave blank for default value.", Help: "Host of InternetArchive Frontend.\n\nLeave blank for default value.",
Default: "https://archive.org", Default: "https://archive.org",
Advanced: true, Advanced: true,
}, {
Name: "item_metadata",
Help: `Metadata to be set on the IA item, this is different from file-level metadata that can be set using --metadata-set.
Format is key=value and the 'x-archive-meta-' prefix is automatically added.`,
Default: []string{},
Hide: fs.OptionHideConfigurator,
Advanced: true,
}, {
Name: "item_derive",
Help: `Whether to trigger derive on the IA item or not. If set to false, the item will not be derived by IA upon upload.
The derive process produces a number of secondary files from an upload to make an upload more usable on the web.
Setting this to false is useful for uploading files that are already in a format that IA can display or reduce burden on IA's infrastructure.`,
Default: true,
}, { }, {
Name: "disable_checksum", Name: "disable_checksum",
Help: `Don't ask the server to test against MD5 checksum calculated by rclone. Help: `Don't ask the server to test against MD5 checksum calculated by rclone.
@@ -201,7 +187,7 @@ Only enable if you need to be guaranteed to be reflected after write operations.
const iaItemMaxSize int64 = 1099511627776
// metadata keys that are not writeable
-var roMetadataKey = map[string]any{
+var roMetadataKey = map[string]interface{}{
// do not add mtime here, it's a documented exception
"name": nil, "source": nil, "size": nil, "md5": nil,
"crc32": nil, "sha1": nil, "format": nil, "old_version": nil,
@@ -215,8 +201,6 @@ type Options struct {
Endpoint string `config:"endpoint"`
FrontEndpoint string `config:"front_endpoint"`
DisableChecksum bool `config:"disable_checksum"`
-ItemMetadata []string `config:"item_metadata"`
-ItemDerive bool `config:"item_derive"`
WaitArchive fs.Duration `config:"wait_archive"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -806,23 +790,17 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
"x-amz-filemeta-rclone-update-track": updateTracker, "x-amz-filemeta-rclone-update-track": updateTracker,
// we add some more headers for intuitive actions // we add some more headers for intuitive actions
"x-amz-auto-make-bucket": "1", // create an item if does not exist, do nothing if already "x-amz-auto-make-bucket": "1", // create an item if does not exist, do nothing if already
"x-archive-auto-make-bucket": "1", // same as above in IAS3 original way "x-archive-auto-make-bucket": "1", // same as above in IAS3 original way
"x-archive-keep-old-version": "0", // do not keep old versions (a.k.a. trashes in other clouds) "x-archive-keep-old-version": "0", // do not keep old versions (a.k.a. trashes in other clouds)
"x-archive-cascade-delete": "1", // enable "cascate delete" (delete all derived files in addition to the file itself) "x-archive-meta-mediatype": "data", // mark media type of the uploading file as "data"
"x-archive-queue-derive": "0", // skip derivation process (e.g. encoding to smaller files, OCR on PDFs)
"x-archive-cascade-delete": "1", // enable "cascate delete" (delete all derived files in addition to the file itself)
} }
if size >= 0 { if size >= 0 {
headers["Content-Length"] = fmt.Sprintf("%d", size) headers["Content-Length"] = fmt.Sprintf("%d", size)
headers["x-archive-size-hint"] = fmt.Sprintf("%d", size) headers["x-archive-size-hint"] = fmt.Sprintf("%d", size)
} }
// This is IA's ITEM metadata, not file metadata
headers, err = o.appendItemMetadataHeaders(headers, o.fs.opt)
if err != nil {
return err
}
var mdata fs.Metadata var mdata fs.Metadata
mdata, err = fs.GetMetadataOptions(ctx, o.fs, src, options) mdata, err = fs.GetMetadataOptions(ctx, o.fs, src, options)
if err == nil && mdata != nil { if err == nil && mdata != nil {
@@ -885,51 +863,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
-func (o *Object) appendItemMetadataHeaders(headers map[string]string, options Options) (newHeaders map[string]string, err error) {
-metadataCounter := make(map[string]int)
-metadataValues := make(map[string][]string)
-// First pass: count occurrences and collect values
-for _, v := range options.ItemMetadata {
-parts := strings.SplitN(v, "=", 2)
-if len(parts) != 2 {
-return newHeaders, errors.New("item metadata key=value should be in the form key=value")
-}
-key, value := parts[0], parts[1]
-metadataCounter[key]++
-metadataValues[key] = append(metadataValues[key], value)
-}
-// Second pass: add headers with appropriate prefixes
-for key, count := range metadataCounter {
-if count == 1 {
-// Only one occurrence, use x-archive-meta-
-headers[fmt.Sprintf("x-archive-meta-%s", key)] = metadataValues[key][0]
-} else {
-// Multiple occurrences, use x-archive-meta01-, x-archive-meta02-, etc.
-for i, value := range metadataValues[key] {
-headers[fmt.Sprintf("x-archive-meta%02d-%s", i+1, key)] = value
-}
-}
-}
-if o.fs.opt.ItemDerive {
-headers["x-archive-queue-derive"] = "1"
-} else {
-headers["x-archive-queue-derive"] = "0"
-}
-fs.Debugf(o, "Setting IA item derive: %t", o.fs.opt.ItemDerive)
-for k, v := range headers {
-if strings.HasPrefix(k, "x-archive-meta") {
-fs.Debugf(o, "Setting IA item metadata: %s=%s", k, v)
-}
-}
-return headers, nil
-}
// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
bucket, bucketPath := o.split()
@@ -992,8 +925,10 @@ func (o *Object) Metadata(ctx context.Context) (m fs.Metadata, err error) {
func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
if resp != nil {
-if slices.Contains(retryErrorCodes, resp.StatusCode) {
-return true, err
+for _, e := range retryErrorCodes {
+if resp.StatusCode == e {
+return true, err
+}
}
}
// Ok, not an awserr, check for generic failure conditions
@@ -1146,7 +1081,13 @@ func (f *Fs) waitFileUpload(ctx context.Context, reqPath, tracker string, newSiz
}
fileTrackers, _ := listOrString(iaFile.UpdateTrack)
-trackerMatch := slices.Contains(fileTrackers, tracker)
+trackerMatch := false
+for _, v := range fileTrackers {
+if v == tracker {
+trackerMatch = true
+break
+}
+}
if !trackerMatch {
continue
}
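For reference, a hedged sketch (not part of the diff) of the numbering scheme the removed appendItemMetadataHeaders implements: a key supplied once becomes x-archive-meta-KEY, a key supplied N times becomes x-archive-meta01-KEY .. x-archive-metaNN-KEY. Assumes the backend's fmt and strings imports:

func exampleItemHeaders() map[string]string {
	itemMetadata := []string{"collection=opensource", "subject=go", "subject=rclone"}
	values := map[string][]string{}
	for _, kv := range itemMetadata {
		parts := strings.SplitN(kv, "=", 2)
		values[parts[0]] = append(values[parts[0]], parts[1])
	}
	headers := map[string]string{}
	for key, vs := range values {
		if len(vs) == 1 {
			headers["x-archive-meta-"+key] = vs[0]
			continue
		}
		for i, v := range vs {
			headers[fmt.Sprintf("x-archive-meta%02d-%s", i+1, key)] = v
		}
	}
	// => x-archive-meta-collection=opensource,
	//    x-archive-meta01-subject=go, x-archive-meta02-subject=rclone
	return headers
}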


@@ -70,7 +70,7 @@ func (t *Rfc3339Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
// MarshalJSON turns a Rfc3339Time into JSON
func (t *Rfc3339Time) MarshalJSON() ([]byte, error) {
-return fmt.Appendf(nil, "\"%s\"", t.String()), nil
+return []byte(fmt.Sprintf("\"%s\"", t.String())), nil
}
// LoginToken is struct representing the login token generated in the WebUI
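fmt.Appendf (standard library since Go 1.19) on the master side formats straight into a byte slice, avoiding the intermediate string the Sprintf form allocates; both produce identical bytes. A sketch (not part of the diff):

package main

import "fmt"

func main() {
	s := "2024-11-15T12:20:50Z"
	a := fmt.Appendf(nil, "\"%s\"", s)    // master form
	b := []byte(fmt.Sprintf("\"%s\"", s)) // v1.68.2 form
	fmt.Println(string(a) == string(b))   // true
}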
@@ -165,25 +165,25 @@ type DeviceRegistrationResponse struct {
// CustomerInfo provides general information about the account. Required for finding the correct internal username.
type CustomerInfo struct {
Username string `json:"username"`
Email string `json:"email"`
Name string `json:"name"`
CountryCode string `json:"country_code"`
LanguageCode string `json:"language_code"`
CustomerGroupCode string `json:"customer_group_code"`
BrandCode string `json:"brand_code"`
AccountType string `json:"account_type"`
SubscriptionType string `json:"subscription_type"`
Usage int64 `json:"usage"`
Quota int64 `json:"quota"`
BusinessUsage int64 `json:"business_usage"`
BusinessQuota int64 `json:"business_quota"`
WriteLocked bool `json:"write_locked"`
ReadLocked bool `json:"read_locked"`
-LockedCause any `json:"locked_cause"`
+LockedCause interface{} `json:"locked_cause"`
WebHash string `json:"web_hash"`
AndroidHash string `json:"android_hash"`
IOSHash string `json:"ios_hash"`
}
// TrashResponse is returned when emptying the Trash // TrashResponse is returned when emptying the Trash


@@ -31,7 +31,7 @@ import (
"github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list" "github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil" "github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/pacer"
@@ -277,9 +277,11 @@ machines.`)
m.Set(configClientID, teliaseCloudClientID)
m.Set(configTokenURL, teliaseCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-OAuth2Config: &oauthutil.Config{
-AuthURL: teliaseCloudAuthURL,
-TokenURL: teliaseCloudTokenURL,
+OAuth2Config: &oauth2.Config{
+Endpoint: oauth2.Endpoint{
+AuthURL: teliaseCloudAuthURL,
+TokenURL: teliaseCloudTokenURL,
+},
ClientID: teliaseCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -290,9 +292,11 @@ machines.`)
m.Set(configClientID, telianoCloudClientID)
m.Set(configTokenURL, telianoCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-OAuth2Config: &oauthutil.Config{
-AuthURL: telianoCloudAuthURL,
-TokenURL: telianoCloudTokenURL,
+OAuth2Config: &oauth2.Config{
+Endpoint: oauth2.Endpoint{
+AuthURL: telianoCloudAuthURL,
+TokenURL: telianoCloudTokenURL,
+},
ClientID: telianoCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -303,9 +307,11 @@ machines.`)
m.Set(configClientID, tele2CloudClientID)
m.Set(configTokenURL, tele2CloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-OAuth2Config: &oauthutil.Config{
-AuthURL: tele2CloudAuthURL,
-TokenURL: tele2CloudTokenURL,
+OAuth2Config: &oauth2.Config{
+Endpoint: oauth2.Endpoint{
+AuthURL: tele2CloudAuthURL,
+TokenURL: tele2CloudTokenURL,
+},
ClientID: tele2CloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -316,9 +322,11 @@ machines.`)
m.Set(configClientID, onlimeCloudClientID) m.Set(configClientID, onlimeCloudClientID)
m.Set(configTokenURL, onlimeCloudTokenURL) m.Set(configTokenURL, onlimeCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{ return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauthutil.Config{ OAuth2Config: &oauth2.Config{
AuthURL: onlimeCloudAuthURL, Endpoint: oauth2.Endpoint{
TokenURL: onlimeCloudTokenURL, AuthURL: onlimeCloudAuthURL,
TokenURL: onlimeCloudTokenURL,
},
ClientID: onlimeCloudClientID, ClientID: onlimeCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"}, Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL, RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -916,17 +924,19 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
} }
baseClient := fshttp.NewClient(ctx) baseClient := fshttp.NewClient(ctx)
oauthConfig := &oauthutil.Config{ oauthConfig := &oauth2.Config{
AuthURL: defaultTokenURL, Endpoint: oauth2.Endpoint{
TokenURL: defaultTokenURL, AuthURL: defaultTokenURL,
TokenURL: defaultTokenURL,
},
} }
if ver == configVersion { if ver == configVersion {
oauthConfig.ClientID = defaultClientID oauthConfig.ClientID = defaultClientID
// if custom endpoints are set use them else stick with defaults // if custom endpoints are set use them else stick with defaults
if tokenURL, ok := m.Get(configTokenURL); ok { if tokenURL, ok := m.Get(configTokenURL); ok {
oauthConfig.TokenURL = tokenURL oauthConfig.Endpoint.TokenURL = tokenURL
// jottacloud is weird. we need to use the tokenURL as authURL // jottacloud is weird. we need to use the tokenURL as authURL
oauthConfig.AuthURL = tokenURL oauthConfig.Endpoint.AuthURL = tokenURL
} }
} else if ver == legacyConfigVersion { } else if ver == legacyConfigVersion {
clientID, ok := m.Get(configClientID) clientID, ok := m.Get(configClientID)
@@ -940,8 +950,8 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
oauthConfig.ClientID = clientID oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret) oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
oauthConfig.TokenURL = legacyTokenURL oauthConfig.Endpoint.TokenURL = legacyTokenURL
oauthConfig.AuthURL = legacyTokenURL oauthConfig.Endpoint.AuthURL = legacyTokenURL
// add the request filter to fix token refresh // add the request filter to fix token refresh
if do, ok := baseClient.Transport.(interface { if do, ok := baseClient.Transport.(interface {
@@ -1264,7 +1274,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
Parameters: url.Values{}, Parameters: url.Values{},
} }
opts.Parameters.Set("mode", "liststream") opts.Parameters.Set("mode", "liststream")
list := list.NewHelper(callback) list := walk.NewListRHelper(callback)
var resp *http.Response var resp *http.Response
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
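The same mechanical OAuth migration appears in this file and in several backends below: the newer code uses rclone's own `oauthutil.Config`, which keeps `AuthURL`/`TokenURL` at the top level, while the release branch uses `golang.org/x/oauth2.Config`, which nests them in an `Endpoint`. A minimal sketch of the x/oauth2 shape, with placeholder URLs and IDs that are not rclone's real values:

package main

import "golang.org/x/oauth2"

// Placeholder endpoint details, not rclone's actual configuration.
const (
	exampleAuthURL  = "https://example.com/oauth2/authorize"
	exampleTokenURL = "https://example.com/oauth2/token"
	exampleClientID = "client-id"
)

// The x/oauth2 shape used on the release branch: URLs live in Endpoint.
var conf = &oauth2.Config{
	ClientID: exampleClientID,
	Endpoint: oauth2.Endpoint{
		AuthURL:  exampleAuthURL,
		TokenURL: exampleTokenURL,
	},
	Scopes:      []string{"openid", "offline_access"},
	RedirectURL: "http://127.0.0.1:53682/",
}

func main() {
	// AuthCodeURL is where the AuthURL/TokenURL fields end up being used.
	_ = conf.AuthCodeURL("state", oauth2.AccessTypeOffline)
}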


@@ -193,7 +193,7 @@ func (o *Object) set(e *entity) {
// Call linkbox with the query in opts and return result
//
// This will be checked for error and an error will be returned if Status != 1
-func getUnmarshaledResponse(ctx context.Context, f *Fs, opts *rest.Opts, result any) error {
+func getUnmarshaledResponse(ctx context.Context, f *Fs, opts *rest.Opts, result interface{}) error {
	err := f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(ctx, opts, nil, &result)
		return f.shouldRetry(ctx, resp, err)


@@ -5,18 +5,18 @@ package local
import (
	"context"
	"fmt"
+	"syscall"
	"unsafe"

	"github.com/rclone/rclone/fs"
-	"golang.org/x/sys/windows"
)

-var getFreeDiskSpace = windows.NewLazySystemDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")
+var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")

// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	var available, total, free int64
-	root, e := windows.UTF16PtrFromString(f.root)
+	root, e := syscall.UTF16PtrFromString(f.root)
	if e != nil {
		return nil, fmt.Errorf("failed to read disk usage: %w", e)
	}
@@ -26,7 +26,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
		uintptr(unsafe.Pointer(&total)), // lpTotalNumberOfBytes
		uintptr(unsafe.Pointer(&free)), // lpTotalNumberOfFreeBytes
	)
-	if e1 != windows.Errno(0) {
+	if e1 != syscall.Errno(0) {
		return nil, fmt.Errorf("failed to read disk usage: %w", e1)
	}
	usage := &fs.Usage{
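The `windows` vs `syscall` swap above is not purely cosmetic: `windows.NewLazySystemDLL` from `golang.org/x/sys/windows` resolves the DLL from the Windows system directory only, which protects against DLL search-order hijacking, whereas `syscall.NewLazyDLL` uses the default search path. A self-contained sketch of the safer variant, calling the same API as the diff with error handling reduced to prints:

//go:build windows

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/windows"
)

// Load GetDiskFreeSpaceExW from System32 only, avoiding DLL hijacking.
var getFreeDiskSpace = windows.NewLazySystemDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")

func main() {
	root, err := windows.UTF16PtrFromString(`C:\`)
	if err != nil {
		fmt.Println(err)
		return
	}
	var available, total, free int64
	r1, _, e1 := getFreeDiskSpace.Call(
		uintptr(unsafe.Pointer(root)),
		uintptr(unsafe.Pointer(&available)),
		uintptr(unsafe.Pointer(&total)),
		uintptr(unsafe.Pointer(&free)),
	)
	if r1 == 0 { // GetDiskFreeSpaceExW returns nonzero on success
		fmt.Println("GetDiskFreeSpaceExW failed:", e1)
		return
	}
	fmt.Println("available:", available, "total:", total, "free:", free)
}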


@@ -34,6 +34,7 @@ import (
// Constants
const (
	devUnset   = 0xdeadbeefcafebabe // a device id meaning it is unset
+	linkSuffix = ".rclonelink"      // The suffix added to a translated symbolic link
	useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly
)
@@ -100,8 +101,10 @@ Metadata is supported on files and directories.
		},
		{
			Name: "links",
-			Help: "Translate symlinks to/from regular files with a '" + fs.LinkSuffix + "' extension for the local backend.",
+			Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
			Default:  false,
+			NoPrefix: true,
+			ShortOpt: "l",
			Advanced: true,
		},
		{
@@ -376,22 +379,17 @@ type Directory struct {
var (
	errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
-	errLinksNeedsSuffix  = errors.New("need \"" + fs.LinkSuffix + "\" suffix to refer to symlink when using -l/--links")
+	errLinksNeedsSuffix  = errors.New("need \"" + linkSuffix + "\" suffix to refer to symlink when using -l/--links")
)

// NewFs constructs an Fs from the path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
-	ci := fs.GetConfig(ctx)
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
-	// Override --local-links with --links if set
-	if ci.Links {
-		opt.TranslateSymlinks = true
-	}
	if opt.TranslateSymlinks && opt.FollowSymlinks {
		return nil, errLinksAndCopyLinks
	}
@@ -437,9 +435,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		f.dev = readDevice(fi, f.opt.OneFileSystem)
	}
	// Check to see if this is a .rclonelink if not found
-	hasLinkSuffix := strings.HasSuffix(f.root, fs.LinkSuffix)
+	hasLinkSuffix := strings.HasSuffix(f.root, linkSuffix)
	if hasLinkSuffix && opt.TranslateSymlinks && os.IsNotExist(err) {
-		fi, err = f.lstat(strings.TrimSuffix(f.root, fs.LinkSuffix))
+		fi, err = f.lstat(strings.TrimSuffix(f.root, linkSuffix))
	}
	if err == nil && f.isRegular(fi.Mode()) {
		// Handle the odd case, that a symlink was specified by name without the link suffix
@@ -510,8 +508,8 @@ func (f *Fs) caseInsensitive() bool {
//
// for regular files, localPath is returned unchanged
func translateLink(remote, localPath string) (newLocalPath string, isTranslatedLink bool) {
-	isTranslatedLink = strings.HasSuffix(remote, fs.LinkSuffix)
-	newLocalPath = strings.TrimSuffix(localPath, fs.LinkSuffix)
+	isTranslatedLink = strings.HasSuffix(remote, linkSuffix)
+	newLocalPath = strings.TrimSuffix(localPath, linkSuffix)
	return newLocalPath, isTranslatedLink
}
@@ -694,7 +692,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
		} else {
			// Check whether this link should be translated
			if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
-				newRemote += fs.LinkSuffix
+				newRemote += linkSuffix
			}
			// Don't include non directory if not included
			// we leave directory filtering to the layer above
@@ -1046,7 +1044,7 @@ you can try to change the output.`,
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (any, error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
	switch name {
	case "noop":
		if txt, ok := opt["error"]; ok {
@@ -1056,7 +1054,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
			return nil, errors.New(txt)
		}
		if _, ok := opt["echo"]; ok {
-			out := map[string]any{}
+			out := map[string]interface{}{}
			out["name"] = name
			out["arg"] = arg
			out["opt"] = opt


@@ -86,7 +86,7 @@ func TestVerifyCopy(t *testing.T) {
	require.NoError(t, err)
	src.(*Object).fs.opt.NoCheckUpdated = true

-	for i := range 100 {
+	for i := 0; i < 100; i++ {
		go r.WriteFile(src.Remote(), fmt.Sprintf("some new content %d", i), src.ModTime(context.Background()))
	}
	_, err = operations.Copy(context.Background(), r.Fremote, nil, filePath+"2", src)
@@ -110,7 +110,7 @@ func TestSymlink(t *testing.T) {
	require.NoError(t, lChtimes(symlinkPath, modTime2, modTime2))

	// Object viewed as symlink
-	file2 := fstest.NewItem("symlink.txt"+fs.LinkSuffix, "file.txt", modTime2)
+	file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)

	// Object viewed as destination
	file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
@@ -139,7 +139,7 @@ func TestSymlink(t *testing.T) {
	// Create a symlink
	modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
-	file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+fs.LinkSuffix, "file.txt", modTime3, false)
+	file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
	fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
	if haveLChtimes {
		r.CheckLocalItems(t, file1, file2, file3)
@@ -155,9 +155,9 @@ func TestSymlink(t *testing.T) {
	assert.Equal(t, "file.txt", linkText)

	// Check that NewObject gets the correct object
-	o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+fs.LinkSuffix)
+	o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
	require.NoError(t, err)
-	assert.Equal(t, "symlink2.txt"+fs.LinkSuffix, o.Remote())
+	assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
	assert.Equal(t, int64(8), o.Size())

	// Check that NewObject doesn't see the non suffixed version
@@ -165,7 +165,7 @@ func TestSymlink(t *testing.T) {
	require.Equal(t, fs.ErrorObjectNotFound, err)

	// Check that NewFs works with the suffixed version and --links
-	f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+fs.LinkSuffix), configmap.Simple{
+	f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+linkSuffix), configmap.Simple{
		"links": "true",
	})
	require.Equal(t, fs.ErrorIsFile, err)
@@ -277,7 +277,7 @@ func TestMetadata(t *testing.T) {
	// Write a symlink to the file
	symlinkPath := "metafile-link.txt"
	osSymlinkPath := filepath.Join(f.root, symlinkPath)
-	symlinkPath += fs.LinkSuffix
+	symlinkPath += linkSuffix
	require.NoError(t, os.Symlink(filePath, osSymlinkPath))
	symlinkModTime := fstest.Time("2002-02-03T04:05:10.123123123Z")
	require.NoError(t, lChtimes(osSymlinkPath, symlinkModTime, symlinkModTime))


@@ -63,8 +63,8 @@ type UserInfoResponse struct {
		Prolong    bool `json:"prolong"`
		Promocodes struct {
		} `json:"promocodes"`
-		Subscription []any  `json:"subscription"`
+		Subscription []interface{} `json:"subscription"`
		Version      string `json:"version"`
	} `json:"billing"`
	Bonuses struct {
		CameraUpload bool `json:"camera_upload"`


@@ -68,12 +68,14 @@ var (
)

// Description of how to authorize
-var oauthConfig = &oauthutil.Config{
+var oauthConfig = &oauth2.Config{
	ClientID:     api.OAuthClientID,
	ClientSecret: "",
-	AuthURL:      api.OAuthURL,
-	TokenURL:     api.OAuthURL,
-	AuthStyle:    oauth2.AuthStyleInParams,
+	Endpoint: oauth2.Endpoint{
+		AuthURL:   api.OAuthURL,
+		TokenURL:  api.OAuthURL,
+		AuthStyle: oauth2.AuthStyleInParams,
+	},
}

// Register with Fs
@@ -436,9 +438,7 @@ func (f *Fs) authorize(ctx context.Context, force bool) (err error) {
	if err != nil || !tokenIsValid(t) {
		fs.Infof(f, "Valid token not found, authorizing.")
		ctx := oauthutil.Context(ctx, f.cli)
-		oauth2Conf := oauthConfig.MakeOauth2Config()
-		t, err = oauth2Conf.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password)
+		t, err = oauthConfig.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password)
	}
	if err == nil && !tokenIsValid(t) {
		err = errors.New("invalid token")
@@ -901,7 +901,7 @@ func (t *treeState) NextRecord() (fs.DirEntry, error) {
		return nil, nil
	case api.ListParseUnknown15:
		skip := int(r.ReadPu32())
-		for range skip {
+		for i := 0; i < skip; i++ {
			r.ReadPu32()
			r.ReadPu32()
		}
@@ -1768,7 +1768,7 @@ func (f *Fs) eligibleForSpeedup(remote string, size int64, options ...fs.OpenOpt
func (f *Fs) parseSpeedupPatterns(patternString string) (err error) {
	f.speedupGlobs = nil
	f.speedupAny = false
-	uniqueValidPatterns := make(map[string]any)
+	uniqueValidPatterns := make(map[string]interface{})
	for _, pattern := range strings.Split(patternString, ",") {
		pattern = strings.ToLower(strings.TrimSpace(pattern))
@@ -2131,7 +2131,10 @@ func getTransferRange(size int64, options ...fs.OpenOption) (start int64, end in
	if limit < 0 {
		limit = size - offset
	}
-	end = min(offset+limit, size)
+	end = offset + limit
+	if end > size {
+		end = size
+	}
	partial = !(offset == 0 && end == size)
	return offset, end, partial
}
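Several hunks here and below (`getTransferRange`, `uploadMultipart`, the quickXorHash test) undo the same modernisation: Go 1.21 added the built-in generic `min`/`max` functions, which the newer code uses in place of the explicit compare-and-clamp. A small sketch of the equivalence:

package main

import "fmt"

// clampEnd is the pre-Go-1.21 spelling of min(offset+limit, size).
func clampEnd(offset, limit, size int64) int64 {
	end := offset + limit
	if end > size {
		end = size
	}
	return end
}

func main() {
	var offset, limit, size int64 = 100, 50, 120
	fmt.Println(clampEnd(offset, limit, size)) // 120
	fmt.Println(min(offset+limit, size))       // 120, built-in since Go 1.21
}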


@@ -11,7 +11,7 @@ import (
func testChunk(t *testing.T, chunk int) {
	data := make([]byte, chunk)
-	for i := range chunk {
+	for i := 0; i < chunk; i++ {
		data[i] = 'A'
	}
	for _, test := range []struct {


@@ -21,7 +21,6 @@ import (
	"fmt"
	"io"
	"path"
-	"slices"
	"strings"
	"sync"
	"time"
@@ -219,11 +218,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		srv = mega.New().SetClient(fshttp.NewClient(ctx))
		srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
		srv.SetHTTPS(opt.UseHTTPS)
-		srv.SetLogger(func(format string, v ...any) {
+		srv.SetLogger(func(format string, v ...interface{}) {
			fs.Infof("*go-mega*", format, v...)
		})
		if opt.Debug {
-			srv.SetDebugger(func(format string, v ...any) {
+			srv.SetDebugger(func(format string, v ...interface{}) {
				fs.Debugf("*go-mega*", format, v...)
			})
		}
@@ -499,8 +498,11 @@ func (f *Fs) list(ctx context.Context, dir *mega.Node, fn listFn) (found bool, e
	if err != nil {
		return false, fmt.Errorf("list failed: %w", err)
	}
-	if slices.ContainsFunc(nodes, fn) {
-		found = true
+	for _, item := range nodes {
+		if fn(item) {
+			found = true
+			break
+		}
	}
	return
}
@@ -1154,7 +1156,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	// Upload the chunks
	// FIXME do this in parallel
-	for id := range u.Chunks() {
+	for id := 0; id < u.Chunks(); id++ {
		_, chunkSize, err := u.ChunkLocation(id)
		if err != nil {
			return fmt.Errorf("upload failed to read chunk location: %w", err)
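The `list` change above is behaviour-preserving: `slices.ContainsFunc(s, f)`, in the standard library since Go 1.21, reports whether any element satisfies `f` and short-circuits on the first hit, exactly like the expanded loop. A minimal sketch with a hypothetical predicate standing in for the `listFn` callback:

package main

import (
	"fmt"
	"slices"
)

func main() {
	nodes := []string{"a.txt", "b.txt", "target.txt"}
	// isTarget is a stand-in for the listFn callback in the diff.
	isTarget := func(n string) bool { return n == "target.txt" }

	// The two forms below are equivalent; both stop at the first match.
	found := slices.ContainsFunc(nodes, isTarget)

	foundLoop := false
	for _, item := range nodes {
		if isTarget(item) {
			foundLoop = true
			break
		}
	}
	fmt.Println(found, foundLoop) // true true
}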


@@ -17,7 +17,7 @@ import (
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/fs/list"
+	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/bucket"
)
@@ -383,7 +383,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	bucket, directory := f.split(dir)
-	list := list.NewHelper(callback)
+	list := walk.NewListRHelper(callback)
	entries := fs.DirEntries{}
	listR := func(bucket, directory, prefix string, addBucket bool) error {
		err = f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, entry fs.DirEntry, isDirectory bool) error {


@@ -29,7 +29,7 @@ func testPurgeListDeadlock(t *testing.T) {
	r.Fremote.Features().Disable("Purge") // force fallback-purge

	// make a lot of files to prevent it from finishing too quickly
-	for i := range 100 {
+	for i := 0; i < 100; i++ {
		dst := "file" + fmt.Sprint(i) + ".txt"
		r.WriteObject(ctx, dst, "hello", t1)
	}
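`for i := range 100` is the Go 1.22 range-over-integer form; the release branch still targets older toolchains, so these loops are rewritten back to the classic three-clause style. The two are equivalent, as a quick sketch shows:

package main

import "fmt"

func main() {
	// Go 1.22+: ranging over an int iterates 0, 1, ..., n-1.
	for i := range 3 {
		fmt.Println("range form:", i)
	}
	// Pre-1.22 equivalent used on the release branch.
	for i := 0; i < 3; i++ {
		fmt.Println("classic form:", i)
	}
}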


@@ -28,7 +28,7 @@ import (
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/fs/list"
+	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/rest"
)
@@ -274,7 +274,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}

// Command the backend to run a named commands: du and symlink
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
	switch name {
	case "du":
		// No arg parsing needed, the path is passed in the fs
@@ -516,7 +516,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
		return fs.ErrorDirNotFound
	}

-	list := list.NewHelper(callback)
+	list := walk.NewListRHelper(callback)
	for resumeStart := u.Path; resumeStart != ""; {
		var files []File
		files, resumeStart, err = f.netStorageListRequest(ctx, URL, u.Path)
@@ -858,7 +858,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
// callBackend calls NetStorage API using either rest.Call or rest.CallXML function,
// depending on whether the response is required
-func (f *Fs) callBackend(ctx context.Context, URL, method, actionHeader string, noResponse bool, response any, options []fs.OpenOption) (io.ReadCloser, error) {
+func (f *Fs) callBackend(ctx context.Context, URL, method, actionHeader string, noResponse bool, response interface{}, options []fs.OpenOption) (io.ReadCloser, error) {
	opts := rest.Opts{
		Method:  method,
		RootURL: URL,
@@ -1080,7 +1080,7 @@ func (o *Object) netStorageDownloadRequest(ctx context.Context, options []fs.Ope
}

// netStorageDuRequest performs a NetStorage du request
-func (f *Fs) netStorageDuRequest(ctx context.Context) (any, error) {
+func (f *Fs) netStorageDuRequest(ctx context.Context) (interface{}, error) {
	URL := f.url("")
	const actionHeader = "version=1&action=du&format=xml&encoding=utf-8"
	duResp := &Du{}
@@ -1100,7 +1100,7 @@ func (f *Fs) netStorageDuRequest(ctx context.Context) (any, error) {
}

// netStorageDuRequest performs a NetStorage symlink request
-func (f *Fs) netStorageSymlinkRequest(ctx context.Context, URL string, dst string, modTime *int64) (any, error) {
+func (f *Fs) netStorageSymlinkRequest(ctx context.Context, URL string, dst string, modTime *int64) (interface{}, error) {
	target := url.QueryEscape(strings.TrimSuffix(dst, "/"))
	actionHeader := "version=1&action=symlink&target=" + target
	if modTime != nil {


@@ -6,7 +6,6 @@ import (
	"errors"
	"fmt"
	"net/http"
-	"slices"
	"strings"
	"time"
@@ -15,6 +14,7 @@ import (
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/lib/dircache"
	"github.com/rclone/rclone/lib/errcount"
+	"golang.org/x/exp/slices" // replace with slices after go1.21 is the minimum version
)

const (
@@ -396,57 +396,10 @@ func (m *Metadata) WritePermissions(ctx context.Context) (err error) {
	return nil
}

-// Order the permissions so that any with users come first.
-//
-// This is to work around a quirk with Graph:
-//
-// 1. You are adding permissions for both a group and a user.
-// 2. The user is a member of the group.
-// 3. The permissions for the group and user are the same.
-// 4. You are adding the group permission before the user permission.
-//
-// When all of the above are true, Graph indicates it has added the
-// user permission, but it immediately drops it
-//
-// See: https://github.com/rclone/rclone/issues/8465
-func (m *Metadata) orderPermissions(xs []*api.PermissionsType) {
-	// Return true if identity has any user permissions
-	hasUserIdentity := func(identity *api.IdentitySet) bool {
-		if identity == nil {
-			return false
-		}
-		return identity.User.ID != "" || identity.User.DisplayName != "" || identity.User.Email != "" || identity.User.LoginName != ""
-	}
-	// Return true if p has any user permissions
-	hasUser := func(p *api.PermissionsType) bool {
-		if hasUserIdentity(p.GetGrantedTo(m.fs.driveType)) {
-			return true
-		}
-		for _, identity := range p.GetGrantedToIdentities(m.fs.driveType) {
-			if hasUserIdentity(identity) {
-				return true
-			}
-		}
-		return false
-	}
-	// Put Permissions with a user first, leaving unsorted otherwise
-	slices.SortStableFunc(xs, func(a, b *api.PermissionsType) int {
-		aHasUser := hasUser(a)
-		bHasUser := hasUser(b)
-		if aHasUser && !bHasUser {
-			return -1
-		} else if !aHasUser && bHasUser {
-			return 1
-		}
-		return 0
-	})
-}
-
// sortPermissions sorts the permissions (to be written) into add, update, and remove queues
func (m *Metadata) sortPermissions() (add, update, remove []*api.PermissionsType) {
	new, old := m.queuedPermissions, m.permissions
	if len(old) == 0 || m.permsAddOnly {
-		m.orderPermissions(new)
		return new, nil, nil // they must all be "add"
	}
@@ -494,9 +447,6 @@ func (m *Metadata) sortPermissions() (add, update, remove []*api.PermissionsType
			remove = append(remove, o)
		}
	}
-	m.orderPermissions(add)
-	m.orderPermissions(update)
-	m.orderPermissions(remove)
	return add, update, remove
}
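The removed `orderPermissions` (and its test file below) relies on `slices.SortStableFunc`, which keeps the relative order of elements that compare equal; that stability is what lets the code move user permissions first while "leaving unsorted otherwise". A small sketch of the same partitioning trick on plain strings, using hypothetical data rather than the Graph API types, and the stdlib `slices` package rather than the `golang.org/x/exp/slices` variant seen in the diff:

package main

import (
	"fmt"
	"slices"
	"strings"
)

func main() {
	perms := []string{"group:g1", "user:alice", "group:g2", "user:bob"}
	isUser := func(p string) bool { return strings.HasPrefix(p, "user:") }

	// Stable sort: users float to the front, ties keep their original order.
	slices.SortStableFunc(perms, func(a, b string) int {
		switch {
		case isUser(a) && !isUser(b):
			return -1
		case !isUser(a) && isUser(b):
			return 1
		}
		return 0
	})
	fmt.Println(perms) // [user:alice user:bob group:g1 group:g2]
}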


@@ -1,125 +0,0 @@
package onedrive
import (
"encoding/json"
"testing"
"github.com/rclone/rclone/backend/onedrive/api"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestOrderPermissions(t *testing.T) {
tests := []struct {
name string
input []*api.PermissionsType
expected []string
}{
{
name: "empty",
input: []*api.PermissionsType{},
expected: []string(nil),
},
{
name: "users first, then group, then none",
input: []*api.PermissionsType{
{ID: "1", GrantedTo: &api.IdentitySet{Group: api.Identity{DisplayName: "Group1"}}},
{ID: "2", GrantedToIdentities: []*api.IdentitySet{{User: api.Identity{DisplayName: "Alice"}}}},
{ID: "3", GrantedTo: &api.IdentitySet{User: api.Identity{DisplayName: "Alice"}}},
{ID: "4"},
},
expected: []string{"2", "3", "1", "4"},
},
{
name: "same type unsorted",
input: []*api.PermissionsType{
{ID: "b", GrantedTo: &api.IdentitySet{Group: api.Identity{DisplayName: "Group B"}}},
{ID: "a", GrantedTo: &api.IdentitySet{Group: api.Identity{DisplayName: "Group A"}}},
{ID: "c", GrantedToIdentities: []*api.IdentitySet{{Group: api.Identity{DisplayName: "Group A"}}, {User: api.Identity{DisplayName: "Alice"}}}},
},
expected: []string{"c", "b", "a"},
},
{
name: "all user identities",
input: []*api.PermissionsType{
{ID: "c", GrantedTo: &api.IdentitySet{User: api.Identity{DisplayName: "Bob"}}},
{ID: "a", GrantedTo: &api.IdentitySet{User: api.Identity{Email: "alice@example.com"}}},
{ID: "b", GrantedToIdentities: []*api.IdentitySet{{User: api.Identity{LoginName: "user3"}}}},
},
expected: []string{"c", "a", "b"},
},
{
name: "no user or group info",
input: []*api.PermissionsType{
{ID: "z"},
{ID: "x"},
{ID: "y"},
},
expected: []string{"z", "x", "y"},
},
}
for _, driveType := range []string{driveTypePersonal, driveTypeBusiness} {
t.Run(driveType, func(t *testing.T) {
for _, tt := range tests {
m := &Metadata{fs: &Fs{driveType: driveType}}
t.Run(tt.name, func(t *testing.T) {
if driveType == driveTypeBusiness {
for i := range tt.input {
tt.input[i].GrantedToV2 = tt.input[i].GrantedTo
tt.input[i].GrantedTo = nil
tt.input[i].GrantedToIdentitiesV2 = tt.input[i].GrantedToIdentities
tt.input[i].GrantedToIdentities = nil
}
}
m.orderPermissions(tt.input)
var gotIDs []string
for _, p := range tt.input {
gotIDs = append(gotIDs, p.ID)
}
assert.Equal(t, tt.expected, gotIDs)
})
}
})
}
}
func TestOrderPermissionsJSON(t *testing.T) {
testJSON := `[
{
"id": "1",
"grantedToV2": {
"group": {
"id": "group@example.com"
}
},
"roles": [
"write"
]
},
{
"id": "2",
"grantedToV2": {
"user": {
"id": "user@example.com"
}
},
"roles": [
"write"
]
}
]`
var testPerms []*api.PermissionsType
err := json.Unmarshal([]byte(testJSON), &testPerms)
require.NoError(t, err)
m := &Metadata{fs: &Fs{driveType: driveTypeBusiness}}
m.orderPermissions(testPerms)
var gotIDs []string
for _, p := range testPerms {
gotIDs = append(gotIDs, p.ID)
}
assert.Equal(t, []string{"2", "1"}, gotIDs)
}


@@ -30,7 +30,6 @@ import (
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/fs/list"
	"github.com/rclone/rclone/fs/log"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fs/walk"
@@ -41,6 +40,7 @@ import (
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/readers"
	"github.com/rclone/rclone/lib/rest"
+	"golang.org/x/oauth2"
)

const (
@@ -65,21 +65,14 @@ const (
// Globals
var (
-	// Define the paths used for token operations
-	commonPathPrefix = "/common" // prefix for the paths if tenant isn't known
-	authPath         = "/oauth2/v2.0/authorize"
-	tokenPath        = "/oauth2/v2.0/token"
+	authPath  = "/common/oauth2/v2.0/authorize"
+	tokenPath = "/common/oauth2/v2.0/token"

	scopeAccess             = fs.SpaceSepList{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "Sites.Read.All", "offline_access"}
	scopeAccessWithoutSites = fs.SpaceSepList{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access"}

-	// When using client credential OAuth flow, scope of .default is required in order
-	// to use the permissions configured for the application within the tenant
-	scopeAccessClientCred = fs.SpaceSepList{".default"}
-
-	// Base config for how to auth
-	oauthConfig = &oauthutil.Config{
+	// Description of how to auth for this app for a business account
+	oauthConfig = &oauth2.Config{
		Scopes:       scopeAccess,
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
@@ -132,7 +125,7 @@ func init() {
			Help: "Microsoft Cloud for US Government",
		}, {
			Value: regionDE,
-			Help:  "Microsoft Cloud Germany (deprecated - try " + regionGlobal + " region first).",
+			Help:  "Microsoft Cloud Germany",
		}, {
			Value: regionCN,
			Help:  "Azure and Office 365 operated by Vnet Group in China",
@@ -190,14 +183,6 @@ Choose or manually enter a custom space separated list with all scopes, that rcl
				Help:  "Read and write access to all resources, without the ability to browse SharePoint sites. \nSame as if disable_site_permission was set to true",
			},
		},
-	}, {
-		Name: "tenant",
-		Help: `ID of the service principal's tenant. Also called its directory ID.
-Set this if using
-- Client Credential flow
-`,
-		Sensitive: true,
	}, {
		Name: "disable_site_permission",
		Help: `Disable the request for Sites.Read.All permission.
@@ -542,54 +527,28 @@ func chooseDrive(ctx context.Context, name string, m configmap.Mapper, srv *rest
	})
}

-// Make the oauth config for the backend
-func makeOauthConfig(ctx context.Context, opt *Options) (*oauthutil.Config, error) {
-	// Copy the default oauthConfig
-	oauthConfig := *oauthConfig
-
-	// Set the scopes
-	oauthConfig.Scopes = opt.AccessScopes
-	if opt.DisableSitePermission {
-		oauthConfig.Scopes = scopeAccessWithoutSites
-	}
-
-	// Construct the auth URLs
-	prefix := commonPathPrefix
-	if opt.Tenant != "" {
-		prefix = "/" + opt.Tenant
-	}
-	oauthConfig.TokenURL = authEndpoint[opt.Region] + prefix + tokenPath
-	oauthConfig.AuthURL = authEndpoint[opt.Region] + prefix + authPath
-
-	// Check to see if we are using client credentials flow
-	if opt.ClientCredentials {
-		// Override scope to .default
-		oauthConfig.Scopes = scopeAccessClientCred
-		if opt.Tenant == "" {
-			return nil, fmt.Errorf("tenant parameter must be set when using %s", config.ConfigClientCredentials)
-		}
-	}
-
-	return &oauthConfig, nil
-}
-
// Config the backend
-func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.ConfigIn) (*fs.ConfigOut, error) {
-	opt := new(Options)
-	err := configstruct.Set(m, opt)
-	if err != nil {
-		return nil, err
-	}
-	_, graphURL := getRegionURL(m)
-
-	// Check to see if this is the start of the state machine execution
-	if conf.State == "" {
-		conf, err := makeOauthConfig(ctx, opt)
-		if err != nil {
-			return nil, err
-		}
+func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
+	region, graphURL := getRegionURL(m)
+
+	if config.State == "" {
+		var accessScopes fs.SpaceSepList
+		accessScopesString, _ := m.Get("access_scopes")
+		err := accessScopes.Set(accessScopesString)
+		if err != nil {
+			return nil, fmt.Errorf("failed to parse access_scopes: %w", err)
+		}
+		oauthConfig.Scopes = []string(accessScopes)
+		disableSitePermission, _ := m.Get("disable_site_permission")
+		if disableSitePermission == "true" {
+			oauthConfig.Scopes = scopeAccessWithoutSites
+		}
+		oauthConfig.Endpoint = oauth2.Endpoint{
+			AuthURL:  authEndpoint[region] + authPath,
+			TokenURL: authEndpoint[region] + tokenPath,
+		}
		return oauthutil.ConfigOut("choose_type", &oauthutil.Options{
-			OAuth2Config: conf,
+			OAuth2Config: oauthConfig,
		})
	}
@@ -597,11 +556,9 @@ func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.Config
	if err != nil {
		return nil, fmt.Errorf("failed to configure OneDrive: %w", err)
	}
-	// Create a REST client, build on the OAuth client created above
	srv := rest.NewClient(oAuthClient)

-	switch conf.State {
+	switch config.State {
	case "choose_type":
		return fs.ConfigChooseExclusiveFixed("choose_type_done", "config_type", "Type of connection", []fs.OptionExample{{
			Value: "onedrive",
@@ -627,7 +584,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.Config
		}})
	case "choose_type_done":
		// Jump to next state according to config chosen
-		return fs.ConfigGoto(conf.Result)
+		return fs.ConfigGoto(config.Result)
	case "onedrive":
		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
			opts: rest.Opts{
@@ -645,22 +602,16 @@ func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.Config
			},
		})
	case "driveid":
-		out, err := fs.ConfigInput("driveid_end", "config_driveid_fixed", "Drive ID")
-		if err != nil {
-			return out, err
-		}
-		// Default the drive_id to the previous version in the config
-		out.Option.Default, _ = m.Get("drive_id")
-		return out, nil
+		return fs.ConfigInput("driveid_end", "config_driveid_fixed", "Drive ID")
	case "driveid_end":
		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
-			finalDriveID: conf.Result,
+			finalDriveID: config.Result,
		})
	case "siteid":
		return fs.ConfigInput("siteid_end", "config_siteid", "Site ID")
	case "siteid_end":
		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
-			siteID: conf.Result,
+			siteID: config.Result,
		})
	case "url":
		return fs.ConfigInput("url_end", "config_site_url", `Site URL
@@ -671,7 +622,7 @@ Examples:
- "https://XXX.sharepoint.com/teams/ID"
`)
	case "url_end":
-		siteURL := conf.Result
+		siteURL := config.Result
		re := regexp.MustCompile(`https://.*\.sharepoint\.com(/.*)`)
		match := re.FindStringSubmatch(siteURL)
		if len(match) == 2 {
@@ -686,12 +637,12 @@ Examples:
		return fs.ConfigInput("path_end", "config_sharepoint_url", `Server-relative URL`)
	case "path_end":
		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
-			relativePath: conf.Result,
+			relativePath: config.Result,
		})
	case "search":
		return fs.ConfigInput("search_end", "config_search_term", `Search term`)
	case "search_end":
-		searchTerm := conf.Result
+		searchTerm := config.Result
		opts := rest.Opts{
			Method:  "GET",
			RootURL: graphURL,
@@ -713,10 +664,10 @@ Examples:
		})
	case "search_sites":
		return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
-			siteID: conf.Result,
+			siteID: config.Result,
		})
	case "driveid_final":
-		finalDriveID := conf.Result
+		finalDriveID := config.Result

		// Test the driveID and get drive type
		opts := rest.Opts{
@@ -735,12 +686,12 @@ Examples:
		return fs.ConfigConfirm("driveid_final_end", true, "config_drive_ok", fmt.Sprintf("Drive OK?\n\nFound drive %q of type %q\nURL: %s\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL))
	case "driveid_final_end":
-		if conf.Result == "true" {
+		if config.Result == "true" {
			return nil, nil
		}
		return fs.ConfigGoto("choose_type")
	}
-	return nil, fmt.Errorf("unknown state %q", conf.State)
+	return nil, fmt.Errorf("unknown state %q", config.State)
}

// Options defines the configuration for this backend
@@ -751,9 +702,7 @@ type Options struct {
	DriveType               string          `config:"drive_type"`
	RootFolderID            string          `config:"root_folder_id"`
	DisableSitePermission   bool            `config:"disable_site_permission"`
-	ClientCredentials       bool            `config:"client_credentials"`
	AccessScopes            fs.SpaceSepList `config:"access_scopes"`
-	Tenant                  string          `config:"tenant"`
	ExposeOneNoteFiles      bool            `config:"expose_onenote_files"`
	ServerSideAcrossConfigs bool            `config:"server_side_across_configs"`
	ListChunk               int64           `config:"list_chunk"`
@@ -1041,10 +990,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	}

	rootURL := graphAPIEndpoint[opt.Region] + "/v1.0" + "/drives/" + opt.DriveID
-	oauthConfig, err := makeOauthConfig(ctx, opt)
-	if err != nil {
-		return nil, err
+	oauthConfig.Scopes = opt.AccessScopes
+	if opt.DisableSitePermission {
+		oauthConfig.Scopes = scopeAccessWithoutSites
+	}
+	oauthConfig.Endpoint = oauth2.Endpoint{
+		AuthURL:  authEndpoint[opt.Region] + authPath,
+		TokenURL: authEndpoint[opt.Region] + tokenPath,
	}

	client := fshttp.NewClient(ctx)
@@ -1397,7 +1349,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
	// So we have to filter things outside of the root which is
	// inefficient.

-	list := list.NewHelper(callback)
+	list := walk.NewListRHelper(callback)

	// list a folder conventionally - used for shared folders
	var listFolder func(dir string) error
@@ -1593,12 +1545,9 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
-	// While this is true for some OneDrive personal accounts, it
-	// isn't true for all of them. See #8101 for details
-	//
-	// if f.driveType == driveTypePersonal {
-	// 	return time.Millisecond
-	// }
+	if f.driveType == driveTypePersonal {
+		return time.Millisecond
+	}
	return time.Second
}
@@ -1657,7 +1606,7 @@ func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
@@ -1672,18 +1621,11 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
		return nil, fs.ErrorCantCopy
	}

-	err = srcObj.readMetaData(ctx)
+	err := srcObj.readMetaData(ctx)
	if err != nil {
		return nil, err
	}

-	// Find and remove existing object
-	cleanup, err := operations.RemoveExisting(ctx, f, remote, "server side copy")
-	if err != nil {
-		return nil, err
-	}
-	defer cleanup(&err)
-
	// Check we aren't overwriting a file on the same remote
	if srcObj.fs == f {
		srcPath := srcObj.rootPath()
@@ -2533,7 +2475,10 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, src fs.Objec
	remaining := size
	position := int64(0)
	for remaining > 0 {
-		n := min(remaining, int64(o.fs.opt.ChunkSize))
+		n := int64(o.fs.opt.ChunkSize)
+		if remaining < n {
+			n = remaining
+		}
		seg := readers.NewRepeatableReader(io.LimitReader(in, n))
		fs.Debugf(o, "Uploading segment %d/%d size %d", position, size, n)
		info, err = o.uploadFragment(ctx, uploadURL, position, size, seg, n, options...)
@@ -2608,11 +2553,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		return errors.New("can't upload content to a OneNote file")
	}

-	// Only start the renewer if we have a valid one
-	if o.fs.tokenRenewer != nil {
-		o.fs.tokenRenewer.Start()
-		defer o.fs.tokenRenewer.Stop()
-	}
+	o.fs.tokenRenewer.Start()
+	defer o.fs.tokenRenewer.Stop()

	size := src.Size()
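The `uploadMultipart` hunk shows the chunking pattern both branches share: clamp each segment to the configured chunk size, wrap it in a `LimitReader`, and advance until the input is exhausted. A reduced, self-contained sketch of that loop, with a plain `bytes.Reader` standing in for the upload source and a fixed chunk size instead of rclone's option:

package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	in := bytes.NewReader(make([]byte, 10))
	const chunkSize = int64(4)
	size := int64(in.Len())

	remaining, position := size, int64(0)
	for remaining > 0 {
		// Clamp the segment to what is left, i.e. min(remaining, chunkSize).
		n := min(remaining, chunkSize)
		seg := io.LimitReader(in, n)
		read, err := io.Copy(io.Discard, seg) // stand-in for uploading the fragment
		if err != nil {
			fmt.Println("upload failed:", err)
			return
		}
		fmt.Printf("uploaded segment at %d size %d\n", position, read)
		remaining -= n
		position += n
	}
}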


@@ -4,7 +4,6 @@ import (
	"context"
	"encoding/json"
	"fmt"
-	"slices"
	"testing"
	"time"
@@ -17,6 +16,7 @@ import (
	"github.com/rclone/rclone/lib/random"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
+	"golang.org/x/exp/slices" // replace with slices after go1.21 is the minimum version
)

// go test -timeout 30m -run ^TestIntegration/FsMkdir/FsPutFiles/Internal$ github.com/rclone/rclone/backend/onedrive -remote TestOneDrive:meta -v
@@ -215,11 +215,11 @@ func (f *Fs) TestDirectoryMetadata(t *testing.T, r *fstest.Run) {
	compareDirMeta(expectedMeta, actualMeta, false)

	// modtime
-	fstest.AssertTimeEqualWithPrecision(t, newDst.Remote(), t1, newDst.ModTime(ctx), f.Precision())
+	assert.Equal(t, t1.Truncate(f.Precision()), newDst.ModTime(ctx))
	// try changing it and re-check it
	newDst, err = operations.SetDirModTime(ctx, f, newDst, "", t2)
	assert.NoError(t, err)
-	fstest.AssertTimeEqualWithPrecision(t, newDst.Remote(), t2, newDst.ModTime(ctx), f.Precision())
+	assert.Equal(t, t2.Truncate(f.Precision()), newDst.ModTime(ctx))
	// ensure that f.DirSetModTime also works
	err = f.DirSetModTime(ctx, "subdir", t3)
	assert.NoError(t, err)
@@ -227,7 +227,7 @@ func (f *Fs) TestDirectoryMetadata(t *testing.T, r *fstest.Run) {
	assert.NoError(t, err)
	entries.ForDir(func(dir fs.Directory) {
		if dir.Remote() == "subdir" {
-			fstest.AssertTimeEqualWithPrecision(t, dir.Remote(), t3, dir.ModTime(ctx), f.Precision())
+			assert.True(t, t3.Truncate(f.Precision()).Equal(dir.ModTime(ctx)), fmt.Sprintf("got %v", dir.ModTime(ctx)))
		}
	})


@@ -86,7 +86,7 @@ func (q *quickXorHash) Write(p []byte) (n int, err error) {
// Calculate the current checksum
func (q *quickXorHash) checkSum() (h [Size + 1]byte) {
-	for i := range dataSize {
+	for i := 0; i < dataSize; i++ {
		shift := (i * 11) % 160
		shiftBytes := shift / 8
		shiftBits := shift % 8


@@ -130,7 +130,10 @@ func TestQuickXorHashByBlock(t *testing.T) {
		require.NoError(t, err, what)
		h := New()
		for i := 0; i < len(in); i += blockSize {
-			end := min(i+blockSize, len(in))
+			end := i + blockSize
+			if end > len(in) {
+				end = len(in)
+			}
			n, err := h.Write(in[i:end])
			require.Equal(t, end-i, n, what)
			require.NoError(t, err, what)

Some files were not shown because too many files have changed in this diff Show More