mirror of https://github.com/rclone/rclone.git synced 2026-01-07 11:03:15 +00:00

Compare commits


1 Commit

Author: Nick Craig-Wood
SHA1: c917d2d5b4
Date: 2022-09-05 18:56:11 +01:00

s3: fix --s3-versions when copying a single object

Before this change, if --s3-versions was enabled, then copying a
single object from a subdirectory would fail.

This was due to an incorrect comparison in the NewFs code.

This fixes the comparison and introduces a new unit test.
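The backend/s3/s3.go hunk itself is suppressed further down as too large, so the exact comparison that was fixed is not shown on this page. As a rough, hypothetical sketch only: with --s3-versions enabled, listed object names carry a timestamp version suffix, so matching a single requested object has to compare the version-stripped name rather than the raw remote. All names below (versionSuffix, stripVersion, sameObject) are illustrative inventions, not rclone's actual code.

package main

import (
	"fmt"
	"regexp"
)

// versionSuffix matches the timestamp suffix that --s3-versions appends to
// object names, e.g. "report-v2022-09-05-185611-000.csv".
// Hypothetical helper for illustration; rclone keeps the real logic in its
// lib/version package.
var versionSuffix = regexp.MustCompile(`-v\d{4}-\d{2}-\d{2}-\d{6}-\d{3}`)

// stripVersion removes the version suffix, if any, from a remote name.
func stripVersion(remote string) string {
	return versionSuffix.ReplaceAllString(remote, "")
}

// sameObject reports whether a listed (possibly versioned) remote refers to
// the single object the user asked to copy. Comparing the raw names fails as
// soon as a version suffix is present, which is the class of bug described
// in the commit message above.
func sameObject(requestedLeaf, listedRemote string) bool {
	return stripVersion(listedRemote) == requestedLeaf
}

func main() {
	fmt.Println(sameObject("dir/report.csv", "dir/report-v2022-09-05-185611-000.csv")) // true
	fmt.Println(sameObject("dir/report.csv", "dir/report.csv"))                        // true
	fmt.Println(sameObject("dir/report.csv", "dir/other.csv"))                         // false
}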
786 changed files with 66856 additions and 123001 deletions

4
.github/FUNDING.yml vendored Normal file
View File

@@ -0,0 +1,4 @@
github: [ncw]
patreon: njcw
liberapay: ncw
custom: ["https://rclone.org/donate/"]

View File

@@ -1,6 +0,0 @@
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"

View File

@@ -8,31 +8,29 @@ name: build
on:
push:
branches:
- - '**'
+ - '*'
tags:
- - '**'
+ - '*'
pull_request:
workflow_dispatch:
inputs:
manual:
description: Manual run (bypass default conditions)
type: boolean
required: true
default: true
jobs:
build:
- if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
+ if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
- job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.19', 'go1.20']
+ job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.17', 'go1.18']
include:
- job_name: linux
os: ubuntu-latest
- go: '1.21'
+ go: '1.19.x'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
@@ -43,14 +41,14 @@ jobs:
- job_name: linux_386
os: ubuntu-latest
- go: '1.21'
+ go: '1.19.x'
goarch: 386
gotags: cmount
quicktest: true
- job_name: mac_amd64
os: macos-11
- go: '1.21'
+ go: '1.19.x'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
@@ -59,14 +57,14 @@ jobs:
- job_name: mac_arm64
os: macos-11
- go: '1.21'
+ go: '1.19.x'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows
os: windows-latest
- go: '1.21'
+ go: '1.19.x'
gotags: cmount
cgo: '0'
build_flags: '-include "^windows/"'
@@ -76,20 +74,20 @@ jobs:
- job_name: other_os
os: ubuntu-latest
- go: '1.21'
+ go: '1.19.x'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true
- - job_name: go1.19
+ - job_name: go1.17
os: ubuntu-latest
- go: '1.19'
+ go: '1.17.x'
quicktest: true
racequicktest: true
- - job_name: go1.20
+ - job_name: go1.18
os: ubuntu-latest
- go: '1.20'
+ go: '1.18.x'
quicktest: true
racequicktest: true
@@ -99,13 +97,14 @@ jobs:
steps:
- name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Install Go
- uses: actions/setup-go@v4
+ uses: actions/setup-go@v2
with:
stable: 'false'
go-version: ${{ matrix.go }}
check-latest: true
@@ -124,17 +123,12 @@ jobs:
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
- sudo apt-get install fuse3 libfuse-dev rpm pkg-config
+ sudo apt-get install fuse libfuse-dev rpm pkg-config
if: matrix.os == 'ubuntu-latest'
- name: Install Libraries on macOS
shell: bash
run: |
# https://github.com/Homebrew/brew/issues/15621#issuecomment-1619266788
# https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6319008
unset HOMEBREW_NO_INSTALL_FROM_API
brew untap --force homebrew/core
brew untap --force homebrew/cask
brew update
brew install --cask macfuse
if: matrix.os == 'macos-11'
@@ -168,7 +162,7 @@ jobs:
env
- name: Go module cache
- uses: actions/cache@v3
+ uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -222,17 +216,17 @@ jobs:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# working-directory: '$(modulePath)'
# Deploy binaries if enabled in config && not a PR && not a fork
- if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
+ if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
lint:
- if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
+ if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
timeout-minutes: 30
name: "lint"
runs-on: ubuntu-latest
steps:
- name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v2
- name: Code quality test
uses: golangci/golangci-lint-action@v3
@@ -240,39 +234,26 @@ jobs:
# Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
version: latest
# Run govulncheck on the latest go version, the one we build binaries with
- name: Install Go
uses: actions/setup-go@v4
with:
go-version: '1.21'
check-latest: true
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest
- name: Scan for vulnerabilities
run: govulncheck ./...
android:
- if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
+ if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
timeout-minutes: 30
name: "android-all"
runs-on: ubuntu-latest
steps:
- name: Checkout
- uses: actions/checkout@v4
+ uses: actions/checkout@v2
with:
fetch-depth: 0
# Upgrade together with NDK version
- name: Set up Go
- uses: actions/setup-go@v4
+ uses: actions/setup-go@v1
with:
- go-version: '1.21'
+ go-version: 1.19.x
- name: Go module cache
- uses: actions/cache@v3
+ uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -357,4 +338,4 @@ jobs:
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# Upload artifacts if not a PR && not a fork
- if: env.RCLONE_CONFIG_PASS != '' && github.head_ref == '' && github.repository == 'rclone/rclone'
+ if: github.head_ref == '' && github.repository == 'rclone/rclone'

View File

@@ -1,77 +0,0 @@
name: Docker beta build
on:
push:
branches:
- master
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Free some space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
with:
images: ghcr.io/${{ github.repository }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
# This is the user that triggered the Workflow. In this case, it will
# either be the user whom created the Release or manually triggered
# the workflow_dispatch.
username: ${{ github.actor }}
# `secrets.GITHUB_TOKEN` is a secret that's automatically generated by
# GitHub Actions at the start of a workflow run to identify the job.
# This is used to authenticate against GitHub Container Registry.
# See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
# for more detailed information.
password: ${{ secrets.GITHUB_TOKEN }}
- name: Show disk usage
shell: bash
run: |
df -h .
- name: Build and publish image
uses: docker/build-push-action@v5
with:
file: Dockerfile
context: .
push: true # push the image to ghcr
tags: |
ghcr.io/rclone/rclone:beta
rclone/rclone:beta
labels: ${{ steps.meta.outputs.labels }}
platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
cache-from: type=gha, scope=${{ github.workflow }}
cache-to: type=gha, mode=max, scope=${{ github.workflow }}
provenance: false
# Eventually cache will need to be cleared if builds more frequent than once a week
# https://github.com/docker/build-push-action/issues/252
- name: Show disk usage
shell: bash
run: |
df -h .

View File

@@ -0,0 +1,26 @@
name: Docker beta build
on:
push:
branches:
- master
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Build and publish image
uses: ilteoood/docker_buildx@1.1.0
with:
tag: beta
imageName: rclone/rclone
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}

View File

@@ -10,17 +10,8 @@ jobs:
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Free some space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master
- uses: actions/checkout@v4
+ uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Get actual patch version
@@ -48,17 +39,8 @@ jobs:
runs-on: ubuntu-latest
name: Build docker plugin job
steps:
- name: Free some space
shell: bash
run: |
df -h .
# Remove android SDK
sudo rm -rf /usr/local/lib/android || true
# Remove .net runtime
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master
- uses: actions/checkout@v4
+ uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Build and publish docker plugin

View File

@@ -1,14 +0,0 @@
name: Publish to Winget
on:
release:
types: [released]
jobs:
publish:
runs-on: ubuntu-latest
steps:
- uses: vedantmgoyal2009/winget-releaser@v2
with:
identifier: Rclone.Rclone
installers-regex: '-windows-\w+\.zip$'
token: ${{ secrets.WINGET_TOKEN }}

2
.gitignore vendored
View File

@@ -8,10 +8,10 @@ rclone.iml
.idea
.history
*.test
*.log
*.iml
fuzz-build.zip
*.orig
*.rej
Thumbs.db
__pycache__
.DS_Store

View File

@@ -2,17 +2,15 @@
linters:
enable:
- deadcode
- errcheck
- goimports
- revive
- ineffassign
- structcheck
- varcheck
- govet
- unconvert
- staticcheck
- gosimple
- stylecheck
- unused
- misspell
#- prealloc
#- maligned
disable-all: true
@@ -22,79 +20,11 @@ issues:
exclude-use-default: false
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
- max-issues-per-linter: 0
+ max-per-linter: 0
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0
exclude-rules:
- linters:
- staticcheck
text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'
# don't disable the revive messages about comments on exported functions
include:
- EXC0012
- EXC0013
- EXC0014
- EXC0015
run:
# timeout for analysis, e.g. 30s, 5m, default is 1m
timeout: 10m
linters-settings:
revive:
# setting rules seems to disable all the rules, so re-enable them here
rules:
- name: blank-imports
disabled: false
- name: context-as-argument
disabled: false
- name: context-keys-type
disabled: false
- name: dot-imports
disabled: false
- name: empty-block
disabled: true
- name: error-naming
disabled: false
- name: error-return
disabled: false
- name: error-strings
disabled: false
- name: errorf
disabled: false
- name: exported
disabled: false
- name: increment-decrement
disabled: true
- name: indent-error-flow
disabled: false
- name: package-comments
disabled: false
- name: range
disabled: false
- name: receiver-naming
disabled: false
- name: redefines-builtin-id
disabled: true
- name: superfluous-else
disabled: true
- name: time-naming
disabled: false
- name: unexported-return
disabled: false
- name: unreachable-code
disabled: true
- name: unused-parameter
disabled: true
- name: var-declaration
disabled: false
- name: var-naming
disabled: false
stylecheck:
# Only enable the checks performed by the staticcheck stand-alone tool,
# as documented here: https://staticcheck.io/docs/configuration/options/#checks
checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]

View File

@@ -419,7 +419,7 @@ remote or an fs.
Research
- * Look at the interfaces defined in `fs/types.go`
+ * Look at the interfaces defined in `fs/fs.go`
* Study one or more of the existing remotes
Getting going
@@ -428,19 +428,14 @@ Getting going
* box is a good one to start from if you have a directory-based remote
* b2 is a good one to start from if you have a bucket-based remote
* Add your remote to the imports in `backend/all/all.go`
- * HTTP based remotes are easiest to maintain if they use rclone's [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but if there is a really good go SDK then use that instead.
+ * HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
* Try to implement as many optional methods as possible as it makes the remote more usable.
- * Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to make sure we can encode any path name and `rclone info` to help determine the encodings needed
+ * Use lib/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
* `rclone purge -v TestRemote:rclone-info`
* `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
* `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
* open `remote.csv` in a spreadsheet and examine
Important:
* Please use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) if you are implementing a REST like backend and parsing XML/JSON in the backend. It makes maintenance much easier.
* If your backend is HTTP based then please use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp) - this adds features like `--dump bodies`, `--tpslimit`, `--user-agent` without you having to code anything!
Unit tests
* Create a config entry called `TestRemote` for the unit tests to use
@@ -474,7 +469,7 @@ alphabetical order of full name of remote (e.g. `drive` is ordered as
* `README.md` - main GitHub page
* `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
* make sure this has the `autogenerated options` comments in (see your reference backend docs)
- * update them in your backend with `bin/make_backend_docs.py remote`
+ * update them with `make backenddocs` - revert any changes in other backends
* `docs/content/overview.md` - overview docs
* `docs/content/docs.md` - list of remotes in config section
* `docs/content/_index.md` - front page of rclone.org

View File

@@ -11,7 +11,7 @@ RUN ./rclone version
# Begin final image
FROM alpine:latest
- RUN apk --no-cache add ca-certificates fuse3 tzdata && \
+ RUN apk --no-cache add ca-certificates fuse tzdata && \
echo "user_allow_other" >> /etc/fuse.conf
COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/

View File

@@ -16,11 +16,6 @@ Current active maintainers of rclone are:
| Max Sum | @Max-Sum | union backend |
| Fred | @creativeprojects | seafile backend |
| Caleb Case | @calebcase | storj backend |
| wiserain | @wiserain | pikpak backend |
| albertony | @albertony | |
| Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
| Hideo Aoyama | @boukendesho | snap packaging |
| nielash | @nielash | bisync |
**This is a work in progress Draft**

35009
MANUAL.html generated

File diff suppressed because it is too large

14070
MANUAL.md generated

File diff suppressed because it is too large

34994
MANUAL.txt generated

File diff suppressed because it is too large

View File

@@ -81,9 +81,6 @@ quicktest:
racequicktest:
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...
compiletest:
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -run XXX ./...
# Do source code quality checks
check: rclone
@echo "-- START CODE QUALITY REPORT -------------------------------"
@@ -96,7 +93,7 @@ build_dep:
# Get the release dependencies we only install on linux
release_dep_linux:
- go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest
+ go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64\.tar\.gz'
# Get the release dependencies we only install on Windows
release_dep_windows:

View File

@@ -25,19 +25,18 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
* Box [:page_facing_up:](https://rclone.org/box/)
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
* China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
* Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
* Arvan Cloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
@@ -46,13 +45,11 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
@@ -62,32 +59,23 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
* OVH [:page_facing_up:](https://rclone.org/swift/)
* Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
* PikPak [:page_facing_up:](https://rclone.org/pikpak/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
* Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* Storj [:page_facing_up:](https://rclone.org/storj/)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)

View File

@@ -10,7 +10,7 @@ This file describes how to make the various kinds of releases
## Making a release
* git checkout master # see below for stable branch
- * git pull # IMPORTANT
+ * git pull
* git status - make sure everything is checked in
* Check GitHub actions build for master is Green
* make test # see integration test server or run locally
@@ -21,7 +21,6 @@ This file describes how to make the various kinds of releases
* git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX.0"
* make retag
* git push origin # without --follow-tags so it doesn't push the tag if it fails
* git push --follow-tags origin
* # Wait for the GitHub builds to complete then...
* make fetch_binaries
@@ -54,14 +53,6 @@ doing that so it may be necessary to roll back dependencies to the
version specified by `make updatedirect` in order to get rclone to
build.
## Tidy beta
At some point after the release run
bin/tidy-beta v1.55
where the version number is that of a couple ago to remove old beta binaries.
## Making a point release
If rclone needs a point release due to some horrendous bug:
@@ -75,7 +66,8 @@ Set vars
First make the release branch. If this is a second point release then
this will be done already.
- * git co -b ${BASE_TAG}-stable ${BASE_TAG}.0
+ * git branch ${BASE_TAG} ${BASE_TAG}-stable
+ * git co ${BASE_TAG}-stable
* make startstable
Now
@@ -90,28 +82,6 @@ Now
* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
* git push
## Update the website between releases
Create an update website branch based off the last release
git co -b update-website
If the branch already exists, double check there are no commits that need saving.
Now reset the branch to the last release
git reset --hard v1.64.0
Create the changes, check them in, test with `make serve` then
make upload_test_website
Check out https://test.rclone.org and when happy
make upload_website
Cherry pick any changes back to master and the stable branch if it is active.
## Making a manual build of docker
The rclone docker image should autobuild on via GitHub actions. If it doesn't

View File

@@ -1 +1 @@
- v1.64.2
+ v1.60.0

View File

@@ -24,6 +24,7 @@ import (
_ "github.com/rclone/rclone/backend/hdfs" _ "github.com/rclone/rclone/backend/hdfs"
_ "github.com/rclone/rclone/backend/hidrive" _ "github.com/rclone/rclone/backend/hidrive"
_ "github.com/rclone/rclone/backend/http" _ "github.com/rclone/rclone/backend/http"
_ "github.com/rclone/rclone/backend/hubic"
_ "github.com/rclone/rclone/backend/internetarchive" _ "github.com/rclone/rclone/backend/internetarchive"
_ "github.com/rclone/rclone/backend/jottacloud" _ "github.com/rclone/rclone/backend/jottacloud"
_ "github.com/rclone/rclone/backend/koofr" _ "github.com/rclone/rclone/backend/koofr"
@@ -34,20 +35,15 @@ import (
_ "github.com/rclone/rclone/backend/netstorage" _ "github.com/rclone/rclone/backend/netstorage"
_ "github.com/rclone/rclone/backend/onedrive" _ "github.com/rclone/rclone/backend/onedrive"
_ "github.com/rclone/rclone/backend/opendrive" _ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/oracleobjectstorage"
_ "github.com/rclone/rclone/backend/pcloud" _ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/pikpak"
_ "github.com/rclone/rclone/backend/premiumizeme" _ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/protondrive"
_ "github.com/rclone/rclone/backend/putio" _ "github.com/rclone/rclone/backend/putio"
_ "github.com/rclone/rclone/backend/qingstor" _ "github.com/rclone/rclone/backend/qingstor"
_ "github.com/rclone/rclone/backend/quatrix"
_ "github.com/rclone/rclone/backend/s3" _ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/seafile" _ "github.com/rclone/rclone/backend/seafile"
_ "github.com/rclone/rclone/backend/sftp" _ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/sharefile" _ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/sia" _ "github.com/rclone/rclone/backend/sia"
_ "github.com/rclone/rclone/backend/smb"
_ "github.com/rclone/rclone/backend/storj" _ "github.com/rclone/rclone/backend/storj"
_ "github.com/rclone/rclone/backend/sugarsync" _ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift" _ "github.com/rclone/rclone/backend/swift"

File diff suppressed because it is too large

View File

@@ -20,18 +20,17 @@ func (f *Fs) InternalTest(t *testing.T) {
func TestIncrement(t *testing.T) {
for _, test := range []struct {
- in [8]byte
+ in []byte
- want [8]byte
+ want []byte
}{
- {[8]byte{0, 0, 0, 0}, [8]byte{1, 0, 0, 0}},
+ {[]byte{0, 0, 0, 0}, []byte{1, 0, 0, 0}},
- {[8]byte{0xFE, 0, 0, 0}, [8]byte{0xFF, 0, 0, 0}},
+ {[]byte{0xFE, 0, 0, 0}, []byte{0xFF, 0, 0, 0}},
- {[8]byte{0xFF, 0, 0, 0}, [8]byte{0, 1, 0, 0}},
+ {[]byte{0xFF, 0, 0, 0}, []byte{0, 1, 0, 0}},
- {[8]byte{0, 1, 0, 0}, [8]byte{1, 1, 0, 0}},
+ {[]byte{0, 1, 0, 0}, []byte{1, 1, 0, 0}},
- {[8]byte{0xFF, 0xFF, 0xFF, 0xFE}, [8]byte{0, 0, 0, 0xFF}},
+ {[]byte{0xFF, 0xFF, 0xFF, 0xFE}, []byte{0, 0, 0, 0xFF}},
- {[8]byte{0xFF, 0xFF, 0xFF, 0xFF}, [8]byte{0, 0, 0, 0, 1}},
+ {[]byte{0xFF, 0xFF, 0xFF, 0xFF}, []byte{0, 0, 0, 0}},
- {[8]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, [8]byte{0, 0, 0, 0, 0, 0, 0}},
} {
- increment(&test.in)
+ increment(test.in)
assert.Equal(t, test.want, test.in)
}
}

View File

@@ -6,10 +6,10 @@
package azureblob
import (
"context"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
)
@@ -17,31 +17,10 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
- ChunkedUpload: fstests.ChunkedUploadConfig{
+ ChunkedUpload: fstests.ChunkedUploadConfig{},
MinChunkSize: defaultChunkSize,
},
})
}
// TestIntegration2 runs integration tests against the remote
func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
name := "TestAzureBlob"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize,
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "directory_markers", Value: "true"},
},
})
}
@@ -53,6 +32,36 @@ var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
)
// TestServicePrincipalFileSuccess checks that, given a proper JSON file, we can create a token.
func TestServicePrincipalFileSuccess(t *testing.T) {
ctx := context.TODO()
credentials := `
{
"appId": "my application (client) ID",
"password": "my secret",
"tenant": "my active directory tenant ID"
}
`
tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
if assert.NoError(t, err) {
assert.NotNil(t, tokenRefresher)
}
}
// TestServicePrincipalFileFailure checks that, given a JSON file with a missing secret, it returns an error.
func TestServicePrincipalFileFailure(t *testing.T) {
ctx := context.TODO()
credentials := `
{
"appId": "my application (client) ID",
"tenant": "my active directory tenant ID"
}
`
_, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
assert.Error(t, err)
assert.EqualError(t, err, "error creating service principal token: parameter 'secret' cannot be empty")
}
func TestValidateAccessTier(t *testing.T) {
tests := map[string]struct {
accessTier string

137
backend/azureblob/imds.go Normal file
View File

@@ -0,0 +1,137 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fshttp"
)
const (
azureResource = "https://storage.azure.com"
imdsAPIVersion = "2018-02-01"
msiEndpointDefault = "http://169.254.169.254/metadata/identity/oauth2/token"
)
// This custom type is used to add the port the test server has bound to
// to the request context.
type testPortKey string
type msiIdentifierType int
const (
msiClientID msiIdentifierType = iota
msiObjectID
msiResourceID
)
type userMSI struct {
Type msiIdentifierType
Value string
}
type httpError struct {
Response *http.Response
}
func (e httpError) Error() string {
return fmt.Sprintf("HTTP error %v (%v)", e.Response.StatusCode, e.Response.Status)
}
// GetMSIToken attempts to obtain an MSI token from the Azure Instance
// Metadata Service.
func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
// Attempt to get an MSI token; silently continue if unsuccessful.
// This code has been lovingly stolen from azcopy's OAuthTokenManager.
result := adal.Token{}
req, err := http.NewRequestWithContext(ctx, "GET", msiEndpointDefault, nil)
if err != nil {
fs.Debugf(nil, "Failed to create request: %v", err)
return result, err
}
params := req.URL.Query()
params.Set("resource", azureResource)
params.Set("api-version", imdsAPIVersion)
// Specify user-assigned identity if requested.
if identity != nil {
switch identity.Type {
case msiClientID:
params.Set("client_id", identity.Value)
case msiObjectID:
params.Set("object_id", identity.Value)
case msiResourceID:
params.Set("mi_res_id", identity.Value)
default:
// If this happens, the calling function and this one don't agree on
// what valid ID types exist.
return result, fmt.Errorf("unknown MSI identity type specified")
}
}
req.URL.RawQuery = params.Encode()
// The Metadata header is required by all calls to IMDS.
req.Header.Set("Metadata", "true")
// If this function is run in a test, query the test server instead of IMDS.
testPort, isTest := ctx.Value(testPortKey("testPort")).(int)
if isTest {
req.URL.Host = fmt.Sprintf("localhost:%d", testPort)
req.Host = req.URL.Host
}
// Send request
httpClient := fshttp.NewClient(ctx)
resp, err := httpClient.Do(req)
if err != nil {
return result, fmt.Errorf("MSI is not enabled on this VM: %w", err)
}
defer func() { // resp and Body should not be nil
_, err = io.Copy(ioutil.Discard, resp.Body)
if err != nil {
fs.Debugf(nil, "Unable to drain IMDS response: %v", err)
}
err = resp.Body.Close()
if err != nil {
fs.Debugf(nil, "Unable to close IMDS response: %v", err)
}
}()
// Check if the status code indicates success
// The request returns 200 currently, add 201 and 202 as well for possible extension.
switch resp.StatusCode {
case 200, 201, 202:
break
default:
body, _ := ioutil.ReadAll(resp.Body)
fs.Errorf(nil, "Couldn't obtain OAuth token from IMDS; server returned status code %d and body: %v", resp.StatusCode, string(body))
return result, httpError{Response: resp}
}
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return result, fmt.Errorf("couldn't read IMDS response: %w", err)
}
// Remove BOM, if any. azcopy does this so I'm following along.
b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
// This would be a good place to persist the token if a large number of rclone
// invocations are being made in a short amount of time. If the token is
// persisted, the azureblob code will need to check for expiry before every
// storage API call.
err = json.Unmarshal(b, &result)
if err != nil {
return result, fmt.Errorf("couldn't unmarshal IMDS response: %w", err)
}
return result, nil
}

View File

@@ -0,0 +1,118 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob
import (
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func handler(t *testing.T, actual *map[string]string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
require.NoError(t, err)
parameters := r.URL.Query()
(*actual)["path"] = r.URL.Path
(*actual)["Metadata"] = r.Header.Get("Metadata")
(*actual)["method"] = r.Method
for paramName := range parameters {
(*actual)[paramName] = parameters.Get(paramName)
}
// Make response.
response := adal.Token{}
responseBytes, err := json.Marshal(response)
require.NoError(t, err)
_, err = w.Write(responseBytes)
require.NoError(t, err)
}
}
func TestManagedIdentity(t *testing.T) {
// test user-assigned identity specifiers to use
testMSIClientID := "d859b29f-5c9c-42f8-a327-ec1bc6408d79"
testMSIObjectID := "9ffeb650-3ca0-4278-962b-5a38d520591a"
testMSIResourceID := "/subscriptions/fe714c49-b8a4-4d49-9388-96a20daa318f/resourceGroups/somerg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/someidentity"
tests := []struct {
identity *userMSI
identityParameterName string
expectedAbsent []string
}{
{&userMSI{msiClientID, testMSIClientID}, "client_id", []string{"object_id", "mi_res_id"}},
{&userMSI{msiObjectID, testMSIObjectID}, "object_id", []string{"client_id", "mi_res_id"}},
{&userMSI{msiResourceID, testMSIResourceID}, "mi_res_id", []string{"object_id", "client_id"}},
{nil, "(default)", []string{"object_id", "client_id", "mi_res_id"}},
}
alwaysExpected := map[string]string{
"path": "/metadata/identity/oauth2/token",
"resource": "https://storage.azure.com",
"Metadata": "true",
"api-version": "2018-02-01",
"method": "GET",
}
for _, test := range tests {
actual := make(map[string]string, 10)
testServer := httptest.NewServer(handler(t, &actual))
defer testServer.Close()
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
_, err = GetMSIToken(ctx, test.identity)
require.NoError(t, err)
// Validate expected query parameters present
expected := make(map[string]string)
for k, v := range alwaysExpected {
expected[k] = v
}
if test.identity != nil {
expected[test.identityParameterName] = test.identity.Value
}
for key := range expected {
value, exists := actual[key]
if assert.Truef(t, exists, "test of %s: query parameter %s was not passed",
test.identityParameterName, key) {
assert.Equalf(t, expected[key], value,
"test of %s: parameter %s has incorrect value", test.identityParameterName, key)
}
}
// Validate unexpected query parameters absent
for _, key := range test.expectedAbsent {
_, exists := actual[key]
assert.Falsef(t, exists, "query parameter %s was unexpectedly passed")
}
}
}
func errorHandler(resultCode int) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Test error generated", resultCode)
}
}
func TestIMDSErrors(t *testing.T) {
errorCodes := []int{404, 429, 500}
for _, code := range errorCodes {
testServer := httptest.NewServer(errorHandler(code))
defer testServer.Close()
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
_, err = GetMSIToken(ctx, nil)
require.Error(t, err)
httpErr, ok := err.(httpError)
require.Truef(t, ok, "HTTP error %d did not result in an httpError object", code)
assert.Equalf(t, httpErr.Response.StatusCode, code, "desired error %d but didn't get it", code)
}
}

View File

@@ -32,7 +32,6 @@ import (
"github.com/rclone/rclone/fs/walk" "github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket" "github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/multipart"
"github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/pool" "github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/rest" "github.com/rclone/rclone/lib/rest"
@@ -58,7 +57,9 @@ const (
minChunkSize = 5 * fs.Mebi
defaultChunkSize = 96 * fs.Mebi
defaultUploadCutoff = 200 * fs.Mebi
largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
memoryPoolUseMmap = false
)
// Globals
@@ -74,15 +75,13 @@ func init() {
Description: "Backblaze B2", Description: "Backblaze B2",
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "account", Name: "account",
Help: "Account ID or Application Key ID.", Help: "Account ID or Application Key ID.",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "key", Name: "key",
Help: "Application Key.", Help: "Application Key.",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "endpoint", Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.", Help: "Endpoint for the service.\n\nLeave blank normally.",
@@ -148,18 +147,6 @@ might a maximum of "--transfers" chunks in progress at once.
5,000,000 Bytes is the minimum size.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.
This is the number of chunks of the same file that are uploaded
concurrently.
Note that chunks are stored in memory and there may be up to
"--transfers" * "--b2-upload-concurrency" chunks stored at once
in memory.`,
Default: 4,
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Disable checksums for large (> upload cutoff) files.
@@ -199,16 +186,16 @@ The minimum value is 1 second. The maximum value is one week.`,
Advanced: true,
}, {
Name: "memory_pool_flush_time",
- Default: fs.Duration(time.Minute),
+ Default: memoryPoolFlushTime,
Advanced: true,
- Hide: fs.OptionHideBoth,
- Help: `How often internal memory buffer pools will be flushed. (no longer used)`,
+ Help: `How often internal memory buffer pools will be flushed.
+ Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
+ This option controls how often unused buffers will be removed from the pool.`,
}, {
Name: "memory_pool_use_mmap",
- Default: false,
+ Default: memoryPoolUseMmap,
Advanced: true,
- Hide: fs.OptionHideBoth,
- Help: `Whether to use mmap buffers in internal memory pool. (no longer used)`,
+ Help: `Whether to use mmap buffers in internal memory pool.`,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -235,10 +222,11 @@ type Options struct {
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
DisableCheckSum bool `config:"disable_checksum"`
DownloadURL string `config:"download_url"`
DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"`
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -263,6 +251,7 @@ type Fs struct {
authMu sync.Mutex // lock for authorizing the account
pacer *fs.Pacer // To pace and retry the API calls
uploadToken *pacer.TokenDispenser // control concurrency
pool *pool.Pool // memory pool
}
// Object describes a b2 object
@@ -467,6 +456,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
uploads: make(map[string][]*api.GetUploadURLResponse),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
ci.Transfers,
opt.MemoryPoolUseMmap,
),
}
f.setRoot(root)
f.features = (&fs.Features{
@@ -600,24 +595,23 @@ func (f *Fs) clearUploadURL(bucketID string) {
f.uploadMu.Unlock()
}
- // getRW gets a RW buffer and an upload token
+ // getBuf gets a buffer of f.opt.ChunkSize and an upload token
//
// If noBuf is set then it just gets an upload token
- func (f *Fs) getRW(noBuf bool) (rw *pool.RW) {
+ func (f *Fs) getBuf(noBuf bool) (buf []byte) {
f.uploadToken.Get()
if !noBuf {
- rw = multipart.NewRW()
+ buf = f.pool.Get()
}
- return rw
+ return buf
}
- // putRW returns a RW buffer to the memory pool and returns an upload
- // token
+ // putBuf returns a buffer to the memory pool and an upload token
//
- // If buf is nil then it just returns the upload token
+ // If noBuf is set then it just returns the upload token
- func (f *Fs) putRW(rw *pool.RW) {
+ func (f *Fs) putBuf(buf []byte, noBuf bool) {
- if rw != nil {
+ if !noBuf {
- _ = rw.Close()
+ f.pool.Put(buf)
}
f.uploadToken.Put()
}
@@ -1227,7 +1221,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
fs.Errorf(object.Name, "Can't create object %v", err)
continue
}
- tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "deleting")
+ tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
err = f.deleteByID(ctx, object.ID, object.Name)
checkErr(err)
tr.Done(ctx, err)
@@ -1241,7 +1235,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
if err != nil {
fs.Errorf(object, "Can't create object %+v", err)
}
- tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
+ tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
if oldOnly && last != remote {
// Check current version of the file
if object.Action == "hide" {
@@ -1297,11 +1291,7 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
if err != nil {
return err
}
- err = up.Copy(ctx)
- if err != nil {
- return err
- }
- return dstObj.decodeMetaDataFileInfo(up.info)
+ return up.Upload(ctx)
}
dstBucket, dstPath := dstObj.split()
@@ -1430,7 +1420,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
if err != nil {
return "", err
}
- absPath := "/" + urlEncode(bucketPath)
+ absPath := "/" + bucketPath
link = RootURL + "/file/" + urlEncode(bucket) + absPath
bucketType, err := f.getbucketType(ctx, bucket)
if err != nil {
@@ -1869,11 +1859,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return err
}
- if size < 0 {
+ if size == -1 {
// Check if the file is large enough for a chunked upload (needs to be at least two chunks)
- rw := o.fs.getRW(false)
+ buf := o.fs.getBuf(false)
- n, err := io.CopyN(rw, in, int64(o.fs.opt.ChunkSize))
+ n, err := io.ReadFull(in, buf)
if err == nil {
bufReader := bufio.NewReader(in)
in = bufReader
@@ -1884,34 +1874,26 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
fs.Debugf(o, "File is big enough for chunked streaming") fs.Debugf(o, "File is big enough for chunked streaming")
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil) up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
if err != nil { if err != nil {
o.fs.putRW(rw) o.fs.putBuf(buf, false)
return err return err
} }
// NB Stream returns the buffer and token // NB Stream returns the buffer and token
err = up.Stream(ctx, rw) return up.Stream(ctx, buf)
if err != nil { } else if err == io.EOF || err == io.ErrUnexpectedEOF {
return err
}
return o.decodeMetaDataFileInfo(up.info)
} else if err == io.EOF {
fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n) fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
defer o.fs.putRW(rw) defer o.fs.putBuf(buf, false)
size = n size = int64(n)
in = rw in = bytes.NewReader(buf[:n])
} else { } else {
o.fs.putRW(rw) o.fs.putBuf(buf, false)
return err return err
} }
} else if size > int64(o.fs.opt.UploadCutoff) { } else if size > int64(o.fs.opt.UploadCutoff) {
chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{ up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
Open: o.fs,
OpenOptions: options,
})
if err != nil { if err != nil {
return err return err
} }
up := chunkWriter.(*largeUpload) return up.Upload(ctx)
return o.decodeMetaDataFileInfo(up.info)
} }
modTime := src.ModTime(ctx) modTime := src.ModTime(ctx)
@@ -2019,41 +2001,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return o.decodeMetaDataFileInfo(&response) return o.decodeMetaDataFileInfo(&response)
} }
// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
// FIXME what if file is smaller than 1 chunk?
if f.opt.Versions {
return info, nil, errNotWithVersions
}
if f.opt.VersionAt.IsSet() {
return info, nil, errNotWithVersionAt
}
//size := src.Size()
// Temporary Object under construction
o := &Object{
fs: f,
remote: src.Remote(),
}
bucket, _ := o.split()
err = f.makeBucket(ctx, bucket)
if err != nil {
return info, nil, err
}
info = fs.ChunkWriterInfo{
ChunkSize: int64(f.opt.ChunkSize),
Concurrency: o.fs.opt.UploadConcurrency,
//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
}
up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil)
return info, up, err
}
// Remove an object // Remove an object
func (o *Object) Remove(ctx context.Context) error { func (o *Object) Remove(ctx context.Context) error {
bucket, bucketPath := o.split() bucket, bucketPath := o.split()
@@ -2081,15 +2028,14 @@ func (o *Object) ID() string {
// Check the interfaces are satisfied // Check the interfaces are satisfied
var ( var (
_ fs.Fs = &Fs{} _ fs.Fs = &Fs{}
_ fs.Purger = &Fs{} _ fs.Purger = &Fs{}
_ fs.Copier = &Fs{} _ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{} _ fs.PutStreamer = &Fs{}
_ fs.CleanUpper = &Fs{} _ fs.CleanUpper = &Fs{}
_ fs.ListRer = &Fs{} _ fs.ListRer = &Fs{}
_ fs.PublicLinker = &Fs{} _ fs.PublicLinker = &Fs{}
_ fs.OpenChunkWriter = &Fs{} _ fs.Object = &Object{}
_ fs.Object = &Object{} _ fs.MimeTyper = &Object{}
_ fs.MimeTyper = &Object{} _ fs.IDer = &Object{}
_ fs.IDer = &Object{}
) )
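Aside, as an illustration only (this is not rclone code): the Update hunk above decides between a direct single-chunk upload and a chunked streaming upload by copying at most one chunk out of the incoming stream and checking whether io.CopyN hit io.EOF. A minimal standalone sketch of that probe, with hypothetical names, assuming nothing beyond the standard library:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// probe copies at most chunkSize bytes from in and reports whether the
// stream fitted in a single chunk (io.EOF) or has more data (nil error).
func probe(in io.Reader, chunkSize int64) {
	var buf bytes.Buffer
	n, err := io.CopyN(&buf, in, chunkSize)
	switch {
	case err == io.EOF:
		fmt.Printf("%d bytes total: small enough for a direct single-chunk upload\n", n)
	case err == nil:
		fmt.Printf("full %d byte chunk read: more data follows, use chunked upload\n", n)
	default:
		fmt.Println("read error:", err)
	}
}

func main() {
	probe(strings.NewReader("short"), 16)                           // hits io.EOF after 5 bytes
	probe(strings.NewReader("a much longer body of test data"), 16) // reads a full 16 byte chunk
}

io.CopyN only returns io.EOF when the source ran out before a full chunk was copied, which is exactly the single-chunk case.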
@@ -1,19 +1,10 @@
package b2 package b2
import ( import (
"bytes"
"context"
"fmt"
"testing" "testing"
"time" "time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
) )
// Test b2 string encoding // Test b2 string encoding
@@ -177,100 +168,3 @@ func TestParseTimeString(t *testing.T) {
} }
} }
// The integration tests do a reasonable job of testing the normal
// copy but don't test the chunked copy.
func (f *Fs) InternalTestChunkedCopy(t *testing.T) {
ctx := context.Background()
contents := random.String(8 * 1024 * 1024)
item := fstest.NewItem("chunked-copy", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
src := fstests.PutTestContents(ctx, t, f, &item, contents, true)
defer func() {
assert.NoError(t, src.Remove(ctx))
}()
var itemCopy = item
itemCopy.Path += ".copy"
// Set copy cutoff to minimum value so we make chunks
origCutoff := f.opt.CopyCutoff
f.opt.CopyCutoff = minChunkSize
defer func() {
f.opt.CopyCutoff = origCutoff
}()
// Do the copy
dst, err := f.Copy(ctx, src, itemCopy.Path)
require.NoError(t, err)
defer func() {
assert.NoError(t, dst.Remove(ctx))
}()
// Check size
assert.Equal(t, src.Size(), dst.Size())
// Check modtime
srcModTime := src.ModTime(ctx)
dstModTime := dst.ModTime(ctx)
assert.True(t, srcModTime.Equal(dstModTime))
// Make sure contents are correct
gotContents := fstests.ReadObject(ctx, t, dst, -1)
assert.Equal(t, contents, gotContents)
}
// The integration tests do a reasonable job of testing the normal
// streaming upload but don't test the chunked streaming upload.
func (f *Fs) InternalTestChunkedStreamingUpload(t *testing.T, size int) {
ctx := context.Background()
contents := random.String(size)
item := fstest.NewItem(fmt.Sprintf("chunked-streaming-upload-%d", size), contents, fstest.Time("2001-05-06T04:05:06.499Z"))
// Set chunk size to minimum value so we make chunks
origOpt := f.opt
f.opt.ChunkSize = minChunkSize
f.opt.UploadCutoff = 0
defer func() {
f.opt = origOpt
}()
// Do the streaming upload
src := object.NewStaticObjectInfo(item.Path, item.ModTime, -1, true, item.Hashes, f)
in := bytes.NewBufferString(contents)
dst, err := f.PutStream(ctx, in, src)
require.NoError(t, err)
defer func() {
assert.NoError(t, dst.Remove(ctx))
}()
// Check size
assert.Equal(t, int64(size), dst.Size())
// Check modtime
srcModTime := src.ModTime(ctx)
dstModTime := dst.ModTime(ctx)
assert.Equal(t, srcModTime, dstModTime)
// Make sure contents are correct
gotContents := fstests.ReadObject(ctx, t, dst, -1)
assert.Equal(t, contents, gotContents, "Contents incorrect")
}
// -run TestIntegration/FsMkdir/FsPutFiles/Internal
func (f *Fs) InternalTest(t *testing.T) {
t.Run("ChunkedCopy", f.InternalTestChunkedCopy)
for _, size := range []fs.SizeSuffix{
minChunkSize - 1,
minChunkSize,
minChunkSize + 1,
(3 * minChunkSize) / 2,
(5 * minChunkSize) / 2,
} {
t.Run(fmt.Sprintf("ChunkedStreamingUpload/%d", size), func(t *testing.T) {
f.InternalTestChunkedStreamingUpload(t, int(size))
})
}
}
var _ fstests.InternalTester = (*Fs)(nil)
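Aside, as an illustration only (not rclone code): the var _ fstests.InternalTester = (*Fs)(nil) line above is the usual Go compile-time assertion that a type still satisfies an interface. A tiny standalone example of the same idiom, with hypothetical names:

package main

import "fmt"

type Greeter interface{ Greet() string }

type T struct{}

func (t *T) Greet() string { return "hello" }

// Fails to compile if *T ever stops implementing Greeter.
var _ Greeter = (*T)(nil)

func main() { fmt.Println((&T{}).Greet()) }

The assignment costs nothing at runtime; it only exists so the compiler checks the method set.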
@@ -5,6 +5,7 @@
package b2 package b2
import ( import (
"bytes"
"context" "context"
"crypto/sha1" "crypto/sha1"
"encoding/hex" "encoding/hex"
@@ -20,7 +21,6 @@ import (
"github.com/rclone/rclone/fs/chunksize" "github.com/rclone/rclone/fs/chunksize"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit" "github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/rest" "github.com/rclone/rclone/lib/rest"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )
@@ -78,14 +78,12 @@ type largeUpload struct {
wrap accounting.WrapFn // account parts being transferred wrap accounting.WrapFn // account parts being transferred
id string // ID of the file being uploaded id string // ID of the file being uploaded
size int64 // total size size int64 // total size
parts int // calculated number of parts, if known parts int64 // calculated number of parts, if known
sha1smu sync.Mutex // mutex to protect sha1s
sha1s []string // slice of SHA1s for each part sha1s []string // slice of SHA1s for each part
uploadMu sync.Mutex // lock for upload variable uploadMu sync.Mutex // lock for upload variable
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
chunkSize int64 // chunk size to use chunkSize int64 // chunk size to use
src *Object // if copying, object we are reading from src *Object // if copying, object we are reading from
info *api.FileInfo // final response with info about the object
} }
// newLargeUpload starts an upload of object o from in with metadata in src // newLargeUpload starts an upload of object o from in with metadata in src
@@ -93,16 +91,18 @@ type largeUpload struct {
// If newInfo is set then metadata from that will be used instead of reading it from src // If newInfo is set then metadata from that will be used instead of reading it from src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) { func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
size := src.Size() size := src.Size()
parts := 0 parts := int64(0)
sha1SliceSize := int64(maxParts)
chunkSize := defaultChunkSize chunkSize := defaultChunkSize
if size == -1 { if size == -1 {
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize) fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
} else { } else {
chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize) chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize)
parts = int(size / int64(chunkSize)) parts = size / int64(chunkSize)
if size%int64(chunkSize) != 0 { if size%int64(chunkSize) != 0 {
parts++ parts++
} }
sha1SliceSize = parts
} }
opts := rest.Opts{ opts := rest.Opts{
@@ -150,7 +150,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
id: response.ID, id: response.ID,
size: size, size: size,
parts: parts, parts: parts,
sha1s: make([]string, 0, 16), sha1s: make([]string, sha1SliceSize),
chunkSize: int64(chunkSize), chunkSize: int64(chunkSize),
} }
// unwrap the accounting from the input, we use wrap to put it // unwrap the accounting from the input, we use wrap to put it
@@ -169,26 +169,24 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
// This should be returned with returnUploadURL when finished // This should be returned with returnUploadURL when finished
func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) { func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
up.uploadMu.Lock() up.uploadMu.Lock()
if len(up.uploads) > 0 { defer up.uploadMu.Unlock()
if len(up.uploads) == 0 {
opts := rest.Opts{
Method: "POST",
Path: "/b2_get_upload_part_url",
}
var request = api.GetUploadPartURLRequest{
ID: up.id,
}
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
return up.f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to get upload URL: %w", err)
}
} else {
upload, up.uploads = up.uploads[0], up.uploads[1:] upload, up.uploads = up.uploads[0], up.uploads[1:]
up.uploadMu.Unlock()
return upload, nil
}
up.uploadMu.Unlock()
opts := rest.Opts{
Method: "POST",
Path: "/b2_get_upload_part_url",
}
var request = api.GetUploadPartURLRequest{
ID: up.id,
}
err = up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
return up.f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to get upload URL: %w", err)
} }
return upload, nil return upload, nil
} }
@@ -203,39 +201,10 @@ func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
up.uploadMu.Unlock() up.uploadMu.Unlock()
} }
// Add an sha1 to the being built up sha1s // Transfer a chunk
func (up *largeUpload) addSha1(chunkNumber int, sha1 string) { func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
up.sha1smu.Lock() err := up.f.pacer.Call(func() (bool, error) {
defer up.sha1smu.Unlock() fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
if len(up.sha1s) < chunkNumber+1 {
up.sha1s = append(up.sha1s, make([]string, chunkNumber+1-len(up.sha1s))...)
}
up.sha1s[chunkNumber] = sha1
}
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (size int64, err error) {
// Only account after the checksum reads have been done
if do, ok := reader.(pool.DelayAccountinger); ok {
// To figure out this number, do a transfer and if the accounted size is 0 or a
// multiple of what it should be, increase or decrease this number.
do.DelayAccounting(1)
}
err = up.f.pacer.Call(func() (bool, error) {
// Discover the size by seeking to the end
size, err = reader.Seek(0, io.SeekEnd)
if err != nil {
return false, err
}
// rewind the reader on retry and after reading size
_, err = reader.Seek(0, io.SeekStart)
if err != nil {
return false, err
}
fs.Debugf(up.o, "Sending chunk %d length %d", chunkNumber, size)
// Get upload URL // Get upload URL
upload, err := up.getUploadURL(ctx) upload, err := up.getUploadURL(ctx)
@@ -243,8 +212,8 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
return false, err return false, err
} }
in := newHashAppendingReader(reader, sha1.New()) in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
sizeWithHash := size + int64(in.AdditionalLength()) size := int64(len(body)) + int64(in.AdditionalLength())
// Authorization // Authorization
// //
@@ -274,10 +243,10 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
Body: up.wrap(in), Body: up.wrap(in),
ExtraHeaders: map[string]string{ ExtraHeaders: map[string]string{
"Authorization": upload.AuthorizationToken, "Authorization": upload.AuthorizationToken,
"X-Bz-Part-Number": fmt.Sprintf("%d", chunkNumber+1), "X-Bz-Part-Number": fmt.Sprintf("%d", part),
sha1Header: "hex_digits_at_end", sha1Header: "hex_digits_at_end",
}, },
ContentLength: &sizeWithHash, ContentLength: &size,
} }
var response api.UploadPartResponse var response api.UploadPartResponse
@@ -285,7 +254,7 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response) resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
retry, err := up.f.shouldRetry(ctx, resp, err) retry, err := up.f.shouldRetry(ctx, resp, err)
if err != nil { if err != nil {
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", chunkNumber, retry, err, err) fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
} }
// On retryable error clear PartUploadURL // On retryable error clear PartUploadURL
if retry { if retry {
@@ -293,30 +262,30 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
upload = nil upload = nil
} }
up.returnUploadURL(upload) up.returnUploadURL(upload)
up.addSha1(chunkNumber, in.HexSum()) up.sha1s[part-1] = in.HexSum()
return retry, err return retry, err
}) })
if err != nil { if err != nil {
fs.Debugf(up.o, "Error sending chunk %d: %v", chunkNumber, err) fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
} else { } else {
fs.Debugf(up.o, "Done sending chunk %d", chunkNumber) fs.Debugf(up.o, "Done sending chunk %d", part)
} }
return size, err return err
} }
// Copy a chunk // Copy a chunk
func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64) error { func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64) error {
err := up.f.pacer.Call(func() (bool, error) { err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize) fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize)
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
Path: "/b2_copy_part", Path: "/b2_copy_part",
} }
offset := int64(part) * up.chunkSize // where we are in the source file offset := (part - 1) * up.chunkSize // where we are in the source file
var request = api.CopyPartRequest{ var request = api.CopyPartRequest{
SourceID: up.src.id, SourceID: up.src.id,
LargeFileID: up.id, LargeFileID: up.id,
PartNumber: int64(part + 1), PartNumber: part,
Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1), Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
} }
var response api.UploadPartResponse var response api.UploadPartResponse
@@ -325,7 +294,7 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64)
if err != nil { if err != nil {
fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err) fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err)
} }
up.addSha1(part, response.SHA1) up.sha1s[part-1] = response.SHA1
return retry, err return retry, err
}) })
if err != nil { if err != nil {
@@ -336,8 +305,8 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64)
return err return err
} }
// Close closes off the large upload // finish closes off the large upload
func (up *largeUpload) Close(ctx context.Context) error { func (up *largeUpload) finish(ctx context.Context) error {
fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts) fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts)
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
@@ -355,12 +324,11 @@ func (up *largeUpload) Close(ctx context.Context) error {
if err != nil { if err != nil {
return err return err
} }
up.info = &response return up.o.decodeMetaDataFileInfo(&response)
return nil
} }
// Abort aborts the large upload // cancel aborts the large upload
func (up *largeUpload) Abort(ctx context.Context) error { func (up *largeUpload) cancel(ctx context.Context) error {
fs.Debugf(up.o, "Cancelling large file %s", up.what) fs.Debugf(up.o, "Cancelling large file %s", up.what)
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
@@ -385,99 +353,128 @@ func (up *largeUpload) Abort(ctx context.Context) error {
// reaches EOF. // reaches EOF.
// //
// Note that initialUploadBlock must be returned to f.putBuf() // Note that initialUploadBlock must be returned to f.putBuf()
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock *pool.RW) (err error) { func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })() defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id) fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
var ( var (
g, gCtx = errgroup.WithContext(ctx) g, gCtx = errgroup.WithContext(ctx)
hasMoreParts = true hasMoreParts = true
) )
up.size = initialUploadBlock.Size() up.size = int64(len(initialUploadBlock))
up.parts = 0 g.Go(func() error {
for part := 0; hasMoreParts; part++ { for part := int64(1); hasMoreParts; part++ {
// Get a block of memory from the pool and token which limits concurrency. // Get a block of memory from the pool and token which limits concurrency.
var rw *pool.RW var buf []byte
if part == 0 { if part == 1 {
rw = initialUploadBlock buf = initialUploadBlock
} else { } else {
rw = up.f.getRW(false) buf = up.f.getBuf(false)
}
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
up.f.putRW(rw)
break
}
// Read the chunk
var n int64
if part == 0 {
n = rw.Size()
} else {
n, err = io.CopyN(rw, up.in, up.chunkSize)
if err == io.EOF {
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
hasMoreParts = false
} else if err != nil {
// other kinds of errors indicate failure
up.f.putRW(rw)
return err
} }
}
// Keep stats up to date // Fail fast, in case an errgroup managed function returns an error
up.parts += 1 // gCtx is cancelled. There is no point in uploading all the other parts.
up.size += n if gCtx.Err() != nil {
if part > maxParts { up.f.putBuf(buf, false)
up.f.putRW(rw) return nil
return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts) }
}
part := part // for the closure // Read the chunk
g.Go(func() (err error) { var n int
defer up.f.putRW(rw) if part == 1 {
_, err = up.WriteChunk(gCtx, part, rw) n = len(buf)
return err } else {
}) n, err = io.ReadFull(up.in, buf)
} if err == io.ErrUnexpectedEOF {
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
buf = buf[:n]
hasMoreParts = false
} else if err == io.EOF {
fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
up.f.putBuf(buf, false)
return nil
} else if err != nil {
// other kinds of errors indicate failure
up.f.putBuf(buf, false)
return err
}
}
// Keep stats up to date
up.parts = part
up.size += int64(n)
if part > maxParts {
up.f.putBuf(buf, false)
return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
}
part := part // for the closure
g.Go(func() (err error) {
defer up.f.putBuf(buf, false)
return up.transferChunk(gCtx, part, buf)
})
}
return nil
})
err = g.Wait() err = g.Wait()
if err != nil { if err != nil {
return err return err
} }
return up.Close(ctx) up.sha1s = up.sha1s[:up.parts]
return up.finish(ctx)
} }
// Copy the chunks from the source to the destination // Upload uploads the chunks from the input
func (up *largeUpload) Copy(ctx context.Context) (err error) { func (up *largeUpload) Upload(ctx context.Context) (err error) {
defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })() defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id) fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
var ( var (
g, gCtx = errgroup.WithContext(ctx) g, gCtx = errgroup.WithContext(ctx)
remaining = up.size remaining = up.size
) )
g.SetLimit(up.f.opt.UploadConcurrency) g.Go(func() error {
for part := 0; part < up.parts; part++ { for part := int64(1); part <= up.parts; part++ {
// Fail fast, in case an errgroup managed function returns an error // Get a block of memory from the pool and token which limits concurrency.
// gCtx is cancelled. There is no point in copying all the other parts. buf := up.f.getBuf(up.doCopy)
if gCtx.Err() != nil {
break
}
reqSize := remaining // Fail fast, in case an errgroup managed function returns an error
if reqSize >= up.chunkSize { // gCtx is cancelled. There is no point in uploading all the other parts.
reqSize = up.chunkSize if gCtx.Err() != nil {
} up.f.putBuf(buf, up.doCopy)
return nil
}
part := part // for the closure reqSize := remaining
g.Go(func() (err error) { if reqSize >= up.chunkSize {
return up.copyChunk(gCtx, part, reqSize) reqSize = up.chunkSize
}) }
remaining -= reqSize
} if !up.doCopy {
// Read the chunk
buf = buf[:reqSize]
_, err = io.ReadFull(up.in, buf)
if err != nil {
up.f.putBuf(buf, up.doCopy)
return err
}
}
part := part // for the closure
g.Go(func() (err error) {
defer up.f.putBuf(buf, up.doCopy)
if !up.doCopy {
err = up.transferChunk(gCtx, part, buf)
} else {
err = up.copyChunk(gCtx, part, reqSize)
}
return err
})
remaining -= reqSize
}
return nil
})
err = g.Wait() err = g.Wait()
if err != nil { if err != nil {
return err return err
} }
return up.Close(ctx) return up.finish(ctx)
} }
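Aside, as an illustration only (not rclone code): the upload.go hunks above refer to a WriteChunk / Close / Abort trio on largeUpload, driven through the multipart helper mentioned in the Update hunk. A minimal sequential driver for that shape of interface could look like the sketch below; chunkWriter and uploadSequentially are hypothetical names, and the real, concurrent and retrying version is not a simple loop like this.

package sketch

import (
	"bytes"
	"context"
	"io"
)

// chunkWriter mirrors the method set used in the hunks above.
type chunkWriter interface {
	WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (int64, error)
	Close(ctx context.Context) error
	Abort(ctx context.Context) error
}

// uploadSequentially feeds in to w one chunk at a time, aborting on failure.
func uploadSequentially(ctx context.Context, w chunkWriter, in io.Reader, chunkSize int64) (err error) {
	defer func() {
		if err != nil {
			_ = w.Abort(ctx) // best-effort cleanup if anything went wrong
		}
	}()
	for chunkNumber := 0; ; chunkNumber++ {
		var buf bytes.Buffer
		n, readErr := io.CopyN(&buf, in, chunkSize)
		if n > 0 {
			if _, err = w.WriteChunk(ctx, chunkNumber, bytes.NewReader(buf.Bytes())); err != nil {
				return err
			}
		}
		if readErr == io.EOF {
			break // source exhausted, last (possibly short) chunk written
		}
		if readErr != nil {
			return readErr
		}
	}
	return w.Close(ctx)
}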
@@ -52,7 +52,7 @@ func (e *Error) Error() string {
out += ": " + e.Message out += ": " + e.Message
} }
if e.ContextInfo != nil { if e.ContextInfo != nil {
out += fmt.Sprintf(" (%s)", string(e.ContextInfo)) out += fmt.Sprintf(" (%+v)", e.ContextInfo)
} }
return out return out
} }
@@ -63,7 +63,7 @@ var _ error = (*Error)(nil)
// ItemFields are the fields needed for FileInfo // ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by" var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by"
// Types of things in Item/ItemMini // Types of things in Item
const ( const (
ItemTypeFolder = "folder" ItemTypeFolder = "folder"
ItemTypeFile = "file" ItemTypeFile = "file"
@@ -72,31 +72,20 @@ const (
ItemStatusDeleted = "deleted" ItemStatusDeleted = "deleted"
) )
// ItemMini is a subset of the elements in a full Item returned by some API calls
type ItemMini struct {
Type string `json:"type"`
ID string `json:"id"`
SequenceID int64 `json:"sequence_id,string"`
Etag string `json:"etag"`
SHA1 string `json:"sha1"`
Name string `json:"name"`
}
// Item describes a folder or a file as returned by Get Folder Items and others // Item describes a folder or a file as returned by Get Folder Items and others
type Item struct { type Item struct {
Type string `json:"type"` Type string `json:"type"`
ID string `json:"id"` ID string `json:"id"`
SequenceID int64 `json:"sequence_id,string"` SequenceID string `json:"sequence_id"`
Etag string `json:"etag"` Etag string `json:"etag"`
SHA1 string `json:"sha1"` SHA1 string `json:"sha1"`
Name string `json:"name"` Name string `json:"name"`
Size float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261 Size float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
CreatedAt Time `json:"created_at"` CreatedAt Time `json:"created_at"`
ModifiedAt Time `json:"modified_at"` ModifiedAt Time `json:"modified_at"`
ContentCreatedAt Time `json:"content_created_at"` ContentCreatedAt Time `json:"content_created_at"`
ContentModifiedAt Time `json:"content_modified_at"` ContentModifiedAt Time `json:"content_modified_at"`
ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
Parent ItemMini `json:"parent"`
SharedLink struct { SharedLink struct {
URL string `json:"url,omitempty"` URL string `json:"url,omitempty"`
Access string `json:"access,omitempty"` Access string `json:"access,omitempty"`
@@ -292,30 +281,3 @@ type User struct {
Address string `json:"address"` Address string `json:"address"`
AvatarURL string `json:"avatar_url"` AvatarURL string `json:"avatar_url"`
} }
// FileTreeChangeEventTypes are the events that can require cache invalidation
var FileTreeChangeEventTypes = map[string]struct{}{
"ITEM_COPY": {},
"ITEM_CREATE": {},
"ITEM_MAKE_CURRENT_VERSION": {},
"ITEM_MODIFY": {},
"ITEM_MOVE": {},
"ITEM_RENAME": {},
"ITEM_TRASH": {},
"ITEM_UNDELETE_VIA_TRASH": {},
"ITEM_UPLOAD": {},
}
// Event is an array element in the response returned from /events
type Event struct {
EventType string `json:"event_type"`
EventID string `json:"event_id"`
Source Item `json:"source"`
}
// Events is returned from /events
type Events struct {
ChunkSize int64 `json:"chunk_size"`
Entries []Event `json:"entries"`
NextStreamPosition int64 `json:"next_stream_position"`
}
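Aside, as an illustration only (not rclone code): the Events and Event structs above map straight onto the JSON returned by the box /events endpoint, so decoding a response body is a plain json.Unmarshal. A trimmed-down standalone sketch, with the Source field omitted for brevity and an invented sample body:

package main

import (
	"encoding/json"
	"fmt"
)

// Event and Events are cut-down copies of the structs above.
type Event struct {
	EventType string `json:"event_type"`
	EventID   string `json:"event_id"`
}

type Events struct {
	ChunkSize          int64   `json:"chunk_size"`
	Entries            []Event `json:"entries"`
	NextStreamPosition int64   `json:"next_stream_position"`
}

func main() {
	body := []byte(`{"chunk_size":1,"next_stream_position":123,"entries":[{"event_type":"ITEM_UPLOAD","event_id":"e1"}]}`)
	var events Events
	if err := json.Unmarshal(body, &events); err != nil {
		panic(err)
	}
	fmt.Println(events.NextStreamPosition, len(events.Entries), events.Entries[0].EventType)
}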
@@ -17,9 +17,9 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
"os"
"path" "path"
"strconv" "strconv"
"strings" "strings"
@@ -27,7 +27,6 @@ import (
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/golang-jwt/jwt/v4"
"github.com/rclone/rclone/backend/box/api" "github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config"
@@ -46,6 +45,7 @@ import (
"github.com/rclone/rclone/lib/rest" "github.com/rclone/rclone/lib/rest"
"github.com/youmark/pkcs8" "github.com/youmark/pkcs8"
"golang.org/x/oauth2" "golang.org/x/oauth2"
"golang.org/x/oauth2/jws"
) )
const ( const (
@@ -76,11 +76,6 @@ var (
} }
) )
type boxCustomClaims struct {
jwt.StandardClaims
BoxSubType string `json:"box_sub_type,omitempty"`
}
// Register with Fs // Register with Fs
func init() { func init() {
fs.Register(&fs.RegInfo{ fs.Register(&fs.RegInfo{
@@ -107,18 +102,16 @@ func init() {
return nil, nil return nil, nil
}, },
Options: append(oauthutil.SharedOptions, []fs.Option{{ Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "root_folder_id", Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.", Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "0", Default: "0",
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "box_config_file", Name: "box_config_file",
Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp, Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
}, { }, {
Name: "access_token", Name: "access_token",
Help: "Box App Primary Access Token\n\nLeave blank normally.", Help: "Box App Primary Access Token\n\nLeave blank normally.",
Sensitive: true,
}, { }, {
Name: "box_sub_type", Name: "box_sub_type",
Default: "user", Default: "user",
@@ -149,23 +142,6 @@ func init() {
Default: "", Default: "",
Help: "Only show items owned by the login (email address) passed in.", Help: "Only show items owned by the login (email address) passed in.",
Advanced: true, Advanced: true,
}, {
Name: "impersonate",
Default: "",
Help: `Impersonate this user ID when using a service account.
Setting this flag allows rclone, when using a JWT service account, to
act on behalf of another user by setting the as-user header.
The user ID is the Box identifier for a user. User IDs can found for
any user via the GET /users endpoint, which is only available to
admins, or by calling the GET /users/me endpoint with an authenticated
user session.
See: https://developer.box.com/guides/authentication/jwt/as-user/
`,
Advanced: true,
Sensitive: true,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,
@@ -202,12 +178,12 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
signingHeaders := getSigningHeaders(boxConfig) signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig) queryParams := getQueryParams(boxConfig)
client := fshttp.NewClient(ctx) client := fshttp.NewClient(ctx)
err = jwtutil.Config("box", name, tokenURL, *claims, signingHeaders, queryParams, privateKey, m, client) err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
return err return err
} }
func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) { func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
file, err := os.ReadFile(configFile) file, err := ioutil.ReadFile(configFile)
if err != nil { if err != nil {
return nil, fmt.Errorf("box: failed to read Box config: %w", err) return nil, fmt.Errorf("box: failed to read Box config: %w", err)
} }
@@ -218,31 +194,34 @@ func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
return boxConfig, nil return boxConfig, nil
} }
func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomClaims, err error) { func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
val, err := jwtutil.RandomHex(20) val, err := jwtutil.RandomHex(20)
if err != nil { if err != nil {
return nil, fmt.Errorf("box: failed to generate random string for jti: %w", err) return nil, fmt.Errorf("box: failed to generate random string for jti: %w", err)
} }
claims = &boxCustomClaims{ claims = &jws.ClaimSet{
//lint:ignore SA1019 since we need to use jwt.StandardClaims even if deprecated in jwt-go v4 until a more permanent solution is ready in time before jwt-go v5 where it is removed entirely Iss: boxConfig.BoxAppSettings.ClientID,
//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1019 Sub: boxConfig.EnterpriseID,
StandardClaims: jwt.StandardClaims{ Aud: tokenURL,
Id: val, Exp: time.Now().Add(time.Second * 45).Unix(),
Issuer: boxConfig.BoxAppSettings.ClientID, PrivateClaims: map[string]interface{}{
Subject: boxConfig.EnterpriseID, "box_sub_type": boxSubType,
Audience: tokenURL, "aud": tokenURL,
ExpiresAt: time.Now().Add(time.Second * 45).Unix(), "jti": val,
}, },
BoxSubType: boxSubType,
} }
return claims, nil return claims, nil
} }
func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]interface{} { func getSigningHeaders(boxConfig *api.ConfigJSON) *jws.Header {
signingHeaders := map[string]interface{}{ signingHeaders := &jws.Header{
"kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID, Algorithm: "RS256",
Typ: "JWT",
KeyID: boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
} }
return signingHeaders return signingHeaders
} }
@@ -279,29 +258,19 @@ type Options struct {
AccessToken string `config:"access_token"` AccessToken string `config:"access_token"`
ListChunk int `config:"list_chunk"` ListChunk int `config:"list_chunk"`
OwnedBy string `config:"owned_by"` OwnedBy string `config:"owned_by"`
Impersonate string `config:"impersonate"`
}
// ItemMeta defines metadata we cache for each Item ID
type ItemMeta struct {
SequenceID int64 // the most recent event processed for this item
ParentID string // ID of the parent directory of this item
Name string // leaf name of this item
} }
// Fs represents a remote box // Fs represents a remote box
type Fs struct { type Fs struct {
name string // name of this remote name string // name of this remote
root string // the path we are working on root string // the path we are working on
opt Options // parsed options opt Options // parsed options
features *fs.Features // optional features features *fs.Features // optional features
srv *rest.Client // the connection to the server srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry tokenRenewer *oauthutil.Renew // renew the token on expiry
uploadToken *pacer.TokenDispenser // control concurrency uploadToken *pacer.TokenDispenser // control concurrency
itemMetaCacheMu *sync.Mutex // protects itemMetaCache
itemMetaCache map[string]ItemMeta // map of Item ID to selected metadata
} }
// Object describes a box object // Object describes a box object
@@ -449,14 +418,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ci := fs.GetConfig(ctx) ci := fs.GetConfig(ctx)
f := &Fs{ f := &Fs{
name: name, name: name,
root: root, root: root,
opt: *opt, opt: *opt,
srv: rest.NewClient(client).SetRoot(rootURL), srv: rest.NewClient(client).SetRoot(rootURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(ci.Transfers), uploadToken: pacer.NewTokenDispenser(ci.Transfers),
itemMetaCacheMu: new(sync.Mutex),
itemMetaCache: make(map[string]ItemMeta),
} }
f.features = (&fs.Features{ f.features = (&fs.Features{
CaseInsensitive: true, CaseInsensitive: true,
@@ -469,11 +436,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken) f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken)
} }
// If using impersonate set an as-user header
if f.opt.Impersonate != "" {
f.srv.SetHeader("as-user", f.opt.Impersonate)
}
jsonFile, ok := m.Get("box_config_file") jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type") boxSubType, boxSubTypeOk := m.Get("box_sub_type")
@@ -716,17 +678,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
} }
entries = append(entries, o) entries = append(entries, o)
} }
// Cache some metadata for this Item to help us process events later
// on. In particular, the box event API does not provide the old path
// of the Item when it is renamed/deleted/moved/etc.
f.itemMetaCacheMu.Lock()
cachedItemMeta, found := f.itemMetaCache[info.ID]
if !found || cachedItemMeta.SequenceID < info.SequenceID {
f.itemMetaCache[info.ID] = ItemMeta{SequenceID: info.SequenceID, ParentID: directoryID, Name: info.Name}
}
f.itemMetaCacheMu.Unlock()
return false return false
}) })
if err != nil { if err != nil {
@@ -1166,7 +1117,7 @@ func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
// CleanUp empties the trash // CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) (err error) { func (f *Fs) CleanUp(ctx context.Context) (err error) {
var ( var (
deleteErrors atomic.Uint64 deleteErrors = int64(0)
concurrencyControl = make(chan struct{}, fs.GetConfig(ctx).Checkers) concurrencyControl = make(chan struct{}, fs.GetConfig(ctx).Checkers)
wg sync.WaitGroup wg sync.WaitGroup
) )
@@ -1182,7 +1133,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
err := f.deletePermanently(ctx, item.Type, item.ID) err := f.deletePermanently(ctx, item.Type, item.ID)
if err != nil { if err != nil {
fs.Errorf(f, "failed to delete trash item %q (%q): %v", item.Name, item.ID, err) fs.Errorf(f, "failed to delete trash item %q (%q): %v", item.Name, item.ID, err)
deleteErrors.Add(1) atomic.AddInt64(&deleteErrors, 1)
} }
}() }()
} else { } else {
@@ -1191,250 +1142,12 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
return false return false
}) })
wg.Wait() wg.Wait()
if deleteErrors.Load() != 0 { if deleteErrors != 0 {
return fmt.Errorf("failed to delete %d trash items", deleteErrors.Load()) return fmt.Errorf("failed to delete %d trash items", deleteErrors)
} }
return err return err
} }
// ChangeNotify calls the passed function with a path that has had changes.
// If the implementation uses polling, it should adhere to the given interval.
//
// Automatically restarts itself in case of unexpected behavior of the remote.
//
// Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
go func() {
// get the `stream_position` early so all changes from now on get processed
streamPosition, err := f.changeNotifyStreamPosition(ctx)
if err != nil {
fs.Infof(f, "Failed to get StreamPosition: %s", err)
}
var ticker *time.Ticker
var tickerC <-chan time.Time
for {
select {
case pollInterval, ok := <-pollIntervalChan:
if !ok {
if ticker != nil {
ticker.Stop()
}
return
}
if ticker != nil {
ticker.Stop()
ticker, tickerC = nil, nil
}
if pollInterval != 0 {
ticker = time.NewTicker(pollInterval)
tickerC = ticker.C
}
case <-tickerC:
if streamPosition == "" {
streamPosition, err = f.changeNotifyStreamPosition(ctx)
if err != nil {
fs.Infof(f, "Failed to get StreamPosition: %s", err)
continue
}
}
streamPosition, err = f.changeNotifyRunner(ctx, notifyFunc, streamPosition)
if err != nil {
fs.Infof(f, "Change notify listener failure: %s", err)
}
}
}
}()
}
func (f *Fs) changeNotifyStreamPosition(ctx context.Context) (streamPosition string, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/events",
Parameters: fieldsValue(),
}
opts.Parameters.Set("stream_position", "now")
opts.Parameters.Set("stream_type", "changes")
var result api.Events
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return "", err
}
return strconv.FormatInt(result.NextStreamPosition, 10), nil
}
// Attempts to construct the full path for an object, given the ID of its
// parent directory and the name of the object.
//
// Can return "" if the parentID is not currently in the directory cache.
func (f *Fs) getFullPath(parentID string, childName string) (fullPath string) {
fullPath = ""
name := f.opt.Enc.ToStandardName(childName)
if parentID != "" {
if parentDir, ok := f.dirCache.GetInv(parentID); ok {
if len(parentDir) > 0 {
fullPath = parentDir + "/" + name
} else {
fullPath = name
}
}
} else {
// No parent, this object is at the root
fullPath = name
}
return fullPath
}
func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), streamPosition string) (nextStreamPosition string, err error) {
nextStreamPosition = streamPosition
// box can send duplicate Event IDs; filter any in a single notify run
processedEventIDs := make(map[string]bool)
for {
limit := f.opt.ListChunk
// box only allows a max of 500 events
if limit > 500 {
limit = 500
}
opts := rest.Opts{
Method: "GET",
Path: "/events",
Parameters: fieldsValue(),
}
opts.Parameters.Set("stream_position", nextStreamPosition)
opts.Parameters.Set("stream_type", "changes")
opts.Parameters.Set("limit", strconv.Itoa(limit))
var result api.Events
var resp *http.Response
fs.Debugf(f, "Checking for changes on remote (next_stream_position: %q)", nextStreamPosition)
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return "", err
}
if result.ChunkSize != int64(len(result.Entries)) {
return "", fmt.Errorf("invalid response to event request, chunk_size (%v) not equal to number of entries (%v)", result.ChunkSize, len(result.Entries))
}
nextStreamPosition = strconv.FormatInt(result.NextStreamPosition, 10)
if result.ChunkSize == 0 {
return nextStreamPosition, nil
}
type pathToClear struct {
path string
entryType fs.EntryType
}
var pathsToClear []pathToClear
newEventIDs := 0
for _, entry := range result.Entries {
if entry.EventID == "" || processedEventIDs[entry.EventID] { // missing Event ID, or already saw this one
continue
}
processedEventIDs[entry.EventID] = true
newEventIDs++
if entry.Source.ID == "" { // missing File or Folder ID
continue
}
if entry.Source.Type != api.ItemTypeFile && entry.Source.Type != api.ItemTypeFolder { // event is not for a file or folder
continue
}
// Only interested in event types that result in a file tree change
if _, found := api.FileTreeChangeEventTypes[entry.EventType]; !found {
continue
}
f.itemMetaCacheMu.Lock()
itemMeta, cachedItemMetaFound := f.itemMetaCache[entry.Source.ID]
if cachedItemMetaFound {
if itemMeta.SequenceID >= entry.Source.SequenceID {
// Item in the cache has the same or newer SequenceID than
// this event. Ignore this event, it must be old.
f.itemMetaCacheMu.Unlock()
continue
}
// This event is newer. Delete its entry from the cache,
// we'll notify about its change below, then it's up to a
// future list operation to repopulate the cache.
delete(f.itemMetaCache, entry.Source.ID)
}
f.itemMetaCacheMu.Unlock()
entryType := fs.EntryDirectory
if entry.Source.Type == api.ItemTypeFile {
entryType = fs.EntryObject
}
// The box event only includes the new path for the object (e.g.
// the path after the object was moved). If there was an old path
// saved in our cache, it must be cleared.
if cachedItemMetaFound {
path := f.getFullPath(itemMeta.ParentID, itemMeta.Name)
if path != "" {
pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
}
// If this is a directory, also delete it from the dir cache.
// This will effectively invalidate the item metadata cache
// entries for all descendents of this directory, since we
// will no longer be able to construct a full path for them.
// This is exactly what we want, since we don't want to notify
// on the paths of these descendents if one of their ancestors
// has been renamed/deleted.
if entry.Source.Type == api.ItemTypeFolder {
f.dirCache.FlushDir(path)
}
}
// If the item is "active", then it is not trashed or deleted, so
// it potentially has a valid parent.
//
// Construct the new path of the object, based on the Parent ID
// and its name. If we get an empty result, it means we don't
// currently know about this object so notification is unnecessary.
if entry.Source.ItemStatus == api.ItemStatusActive {
path := f.getFullPath(entry.Source.Parent.ID, entry.Source.Name)
if path != "" {
pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
}
}
}
// box can sometimes repeatedly return the same Event IDs within a
// short period of time. If it stops giving us new ones, treat it
// the same as if it returned us none at all.
if newEventIDs == 0 {
return nextStreamPosition, nil
}
notifiedPaths := make(map[string]bool)
for _, p := range pathsToClear {
if _, ok := notifiedPaths[p.path]; ok {
continue
}
notifiedPaths[p.path] = true
notifyFunc(p.path, p.entryType)
}
fs.Debugf(f, "Received %v events, resulting in %v paths and %v notifications", len(result.Entries), len(pathsToClear), len(notifiedPaths))
}
}
// DirCacheFlush resets the directory cache - used in testing as an // DirCacheFlush resets the directory cache - used in testing as an
// optional interface // optional interface
func (f *Fs) DirCacheFlush() { func (f *Fs) DirCacheFlush() {
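Aside, as an illustration only (not rclone code): the CleanUp hunk above swaps a plain int64 updated with atomic.AddInt64 for the typed atomic.Uint64 counter (available since Go 1.19), which carries its Add and Load methods with it. A standalone sketch of the typed counter:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var deleteErrors atomic.Uint64 // typed counter, zero value ready to use
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			deleteErrors.Add(1) // safe from many goroutines at once
		}()
	}
	wg.Wait()
	fmt.Println(deleteErrors.Load()) // prints 10
}

The typed form cannot be copied or updated non-atomically by accident, which is the main reason to prefer it over a bare int64.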
@@ -76,19 +76,17 @@ func init() {
Name: "plex_url", Name: "plex_url",
Help: "The URL of the Plex server.", Help: "The URL of the Plex server.",
}, { }, {
Name: "plex_username", Name: "plex_username",
Help: "The username of the Plex user.", Help: "The username of the Plex user.",
Sensitive: true,
}, { }, {
Name: "plex_password", Name: "plex_password",
Help: "The password of the Plex user.", Help: "The password of the Plex user.",
IsPassword: true, IsPassword: true,
}, { }, {
Name: "plex_token", Name: "plex_token",
Help: "The plex token for authentication - auto set normally.", Help: "The plex token for authentication - auto set normally.",
Hide: fs.OptionHideBoth, Hide: fs.OptionHideBoth,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "plex_insecure", Name: "plex_insecure",
Help: "Skip all certificate verification when connecting to the Plex server.", Help: "Skip all certificate verification when connecting to the Plex server.",
@@ -1040,7 +1038,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
} }
fs.Debugf(dir, "list: remove entry: %v", entryRemote) fs.Debugf(dir, "list: remove entry: %v", entryRemote)
} }
entries = nil //nolint:ineffassign entries = nil
// and then iterate over the ones from source (temp Objects will override source ones) // and then iterate over the ones from source (temp Objects will override source ones)
var batchDirectories []*Directory var batchDirectories []*Directory
@@ -1789,7 +1787,7 @@ func (f *Fs) CleanUpCache(ignoreLastTs bool) {
} }
} }
// StopBackgroundRunners will signal all the runners to stop their work // StopBackgroundRunners will signall all the runners to stop their work
// can be triggered from a terminate signal or from testing between runs // can be triggered from a terminate signal or from testing between runs
func (f *Fs) StopBackgroundRunners() { func (f *Fs) StopBackgroundRunners() {
f.cleanupChan <- false f.cleanupChan <- false
@@ -11,6 +11,7 @@ import (
goflag "flag" goflag "flag"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"log" "log"
"math/rand" "math/rand"
"os" "os"
@@ -101,12 +102,14 @@ func TestMain(m *testing.M) {
func TestInternalListRootAndInnerRemotes(t *testing.T) { func TestInternalListRootAndInnerRemotes(t *testing.T) {
id := fmt.Sprintf("tilrair%v", time.Now().Unix()) id := fmt.Sprintf("tilrair%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
// Instantiate inner fs // Instantiate inner fs
innerFolder := "inner" innerFolder := "inner"
runInstance.mkdir(t, rootFs, innerFolder) runInstance.mkdir(t, rootFs, innerFolder)
rootFs2, _ := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil) rootFs2, boltDb2 := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs2, boltDb2)
runInstance.writeObjectString(t, rootFs2, "one", "content") runInstance.writeObjectString(t, rootFs2, "one", "content")
listRoot, err := runInstance.list(t, rootFs, "") listRoot, err := runInstance.list(t, rootFs, "")
@@ -164,7 +167,7 @@ func TestInternalVfsCache(t *testing.T) {
li2 := [2]string{path.Join("test", "one"), path.Join("test", "second")} li2 := [2]string{path.Join("test", "one"), path.Join("test", "second")}
for _, r := range li2 { for _, r := range li2 {
var err error var err error
ci, err := os.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r)))) ci, err := ioutil.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
if err != nil || len(ci) == 0 { if err != nil || len(ci) == 0 {
log.Printf("========== '%v' not in cache", r) log.Printf("========== '%v' not in cache", r)
} else { } else {
@@ -223,7 +226,8 @@ func TestInternalVfsCache(t *testing.T) {
func TestInternalObjWrapFsFound(t *testing.T) { func TestInternalObjWrapFsFound(t *testing.T) {
id := fmt.Sprintf("tiowff%v", time.Now().Unix()) id := fmt.Sprintf("tiowff%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs) cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err) require.NoError(t, err)
@@ -255,7 +259,8 @@ func TestInternalObjWrapFsFound(t *testing.T) {
func TestInternalObjNotFound(t *testing.T) { func TestInternalObjNotFound(t *testing.T) {
id := fmt.Sprintf("tionf%v", time.Now().Unix()) id := fmt.Sprintf("tionf%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
obj, err := rootFs.NewObject(context.Background(), "404") obj, err := rootFs.NewObject(context.Background(), "404")
require.Error(t, err) require.Error(t, err)
@@ -265,7 +270,8 @@ func TestInternalObjNotFound(t *testing.T) {
func TestInternalCachedWrittenContentMatches(t *testing.T) { func TestInternalCachedWrittenContentMatches(t *testing.T) {
testy.SkipUnreliable(t) testy.SkipUnreliable(t)
id := fmt.Sprintf("ticwcm%v", time.Now().Unix()) id := fmt.Sprintf("ticwcm%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs) cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err) require.NoError(t, err)
@@ -292,7 +298,8 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
t.Skip("Skip test on windows/386") t.Skip("Skip test on windows/386")
} }
id := fmt.Sprintf("tidwcm%v", time.Now().Unix()) id := fmt.Sprintf("tidwcm%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
// write the object // write the object
runInstance.writeRemoteString(t, rootFs, "one", "one content") runInstance.writeRemoteString(t, rootFs, "one", "one content")
@@ -310,7 +317,8 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
func TestInternalCachedUpdatedContentMatches(t *testing.T) { func TestInternalCachedUpdatedContentMatches(t *testing.T) {
testy.SkipUnreliable(t) testy.SkipUnreliable(t)
id := fmt.Sprintf("ticucm%v", time.Now().Unix()) id := fmt.Sprintf("ticucm%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
var err error var err error
// create some rand test data // create some rand test data
@@ -339,7 +347,8 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
func TestInternalWrappedWrittenContentMatches(t *testing.T) { func TestInternalWrappedWrittenContentMatches(t *testing.T) {
id := fmt.Sprintf("tiwwcm%v", time.Now().Unix()) id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
vfsflags.Opt.DirCacheTime = time.Second vfsflags.Opt.DirCacheTime = time.Second
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
if runInstance.rootIsCrypt { if runInstance.rootIsCrypt {
t.Skip("test skipped with crypt remote") t.Skip("test skipped with crypt remote")
} }
@@ -369,7 +378,8 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
func TestInternalLargeWrittenContentMatches(t *testing.T) { func TestInternalLargeWrittenContentMatches(t *testing.T) {
id := fmt.Sprintf("tilwcm%v", time.Now().Unix()) id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
vfsflags.Opt.DirCacheTime = time.Second vfsflags.Opt.DirCacheTime = time.Second
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
if runInstance.rootIsCrypt { if runInstance.rootIsCrypt {
t.Skip("test skipped with crypt remote") t.Skip("test skipped with crypt remote")
} }
@@ -395,7 +405,8 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {
func TestInternalWrappedFsChangeNotSeen(t *testing.T) { func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
id := fmt.Sprintf("tiwfcns%v", time.Now().Unix()) id := fmt.Sprintf("tiwfcns%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs) cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err) require.NoError(t, err)
@@ -449,7 +460,8 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
func TestInternalMoveWithNotify(t *testing.T) { func TestInternalMoveWithNotify(t *testing.T) {
id := fmt.Sprintf("timwn%v", time.Now().Unix()) id := fmt.Sprintf("timwn%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
if !runInstance.wrappedIsExternal { if !runInstance.wrappedIsExternal {
t.Skipf("Not external") t.Skipf("Not external")
} }
@@ -535,7 +547,8 @@ func TestInternalMoveWithNotify(t *testing.T) {
func TestInternalNotifyCreatesEmptyParts(t *testing.T) { func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
id := fmt.Sprintf("tincep%v", time.Now().Unix()) id := fmt.Sprintf("tincep%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
if !runInstance.wrappedIsExternal { if !runInstance.wrappedIsExternal {
t.Skipf("Not external") t.Skipf("Not external")
} }
@@ -621,7 +634,8 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) { func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
id := fmt.Sprintf("ticsadcf%v", time.Now().Unix()) id := fmt.Sprintf("ticsadcf%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs) cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err) require.NoError(t, err)
@@ -653,7 +667,8 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
func TestInternalCacheWrites(t *testing.T) { func TestInternalCacheWrites(t *testing.T) {
id := "ticw" id := "ticw"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"writes": "true"}) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs) cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err) require.NoError(t, err)
@@ -674,7 +689,8 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
t.Skip("Skip test on windows/386") t.Skip("Skip test on windows/386")
} }
id := fmt.Sprintf("timcsr%v", time.Now().Unix()) id := fmt.Sprintf("timcsr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"workers": "1"}) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs) cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err) require.NoError(t, err)
@@ -709,7 +725,8 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
func TestInternalExpiredEntriesRemoved(t *testing.T) { func TestInternalExpiredEntriesRemoved(t *testing.T) {
id := fmt.Sprintf("tieer%v", time.Now().Unix()) id := fmt.Sprintf("tieer%v", time.Now().Unix())
vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, map[string]string{"info_age": "5s"}, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs) cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err) require.NoError(t, err)
@@ -746,7 +763,9 @@ func TestInternalBug2117(t *testing.T) {
vfsflags.Opt.DirCacheTime = time.Second * 10 vfsflags.Opt.DirCacheTime = time.Second * 10
id := fmt.Sprintf("tib2117%v", time.Now().Unix()) id := fmt.Sprintf("tib2117%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"}) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil,
map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
if runInstance.rootIsCrypt { if runInstance.rootIsCrypt {
t.Skipf("skipping crypt") t.Skipf("skipping crypt")
@@ -822,7 +841,7 @@ func newRun() *run {
} }
if uploadDir == "" { if uploadDir == "" {
r.tmpUploadDir, err = os.MkdirTemp("", "rclonecache-tmp") r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
if err != nil { if err != nil {
panic(fmt.Sprintf("Failed to create temp dir: %v", err)) panic(fmt.Sprintf("Failed to create temp dir: %v", err))
} }
@@ -847,7 +866,7 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
return enc return enc
} }
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) { func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, cfg map[string]string, flags map[string]string) (fs.Fs, *cache.Persistent) {
fstest.Initialise() fstest.Initialise()
remoteExists := false remoteExists := false
for _, s := range config.FileSections() { for _, s := range config.FileSections() {
@@ -940,15 +959,10 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
} }
err = f.Mkdir(context.Background(), "") err = f.Mkdir(context.Background(), "")
require.NoError(t, err) require.NoError(t, err)
t.Cleanup(func() {
runInstance.cleanupFs(t, f)
})
return f, boltDb return f, boltDb
} }
func (r *run) cleanupFs(t *testing.T, f fs.Fs) { func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
err := f.Features().Purge(context.Background(), "") err := f.Features().Purge(context.Background(), "")
require.NoError(t, err) require.NoError(t, err)
cfs, err := r.getCacheFs(f) cfs, err := r.getCacheFs(f)
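Throughout the test hunks above, one version has newCacheFs register its own teardown with t.Cleanup, while the other returns the bolt DB handle and relies on every test adding `defer runInstance.cleanupFs(t, rootFs, boltDb)`. A minimal, self-contained sketch of the t.Cleanup pattern, with illustrative names rather than rclone's:

```go
package example

import (
	"os"
	"testing"
)

// newTempDir shows the t.Cleanup idiom: the helper owns its teardown, so
// callers need no matching defer and cannot forget it.
func newTempDir(t *testing.T) string {
	t.Helper()
	dir, err := os.MkdirTemp("", "example-")
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() {
		_ = os.RemoveAll(dir) // runs once the test and its subtests finish
	})
	return dir
}
```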
@@ -970,7 +984,7 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
chunk := int64(1024) chunk := int64(1024)
cnt := size / chunk cnt := size / chunk
left := size % chunk left := size % chunk
f, err := os.CreateTemp("", "rclonecache-tempfile") f, err := ioutil.TempFile("", "rclonecache-tempfile")
require.NoError(t, err) require.NoError(t, err)
for i := 0; i < int(cnt); i++ { for i := 0; i < int(cnt); i++ {
@@ -1098,6 +1112,27 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error)
return l, err return l, err
} }
func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
in, err := os.Open(src)
if err != nil {
return err
}
defer func() {
_ = in.Close()
}()
out, err := os.Create(dst)
if err != nil {
return err
}
defer func() {
_ = out.Close()
}()
_, err = io.Copy(out, in)
return err
}
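The copyFile helper above deliberately discards both Close errors. That is usually harmless on the read side, but on the write side a failed Close can mean buffered data never reached the destination. A variant sketch (not the code in this diff; os and io imports assumed) that propagates the writer's Close error:

```go
// copyFileStrict copies src to dst and surfaces a failed close of the
// destination file instead of silently ignoring it.
func copyFileStrict(src, dst string) (err error) {
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer func() {
		_ = in.Close() // read side: Close errors are not interesting
	}()
	out, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer func() {
		if cerr := out.Close(); err == nil {
			err = cerr // a failed flush/close of the destination is an error
		}
	}()
	_, err = io.Copy(out, in)
	return err
}
```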
func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error { func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
var err error var err error

View File

@@ -18,7 +18,7 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
RemoteName: "TestCache:", RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil), NilObject: (*cache.Object)(nil),
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter"}, UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"}, UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
}) })

View File

@@ -21,8 +21,10 @@ import (
func TestInternalUploadTempDirCreated(t *testing.T) { func TestInternalUploadTempDirCreated(t *testing.T) {
id := fmt.Sprintf("tiutdc%v", time.Now().Unix()) id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
runInstance.newCacheFs(t, remoteName, id, false, true, rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)}) map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)})
defer runInstance.cleanupFs(t, rootFs, boltDb)
_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id)) _, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
require.NoError(t, err) require.NoError(t, err)
@@ -61,7 +63,9 @@ func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltD
func TestInternalUploadQueueOneFileNoRest(t *testing.T) { func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix()) id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"}) map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb) testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
} }
@@ -69,15 +73,19 @@ func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
func TestInternalUploadQueueOneFileWithRest(t *testing.T) { func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix()) id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"}) map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb) testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
} }
func TestInternalUploadMoveExistingFile(t *testing.T) { func TestInternalUploadMoveExistingFile(t *testing.T) {
id := fmt.Sprintf("tiumef%v", time.Now().Unix()) id := fmt.Sprintf("tiumef%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"}) map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "one") err := rootFs.Mkdir(context.Background(), "one")
require.NoError(t, err) require.NoError(t, err)
@@ -111,8 +119,10 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {
func TestInternalUploadTempPathCleaned(t *testing.T) { func TestInternalUploadTempPathCleaned(t *testing.T) {
id := fmt.Sprintf("tiutpc%v", time.Now().Unix()) id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"}) map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "one") err := rootFs.Mkdir(context.Background(), "one")
require.NoError(t, err) require.NoError(t, err)
@@ -152,19 +162,21 @@ func TestInternalUploadTempPathCleaned(t *testing.T) {
func TestInternalUploadQueueMoreFiles(t *testing.T) { func TestInternalUploadQueueMoreFiles(t *testing.T) {
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix()) id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"}) map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "test") err := rootFs.Mkdir(context.Background(), "test")
require.NoError(t, err) require.NoError(t, err)
minSize := 5242880 minSize := 5242880
maxSize := 10485760 maxSize := 10485760
totalFiles := 10 totalFiles := 10
randInstance := rand.New(rand.NewSource(time.Now().Unix())) rand.Seed(time.Now().Unix())
lastFile := "" lastFile := ""
for i := 0; i < totalFiles; i++ { for i := 0; i < totalFiles; i++ {
size := int64(randInstance.Intn(maxSize-minSize) + minSize) size := int64(rand.Intn(maxSize-minSize) + minSize)
testReader := runInstance.randomReader(t, size) testReader := runInstance.randomReader(t, size)
remote := "test/" + strconv.Itoa(i) + ".bin" remote := "test/" + strconv.Itoa(i) + ".bin"
runInstance.writeRemoteReader(t, rootFs, remote, testReader) runInstance.writeRemoteReader(t, rootFs, remote, testReader)
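In the loop above, one version draws sizes from a private rand.Rand built with rand.New(rand.NewSource(...)), while the other seeds the package-global source with rand.Seed, which is deprecated from Go 1.20. A small self-contained sketch of the local-source approach:

```go
package example

import (
	"math/rand"
	"time"
)

// randomSizes returns n pseudo-random sizes in [min, max) from a local
// *rand.Rand, keeping the test independent of the global random source.
func randomSizes(n, min, max int) []int64 {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	sizes := make([]int64, n)
	for i := range sizes {
		sizes[i] = int64(r.Intn(max-min) + min)
	}
	return sizes
}
```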
@@ -201,7 +213,9 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
func TestInternalUploadTempFileOperations(t *testing.T) { func TestInternalUploadTempFileOperations(t *testing.T) {
id := "tiutfo" id := "tiutfo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"}) map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads() boltDb.PurgeTempUploads()
@@ -329,7 +343,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
func TestInternalUploadUploadingFileOperations(t *testing.T) { func TestInternalUploadUploadingFileOperations(t *testing.T) {
id := "tiuufo" id := "tiuufo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"}) map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads() boltDb.PurgeTempUploads()

View File

@@ -8,7 +8,7 @@ import (
"crypto/tls" "crypto/tls"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
"strings" "strings"
@@ -167,7 +167,7 @@ func (p *plexConnector) listenWebsocket() {
continue continue
} }
var data []byte var data []byte
data, err = io.ReadAll(resp.Body) data, err = ioutil.ReadAll(resp.Body)
if err != nil { if err != nil {
continue continue
} }

View File

@@ -9,6 +9,7 @@ import (
"encoding/binary" "encoding/binary"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io/ioutil"
"os" "os"
"path" "path"
"strconv" "strconv"
@@ -472,7 +473,7 @@ func (b *Persistent) GetChunk(cachedObject *Object, offset int64) ([]byte, error
var data []byte var data []byte
fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10)) fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10))
data, err := os.ReadFile(fp) data, err := ioutil.ReadFile(fp)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -485,7 +486,7 @@ func (b *Persistent) AddChunk(fp string, data []byte, offset int64) error {
_ = os.MkdirAll(path.Join(b.dataPath, fp), os.ModePerm) _ = os.MkdirAll(path.Join(b.dataPath, fp), os.ModePerm)
filePath := path.Join(b.dataPath, fp, strconv.FormatInt(offset, 10)) filePath := path.Join(b.dataPath, fp, strconv.FormatInt(offset, 10))
err := os.WriteFile(filePath, data, os.ModePerm) err := ioutil.WriteFile(filePath, data, os.ModePerm)
if err != nil { if err != nil {
return err return err
} }
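This section, like several others in the compare (the plex websocket reader, the compress backend, the chunker and cipher tests), flips between the io/ioutil helpers and their Go 1.16+ replacements in os and io. The mapping as a compilable sketch; io/ioutil still works, it simply forwards to these:

```go
package example

import (
	"bytes"
	"io"
	"os"
)

// ioutilReplacements walks through the modern equivalents of the io/ioutil
// calls this compare keeps swapping back and forth.
func ioutilReplacements(path string) error {
	data, err := os.ReadFile(path) // was ioutil.ReadFile
	if err != nil {
		return err
	}
	if err := os.WriteFile(path+".copy", data, 0o644); err != nil { // was ioutil.WriteFile
		return err
	}
	all, err := io.ReadAll(io.NopCloser(bytes.NewReader(data))) // was ioutil.ReadAll / ioutil.NopCloser
	if err != nil {
		return err
	}
	f, err := os.CreateTemp("", "example-") // was ioutil.TempFile
	if err != nil {
		return err
	}
	defer func() { _ = os.Remove(f.Name()) }()
	if _, err := f.Write(all); err != nil {
		_ = f.Close()
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}
	dir, err := os.MkdirTemp("", "example-") // was ioutil.TempDir
	if err != nil {
		return err
	}
	return os.RemoveAll(dir)
}
```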

View File

@@ -12,6 +12,7 @@ import (
"fmt" "fmt"
gohash "hash" gohash "hash"
"io" "io"
"io/ioutil"
"math/rand" "math/rand"
"path" "path"
"regexp" "regexp"
@@ -1037,7 +1038,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
if err != nil { if err != nil {
return err return err
} }
metadata, err := io.ReadAll(reader) metadata, err := ioutil.ReadAll(reader)
_ = reader.Close() // ensure file handle is freed on windows _ = reader.Close() // ensure file handle is freed on windows
if err != nil { if err != nil {
return err return err
@@ -1096,7 +1097,7 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
if err != nil { if err != nil {
return "", err return "", err
} }
data, err := io.ReadAll(reader) data, err := ioutil.ReadAll(reader)
_ = reader.Close() // ensure file handle is freed on windows _ = reader.Close() // ensure file handle is freed on windows
if err != nil { if err != nil {
return "", err return "", err

View File

@@ -5,7 +5,7 @@ import (
"context" "context"
"flag" "flag"
"fmt" "fmt"
"io" "io/ioutil"
"path" "path"
"regexp" "regexp"
"strings" "strings"
@@ -413,7 +413,7 @@ func testSmallFileInternals(t *testing.T, f *Fs) {
if r == nil { if r == nil {
return return
} }
data, err := io.ReadAll(r) data, err := ioutil.ReadAll(r)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, contents, string(data)) assert.Equal(t, contents, string(data))
_ = r.Close() _ = r.Close()
@@ -538,7 +538,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
assert.NoError(t, err) assert.NoError(t, err)
var chunkContents []byte var chunkContents []byte
assert.NotPanics(t, func() { assert.NotPanics(t, func() {
chunkContents, err = io.ReadAll(r) chunkContents, err = ioutil.ReadAll(r)
_ = r.Close() _ = r.Close()
}) })
assert.NoError(t, err) assert.NoError(t, err)
@@ -573,7 +573,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
r, err = willyChunk.Open(ctx) r, err = willyChunk.Open(ctx)
assert.NoError(t, err) assert.NoError(t, err)
assert.NotPanics(t, func() { assert.NotPanics(t, func() {
_, err = io.ReadAll(r) _, err = ioutil.ReadAll(r)
_ = r.Close() _ = r.Close()
}) })
assert.NoError(t, err) assert.NoError(t, err)
@@ -672,7 +672,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
assert.NoError(t, err, "open "+description) assert.NoError(t, err, "open "+description)
assert.NotNil(t, r, "open stream of "+description) assert.NotNil(t, r, "open stream of "+description)
if err == nil && r != nil { if err == nil && r != nil {
data, err := io.ReadAll(r) data, err := ioutil.ReadAll(r)
assert.NoError(t, err, "read all of "+description) assert.NoError(t, err, "read all of "+description)
assert.Equal(t, contents, string(data), description+" contents is ok") assert.Equal(t, contents, string(data), description+" contents is ok")
_ = r.Close() _ = r.Close()
@@ -758,8 +758,8 @@ func testFutureProof(t *testing.T, f *Fs) {
assert.Error(t, err) assert.Error(t, err)
// Rcat must fail // Rcat must fail
in := io.NopCloser(bytes.NewBufferString("abc")) in := ioutil.NopCloser(bytes.NewBufferString("abc"))
robj, err := operations.Rcat(ctx, f, file, in, modTime, nil) robj, err := operations.Rcat(ctx, f, file, in, modTime)
assert.Nil(t, robj) assert.Nil(t, robj)
assert.NotNil(t, err) assert.NotNil(t, err)
if err != nil { if err != nil {
@@ -854,7 +854,7 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
r, err := dstFile.Open(ctx) r, err := dstFile.Open(ctx)
assert.NoError(t, err) assert.NoError(t, err)
assert.NotNil(t, r) assert.NotNil(t, r)
data, err := io.ReadAll(r) data, err := ioutil.ReadAll(r)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, contents, string(data)) assert.Equal(t, contents, string(data))
_ = r.Close() _ = r.Close()

View File

@@ -40,7 +40,6 @@ func TestIntegration(t *testing.T) {
UnimplementableFsMethods: []string{ UnimplementableFsMethods: []string{
"PublicLink", "PublicLink",
"OpenWriterAt", "OpenWriterAt",
"OpenChunkWriter",
"MergeDirs", "MergeDirs",
"DirCacheFlush", "DirCacheFlush",
"UserInfo", "UserInfo",

View File

@@ -1,4 +1,4 @@
// Package combine implements a backend to combine multiple remotes in a directory tree // Package combine implents a backend to combine multiple remotes in a directory tree
package combine package combine
/* /*
@@ -233,7 +233,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
ReadMetadata: true, ReadMetadata: true,
WriteMetadata: true, WriteMetadata: true,
UserMetadata: true, UserMetadata: true,
PartialUploads: true,
}).Fill(ctx, f) }).Fill(ctx, f)
canMove := true canMove := true
for _, u := range f.upstreams { for _, u := range f.upstreams {
@@ -290,16 +289,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
} }
} }
// Enable CleanUp when any upstreams support it
if features.CleanUp == nil {
for _, u := range f.upstreams {
if u.f.Features().CleanUp != nil {
features.CleanUp = f.CleanUp
break
}
}
}
// Enable ChangeNotify when any upstreams support it // Enable ChangeNotify when any upstreams support it
if features.ChangeNotify == nil { if features.ChangeNotify == nil {
for _, u := range f.upstreams { for _, u := range f.upstreams {
@@ -310,9 +299,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
} }
} }
// show that we wrap other backends
features.Overlay = true
f.features = features f.features = features
// Get common intersection of hashes // Get common intersection of hashes
@@ -365,7 +351,7 @@ func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream
return g.Wait() return g.Wait()
} }
// join the elements together but unlike path.Join return empty string // join the elements together but unline path.Join return empty string
func join(elem ...string) string { func join(elem ...string) string {
result := path.Join(elem...) result := path.Join(elem...)
if result == "." { if result == "." {
@@ -645,7 +631,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
if err != nil { if err != nil {
return nil, err return nil, err
} }
uSrc := fs.NewOverrideRemote(src, uRemote) uSrc := operations.NewOverrideRemote(src, uRemote)
var o fs.Object var o fs.Object
if stream { if stream {
o, err = u.f.Features().PutStream(ctx, in, uSrc, options...) o, err = u.f.Features().PutStream(ctx, in, uSrc, options...)
@@ -901,100 +887,6 @@ func (f *Fs) Shutdown(ctx context.Context) error {
}) })
} }
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
u, uRemote, err := f.findUpstream(remote)
if err != nil {
return "", err
}
do := u.f.Features().PublicLink
if do == nil {
return "", fs.ErrorNotImplemented
}
return do(ctx, uRemote, expire, unlink)
}
// PutUnchecked in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
//
// May create duplicates or return errors if src already
// exists.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
srcPath := src.Remote()
u, uRemote, err := f.findUpstream(srcPath)
if err != nil {
return nil, err
}
do := u.f.Features().PutUnchecked
if do == nil {
return nil, fs.ErrorNotImplemented
}
uSrc := fs.NewOverrideRemote(src, uRemote)
return do(ctx, in, uSrc, options...)
}
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
if len(dirs) == 0 {
return nil
}
var (
u *upstream
uDirs []fs.Directory
)
for _, dir := range dirs {
uNew, uDir, err := f.findUpstream(dir.Remote())
if err != nil {
return err
}
if u == nil {
u = uNew
} else if u != uNew {
return fmt.Errorf("can't merge directories from different upstreams")
}
uDirs = append(uDirs, fs.NewOverrideDirectory(dir, uDir))
}
do := u.f.Features().MergeDirs
if do == nil {
return fs.ErrorNotImplemented
}
return do(ctx, uDirs)
}
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
func (f *Fs) CleanUp(ctx context.Context) error {
return f.multithread(ctx, func(ctx context.Context, u *upstream) error {
if do := u.f.Features().CleanUp; do != nil {
return do(ctx)
}
return nil
})
}
// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
u, uRemote, err := f.findUpstream(remote)
if err != nil {
return nil, err
}
do := u.f.Features().OpenWriterAt
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx, uRemote, size)
}
// Object describes a wrapped Object // Object describes a wrapped Object
// //
// This is a wrapped Object which knows its path prefix // This is a wrapped Object which knows its path prefix
@@ -1024,7 +916,7 @@ func (o *Object) String() string {
func (o *Object) Remote() string { func (o *Object) Remote() string {
newPath, err := o.u.pathAdjustment.do(o.Object.String()) newPath, err := o.u.pathAdjustment.do(o.Object.String())
if err != nil { if err != nil {
fs.Errorf(o.Object, "Bad object: %v", err) fs.Errorf(o, "Bad object: %v", err)
return err.Error() return err.Error()
} }
return newPath return newPath
@@ -1096,10 +988,5 @@ var (
_ fs.Abouter = (*Fs)(nil) _ fs.Abouter = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil) _ fs.ListRer = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil) _ fs.Shutdowner = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.OpenWriterAter = (*Fs)(nil)
_ fs.FullObject = (*Object)(nil) _ fs.FullObject = (*Object)(nil)
) )
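The var block above is the usual list of compile-time interface checks: each `_ fs.X = (*Fs)(nil)` line fails the build if *Fs stops satisfying that optional interface, which is why entries are added or dropped here in lockstep with the methods earlier in the file. The idiom in isolation:

```go
package example

// Purger stands in for an optional interface such as fs.PublicLinker.
type Purger interface{ Purge() error }

// Fs stands in for a backend type.
type Fs struct{}

// Purge satisfies Purger.
func (f *Fs) Purge() error { return nil }

// Compile-time check: no value is constructed and there is no runtime cost,
// but removing the Purge method above breaks the build on this line.
var _ Purger = (*Fs)(nil)
```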

View File

@@ -10,11 +10,6 @@ import (
"github.com/rclone/rclone/fstest/fstests" "github.com/rclone/rclone/fstest/fstests"
) )
var (
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "OpenChunkWriter"}
unimplementableObjectMethods = []string{}
)
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) { func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" { if *fstest.RemoteName == "" {
@@ -22,8 +17,8 @@ func TestIntegration(t *testing.T) {
} }
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName, RemoteName: *fstest.RemoteName,
UnimplementableFsMethods: unimplementableFsMethods, UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: unimplementableObjectMethods, UnimplementableObjectMethods: []string{"MimeType"},
}) })
} }
@@ -40,9 +35,7 @@ func TestLocal(t *testing.T) {
{Name: name, Key: "type", Value: "combine"}, {Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams}, {Name: name, Key: "upstreams", Value: upstreams},
}, },
QuickTestOK: true, QuickTestOK: true,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
}) })
} }
@@ -58,9 +51,7 @@ func TestMemory(t *testing.T) {
{Name: name, Key: "type", Value: "combine"}, {Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams}, {Name: name, Key: "upstreams", Value: upstreams},
}, },
QuickTestOK: true, QuickTestOK: true,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
}) })
} }
@@ -77,8 +68,6 @@ func TestMixed(t *testing.T) {
{Name: name, Key: "type", Value: "combine"}, {Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams}, {Name: name, Key: "upstreams", Value: upstreams},
}, },
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
}) })
} }

View File

@@ -13,6 +13,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"os" "os"
"regexp" "regexp"
"strings" "strings"
@@ -28,7 +29,6 @@ import (
"github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/operations"
) )
@@ -186,7 +186,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
ReadMetadata: true, ReadMetadata: true,
WriteMetadata: true, WriteMetadata: true,
UserMetadata: true, UserMetadata: true,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs) }).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
// We support reading MIME types no matter the wrapped fs // We support reading MIME types no matter the wrapped fs
f.features.ReadMimeType = true f.features.ReadMimeType = true
@@ -257,16 +256,6 @@ func isMetadataFile(filename string) bool {
return strings.HasSuffix(filename, metaFileExt) return strings.HasSuffix(filename, metaFileExt)
} }
// Checks whether a file is a metadata file and returns the original
// file name and a flag indicating whether it was a metadata file or
// not.
func unwrapMetadataFile(filename string) (string, bool) {
if !isMetadataFile(filename) {
return "", false
}
return filename[:len(filename)-len(metaFileExt)], true
}
// makeDataName generates the file name for a data file with specified compression mode // makeDataName generates the file name for a data file with specified compression mode
func makeDataName(remote string, size int64, mode int) (newRemote string) { func makeDataName(remote string, size int64, mode int) (newRemote string) {
if mode != Uncompressed { if mode != Uncompressed {
@@ -378,16 +367,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
meta, err := readMetadata(ctx, mo) meta := readMetadata(ctx, mo)
if err != nil { if meta == nil {
return nil, fmt.Errorf("error decoding metadata: %w", err) return nil, errors.New("error decoding metadata")
} }
// Create our Object // Create our Object
o, err := f.Fs.NewObject(ctx, makeDataName(remote, meta.CompressionMetadata.Size, meta.Mode)) o, err := f.Fs.NewObject(ctx, makeDataName(remote, meta.CompressionMetadata.Size, meta.Mode))
if err != nil { return f.newObject(o, mo, meta), err
return nil, err
}
return f.newObject(o, mo, meta), nil
} }
// checkCompressAndType checks if an object is compressible and determines it's mime type // checkCompressAndType checks if an object is compressible and determines it's mime type
@@ -478,7 +464,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
} }
fs.Debugf(f, "Target remote doesn't support streaming uploads, creating temporary local file") fs.Debugf(f, "Target remote doesn't support streaming uploads, creating temporary local file")
tempFile, err := os.CreateTemp("", "rclone-press-") tempFile, err := ioutil.TempFile("", "rclone-press-")
defer func() { defer func() {
// these errors should be relatively uncritical and the upload should've succeeded so it's okay-ish // these errors should be relatively uncritical and the upload should've succeeded so it's okay-ish
// to ignore them // to ignore them
@@ -556,8 +542,8 @@ func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, o
} }
// Transfer the data // Transfer the data
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options) o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx), options)
//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx)) //o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx))
if err != nil { if err != nil {
if o != nil { if o != nil {
removeErr := o.Remove(ctx) removeErr := o.Remove(ctx)
@@ -691,7 +677,7 @@ func (f *Fs) putWithCustomFunctions(ctx context.Context, in io.Reader, src fs.Ob
} }
return nil, err return nil, err
} }
return f.newObject(dataObject, mo, meta), nil return f.newObject(dataObject, mo, meta), err
} }
// Put in to the remote path with the modTime given of the given size // Put in to the remote path with the modTime given of the given size
@@ -989,8 +975,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
wrappedNotifyFunc := func(path string, entryType fs.EntryType) { wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
fs.Logf(f, "path %q entryType %d", path, entryType) fs.Logf(f, "path %q entryType %d", path, entryType)
var ( var (
wrappedPath string wrappedPath string
isMetadataFile bool
) )
switch entryType { switch entryType {
case fs.EntryDirectory: case fs.EntryDirectory:
@@ -998,10 +983,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
case fs.EntryObject: case fs.EntryObject:
// Note: All we really need to do to monitor the object is to check whether the metadata changed, // Note: All we really need to do to monitor the object is to check whether the metadata changed,
// as the metadata contains the hash. This will work unless there's a hash collision and the sizes stay the same. // as the metadata contains the hash. This will work unless there's a hash collision and the sizes stay the same.
wrappedPath, isMetadataFile = unwrapMetadataFile(path) wrappedPath = makeMetadataName(path)
if !isMetadataFile {
return
}
default: default:
fs.Errorf(path, "press ChangeNotify: ignoring unknown EntryType %d", entryType) fs.Errorf(path, "press ChangeNotify: ignoring unknown EntryType %d", entryType)
return return
@@ -1058,19 +1040,24 @@ func newMetadata(size int64, mode int, cmeta sgzip.GzipMetadata, md5 string, mim
} }
// This function will read the metadata from a metadata object. // This function will read the metadata from a metadata object.
func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata, err error) { func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata) {
// Open our meradata object // Open our meradata object
rc, err := mo.Open(ctx) rc, err := mo.Open(ctx)
if err != nil { if err != nil {
return nil, err return nil
} }
defer fs.CheckClose(rc, &err) defer func() {
err := rc.Close()
if err != nil {
fs.Errorf(mo, "Error closing object: %v", err)
}
}()
jr := json.NewDecoder(rc) jr := json.NewDecoder(rc)
meta = new(ObjectMetadata) meta = new(ObjectMetadata)
if err = jr.Decode(meta); err != nil { if err = jr.Decode(meta); err != nil {
return nil, err return nil
} }
return meta, nil return meta
} }
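One version of readMetadata above returns (meta, error) and closes the reader with `defer fs.CheckClose(rc, &err)`; the other returns nil on any failure and logs the Close error inline. A self-contained sketch of the named-return close idiom that CheckClose wraps:

```go
package example

import (
	"encoding/json"
	"io"
)

// decodeJSON closes rc and only lets the Close error win when the decode
// itself succeeded - the same behaviour as `defer fs.CheckClose(rc, &err)`.
func decodeJSON(rc io.ReadCloser, v interface{}) (err error) {
	defer func() {
		if cerr := rc.Close(); err == nil {
			err = cerr
		}
	}()
	return json.NewDecoder(rc).Decode(v)
}
```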
// Remove removes this object // Remove removes this object
@@ -1115,9 +1102,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
origName := o.Remote() origName := o.Remote()
if o.meta.Mode != Uncompressed || compressible { if o.meta.Mode != Uncompressed || compressible {
newObject, err = o.f.putWithCustomFunctions(ctx, in, o.f.wrapInfo(src, origName, src.Size()), options, o.f.Fs.Put, updateMeta, compressible, mimeType) newObject, err = o.f.putWithCustomFunctions(ctx, in, o.f.wrapInfo(src, origName, src.Size()), options, o.f.Fs.Put, updateMeta, compressible, mimeType)
if err != nil {
return err
}
if newObject.Object.Remote() != o.Object.Remote() { if newObject.Object.Remote() != o.Object.Remote() {
if removeErr := o.Object.Remove(ctx); removeErr != nil { if removeErr := o.Object.Remove(ctx); removeErr != nil {
return removeErr return removeErr
@@ -1131,9 +1115,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} }
// If we are, just update the object and metadata // If we are, just update the object and metadata
newObject, err = o.f.putWithCustomFunctions(ctx, in, src, options, update, updateMeta, compressible, mimeType) newObject, err = o.f.putWithCustomFunctions(ctx, in, src, options, update, updateMeta, compressible, mimeType)
if err != nil { }
return err if err != nil {
} return err
} }
// Update object metadata and return // Update object metadata and return
o.Object = newObject.Object o.Object = newObject.Object
@@ -1144,9 +1128,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// This will initialize the variables of a new press Object. The metadata object, mo, and metadata struct, meta, must be specified. // This will initialize the variables of a new press Object. The metadata object, mo, and metadata struct, meta, must be specified.
func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object { func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object {
if o == nil {
log.Trace(nil, "newObject(%#v, %#v, %#v) called with nil o", o, mo, meta)
}
return &Object{ return &Object{
Object: o, Object: o,
f: f, f: f,
@@ -1159,9 +1140,6 @@ func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object
// This initializes the variables of a press Object with only the size. The metadata will be loaded later on demand. // This initializes the variables of a press Object with only the size. The metadata will be loaded later on demand.
func (f *Fs) newObjectSizeAndNameOnly(o fs.Object, moName string, size int64) *Object { func (f *Fs) newObjectSizeAndNameOnly(o fs.Object, moName string, size int64) *Object {
if o == nil {
log.Trace(nil, "newObjectSizeAndNameOnly(%#v, %#v, %#v) called with nil o", o, moName, size)
}
return &Object{ return &Object{
Object: o, Object: o,
f: f, f: f,
@@ -1189,7 +1167,7 @@ func (o *Object) loadMetadataIfNotLoaded(ctx context.Context) (err error) {
return err return err
} }
if o.meta == nil { if o.meta == nil {
o.meta, err = readMetadata(ctx, o.mo) o.meta = readMetadata(ctx, o.mo)
} }
return err return err
} }

View File

@@ -14,26 +14,23 @@ import (
"github.com/rclone/rclone/fstest/fstests" "github.com/rclone/rclone/fstest/fstests"
) )
var defaultOpt = fstests.Opt{
RemoteName: "TestCompress:",
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"OpenChunkWriter",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
UnimplementableObjectMethods: []string{},
}
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) { func TestIntegration(t *testing.T) {
fstests.Run(t, &defaultOpt) opt := fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
UnimplementableObjectMethods: []string{}}
fstests.Run(t, &opt)
} }
// TestRemoteGzip tests GZIP compression // TestRemoteGzip tests GZIP compression
@@ -43,13 +40,27 @@ func TestRemoteGzip(t *testing.T) {
} }
tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip") tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip")
name := "TestCompressGzip" name := "TestCompressGzip"
opt := defaultOpt fstests.Run(t, &fstests.Opt{
opt.RemoteName = name + ":" RemoteName: name + ":",
opt.ExtraConfig = []fstests.ExtraConfigItem{ NilObject: (*Object)(nil),
{Name: name, Key: "type", Value: "compress"}, UnimplementableFsMethods: []string{
{Name: name, Key: "remote", Value: tempdir}, "OpenWriterAt",
{Name: name, Key: "compression_mode", Value: "gzip"}, "MergeDirs",
} "DirCacheFlush",
opt.QuickTestOK = true "PutUnchecked",
fstests.Run(t, &opt) "PutStream",
"UserInfo",
"Disconnect",
},
UnimplementableObjectMethods: []string{
"GetTier",
"SetTier",
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "compress"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "compression_mode", Value: "gzip"},
},
QuickTestOK: true,
})
} }
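One version of this test file keeps a package-level default options value and copies it per test, the other repeats the full fstests.Opt literal in each test. A sketch of the copy-and-override pattern, assuming the fstests.Opt and Object types used in this file; note that copying the struct shares the backing arrays of any slice fields, so per-test changes should reassign whole slices rather than append to the shared default:

```go
// Illustrative only: a shared base options value, copied and adjusted per test.
var baseOpt = fstests.Opt{
	RemoteName: "TestCompress:",
	NilObject:  (*Object)(nil),
}

func runGzipVariant(t *testing.T) {
	opt := baseOpt                       // value copy of the defaults
	opt.RemoteName = "TestCompressGzip:" // per-test override
	opt.QuickTestOK = true
	fstests.Run(t, &opt)
}
```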

View File

@@ -21,7 +21,6 @@ import (
"github.com/rclone/rclone/backend/crypt/pkcs7" "github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/version" "github.com/rclone/rclone/lib/version"
"github.com/rfjakob/eme" "github.com/rfjakob/eme"
"golang.org/x/crypto/nacl/secretbox" "golang.org/x/crypto/nacl/secretbox"
@@ -38,6 +37,7 @@ const (
blockHeaderSize = secretbox.Overhead blockHeaderSize = secretbox.Overhead
blockDataSize = 64 * 1024 blockDataSize = 64 * 1024
blockSize = blockHeaderSize + blockDataSize blockSize = blockHeaderSize + blockDataSize
encryptedSuffix = ".bin" // when file name encryption is off we add this suffix to make sure the cloud provider doesn't process the file
) )
// Errors returned by cipher // Errors returned by cipher
@@ -53,9 +53,8 @@ var (
ErrorEncryptedBadBlock = errors.New("failed to authenticate decrypted block - bad password?") ErrorEncryptedBadBlock = errors.New("failed to authenticate decrypted block - bad password?")
ErrorBadBase32Encoding = errors.New("bad base32 filename encoding") ErrorBadBase32Encoding = errors.New("bad base32 filename encoding")
ErrorFileClosed = errors.New("file already closed") ErrorFileClosed = errors.New("file already closed")
ErrorNotAnEncryptedFile = errors.New("not an encrypted file - does not match suffix") ErrorNotAnEncryptedFile = errors.New("not an encrypted file - no \"" + encryptedSuffix + "\" suffix")
ErrorBadSeek = errors.New("Seek beyond end of file") ErrorBadSeek = errors.New("Seek beyond end of file")
ErrorSuffixMissingDot = errors.New("suffix config setting should include a '.'")
defaultSalt = []byte{0xA8, 0x0D, 0xF4, 0x3A, 0x8F, 0xBD, 0x03, 0x08, 0xA7, 0xCA, 0xB8, 0x3E, 0x58, 0x1F, 0x86, 0xB1} defaultSalt = []byte{0xA8, 0x0D, 0xF4, 0x3A, 0x8F, 0xBD, 0x03, 0x08, 0xA7, 0xCA, 0xB8, 0x3E, 0x58, 0x1F, 0x86, 0xB1}
obfuscQuoteRune = '!' obfuscQuoteRune = '!'
) )
@@ -170,30 +169,27 @@ func NewNameEncoding(s string) (enc fileNameEncoding, err error) {
// Cipher defines an encoding and decoding cipher for the crypt backend // Cipher defines an encoding and decoding cipher for the crypt backend
type Cipher struct { type Cipher struct {
dataKey [32]byte // Key for secretbox dataKey [32]byte // Key for secretbox
nameKey [32]byte // 16,24 or 32 bytes nameKey [32]byte // 16,24 or 32 bytes
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
block gocipher.Block block gocipher.Block
mode NameEncryptionMode mode NameEncryptionMode
fileNameEnc fileNameEncoding fileNameEnc fileNameEncoding
buffers sync.Pool // encrypt/decrypt buffers buffers sync.Pool // encrypt/decrypt buffers
cryptoRand io.Reader // read crypto random numbers from here cryptoRand io.Reader // read crypto random numbers from here
dirNameEncrypt bool dirNameEncrypt bool
passBadBlocks bool // if set passed bad blocks as zeroed blocks
encryptedSuffix string
} }
// newCipher initialises the cipher. If salt is "" then it uses a built in salt val // newCipher initialises the cipher. If salt is "" then it uses a built in salt val
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool, enc fileNameEncoding) (*Cipher, error) { func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool, enc fileNameEncoding) (*Cipher, error) {
c := &Cipher{ c := &Cipher{
mode: mode, mode: mode,
fileNameEnc: enc, fileNameEnc: enc,
cryptoRand: rand.Reader, cryptoRand: rand.Reader,
dirNameEncrypt: dirNameEncrypt, dirNameEncrypt: dirNameEncrypt,
encryptedSuffix: ".bin",
} }
c.buffers.New = func() interface{} { c.buffers.New = func() interface{} {
return new([blockSize]byte) return make([]byte, blockSize)
} }
err := c.Key(password, salt) err := c.Key(password, salt)
if err != nil { if err != nil {
@@ -202,29 +198,11 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
return c, nil return c, nil
} }
// setEncryptedSuffix set suffix, or an empty string
func (c *Cipher) setEncryptedSuffix(suffix string) {
if strings.EqualFold(suffix, "none") {
c.encryptedSuffix = ""
return
}
if !strings.HasPrefix(suffix, ".") {
fs.Errorf(nil, "crypt: bad suffix: %v", ErrorSuffixMissingDot)
suffix = "." + suffix
}
c.encryptedSuffix = suffix
}
// Call to set bad block pass through
func (c *Cipher) setPassBadBlocks(passBadBlocks bool) {
c.passBadBlocks = passBadBlocks
}
// Key creates all the internal keys from the password passed in using // Key creates all the internal keys from the password passed in using
// scrypt. // scrypt.
// //
// If salt is "" we use a fixed salt just to make attackers lives // If salt is "" we use a fixed salt just to make attackers lives
// slightly harder than using no salt. // slighty harder than using no salt.
// //
// Note that empty password makes all 0x00 keys which is used in the // Note that empty password makes all 0x00 keys which is used in the
// tests. // tests.
@@ -252,12 +230,15 @@ func (c *Cipher) Key(password, salt string) (err error) {
} }
// getBlock gets a block from the pool of size blockSize // getBlock gets a block from the pool of size blockSize
func (c *Cipher) getBlock() *[blockSize]byte { func (c *Cipher) getBlock() []byte {
return c.buffers.Get().(*[blockSize]byte) return c.buffers.Get().([]byte)
} }
// putBlock returns a block to the pool of size blockSize // putBlock returns a block to the pool of size blockSize
func (c *Cipher) putBlock(buf *[blockSize]byte) { func (c *Cipher) putBlock(buf []byte) {
if len(buf) != blockSize {
panic("bad blocksize returned to pool")
}
c.buffers.Put(buf) c.buffers.Put(buf)
} }
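In the hunk above, one version keeps `*[blockSize]byte` values in the sync.Pool while the other stores `[]byte` and length-checks buffers on the way back in. Putting a slice into the pool boxes a three-word slice header into an interface{}, which allocates on every Put (the pattern staticcheck flags as SA6002); a pointer to a fixed-size array avoids that allocation and makes the size a property of the type. In isolation:

```go
package example

import "sync"

const blockSize = 16 + 64*1024 // header + data, mirroring the constants in this file

// pool stores *[blockSize]byte: Put/Get move a single pointer, so returning a
// buffer allocates nothing and the length can no longer be wrong.
var pool = sync.Pool{
	New: func() interface{} { return new([blockSize]byte) },
}

func getBlock() *[blockSize]byte  { return pool.Get().(*[blockSize]byte) }
func putBlock(b *[blockSize]byte) { pool.Put(b) }
```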
@@ -527,7 +508,7 @@ func (c *Cipher) encryptFileName(in string) string {
// EncryptFileName encrypts a file path // EncryptFileName encrypts a file path
func (c *Cipher) EncryptFileName(in string) string { func (c *Cipher) EncryptFileName(in string) string {
if c.mode == NameEncryptionOff { if c.mode == NameEncryptionOff {
return in + c.encryptedSuffix return in + encryptedSuffix
} }
return c.encryptFileName(in) return c.encryptFileName(in)
} }
@@ -587,8 +568,8 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
// DecryptFileName decrypts a file path // DecryptFileName decrypts a file path
func (c *Cipher) DecryptFileName(in string) (string, error) { func (c *Cipher) DecryptFileName(in string) (string, error) {
if c.mode == NameEncryptionOff { if c.mode == NameEncryptionOff {
remainingLength := len(in) - len(c.encryptedSuffix) remainingLength := len(in) - len(encryptedSuffix)
if remainingLength == 0 || !strings.HasSuffix(in, c.encryptedSuffix) { if remainingLength == 0 || !strings.HasSuffix(in, encryptedSuffix) {
return "", ErrorNotAnEncryptedFile return "", ErrorNotAnEncryptedFile
} }
decrypted := in[:remainingLength] decrypted := in[:remainingLength]
@@ -628,7 +609,7 @@ func (n *nonce) pointer() *[fileNonceSize]byte {
// fromReader fills the nonce from an io.Reader - normally the OSes // fromReader fills the nonce from an io.Reader - normally the OSes
// crypto random number generator // crypto random number generator
func (n *nonce) fromReader(in io.Reader) error { func (n *nonce) fromReader(in io.Reader) error {
read, err := readers.ReadFill(in, (*n)[:]) read, err := io.ReadFull(in, (*n)[:])
if read != fileNonceSize { if read != fileNonceSize {
return fmt.Errorf("short read of nonce: %w", err) return fmt.Errorf("short read of nonce: %w", err)
} }
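fromReader above switches between io.ReadFull and readers.ReadFill. The practical difference: ReadFull reports a partial read as io.ErrUnexpectedEOF, whereas a fill-style helper keeps reading until the buffer is full or the reader errors, and hands back the raw count and error for the caller to judge - which is what the `read != fileNonceSize` check does. An illustrative fill helper (io import assumed; not necessarily rclone's exact implementation):

```go
// readFill reads into buf until it is full or r returns an error, and never
// invents io.ErrUnexpectedEOF; callers decide whether n is enough.
func readFill(r io.Reader, buf []byte) (n int, err error) {
	for n < len(buf) && err == nil {
		var nn int
		nn, err = r.Read(buf[n:])
		n += nn
	}
	return n, err
}
```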
@@ -683,8 +664,8 @@ type encrypter struct {
in io.Reader in io.Reader
c *Cipher c *Cipher
nonce nonce nonce nonce
buf *[blockSize]byte buf []byte
readBuf *[blockSize]byte readBuf []byte
bufIndex int bufIndex int
bufSize int bufSize int
err error err error
@@ -709,9 +690,9 @@ func (c *Cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
} }
} }
// Copy magic into buffer // Copy magic into buffer
copy((*fh.buf)[:], fileMagicBytes) copy(fh.buf, fileMagicBytes)
// Copy nonce into buffer // Copy nonce into buffer
copy((*fh.buf)[fileMagicSize:], fh.nonce[:]) copy(fh.buf[fileMagicSize:], fh.nonce[:])
return fh, nil return fh, nil
} }
@@ -726,20 +707,22 @@ func (fh *encrypter) Read(p []byte) (n int, err error) {
if fh.bufIndex >= fh.bufSize { if fh.bufIndex >= fh.bufSize {
// Read data // Read data
// FIXME should overlap the reads with a go-routine and 2 buffers? // FIXME should overlap the reads with a go-routine and 2 buffers?
readBuf := (*fh.readBuf)[:blockDataSize] readBuf := fh.readBuf[:blockDataSize]
n, err = readers.ReadFill(fh.in, readBuf) n, err = io.ReadFull(fh.in, readBuf)
if n == 0 { if n == 0 {
// err can't be nil since:
// n == len(buf) if and only if err == nil.
return fh.finish(err) return fh.finish(err)
} }
// possibly err != nil here, but we will process the // possibly err != nil here, but we will process the
// data and the next call to ReadFill will return 0, err // data and the next call to ReadFull will return 0, err
// Encrypt the block using the nonce // Encrypt the block using the nonce
secretbox.Seal((*fh.buf)[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey) secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
fh.bufIndex = 0 fh.bufIndex = 0
fh.bufSize = blockHeaderSize + n fh.bufSize = blockHeaderSize + n
fh.nonce.increment() fh.nonce.increment()
} }
n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufSize]) n = copy(p, fh.buf[fh.bufIndex:fh.bufSize])
fh.bufIndex += n fh.bufIndex += n
return n, nil return n, nil
} }
@@ -780,8 +763,8 @@ type decrypter struct {
nonce nonce nonce nonce
initialNonce nonce initialNonce nonce
c *Cipher c *Cipher
buf *[blockSize]byte buf []byte
readBuf *[blockSize]byte readBuf []byte
bufIndex int bufIndex int
bufSize int bufSize int
err error err error
@@ -799,12 +782,12 @@ func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
limit: -1, limit: -1,
} }
// Read file header (magic + nonce) // Read file header (magic + nonce)
readBuf := (*fh.readBuf)[:fileHeaderSize] readBuf := fh.readBuf[:fileHeaderSize]
n, err := readers.ReadFill(fh.rc, readBuf) _, err := io.ReadFull(fh.rc, readBuf)
if n < fileHeaderSize && err == io.EOF { if err == io.EOF || err == io.ErrUnexpectedEOF {
// This read from 0..fileHeaderSize-1 bytes // This read from 0..fileHeaderSize-1 bytes
return nil, fh.finishAndClose(ErrorEncryptedFileTooShort) return nil, fh.finishAndClose(ErrorEncryptedFileTooShort)
} else if err != io.EOF && err != nil { } else if err != nil {
return nil, fh.finishAndClose(err) return nil, fh.finishAndClose(err)
} }
// check the magic // check the magic
@@ -862,8 +845,10 @@ func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offse
func (fh *decrypter) fillBuffer() (err error) { func (fh *decrypter) fillBuffer() (err error) {
// FIXME should overlap the reads with a go-routine and 2 buffers? // FIXME should overlap the reads with a go-routine and 2 buffers?
readBuf := fh.readBuf readBuf := fh.readBuf
n, err := readers.ReadFill(fh.rc, (*readBuf)[:]) n, err := io.ReadFull(fh.rc, readBuf)
if n == 0 { if n == 0 {
// err can't be nil since:
// n == len(buf) if and only if err == nil.
return err return err
} }
// possibly err != nil here, but we will process the data and // possibly err != nil here, but we will process the data and
@@ -871,25 +856,18 @@ func (fh *decrypter) fillBuffer() (err error) {
// Check header + 1 byte exists // Check header + 1 byte exists
if n <= blockHeaderSize { if n <= blockHeaderSize {
if err != nil && err != io.EOF { if err != nil {
return err // return pending error as it is likely more accurate return err // return pending error as it is likely more accurate
} }
return ErrorEncryptedFileBadHeader return ErrorEncryptedFileBadHeader
} }
// Decrypt the block using the nonce // Decrypt the block using the nonce
_, ok := secretbox.Open((*fh.buf)[:0], (*readBuf)[:n], fh.nonce.pointer(), &fh.c.dataKey) _, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
if !ok { if !ok {
if err != nil && err != io.EOF { if err != nil {
return err // return pending error as it is likely more accurate return err // return pending error as it is likely more accurate
} }
if !fh.c.passBadBlocks { return ErrorEncryptedBadBlock
return ErrorEncryptedBadBlock
}
fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
// Zero out the bad block and continue
for i := range (*fh.buf)[:n] {
(*fh.buf)[i] = 0
}
} }
fh.bufIndex = 0 fh.bufIndex = 0
fh.bufSize = n - blockHeaderSize fh.bufSize = n - blockHeaderSize
@@ -915,7 +893,7 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
if fh.limit >= 0 && fh.limit < int64(toCopy) { if fh.limit >= 0 && fh.limit < int64(toCopy) {
toCopy = int(fh.limit) toCopy = int(fh.limit)
} }
n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufIndex+toCopy]) n = copy(p, fh.buf[fh.bufIndex:fh.bufIndex+toCopy])
fh.bufIndex += n fh.bufIndex += n
if fh.limit >= 0 { if fh.limit >= 0 {
fh.limit -= int64(n) fh.limit -= int64(n)
@@ -926,8 +904,9 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
return n, nil return n, nil
} }
// calculateUnderlying converts an (offset, limit) in an encrypted file // calculateUnderlying converts an (offset, limit) in a crypted file
// into an (underlyingOffset, underlyingLimit) for the underlying file. // into an (underlyingOffset, underlyingLimit) for the underlying
// file.
// //
// It also returns number of bytes to discard after reading the first // It also returns number of bytes to discard after reading the first
// block and number of blocks this is from the start so the nonce can // block and number of blocks this is from the start so the nonce can
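For reference, the arithmetic this comment describes follows from the constants near the top of the file: a fixed magic-plus-nonce header, then 64 KiB of plaintext per block stored as blockSize = blockDataSize plus the secretbox overhead. A hedged sketch of the offset part only (calculateUnderlying itself also maps the limit), assuming that standard crypt layout:

```go
// underlyingOffset is an illustration, not the real calculateUnderlying.
func underlyingOffset(offset int64) (underlying, discard, blocks int64) {
	const (
		fileHeaderSize = 8 + 24             // fileMagicSize + fileNonceSize
		blockDataSize  = 64 * 1024          // plaintext bytes per block
		blockSize      = 16 + blockDataSize // blockHeaderSize + blockDataSize
	)
	blocks = offset / blockDataSize  // whole blocks before the offset
	discard = offset % blockDataSize // bytes to drop from the first block read
	underlying = fileHeaderSize + blocks*blockSize
	return underlying, discard, blocks
}
```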

View File

@@ -8,6 +8,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"strings" "strings"
"testing" "testing"
@@ -27,14 +28,14 @@ func TestNewNameEncryptionMode(t *testing.T) {
{"off", NameEncryptionOff, ""}, {"off", NameEncryptionOff, ""},
{"standard", NameEncryptionStandard, ""}, {"standard", NameEncryptionStandard, ""},
{"obfuscate", NameEncryptionObfuscated, ""}, {"obfuscate", NameEncryptionObfuscated, ""},
{"potato", NameEncryptionOff, "unknown file name encryption mode \"potato\""}, {"potato", NameEncryptionOff, "Unknown file name encryption mode \"potato\""},
} { } {
actual, actualErr := NewNameEncryptionMode(test.in) actual, actualErr := NewNameEncryptionMode(test.in)
assert.Equal(t, actual, test.expected) assert.Equal(t, actual, test.expected)
if test.expectedErr == "" { if test.expectedErr == "" {
assert.NoError(t, actualErr) assert.NoError(t, actualErr)
} else { } else {
assert.EqualError(t, actualErr, test.expectedErr) assert.Error(t, actualErr, test.expectedErr)
} }
} }
} }
@@ -405,13 +406,6 @@ func TestNonStandardEncryptFileName(t *testing.T) {
// Off mode // Off mode
c, _ := newCipher(NameEncryptionOff, "", "", true, nil) c, _ := newCipher(NameEncryptionOff, "", "", true, nil)
assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123")) assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
// Off mode with custom suffix
c, _ = newCipher(NameEncryptionOff, "", "", true, nil)
c.setEncryptedSuffix(".jpg")
assert.Equal(t, "1/12/123.jpg", c.EncryptFileName("1/12/123"))
// Off mode with empty suffix
c.setEncryptedSuffix("none")
assert.Equal(t, "1/12/123", c.EncryptFileName("1/12/123"))
// Obfuscation mode // Obfuscation mode
c, _ = newCipher(NameEncryptionObfuscated, "", "", true, nil) c, _ = newCipher(NameEncryptionObfuscated, "", "", true, nil)
assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello")) assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
@@ -490,27 +484,21 @@ func TestNonStandardDecryptFileName(t *testing.T) {
in string in string
expected string expected string
expectedErr error expectedErr error
customSuffix string
}{ }{
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil, ""}, {NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile, ""}, {NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile, ""}, {NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil, ""}, {NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil, ""}, {NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil, ""}, {NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
{NameEncryptionOff, true, "1/12/123.jpg", "1/12/123", nil, ".jpg"}, {NameEncryptionObfuscated, true, "!.hello", "hello", nil},
{NameEncryptionOff, true, "1/12/123", "1/12/123", nil, "none"}, {NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
{NameEncryptionObfuscated, true, "!.hello", "hello", nil, ""}, {NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile, ""}, {NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil, ""}, {NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil, ""}, {NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil, ""},
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil, ""},
} { } {
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc) c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
if test.customSuffix != "" {
c.setEncryptedSuffix(test.customSuffix)
}
actual, actualErr := c.DecryptFileName(test.in) actual, actualErr := c.DecryptFileName(test.in)
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode) what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, test.expected, actual, what) assert.Equal(t, test.expected, actual, what)
@@ -739,7 +727,7 @@ func TestNonceFromReader(t *testing.T) {
assert.Equal(t, nonce{'1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o'}, x) assert.Equal(t, nonce{'1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o'}, x)
buf = bytes.NewBufferString("123456789abcdefghijklmn") buf = bytes.NewBufferString("123456789abcdefghijklmn")
err = x.fromReader(buf) err = x.fromReader(buf)
assert.EqualError(t, err, "short read of nonce: EOF") assert.Error(t, err, "short read of nonce")
} }
func TestNonceFromBuf(t *testing.T) { func TestNonceFromBuf(t *testing.T) {
@@ -1063,7 +1051,7 @@ func TestRandomSource(t *testing.T) {
_, _ = source.Read(buf) _, _ = source.Read(buf)
sink = newRandomSource(1e8) sink = newRandomSource(1e8)
_, err = io.Copy(sink, source) _, err = io.Copy(sink, source)
assert.EqualError(t, err, "Error in stream at 1") assert.Error(t, err, "Error in stream")
} }
type zeroes struct{} type zeroes struct{}
@@ -1085,7 +1073,7 @@ func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
source := newRandomSource(copySize) source := newRandomSource(copySize)
encrypted, err := c.newEncrypter(source, nil) encrypted, err := c.newEncrypter(source, nil)
assert.NoError(t, err) assert.NoError(t, err)
decrypted, err := c.newDecrypter(io.NopCloser(encrypted)) decrypted, err := c.newDecrypter(ioutil.NopCloser(encrypted))
assert.NoError(t, err) assert.NoError(t, err)
sink := newRandomSource(copySize) sink := newRandomSource(copySize)
n, err := io.CopyBuffer(sink, decrypted, buf) n, err := io.CopyBuffer(sink, decrypted, buf)
@@ -1156,15 +1144,15 @@ func TestEncryptData(t *testing.T) {
buf := bytes.NewBuffer(test.in) buf := bytes.NewBuffer(test.in)
encrypted, err := c.EncryptData(buf) encrypted, err := c.EncryptData(buf)
assert.NoError(t, err) assert.NoError(t, err)
out, err := io.ReadAll(encrypted) out, err := ioutil.ReadAll(encrypted)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, test.expected, out) assert.Equal(t, test.expected, out)
// Check we can decode the data properly too... // Check we can decode the data properly too...
buf = bytes.NewBuffer(out) buf = bytes.NewBuffer(out)
decrypted, err := c.DecryptData(io.NopCloser(buf)) decrypted, err := c.DecryptData(ioutil.NopCloser(buf))
assert.NoError(t, err) assert.NoError(t, err)
out, err = io.ReadAll(decrypted) out, err = ioutil.ReadAll(decrypted)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, test.in, out) assert.Equal(t, test.in, out)
} }
@@ -1180,13 +1168,13 @@ func TestNewEncrypter(t *testing.T) {
fh, err := c.newEncrypter(z, nil) fh, err := c.newEncrypter(z, nil)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, nonce{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.nonce) assert.Equal(t, nonce{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.nonce)
assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, (*fh.buf)[:32]) assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.buf[:32])
// Test error path // Test error path
c.cryptoRand = bytes.NewBufferString("123456789abcdefghijklmn") c.cryptoRand = bytes.NewBufferString("123456789abcdefghijklmn")
fh, err = c.newEncrypter(z, nil) fh, err = c.newEncrypter(z, nil)
assert.Nil(t, fh) assert.Nil(t, fh)
assert.EqualError(t, err, "short read of nonce: EOF") assert.Error(t, err, "short read of nonce")
} }
// Test the stream returning 0, io.ErrUnexpectedEOF - this used to // Test the stream returning 0, io.ErrUnexpectedEOF - this used to
@@ -1199,7 +1187,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
fh, err := c.newEncrypter(in, nil) fh, err := c.newEncrypter(in, nil)
assert.NoError(t, err) assert.NoError(t, err)
n, err := io.CopyN(io.Discard, fh, 1e6) n, err := io.CopyN(ioutil.Discard, fh, 1e6)
assert.Equal(t, io.ErrUnexpectedEOF, err) assert.Equal(t, io.ErrUnexpectedEOF, err)
assert.Equal(t, int64(32), n) assert.Equal(t, int64(32), n)
} }
@@ -1237,7 +1225,7 @@ func TestNewDecrypter(t *testing.T) {
cd := newCloseDetector(bytes.NewBuffer(file0[:i])) cd := newCloseDetector(bytes.NewBuffer(file0[:i]))
fh, err = c.newDecrypter(cd) fh, err = c.newDecrypter(cd)
assert.Nil(t, fh) assert.Nil(t, fh)
assert.EqualError(t, err, ErrorEncryptedFileTooShort.Error()) assert.Error(t, err, ErrorEncryptedFileTooShort.Error())
assert.Equal(t, 1, cd.closed) assert.Equal(t, 1, cd.closed)
} }
@@ -1245,7 +1233,7 @@ func TestNewDecrypter(t *testing.T) {
cd = newCloseDetector(er) cd = newCloseDetector(er)
fh, err = c.newDecrypter(cd) fh, err = c.newDecrypter(cd)
assert.Nil(t, fh) assert.Nil(t, fh)
assert.EqualError(t, err, "potato") assert.Error(t, err, "potato")
assert.Equal(t, 1, cd.closed) assert.Equal(t, 1, cd.closed)
// bad magic // bad magic
@@ -1256,7 +1244,7 @@ func TestNewDecrypter(t *testing.T) {
cd := newCloseDetector(bytes.NewBuffer(file0copy)) cd := newCloseDetector(bytes.NewBuffer(file0copy))
fh, err := c.newDecrypter(cd) fh, err := c.newDecrypter(cd)
assert.Nil(t, fh) assert.Nil(t, fh)
assert.EqualError(t, err, ErrorEncryptedBadMagic.Error()) assert.Error(t, err, ErrorEncryptedBadMagic.Error())
file0copy[i] ^= 0x1 file0copy[i] ^= 0x1
assert.Equal(t, 1, cd.closed) assert.Equal(t, 1, cd.closed)
} }
@@ -1269,12 +1257,12 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF} in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
in1 := bytes.NewBuffer(file16) in1 := bytes.NewBuffer(file16)
in := io.NopCloser(io.MultiReader(in1, in2)) in := ioutil.NopCloser(io.MultiReader(in1, in2))
fh, err := c.newDecrypter(in) fh, err := c.newDecrypter(in)
assert.NoError(t, err) assert.NoError(t, err)
n, err := io.CopyN(io.Discard, fh, 1e6) n, err := io.CopyN(ioutil.Discard, fh, 1e6)
assert.Equal(t, io.ErrUnexpectedEOF, err) assert.Equal(t, io.ErrUnexpectedEOF, err)
assert.Equal(t, int64(16), n) assert.Equal(t, int64(16), n)
} }
@@ -1286,14 +1274,14 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
// Make random data // Make random data
const dataSize = 150000 const dataSize = 150000
plaintext, err := io.ReadAll(newRandomSource(dataSize)) plaintext, err := ioutil.ReadAll(newRandomSource(dataSize))
assert.NoError(t, err) assert.NoError(t, err)
// Encrypt the data // Encrypt the data
buf := bytes.NewBuffer(plaintext) buf := bytes.NewBuffer(plaintext)
encrypted, err := c.EncryptData(buf) encrypted, err := c.EncryptData(buf)
assert.NoError(t, err) assert.NoError(t, err)
ciphertext, err := io.ReadAll(encrypted) ciphertext, err := ioutil.ReadAll(encrypted)
assert.NoError(t, err) assert.NoError(t, err)
trials := []int{0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65, trials := []int{0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65,
@@ -1312,7 +1300,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
end = len(ciphertext) end = len(ciphertext)
} }
} }
reader = io.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end])) reader = ioutil.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
return reader, nil return reader, nil
} }
@@ -1502,16 +1490,14 @@ func TestDecrypterRead(t *testing.T) {
assert.NoError(t, err, what) assert.NoError(t, err, what)
continue continue
} }
_, err = io.ReadAll(fh) _, err = ioutil.ReadAll(fh)
var expectedErr error var expectedErr error
switch { switch {
case i == fileHeaderSize: case i == fileHeaderSize:
// This would normally produce an error *except* on the first block // This would normally produce an error *except* on the first block
expectedErr = nil expectedErr = nil
case i <= fileHeaderSize+blockHeaderSize:
expectedErr = ErrorEncryptedFileBadHeader
default: default:
expectedErr = ErrorEncryptedBadBlock expectedErr = io.ErrUnexpectedEOF
} }
if expectedErr != nil { if expectedErr != nil {
assert.EqualError(t, err, expectedErr.Error(), what) assert.EqualError(t, err, expectedErr.Error(), what)
@@ -1528,8 +1514,8 @@ func TestDecrypterRead(t *testing.T) {
cd := newCloseDetector(in) cd := newCloseDetector(in)
fh, err := c.newDecrypter(cd) fh, err := c.newDecrypter(cd)
assert.NoError(t, err) assert.NoError(t, err)
_, err = io.ReadAll(fh) _, err = ioutil.ReadAll(fh)
assert.EqualError(t, err, "potato") assert.Error(t, err, "potato")
assert.Equal(t, 0, cd.closed) assert.Equal(t, 0, cd.closed)
// Test corrupting the input // Test corrupting the input
@@ -1538,28 +1524,17 @@ func TestDecrypterRead(t *testing.T) {
copy(file16copy, file16) copy(file16copy, file16)
for i := range file16copy { for i := range file16copy {
file16copy[i] ^= 0xFF file16copy[i] ^= 0xFF
fh, err := c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy))) fh, err := c.newDecrypter(ioutil.NopCloser(bytes.NewBuffer(file16copy)))
if i < fileMagicSize { if i < fileMagicSize {
assert.EqualError(t, err, ErrorEncryptedBadMagic.Error()) assert.Error(t, err, ErrorEncryptedBadMagic.Error())
assert.Nil(t, fh) assert.Nil(t, fh)
} else { } else {
assert.NoError(t, err) assert.NoError(t, err)
_, err = io.ReadAll(fh) _, err = ioutil.ReadAll(fh)
assert.EqualError(t, err, ErrorEncryptedBadBlock.Error()) assert.Error(t, err, ErrorEncryptedFileBadHeader.Error())
} }
file16copy[i] ^= 0xFF file16copy[i] ^= 0xFF
} }
// Test that we can corrupt a byte and read zeroes if
// passBadBlocks is set
copy(file16copy, file16)
file16copy[len(file16copy)-1] ^= 0xFF
c.passBadBlocks = true
fh, err = c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
assert.NoError(t, err)
buf, err := io.ReadAll(fh)
assert.NoError(t, err)
assert.Equal(t, make([]byte, 16), buf)
} }
func TestDecrypterClose(t *testing.T) { func TestDecrypterClose(t *testing.T) {
@@ -1580,7 +1555,7 @@ func TestDecrypterClose(t *testing.T) {
// double close // double close
err = fh.Close() err = fh.Close()
assert.EqualError(t, err, ErrorFileClosed.Error()) assert.Error(t, err, ErrorFileClosed.Error())
assert.Equal(t, 1, cd.closed) assert.Equal(t, 1, cd.closed)
// try again reading the file this time // try again reading the file this time
@@ -1590,7 +1565,7 @@ func TestDecrypterClose(t *testing.T) {
assert.Equal(t, 0, cd.closed) assert.Equal(t, 0, cd.closed)
// close after reading // close after reading
out, err := io.ReadAll(fh) out, err := ioutil.ReadAll(fh)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, []byte{1}, out) assert.Equal(t, []byte{1}, out)
assert.Equal(t, io.EOF, fh.err) assert.Equal(t, io.EOF, fh.err)
@@ -1607,6 +1582,8 @@ func TestPutGetBlock(t *testing.T) {
block := c.getBlock() block := c.getBlock()
c.putBlock(block) c.putBlock(block)
c.putBlock(block) c.putBlock(block)
assert.Panics(t, func() { c.putBlock(block[:len(block)-1]) })
} }
func TestKey(t *testing.T) { func TestKey(t *testing.T) {


@@ -48,7 +48,7 @@ func init() {
Help: "Very simple filename obfuscation.", Help: "Very simple filename obfuscation.",
}, { }, {
Value: "off", Value: "off",
Help: "Don't encrypt the file names.\nAdds a \".bin\", or \"suffix\" extension only.", Help: "Don't encrypt the file names.\nAdds a \".bin\" extension only.",
}, },
}, },
}, { }, {
@@ -79,9 +79,7 @@ NB If filename_encryption is "off" then this option will do nothing.`,
}, { }, {
Name: "server_side_across_configs", Name: "server_side_across_configs",
Default: false, Default: false,
Help: `Deprecated: use --server-side-across-configs instead. Help: `Allow server-side operations (e.g. copy) to work across different crypt configs.
Allow server-side operations (e.g. copy) to work across different crypt configs.
Normally this option is not what you want, but if you have two crypts Normally this option is not what you want, but if you have two crypts
pointing to the same backend you can use it. pointing to the same backend you can use it.
@@ -121,15 +119,6 @@ names, or for debugging purposes.`,
Help: "Encrypt file data.", Help: "Encrypt file data.",
}, },
}, },
}, {
Name: "pass_bad_blocks",
Help: `If set this will pass bad blocks through as all 0.
This should not be set in normal operation, it should only be set if
trying to recover an encrypted file with errors and it is desired to
recover as much of the file as possible.`,
Default: false,
Advanced: true,
}, { }, {
Name: "filename_encoding", Name: "filename_encoding",
Help: `How to encode the encrypted filename to text string. Help: `How to encode the encrypted filename to text string.
@@ -149,18 +138,10 @@ length and if it's case sensitive.`,
}, },
{ {
Value: "base32768", Value: "base32768",
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive, Dropbox)", Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive)",
}, },
}, },
Advanced: true, Advanced: true,
}, {
Name: "suffix",
Help: `If this is set it will override the default suffix of ".bin".
Setting suffix to "none" will result in an empty suffix. This may be useful
when the path length is critical.`,
Default: ".bin",
Advanced: true,
}}, }},
}) })
} }
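The suffix and pass_bad_blocks options above (present on only one side of this diff) feed the cipher setters in newCipherForConfig below. As a hedged sketch of how the suffix plays out, following the test table earlier in this diff (enc stands for whichever filename encoding the tests iterate over):

    c, _ := newCipher(NameEncryptionOff, "", "", true, enc)
    c.setEncryptedSuffix(".jpg")
    actual, err := c.DecryptFileName("1/12/123.jpg")
    assert.Equal(t, "1/12/123", actual) // the custom suffix is stripped instead of ".bin"
    assert.NoError(t, err)
    // setEncryptedSuffix("none") means no suffix at all, so "1/12/123" maps to itself.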
@@ -193,8 +174,6 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to make cipher: %w", err) return nil, fmt.Errorf("failed to make cipher: %w", err)
} }
cipher.setEncryptedSuffix(opt.Suffix)
cipher.setPassBadBlocks(opt.PassBadBlocks)
return cipher, nil return cipher, nil
} }
@@ -256,7 +235,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
// the features here are ones we could support, and they are // the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs // ANDed with the ones from wrappedFs
f.features = (&fs.Features{ f.features = (&fs.Features{
CaseInsensitive: !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff, CaseInsensitive: cipher.NameEncryptionMode() == NameEncryptionOff,
DuplicateFiles: true, DuplicateFiles: true,
ReadMimeType: false, // MimeTypes not supported with crypt ReadMimeType: false, // MimeTypes not supported with crypt
WriteMimeType: false, WriteMimeType: false,
@@ -268,7 +247,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
ReadMetadata: true, ReadMetadata: true,
WriteMetadata: true, WriteMetadata: true,
UserMetadata: true, UserMetadata: true,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs) }).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
return f, err return f, err
@@ -284,9 +262,7 @@ type Options struct {
Password2 string `config:"password2"` Password2 string `config:"password2"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"` ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
ShowMapping bool `config:"show_mapping"` ShowMapping bool `config:"show_mapping"`
PassBadBlocks bool `config:"pass_bad_blocks"`
FilenameEncoding string `config:"filename_encoding"` FilenameEncoding string `config:"filename_encoding"`
Suffix string `config:"suffix"`
} }
// Fs represents a wrapped fs.Fs // Fs represents a wrapped fs.Fs
@@ -420,8 +396,6 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
// put implements Put or PutStream // put implements Put or PutStream
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) { func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
ci := fs.GetConfig(ctx)
if f.opt.NoDataEncryption { if f.opt.NoDataEncryption {
o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...) o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
if err == nil && o != nil { if err == nil && o != nil {
@@ -439,9 +413,6 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
// Find a hash the destination supports to compute a hash of // Find a hash the destination supports to compute a hash of
// the encrypted data // the encrypted data
ht := f.Fs.Hashes().GetOne() ht := f.Fs.Hashes().GetOne()
if ci.IgnoreChecksum {
ht = hash.None
}
var hasher *hash.MultiHasher var hasher *hash.MultiHasher
if ht != hash.None { if ht != hash.None {
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht)) hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
@@ -478,7 +449,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
if err != nil { if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err) fs.Errorf(o, "Failed to remove corrupted object: %v", err)
} }
return nil, fmt.Errorf("corrupted on transfer: %v encrypted hash differ src %q vs dst %q", ht, srcHash, dstHash) return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
} }
fs.Debugf(src, "%v = %s OK", ht, srcHash) fs.Debugf(src, "%v = %s OK", ht, srcHash)
} }
@@ -1076,11 +1047,10 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
// Get the underlying object if there is one // Get the underlying object if there is one
if srcObj, ok = o.ObjectInfo.(fs.Object); ok { if srcObj, ok = o.ObjectInfo.(fs.Object); ok {
// Prefer direct interface assertion // Prefer direct interface assertion
} else if do, ok := o.ObjectInfo.(*fs.OverrideRemote); ok { } else if do, ok := o.ObjectInfo.(fs.ObjectUnWrapper); ok {
// Unwrap if it is an operations.OverrideRemote // Otherwise likely is an operations.OverrideRemote
srcObj = do.UnWrap() srcObj = do.UnWrap()
} else { } else {
// Otherwise don't unwrap any further
return "", nil return "", nil
} }
// if this is wrapping a local object then we work out the hash // if this is wrapping a local object then we work out the hash


@@ -17,28 +17,41 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
type testWrapper struct {
fs.ObjectInfo
}
// UnWrap returns the Object that this Object is wrapping or nil if it
// isn't wrapping anything
func (o testWrapper) UnWrap() fs.Object {
if o, ok := o.ObjectInfo.(fs.Object); ok {
return o
}
return nil
}
// Create a temporary local fs to upload things from // Create a temporary local fs to upload things from
func makeTempLocalFs(t *testing.T) (localFs fs.Fs) { func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
localFs, err := fs.TemporaryLocalFs(context.Background()) localFs, err := fs.TemporaryLocalFs(context.Background())
require.NoError(t, err) require.NoError(t, err)
t.Cleanup(func() { cleanup = func() {
require.NoError(t, localFs.Rmdir(context.Background(), "")) require.NoError(t, localFs.Rmdir(context.Background(), ""))
}) }
return localFs return localFs, cleanup
} }
// Upload a file to a remote // Upload a file to a remote
func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object) { func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object, cleanup func()) {
inBuf := bytes.NewBufferString(contents) inBuf := bytes.NewBufferString(contents)
t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC) t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC)
upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil) upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil)
obj, err := f.Put(context.Background(), inBuf, upSrc) obj, err := f.Put(context.Background(), inBuf, upSrc)
require.NoError(t, err) require.NoError(t, err)
t.Cleanup(func() { cleanup = func() {
require.NoError(t, obj.Remove(context.Background())) require.NoError(t, obj.Remove(context.Background()))
}) }
return obj return obj, cleanup
} }
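One side of this diff registers teardown with t.Cleanup instead of returning cleanup functions. Cleanups run last-in-first-out once the test (and any subtests) finish, so the uploaded object is removed before the temporary Fs is Rmdir'd, the same ordering the explicit defers gave, without threading closures through every caller:

    // registered first, runs last
    t.Cleanup(func() { require.NoError(t, localFs.Rmdir(context.Background(), "")) })
    // registered second, runs first
    t.Cleanup(func() { require.NoError(t, obj.Remove(context.Background())) })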
// Test the ObjectInfo // Test the ObjectInfo
@@ -52,9 +65,11 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
path = "_wrap" path = "_wrap"
} }
localFs := makeTempLocalFs(t) localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
obj := uploadFile(t, localFs, path, contents) obj, cleanupObj := uploadFile(t, localFs, path, contents)
defer cleanupObj()
// encrypt the data // encrypt the data
inBuf := bytes.NewBufferString(contents) inBuf := bytes.NewBufferString(contents)
@@ -68,7 +83,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
var oi fs.ObjectInfo = obj var oi fs.ObjectInfo = obj
if wrap { if wrap {
// wrap the object in an fs.ObjectUnwrapper if required // wrap the object in an fs.ObjectUnwrapper if required
oi = fs.NewOverrideRemote(oi, "new_remote") oi = testWrapper{oi}
} }
// wrap the object in a crypt for upload using the nonce we // wrap the object in a crypt for upload using the nonce we
@@ -101,13 +116,16 @@ func testComputeHash(t *testing.T, f *Fs) {
t.Skipf("%v: does not support hashes", f.Fs) t.Skipf("%v: does not support hashes", f.Fs)
} }
localFs := makeTempLocalFs(t) localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
// Upload a file to localFs as a test object // Upload a file to localFs as a test object
localObj := uploadFile(t, localFs, path, contents) localObj, cleanupLocalObj := uploadFile(t, localFs, path, contents)
defer cleanupLocalObj()
// Upload the same data to the remote Fs also // Upload the same data to the remote Fs also
remoteObj := uploadFile(t, f, path, contents) remoteObj, cleanupRemoteObj := uploadFile(t, f, path, contents)
defer cleanupRemoteObj()
// Calculate the expected Hash of the remote object // Calculate the expected Hash of the remote object
computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType) computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType)


@@ -24,7 +24,7 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName, RemoteName: *fstest.RemoteName,
NilObject: (*crypt.Object)(nil), NilObject: (*crypt.Object)(nil),
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"}, UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"}, UnimplementableObjectMethods: []string{"MimeType"},
}) })
} }
@@ -45,7 +45,7 @@ func TestStandardBase32(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato")}, {Name: name, Key: "password", Value: obscure.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"}, {Name: name, Key: "filename_encryption", Value: "standard"},
}, },
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"}, UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"}, UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true, QuickTestOK: true,
}) })
@@ -67,7 +67,7 @@ func TestStandardBase64(t *testing.T) {
{Name: name, Key: "filename_encryption", Value: "standard"}, {Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name, Key: "filename_encoding", Value: "base64"}, {Name: name, Key: "filename_encoding", Value: "base64"},
}, },
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"}, UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"}, UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true, QuickTestOK: true,
}) })
@@ -89,7 +89,7 @@ func TestStandardBase32768(t *testing.T) {
{Name: name, Key: "filename_encryption", Value: "standard"}, {Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name, Key: "filename_encoding", Value: "base32768"}, {Name: name, Key: "filename_encoding", Value: "base32768"},
}, },
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"}, UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"}, UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true, QuickTestOK: true,
}) })
@@ -111,7 +111,7 @@ func TestOff(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")}, {Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
{Name: name, Key: "filename_encryption", Value: "off"}, {Name: name, Key: "filename_encryption", Value: "off"},
}, },
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"}, UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"}, UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true, QuickTestOK: true,
}) })
@@ -137,7 +137,7 @@ func TestObfuscate(t *testing.T) {
{Name: name, Key: "filename_encryption", Value: "obfuscate"}, {Name: name, Key: "filename_encryption", Value: "obfuscate"},
}, },
SkipBadWindowsCharacters: true, SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"}, UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"}, UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true, QuickTestOK: true,
}) })
@@ -164,7 +164,7 @@ func TestNoDataObfuscate(t *testing.T) {
{Name: name, Key: "no_data_encryption", Value: "true"}, {Name: name, Key: "no_data_encryption", Value: "true"},
}, },
SkipBadWindowsCharacters: true, SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"}, UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"}, UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true, QuickTestOK: true,
}) })


@@ -14,10 +14,11 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"mime" "mime"
"net/http" "net/http"
"os"
"path" "path"
"regexp"
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
@@ -202,7 +203,7 @@ func init() {
m.Set("root_folder_id", "appDataFolder") m.Set("root_folder_id", "appDataFolder")
} }
if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" && !opt.EnvAuth { if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" {
return oauthutil.ConfigOut("teamdrive", &oauthutil.Options{ return oauthutil.ConfigOut("teamdrive", &oauthutil.Options{
OAuth2Config: driveConfig, OAuth2Config: driveConfig,
}) })
@@ -277,23 +278,20 @@ Leave blank normally.
Fill in to access "Computers" folders (see docs), or for rclone to use Fill in to access "Computers" folders (see docs), or for rclone to use
a non root folder as its starting point. a non root folder as its starting point.
`, `,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "service_account_file", Name: "service_account_file",
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp, Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
}, { }, {
Name: "service_account_credentials", Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.", Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideConfigurator, Hide: fs.OptionHideConfigurator,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "team_drive", Name: "team_drive",
Help: "ID of the Shared Drive (Team Drive).", Help: "ID of the Shared Drive (Team Drive).",
Hide: fs.OptionHideConfigurator, Hide: fs.OptionHideConfigurator,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "auth_owner_only", Name: "auth_owner_only",
Default: false, Default: false,
@@ -419,11 +417,10 @@ date is used.`,
Help: "Size of listing chunk 100-1000, 0 to disable.", Help: "Size of listing chunk 100-1000, 0 to disable.",
Advanced: true, Advanced: true,
}, { }, {
Name: "impersonate", Name: "impersonate",
Default: "", Default: "",
Help: `Impersonate this user when using a service account.`, Help: `Impersonate this user when using a service account.`,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "alternate_export", Name: "alternate_export",
Default: false, Default: false,
@@ -455,11 +452,7 @@ If downloading a file returns the error "This file has been identified
as malware or spam and cannot be downloaded" with the error code as malware or spam and cannot be downloaded" with the error code
"cannotDownloadAbusiveFile" then supply this flag to rclone to "cannotDownloadAbusiveFile" then supply this flag to rclone to
indicate you acknowledge the risks of downloading the file and rclone indicate you acknowledge the risks of downloading the file and rclone
will download it anyway. will download it anyway.`,
Note that if you are using service account it will need Manager
permission (not Content Manager) for this flag to work. If the SA
does not have the right permission, Google will just ignore the flag.`,
Advanced: true, Advanced: true,
}, { }, {
Name: "keep_revision_forever", Name: "keep_revision_forever",
@@ -503,9 +496,7 @@ need to use --ignore size also.`,
}, { }, {
Name: "server_side_across_configs", Name: "server_side_across_configs",
Default: false, Default: false,
Help: `Deprecated: use --server-side-across-configs instead. Help: `Allow server-side operations (e.g. copy) to work across different drive configs.
Allow server-side operations (e.g. copy) to work across different drive configs.
This can be useful if you wish to do a server-side copy between two This can be useful if you wish to do a server-side copy between two
different Google drives. Note that this isn't enabled by default different Google drives. Note that this isn't enabled by default
@@ -594,32 +585,9 @@ This resource key requirement only applies to a subset of old files.
Note also that opening the folder once in the web interface (with the Note also that opening the folder once in the web interface (with the
user you've authenticated rclone with) seems to be enough so that the user you've authenticated rclone with) seems to be enough so that the
resource key is not needed. resource key is no needed.
`,
Advanced: true,
Sensitive: true,
}, {
Name: "fast_list_bug_fix",
Help: `Work around a bug in Google Drive listing.
Normally rclone will work around a bug in Google Drive when using
--fast-list (ListR) where the search "(A in parents) or (B in
parents)" returns nothing sometimes. See #3114, #4289 and
https://issuetracker.google.com/issues/149522397
Rclone detects this by finding no items in more than one directory
when listing and retries them as lists of individual directories.
This means that if you have a lot of empty directories rclone will end
up listing them all individually and this can take many more API
calls.
This flag allows the work-around to be disabled. This is **not**
recommended in normal use - only if you have a particular case you are
having trouble with like many empty directories.
`, `,
Advanced: true, Advanced: true,
Default: true,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,
@@ -627,18 +595,6 @@ having trouble with like many empty directories.
// Encode invalid UTF-8 bytes as json doesn't handle them properly. // Encode invalid UTF-8 bytes as json doesn't handle them properly.
// Don't encode / as it's a valid name character in drive. // Don't encode / as it's a valid name character in drive.
Default: encoder.EncodeInvalidUtf8, Default: encoder.EncodeInvalidUtf8,
}, {
Name: "env_auth",
Help: "Get IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
Default: false,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "false",
Help: "Enter credentials in the next step.",
}, {
Value: "true",
Help: "Get GCP IAM credentials from the environment (env vars or IAM).",
}},
}}...), }}...),
}) })
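The env_auth option above (again present on only one side of this diff) corresponds to the Application Default Credentials branch in createOAuthClient further down: when no service account file or blob is configured, credentials come from the environment, for example a key file named by GOOGLE_APPLICATION_CREDENTIALS or instance metadata. The resolution itself is a single call; a sketch:

    scopes := driveScopes(opt.Scope) // e.g. ["https://www.googleapis.com/auth/drive"]
    oAuthClient, err := google.DefaultClient(ctx, scopes...) // Application Default Credentials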
@@ -694,9 +650,7 @@ type Options struct {
SkipShortcuts bool `config:"skip_shortcuts"` SkipShortcuts bool `config:"skip_shortcuts"`
SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"` SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"`
ResourceKey string `config:"resource_key"` ResourceKey string `config:"resource_key"`
FastListBugFix bool `config:"fast_list_bug_fix"`
Enc encoder.MultiEncoder `config:"encoding"` Enc encoder.MultiEncoder `config:"encoding"`
EnvAuth bool `config:"env_auth"`
} }
// Fs represents a remote drive server // Fs represents a remote drive server
@@ -804,7 +758,7 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
} else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" { } else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" {
fs.Errorf(f, "Received download limit error: %v", err) fs.Errorf(f, "Received download limit error: %v", err)
return false, fserrors.FatalError(err) return false, fserrors.FatalError(err)
} else if f.opt.StopOnUploadLimit && (reason == "quotaExceeded" || reason == "storageQuotaExceeded") { } else if f.opt.StopOnUploadLimit && reason == "quotaExceeded" {
fs.Errorf(f, "Received upload limit error: %v", err) fs.Errorf(f, "Received upload limit error: %v", err)
return false, fserrors.FatalError(err) return false, fserrors.FatalError(err)
} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" { } else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
@@ -1154,7 +1108,7 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
// try loading service account credentials from env variable, then from a file // try loading service account credentials from env variable, then from a file
if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" { if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServiceAccountFile)) loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
if err != nil { if err != nil {
return nil, fmt.Errorf("error opening service account credentials file: %w", err) return nil, fmt.Errorf("error opening service account credentials file: %w", err)
} }
@@ -1165,12 +1119,6 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create oauth client from service account: %w", err) return nil, fmt.Errorf("failed to create oauth client from service account: %w", err)
} }
} else if opt.EnvAuth {
scopes := driveScopes(opt.Scope)
oAuthClient, err = google.DefaultClient(ctx, scopes...)
if err != nil {
return nil, fmt.Errorf("failed to create client from environment: %w", err)
}
} else { } else {
oAuthClient, _, err = oauthutil.NewClientWithBaseClient(ctx, name, m, driveConfig, getClient(ctx, opt)) oAuthClient, _, err = oauthutil.NewClientWithBaseClient(ctx, name, m, driveConfig, getClient(ctx, opt))
if err != nil { if err != nil {
@@ -1262,7 +1210,6 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
WriteMimeType: true, WriteMimeType: true,
CanHaveEmptyDirectories: true, CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs, ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
FilterAware: true,
}).Fill(ctx, f) }).Fill(ctx, f)
// Create a new authorized Drive client. // Create a new authorized Drive client.
@@ -1542,9 +1489,6 @@ func (f *Fs) newObjectWithExportInfo(
// NewObject finds the Object at remote. If it can't be found // NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound. // it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if strings.HasSuffix(remote, "/") {
return nil, fs.ErrorIsDir
}
info, extension, exportName, exportMimeType, isDocument, err := f.getRemoteInfoWithExport(ctx, remote) info, extension, exportName, exportMimeType, isDocument, err := f.getRemoteInfoWithExport(ctx, remote)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -1914,7 +1858,7 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listRE
// drive where (A in parents) or (B in parents) returns nothing // drive where (A in parents) or (B in parents) returns nothing
// sometimes. See #3114, #4289 and // sometimes. See #3114, #4289 and
// https://issuetracker.google.com/issues/149522397 // https://issuetracker.google.com/issues/149522397
if f.opt.FastListBugFix && len(dirs) > 1 && !foundItems { if len(dirs) > 1 && !foundItems {
if atomic.SwapInt32(&f.grouping, 1) != 1 { if atomic.SwapInt32(&f.grouping, 1) != 1 {
fs.Debugf(f, "Disabling ListR to work around bug in drive as multi listing (%d) returned no entries", len(dirs)) fs.Debugf(f, "Disabling ListR to work around bug in drive as multi listing (%d) returned no entries", len(dirs))
} }
@@ -2932,7 +2876,6 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
if f.rootFolderID == "appDataFolder" { if f.rootFolderID == "appDataFolder" {
changesCall.Spaces("appDataFolder") changesCall.Spaces("appDataFolder")
} }
changesCall.RestrictToMyDrive(!f.opt.SharedWithMe)
changeList, err = changesCall.Context(ctx).Do() changeList, err = changesCall.Context(ctx).Do()
return f.shouldRetry(ctx, err) return f.shouldRetry(ctx, err)
}) })
@@ -3379,9 +3322,9 @@ This takes an optional directory to trash which make this easier to
use via the API. use via the API.
rclone backend untrash drive:directory rclone backend untrash drive:directory
rclone backend --interactive untrash drive:directory subdir rclone backend -i untrash drive:directory subdir
Use the --interactive/-i or --dry-run flag to see what would be restored before restoring it. Use the -i flag to see what would be restored before restoring it.
Result: Result:
@@ -3411,7 +3354,7 @@ component will be used as the file name.
If the destination is a drive backend then server-side copying will be If the destination is a drive backend then server-side copying will be
attempted if possible. attempted if possible.
Use the --interactive/-i or --dry-run flag to see what would be copied before copying. Use the -i flag to see what would be copied before copying.
`, `,
}, { }, {
Name: "exportformats", Name: "exportformats",
@@ -3487,12 +3430,13 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
if err != nil { if err != nil {
return nil, err return nil, err
} }
re := regexp.MustCompile(`[^\w_. -]+`)
if _, ok := opt["config"]; ok { if _, ok := opt["config"]; ok {
lines := []string{} lines := []string{}
upstreams := []string{} upstreams := []string{}
names := make(map[string]struct{}, len(drives)) names := make(map[string]struct{}, len(drives))
for i, drive := range drives { for i, drive := range drives {
name := fspath.MakeConfigName(drive.Name) name := re.ReplaceAllString(drive.Name, "_")
for { for {
if _, found := names[name]; !found { if _, found := names[name]; !found {
break break
@@ -3855,7 +3799,7 @@ func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.
data = data[:limit] data = data[:limit]
} }
return io.NopCloser(bytes.NewReader(data)), nil return ioutil.NopCloser(bytes.NewReader(data)), nil
} }
func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader, func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
@@ -3914,7 +3858,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil { if err != nil {
return err return err
} }
newO, err := o.fs.newObjectWithInfo(ctx, o.remote, info) newO, err := o.fs.newObjectWithInfo(ctx, src.Remote(), info)
if err != nil { if err != nil {
return err return err
} }


@@ -7,6 +7,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"mime" "mime"
"os" "os"
"path" "path"
@@ -77,7 +78,7 @@ var additionalMimeTypes = map[string]string{
// Load the example export formats into exportFormats for testing // Load the example export formats into exportFormats for testing
func TestInternalLoadExampleFormats(t *testing.T) { func TestInternalLoadExampleFormats(t *testing.T) {
fetchFormatsOnce.Do(func() {}) fetchFormatsOnce.Do(func() {})
buf, err := os.ReadFile(filepath.FromSlash("test/about.json")) buf, err := ioutil.ReadFile(filepath.FromSlash("test/about.json"))
var about struct { var about struct {
ExportFormats map[string][]string `json:"exportFormats,omitempty"` ExportFormats map[string][]string `json:"exportFormats,omitempty"`
ImportFormats map[string][]string `json:"importFormats,omitempty"` ImportFormats map[string][]string `json:"importFormats,omitempty"`
@@ -243,15 +244,6 @@ func (f *Fs) InternalTestShouldRetry(t *testing.T) {
quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403) quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403)
assert.False(t, quotaExceededRetry) assert.False(t, quotaExceededRetry)
assert.Equal(t, quotaExceededError, expectedQuotaError) assert.Equal(t, quotaExceededError, expectedQuotaError)
sqEItem := googleapi.ErrorItem{
Reason: "storageQuotaExceeded",
}
generic403.Errors[0] = sqEItem
expectedStorageQuotaError := fserrors.FatalError(&generic403)
storageQuotaExceededRetry, storageQuotaExceededError := f.shouldRetry(ctx, &generic403)
assert.False(t, storageQuotaExceededRetry)
assert.Equal(t, storageQuotaExceededError, expectedStorageQuotaError)
} }
func (f *Fs) InternalTestDocumentImport(t *testing.T) { func (f *Fs) InternalTestDocumentImport(t *testing.T) {
@@ -526,9 +518,6 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
// TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery // TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
func (f *Fs) InternalTestAgeQuery(t *testing.T) { func (f *Fs) InternalTestAgeQuery(t *testing.T) {
// Check set up for filtering
assert.True(t, f.Features().FilterAware)
opt := &filter.Opt{} opt := &filter.Opt{}
err := opt.MaxAge.Set("1h") err := opt.MaxAge.Set("1h")
assert.NoError(t, err) assert.NoError(t, err)


@@ -13,6 +13,7 @@ import (
"sync" "sync"
"time" "time"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/async"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files" "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fserrors"
@@ -139,12 +140,55 @@ func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionF
return complete, nil return complete, nil
} }
// finishBatchJobStatus waits for the batch to complete returning completed entries
func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *files.UploadSessionFinishBatchLaunch) (complete *files.UploadSessionFinishBatchResult, err error) {
if launchBatchStatus.AsyncJobId == "" {
return nil, errors.New("wait for batch completion: empty job ID")
}
var batchStatus *files.UploadSessionFinishBatchJobStatus
sleepTime := 100 * time.Millisecond
const maxSleepTime = 1 * time.Second
startTime := time.Now()
try := 1
for {
remaining := time.Duration(b.f.opt.BatchCommitTimeout) - time.Since(startTime)
if remaining < 0 {
break
}
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
AsyncJobId: launchBatchStatus.AsyncJobId,
})
return shouldRetry(ctx, err)
})
if err != nil {
fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d remaining %v", sleepTime, err, try, remaining)
} else {
if batchStatus.Tag == "complete" {
fs.Debugf(b.f, "Upload batch completed in %v", time.Since(startTime))
return batchStatus.Complete, nil
}
fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d remaining %v", sleepTime, batchStatus.Tag, try, remaining)
}
time.Sleep(sleepTime)
sleepTime *= 2
if sleepTime > maxSleepTime {
sleepTime = maxSleepTime
}
try++
}
if err == nil {
err = errors.New("batch didn't complete")
}
return nil, fmt.Errorf("wait for batch failed after %d tries in %v: %w", try, time.Since(startTime), err)
}
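finishBatchJobStatus above polls UploadSessionFinishBatchCheck with a capped exponential backoff bounded by batch_commit_timeout. Stripped of the Dropbox specifics, the retry shape is roughly as follows (poll and timeout are stand-ins for the pacer call and the configured commit timeout):

    sleep, start := 100*time.Millisecond, time.Now()
    for time.Since(start) < timeout {
        if done := poll(); done {
            return nil
        }
        time.Sleep(sleep)
        if sleep *= 2; sleep > time.Second {
            sleep = time.Second // never wait more than a second between polls
        }
    }
    return errors.New("batch didn't complete")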
// commit a batch // commit a batch
func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) { func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
// If commit fails then signal clients if sync // If commit fails then signal clients if sync
var signalled = b.async var signalled = b.async
defer func() { defer func() {
if err != nil && !signalled { if err != nil && signalled {
// Signal to clients that there was an error // Signal to clients that there was an error
for _, result := range results { for _, result := range results {
result <- batcherResponse{err: err} result <- batcherResponse{err: err}


@@ -58,7 +58,7 @@ import (
const ( const (
rcloneClientID = "5jcck7diasz0rqy" rcloneClientID = "5jcck7diasz0rqy"
rcloneEncryptedClientSecret = "fRS5vVLr2v6FbyXYnIgjwBuUAt0osq_QZTXAEcmZ7g" rcloneEncryptedClientSecret = "fRS5vVLr2v6FbyXYnIgjwBuUAt0osq_QZTXAEcmZ7g"
defaultMinSleep = fs.Duration(10 * time.Millisecond) minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential decayConstant = 2 // bigger for slower decay, exponential
// Upload chunk size - setting too small makes uploads slow. // Upload chunk size - setting too small makes uploads slow.
@@ -182,9 +182,8 @@ client_secret) to use this option as currently rclone's default set of
permissions doesn't include "members.read". This can be added once permissions doesn't include "members.read". This can be added once
v1.55 or later is in use everywhere. v1.55 or later is in use everywhere.
`, `,
Default: "", Default: "",
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "shared_files", Name: "shared_files",
Help: `Instructs rclone to work on individual shared files. Help: `Instructs rclone to work on individual shared files.
@@ -261,8 +260,8 @@ uploaded.
The default for this is 0 which means rclone will choose a sensible The default for this is 0 which means rclone will choose a sensible
default based on the batch_mode in use. default based on the batch_mode in use.
- batch_mode: async - default batch_timeout is 10s - batch_mode: async - default batch_timeout is 500ms
- batch_mode: sync - default batch_timeout is 500ms - batch_mode: sync - default batch_timeout is 10s
- batch_mode: off - not in use - batch_mode: off - not in use
`, `,
Default: fs.Duration(0), Default: fs.Duration(0),
@@ -272,11 +271,6 @@ default based on the batch_mode in use.
Help: `Max time to wait for a batch to finish committing`, Help: `Max time to wait for a batch to finish committing`,
Default: fs.Duration(10 * time.Minute), Default: fs.Duration(10 * time.Minute),
Advanced: true, Advanced: true,
}, {
Name: "pacer_min_sleep",
Default: defaultMinSleep,
Help: "Minimum time to sleep between API calls.",
Advanced: true,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,
@@ -305,7 +299,6 @@ type Options struct {
BatchTimeout fs.Duration `config:"batch_timeout"` BatchTimeout fs.Duration `config:"batch_timeout"`
BatchCommitTimeout fs.Duration `config:"batch_commit_timeout"` BatchCommitTimeout fs.Duration `config:"batch_commit_timeout"`
AsyncBatch bool `config:"async_batch"` AsyncBatch bool `config:"async_batch"`
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
Enc encoder.MultiEncoder `config:"encoding"` Enc encoder.MultiEncoder `config:"encoding"`
} }
@@ -449,7 +442,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
name: name, name: name,
opt: *opt, opt: *opt,
ci: ci, ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
} }
f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout)) f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout))
if err != nil { if err != nil {
@@ -543,7 +536,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
default: default:
return nil, err return nil, err
} }
// if the mount failed we have to abort here // if the moint failed we have to abort here
} }
// if the mount succeeded it's now a normal folder in the users root namespace // if the mount succeeded it's now a normal folder in the users root namespace
// we disable shared folder mode and proceed normally // we disable shared folder mode and proceed normally
@@ -726,7 +719,7 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
} }
for _, entry := range res.Entries { for _, entry := range res.Entries {
leaf := f.opt.Enc.ToStandardName(entry.Name) leaf := f.opt.Enc.ToStandardName(entry.Name)
d := fs.NewDir(leaf, time.Time{}).SetID(entry.SharedFolderId) d := fs.NewDir(leaf, time.Now()).SetID(entry.SharedFolderId)
entries = append(entries, d) entries = append(entries, d)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -913,7 +906,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
leaf := f.opt.Enc.ToStandardName(path.Base(entryPath)) leaf := f.opt.Enc.ToStandardName(path.Base(entryPath))
remote := path.Join(dir, leaf) remote := path.Join(dir, leaf)
if folderInfo != nil { if folderInfo != nil {
d := fs.NewDir(remote, time.Time{}).SetID(folderInfo.Id) d := fs.NewDir(remote, time.Now()).SetID(folderInfo.Id)
entries = append(entries, d) entries = append(entries, d)
} else if fileInfo != nil { } else if fileInfo != nil {
o, err := f.newObjectWithInfo(ctx, remote, fileInfo) o, err := f.newObjectWithInfo(ctx, remote, fileInfo)


@@ -28,14 +28,14 @@ var retryErrorCodes = []int{
509, // Bandwidth Limit Exceeded 509, // Bandwidth Limit Exceeded
} }
var errorRegex = regexp.MustCompile(`#(\d{1,3})`) var errorRegex = regexp.MustCompile(`#\d{1,3}`)
func parseFichierError(err error) int { func parseFichierError(err error) int {
matches := errorRegex.FindStringSubmatch(err.Error()) matches := errorRegex.FindStringSubmatch(err.Error())
if len(matches) == 0 { if len(matches) == 0 {
return 0 return 0
} }
code, err := strconv.Atoi(matches[1]) code, err := strconv.Atoi(matches[0])
if err != nil { if err != nil {
fs.Debugf(nil, "failed parsing fichier error: %v", err) fs.Debugf(nil, "failed parsing fichier error: %v", err)
return 0 return 0
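The difference between the two regexes above is the capture group: FindStringSubmatch returns the whole match at index 0 and the first group at index 1, so only the grouped form hands strconv.Atoi bare digits. An illustrative snippet (the error text is invented):

    re := regexp.MustCompile(`#(\d{1,3})`)
    m := re.FindStringSubmatch("1fichier error #403")
    // m[0] == "#403" (Atoi would fail on it), m[1] == "403"
    code, _ := strconv.Atoi(m[1]) // code == 403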
@@ -118,9 +118,6 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
Single: 1, Single: 1,
Pass: f.opt.FilePassword, Pass: f.opt.FilePassword,
} }
if f.opt.CDN {
request.CDN = 1
}
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
Path: "/download/get_token.cgi", Path: "/download/get_token.cgi",
@@ -408,32 +405,6 @@ func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename stri
return response, nil return response, nil
} }
func (f *Fs) moveDir(ctx context.Context, folderID int, newLeaf string, destinationFolderID int) (response *MoveDirResponse, err error) {
request := &MoveDirRequest{
FolderID: folderID,
DestinationFolderID: destinationFolderID,
Rename: newLeaf,
// DestinationUser: destinationUser,
}
opts := rest.Opts{
Method: "POST",
Path: "/folder/mv.cgi",
}
response = &MoveDirResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("couldn't move dir: %w", err)
}
return response, nil
}
func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename string) (response *CopyFileResponse, err error) { func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename string) (response *CopyFileResponse, err error) {
request := &CopyFileRequest{ request := &CopyFileRequest{
URLs: []string{url}, URLs: []string{url},
@@ -502,7 +473,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
return nil, fmt.Errorf("didn't get an upload node: %w", err) return nil, fmt.Errorf("didnt got an upload node: %w", err)
} }
// fs.Debugf(f, "Got Upload node") // fs.Debugf(f, "Got Upload node")


@@ -38,9 +38,8 @@ func init() {
Description: "1Fichier", Description: "1Fichier",
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Help: "Your API Key, get it from https://1fichier.com/console/params.pl.", Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
Name: "api_key", Name: "api_key",
Sensitive: true,
}, { }, {
Help: "If you want to download a shared folder, add this parameter.", Help: "If you want to download a shared folder, add this parameter.",
Name: "shared_folder", Name: "shared_folder",
@@ -55,11 +54,6 @@ func init() {
Name: "folder_password", Name: "folder_password",
Advanced: true, Advanced: true,
IsPassword: true, IsPassword: true,
}, {
Help: "Set if you wish to use CDN download links.",
Name: "cdn",
Default: false,
Advanced: true,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,
@@ -95,7 +89,6 @@ type Options struct {
SharedFolder string `config:"shared_folder"` SharedFolder string `config:"shared_folder"`
FilePassword string `config:"file_password"` FilePassword string `config:"file_password"`
FolderPassword string `config:"folder_password"` FolderPassword string `config:"folder_password"`
CDN bool `config:"cdn"`
Enc encoder.MultiEncoder `config:"encoding"` Enc encoder.MultiEncoder `config:"encoding"`
} }
@@ -340,7 +333,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// checking to see if there is one already - use Put() for that. // checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
if size > int64(300e9) { if size > int64(300e9) {
return nil, errors.New("File too big, can't upload") return nil, errors.New("File too big, cant upload")
} else if size == 0 { } else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles return nil, fs.ErrorCantUploadEmptyFiles
} }
@@ -488,51 +481,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil return dstObj, nil
} }
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove.
//
// If destination exists then return fs.ErrorDirExists.
//
// This is complicated by the fact that we can't use moveDir to move
// to a different directory AND rename at the same time as it can
// overwrite files in the source directory.
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
if err != nil {
return err
}
srcIDnumeric, err := strconv.Atoi(srcID)
if err != nil {
return err
}
dstDirectoryIDnumeric, err := strconv.Atoi(dstDirectoryID)
if err != nil {
return err
}
var resp *MoveDirResponse
resp, err = f.moveDir(ctx, srcIDnumeric, dstLeaf, dstDirectoryIDnumeric)
if err != nil {
return fmt.Errorf("couldn't rename leaf: %w", err)
}
if resp.Status != "OK" {
return fmt.Errorf("couldn't rename leaf: %s", resp.Message)
}
srcFs.dirCache.FlushDir(srcRemote)
return nil
}
// Copy src to this remote using server side move operations. // Copy src to this remote using server side move operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object) srcObj, ok := src.(*Object)
@@ -606,7 +554,6 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var ( var (
_ fs.Fs = (*Fs)(nil) _ fs.Fs = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil) _ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil) _ fs.Copier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil) _ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil) _ fs.PutUncheckeder = (*Fs)(nil)

View File

@@ -20,7 +20,6 @@ type DownloadRequest struct {
URL string `json:"url"` URL string `json:"url"`
Single int `json:"single"` Single int `json:"single"`
Pass string `json:"pass,omitempty"` Pass string `json:"pass,omitempty"`
CDN int `json:"cdn,omitempty"`
} }
// RemoveFolderRequest is the request structure of the corresponding request // RemoveFolderRequest is the request structure of the corresponding request
@@ -70,22 +69,6 @@ type MoveFileResponse struct {
URLs []string `json:"urls"` URLs []string `json:"urls"`
} }
// MoveDirRequest is the request structure of the corresponding request
type MoveDirRequest struct {
FolderID int `json:"folder_id"`
DestinationFolderID int `json:"destination_folder_id,omitempty"`
DestinationUser string `json:"destination_user"`
Rename string `json:"rename,omitempty"`
}
// MoveDirResponse is the response structure of the corresponding request
type MoveDirResponse struct {
Status string `json:"status"`
Message string `json:"message"`
OldName string `json:"old_name"`
NewName string `json:"new_name"`
}
// CopyFileRequest is the request structure of the corresponding request // CopyFileRequest is the request structure of the corresponding request
type CopyFileRequest struct { type CopyFileRequest struct {
URLs []string `json:"urls"` URLs []string `json:"urls"`

View File

@@ -20,6 +20,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
"path" "path"
@@ -84,7 +85,6 @@ Leave blank normally.
Fill in to make rclone start with directory of a given ID. Fill in to make rclone start with directory of a given ID.
`, `,
Sensitive: true,
}, { }, {
Name: "permanent_token", Name: "permanent_token",
Help: `Permanent Authentication Token. Help: `Permanent Authentication Token.
@@ -98,7 +98,6 @@ These tokens are normally valid for several years.
For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens
`, `,
Sensitive: true,
}, { }, {
Name: "token", Name: "token",
Help: `Session Token. Help: `Session Token.
@@ -108,8 +107,7 @@ usually valid for 1 hour.
Don't set this value - rclone will set it automatically. Don't set this value - rclone will set it automatically.
`, `,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "token_expiry", Name: "token_expiry",
Help: `Token expiry time. Help: `Token expiry time.
@@ -158,9 +156,9 @@ type Fs struct {
tokenMu sync.Mutex // hold when reading the token tokenMu sync.Mutex // hold when reading the token
token string // current access token token string // current access token
tokenExpiry time.Time // time the current token expires tokenExpiry time.Time // time the current token expires
tokenExpired atomic.Int32 tokenExpired int32 // read and written with atomic
canCopyWithName bool // set if detected that can use fi_name in copy canCopyWithName bool // set if detected that can use fi_name in copy
precision time.Duration // precision reported precision time.Duration // precision reported
} }
// Object describes a filefabric object // Object describes a filefabric object
@@ -243,7 +241,7 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, st
err = status // return the error from the RPC err = status // return the error from the RPC
code := status.GetCode() code := status.GetCode()
if code == "login_token_expired" { if code == "login_token_expired" {
f.tokenExpired.Add(1) atomic.AddInt32(&f.tokenExpired, 1)
} else { } else {
for _, retryCode := range retryStatusCodes { for _, retryCode := range retryStatusCodes {
if code == retryCode.code { if code == retryCode.code {
@@ -323,12 +321,12 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
var refreshed = false var refreshed = false
defer func() { defer func() {
if refreshed { if refreshed {
f.tokenExpired.Store(0) atomic.StoreInt32(&f.tokenExpired, 0)
} }
f.tokenMu.Unlock() f.tokenMu.Unlock()
}() }()
expired := f.tokenExpired.Load() != 0 expired := atomic.LoadInt32(&f.tokenExpired) != 0
if expired { if expired {
fs.Debugf(f, "Token invalid - refreshing") fs.Debugf(f, "Token invalid - refreshing")
} }
@@ -1188,7 +1186,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, errors.New("can't download - no id") return nil, errors.New("can't download - no id")
} }
if o.contentType == emptyMimeType { if o.contentType == emptyMimeType {
return io.NopCloser(bytes.NewReader([]byte{})), nil return ioutil.NopCloser(bytes.NewReader([]byte{})), nil
} }
fs.FixRangeOption(options, o.size) fs.FixRangeOption(options, o.size)
resp, err := o.fs.rpc(ctx, "getFile", params{ resp, err := o.fs.rpc(ctx, "getFile", params{
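The only real difference between the two sides of the tokenExpired hunk above is how the flag is made safe for concurrent use: one side keeps a bare int32 and goes through the sync/atomic functions, the other uses the atomic.Int32 type introduced in Go 1.19, which carries its methods with it. A minimal standalone sketch of that pattern (the type and method names below are illustrative, not taken from the backend):

package main

import (
	"fmt"
	"sync/atomic"
)

// tokenState tracks whether the current session token has expired.
// atomic.Int32 carries its own Load/Store/Add methods, so callers
// cannot bypass sync/atomic the way they could with a bare int32 field.
type tokenState struct {
	expired atomic.Int32
}

func (s *tokenState) markExpired()    { s.expired.Add(1) }
func (s *tokenState) reset()          { s.expired.Store(0) }
func (s *tokenState) isExpired() bool { return s.expired.Load() != 0 }

func main() {
	var s tokenState
	s.markExpired()
	fmt.Println("expired:", s.isExpired()) // true
	s.reset()
	fmt.Println("expired:", s.isExpired()) // false
}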

View File

@@ -28,7 +28,6 @@ import (
"github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env" "github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/proxy"
"github.com/rclone/rclone/lib/readers" "github.com/rclone/rclone/lib/readers"
) )
@@ -49,15 +48,13 @@ func init() {
Description: "FTP", Description: "FTP",
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "host", Name: "host",
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".", Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "user", Name: "user",
Help: "FTP username.", Help: "FTP username.",
Default: currentUser, Default: currentUser,
Sensitive: true,
}, { }, {
Name: "port", Name: "port",
Help: "FTP port number.", Help: "FTP port number.",
@@ -73,7 +70,7 @@ func init() {
When using implicit FTP over TLS the client connects using TLS When using implicit FTP over TLS the client connects using TLS
right from the start which breaks compatibility with right from the start which breaks compatibility with
non-TLS-aware servers. This is usually served over port 990 rather non-TLS-aware servers. This is usually served over port 990 rather
than port 21. Cannot be used in combination with explicit FTPS.`, than port 21. Cannot be used in combination with explicit FTP.`,
Default: false, Default: false,
}, { }, {
Name: "explicit_tls", Name: "explicit_tls",
@@ -81,7 +78,7 @@ than port 21. Cannot be used in combination with explicit FTPS.`,
When using explicit FTP over TLS the client explicitly requests When using explicit FTP over TLS the client explicitly requests
security from the server in order to upgrade a plain text connection security from the server in order to upgrade a plain text connection
to an encrypted one. Cannot be used in combination with implicit FTPS.`, to an encrypted one. Cannot be used in combination with implicit FTP.`,
Default: false, Default: false,
}, { }, {
Name: "concurrency", Name: "concurrency",
@@ -127,11 +124,6 @@ So for |concurrency 3| you'd use |--checkers 2 --transfers 2
Help: "Use MDTM to set modification time (VsFtpd quirk)", Help: "Use MDTM to set modification time (VsFtpd quirk)",
Default: false, Default: false,
Advanced: true, Advanced: true,
}, {
Name: "force_list_hidden",
Help: "Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD.",
Default: false,
Advanced: true,
}, { }, {
Name: "idle_timeout", Name: "idle_timeout",
Default: fs.Duration(60 * time.Second), Default: fs.Duration(60 * time.Second),
@@ -175,18 +167,6 @@ Enabled by default. Use 0 to disable.`,
If this is set and no password is supplied then rclone will ask for a password If this is set and no password is supplied then rclone will ask for a password
`, `,
Advanced: true, Advanced: true,
}, {
Name: "socks_proxy",
Default: "",
Help: `Socks 5 proxy host.
Supports the format user:pass@host:port, user@host:port, host:port.
Example:
myUser:myPass@localhost:9005
`,
Advanced: true,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,
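The socks_proxy value shown above is a plain "user:pass@host:port" style address which the backend later hands to its SOCKS5 dialer. As a rough illustration of the underlying idea, here is a standalone sketch that dials through an authenticated SOCKS5 proxy using golang.org/x/net/proxy rather than rclone's lib/proxy helper; the proxy credentials and the FTP address are example values only:

package main

import (
	"fmt"
	"net"

	"golang.org/x/net/proxy"
)

func main() {
	// Credentials and address as they would appear in a value like
	// "myUser:myPass@localhost:9005", split into their parts.
	auth := &proxy.Auth{User: "myUser", Password: "myPass"}
	dialer, err := proxy.SOCKS5("tcp", "localhost:9005", auth, &net.Dialer{})
	if err != nil {
		fmt.Println("proxy setup failed:", err)
		return
	}
	// Every connection made through this dialer is tunnelled via the proxy.
	conn, err := dialer.Dial("tcp", "ftp.example.com:21")
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()
	fmt.Println("connected via SOCKS5 to", conn.RemoteAddr())
}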
@@ -225,13 +205,11 @@ type Options struct {
DisableMLSD bool `config:"disable_mlsd"` DisableMLSD bool `config:"disable_mlsd"`
DisableUTF8 bool `config:"disable_utf8"` DisableUTF8 bool `config:"disable_utf8"`
WritingMDTM bool `config:"writing_mdtm"` WritingMDTM bool `config:"writing_mdtm"`
ForceListHidden bool `config:"force_list_hidden"`
IdleTimeout fs.Duration `config:"idle_timeout"` IdleTimeout fs.Duration `config:"idle_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"` CloseTimeout fs.Duration `config:"close_timeout"`
ShutTimeout fs.Duration `config:"shut_timeout"` ShutTimeout fs.Duration `config:"shut_timeout"`
AskPassword bool `config:"ask_password"` AskPassword bool `config:"ask_password"`
Enc encoder.MultiEncoder `config:"encoding"` Enc encoder.MultiEncoder `config:"encoding"`
SocksProxy string `config:"socks_proxy"`
} }
// Fs represents a remote FTP server // Fs represents a remote FTP server
@@ -249,6 +227,7 @@ type Fs struct {
pool []*ftp.ServerConn pool []*ftp.ServerConn
drain *time.Timer // used to drain the pool when we stop using the connections drain *time.Timer // used to drain the pool when we stop using the connections
tokens *pacer.TokenDispenser tokens *pacer.TokenDispenser
tlsConf *tls.Config
pacer *fs.Pacer // pacer for FTP connections pacer *fs.Pacer // pacer for FTP connections
fGetTime bool // true if the ftp library accepts GetTime fGetTime bool // true if the ftp library accepts GetTime
fSetTime bool // true if the ftp library accepts SetTime fSetTime bool // true if the ftp library accepts SetTime
@@ -330,118 +309,48 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
return len(p), nil return len(p), nil
} }
// Return a *textproto.Error if err contains one or nil otherwise
func textprotoError(err error) (errX *textproto.Error) {
if errors.As(err, &errX) {
return errX
}
return nil
}
// returns true if this FTP error should be retried
func isRetriableFtpError(err error) bool {
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusNotAvailable, ftp.StatusTransfertAborted:
return true
}
}
return false
}
// shouldRetry returns a boolean as to whether this err deserve to be // shouldRetry returns a boolean as to whether this err deserve to be
// retried. It returns the err as a convenience // retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) { func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) { if fserrors.ContextError(ctx, &err) {
return false, err return false, err
} }
if isRetriableFtpError(err) { switch errX := err.(type) {
return true, err case *textproto.Error:
switch errX.Code {
case ftp.StatusNotAvailable:
return true, err
}
} }
return fserrors.ShouldRetry(err), err return fserrors.ShouldRetry(err), err
} }
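The two versions of shouldRetry above differ in how they recognise a *textproto.Error: a type switch only matches when the error itself has that type, while errors.As (used by the textprotoError helper) also finds one that has been wrapped with fmt.Errorf("...: %w", err). A small self-contained sketch of that distinction, with the wrapping invented for illustration:

package main

import (
	"errors"
	"fmt"
	"net/textproto"
)

func main() {
	base := &textproto.Error{Code: 421, Msg: "service not available"}
	wrapped := fmt.Errorf("open: %w", base)

	// A plain type assertion only sees the outer error, so it misses the code.
	_, ok := wrapped.(*textproto.Error)
	fmt.Println("type assertion matched:", ok) // false

	// errors.As walks the wrap chain and recovers the protocol error.
	var tpErr *textproto.Error
	if errors.As(wrapped, &tpErr) {
		fmt.Println("errors.As found code:", tpErr.Code) // 421
	}
}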
// Get a TLS config with a unique session cache.
//
// We can't share session caches between connections.
//
// See: https://github.com/rclone/rclone/issues/7234
func (f *Fs) tlsConfig() *tls.Config {
var tlsConfig *tls.Config
if f.opt.TLS || f.opt.ExplicitTLS {
tlsConfig = &tls.Config{
ServerName: f.opt.Host,
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
}
if f.opt.TLSCacheSize > 0 {
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(f.opt.TLSCacheSize)
}
if f.opt.DisableTLS13 {
tlsConfig.MaxVersion = tls.VersionTLS12
}
}
return tlsConfig
}
// Open a new connection to the FTP server. // Open a new connection to the FTP server.
func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) { func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
fs.Debugf(f, "Connecting to FTP server") fs.Debugf(f, "Connecting to FTP server")
// tls.Config for this connection only. Will be used for data
// and control connections.
tlsConfig := f.tlsConfig()
// Make ftp library dial with fshttp dialer optionally using TLS // Make ftp library dial with fshttp dialer optionally using TLS
initialConnection := true
dial := func(network, address string) (conn net.Conn, err error) { dial := func(network, address string) (conn net.Conn, err error) {
fs.Debugf(f, "dial(%q,%q)", network, address) conn, err = fshttp.NewDialer(ctx).Dial(network, address)
defer func() { if f.tlsConf != nil && err == nil {
fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err) conn = tls.Client(conn, f.tlsConf)
}()
baseDialer := fshttp.NewDialer(ctx)
if f.opt.SocksProxy != "" {
conn, err = proxy.SOCKS5Dial(network, address, f.opt.SocksProxy, baseDialer)
} else {
conn, err = baseDialer.Dial(network, address)
} }
if err != nil { return
return nil, err
}
// Connect using cleartext only for non TLS
if tlsConfig == nil {
return conn, nil
}
// Initial connection only needs to be cleartext for explicit TLS
if f.opt.ExplicitTLS && initialConnection {
initialConnection = false
return conn, nil
}
// Upgrade connection to TLS
tlsConn := tls.Client(conn, tlsConfig)
// Do the initial handshake - tls.Client doesn't do it for us
// If we do this then connections to proftpd/pureftpd lock up
// See: https://github.com/rclone/rclone/issues/6426
// See: https://github.com/jlaffaye/ftp/issues/282
if false {
err = tlsConn.HandshakeContext(ctx)
if err != nil {
_ = conn.Close()
return nil, err
}
}
return tlsConn, nil
}
ftpConfig := []ftp.DialOption{
ftp.DialWithContext(ctx),
ftp.DialWithDialFunc(dial),
} }
ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dial)}
if f.opt.TLS { if f.opt.TLS {
// Our dialer takes care of TLS but ftp library also needs tlsConf // Our dialer takes care of TLS but ftp library also needs tlsConf
// as a trigger for sending PSBZ and PROT options to server. // as a trigger for sending PSBZ and PROT options to server.
ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig)) ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
} else if f.opt.ExplicitTLS { } else if f.opt.ExplicitTLS {
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(tlsConfig)) ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
// Initial connection needs to be cleartext for explicit TLS
conn, err := fshttp.NewDialer(ctx).Dial("tcp", f.dialAddr)
if err != nil {
return nil, err
}
ftpConfig = append(ftpConfig, ftp.DialWithNetConn(conn))
} }
if f.opt.DisableEPSV { if f.opt.DisableEPSV {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true)) ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
@@ -458,9 +367,6 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
if f.opt.WritingMDTM { if f.opt.WritingMDTM {
ftpConfig = append(ftpConfig, ftp.DialWithWritingMDTM(true)) ftpConfig = append(ftpConfig, ftp.DialWithWritingMDTM(true))
} }
if f.opt.ForceListHidden {
ftpConfig = append(ftpConfig, ftp.DialWithForceListHidden(true))
}
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 { if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0})) ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
} }
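One side of this hunk builds a single tls.Config in NewFs and shares it across every connection; the other builds a fresh config inside ftpConnection so that each connection gets its own TLS session cache (the situation described in issue 7234 above). A minimal sketch of constructing such a per-connection config, with an options struct loosely modelled on the fields used above rather than the backend's real Options type:

package main

import (
	"crypto/tls"
	"fmt"
)

// ftpTLSOptions mirrors the handful of TLS-related options referenced above
// (an illustrative struct, not the backend's Options type).
type ftpTLSOptions struct {
	Host              string
	SkipVerifyTLSCert bool
	TLSCacheSize      int
	DisableTLS13      bool
}

// newTLSConfig returns a fresh *tls.Config per call, so the client
// session cache is never shared between FTP connections.
func newTLSConfig(opt ftpTLSOptions) *tls.Config {
	conf := &tls.Config{
		ServerName:         opt.Host,
		InsecureSkipVerify: opt.SkipVerifyTLSCert,
	}
	if opt.TLSCacheSize > 0 {
		conf.ClientSessionCache = tls.NewLRUClientSessionCache(opt.TLSCacheSize)
	}
	if opt.DisableTLS13 {
		conf.MaxVersion = tls.VersionTLS12
	}
	return conf
}

func main() {
	a := newTLSConfig(ftpTLSOptions{Host: "ftp.example.com", TLSCacheSize: 32})
	b := newTLSConfig(ftpTLSOptions{Host: "ftp.example.com", TLSCacheSize: 32})
	fmt.Println("separate session caches:", a.ClientSessionCache != b.ClientSessionCache) // true
}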
@@ -524,7 +430,8 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
*pc = nil *pc = nil
if err != nil { if err != nil {
// If not a regular FTP error code then check the connection // If not a regular FTP error code then check the connection
if tpErr := textprotoError(err); tpErr != nil { var tpErr *textproto.Error
if !errors.As(err, &tpErr) {
nopErr := c.NoOp() nopErr := c.NoOp()
if nopErr != nil { if nopErr != nil {
fs.Debugf(f, "Connection failed, closing: %v", nopErr) fs.Debugf(f, "Connection failed, closing: %v", nopErr)
@@ -596,6 +503,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
if opt.TLS && opt.ExplicitTLS { if opt.TLS && opt.ExplicitTLS {
return nil, errors.New("implicit TLS and explicit TLS are mutually incompatible, please revise your config") return nil, errors.New("implicit TLS and explicit TLS are mutually incompatible, please revise your config")
} }
var tlsConfig *tls.Config
if opt.TLS || opt.ExplicitTLS {
tlsConfig = &tls.Config{
ServerName: opt.Host,
InsecureSkipVerify: opt.SkipVerifyTLSCert,
}
if opt.TLSCacheSize > 0 {
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(opt.TLSCacheSize)
}
if opt.DisableTLS13 {
tlsConfig.MaxVersion = tls.VersionTLS12
}
}
u := protocol + path.Join(dialAddr+"/", root) u := protocol + path.Join(dialAddr+"/", root)
ci := fs.GetConfig(ctx) ci := fs.GetConfig(ctx)
f := &Fs{ f := &Fs{
@@ -608,11 +528,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
pass: pass, pass: pass,
dialAddr: dialAddr, dialAddr: dialAddr,
tokens: pacer.NewTokenDispenser(opt.Concurrency), tokens: pacer.NewTokenDispenser(opt.Concurrency),
tlsConf: tlsConfig,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
} }
f.features = (&fs.Features{ f.features = (&fs.Features{
CanHaveEmptyDirectories: true, CanHaveEmptyDirectories: true,
PartialUploads: true,
}).Fill(ctx, f) }).Fill(ctx, f)
// set the pool drainer timer going // set the pool drainer timer going
if f.opt.IdleTimeout > 0 { if f.opt.IdleTimeout > 0 {
@@ -660,7 +580,8 @@ func (f *Fs) Shutdown(ctx context.Context) error {
// translateErrorFile turns FTP errors into rclone errors if possible for a file // translateErrorFile turns FTP errors into rclone errors if possible for a file
func translateErrorFile(err error) error { func translateErrorFile(err error) error {
if errX := textprotoError(err); errX != nil { switch errX := err.(type) {
case *textproto.Error:
switch errX.Code { switch errX.Code {
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored: case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
err = fs.ErrorObjectNotFound err = fs.ErrorObjectNotFound
@@ -671,7 +592,8 @@ func translateErrorFile(err error) error {
// translateErrorDir turns FTP errors into rclone errors if possible for a directory // translateErrorDir turns FTP errors into rclone errors if possible for a directory
func translateErrorDir(err error) error { func translateErrorDir(err error) error {
if errX := textprotoError(err); errX != nil { switch errX := err.(type) {
case *textproto.Error:
switch errX.Code { switch errX.Code {
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored: case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
err = fs.ErrorDirNotFound err = fs.ErrorDirNotFound
@@ -702,7 +624,8 @@ func (f *Fs) dirFromStandardPath(dir string) string {
// findItem finds a directory entry for the name in its parent directory // findItem finds a directory entry for the name in its parent directory
func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err error) { func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err) // defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
if remote == "" || remote == "." || remote == "/" { fullPath := path.Join(f.root, remote)
if fullPath == "" || fullPath == "." || fullPath == "/" {
// if root, assume exists and synthesize an entry // if root, assume exists and synthesize an entry
return &ftp.Entry{ return &ftp.Entry{
Name: "", Name: "",
@@ -710,38 +633,13 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
Time: time.Now(), Time: time.Now(),
}, nil }, nil
} }
dir := path.Dir(fullPath)
base := path.Base(fullPath)
c, err := f.getFtpConnection(ctx) c, err := f.getFtpConnection(ctx)
if err != nil { if err != nil {
return nil, fmt.Errorf("findItem: %w", err) return nil, fmt.Errorf("findItem: %w", err)
} }
// returns TRUE if MLST is supported which is required to call GetEntry
if c.IsTimePreciseInList() {
entry, err := c.GetEntry(f.opt.Enc.FromStandardPath(remote))
f.putFtpConnection(&c, err)
if err != nil {
err = translateErrorFile(err)
if err == fs.ErrorObjectNotFound {
return nil, nil
}
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusBadArguments:
err = nil
}
}
return nil, err
}
if entry != nil {
f.entryToStandard(entry)
}
return entry, nil
}
dir := path.Dir(remote)
base := path.Base(remote)
files, err := c.List(f.dirFromStandardPath(dir)) files, err := c.List(f.dirFromStandardPath(dir))
f.putFtpConnection(&c, err) f.putFtpConnection(&c, err)
if err != nil { if err != nil {
@@ -760,7 +658,7 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
// it returns the error fs.ErrorObjectNotFound. // it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) { func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err) // defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
entry, err := f.findItem(ctx, path.Join(f.root, remote)) entry, err := f.findItem(ctx, remote)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -782,7 +680,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
// dirExists checks the directory pointed to by remote exists or not // dirExists checks the directory pointed to by remote exists or not
func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) { func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) {
entry, err := f.findItem(ctx, path.Join(f.root, remote)) entry, err := f.findItem(ctx, remote)
if err != nil { if err != nil {
return false, fmt.Errorf("dirExists: %w", err) return false, fmt.Errorf("dirExists: %w", err)
} }
@@ -926,18 +824,32 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// getInfo reads the FileInfo for a path // getInfo reads the FileInfo for a path
func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err error) { func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err error) {
// defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err) // defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err)
file, err := f.findItem(ctx, remote) dir := path.Dir(remote)
base := path.Base(remote)
c, err := f.getFtpConnection(ctx)
if err != nil { if err != nil {
return nil, err return nil, fmt.Errorf("getInfo: %w", err)
} else if file != nil { }
info := &FileInfo{ files, err := c.List(f.dirFromStandardPath(dir))
Name: remote, f.putFtpConnection(&c, err)
Size: file.Size, if err != nil {
ModTime: file.Time, return nil, translateErrorFile(err)
precise: f.fLstTime, }
IsDir: file.Type == ftp.EntryTypeFolder,
for i := range files {
file := files[i]
f.entryToStandard(file)
if file.Name == base {
info := &FileInfo{
Name: remote,
Size: file.Size,
ModTime: file.Time,
precise: f.fLstTime,
IsDir: file.Type == ftp.EntryTypeFolder,
}
return info, nil
} }
return info, nil
} }
return nil, fs.ErrorObjectNotFound return nil, fs.ErrorObjectNotFound
} }
@@ -968,7 +880,8 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
} }
err = c.MakeDir(f.dirFromStandardPath(abspath)) err = c.MakeDir(f.dirFromStandardPath(abspath))
f.putFtpConnection(&c, err) f.putFtpConnection(&c, err)
if errX := textprotoError(err); errX != nil { switch errX := err.(type) {
case *textproto.Error:
switch errX.Code { switch errX.Code {
case ftp.StatusFileUnavailable: // dir already exists: see issue #2181 case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
err = nil err = nil
@@ -1137,7 +1050,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// SetModTime sets the modification time of the object // SetModTime sets the modification time of the object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if !o.fs.fSetTime { if !o.fs.fSetTime {
fs.Debugf(o.fs, "SetModTime is not supported") fs.Errorf(o.fs, "SetModTime is not supported")
return nil return nil
} }
c, err := o.fs.getFtpConnection(ctx) c, err := o.fs.getFtpConnection(ctx)
@@ -1209,7 +1122,8 @@ func (f *ftpReadCloser) Close() error {
// mask the error if it was caused by a premature close // mask the error if it was caused by a premature close
// NB StatusAboutToSend is to work around a bug in pureftpd // NB StatusAboutToSend is to work around a bug in pureftpd
// See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257 // See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
if errX := textprotoError(err); errX != nil { switch errX := err.(type) {
case *textproto.Error:
switch errX.Code { switch errX.Code {
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend: case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend:
err = nil err = nil
@@ -1235,26 +1149,15 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
} }
} }
} }
c, err := o.fs.getFtpConnection(ctx)
var (
fd *ftp.Response
c *ftp.ServerConn
)
err = o.fs.pacer.Call(func() (bool, error) {
c, err = o.fs.getFtpConnection(ctx)
if err != nil {
return false, err // getFtpConnection has retries already
}
fd, err = c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
}
return shouldRetry(ctx, err)
})
if err != nil { if err != nil {
return nil, fmt.Errorf("open: %w", err) return nil, fmt.Errorf("open: %w", err)
} }
fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
return nil, fmt.Errorf("open: %w", err)
}
rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs} rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
return rc, nil return rc, nil
} }
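The retry wrapper added around RetrFrom above follows rclone's usual pacer idiom: the callback returns (retry, err) and the pacer re-invokes it with backoff while retry is true. A rough standalone sketch of that shape, assuming the rclone module is available as a dependency and that the default low-level retry limit allows at least three attempts:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/pacer"
)

func main() {
	ctx := context.Background()
	p := fs.NewPacer(ctx, pacer.NewDefault(
		pacer.MinSleep(20*time.Millisecond),
		pacer.MaxSleep(2*time.Second),
		pacer.DecayConstant(2),
	))

	attempts := 0
	// The callback reports (retry, err); the pacer sleeps and calls it
	// again while retry is true, up to its configured attempt limit.
	err := p.Call(func() (bool, error) {
		attempts++
		if attempts < 3 {
			return true, errors.New("transient failure") // ask for a retry
		}
		return false, nil // success, stop retrying
	})
	fmt.Println("attempts:", attempts, "err:", err)
}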
@@ -1287,10 +1190,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} }
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in) err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
// Ignore error 250 here - sent by some servers // Ignore error 250 here - sent by some servers
if errX := textprotoError(err); errX != nil { if err != nil {
switch errX.Code { switch errX := err.(type) {
case ftp.StatusRequestedFileActionOK: case *textproto.Error:
err = nil switch errX.Code {
case ftp.StatusRequestedFileActionOK:
err = nil
}
} }
} }
if err != nil { if err != nil {

View File

@@ -34,9 +34,9 @@ func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
// test that big file uploads do not cause network i/o timeout // test that big file uploads do not cause network i/o timeout
func (f *Fs) testUploadTimeout(t *testing.T) { func (f *Fs) testUploadTimeout(t *testing.T) {
const ( const (
fileSize = 100000000 // 100 MiB fileSize = 100000000 // 100 MiB
idleTimeout = 1 * time.Second // small because test server is local idleTimeout = 40 * time.Millisecond // small because test server is local
maxTime = 10 * time.Second // prevent test hangup maxTime = 10 * time.Second // prevent test hangup
) )
if testing.Short() { if testing.Short() {

View File

@@ -19,8 +19,8 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"os"
"path" "path"
"strconv" "strconv"
"strings" "strings"
@@ -82,8 +82,7 @@ func init() {
saFile, _ := m.Get("service_account_file") saFile, _ := m.Get("service_account_file")
saCreds, _ := m.Get("service_account_credentials") saCreds, _ := m.Get("service_account_credentials")
anonymous, _ := m.Get("anonymous") anonymous, _ := m.Get("anonymous")
envAuth, _ := m.Get("env_auth") if saFile != "" || saCreds != "" || anonymous == "true" {
if saFile != "" || saCreds != "" || anonymous == "true" || envAuth == "true" {
return nil, nil return nil, nil
} }
return oauthutil.ConfigOut("", &oauthutil.Options{ return oauthutil.ConfigOut("", &oauthutil.Options{
@@ -91,21 +90,15 @@ func init() {
}) })
}, },
Options: append(oauthutil.SharedOptions, []fs.Option{{ Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "project_number", Name: "project_number",
Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.", Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
Sensitive: true,
}, {
Name: "user_project",
Help: "User project.\n\nOptional - needed only for requester pays.",
Sensitive: true,
}, { }, {
Name: "service_account_file", Name: "service_account_file",
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp, Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
}, { }, {
Name: "service_account_credentials", Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.", Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideBoth, Hide: fs.OptionHideBoth,
Sensitive: true,
}, { }, {
Name: "anonymous", Name: "anonymous",
Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.", Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
@@ -304,15 +297,6 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
Value: "DURABLE_REDUCED_AVAILABILITY", Value: "DURABLE_REDUCED_AVAILABILITY",
Help: "Durable reduced availability storage class", Help: "Durable reduced availability storage class",
}}, }},
}, {
Name: "directory_markers",
Default: false,
Advanced: true,
Help: `Upload an empty object with a trailing slash when a new directory is created
Empty folders are unsupported for bucket based remotes, this option creates an empty
object ending with "/", to persist the folder.
`,
}, { }, {
Name: "no_check_bucket", Name: "no_check_bucket",
Help: `If set, don't attempt to check the bucket exists or create it. Help: `If set, don't attempt to check the bucket exists or create it.
@@ -346,17 +330,6 @@ can't check the size and hash but the file contents will be decompressed.
Default: (encoder.Base | Default: (encoder.Base |
encoder.EncodeCrLf | encoder.EncodeCrLf |
encoder.EncodeInvalidUtf8), encoder.EncodeInvalidUtf8),
}, {
Name: "env_auth",
Help: "Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
Default: false,
Examples: []fs.OptionExample{{
Value: "false",
Help: "Enter credentials in the next step.",
}, {
Value: "true",
Help: "Get GCP IAM credentials from the environment (env vars or IAM).",
}},
}}...), }}...),
}) })
} }
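The env_auth option above amounts to Application Default Credentials: instead of the interactive OAuth flow, the HTTP client is built from the environment (GOOGLE_APPLICATION_CREDENTIALS, a gcloud credentials file, or instance metadata). A minimal sketch of just that call, independent of the backend wiring:

package main

import (
	"context"
	"fmt"

	"golang.org/x/oauth2/google"
	"google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()

	// Application Default Credentials: environment variables first,
	// then the well-known credentials file, then instance metadata.
	client, err := google.DefaultClient(ctx, storage.DevstorageFullControlScope)
	if err != nil {
		fmt.Println("no default credentials found:", err)
		return
	}
	fmt.Printf("authorized HTTP client ready: %T\n", client)
}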
@@ -364,7 +337,6 @@ can't check the size and hash but the file contents will be decompressed.
// Options defines the configuration for this backend // Options defines the configuration for this backend
type Options struct { type Options struct {
ProjectNumber string `config:"project_number"` ProjectNumber string `config:"project_number"`
UserProject string `config:"user_project"`
ServiceAccountFile string `config:"service_account_file"` ServiceAccountFile string `config:"service_account_file"`
ServiceAccountCredentials string `config:"service_account_credentials"` ServiceAccountCredentials string `config:"service_account_credentials"`
Anonymous bool `config:"anonymous"` Anonymous bool `config:"anonymous"`
@@ -377,8 +349,6 @@ type Options struct {
Decompress bool `config:"decompress"` Decompress bool `config:"decompress"`
Endpoint string `config:"endpoint"` Endpoint string `config:"endpoint"`
Enc encoder.MultiEncoder `config:"encoding"` Enc encoder.MultiEncoder `config:"encoding"`
EnvAuth bool `config:"env_auth"`
DirectoryMarkers bool `config:"directory_markers"`
} }
// Fs represents a remote storage server // Fs represents a remote storage server
@@ -474,7 +444,7 @@ func parsePath(path string) (root string) {
// split returns bucket and bucketPath from the rootRelativePath // split returns bucket and bucketPath from the rootRelativePath
// relative to f.root // relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) { func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath)) bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath) return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
} }
@@ -517,7 +487,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// try loading service account credentials from env variable, then from a file // try loading service account credentials from env variable, then from a file
if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" { if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServiceAccountFile)) loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
if err != nil { if err != nil {
return nil, fmt.Errorf("error opening service account credentials file: %w", err) return nil, fmt.Errorf("error opening service account credentials file: %w", err)
} }
@@ -530,11 +500,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil { if err != nil {
return nil, fmt.Errorf("failed configuring Google Cloud Storage Service Account: %w", err) return nil, fmt.Errorf("failed configuring Google Cloud Storage Service Account: %w", err)
} }
} else if opt.EnvAuth {
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
if err != nil {
return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
}
} else { } else {
oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig) oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
if err != nil { if err != nil {
@@ -560,9 +525,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
BucketBased: true, BucketBased: true,
BucketBasedRootOK: true, BucketBasedRootOK: true,
}).Fill(ctx, f) }).Fill(ctx, f)
if opt.DirectoryMarkers {
f.features.CanHaveEmptyDirectories = true
}
// Create a new authorized Drive client. // Create a new authorized Drive client.
f.client = oAuthClient f.client = oAuthClient
@@ -579,11 +541,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Check to see if the object exists // Check to see if the object exists
encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory) encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
get := f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx) _, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
if f.opt.UserProject != "" {
get = get.UserProject(f.opt.UserProject)
}
_, err = get.Do()
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err == nil { if err == nil {
@@ -643,13 +601,9 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
directory += "/" directory += "/"
} }
list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks) list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks)
if f.opt.UserProject != "" {
list = list.UserProject(f.opt.UserProject)
}
if !recurse { if !recurse {
list = list.Delimiter("/") list = list.Delimiter("/")
} }
foundItems := 0
for { for {
var objects *storage.Objects var objects *storage.Objects
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
@@ -665,7 +619,6 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
return err return err
} }
if !recurse { if !recurse {
foundItems += len(objects.Prefixes)
var object storage.Object var object storage.Object
for _, remote := range objects.Prefixes { for _, remote := range objects.Prefixes {
if !strings.HasSuffix(remote, "/") { if !strings.HasSuffix(remote, "/") {
@@ -686,29 +639,22 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
} }
} }
} }
foundItems += len(objects.Items)
for _, object := range objects.Items { for _, object := range objects.Items {
remote := f.opt.Enc.ToStandardPath(object.Name) remote := f.opt.Enc.ToStandardPath(object.Name)
if !strings.HasPrefix(remote, prefix) { if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", object.Name) fs.Logf(f, "Odd name received %q", object.Name)
continue continue
} }
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
// is this a directory marker?
if isDirectory {
// Don't insert the root directory
if remote == directory {
continue
}
// process directory markers as directories
remote = strings.TrimRight(remote, "/")
}
remote = remote[len(prefix):] remote = remote[len(prefix):]
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
if addBucket { if addBucket {
remote = path.Join(bucket, remote) remote = path.Join(bucket, remote)
} }
// is this a directory marker?
err = fn(remote, object, isDirectory) if isDirectory {
continue // skip directory marker
}
err = fn(remote, object, false)
if err != nil { if err != nil {
return err return err
} }
@@ -718,17 +664,6 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
} }
list.PageToken(objects.NextPageToken) list.PageToken(objects.NextPageToken)
} }
if f.opt.DirectoryMarkers && foundItems == 0 && directory != "" {
// Determine whether the directory exists or not by whether it has a marker
_, err := f.readObjectInfo(ctx, bucket, directory)
if err != nil {
if err == fs.ErrorObjectNotFound {
return fs.ErrorDirNotFound
}
return err
}
}
return nil return nil
} }
@@ -772,9 +707,6 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
return nil, errors.New("can't list buckets without project number") return nil, errors.New("can't list buckets without project number")
} }
listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks) listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks)
if f.opt.UserProject != "" {
listBuckets = listBuckets.UserProject(f.opt.UserProject)
}
for { for {
var buckets *storage.Buckets var buckets *storage.Buckets
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
@@ -892,69 +824,10 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
return f.Put(ctx, in, src, options...) return f.Put(ctx, in, src, options...)
} }
// Create directory marker file and parents
func (f *Fs) createDirectoryMarker(ctx context.Context, bucket, dir string) error {
if !f.opt.DirectoryMarkers || bucket == "" {
return nil
}
// Object to be uploaded
o := &Object{
fs: f,
modTime: time.Now(),
}
for {
_, bucketPath := f.split(dir)
// Don't create the directory marker if it is the bucket or at the very root
if bucketPath == "" {
break
}
o.remote = dir + "/"
// Check to see if object already exists
_, err := o.readObjectInfo(ctx)
if err == nil {
return nil
}
// Upload it if not
fs.Debugf(o, "Creating directory marker")
content := io.Reader(strings.NewReader(""))
err = o.Update(ctx, content, o)
if err != nil {
return fmt.Errorf("creating directory marker failed: %w", err)
}
// Now check parent directory exists
dir = path.Dir(dir)
if dir == "/" || dir == "." {
break
}
}
return nil
}
// Mkdir creates the bucket if it doesn't exist // Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) { func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
bucket, _ := f.split(dir) bucket, _ := f.split(dir)
e := f.checkBucket(ctx, bucket) return f.makeBucket(ctx, bucket)
if e != nil {
return e
}
return f.createDirectoryMarker(ctx, bucket, dir)
}
// mkdirParent creates the parent bucket/directory if it doesn't exist
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
remote = strings.TrimRight(remote, "/")
dir := path.Dir(remote)
if dir == "/" || dir == "." {
dir = ""
}
return f.Mkdir(ctx, dir)
} }
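The createDirectoryMarker function above captures the directory-marker idea in full: bucket stores have no real folders, so a zero-length object whose name ends in "/" stands in for one, and the loop walks up the path creating a marker for each missing parent. A tiny path-only sketch of that walk, with no cloud calls and invented names:

package main

import (
	"fmt"
	"path"
)

// markerNames lists the zero-length "dir/" objects that would be uploaded
// to persist an otherwise empty directory tree on a bucket-based remote.
func markerNames(dir string) []string {
	var markers []string
	for dir != "" && dir != "/" && dir != "." {
		markers = append(markers, dir+"/")
		dir = path.Dir(dir)
	}
	return markers
}

func main() {
	fmt.Println(markerNames("photos/2022/holiday"))
	// [photos/2022/holiday/ photos/2022/ photos/]
}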
// makeBucket creates the bucket if it doesn't exist // makeBucket creates the bucket if it doesn't exist
@@ -963,11 +836,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
// List something from the bucket to see if it exists. Doing it like this enables the use of a // List something from the bucket to see if it exists. Doing it like this enables the use of a
// service account that only has the "Storage Object Admin" role. See #2193 for details. // service account that only has the "Storage Object Admin" role. See #2193 for details.
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
list := f.svc.Objects.List(bucket).MaxResults(1).Context(ctx) _, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
if f.opt.UserProject != "" {
list = list.UserProject(f.opt.UserProject)
}
_, err = list.Do()
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err == nil { if err == nil {
@@ -1002,11 +871,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
if !f.opt.BucketPolicyOnly { if !f.opt.BucketPolicyOnly {
insertBucket.PredefinedAcl(f.opt.BucketACL) insertBucket.PredefinedAcl(f.opt.BucketACL)
} }
insertBucket = insertBucket.Context(ctx) _, err = insertBucket.Context(ctx).Do()
if f.opt.UserProject != "" {
insertBucket = insertBucket.UserProject(f.opt.UserProject)
}
_, err = insertBucket.Do()
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
}, nil) }, nil)
@@ -1026,28 +891,12 @@ func (f *Fs) checkBucket(ctx context.Context, bucket string) error {
// to delete was not empty. // to delete was not empty.
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) { func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
bucket, directory := f.split(dir) bucket, directory := f.split(dir)
// Remove directory marker file
if f.opt.DirectoryMarkers && bucket != "" && dir != "" {
o := &Object{
fs: f,
remote: dir + "/",
}
fs.Debugf(o, "Removing directory marker")
err := o.Remove(ctx)
if err != nil {
return fmt.Errorf("removing directory marker failed: %w", err)
}
}
if bucket == "" || directory != "" { if bucket == "" || directory != "" {
return nil return nil
} }
return f.cache.Remove(bucket, func() error { return f.cache.Remove(bucket, func() error {
return f.pacer.Call(func() (bool, error) { return f.pacer.Call(func() (bool, error) {
deleteBucket := f.svc.Buckets.Delete(bucket).Context(ctx) err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
if f.opt.UserProject != "" {
deleteBucket = deleteBucket.UserProject(f.opt.UserProject)
}
err = deleteBucket.Do()
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
}) })
@@ -1069,7 +918,7 @@ func (f *Fs) Precision() time.Duration {
// If it isn't possible then return fs.ErrorCantCopy // If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstBucket, dstPath := f.split(remote) dstBucket, dstPath := f.split(remote)
err := f.mkdirParent(ctx, remote) err := f.checkBucket(ctx, dstBucket)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -1093,11 +942,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var rewriteResponse *storage.RewriteResponse var rewriteResponse *storage.RewriteResponse
for { for {
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
rewriteRequest = rewriteRequest.Context(ctx) rewriteResponse, err = rewriteRequest.Context(ctx).Do()
if f.opt.UserProject != "" {
rewriteRequest.UserProject(f.opt.UserProject)
}
rewriteResponse, err = rewriteRequest.Do()
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
@@ -1207,17 +1052,8 @@ func (o *Object) setMetaData(info *storage.Object) {
// readObjectInfo reads the definition for an object // readObjectInfo reads the definition for an object
func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) { func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) {
bucket, bucketPath := o.split() bucket, bucketPath := o.split()
return o.fs.readObjectInfo(ctx, bucket, bucketPath) err = o.fs.pacer.Call(func() (bool, error) {
} object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
// readObjectInfo reads the definition for an object
func (f *Fs) readObjectInfo(ctx context.Context, bucket, bucketPath string) (object *storage.Object, err error) {
err = f.pacer.Call(func() (bool, error) {
get := f.svc.Objects.Get(bucket, bucketPath).Context(ctx)
if f.opt.UserProject != "" {
get = get.UserProject(f.opt.UserProject)
}
object, err = get.Do()
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
@@ -1289,11 +1125,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
if !o.fs.opt.BucketPolicyOnly { if !o.fs.opt.BucketPolicyOnly {
copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL) copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
} }
copyObject = copyObject.Context(ctx) newObject, err = copyObject.Context(ctx).Do()
if o.fs.opt.UserProject != "" {
copyObject = copyObject.UserProject(o.fs.opt.UserProject)
}
newObject, err = copyObject.Do()
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
@@ -1310,9 +1142,6 @@ func (o *Object) Storable() bool {
// Open an object for read // Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
if o.fs.opt.UserProject != "" {
o.url = o.url + "&userProject=" + o.fs.opt.UserProject
}
req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil) req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -1356,14 +1185,11 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Update the object with the contents of the io.Reader, modTime and size // Update the object with the contents of the io.Reader, modTime and size
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
bucket, bucketPath := o.split() bucket, bucketPath := o.split()
// Create parent dir/bucket if not saving directory marker err := o.fs.checkBucket(ctx, bucket)
if !strings.HasSuffix(o.remote, "/") { if err != nil {
err = o.fs.mkdirParent(ctx, o.remote) return err
if err != nil {
return err
}
} }
modTime := src.ModTime(ctx) modTime := src.ModTime(ctx)
@@ -1408,11 +1234,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if !o.fs.opt.BucketPolicyOnly { if !o.fs.opt.BucketPolicyOnly {
insertObject.PredefinedAcl(o.fs.opt.ObjectACL) insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
} }
insertObject = insertObject.Context(ctx) newObject, err = insertObject.Context(ctx).Do()
if o.fs.opt.UserProject != "" {
insertObject = insertObject.UserProject(o.fs.opt.UserProject)
}
newObject, err = insertObject.Do()
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
@@ -1427,11 +1249,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
func (o *Object) Remove(ctx context.Context) (err error) { func (o *Object) Remove(ctx context.Context) (err error) {
bucket, bucketPath := o.split() bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
deleteBucket := o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx) err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
if o.fs.opt.UserProject != "" {
deleteBucket = deleteBucket.UserProject(o.fs.opt.UserProject)
}
err = deleteBucket.Do()
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
return err return err

View File

@@ -6,7 +6,6 @@ import (
"testing" "testing"
"github.com/rclone/rclone/backend/googlecloudstorage" "github.com/rclone/rclone/backend/googlecloudstorage"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests" "github.com/rclone/rclone/fstest/fstests"
) )
@@ -17,17 +16,3 @@ func TestIntegration(t *testing.T) {
NilObject: (*googlecloudstorage.Object)(nil), NilObject: (*googlecloudstorage.Object)(nil),
}) })
} }
func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
name := "TestGoogleCloudStorage"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*googlecloudstorage.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "directory_markers", Value: "true"},
},
})
}

View File

@@ -3,7 +3,7 @@ package googlephotos
import ( import (
"context" "context"
"fmt" "fmt"
"io" "io/ioutil"
"net/http" "net/http"
"path" "path"
"testing" "testing"
@@ -12,6 +12,7 @@ import (
_ "github.com/rclone/rclone/backend/local" _ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@@ -55,7 +56,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
in, err := srcObj.Open(ctx) in, err := srcObj.Open(ctx)
require.NoError(t, err) require.NoError(t, err)
dstObj, err := f.Put(ctx, in, fs.NewOverrideRemote(srcObj, remote)) dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote()) assert.Equal(t, remote, dstObj.Remote())
_ = in.Close() _ = in.Close()
@@ -98,7 +99,7 @@ func TestIntegration(t *testing.T) {
t.Run("ObjectOpen", func(t *testing.T) { t.Run("ObjectOpen", func(t *testing.T) {
in, err := dstObj.Open(ctx) in, err := dstObj.Open(ctx)
require.NoError(t, err) require.NoError(t, err)
buf, err := io.ReadAll(in) buf, err := ioutil.ReadAll(in)
require.NoError(t, err) require.NoError(t, err)
require.NoError(t, in.Close()) require.NoError(t, in.Close())
assert.True(t, len(buf) > 1000) assert.True(t, len(buf) > 1000)
@@ -220,7 +221,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
in, err := srcObj.Open(ctx) in, err := srcObj.Open(ctx)
require.NoError(t, err) require.NoError(t, err)
dstObj, err := f.Put(ctx, in, fs.NewOverrideRemote(srcObj, remote)) dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote()) assert.Equal(t, remote, dstObj.Remote())
_ = in.Close() _ = in.Close()

View File

@@ -161,7 +161,7 @@ func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bo
if err := o.putHashes(ctx, hashMap{hashType: hash}); err != nil { if err := o.putHashes(ctx, hashMap{hashType: hash}); err != nil {
fs.Errorf(nil, "%s: failed to import: %v", remote, err) fs.Errorf(nil, "%s: failed to import: %v", remote, err)
} }
accounting.Stats(ctx).NewCheckingTransfer(obj, "importing").Done(ctx, err) accounting.Stats(ctx).NewCheckingTransfer(obj).Done(ctx, err)
doneCount++ doneCount++
} }
}) })

View File

@@ -166,7 +166,6 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
ReadMetadata: true, ReadMetadata: true,
WriteMetadata: true, WriteMetadata: true,
UserMetadata: true, UserMetadata: true,
PartialUploads: true,
} }
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs) f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)

View File

@@ -23,7 +23,6 @@ func TestIntegration(t *testing.T) {
NilObject: (*hasher.Object)(nil), NilObject: (*hasher.Object)(nil),
UnimplementableFsMethods: []string{ UnimplementableFsMethods: []string{
"OpenWriterAt", "OpenWriterAt",
"OpenChunkWriter",
}, },
UnimplementableObjectMethods: []string{}, UnimplementableObjectMethods: []string{},
} }

View File

@@ -5,6 +5,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"path" "path"
"time" "time"
@@ -117,7 +118,7 @@ func (o *Object) updateHashes(ctx context.Context) error {
defer func() { defer func() {
_ = r.Close() _ = r.Close()
}() }()
if _, err = io.Copy(io.Discard, r); err != nil { if _, err = io.Copy(ioutil.Discard, r); err != nil {
fs.Infof(o, "update failed (copy): %v", err) fs.Infof(o, "update failed (copy): %v", err)
return err return err
} }

View File

@@ -21,7 +21,6 @@ import (
"github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/pacer"
) )
// Fs represents a HDFS server // Fs represents a HDFS server
@@ -32,15 +31,8 @@ type Fs struct {
opt Options // options for this backend opt Options // options for this backend
ci *fs.ConfigInfo // global config ci *fs.ConfigInfo // global config
client *hdfs.Client client *hdfs.Client
pacer *fs.Pacer // pacer for API calls
} }
const (
minSleep = 20 * time.Millisecond
maxSleep = 10 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
// copy-paste from https://github.com/colinmarc/hdfs/blob/master/cmd/hdfs/kerberos.go // copy-paste from https://github.com/colinmarc/hdfs/blob/master/cmd/hdfs/kerberos.go
func getKerberosClient() (*krb.Client, error) { func getKerberosClient() (*krb.Client, error) {
configPath := os.Getenv("KRB5_CONFIG") configPath := os.Getenv("KRB5_CONFIG")
@@ -122,7 +114,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
opt: *opt, opt: *opt,
ci: fs.GetConfig(ctx), ci: fs.GetConfig(ctx),
client: client, client: client,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
} }
f.features = (&fs.Features{ f.features = (&fs.Features{

View File

@@ -19,10 +19,9 @@ func init() {
Description: "Hadoop distributed file system", Description: "Hadoop distributed file system",
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "namenode", Name: "namenode",
Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.", Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "username", Name: "username",
Help: "Hadoop user name.", Help: "Hadoop user name.",
@@ -30,7 +29,6 @@ func init() {
Value: "root", Value: "root",
Help: "Connect to hdfs as root.", Help: "Connect to hdfs as root.",
}}, }},
Sensitive: true,
}, { }, {
Name: "service_principal_name", Name: "service_principal_name",
Help: `Kerberos service principal name for the namenode. Help: `Kerberos service principal name for the namenode.
@@ -38,16 +36,15 @@ func init() {
Enables KERBEROS authentication. Specifies the Service Principal Name Enables KERBEROS authentication. Specifies the Service Principal Name
(SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\" (SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`, for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "data_transfer_protection", Name: "data_transfer_protection",
Help: `Kerberos data transfer protection: authentication|integrity|privacy. Help: `Kerberos data transfer protection: authentication|integrity|privacy.
Specifies whether or not authentication, data signature integrity Specifies whether or not authentication, data signature integrity
checks, and wire encryption are required when communicating with checks, and wire encryption is required when communicating the the
the datanodes. Possible values are 'authentication', 'integrity' datanodes. Possible values are 'authentication', 'integrity' and
and 'privacy'. Used only with KERBEROS enabled.`, 'privacy'. Used only with KERBEROS enabled.`,
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "privacy", Value: "privacy",
Help: "Ensure authentication, integrity and encryption enabled.", Help: "Ensure authentication, integrity and encryption enabled.",

View File

@@ -5,12 +5,10 @@ package hdfs
import ( import (
"context" "context"
"errors"
"io" "io"
"path" "path"
"time" "time"
"github.com/colinmarc/hdfs/v2"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/readers" "github.com/rclone/rclone/lib/readers"
@@ -108,7 +106,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Update object // Update object
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
realpath := o.fs.realpath(o.remote) realpath := o.fs.realpath(src.Remote())
dirname := path.Dir(realpath) dirname := path.Dir(realpath)
fs.Debugf(o.fs, "update [%s]", realpath) fs.Debugf(o.fs, "update [%s]", realpath)
@@ -143,23 +141,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err return err
} }
// If the datanodes have acknowledged all writes but not yet err = out.Close()
// to the namenode, FileWriter.Close can return ErrReplicating
// (wrapped in an os.PathError). This indicates that all data
// has been written, but the lease is still open for the file.
//
// It is safe in this case to either ignore the error (and let
// the lease expire on its own) or to call Close multiple
// times until it completes without an error. The Java client,
// for context, always chooses to retry, with exponential
// backoff.
err = o.fs.pacer.Call(func() (bool, error) {
err := out.Close()
if err == nil {
return false, nil
}
return errors.Is(err, hdfs.ErrReplicating), err
})
if err != nil { if err != nil {
cleanup() cleanup()
return err return err
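
Aside (not part of the diff above): the removed comment and pacer call describe retrying FileWriter.Close while HDFS is still replicating the final block. A minimal standalone sketch of that pattern, with an illustrative retry count and backoff instead of rclone's pacer, assuming only github.com/colinmarc/hdfs/v2:

package hdfsutil

import (
	"errors"
	"fmt"
	"time"

	"github.com/colinmarc/hdfs/v2"
)

// closeWithRetry retries FileWriter.Close while the namenode reports that
// replication is still in progress. Retry count and delays are illustrative.
func closeWithRetry(out *hdfs.FileWriter) error {
	delay := 20 * time.Millisecond
	for try := 1; try <= 5; try++ {
		err := out.Close()
		if err == nil {
			return nil
		}
		// ErrReplicating means all data has been written but the lease is
		// still open, so it is safe to wait and call Close again.
		if !errors.Is(err, hdfs.ErrReplicating) {
			return err
		}
		fmt.Printf("close attempt %d failed, retrying in %v: %v\n", try, delay, err)
		time.Sleep(delay)
		delay *= 2 // exponential backoff, as the Java client does
	}
	return errors.New("file still replicating after retries")
}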

View File

@@ -294,6 +294,15 @@ func (f *Fs) copyOrMove(ctx context.Context, isDirectory bool, operationType Cop
return &result, nil return &result, nil
} }
// copyDirectory moves the directory at the source-path to the destination-path and
// returns the resulting api-object if successful.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
func (f *Fs) copyDirectory(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
return f.copyOrMove(ctx, true, CopyOriginalPreserveModTime, source, destination, onExist)
}
// moveDirectory moves the directory at the source-path to the destination-path and // moveDirectory moves the directory at the source-path to the destination-path and
// returns the resulting api-object if successful. // returns the resulting api-object if successful.
// //

View File

@@ -2,7 +2,7 @@
package hidrive package hidrive
// FIXME HiDrive only supports file or folder names of 255 characters or less. // FIXME HiDrive only supports file or folder names of 255 characters or less.
// Operations that create files or folders with longer names will throw an HTTP error: // Operations that create files oder folder with longer names will throw a HTTP error:
// - 422 Unprocessable Entity // - 422 Unprocessable Entity
// A more graceful way for rclone to handle this may be desirable. // A more graceful way for rclone to handle this may be desirable.
@@ -338,7 +338,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, fmt.Errorf("could not access root-prefix: %w", err) return nil, fmt.Errorf("could not access root-prefix: %w", err)
} }
if item.Type != api.HiDriveObjectTypeDirectory { if item.Type != api.HiDriveObjectTypeDirectory {
return nil, errors.New("the root-prefix needs to point to a valid directory or be empty") return nil, errors.New("The root-prefix needs to point to a valid directory or be empty")
} }
} }

View File

@@ -13,6 +13,7 @@ import (
"net/http" "net/http"
"net/url" "net/url"
"path" "path"
"strconv"
"strings" "strings"
"sync" "sync"
"time" "time"
@@ -304,7 +305,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
fs: f, fs: f,
remote: remote, remote: remote,
} }
err := o.head(ctx) err := o.stat(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -316,6 +317,15 @@ func (f *Fs) url(remote string) string {
return f.endpointURL + rest.URLPathEscape(remote) return f.endpointURL + rest.URLPathEscape(remote)
} }
// parse s into an int64, on failure return def
func parseInt64(s string, def int64) int64 {
n, e := strconv.ParseInt(s, 10, 64)
if e != nil {
return def
}
return n
}
// Errors returned by parseName // Errors returned by parseName
var ( var (
errURLJoinFailed = errors.New("URLJoin failed") errURLJoinFailed = errors.New("URLJoin failed")
@@ -490,12 +500,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
fs: f, fs: f,
remote: remote, remote: remote,
} }
switch err := file.head(ctx); err { switch err := file.stat(ctx); err {
case nil: case nil:
add(file) add(file)
case fs.ErrorNotAFile: case fs.ErrorNotAFile:
// ...found a directory not a file // ...found a directory not a file
add(fs.NewDir(remote, time.Time{})) add(fs.NewDir(remote, timeUnset))
default: default:
fs.Debugf(remote, "skipping because of error: %v", err) fs.Debugf(remote, "skipping because of error: %v", err)
} }
@@ -507,7 +517,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
name = strings.TrimRight(name, "/") name = strings.TrimRight(name, "/")
remote := path.Join(dir, name) remote := path.Join(dir, name)
if isDir { if isDir {
add(fs.NewDir(remote, time.Time{})) add(fs.NewDir(remote, timeUnset))
} else { } else {
in <- remote in <- remote
} }
@@ -569,8 +579,8 @@ func (o *Object) url() string {
return o.fs.url(o.remote) return o.fs.url(o.remote)
} }
// head sends a HEAD request to update info fields in the Object // stat updates the info field in the Object
func (o *Object) head(ctx context.Context) error { func (o *Object) stat(ctx context.Context) error {
if o.fs.opt.NoHead { if o.fs.opt.NoHead {
o.size = -1 o.size = -1
o.modTime = timeUnset o.modTime = timeUnset
@@ -591,19 +601,13 @@ func (o *Object) head(ctx context.Context) error {
if err != nil { if err != nil {
return fmt.Errorf("failed to stat: %w", err) return fmt.Errorf("failed to stat: %w", err)
} }
return o.decodeMetadata(ctx, res)
}
// decodeMetadata updates info fields in the Object according to HTTP response headers
func (o *Object) decodeMetadata(ctx context.Context, res *http.Response) error {
t, err := http.ParseTime(res.Header.Get("Last-Modified")) t, err := http.ParseTime(res.Header.Get("Last-Modified"))
if err != nil { if err != nil {
t = timeUnset t = timeUnset
} }
o.size = parseInt64(res.Header.Get("Content-Length"), -1)
o.modTime = t o.modTime = t
o.contentType = res.Header.Get("Content-Type") o.contentType = res.Header.Get("Content-Type")
o.size = rest.ParseSizeFromHeaders(res.Header)
// If NoSlash is set then check ContentType to see if it is a directory // If NoSlash is set then check ContentType to see if it is a directory
if o.fs.opt.NoSlash { if o.fs.opt.NoSlash {
mediaType, _, err := mime.ParseMediaType(o.contentType) mediaType, _, err := mime.ParseMediaType(o.contentType)
@@ -649,9 +653,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if err != nil { if err != nil {
return nil, fmt.Errorf("Open failed: %w", err) return nil, fmt.Errorf("Open failed: %w", err)
} }
if err = o.decodeMetadata(ctx, res); err != nil {
return nil, fmt.Errorf("decodeMetadata failed: %w", err)
}
return res.Body, nil return res.Body, nil
} }
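
Aside (illustrative, not rclone's code): one reason to derive the size via a header helper such as rest.ParseSizeFromHeaders rather than reading Content-Length directly is that a ranged GET reports the full object size only in Content-Range. The sketch below shows that logic under the assumption that this is roughly what the helper does:

package httpmeta

import (
	"net/http"
	"strconv"
	"strings"
)

// sizeFromHeaders returns the total object size, or -1 if it is unknown.
func sizeFromHeaders(h http.Header) int64 {
	if cr := h.Get("Content-Range"); cr != "" {
		// e.g. "bytes 1-5/1234" -> 1234
		if i := strings.LastIndex(cr, "/"); i >= 0 {
			if n, err := strconv.ParseInt(cr[i+1:], 10, 64); err == nil {
				return n
			}
		}
		return -1
	}
	if cl := h.Get("Content-Length"); cl != "" {
		if n, err := strconv.ParseInt(cl, 10, 64); err == nil {
			return n
		}
	}
	return -1
}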

View File

@@ -3,7 +3,7 @@ package http
import ( import (
"context" "context"
"fmt" "fmt"
"io" "io/ioutil"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
"net/url" "net/url"
@@ -33,21 +33,20 @@ var (
lineEndSize = 1 lineEndSize = 1
) )
// prepareServer prepares the test server and shuts it down automatically // prepareServer the test server and return a function to tidy it up afterwards
// when the test completes. func prepareServer(t *testing.T) (configmap.Simple, func()) {
func prepareServer(t *testing.T) configmap.Simple {
// file server for test/files // file server for test/files
fileServer := http.FileServer(http.Dir(filesPath)) fileServer := http.FileServer(http.Dir(filesPath))
// verify the file path is correct, and also check which line endings // verify the file path is correct, and also check which line endings
// are used to get sizes right ("\n" except on Windows, but even there // are used to get sizes right ("\n" except on Windows, but even there
// we may have "\n" or "\r\n" depending on git crlf setting) // we may have "\n" or "\r\n" depending on git crlf setting)
fileList, err := os.ReadDir(filesPath) fileList, err := ioutil.ReadDir(filesPath)
require.NoError(t, err) require.NoError(t, err)
require.Greater(t, len(fileList), 0) require.Greater(t, len(fileList), 0)
for _, file := range fileList { for _, file := range fileList {
if !file.IsDir() { if !file.IsDir() {
data, _ := os.ReadFile(filepath.Join(filesPath, file.Name())) data, _ := ioutil.ReadFile(filepath.Join(filesPath, file.Name()))
if strings.HasSuffix(string(data), "\r\n") { if strings.HasSuffix(string(data), "\r\n") {
lineEndSize = 2 lineEndSize = 2
} }
@@ -79,21 +78,20 @@ func prepareServer(t *testing.T) configmap.Simple {
"url": ts.URL, "url": ts.URL,
"headers": strings.Join(headers, ","), "headers": strings.Join(headers, ","),
} }
t.Cleanup(ts.Close)
return m // return a function to tidy up
return m, ts.Close
} }
// prepare prepares the test server and shuts it down automatically // prepare the test server and return a function to tidy it up afterwards
// when the test completes. func prepare(t *testing.T) (fs.Fs, func()) {
func prepare(t *testing.T) fs.Fs { m, tidy := prepareServer(t)
m := prepareServer(t)
// Instantiate it // Instantiate it
f, err := NewFs(context.Background(), remoteName, "", m) f, err := NewFs(context.Background(), remoteName, "", m)
require.NoError(t, err) require.NoError(t, err)
return f return f, tidy
} }
func testListRoot(t *testing.T, f fs.Fs, noSlash bool) { func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
@@ -136,19 +134,22 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
} }
func TestListRoot(t *testing.T) { func TestListRoot(t *testing.T) {
f := prepare(t) f, tidy := prepare(t)
defer tidy()
testListRoot(t, f, false) testListRoot(t, f, false)
} }
func TestListRootNoSlash(t *testing.T) { func TestListRootNoSlash(t *testing.T) {
f := prepare(t) f, tidy := prepare(t)
f.(*Fs).opt.NoSlash = true f.(*Fs).opt.NoSlash = true
defer tidy()
testListRoot(t, f, true) testListRoot(t, f, true)
} }
func TestListSubDir(t *testing.T) { func TestListSubDir(t *testing.T) {
f := prepare(t) f, tidy := prepare(t)
defer tidy()
entries, err := f.List(context.Background(), "three") entries, err := f.List(context.Background(), "three")
require.NoError(t, err) require.NoError(t, err)
@@ -165,7 +166,8 @@ func TestListSubDir(t *testing.T) {
} }
func TestNewObject(t *testing.T) { func TestNewObject(t *testing.T) {
f := prepare(t) f, tidy := prepare(t)
defer tidy()
o, err := f.NewObject(context.Background(), "four/under four.txt") o, err := f.NewObject(context.Background(), "four/under four.txt")
require.NoError(t, err) require.NoError(t, err)
@@ -192,69 +194,36 @@ func TestNewObject(t *testing.T) {
} }
func TestOpen(t *testing.T) { func TestOpen(t *testing.T) {
m := prepareServer(t) f, tidy := prepare(t)
defer tidy()
for _, head := range []bool{false, true} { o, err := f.NewObject(context.Background(), "four/under four.txt")
if !head { require.NoError(t, err)
m.Set("no_head", "true")
}
f, err := NewFs(context.Background(), remoteName, "", m)
require.NoError(t, err)
for _, rangeRead := range []bool{false, true} { // Test normal read
o, err := f.NewObject(context.Background(), "four/under four.txt") fd, err := o.Open(context.Background())
require.NoError(t, err) require.NoError(t, err)
data, err := ioutil.ReadAll(fd)
if !head { require.NoError(t, err)
// Test mod time is still indeterminate require.NoError(t, fd.Close())
tObj := o.ModTime(context.Background()) if lineEndSize == 2 {
assert.Equal(t, time.Duration(0), time.Unix(0, 0).Sub(tObj)) assert.Equal(t, "beetroot\r\n", string(data))
} else {
// Test file size is still indeterminate assert.Equal(t, "beetroot\n", string(data))
assert.Equal(t, int64(-1), o.Size())
}
var data []byte
if !rangeRead {
// Test normal read
fd, err := o.Open(context.Background())
require.NoError(t, err)
data, err = io.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
if lineEndSize == 2 {
assert.Equal(t, "beetroot\r\n", string(data))
} else {
assert.Equal(t, "beetroot\n", string(data))
}
} else {
// Test with range request
fd, err := o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
require.NoError(t, err)
data, err = io.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, "eetro", string(data))
}
fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
require.NoError(t, err)
tFile := fi.ModTime()
// Test the time is always correct on the object after file open
tObj := o.ModTime(context.Background())
fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)
if !rangeRead {
// Test the file size
assert.Equal(t, int64(len(data)), o.Size())
}
}
} }
// Test with range request
fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
require.NoError(t, err)
data, err = ioutil.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, "eetro", string(data))
} }
func TestMimeType(t *testing.T) { func TestMimeType(t *testing.T) {
f := prepare(t) f, tidy := prepare(t)
defer tidy()
o, err := f.NewObject(context.Background(), "four/under four.txt") o, err := f.NewObject(context.Background(), "four/under four.txt")
require.NoError(t, err) require.NoError(t, err)
@@ -265,7 +234,8 @@ func TestMimeType(t *testing.T) {
} }
func TestIsAFileRoot(t *testing.T) { func TestIsAFileRoot(t *testing.T) {
m := prepareServer(t) m, tidy := prepareServer(t)
defer tidy()
f, err := NewFs(context.Background(), remoteName, "one%.txt", m) f, err := NewFs(context.Background(), remoteName, "one%.txt", m)
assert.Equal(t, err, fs.ErrorIsFile) assert.Equal(t, err, fs.ErrorIsFile)
@@ -274,7 +244,8 @@ func TestIsAFileRoot(t *testing.T) {
} }
func TestIsAFileSubDir(t *testing.T) { func TestIsAFileSubDir(t *testing.T) {
m := prepareServer(t) m, tidy := prepareServer(t)
defer tidy()
f, err := NewFs(context.Background(), remoteName, "three/underthree.txt", m) f, err := NewFs(context.Background(), remoteName, "three/underthree.txt", m)
assert.Equal(t, err, fs.ErrorIsFile) assert.Equal(t, err, fs.ErrorIsFile)
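
Aside (not part of the diff): the newer test helpers above rely on t.Cleanup instead of returning a tidy function, so teardown runs automatically when the test and its subtests finish. A minimal sketch of that idiom with made-up names:

package example

import (
	"net/http"
	"net/http/httptest"
	"testing"
)

// newTestServer starts a throwaway file server and registers its shutdown
// with t.Cleanup, so callers no longer need a returned tidy function.
func newTestServer(t *testing.T) string {
	t.Helper()
	ts := httptest.NewServer(http.FileServer(http.Dir(".")))
	t.Cleanup(ts.Close) // shut the server down when the test completes
	return ts.URL
}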

62
backend/hubic/auth.go Normal file
View File

@@ -0,0 +1,62 @@
package hubic
import (
"context"
"net/http"
"time"
"github.com/ncw/swift/v2"
"github.com/rclone/rclone/fs"
)
// auth is an authenticator for swift
type auth struct {
f *Fs
}
// newAuth creates a swift authenticator
func newAuth(f *Fs) *auth {
return &auth{
f: f,
}
}
// Request constructs an http.Request for authentication
//
// returns nil for not needed
func (a *auth) Request(ctx context.Context, c *swift.Connection) (r *http.Request, err error) {
const retries = 10
for try := 1; try <= retries; try++ {
err = a.f.getCredentials(context.TODO())
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
fs.Debugf(a.f, "retrying auth request %d/%d: %v", try, retries, err)
}
return nil, err
}
// Response parses the result of an http request
func (a *auth) Response(ctx context.Context, resp *http.Response) error {
return nil
}
// The public storage URL - set Internal to true to read
// internal/service net URL
func (a *auth) StorageUrl(Internal bool) string { // nolint
return a.f.credentials.Endpoint
}
// The access token
func (a *auth) Token() string {
return a.f.credentials.Token
}
// The CDN url if available
func (a *auth) CdnUrl() string { // nolint
return ""
}
// Check the interfaces are satisfied
var _ swift.Authenticator = (*auth)(nil)
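
Aside: the final line is the usual compile-time interface assertion; the build fails if *auth ever stops satisfying swift.Authenticator. A generic sketch of the same idiom with hypothetical names:

package example

import "io"

type nopWriter struct{}

// Write discards its input, reporting full success.
func (nopWriter) Write(p []byte) (int, error) { return len(p), nil }

// The build breaks here if nopWriter ever loses the Write method.
var _ io.Writer = nopWriter{}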

200
backend/hubic/hubic.go Normal file
View File

@@ -0,0 +1,200 @@
// Package hubic provides an interface to the Hubic object storage
// system.
package hubic
// This uses the normal swift mechanism to update the credentials and
// ignores the expires field returned by the Hubic API. This may need
// to be revisited after some actual experience.
import (
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"strings"
"time"
swiftLib "github.com/ncw/swift/v2"
"github.com/rclone/rclone/backend/swift"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/lib/oauthutil"
"golang.org/x/oauth2"
)
const (
rcloneClientID = "api_hubic_svWP970PvSWbw5G3PzrAqZ6X2uHeZBPI"
rcloneEncryptedClientSecret = "leZKCcqy9movLhDWLVXX8cSLp_FzoiAPeEJOIOMRw1A5RuC4iLEPDYPWVF46adC_MVonnLdVEOTHVstfBOZ_lY4WNp8CK_YWlpRZ9diT5YI"
)
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauth2.Config{
Scopes: []string{
"credentials.r", // Read OpenStack credentials
},
Endpoint: oauth2.Endpoint{
AuthURL: "https://api.hubic.com/oauth/auth/",
TokenURL: "https://api.hubic.com/oauth/token/",
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "hubic",
Description: "Hubic",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
},
Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
})
}
// credentials is the JSON returned from the Hubic API to read the
// OpenStack credentials
type credentials struct {
Token string `json:"token"` // OpenStack token
Endpoint string `json:"endpoint"` // OpenStack endpoint
Expires string `json:"expires"` // Expires date - e.g. "2015-11-09T14:24:56+01:00"
}
// Fs represents a remote hubic
type Fs struct {
fs.Fs // wrapped Fs
features *fs.Features // optional features
client *http.Client // client for oauth api
credentials credentials // returned from the Hubic API
expires time.Time // time credentials expire
}
// Object describes a swift object
type Object struct {
*swift.Object
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Object.String()
}
// ------------------------------------------------------------
// String converts this Fs to a string
func (f *Fs) String() string {
if f.Fs == nil {
return "Hubic"
}
return fmt.Sprintf("Hubic %s", f.Fs.String())
}
// getCredentials reads the OpenStack Credentials using the Hubic API
//
// The credentials are read into the Fs
func (f *Fs) getCredentials(ctx context.Context) (err error) {
req, err := http.NewRequestWithContext(ctx, "GET", "https://api.hubic.com/1.0/account/credentials", nil)
if err != nil {
return err
}
resp, err := f.client.Do(req)
if err != nil {
return err
}
defer fs.CheckClose(resp.Body, &err)
if resp.StatusCode < 200 || resp.StatusCode > 299 {
body, _ := ioutil.ReadAll(resp.Body)
bodyStr := strings.TrimSpace(strings.ReplaceAll(string(body), "\n", " "))
return fmt.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
}
decoder := json.NewDecoder(resp.Body)
var result credentials
err = decoder.Decode(&result)
if err != nil {
return err
}
// fs.Debugf(f, "Got credentials %+v", result)
if result.Token == "" || result.Endpoint == "" || result.Expires == "" {
return errors.New("couldn't read token, result and expired from credentials")
}
f.credentials = result
expires, err := time.Parse(time.RFC3339, result.Expires)
if err != nil {
return err
}
f.expires = expires
fs.Debugf(f, "Got swift credentials (expiry %v in %v)", f.expires, time.Until(f.expires))
return nil
}
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
client, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, fmt.Errorf("failed to configure Hubic: %w", err)
}
f := &Fs{
client: client,
}
// Make the swift Connection
ci := fs.GetConfig(ctx)
c := &swiftLib.Connection{
Auth: newAuth(f),
ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * ci.Timeout, // Use the timeouts in the transport
Transport: fshttp.NewTransport(ctx),
}
err = c.Authenticate(ctx)
if err != nil {
return nil, fmt.Errorf("error authenticating swift connection: %w", err)
}
// Parse config into swift.Options struct
opt := new(swift.Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// Make inner swift Fs from the connection
swiftFs, err := swift.NewFsWithConnection(ctx, opt, name, root, c, true)
if err != nil && err != fs.ErrorIsFile {
return nil, err
}
f.Fs = swiftFs
f.features = f.Fs.Features().Wrap(f)
return f, err
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.Fs
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
)
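
Aside (speculative, prompted by the package comment about ignoring the expires field): a sketch of how cached credentials could be reused until shortly before expiry. The type, field names and the 5 minute margin are assumptions for illustration only, not hubic's actual behaviour:

package example

import (
	"context"
	"time"
)

// cachedCreds loosely mirrors the token/expiry fields kept by the Fs above.
type cachedCreds struct {
	token   string
	expires time.Time
}

// valid reports whether the cached token is still usable; the 5 minute
// safety margin is an arbitrary choice for illustration.
func (c *cachedCreds) valid() bool {
	return c.token != "" && time.Until(c.expires) > 5*time.Minute
}

// ensure refreshes the credentials only when they are missing or near expiry.
func (c *cachedCreds) ensure(ctx context.Context, refresh func(context.Context) (string, time.Time, error)) error {
	if c.valid() {
		return nil
	}
	token, expires, err := refresh(ctx)
	if err != nil {
		return err
	}
	c.token, c.expires = token, expires
	return nil
}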

View File

@@ -0,0 +1,19 @@
// Test Hubic filesystem interface
package hubic_test
import (
"testing"
"github.com/rclone/rclone/backend/hubic"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestHubic:",
NilObject: (*hubic.Object)(nil),
SkipFsCheckWrap: true,
SkipObjectCheckWrap: true,
})
}

View File

@@ -133,13 +133,11 @@ Owner is able to add custom keys. Metadata feature grabs all the keys including
}, },
Options: []fs.Option{{ Options: []fs.Option{{
Name: "access_key_id", Name: "access_key_id",
Help: "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php", Help: "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php",
Sensitive: true,
}, { }, {
Name: "secret_access_key", Name: "secret_access_key",
Help: "IAS3 Secret Key (password).\n\nLeave blank for anonymous access.", Help: "IAS3 Secret Key (password).\n\nLeave blank for anonymous access.",
Sensitive: true,
}, { }, {
// their official client (https://github.com/jjjake/internetarchive) hardcodes following the two // their official client (https://github.com/jjjake/internetarchive) hardcodes following the two
Name: "endpoint", Name: "endpoint",

View File

@@ -12,6 +12,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"math/rand" "math/rand"
"net/http" "net/http"
"net/url" "net/url"
@@ -67,21 +68,13 @@ const (
legacyEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2" legacyEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
legacyConfigVersion = 0 legacyConfigVersion = 0
teliaseCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token" teliaCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
teliaseCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth" teliaCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
teliaseCloudClientID = "desktop" teliaCloudClientID = "desktop"
telianoCloudTokenURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/token"
telianoCloudAuthURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/auth"
telianoCloudClientID = "desktop"
tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token" tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token"
tele2CloudAuthURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth" tele2CloudAuthURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth"
tele2CloudClientID = "desktop" tele2CloudClientID = "desktop"
onlimeCloudTokenURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/token"
onlimeCloudAuthURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/auth"
onlimeCloudClientID = "desktop"
) )
// Register with Fs // Register with Fs
@@ -92,7 +85,7 @@ func init() {
Description: "Jottacloud", Description: "Jottacloud",
NewFs: NewFs, NewFs: NewFs,
Config: Config, Config: Config,
Options: append(oauthutil.SharedOptions, []fs.Option{{ Options: []fs.Option{{
Name: "md5_memory_limit", Name: "md5_memory_limit",
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.", Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
Default: fs.SizeSuffix(10 * 1024 * 1024), Default: fs.SizeSuffix(10 * 1024 * 1024),
@@ -127,7 +120,7 @@ func init() {
Default: (encoder.Display | Default: (encoder.Display |
encoder.EncodeWin | // :?"*<>| encoder.EncodeWin | // :?"*<>|
encoder.EncodeInvalidUtf8), encoder.EncodeInvalidUtf8),
}}...), }},
}) })
} }
@@ -142,17 +135,11 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
Value: "legacy", Value: "legacy",
Help: "Legacy authentication.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.", Help: "Legacy authentication.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
}, { }, {
Value: "telia_se", Value: "telia",
Help: "Telia Cloud authentication.\nUse this if you are using Telia Cloud (Sweden).", Help: "Telia Cloud authentication.\nUse this if you are using Telia Cloud.",
}, {
Value: "telia_no",
Help: "Telia Sky authentication.\nUse this if you are using Telia Sky (Norway).",
}, { }, {
Value: "tele2", Value: "tele2",
Help: "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.", Help: "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.",
}, {
Value: "onlime",
Help: "Onlime Cloud authentication.\nUse this if you are using Onlime Cloud.",
}}) }})
case "auth_type_done": case "auth_type_done":
// Jump to next state according to config chosen // Jump to next state according to config chosen
@@ -245,32 +232,17 @@ machines.`)
return nil, fmt.Errorf("error while saving token: %w", err) return nil, fmt.Errorf("error while saving token: %w", err)
} }
return fs.ConfigGoto("choose_device") return fs.ConfigGoto("choose_device")
case "telia_se": // telia_se cloud config case "telia": // telia cloud config
m.Set("configVersion", fmt.Sprint(configVersion)) m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, teliaseCloudClientID) m.Set(configClientID, teliaCloudClientID)
m.Set(configTokenURL, teliaseCloudTokenURL) m.Set(configTokenURL, teliaCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{ return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauth2.Config{ OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{ Endpoint: oauth2.Endpoint{
AuthURL: teliaseCloudAuthURL, AuthURL: teliaCloudAuthURL,
TokenURL: teliaseCloudTokenURL, TokenURL: teliaCloudTokenURL,
}, },
ClientID: teliaseCloudClientID, ClientID: teliaCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "telia_no": // telia_no cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, telianoCloudClientID)
m.Set(configTokenURL, telianoCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: telianoCloudAuthURL,
TokenURL: telianoCloudTokenURL,
},
ClientID: telianoCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"}, Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL, RedirectURL: oauthutil.RedirectLocalhostURL,
}, },
@@ -290,21 +262,6 @@ machines.`)
RedirectURL: oauthutil.RedirectLocalhostURL, RedirectURL: oauthutil.RedirectLocalhostURL,
}, },
}) })
case "onlime": // onlime cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, onlimeCloudClientID)
m.Set(configTokenURL, onlimeCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: onlimeCloudAuthURL,
TokenURL: onlimeCloudTokenURL,
},
ClientID: onlimeCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "choose_device": case "choose_device":
return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", `Use a non-standard device/mountpoint? return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", `Use a non-standard device/mountpoint?
Choosing no, the default, will let you access the storage used for the archive Choosing no, the default, will let you access the storage used for the archive
@@ -865,7 +822,7 @@ func (f *Fs) allocatePathRaw(file string, absolute bool) string {
func grantTypeFilter(req *http.Request) { func grantTypeFilter(req *http.Request) {
if legacyTokenURL == req.URL.String() { if legacyTokenURL == req.URL.String() {
// read the entire body // read the entire body
refreshBody, err := io.ReadAll(req.Body) refreshBody, err := ioutil.ReadAll(req.Body)
if err != nil { if err != nil {
return return
} }
@@ -875,7 +832,7 @@ func grantTypeFilter(req *http.Request) {
refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1)) refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))
// set the new ReadCloser (with a dummy Close()) // set the new ReadCloser (with a dummy Close())
req.Body = io.NopCloser(bytes.NewReader(refreshBody)) req.Body = ioutil.NopCloser(bytes.NewReader(refreshBody))
} }
} }
@@ -1832,7 +1789,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
var tempFile *os.File var tempFile *os.File
// create the cache file // create the cache file
tempFile, err = os.CreateTemp("", cachePrefix) tempFile, err = ioutil.TempFile("", cachePrefix)
if err != nil { if err != nil {
return return
} }
@@ -1860,7 +1817,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
} else { } else {
// that's a small file, just read it into memory // that's a small file, just read it into memory
var inData []byte var inData []byte
inData, err = io.ReadAll(teeReader) inData, err = ioutil.ReadAll(teeReader)
if err != nil { if err != nil {
return return
} }
@@ -1882,12 +1839,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err == nil { if err == nil {
// if the object exists delete it // if the object exists delete it
err = o.remove(ctx, true) err = o.remove(ctx, true)
if err != nil && err != fs.ErrorObjectNotFound { if err != nil {
// if delete failed then report that, unless it was because the file did not exist after all
return fmt.Errorf("failed to remove old object: %w", err) return fmt.Errorf("failed to remove old object: %w", err)
} }
} else if err != fs.ErrorObjectNotFound { }
// if the object does not exist we can just continue but if the error is something different we should report that // if the object does not exist we can just continue but if the error is something different we should report that
if err != fs.ErrorObjectNotFound {
return err return err
} }
} }
@@ -1957,7 +1914,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// copy the already uploaded bytes into the trash :) // copy the already uploaded bytes into the trash :)
var result api.UploadResponse var result api.UploadResponse
_, err = io.CopyN(io.Discard, in, response.ResumePos) _, err = io.CopyN(ioutil.Discard, in, response.ResumePos)
if err != nil { if err != nil {
return err return err
} }
@@ -1974,7 +1931,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
o.md5 = result.Md5 o.md5 = result.Md5
o.modTime = time.Unix(result.Modified/1000, 0) o.modTime = time.Unix(result.Modified/1000, 0)
} else { } else {
// If the file state is COMPLETE we don't need to upload it because the file was already found but we still need to update our metadata // If the file state is COMPLETE we don't need to upload it because the file was already found but we still ned to update our metadata
return o.readMetaData(ctx, true) return o.readMetaData(ctx, true)
} }
@@ -1995,17 +1952,10 @@ func (o *Object) remove(ctx context.Context, hard bool) error {
opts.Parameters.Set("dl", "true") opts.Parameters.Set("dl", "true")
} }
err := o.fs.pacer.Call(func() (bool, error) { return o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.jfsSrv.CallXML(ctx, &opts, nil, nil) resp, err := o.fs.jfsSrv.CallXML(ctx, &opts, nil, nil)
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if apiErr, ok := err.(*api.Error); ok {
// attempting to hard delete will fail if path does not exist, but standard delete will succeed
if apiErr.StatusCode == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
}
return err
} }
// Remove an object // Remove an object
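
Aside (illustrative): the change above maps a 404 from the hard-delete call to fs.ErrorObjectNotFound so Update can ignore an already-missing old object. A self-contained sketch of that translate-then-ignore pattern, using a stand-in API error type rather than the real jottacloud api package:

package example

import (
	"errors"
	"fmt"
	"net/http"
)

// apiError is a placeholder for a backend's typed API error.
type apiError struct{ StatusCode int }

func (e *apiError) Error() string { return fmt.Sprintf("api error: %d", e.StatusCode) }

var errObjectNotFound = errors.New("object not found")

// translateDeleteError turns a 404 from the delete call into the sentinel
// "not found" error so callers can treat it as already-deleted.
func translateDeleteError(err error) error {
	var apiErr *apiError
	if errors.As(err, &apiErr) && apiErr.StatusCode == http.StatusNotFound {
		return errObjectNotFound
	}
	return err
}

// removeThenUpload removes the old object but tolerates it being gone already.
func removeThenUpload(remove func() error) error {
	if err := translateDeleteError(remove()); err != nil && err != errObjectNotFound {
		return fmt.Errorf("failed to remove old object: %w", err)
	}
	return nil // safe to continue with the upload
}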

View File

@@ -61,10 +61,9 @@ func init() {
Default: true, Default: true,
Advanced: true, Advanced: true,
}, { }, {
Name: "user", Name: "user",
Help: "Your user name.", Help: "Your user name.",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "password", Name: "password",
Help: "Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).", Help: "Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
@@ -377,7 +376,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
for i, file := range files { for i, file := range files {
remote := path.Join(dir, f.opt.Enc.ToStandardName(file.Name)) remote := path.Join(dir, f.opt.Enc.ToStandardName(file.Name))
if file.Type == "dir" { if file.Type == "dir" {
entries[i] = fs.NewDir(remote, time.Time{}) entries[i] = fs.NewDir(remote, time.Unix(0, 0))
} else { } else {
entries[i] = &Object{ entries[i] = &Object{
fs: f, fs: f,

View File

@@ -7,13 +7,13 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"os" "os"
"path" "path"
"path/filepath" "path/filepath"
"runtime" "runtime"
"strings" "strings"
"sync" "sync"
"sync/atomic"
"time" "time"
"unicode/utf8" "unicode/utf8"
@@ -124,8 +124,8 @@ routine so this flag shouldn't normally be used.`,
Help: `Don't check to see if the files change during upload. Help: `Don't check to see if the files change during upload.
Normally rclone checks the size and modification time of files as they Normally rclone checks the size and modification time of files as they
are being uploaded and aborts with a message which starts "can't copy - are being uploaded and aborts with a message which starts "can't copy
source file is being updated" if the file changes during upload. - source file is being updated" if the file changes during upload.
However on some file systems this modification time check may fail (e.g. However on some file systems this modification time check may fail (e.g.
[Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this [Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this
@@ -244,7 +244,7 @@ type Fs struct {
precision time.Duration // precision of local filesystem precision time.Duration // precision of local filesystem
warnedMu sync.Mutex // used for locking access to 'warned'. warnedMu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string warned map[string]struct{} // whether we have warned about this string
xattrSupported atomic.Int32 // whether xattrs are supported xattrSupported int32 // whether xattrs are supported (atomic access)
// do os.Lstat or os.Stat // do os.Lstat or os.Stat
lstat func(name string) (os.FileInfo, error) lstat func(name string) (os.FileInfo, error)
@@ -267,10 +267,7 @@ type Object struct {
// ------------------------------------------------------------ // ------------------------------------------------------------
var ( var errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
errLinksNeedsSuffix = errors.New("need \"" + linkSuffix + "\" suffix to refer to symlink when using -l/--links")
)
// NewFs constructs an Fs from the path // NewFs constructs an Fs from the path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
@@ -292,7 +289,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
lstat: os.Lstat, lstat: os.Lstat,
} }
if xattrSupported { if xattrSupported {
f.xattrSupported.Store(1) f.xattrSupported = 1
} }
f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc) f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc)
f.features = (&fs.Features{ f.features = (&fs.Features{
@@ -303,8 +300,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ReadMetadata: true, ReadMetadata: true,
WriteMetadata: true, WriteMetadata: true,
UserMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported UserMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported
FilterAware: true,
PartialUploads: true,
}).Fill(ctx, f) }).Fill(ctx, f)
if opt.FollowSymlinks { if opt.FollowSymlinks {
f.lstat = os.Stat f.lstat = os.Stat
@@ -315,16 +310,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err == nil { if err == nil {
f.dev = readDevice(fi, f.opt.OneFileSystem) f.dev = readDevice(fi, f.opt.OneFileSystem)
} }
// Check to see if this is a .rclonelink if not found
hasLinkSuffix := strings.HasSuffix(f.root, linkSuffix)
if hasLinkSuffix && opt.TranslateSymlinks && os.IsNotExist(err) {
fi, err = f.lstat(strings.TrimSuffix(f.root, linkSuffix))
}
if err == nil && f.isRegular(fi.Mode()) { if err == nil && f.isRegular(fi.Mode()) {
// Handle the odd case, that a symlink was specified by name without the link suffix
if !hasLinkSuffix && opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
return nil, errLinksNeedsSuffix
}
// It is a file, so use the parent as the root // It is a file, so use the parent as the root
f.root = filepath.Dir(f.root) f.root = filepath.Dir(f.root)
// return an error with an fs which points to the parent // return an error with an fs which points to the parent
@@ -517,7 +503,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
continue continue
} }
} }
fierr = fmt.Errorf("failed to get info about directory entry %q: %w", namepath, fierr) err = fmt.Errorf("failed to read directory %q: %w", namepath, err)
fs.Errorf(dir, "%v", fierr) fs.Errorf(dir, "%v", fierr)
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync _ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
continue continue
@@ -534,14 +520,15 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
name := fi.Name() name := fi.Name()
mode := fi.Mode() mode := fi.Mode()
newRemote := f.cleanRemote(dir, name) newRemote := f.cleanRemote(dir, name)
// Don't include non directory if not included
// we leave directory filtering to the layer above
if useFilter && !fi.IsDir() && !filter.IncludeRemote(newRemote) {
continue
}
// Follow symlinks if required // Follow symlinks if required
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 { if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
localPath := filepath.Join(fsDirPath, name) localPath := filepath.Join(fsDirPath, name)
fi, err = os.Stat(localPath) fi, err = os.Stat(localPath)
// Quietly skip errors on excluded files and directories
if err != nil && useFilter && !filter.IncludeRemote(newRemote) {
continue
}
if os.IsNotExist(err) || isCircularSymlinkError(err) { if os.IsNotExist(err) || isCircularSymlinkError(err) {
// Skip bad symlinks and circular symlinks // Skip bad symlinks and circular symlinks
err = fserrors.NoRetryError(fmt.Errorf("symlink: %w", err)) err = fserrors.NoRetryError(fmt.Errorf("symlink: %w", err))
@@ -566,11 +553,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 { if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
newRemote += linkSuffix newRemote += linkSuffix
} }
// Don't include non directory if not included
// we leave directory filtering to the layer above
if useFilter && !filter.IncludeRemote(newRemote) {
continue
}
fso, err := f.newObjectWithInfo(newRemote, fi) fso, err := f.newObjectWithInfo(newRemote, fi)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -642,13 +624,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// //
// If it isn't empty it will return an error // If it isn't empty it will return an error
func (f *Fs) Rmdir(ctx context.Context, dir string) error { func (f *Fs) Rmdir(ctx context.Context, dir string) error {
localPath := f.localPath(dir) return os.Remove(f.localPath(dir))
if fi, err := os.Stat(localPath); err != nil {
return err
} else if !fi.IsDir() {
return fs.ErrorIsFile
}
return os.Remove(localPath)
} }
// Precision of the file system // Precision of the file system
@@ -669,7 +645,7 @@ func (f *Fs) readPrecision() (precision time.Duration) {
precision = time.Second precision = time.Second
// Create temporary file and test it // Create temporary file and test it
fd, err := os.CreateTemp("", "rclone") fd, err := ioutil.TempFile("", "rclone")
if err != nil { if err != nil {
// If failed return 1s // If failed return 1s
// fmt.Println("Failed to create temp file", err) // fmt.Println("Failed to create temp file", err)
@@ -1096,7 +1072,7 @@ func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err
if err != nil { if err != nil {
return nil, err return nil, err
} }
return readers.NewLimitedReadCloser(io.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil return readers.NewLimitedReadCloser(ioutil.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
} }
// Open an object for read // Open an object for read
@@ -1423,27 +1399,30 @@ func (o *Object) writeMetadata(metadata fs.Metadata) (err error) {
} }
func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string { func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
if runtime.GOOS != "windows" || !strings.HasPrefix(s, "\\") { if runtime.GOOS == "windows" {
if !filepath.IsAbs(s) { if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
s2, err := filepath.Abs(s) s2, err := filepath.Abs(s)
if err == nil { if err == nil {
s = s2 s = s2
} }
} else {
s = filepath.Clean(s)
} }
}
if runtime.GOOS == "windows" {
s = filepath.ToSlash(s) s = filepath.ToSlash(s)
vol := filepath.VolumeName(s) vol := filepath.VolumeName(s)
s = vol + enc.FromStandardPath(s[len(vol):]) s = vol + enc.FromStandardPath(s[len(vol):])
s = filepath.FromSlash(s) s = filepath.FromSlash(s)
if !noUNC { if !noUNC {
// Convert to UNC // Convert to UNC
s = file.UNCPath(s) s = file.UNCPath(s)
} }
return s return s
} }
if !filepath.IsAbs(s) {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
s = enc.FromStandardPath(s) s = enc.FromStandardPath(s)
return s return s
} }
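
Aside (not part of the diff): xattrSupported above is kept as an atomic flag so the backend can disable xattr handling after the first unsupported error and log it only once. A generic sketch of that flag pattern using Go 1.19 typed atomics; all names are illustrative:

package example

import (
	"fmt"
	"sync/atomic"
)

// featureOK is 1 while the optional feature is usable and is flipped to 0,
// exactly once, after the first unsupported error.
var featureOK atomic.Int32

func init() { featureOK.Store(1) }

// disableIfUnsupported turns the feature off; only the call that actually
// performs the 1 -> 0 transition logs the error, so it is reported once.
func disableIfUnsupported(err error) {
	if featureOK.CompareAndSwap(1, 0) {
		fmt.Printf("feature not supported - disabling: %v\n", err)
	}
}

// featureEnabled is the cheap check done before every use of the feature.
func featureEnabled() bool { return featureOK.Load() != 0 }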

View File

@@ -4,7 +4,7 @@ import (
"bytes" "bytes"
"context" "context"
"fmt" "fmt"
"io" "io/ioutil"
"os" "os"
"path" "path"
"path/filepath" "path/filepath"
@@ -14,12 +14,10 @@ import (
"time" "time"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/filter" "github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/file" "github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/readers" "github.com/rclone/rclone/lib/readers"
@@ -35,6 +33,7 @@ func TestMain(m *testing.M) {
// Test copy with source file that's updating // Test copy with source file that's updating
func TestUpdatingCheck(t *testing.T) { func TestUpdatingCheck(t *testing.T) {
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise()
filePath := "sub dir/local test" filePath := "sub dir/local test"
r.WriteFile(filePath, "content", time.Now()) r.WriteFile(filePath, "content", time.Now())
@@ -79,6 +78,7 @@ func TestUpdatingCheck(t *testing.T) {
func TestSymlink(t *testing.T) { func TestSymlink(t *testing.T) {
ctx := context.Background() ctx := context.Background()
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise()
f := r.Flocal.(*Fs) f := r.Flocal.(*Fs)
dir := f.root dir := f.root
@@ -147,24 +147,10 @@ func TestSymlink(t *testing.T) {
_, err = r.Flocal.NewObject(ctx, "symlink2.txt") _, err = r.Flocal.NewObject(ctx, "symlink2.txt")
require.Equal(t, fs.ErrorObjectNotFound, err) require.Equal(t, fs.ErrorObjectNotFound, err)
// Check that NewFs works with the suffixed version and --links
f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+linkSuffix), configmap.Simple{
"links": "true",
})
require.Equal(t, fs.ErrorIsFile, err)
require.Equal(t, dir, f2.(*Fs).root)
// Check that NewFs doesn't see the non suffixed version with --links
f2, err = NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"), configmap.Simple{
"links": "true",
})
require.Equal(t, errLinksNeedsSuffix, err)
require.Nil(t, f2)
// Check reading the object // Check reading the object
in, err := o.Open(ctx) in, err := o.Open(ctx)
require.NoError(t, err) require.NoError(t, err)
contents, err := io.ReadAll(in) contents, err := ioutil.ReadAll(in)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, "file.txt", string(contents)) require.Equal(t, "file.txt", string(contents))
require.NoError(t, in.Close()) require.NoError(t, in.Close())
@@ -172,7 +158,7 @@ func TestSymlink(t *testing.T) {
// Check reading the object with range // Check reading the object with range
in, err = o.Open(ctx, &fs.RangeOption{Start: 2, End: 5}) in, err = o.Open(ctx, &fs.RangeOption{Start: 2, End: 5})
require.NoError(t, err) require.NoError(t, err)
contents, err = io.ReadAll(in) contents, err = ioutil.ReadAll(in)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, "file.txt"[2:5+1], string(contents)) require.Equal(t, "file.txt"[2:5+1], string(contents))
require.NoError(t, in.Close()) require.NoError(t, in.Close())
@@ -191,6 +177,7 @@ func TestSymlinkError(t *testing.T) {
func TestHashOnUpdate(t *testing.T) { func TestHashOnUpdate(t *testing.T) {
ctx := context.Background() ctx := context.Background()
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "file.txt" const filePath = "file.txt"
when := time.Now() when := time.Now()
r.WriteFile(filePath, "content", when) r.WriteFile(filePath, "content", when)
@@ -221,6 +208,7 @@ func TestHashOnUpdate(t *testing.T) {
func TestHashOnDelete(t *testing.T) { func TestHashOnDelete(t *testing.T) {
ctx := context.Background() ctx := context.Background()
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "file.txt" const filePath = "file.txt"
when := time.Now() when := time.Now()
r.WriteFile(filePath, "content", when) r.WriteFile(filePath, "content", when)
@@ -249,6 +237,7 @@ func TestHashOnDelete(t *testing.T) {
func TestMetadata(t *testing.T) { func TestMetadata(t *testing.T) {
ctx := context.Background() ctx := context.Background()
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "metafile.txt" const filePath = "metafile.txt"
when := time.Now() when := time.Now()
const dayLength = len("2001-01-01") const dayLength = len("2001-01-01")
@@ -383,14 +372,12 @@ func TestMetadata(t *testing.T) {
func TestFilter(t *testing.T) { func TestFilter(t *testing.T) {
ctx := context.Background() ctx := context.Background()
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise()
when := time.Now() when := time.Now()
r.WriteFile("included", "included file", when) r.WriteFile("included", "included file", when)
r.WriteFile("excluded", "excluded file", when) r.WriteFile("excluded", "excluded file", when)
f := r.Flocal.(*Fs) f := r.Flocal.(*Fs)
// Check set up for filtering
assert.True(t, f.Features().FilterAware)
// Add a filter // Add a filter
ctx, fi := filter.AddConfig(ctx) ctx, fi := filter.AddConfig(ctx)
require.NoError(t, fi.AddRule("+ included")) require.NoError(t, fi.AddRule("+ included"))
@@ -411,147 +398,3 @@ func TestFilter(t *testing.T) {
sort.Sort(entries) sort.Sort(entries)
require.Equal(t, "[included]", fmt.Sprint(entries)) require.Equal(t, "[included]", fmt.Sprint(entries))
} }
func testFilterSymlink(t *testing.T, copyLinks bool) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
when := time.Now()
f := r.Flocal.(*Fs)
// Create a file, a directory, a symlink to a file, a symlink to a directory and a dangling symlink
r.WriteFile("included.file", "included file", when)
r.WriteFile("included.dir/included.sub.file", "included sub file", when)
require.NoError(t, os.Symlink("included.file", filepath.Join(r.LocalName, "included.file.link")))
require.NoError(t, os.Symlink("included.dir", filepath.Join(r.LocalName, "included.dir.link")))
require.NoError(t, os.Symlink("dangling", filepath.Join(r.LocalName, "dangling.link")))
defer func() {
// Reset -L/-l mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = false
f.lstat = os.Lstat
}()
if copyLinks {
// Set fs into "-L" mode
f.opt.FollowSymlinks = true
f.opt.TranslateSymlinks = false
f.lstat = os.Stat
} else {
// Set fs into "-l" mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = true
f.lstat = os.Lstat
}
// Check set up for filtering
assert.True(t, f.Features().FilterAware)
// Reset global error count
accounting.Stats(ctx).ResetErrors()
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
// Add a filter
ctx, fi := filter.AddConfig(ctx)
require.NoError(t, fi.AddRule("+ included.file"))
require.NoError(t, fi.AddRule("+ included.dir/**"))
if copyLinks {
require.NoError(t, fi.AddRule("+ included.file.link"))
require.NoError(t, fi.AddRule("+ included.dir.link/**"))
} else {
require.NoError(t, fi.AddRule("+ included.file.link.rclonelink"))
require.NoError(t, fi.AddRule("+ included.dir.link.rclonelink"))
}
require.NoError(t, fi.AddRule("- *"))
// Check listing without use filter flag
entries, err := f.List(ctx, "")
require.NoError(t, err)
if copyLinks {
// Check 1 global errors one for each dangling symlink
assert.Equal(t, int64(1), accounting.Stats(ctx).GetErrors(), "global errors found")
} else {
// Check 0 global errors as dangling symlink copied properly
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
}
accounting.Stats(ctx).ResetErrors()
sort.Sort(entries)
if copyLinks {
require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
} else {
require.Equal(t, "[dangling.link.rclonelink included.dir included.dir.link.rclonelink included.file included.file.link.rclonelink]", fmt.Sprint(entries))
}
// Add user filter flag
ctx = filter.SetUseFilter(ctx, true)
// Check listing with use filter flag
entries, err = f.List(ctx, "")
require.NoError(t, err)
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
sort.Sort(entries)
if copyLinks {
require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
} else {
require.Equal(t, "[included.dir included.dir.link.rclonelink included.file included.file.link.rclonelink]", fmt.Sprint(entries))
}
// Check listing through a symlink still works
entries, err = f.List(ctx, "included.dir")
require.NoError(t, err)
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
sort.Sort(entries)
require.Equal(t, "[included.dir/included.sub.file]", fmt.Sprint(entries))
}
func TestFilterSymlinkCopyLinks(t *testing.T) {
testFilterSymlink(t, true)
}
func TestFilterSymlinkLinks(t *testing.T) {
testFilterSymlink(t, false)
}
func TestCopySymlink(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
when := time.Now()
f := r.Flocal.(*Fs)
// Create a file and a symlink to it
r.WriteFile("src/file.txt", "hello world", when)
require.NoError(t, os.Symlink("file.txt", filepath.Join(r.LocalName, "src", "link.txt")))
defer func() {
// Reset -L/-l mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = false
f.lstat = os.Lstat
}()
// Set fs into "-l/--links" mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = true
f.lstat = os.Lstat
// Create dst
require.NoError(t, f.Mkdir(ctx, "dst"))
// Do copy from src into dst
src, err := f.NewObject(ctx, "src/link.txt.rclonelink")
require.NoError(t, err)
require.NotNil(t, src)
dst, err := operations.Copy(ctx, f, nil, "dst/link.txt.rclonelink", src)
require.NoError(t, err)
require.NotNil(t, dst)
// Test that we made a symlink and it has the right contents
dstPath := filepath.Join(r.LocalName, "dst", "link.txt")
linkContents, err := os.Readlink(dstPath)
require.NoError(t, err)
assert.Equal(t, "file.txt", linkContents)
}

View File

@@ -5,42 +5,19 @@ package local
import ( import (
"fmt" "fmt"
"runtime"
"sync"
"time" "time"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
) )
var (
statxCheckOnce sync.Once
readMetadataFromFileFn func(o *Object, m *fs.Metadata) (err error)
)
// Read the metadata from the file into metadata where possible // Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) { func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
statxCheckOnce.Do(func() {
// Check statx() is available as it was only introduced in kernel 4.11
// If not, fall back to fstatat() which was introduced in 2.6.16 which is guaranteed for all Go versions
var stat unix.Statx_t
if runtime.GOOS != "android" && unix.Statx(unix.AT_FDCWD, ".", 0, unix.STATX_ALL, &stat) != unix.ENOSYS {
readMetadataFromFileFn = readMetadataFromFileStatx
} else {
readMetadataFromFileFn = readMetadataFromFileFstatat
}
})
return readMetadataFromFileFn(o, m)
}
// Read the metadata from the file into metadata where possible
func readMetadataFromFileStatx(o *Object, m *fs.Metadata) (err error) {
flags := unix.AT_SYMLINK_NOFOLLOW flags := unix.AT_SYMLINK_NOFOLLOW
if o.fs.opt.FollowSymlinks { if o.fs.opt.FollowSymlinks {
flags = 0 flags = 0
} }
var stat unix.Statx_t var stat unix.Statx_t
// statx() was added to Linux in kernel 4.11
err = unix.Statx(unix.AT_FDCWD, o.path, flags, (0 | err = unix.Statx(unix.AT_FDCWD, o.path, flags, (0 |
unix.STATX_TYPE | // Want stx_mode & S_IFMT unix.STATX_TYPE | // Want stx_mode & S_IFMT
unix.STATX_MODE | // Want stx_mode & ~S_IFMT unix.STATX_MODE | // Want stx_mode & ~S_IFMT
@@ -68,36 +45,3 @@ func readMetadataFromFileStatx(o *Object, m *fs.Metadata) (err error) {
setTime("btime", stat.Btime) setTime("btime", stat.Btime)
return nil return nil
} }
// Read the metadata from the file into metadata where possible
func readMetadataFromFileFstatat(o *Object, m *fs.Metadata) (err error) {
flags := unix.AT_SYMLINK_NOFOLLOW
if o.fs.opt.FollowSymlinks {
flags = 0
}
var stat unix.Stat_t
// fstatat() was added to Linux in kernel 2.6.16
// Go only supports 2.6.32 or later
err = unix.Fstatat(unix.AT_FDCWD, o.path, &stat, flags)
if err != nil {
return err
}
m.Set("mode", fmt.Sprintf("%0o", stat.Mode))
m.Set("uid", fmt.Sprintf("%d", stat.Uid))
m.Set("gid", fmt.Sprintf("%d", stat.Gid))
if stat.Rdev != 0 {
m.Set("rdev", fmt.Sprintf("%x", stat.Rdev))
}
setTime := func(key string, t unix.Timespec) {
// The types of t.Sec and t.Nsec vary from int32 to int64 on
// different Linux architectures so we need to cast them to
// int64 here and hence need to quiet the linter about
// unnecessary casts.
//
// nolint: unconvert
m.Set(key, time.Unix(int64(t.Sec), int64(t.Nsec)).Format(metadataTimeFormat))
}
setTime("atime", stat.Atim)
setTime("mtime", stat.Mtim)
return nil
}
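
The longer side of this hunk probes statx() once with sync.Once and falls back to fstatat() on ENOSYS. The same detect-once-and-dispatch pattern, reduced to a self-contained sketch with a stand-in probe instead of the real syscall check:

package main

import (
	"fmt"
	"sync"
)

var (
	implOnce sync.Once
	readImpl func(path string) (string, error)
)

// probeFastSyscall stands in for the ENOSYS check against statx().
func probeFastSyscall() bool { return false }

func readFast(path string) (string, error) { return "fast: " + path, nil }
func readSlow(path string) (string, error) { return "slow: " + path, nil }

// readMetadata picks an implementation exactly once, on first use,
// and every later call goes straight to the chosen function.
func readMetadata(path string) (string, error) {
	implOnce.Do(func() {
		if probeFastSyscall() {
			readImpl = readFast
		} else {
			readImpl = readSlow
		}
	})
	return readImpl(path)
}

func main() {
	out, _ := readMetadata("/etc/hostname")
	fmt.Println(out)
}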

View File

@@ -1,6 +1,7 @@
package local package local
import ( import (
"io/ioutil"
"os" "os"
"sync" "sync"
"testing" "testing"
@@ -12,7 +13,7 @@ import (
// Check we can remove an open file // Check we can remove an open file
func TestRemove(t *testing.T) { func TestRemove(t *testing.T) {
fd, err := os.CreateTemp("", "rclone-remove-test") fd, err := ioutil.TempFile("", "rclone-remove-test")
require.NoError(t, err) require.NoError(t, err)
name := fd.Name() name := fd.Name()
defer func() { defer func() {

View File

@@ -6,6 +6,7 @@ package local
import ( import (
"fmt" "fmt"
"strings" "strings"
"sync/atomic"
"syscall" "syscall"
"github.com/pkg/xattr" "github.com/pkg/xattr"
@@ -27,7 +28,7 @@ func (f *Fs) xattrIsNotSupported(err error) bool {
// Xattrs not supported can be ENOTSUP or ENOATTR or EINVAL (on Solaris) // Xattrs not supported can be ENOTSUP or ENOATTR or EINVAL (on Solaris)
if xattrErr.Err == syscall.EINVAL || xattrErr.Err == syscall.ENOTSUP || xattrErr.Err == xattr.ENOATTR { if xattrErr.Err == syscall.EINVAL || xattrErr.Err == syscall.ENOTSUP || xattrErr.Err == xattr.ENOATTR {
// Show xattrs not supported // Show xattrs not supported
if f.xattrSupported.CompareAndSwap(1, 0) { if atomic.CompareAndSwapInt32(&f.xattrSupported, 1, 0) {
fs.Errorf(f, "xattrs not supported - disabling: %v", err) fs.Errorf(f, "xattrs not supported - disabling: %v", err)
} }
return true return true
@@ -40,7 +41,7 @@ func (f *Fs) xattrIsNotSupported(err error) bool {
// It doesn't return any attributes owned by this backend in // It doesn't return any attributes owned by this backend in
// metadataKeys // metadataKeys
func (o *Object) getXattr() (metadata fs.Metadata, err error) { func (o *Object) getXattr() (metadata fs.Metadata, err error) {
if !xattrSupported || o.fs.xattrSupported.Load() == 0 { if !xattrSupported || atomic.LoadInt32(&o.fs.xattrSupported) == 0 {
return nil, nil return nil, nil
} }
var list []string var list []string
@@ -89,7 +90,7 @@ func (o *Object) getXattr() (metadata fs.Metadata, err error) {
// //
// It doesn't set any attributes owned by this backend in metadataKeys // It doesn't set any attributes owned by this backend in metadataKeys
func (o *Object) setXattr(metadata fs.Metadata) (err error) { func (o *Object) setXattr(metadata fs.Metadata) (err error) {
if !xattrSupported || o.fs.xattrSupported.Load() == 0 { if !xattrSupported || atomic.LoadInt32(&o.fs.xattrSupported) == 0 {
return nil return nil
} }
for k, value := range metadata { for k, value := range metadata {
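
Both versions of this file keep a shared flag that is flipped exactly once when the filesystem first reports that xattrs are unsupported, so the warning is logged a single time and later calls become cheap no-ops. A stand-alone sketch of that pattern (the failing xattr call is simulated):

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

// supported starts at 1 and is flipped to 0 the first time the
// filesystem reports "not supported".
var supported int32 = 1

var errNotSupported = errors.New("operation not supported")

func getAttrs(path string) (map[string]string, error) {
	if atomic.LoadInt32(&supported) == 0 {
		return nil, nil // feature already disabled
	}
	err := errNotSupported // stand-in for a real xattr list call failing
	if err != nil {
		if atomic.CompareAndSwapInt32(&supported, 1, 0) {
			fmt.Println("xattrs not supported - disabling:", err)
		}
		return nil, nil
	}
	return map[string]string{}, nil
}

func main() {
	getAttrs("/tmp/a") // logs the warning
	getAttrs("/tmp/b") // silent
}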

View File

@@ -69,11 +69,6 @@ func (w *BinWriter) WritePu64(val int64) {
w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))]) w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
} }
// WriteP64 writes a signed long as an unsigned varint
func (w *BinWriter) WriteP64(val int64) {
w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
}
// WriteString writes a zero-terminated string // WriteString writes a zero-terminated string
func (w *BinWriter) WriteString(str string) { func (w *BinWriter) WriteString(str string) {
buf := []byte(str) buf := []byte(str)
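
WritePu64 and WriteP64 above both pack the value with binary.PutUvarint after reinterpreting the int64 as uint64. The encoding in isolation:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := make([]byte, binary.MaxVarintLen64)

	// An int64 (here a Unix timestamp) reinterpreted as uint64 and
	// written as an unsigned varint, as the writers above do.
	n := binary.PutUvarint(buf, uint64(int64(1662400571)))
	fmt.Printf("% x\n", buf[:n])

	// Decoding recovers the original value.
	v, _ := binary.Uvarint(buf[:n])
	fmt.Println(int64(v))
}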

View File

@@ -18,6 +18,7 @@ import (
"encoding/hex" "encoding/hex"
"encoding/json" "encoding/json"
"io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
@@ -85,19 +86,13 @@ func init() {
Name: "mailru", Name: "mailru",
Description: "Mail.ru Cloud", Description: "Mail.ru Cloud",
NewFs: NewFs, NewFs: NewFs,
Options: append(oauthutil.SharedOptions, []fs.Option{{ Options: []fs.Option{{
Name: "user", Name: "user",
Help: "User name (usually email).", Help: "User name (usually email).",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "pass", Name: "pass",
Help: `Password. Help: "Password.",
This must be an app password - rclone will not work with your normal
password. See the Configuration section in the docs for how to make an
app password.
`,
Required: true, Required: true,
IsPassword: true, IsPassword: true,
}, { }, {
@@ -214,7 +209,7 @@ Supported quirks: atomicmkdir binlist unknowndirs`,
encoder.EncodeWin | // :?"*<>| encoder.EncodeWin | // :?"*<>|
encoder.EncodeBackSlash | encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8), encoder.EncodeInvalidUtf8),
}}...), }},
}) })
} }
@@ -646,7 +641,12 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
return nil, -1, err return nil, -1, err
} }
modTime := time.Unix(int64(item.Mtime), 0) mTime := int64(item.Mtime)
if mTime < 0 {
fs.Debugf(f, "Fixing invalid timestamp %d on mailru file %q", mTime, remote)
mTime = 0
}
modTime := time.Unix(mTime, 0)
isDir, err := f.isDir(item.Kind, remote) isDir, err := f.isDir(item.Kind, remote)
if err != nil { if err != nil {
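
One side of the hunk above clamps corrupted (negative) Mtime values to zero before converting them with time.Unix. In isolation, with an assumed bad value:

package main

import (
	"fmt"
	"time"
)

func main() {
	mTime := int64(-1) // assumed corrupted mtime from the API
	if mTime < 0 {
		mTime = 0 // clamp before conversion, as in the hunk above
	}
	fmt.Println(time.Unix(mTime, 0).UTC()) // 1970-01-01 00:00:00 +0000 UTC
}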
@@ -1660,7 +1660,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Attempt to put by calculating hash in memory // Attempt to put by calculating hash in memory
if trySpeedup && size <= int64(o.fs.opt.SpeedupMaxMem) { if trySpeedup && size <= int64(o.fs.opt.SpeedupMaxMem) {
fileBuf, err = io.ReadAll(in) fileBuf, err = ioutil.ReadAll(in)
if err != nil { if err != nil {
return err return err
} }
@@ -1703,7 +1703,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if size <= mrhash.Size { if size <= mrhash.Size {
// Optimize upload: skip extra request if data fits in the hash buffer. // Optimize upload: skip extra request if data fits in the hash buffer.
if fileBuf == nil { if fileBuf == nil {
fileBuf, err = io.ReadAll(wrapIn) fileBuf, err = ioutil.ReadAll(wrapIn)
} }
if fileHash == nil && err == nil { if fileHash == nil && err == nil {
fileHash = mrhash.Sum(fileBuf) fileHash = mrhash.Sum(fileBuf)
@@ -2058,7 +2058,7 @@ func (o *Object) addFileMetaData(ctx context.Context, overwrite bool) error {
req.WritePu16(0) // revision req.WritePu16(0) // revision
req.WriteString(o.fs.opt.Enc.FromStandardPath(o.absPath())) req.WriteString(o.fs.opt.Enc.FromStandardPath(o.absPath()))
req.WritePu64(o.size) req.WritePu64(o.size)
req.WriteP64(o.modTime.Unix()) req.WritePu64(o.modTime.Unix())
req.WritePu32(0) req.WritePu32(0)
req.Write(o.mrHash) req.Write(o.mrHash)
@@ -2214,7 +2214,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
fs.Debugf(o, "Server returned full content instead of range") fs.Debugf(o, "Server returned full content instead of range")
if start > 0 { if start > 0 {
// Discard the beginning of the data // Discard the beginning of the data
_, err = io.CopyN(io.Discard, wrapStream, start) _, err = io.CopyN(ioutil.Discard, wrapStream, start)
if err != nil { if err != nil {
closeBody(res) closeBody(res)
return nil, err return nil, err

View File

@@ -58,10 +58,9 @@ func init() {
Description: "Mega", Description: "Mega",
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "user", Name: "user",
Help: "User name.", Help: "User name.",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "pass", Name: "pass",
Help: "Password.", Help: "Password.",
@@ -84,17 +83,6 @@ than permanently deleting them. If you specify this then rclone will
permanently delete objects instead.`, permanently delete objects instead.`,
Default: false, Default: false,
Advanced: true, Advanced: true,
}, {
Name: "use_https",
Help: `Use HTTPS for transfers.
MEGA uses plain text HTTP connections by default.
Some ISPs throttle HTTP connections, which causes transfers to become very slow.
Enabling this will force MEGA to use HTTPS for all transfers.
HTTPS is normally not necessary since all data is already encrypted anyway.
Enabling it will increase CPU usage and add network overhead.`,
Default: false,
Advanced: true,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,
@@ -112,7 +100,6 @@ type Options struct {
Pass string `config:"pass"` Pass string `config:"pass"`
Debug bool `config:"debug"` Debug bool `config:"debug"`
HardDelete bool `config:"hard_delete"` HardDelete bool `config:"hard_delete"`
UseHTTPS bool `config:"use_https"`
Enc encoder.MultiEncoder `config:"encoding"` Enc encoder.MultiEncoder `config:"encoding"`
} }
@@ -206,7 +193,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
} }
ci := fs.GetConfig(ctx) ci := fs.GetConfig(ctx)
// cache *mega.Mega on username so we can reuse and share // cache *mega.Mega on username so we can re-use and share
// them between remotes. They are expensive to make as they // them between remotes. They are expensive to make as they
// contain all the objects and sharing the objects makes the // contain all the objects and sharing the objects makes the
// move code easier as we don't have to worry about mixing // move code easier as we don't have to worry about mixing
@@ -217,7 +204,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if srv == nil { if srv == nil {
srv = mega.New().SetClient(fshttp.NewClient(ctx)) srv = mega.New().SetClient(fshttp.NewClient(ctx))
srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
srv.SetHTTPS(opt.UseHTTPS)
srv.SetLogger(func(format string, v ...interface{}) { srv.SetLogger(func(format string, v ...interface{}) {
fs.Infof("*go-mega*", format, v...) fs.Infof("*go-mega*", format, v...)
}) })
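
The comment above explains why the backend caches *mega.Mega per username. The general shape of such a keyed client cache, sketched with a placeholder client type instead of the real mega client:

package main

import (
	"fmt"
	"sync"
)

// client is a placeholder for an expensive-to-build API client such as
// *mega.Mega.
type client struct{ user string }

var (
	cacheMu sync.Mutex
	cache   = map[string]*client{}
)

// getClient returns the cached client for a user, creating it only on
// first use, so remotes sharing credentials share one client.
func getClient(user string) *client {
	cacheMu.Lock()
	defer cacheMu.Unlock()
	if c, ok := cache[user]; ok {
		return c
	}
	c := &client{user: user}
	cache[user] = c
	return c
}

func main() {
	a := getClient("alice")
	b := getClient("alice")
	fmt.Println(a == b) // true - the same client is reused
}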

View File

@@ -8,6 +8,7 @@ import (
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"path" "path"
"strings" "strings"
"sync" "sync"
@@ -574,7 +575,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
} }
data = data[:limit] data = data[:limit]
} }
return io.NopCloser(bytes.NewBuffer(data)), nil return ioutil.NopCloser(bytes.NewBuffer(data)), nil
} }
// Update the object with the contents of the io.Reader, modTime and size // Update the object with the contents of the io.Reader, modTime and size
@@ -582,7 +583,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
bucket, bucketPath := o.split() bucket, bucketPath := o.split()
data, err := io.ReadAll(in) data, err := ioutil.ReadAll(in)
if err != nil { if err != nil {
return fmt.Errorf("failed to update memory object: %w", err) return fmt.Errorf("failed to update memory object: %w", err)
} }

View File

@@ -12,6 +12,7 @@ import (
"fmt" "fmt"
gohash "hash" gohash "hash"
"io" "io"
"io/ioutil"
"math/rand" "math/rand"
"net/http" "net/http"
"net/url" "net/url"
@@ -65,13 +66,11 @@ HTTP is provided primarily for debugging purposes.`,
Help: `Domain+path of NetStorage host to connect to. Help: `Domain+path of NetStorage host to connect to.
Format should be ` + "`<domain>/<internal folders>`", Format should be ` + "`<domain>/<internal folders>`",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "account", Name: "account",
Help: "Set the NetStorage account name", Help: "Set the NetStorage account name",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "secret", Name: "secret",
Help: `Set the NetStorage account secret/G2O key for authentication. Help: `Set the NetStorage account secret/G2O key for authentication.
@@ -821,8 +820,6 @@ func (f *Fs) getAuth(req *http.Request) error {
// Set Authorization header // Set Authorization header
dataHeader := generateDataHeader(f) dataHeader := generateDataHeader(f)
path := req.URL.RequestURI() path := req.URL.RequestURI()
//lint:ignore SA1008 false positive when running staticcheck, the header name is according to docs even if not canonical
//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1008
actionHeader := req.Header["X-Akamai-ACS-Action"][0] actionHeader := req.Header["X-Akamai-ACS-Action"][0]
fs.Debugf(nil, "NetStorage API %s call %s for path %q", req.Method, actionHeader, path) fs.Debugf(nil, "NetStorage API %s call %s for path %q", req.Method, actionHeader, path)
req.Header.Set("X-Akamai-ACS-Auth-Data", dataHeader) req.Header.Set("X-Akamai-ACS-Auth-Data", dataHeader)
@@ -975,7 +972,7 @@ func (o *Object) netStorageUploadRequest(ctx context.Context, in io.Reader, src
URL = o.fs.url(src.Remote()) URL = o.fs.url(src.Remote())
} }
if strings.HasSuffix(URL, ".rclonelink") { if strings.HasSuffix(URL, ".rclonelink") {
bits, err := io.ReadAll(in) bits, err := ioutil.ReadAll(in)
if err != nil { if err != nil {
return err return err
} }
@@ -1061,7 +1058,7 @@ func (o *Object) netStorageDownloadRequest(ctx context.Context, options []fs.Ope
if strings.HasSuffix(URL, ".rclonelink") && o.target != "" { if strings.HasSuffix(URL, ".rclonelink") && o.target != "" {
fs.Infof(nil, "Converting a symlink to the rclonelink file on download %q", URL) fs.Infof(nil, "Converting a symlink to the rclonelink file on download %q", URL)
reader := strings.NewReader(o.target) reader := strings.NewReader(o.target)
readcloser := io.NopCloser(reader) readcloser := ioutil.NopCloser(reader)
return readcloser, nil return readcloser, nil
} }

View File

@@ -126,7 +126,6 @@ type HashesType struct {
Sha1Hash string `json:"sha1Hash"` // hex encoded SHA1 hash for the contents of the file (if available) Sha1Hash string `json:"sha1Hash"` // hex encoded SHA1 hash for the contents of the file (if available)
Crc32Hash string `json:"crc32Hash"` // hex encoded CRC32 value of the file (if available) Crc32Hash string `json:"crc32Hash"` // hex encoded CRC32 value of the file (if available)
QuickXorHash string `json:"quickXorHash"` // base64 encoded QuickXorHash value of the file (if available) QuickXorHash string `json:"quickXorHash"` // base64 encoded QuickXorHash value of the file (if available)
Sha256Hash string `json:"sha256Hash"` // hex encoded SHA256 value of the file (if available)
} }
// FileFacet groups file-related data on OneDrive into a single structure. // FileFacet groups file-related data on OneDrive into a single structure.

View File

@@ -131,11 +131,10 @@ Note that the chunks will be buffered into memory.`,
Default: defaultChunkSize, Default: defaultChunkSize,
Advanced: true, Advanced: true,
}, { }, {
Name: "drive_id", Name: "drive_id",
Help: "The ID of the drive to use.", Help: "The ID of the drive to use.",
Default: "", Default: "",
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "drive_type", Name: "drive_type",
Help: "The type of the drive (" + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + ").", Help: "The type of the drive (" + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + ").",
@@ -149,8 +148,7 @@ This isn't normally needed, but in special circumstances you might
know the folder ID that you wish to access but not be able to get know the folder ID that you wish to access but not be able to get
there through a path traversal. there through a path traversal.
`, `,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "access_scopes", Name: "access_scopes",
Help: `Set scopes to be requested by rclone. Help: `Set scopes to be requested by rclone.
@@ -198,9 +196,7 @@ listing, set this option.`,
}, { }, {
Name: "server_side_across_configs", Name: "server_side_across_configs",
Default: false, Default: false,
Help: `Deprecated: use --server-side-across-configs instead. Help: `Allow server-side operations (e.g. copy) to work across different onedrive configs.
Allow server-side operations (e.g. copy) to work across different onedrive configs.
This will only work if you are copying between two OneDrive *Personal* drives AND This will only work if you are copying between two OneDrive *Personal* drives AND
the files to copy are already shared between them. In other cases, rclone will the files to copy are already shared between them. In other cases, rclone will
@@ -261,67 +257,6 @@ this flag there.
Help: `Set the password for links created by the link command. Help: `Set the password for links created by the link command.
At the time of writing this only works with OneDrive personal paid accounts. At the time of writing this only works with OneDrive personal paid accounts.
`,
Advanced: true,
Sensitive: true,
}, {
Name: "hash_type",
Default: "auto",
Help: `Specify the hash in use for the backend.
This specifies the hash type in use. If set to "auto" it will use the
default hash which is QuickXorHash.
Before rclone 1.62 an SHA1 hash was used by default for Onedrive
Personal. For 1.62 and later the default is to use a QuickXorHash for
all onedrive types. If an SHA1 hash is desired then set this option
accordingly.
From July 2023 QuickXorHash will be the only available hash for
both OneDrive for Business and OneDrive Personal.
This can be set to "none" to not use any hashes.
If the hash requested does not exist on the object, it will be
returned as an empty string which is treated as a missing hash by
rclone.
`,
Examples: []fs.OptionExample{{
Value: "auto",
Help: "Rclone chooses the best hash",
}, {
Value: "quickxor",
Help: "QuickXor",
}, {
Value: "sha1",
Help: "SHA1",
}, {
Value: "sha256",
Help: "SHA256",
}, {
Value: "crc32",
Help: "CRC32",
}, {
Value: "none",
Help: "None - don't use any hashes",
}},
Advanced: true,
}, {
Name: "av_override",
Default: false,
Help: `Allows download of files the server thinks has a virus.
The onedrive/sharepoint server may check files uploaded with an Anti
Virus checker. If it detects any potential viruses or malware it will
block download of the file.
In this case you will see a message like this
server reports this file is infected with a virus - use --onedrive-av-override to download anyway: Infected (name of virus): 403 Forbidden:
If you are 100% sure you want to download this file anyway then use
the --onedrive-av-override flag, or av_override = true in the config
file.
`, `,
Advanced: true, Advanced: true,
}, { }, {
@@ -572,18 +507,15 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
case "url": case "url":
return fs.ConfigInput("url_end", "config_site_url", `Site URL return fs.ConfigInput("url_end", "config_site_url", `Site URL
Examples: Example: "https://contoso.sharepoint.com/sites/mysite" or "mysite"
- "mysite"
- "https://XXX.sharepoint.com/sites/mysite"
- "https://XXX.sharepoint.com/teams/ID"
`) `)
case "url_end": case "url_end":
siteURL := config.Result siteURL := config.Result
re := regexp.MustCompile(`https://.*\.sharepoint\.com(/.*)`) re := regexp.MustCompile(`https://.*\.sharepoint.com/sites/(.*)`)
match := re.FindStringSubmatch(siteURL) match := re.FindStringSubmatch(siteURL)
if len(match) == 2 { if len(match) == 2 {
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{ return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
relativePath: match[1], relativePath: "/sites/" + match[1],
}) })
} }
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{ return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
@@ -665,8 +597,6 @@ type Options struct {
LinkScope string `config:"link_scope"` LinkScope string `config:"link_scope"`
LinkType string `config:"link_type"` LinkType string `config:"link_type"`
LinkPassword string `config:"link_password"` LinkPassword string `config:"link_password"`
HashType string `config:"hash_type"`
AVOverride bool `config:"av_override"`
Enc encoder.MultiEncoder `config:"encoding"` Enc encoder.MultiEncoder `config:"encoding"`
} }
@@ -683,7 +613,6 @@ type Fs struct {
tokenRenewer *oauthutil.Renew // renew the token on expiry tokenRenewer *oauthutil.Renew // renew the token on expiry
driveID string // ID to use for querying Microsoft Graph driveID string // ID to use for querying Microsoft Graph
driveType string // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive driveType string // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
hashType hash.Type // type of the hash we are using
} }
// Object describes a OneDrive object // Object describes a OneDrive object
@@ -697,7 +626,8 @@ type Object struct {
size int64 // size of the object size int64 // size of the object
modTime time.Time // modification time of the object modTime time.Time // modification time of the object
id string // ID of the object id string // ID of the object
hash string // Hash of the content, usually QuickXorHash but set as hash_type sha1 string // SHA-1 of the object content
quickxorhash string // QuickXorHash of the object content
mimeType string // Content-Type of object from server (may not be as uploaded) mimeType string // Content-Type of object from server (may not be as uploaded)
} }
@@ -952,7 +882,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
driveType: opt.DriveType, driveType: opt.DriveType,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL), srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
hashType: QuickXorHashType,
} }
f.features = (&fs.Features{ f.features = (&fs.Features{
CaseInsensitive: true, CaseInsensitive: true,
@@ -962,21 +891,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}).Fill(ctx, f) }).Fill(ctx, f)
f.srv.SetErrorHandler(errorHandler) f.srv.SetErrorHandler(errorHandler)
// Set the user defined hash
if opt.HashType == "auto" || opt.HashType == "" {
opt.HashType = QuickXorHashType.String()
}
err = f.hashType.Set(opt.HashType)
if err != nil {
return nil, err
}
// Disable change polling in China region
// See: https://github.com/rclone/rclone/issues/6444
if f.opt.Region == regionCN {
f.features.ChangeNotify = nil
}
// Renew the token in the background // Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error { f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, _, err := f.readMetaDataForPath(ctx, "") _, _, err := f.readMetaDataForPath(ctx, "")
@@ -1636,7 +1550,10 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
// Hashes returns the supported hash sets. // Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set { func (f *Fs) Hashes() hash.Set {
return hash.Set(f.hashType) if f.driveType == driveTypePersonal {
return hash.Set(hash.SHA1)
}
return hash.Set(QuickXorHashType)
} }
// PublicLink returns a link for downloading without account. // PublicLink returns a link for downloading without account.
@@ -1751,10 +1668,6 @@ func (f *Fs) CleanUp(ctx context.Context) error {
token := make(chan struct{}, f.ci.Checkers) token := make(chan struct{}, f.ci.Checkers)
var wg sync.WaitGroup var wg sync.WaitGroup
err := walk.Walk(ctx, f, "", true, -1, func(path string, entries fs.DirEntries, err error) error { err := walk.Walk(ctx, f, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
if err != nil {
fs.Errorf(f, "Failed to list %q: %v", path, err)
return nil
}
err = entries.ForObjectError(func(obj fs.Object) error { err = entries.ForObjectError(func(obj fs.Object) error {
o, ok := obj.(*Object) o, ok := obj.(*Object)
if !ok { if !ok {
@@ -1849,8 +1762,14 @@ func (o *Object) rootPath() string {
// Hash returns the SHA-1 of an object returning a lowercase hex string // Hash returns the SHA-1 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t == o.fs.hashType { if o.fs.driveType == driveTypePersonal {
return o.hash, nil if t == hash.SHA1 {
return o.sha1, nil
}
} else {
if t == QuickXorHashType {
return o.quickxorhash, nil
}
} }
return "", hash.ErrUnsupported return "", hash.ErrUnsupported
} }
@@ -1881,23 +1800,16 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
file := info.GetFile() file := info.GetFile()
if file != nil { if file != nil {
o.mimeType = file.MimeType o.mimeType = file.MimeType
o.hash = "" if file.Hashes.Sha1Hash != "" {
switch o.fs.hashType { o.sha1 = strings.ToLower(file.Hashes.Sha1Hash)
case QuickXorHashType: }
if file.Hashes.QuickXorHash != "" { if file.Hashes.QuickXorHash != "" {
h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash) h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash)
if err != nil { if err != nil {
fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err) fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err)
} else { } else {
o.hash = hex.EncodeToString(h) o.quickxorhash = hex.EncodeToString(h)
}
} }
case hash.SHA1:
o.hash = strings.ToLower(file.Hashes.Sha1Hash)
case hash.SHA256:
o.hash = strings.ToLower(file.Hashes.Sha256Hash)
case hash.CRC32:
o.hash = strings.ToLower(file.Hashes.Crc32Hash)
} }
} }
fileSystemInfo := info.GetFileSystemInfo() fileSystemInfo := info.GetFileSystemInfo()
@@ -1993,20 +1905,12 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
var resp *http.Response var resp *http.Response
opts := o.fs.newOptsCall(o.id, "GET", "/content") opts := o.fs.newOptsCall(o.id, "GET", "/content")
opts.Options = options opts.Options = options
if o.fs.opt.AVOverride {
opts.Parameters = url.Values{"AVOverride": {"1"}}
}
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts) resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
if resp != nil {
if virus := resp.Header.Get("X-Virus-Infected"); virus != "" {
err = fmt.Errorf("server reports this file is infected with a virus - use --onedrive-av-override to download anyway: %s: %w", virus, err)
}
}
return nil, err return nil, err
} }

View File

@@ -7,40 +7,51 @@
// See: https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash // See: https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
package quickxorhash package quickxorhash
// This code was ported from a fast C-implementation from // This code was ported from the code snippet linked from
// https://github.com/namazso/QuickXorHash // https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
// which is licensed under the BSD Zero Clause License // Which has the copyright
//
// BSD Zero Clause License
//
// Copyright (c) 2022 namazso <admin@namazso.eu>
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
// AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
// OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
import "hash" // ------------------------------------------------------------------------------
// Copyright (c) 2016 Microsoft Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// ------------------------------------------------------------------------------
import (
"hash"
)
const ( const (
// BlockSize is the preferred size for hashing // BlockSize is the preferred size for hashing
BlockSize = 64 BlockSize = 64
// Size of the output checksum // Size of the output checksum
Size = 20 Size = 20
shift = 11 bitsInLastCell = 32
widthInBits = 8 * Size shift = 11
dataSize = shift * widthInBits widthInBits = 8 * Size
dataSize = (widthInBits-1)/64 + 1
) )
type quickXorHash struct { type quickXorHash struct {
data [dataSize]byte data [dataSize]uint64
size uint64 lengthSoFar uint64
shiftSoFar int
} }
// New returns a new hash.Hash computing the quickXorHash checksum. // New returns a new hash.Hash computing the quickXorHash checksum.
@@ -59,37 +70,94 @@ func New() hash.Hash {
// //
// Implementations must not retain p. // Implementations must not retain p.
func (q *quickXorHash) Write(p []byte) (n int, err error) { func (q *quickXorHash) Write(p []byte) (n int, err error) {
var i int currentshift := q.shiftSoFar
// fill last remain
lastRemain := q.size % dataSize // The bitvector where we'll start xoring
if lastRemain != 0 { vectorArrayIndex := currentshift / 64
i += xorBytes(q.data[lastRemain:], p)
// The position within the bit vector at which we begin xoring
vectorOffset := currentshift % 64
iterations := len(p)
if iterations > widthInBits {
iterations = widthInBits
} }
if i != len(p) { for i := 0; i < iterations; i++ {
for len(p)-i >= dataSize { isLastCell := vectorArrayIndex == len(q.data)-1
i += xorBytes(q.data[:], p[i:]) var bitsInVectorCell int
if isLastCell {
bitsInVectorCell = bitsInLastCell
} else {
bitsInVectorCell = 64
}
// There's at least 2 bitvectors before we reach the end of the array
if vectorOffset <= bitsInVectorCell-8 {
for j := i; j < len(p); j += widthInBits {
q.data[vectorArrayIndex] ^= uint64(p[j]) << uint(vectorOffset)
}
} else {
index1 := vectorArrayIndex
var index2 int
if isLastCell {
index2 = 0
} else {
index2 = vectorArrayIndex + 1
}
low := byte(bitsInVectorCell - vectorOffset)
xoredByte := byte(0)
for j := i; j < len(p); j += widthInBits {
xoredByte ^= p[j]
}
q.data[index1] ^= uint64(xoredByte) << uint(vectorOffset)
q.data[index2] ^= uint64(xoredByte) >> low
}
vectorOffset += shift
for vectorOffset >= bitsInVectorCell {
if isLastCell {
vectorArrayIndex = 0
} else {
vectorArrayIndex = vectorArrayIndex + 1
}
vectorOffset -= bitsInVectorCell
} }
xorBytes(q.data[:], p[i:])
} }
q.size += uint64(len(p))
// Update the starting position in a circular shift pattern
q.shiftSoFar = (q.shiftSoFar + shift*(len(p)%widthInBits)) % widthInBits
q.lengthSoFar += uint64(len(p))
return len(p), nil return len(p), nil
} }
// Calculate the current checksum // Calculate the current checksum
func (q *quickXorHash) checkSum() (h [Size + 1]byte) { func (q *quickXorHash) checkSum() (h [Size]byte) {
for i := 0; i < dataSize; i++ { // Output the data as little endian bytes
shift := (i * 11) % 160 ph := 0
shiftBytes := shift / 8 for i := 0; i < len(q.data)-1; i++ {
shiftBits := shift % 8 d := q.data[i]
shifted := int(q.data[i]) << shiftBits _ = h[ph+7] // bounds check
h[shiftBytes] ^= byte(shifted) h[ph+0] = byte(d >> (8 * 0))
h[shiftBytes+1] ^= byte(shifted >> 8) h[ph+1] = byte(d >> (8 * 1))
h[ph+2] = byte(d >> (8 * 2))
h[ph+3] = byte(d >> (8 * 3))
h[ph+4] = byte(d >> (8 * 4))
h[ph+5] = byte(d >> (8 * 5))
h[ph+6] = byte(d >> (8 * 6))
h[ph+7] = byte(d >> (8 * 7))
ph += 8
} }
h[0] ^= h[20] // remaining 32 bits
d := q.data[len(q.data)-1]
h[Size-4] = byte(d >> (8 * 0))
h[Size-3] = byte(d >> (8 * 1))
h[Size-2] = byte(d >> (8 * 2))
h[Size-1] = byte(d >> (8 * 3))
// XOR the file length with the least significant bits in little endian format // XOR the file length with the least significant bits in little endian format
d := q.size d = q.lengthSoFar
h[Size-8] ^= byte(d >> (8 * 0)) h[Size-8] ^= byte(d >> (8 * 0))
h[Size-7] ^= byte(d >> (8 * 1)) h[Size-7] ^= byte(d >> (8 * 1))
h[Size-6] ^= byte(d >> (8 * 2)) h[Size-6] ^= byte(d >> (8 * 2))
@@ -106,7 +174,7 @@ func (q *quickXorHash) checkSum() (h [Size + 1]byte) {
// It does not change the underlying hash state. // It does not change the underlying hash state.
func (q *quickXorHash) Sum(b []byte) []byte { func (q *quickXorHash) Sum(b []byte) []byte {
hash := q.checkSum() hash := q.checkSum()
return append(b, hash[:Size]...) return append(b, hash[:]...)
} }
// Reset resets the Hash to its initial state. // Reset resets the Hash to its initial state.
@@ -128,10 +196,8 @@ func (q *quickXorHash) BlockSize() int {
} }
// Sum returns the quickXorHash checksum of the data. // Sum returns the quickXorHash checksum of the data.
func Sum(data []byte) (h [Size]byte) { func Sum(data []byte) [Size]byte {
var d quickXorHash var d quickXorHash
_, _ = d.Write(data) _, _ = d.Write(data)
s := d.checkSum() return d.checkSum()
copy(h[:], s[:])
return h
} }
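
Either implementation of the package satisfies hash.Hash, so callers use it the same way. A small usage sketch, assuming the package lives at its usual path inside the rclone module:

package main

import (
	"encoding/base64"
	"fmt"

	"github.com/rclone/rclone/backend/onedrive/quickxorhash"
)

func main() {
	h := quickxorhash.New()
	h.Write([]byte("hello, world"))
	sum := h.Sum(nil) // 20-byte digest

	// OneDrive reports the value base64-encoded in the quickXorHash field.
	fmt.Println(base64.StdEncoding.EncodeToString(sum))
}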

View File

@@ -4,7 +4,6 @@ import (
"encoding/base64" "encoding/base64"
"fmt" "fmt"
"hash" "hash"
"math/rand"
"testing" "testing"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@@ -167,16 +166,3 @@ func TestReset(t *testing.T) {
// check interface // check interface
var _ hash.Hash = (*quickXorHash)(nil) var _ hash.Hash = (*quickXorHash)(nil)
func BenchmarkQuickXorHash(b *testing.B) {
b.SetBytes(1 << 20)
buf := make([]byte, 1<<20)
rand.Read(buf)
h := New()
b.ResetTimer()
for i := 0; i < b.N; i++ {
h.Reset()
h.Write(buf)
h.Sum(nil)
}
}

View File

@@ -1,20 +0,0 @@
//go:build !go1.20
package quickxorhash
func xorBytes(dst, src []byte) int {
n := len(dst)
if len(src) < n {
n = len(src)
}
if n == 0 {
return 0
}
dst = dst[:n]
//src = src[:n]
src = src[:len(dst)] // remove bounds check in loop
for i := range dst {
dst[i] ^= src[i]
}
return n
}

View File

@@ -1,9 +0,0 @@
//go:build go1.20
package quickxorhash
import "crypto/subtle"
func xorBytes(dst, src []byte) int {
return subtle.XORBytes(dst, src, dst)
}
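
crypto/subtle.XORBytes (Go 1.20+) does the same work as the manual loop in the pre-1.20 file. A quick standalone check of its behaviour:

package main

import (
	"crypto/subtle"
	"fmt"
)

func main() {
	dst := []byte{0x00, 0xff, 0x0f}
	src := []byte{0xaa, 0xaa, 0xaa}

	// dst = src XOR dst, returning the number of bytes processed,
	// which mirrors the fallback loop above.
	n := subtle.XORBytes(dst, src, dst)
	fmt.Printf("%d bytes: % x\n", n, dst) // 3 bytes: aa 55 a5
}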

View File

@@ -42,10 +42,9 @@ func init() {
Description: "OpenDrive", Description: "OpenDrive",
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "username", Name: "username",
Help: "Username.", Help: "Username.",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "password", Name: "password",
Help: "Password.", Help: "Password.",
@@ -767,17 +766,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return f.shouldRetry(ctx, resp, err) return f.shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
if apiError, ok := err.(*Error); ok {
// Work around a bug maybe in opendrive or maybe in rclone.
//
// We should know whether the folder exists or not by the call to
// FindDir above so exactly why it is not found here is a mystery.
//
// This manifests as a failure in fs/sync TestSyncOverlapWithFilter
if apiError.Info.Message == "Folder is already deleted" {
return fs.DirEntries{}, nil
}
}
return nil, fmt.Errorf("failed to get folder list: %w", err) return nil, fmt.Errorf("failed to get folder list: %w", err)
} }

View File

@@ -1,129 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"crypto/sha256"
"encoding/base64"
"errors"
"fmt"
"os"
"strings"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
)
const (
sseDefaultAlgorithm = "AES256"
)
func getSha256(p []byte) []byte {
h := sha256.New()
h.Write(p)
return h.Sum(nil)
}
func validateSSECustomerKeyOptions(opt *Options) error {
if opt.SSEKMSKeyID != "" && (opt.SSECustomerKeyFile != "" || opt.SSECustomerKey != "") {
return errors.New("oos: can't use vault sse_kms_key_id and local sse_customer_key at the same time")
}
if opt.SSECustomerKey != "" && opt.SSECustomerKeyFile != "" {
return errors.New("oos: can't use sse_customer_key and sse_customer_key_file at the same time")
}
if opt.SSEKMSKeyID != "" {
return nil
}
err := populateSSECustomerKeys(opt)
if err != nil {
return err
}
return nil
}
func populateSSECustomerKeys(opt *Options) error {
if opt.SSECustomerKeyFile != "" {
// Reads the base64-encoded AES key data from the specified file and computes its SHA256 checksum
data, err := os.ReadFile(expandPath(opt.SSECustomerKeyFile))
if err != nil {
return fmt.Errorf("oos: error reading sse_customer_key_file: %v", err)
}
opt.SSECustomerKey = strings.TrimSpace(string(data))
}
if opt.SSECustomerKey != "" {
decoded, err := base64.StdEncoding.DecodeString(opt.SSECustomerKey)
if err != nil {
return fmt.Errorf("oos: Could not decode sse_customer_key_file: %w", err)
}
sha256Checksum := base64.StdEncoding.EncodeToString(getSha256(decoded))
if opt.SSECustomerKeySha256 == "" {
opt.SSECustomerKeySha256 = sha256Checksum
} else {
if opt.SSECustomerKeySha256 != sha256Checksum {
return fmt.Errorf("the computed SHA256 checksum "+
"(%v) of the key doesn't match the config entry sse_customer_key_sha256=(%v)",
sha256Checksum, opt.SSECustomerKeySha256)
}
}
if opt.SSECustomerAlgorithm == "" {
opt.SSECustomerAlgorithm = sseDefaultAlgorithm
}
}
return nil
}
// https://docs.oracle.com/en-us/iaas/Content/Object/Tasks/usingyourencryptionkeys.htm
func useBYOKPutObject(fs *Fs, request *objectstorage.PutObjectRequest) {
if fs.opt.SSEKMSKeyID != "" {
request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
}
if fs.opt.SSECustomerAlgorithm != "" {
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
}
if fs.opt.SSECustomerKey != "" {
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
}
if fs.opt.SSECustomerKeySha256 != "" {
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
}
}
func useBYOKHeadObject(fs *Fs, request *objectstorage.HeadObjectRequest) {
if fs.opt.SSECustomerAlgorithm != "" {
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
}
if fs.opt.SSECustomerKey != "" {
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
}
if fs.opt.SSECustomerKeySha256 != "" {
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
}
}
func useBYOKGetObject(fs *Fs, request *objectstorage.GetObjectRequest) {
if fs.opt.SSECustomerAlgorithm != "" {
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
}
if fs.opt.SSECustomerKey != "" {
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
}
if fs.opt.SSECustomerKeySha256 != "" {
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
}
}
func useBYOKCopyObject(fs *Fs, request *objectstorage.CopyObjectRequest) {
if fs.opt.SSEKMSKeyID != "" {
request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
}
if fs.opt.SSECustomerAlgorithm != "" {
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
}
if fs.opt.SSECustomerKey != "" {
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
}
if fs.opt.SSECustomerKeySha256 != "" {
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
}
}
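
populateSSECustomerKeys above expects the customer key base64-encoded and cross-checks a base64 SHA-256 digest of the raw key bytes. A sketch of producing a matching key/checksum pair for the config (the printed names are the backend's option names):

package main

import (
	"crypto/rand"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

func main() {
	// 256-bit customer-provided key for SSE-C (AES256).
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	encodedKey := base64.StdEncoding.EncodeToString(key)

	// The backend derives this checksum itself when the option is left
	// empty; computing it up front lets it be pinned in the config.
	sum := sha256.Sum256(key)
	encodedSum := base64.StdEncoding.EncodeToString(sum[:])

	fmt.Println("sse_customer_key        =", encodedKey)
	fmt.Println("sse_customer_key_sha256 =", encodedSum)
}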

View File

@@ -1,178 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"context"
"crypto/rsa"
"errors"
"net/http"
"os"
"path"
"strings"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/common/auth"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
)
func expandPath(filepath string) (expandedPath string) {
if filepath == "" {
return filepath
}
cleanedPath := path.Clean(filepath)
expandedPath = cleanedPath
if strings.HasPrefix(cleanedPath, "~") {
rest := cleanedPath[2:]
home, err := os.UserHomeDir()
if err != nil {
return expandedPath
}
expandedPath = path.Join(home, rest)
}
return
}
func getConfigurationProvider(opt *Options) (common.ConfigurationProvider, error) {
switch opt.Provider {
case instancePrincipal:
return auth.InstancePrincipalConfigurationProvider()
case userPrincipal:
expandConfigFilePath := expandPath(opt.ConfigFile)
if expandConfigFilePath != "" && !fileExists(expandConfigFilePath) {
fs.Errorf(userPrincipal, "oci config file doesn't exist at %v", expandConfigFilePath)
}
return common.CustomProfileConfigProvider(expandConfigFilePath, opt.ConfigProfile), nil
case resourcePrincipal:
return auth.ResourcePrincipalConfigurationProvider()
case noAuth:
fs.Infof("client", "using no auth provider")
return getNoAuthConfiguration()
default:
}
return common.DefaultConfigProvider(), nil
}
func newObjectStorageClient(ctx context.Context, opt *Options) (*objectstorage.ObjectStorageClient, error) {
p, err := getConfigurationProvider(opt)
if err != nil {
return nil, err
}
client, err := objectstorage.NewObjectStorageClientWithConfigurationProvider(p)
if err != nil {
fs.Errorf(opt.Provider, "failed to create object storage client, %v", err)
return nil, err
}
if opt.Region != "" {
client.SetRegion(opt.Region)
}
modifyClient(ctx, opt, &client.BaseClient)
return &client, err
}
func fileExists(filePath string) bool {
if _, err := os.Stat(filePath); errors.Is(err, os.ErrNotExist) {
return false
}
return true
}
func modifyClient(ctx context.Context, opt *Options, client *common.BaseClient) {
client.HTTPClient = getHTTPClient(ctx)
if opt.Provider == noAuth {
client.Signer = getNoAuthSigner()
}
}
// getClient makes http client according to the global options
// this has rclone specific options support like dump headers, body etc.
func getHTTPClient(ctx context.Context) *http.Client {
return fshttp.NewClient(ctx)
}
var retryErrorCodes = []int{
408, // Request Timeout
429, // Rate exceeded.
500, // Get occasional 500 Internal Server Error
503, // Service Unavailable
504, // Gateway Time-out
}
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
// If this is an ocierr object, try and extract more useful information to determine if we should retry
if ociError, ok := err.(common.ServiceError); ok {
// Simple case, check the original embedded error in case it's generically retryable
if fserrors.ShouldRetry(err) {
return true, err
}
// If it is a timeout then we want to retry that
if ociError.GetCode() == "RequestTimeout" {
return true, err
}
}
// Ok, not an oci error, check for generic failure conditions
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
func getNoAuthConfiguration() (common.ConfigurationProvider, error) {
return &noAuthConfigurator{}, nil
}
func getNoAuthSigner() common.HTTPRequestSigner {
return &noAuthSigner{}
}
type noAuthConfigurator struct {
}
type noAuthSigner struct {
}
func (n *noAuthSigner) Sign(*http.Request) error {
return nil
}
func (n *noAuthConfigurator) PrivateRSAKey() (*rsa.PrivateKey, error) {
return nil, nil
}
func (n *noAuthConfigurator) KeyID() (string, error) {
return "", nil
}
func (n *noAuthConfigurator) TenancyOCID() (string, error) {
return "", nil
}
func (n *noAuthConfigurator) UserOCID() (string, error) {
return "", nil
}
func (n *noAuthConfigurator) KeyFingerprint() (string, error) {
return "", nil
}
func (n *noAuthConfigurator) Region() (string, error) {
return "", nil
}
func (n *noAuthConfigurator) AuthType() (common.AuthConfig, error) {
return common.AuthConfig{
AuthType: common.UnknownAuthenticationType,
IsFromConfigFile: false,
OboToken: nil,
}, nil
}
// Check the interfaces are satisfied
var (
_ common.ConfigurationProvider = &noAuthConfigurator{}
_ common.HTTPRequestSigner = &noAuthSigner{}
)

View File

@@ -1,292 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"context"
"fmt"
"sort"
"strings"
"time"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
)
// ------------------------------------------------------------
// Command Interface Implementation
// ------------------------------------------------------------
const (
operationRename = "rename"
operationListMultiPart = "list-multipart-uploads"
operationCleanup = "cleanup"
)
var commandHelp = []fs.CommandHelp{{
Name: operationRename,
Short: "change the name of an object",
Long: `This command can be used to rename an object.
Usage Examples:
rclone backend rename oos:bucket relative-object-path-under-bucket object-new-name
`,
Opts: nil,
}, {
Name: operationListMultiPart,
Short: "List the unfinished multipart uploads",
Long: `This command lists the unfinished multipart uploads in JSON format.
rclone backend list-multipart-uploads oos:bucket/path/to/object
It returns a dictionary of buckets with values as lists of unfinished
multipart uploads.
You can call it with no bucket in which case it lists all buckets, with
a bucket or with a bucket and path.
{
"test-bucket": [
{
"namespace": "test-namespace",
"bucket": "test-bucket",
"object": "600m.bin",
"uploadId": "51dd8114-52a4-b2f2-c42f-5291f05eb3c8",
"timeCreated": "2022-07-29T06:21:16.595Z",
"storageTier": "Standard"
}
]
`,
}, {
Name: operationCleanup,
Short: "Remove unfinished multipart uploads.",
Long: `This command removes unfinished multipart uploads of age greater than
max-age which defaults to 24 hours.
Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.
rclone backend cleanup oos:bucket/path/to/object
rclone backend cleanup -o max-age=7w oos:bucket/path/to/object
Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
`,
Opts: map[string]string{
"max-age": "Max age of upload to delete",
},
},
}
/*
Command the backend to run a named command
The command run is name
args may be used to read arguments from
opts may be used to read optional arguments from
The result should be capable of being JSON encoded
If it is a string or a []string it will be shown to the user
otherwise it will be JSON encoded and shown to the user like that
*/
func (f *Fs) Command(ctx context.Context, commandName string, args []string,
opt map[string]string) (result interface{}, err error) {
// fs.Debugf(f, "command %v, args: %v, opts:%v", commandName, args, opt)
switch commandName {
case operationRename:
if len(args) < 2 {
return nil, fmt.Errorf("path to object or its new name to rename is empty")
}
remote := args[0]
newName := args[1]
return f.rename(ctx, remote, newName)
case operationListMultiPart:
return f.listMultipartUploadsAll(ctx)
case operationCleanup:
maxAge := 24 * time.Hour
if opt["max-age"] != "" {
maxAge, err = fs.ParseDuration(opt["max-age"])
if err != nil {
return nil, fmt.Errorf("bad max-age: %w", err)
}
}
return nil, f.cleanUp(ctx, maxAge)
default:
return nil, fs.ErrorCommandNotFound
}
}
func (f *Fs) rename(ctx context.Context, remote, newName string) (interface{}, error) {
if remote == "" {
return nil, fmt.Errorf("path to object file cannot be empty")
}
if newName == "" {
return nil, fmt.Errorf("the object's new name cannot be empty")
}
o := &Object{
fs: f,
remote: remote,
}
bucketName, objectPath := o.split()
err := o.readMetaData(ctx)
if err != nil {
fs.Errorf(f, "failed to read object:%v %v ", objectPath, err)
if strings.HasPrefix(objectPath, bucketName) {
fs.Errorf(f, "warn: ensure object path: %v is relative to bucket:%v and doesn't include the bucket name",
objectPath, bucketName)
}
return nil, fs.ErrorNotAFile
}
details := objectstorage.RenameObjectDetails{
SourceName: common.String(objectPath),
NewName: common.String(newName),
}
request := objectstorage.RenameObjectRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: common.String(bucketName),
RenameObjectDetails: details,
OpcClientRequestId: nil,
RequestMetadata: common.RequestMetadata{},
}
var response objectstorage.RenameObjectResponse
err = f.pacer.Call(func() (bool, error) {
response, err = f.srv.RenameObject(ctx, request)
return shouldRetry(ctx, response.HTTPResponse(), err)
})
if err != nil {
return nil, err
}
fs.Infof(f, "success: renamed object-path: %v to %v", objectPath, newName)
return "renamed successfully", nil
}
func (f *Fs) listMultipartUploadsAll(ctx context.Context) (uploadsMap map[string][]*objectstorage.MultipartUpload,
err error) {
uploadsMap = make(map[string][]*objectstorage.MultipartUpload)
bucket, directory := f.split("")
if bucket != "" {
uploads, err := f.listMultipartUploads(ctx, bucket, directory)
if err != nil {
return uploadsMap, err
}
uploadsMap[bucket] = uploads
return uploadsMap, nil
}
entries, err := f.listBuckets(ctx)
if err != nil {
return uploadsMap, err
}
for _, entry := range entries {
bucket := entry.Remote()
uploads, listErr := f.listMultipartUploads(ctx, bucket, "")
if listErr != nil {
err = listErr
fs.Errorf(f, "%v", err)
}
uploadsMap[bucket] = uploads
}
return uploadsMap, err
}
// listMultipartUploads lists all outstanding multipart uploads for (bucket, key)
//
// Note that rather lazily we treat key as a prefix, so it matches
// directories and objects. This could surprise the user if they ask
// for "dir" and it returns "dirKey"
func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory string) (
uploads []*objectstorage.MultipartUpload, err error) {
return f.listMultipartUploadsObject(ctx, bucketName, directory, false)
}
// findLatestMultipartUpload finds the most recent outstanding multipart upload for (bucket, key)
//
// Note that rather lazily we treat key as a prefix, so it matches
// directories and objects. This could surprise the user if they ask
// for "dir" and it returns "dirKey"
func (f *Fs) findLatestMultipartUpload(ctx context.Context, bucketName, directory string) (
uploads []*objectstorage.MultipartUpload, err error) {
pastUploads, err := f.listMultipartUploadsObject(ctx, bucketName, directory, true)
if err != nil {
return nil, err
}
if len(pastUploads) > 0 {
sort.Slice(pastUploads, func(i, j int) bool {
return pastUploads[i].TimeCreated.After(pastUploads[j].TimeCreated.Time)
})
return pastUploads[:1], nil
}
return nil, err
}
func (f *Fs) listMultipartUploadsObject(ctx context.Context, bucketName, directory string, exact bool) (
uploads []*objectstorage.MultipartUpload, err error) {
uploads = []*objectstorage.MultipartUpload{}
req := objectstorage.ListMultipartUploadsRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: common.String(bucketName),
}
var response objectstorage.ListMultipartUploadsResponse
for {
err = f.pacer.Call(func() (bool, error) {
response, err = f.srv.ListMultipartUploads(ctx, req)
return shouldRetry(ctx, response.HTTPResponse(), err)
})
if err != nil {
// fs.Debugf(f, "failed to list multi part uploads %v", err)
return uploads, err
}
for index, item := range response.Items {
if directory != "" && item.Object != nil && !strings.HasPrefix(*item.Object, directory) {
continue
}
if exact {
if *item.Object == directory {
uploads = append(uploads, &response.Items[index])
}
} else {
uploads = append(uploads, &response.Items[index])
}
}
if response.OpcNextPage == nil {
break
}
req.Page = response.OpcNextPage
}
return uploads, nil
}
func (f *Fs) listMultipartUploadParts(ctx context.Context, bucketName, bucketPath string, uploadID string) (
uploadedParts map[int]objectstorage.MultipartUploadPartSummary, err error) {
uploadedParts = make(map[int]objectstorage.MultipartUploadPartSummary)
req := objectstorage.ListMultipartUploadPartsRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
UploadId: common.String(uploadID),
Limit: common.Int(1000),
}
var response objectstorage.ListMultipartUploadPartsResponse
for {
err = f.pacer.Call(func() (bool, error) {
response, err = f.srv.ListMultipartUploadParts(ctx, req)
return shouldRetry(ctx, response.HTTPResponse(), err)
})
if err != nil {
return uploadedParts, err
}
for _, item := range response.Items {
uploadedParts[*item.PartNumber] = item
}
if response.OpcNextPage == nil {
break
}
req.Page = response.OpcNextPage
}
return uploadedParts, nil
}

View File

@@ -1,156 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"context"
"fmt"
"strings"
"time"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
)
// ------------------------------------------------------------
// Implement Copier is an optional interfaces for Fs
//------------------------------------------------------------
// Copy src to this remote using server-side copy operations.
// This is stored with the remote path given
// It returns the destination Object and a possible error
// Will only be called if src.Fs().Name() == f.Name()
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
// fs.Debugf(f, "copying %v to %v", src.Remote(), remote)
srcObj, ok := src.(*Object)
if !ok {
// fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
// Temporary Object under construction
dstObj := &Object{
fs: f,
remote: remote,
}
err := f.copy(ctx, dstObj, srcObj)
if err != nil {
return nil, err
}
return f.NewObject(ctx, remote)
}
// copy does a server-side copy from dstObj <- srcObj
//
// If newInfo is nil then the metadata will be copied otherwise it
// will be replaced with newInfo
func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object) (err error) {
srcBucket, srcPath := srcObj.split()
dstBucket, dstPath := dstObj.split()
if dstBucket != srcBucket {
exists, err := f.bucketExists(ctx, dstBucket)
if err != nil {
return err
}
if !exists {
err = f.makeBucket(ctx, dstBucket)
if err != nil {
return err
}
}
}
copyObjectDetails := objectstorage.CopyObjectDetails{
SourceObjectName: common.String(srcPath),
DestinationRegion: common.String(dstObj.fs.opt.Region),
DestinationNamespace: common.String(dstObj.fs.opt.Namespace),
DestinationBucket: common.String(dstBucket),
DestinationObjectName: common.String(dstPath),
DestinationObjectMetadata: metadataWithOpcPrefix(srcObj.meta),
}
req := objectstorage.CopyObjectRequest{
NamespaceName: common.String(srcObj.fs.opt.Namespace),
BucketName: common.String(srcBucket),
CopyObjectDetails: copyObjectDetails,
}
useBYOKCopyObject(f, &req)
var resp objectstorage.CopyObjectResponse
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CopyObject(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
return err
}
workRequestID := resp.OpcWorkRequestId
timeout := time.Duration(f.opt.CopyTimeout)
dstName := dstObj.String()
	// https://docs.oracle.com/en-us/iaas/Content/Object/Tasks/copyingobjects.htm
	// To enable server-side copy the customer has to grant the Object Storage
	// service a policy allowing it to manage the object-family, e.g.
	//   Allow service objectstorage-<region_identifier> to manage object-family in tenancy
	// The alternative, which avoids the policy, is to download and re-upload the
	// file, but that only works for files up to the 5 GB limit.
err = copyObjectWaitForWorkRequest(ctx, workRequestID, dstName, timeout, f.srv)
if err != nil {
return err
}
return err
}
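// copyObjectWaitForWorkRequest polls the work request identified by wID until
// it reaches a terminal state or the timeout expires. If the work request
// fails, the error messages attached to it are included in the returned error.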
func copyObjectWaitForWorkRequest(ctx context.Context, wID *string, entityType string, timeout time.Duration,
client *objectstorage.ObjectStorageClient) error {
stateConf := &StateChangeConf{
Pending: []string{
string(objectstorage.WorkRequestStatusAccepted),
string(objectstorage.WorkRequestStatusInProgress),
string(objectstorage.WorkRequestStatusCanceling),
},
Target: []string{
string(objectstorage.WorkRequestSummaryStatusCompleted),
string(objectstorage.WorkRequestSummaryStatusCanceled),
string(objectstorage.WorkRequestStatusFailed),
},
Refresh: func() (interface{}, string, error) {
getWorkRequestRequest := objectstorage.GetWorkRequestRequest{}
getWorkRequestRequest.WorkRequestId = wID
workRequestResponse, err := client.GetWorkRequest(context.Background(), getWorkRequestRequest)
wr := &workRequestResponse.WorkRequest
return workRequestResponse, string(wr.Status), err
},
Timeout: timeout,
}
wrr, e := stateConf.WaitForStateContext(ctx, entityType)
if e != nil {
return fmt.Errorf("work request did not succeed, workId: %s, entity: %s. Message: %s", *wID, entityType, e)
}
wr := wrr.(objectstorage.GetWorkRequestResponse).WorkRequest
if wr.Status == objectstorage.WorkRequestStatusFailed {
errorMessage, _ := getObjectStorageErrorFromWorkRequest(ctx, wID, client)
return fmt.Errorf("work request did not succeed, workId: %s, entity: %s. Message: %s", *wID, entityType, errorMessage)
}
return nil
}
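// getObjectStorageErrorFromWorkRequest collects the error messages attached
// to a work request and joins them into a single string.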
func getObjectStorageErrorFromWorkRequest(ctx context.Context, workRequestID *string, client *objectstorage.ObjectStorageClient) (string, error) {
req := objectstorage.ListWorkRequestErrorsRequest{}
req.WorkRequestId = workRequestID
res, err := client.ListWorkRequestErrors(ctx, req)
if err != nil {
return "", err
}
allErrs := make([]string, 0)
for _, errs := range res.Items {
allErrs = append(allErrs, *errs.Message)
}
errorMessage := strings.Join(allErrs, "\n")
return errorMessage, nil
}


@@ -1,441 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"context"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"strings"
"sync"
"time"
"github.com/ncw/swift/v2"
"github.com/rclone/rclone/lib/multipart"
"github.com/rclone/rclone/lib/pool"
"golang.org/x/net/http/httpguts"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/chunksize"
"github.com/rclone/rclone/fs/hash"
)
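// warnStreamUpload ensures the warning about the maximum size of streaming
// uploads is only logged once.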
var warnStreamUpload sync.Once
// Info needed for an upload
type uploadInfo struct {
req *objectstorage.PutObjectRequest
md5sumHex string
}
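// objectChunkWriter holds the state of a multipart upload to object storage
// and implements fs.ChunkWriter via WriteChunk, Close and Abort.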
type objectChunkWriter struct {
chunkSize int64
size int64
f *Fs
bucket *string
key *string
uploadID *string
partsToCommit []objectstorage.CommitMultipartUploadPartDetails
partsToCommitMu sync.Mutex
existingParts map[int]objectstorage.MultipartUploadPartSummary
eTag string
md5sMu sync.Mutex
md5s []byte
ui uploadInfo
o *Object
}
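// uploadMultipart uploads src as a multipart upload using the generic
// multipart helper, which calls back into OpenChunkWriter.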
func (o *Object) uploadMultipart(ctx context.Context, src fs.ObjectInfo, in io.Reader, options ...fs.OpenOption) error {
_, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
Open: o.fs,
OpenOptions: options,
})
return err
}
// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(
ctx context.Context,
remote string,
src fs.ObjectInfo,
options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: remote,
}
ui, err := o.prepareUpload(ctx, src, options)
if err != nil {
return info, nil, fmt.Errorf("failed to prepare upload: %w", err)
}
uploadParts := f.opt.MaxUploadParts
if uploadParts < 1 {
uploadParts = 1
} else if uploadParts > maxUploadParts {
uploadParts = maxUploadParts
}
size := src.Size()
// calculate size of parts
chunkSize := f.opt.ChunkSize
// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
// buffers here (default 5 MiB). With a maximum number of parts (10,000) this will be a file of
// 48 GiB which seems like a not too unreasonable limit.
if size == -1 {
warnStreamUpload.Do(func() {
fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
f.opt.ChunkSize, fs.SizeSuffix(int64(chunkSize)*int64(uploadParts)))
})
} else {
chunkSize = chunksize.Calculator(src, size, uploadParts, chunkSize)
}
uploadID, existingParts, err := o.createMultipartUpload(ctx, ui.req)
if err != nil {
return info, nil, fmt.Errorf("create multipart upload request failed: %w", err)
}
bucketName, bucketPath := o.split()
chunkWriter := &objectChunkWriter{
chunkSize: int64(chunkSize),
size: size,
f: f,
bucket: &bucketName,
key: &bucketPath,
uploadID: &uploadID,
existingParts: existingParts,
ui: ui,
o: o,
}
info = fs.ChunkWriterInfo{
ChunkSize: int64(chunkSize),
Concurrency: o.fs.opt.UploadConcurrency,
LeavePartsOnError: o.fs.opt.LeavePartsOnError,
}
fs.Debugf(o, "open chunk writer: started multipart upload: %v", uploadID)
return info, chunkWriter, err
}
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (bytesWritten int64, err error) {
if chunkNumber < 0 {
err := fmt.Errorf("invalid chunk number provided: %v", chunkNumber)
return -1, err
}
// Only account after the checksum reads have been done
if do, ok := reader.(pool.DelayAccountinger); ok {
// To figure out this number, do a transfer and if the accounted size is 0 or a
// multiple of what it should be, increase or decrease this number.
do.DelayAccounting(2)
}
m := md5.New()
currentChunkSize, err := io.Copy(m, reader)
if err != nil {
return -1, err
}
// If no data read, don't write the chunk
if currentChunkSize == 0 {
return 0, nil
}
md5sumBinary := m.Sum([]byte{})
w.addMd5(&md5sumBinary, int64(chunkNumber))
md5sum := base64.StdEncoding.EncodeToString(md5sumBinary[:])
// Object storage requires 1 <= PartNumber <= 10000
ossPartNumber := chunkNumber + 1
if existing, ok := w.existingParts[ossPartNumber]; ok {
if md5sum == *existing.Md5 {
fs.Debugf(w.o, "matched uploaded part found, part num %d, skipping part, md5=%v", *existing.PartNumber, md5sum)
w.addCompletedPart(existing.PartNumber, existing.Etag)
return currentChunkSize, nil
}
}
req := objectstorage.UploadPartRequest{
NamespaceName: common.String(w.f.opt.Namespace),
BucketName: w.bucket,
ObjectName: w.key,
UploadId: w.uploadID,
UploadPartNum: common.Int(ossPartNumber),
ContentLength: common.Int64(currentChunkSize),
ContentMD5: common.String(md5sum),
}
w.o.applyPartUploadOptions(w.ui.req, &req)
var resp objectstorage.UploadPartResponse
err = w.f.pacer.Call(func() (bool, error) {
// req.UploadPartBody = io.NopCloser(bytes.NewReader(buf))
// rewind the reader on retry and after reading md5
_, err = reader.Seek(0, io.SeekStart)
if err != nil {
return false, err
}
req.UploadPartBody = io.NopCloser(reader)
resp, err = w.f.srv.UploadPart(ctx, req)
if err != nil {
if ossPartNumber <= 8 {
return shouldRetry(ctx, resp.HTTPResponse(), err)
}
			// retry all chunks once we have uploaded the first few
return true, err
}
return false, err
})
if err != nil {
fs.Errorf(w.o, "multipart upload failed to upload part:%d err: %v", ossPartNumber, err)
return -1, fmt.Errorf("multipart upload failed to upload part: %w", err)
}
w.addCompletedPart(&ossPartNumber, resp.ETag)
return currentChunkSize, err
}
// add a part number and etag to the completed parts
func (w *objectChunkWriter) addCompletedPart(partNum *int, eTag *string) {
w.partsToCommitMu.Lock()
defer w.partsToCommitMu.Unlock()
w.partsToCommit = append(w.partsToCommit, objectstorage.CommitMultipartUploadPartDetails{
PartNum: partNum,
Etag: eTag,
})
}
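// Close commits the multipart upload, aborting it if the commit reports it as
// corrupted, and then verifies the multipart md5 returned by the server
// against the md5 calculated from the uploaded parts.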
func (w *objectChunkWriter) Close(ctx context.Context) (err error) {
req := objectstorage.CommitMultipartUploadRequest{
NamespaceName: common.String(w.f.opt.Namespace),
BucketName: w.bucket,
ObjectName: w.key,
UploadId: w.uploadID,
}
req.PartsToCommit = w.partsToCommit
var resp objectstorage.CommitMultipartUploadResponse
err = w.f.pacer.Call(func() (bool, error) {
resp, err = w.f.srv.CommitMultipartUpload(ctx, req)
// if multipart is corrupted, we will abort the uploadId
if isMultiPartUploadCorrupted(err) {
fs.Debugf(w.o, "multipart uploadId %v is corrupted, aborting...", *w.uploadID)
_ = w.Abort(ctx)
return false, err
}
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
return err
}
w.eTag = *resp.ETag
hashOfHashes := md5.Sum(w.md5s)
wantMultipartMd5 := fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hashOfHashes[:]), len(w.partsToCommit))
gotMultipartMd5 := *resp.OpcMultipartMd5
if wantMultipartMd5 != gotMultipartMd5 {
fs.Errorf(w.o, "multipart upload corrupted: multipart md5 differ: expecting %s but got %s", wantMultipartMd5, gotMultipartMd5)
return fmt.Errorf("multipart upload corrupted: md5 differ: expecting %s but got %s", wantMultipartMd5, gotMultipartMd5)
}
fs.Debugf(w.o, "multipart upload %v md5 matched: expecting %s and got %s", *w.uploadID, wantMultipartMd5, gotMultipartMd5)
return nil
}
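// isMultiPartUploadCorrupted reports whether err is an OCI service error
// indicating that a part of the multipart upload is invalid, in which case
// the upload should be aborted.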
func isMultiPartUploadCorrupted(err error) bool {
if err == nil {
return false
}
	// Check if this is an OCI service error and, if so, whether it is a multipart commit error
if ociError, ok := err.(common.ServiceError); ok {
		// An InvalidUploadPart error means the multipart upload is corrupted
if ociError.GetCode() == "InvalidUploadPart" {
return true
}
}
return false
}
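// Abort cancels the multipart upload, telling the server to discard any parts
// uploaded so far.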
func (w *objectChunkWriter) Abort(ctx context.Context) error {
fs.Debugf(w.o, "Cancelling multipart upload")
err := w.o.fs.abortMultiPartUpload(
ctx,
w.bucket,
w.key,
w.uploadID)
if err != nil {
fs.Debugf(w.o, "Failed to cancel multipart upload: %v", err)
} else {
fs.Debugf(w.o, "canceled and aborted multipart upload: %v", *w.uploadID)
}
return err
}
// addMd5 adds a binary md5 to the md5 calculated so far
func (w *objectChunkWriter) addMd5(md5binary *[]byte, chunkNumber int64) {
w.md5sMu.Lock()
defer w.md5sMu.Unlock()
start := chunkNumber * md5.Size
end := start + md5.Size
if extend := end - int64(len(w.md5s)); extend > 0 {
w.md5s = append(w.md5s, make([]byte, extend)...)
}
copy(w.md5s[start:end], (*md5binary)[:])
}
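// prepareUpload builds the PutObjectRequest for src, merging the source
// metadata and setting the mtime, content type, content length, md5 and
// storage tier as appropriate. Invalid metadata keys and values are dropped.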
func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (ui uploadInfo, err error) {
bucket, bucketPath := o.split()
ui.req = &objectstorage.PutObjectRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucket),
ObjectName: common.String(bucketPath),
}
	// Start with the source modification time - an mtime metadata entry may override it below
modTime := src.ModTime(ctx)
// Fetch metadata if --metadata is in use
meta, err := fs.GetMetadataOptions(ctx, src, options)
if err != nil {
return ui, fmt.Errorf("failed to read metadata from source object: %w", err)
}
ui.req.OpcMeta = make(map[string]string, len(meta)+2)
// merge metadata into request and user metadata
for k, v := range meta {
pv := common.String(v)
k = strings.ToLower(k)
switch k {
case "cache-control":
ui.req.CacheControl = pv
case "content-disposition":
ui.req.ContentDisposition = pv
case "content-encoding":
ui.req.ContentEncoding = pv
case "content-language":
ui.req.ContentLanguage = pv
case "content-type":
ui.req.ContentType = pv
case "tier":
// ignore
case "mtime":
// mtime in meta overrides source ModTime
metaModTime, err := time.Parse(time.RFC3339Nano, v)
if err != nil {
fs.Debugf(o, "failed to parse metadata %s: %q: %v", k, v, err)
} else {
modTime = metaModTime
}
case "btime":
// write as metadata since we can't set it
ui.req.OpcMeta[k] = v
default:
ui.req.OpcMeta[k] = v
}
}
// Set the mtime in the metadata
ui.req.OpcMeta[metaMtime] = swift.TimeToFloatString(modTime)
// read the md5sum if available
// - for non-multipart
// - so we can add a ContentMD5
// - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
	// - for multipart, provided checksums aren't disabled
// - so we can add the md5sum in the metadata as metaMD5Hash
size := src.Size()
isMultipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
var md5sumBase64 string
if !isMultipart || !o.fs.opt.DisableChecksum {
ui.md5sumHex, err = src.Hash(ctx, hash.MD5)
if err == nil && matchMd5.MatchString(ui.md5sumHex) {
hashBytes, err := hex.DecodeString(ui.md5sumHex)
if err == nil {
md5sumBase64 = base64.StdEncoding.EncodeToString(hashBytes)
if isMultipart && !o.fs.opt.DisableChecksum {
// Set the md5sum as metadata on the object if
// - a multipart upload
// - the ETag is not an MD5, e.g. when using SSE/SSE-C
// provided checksums aren't disabled
ui.req.OpcMeta[metaMD5Hash] = md5sumBase64
}
}
}
}
// Set the content type if it isn't set already
if ui.req.ContentType == nil {
ui.req.ContentType = common.String(fs.MimeType(ctx, src))
}
if size >= 0 {
ui.req.ContentLength = common.Int64(size)
}
if md5sumBase64 != "" {
ui.req.ContentMD5 = &md5sumBase64
}
o.applyPutOptions(ui.req, options...)
useBYOKPutObject(o.fs, ui.req)
if o.fs.opt.StorageTier != "" {
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
if !ok {
return ui, fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
}
ui.req.StorageTier = storageTier
}
// Check metadata keys and values are valid
for key, value := range ui.req.OpcMeta {
if !httpguts.ValidHeaderFieldName(key) {
fs.Errorf(o, "Dropping invalid metadata key %q", key)
delete(ui.req.OpcMeta, key)
} else if value == "" {
fs.Errorf(o, "Dropping nil metadata value for key %q", key)
delete(ui.req.OpcMeta, key)
} else if !httpguts.ValidHeaderFieldValue(value) {
fs.Errorf(o, "Dropping invalid metadata value %q for key %q", value, key)
delete(ui.req.OpcMeta, key)
}
}
return ui, nil
}
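// createMultipartUpload starts a multipart upload for the object. If resuming
// is enabled (AttemptResumeUpload) it first tries to reuse the most recent
// incomplete upload, returning its upload ID and the parts already uploaded.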
func (o *Object) createMultipartUpload(ctx context.Context, putReq *objectstorage.PutObjectRequest) (
uploadID string, existingParts map[int]objectstorage.MultipartUploadPartSummary, err error) {
bucketName, bucketPath := o.split()
f := o.fs
if f.opt.AttemptResumeUpload {
fs.Debugf(o, "attempting to resume upload for %v (if any)", o.remote)
resumeUploads, err := o.fs.findLatestMultipartUpload(ctx, bucketName, bucketPath)
if err == nil && len(resumeUploads) > 0 {
uploadID = *resumeUploads[0].UploadId
existingParts, err = f.listMultipartUploadParts(ctx, bucketName, bucketPath, uploadID)
if err == nil {
fs.Debugf(o, "resuming with existing upload id: %v", uploadID)
return uploadID, existingParts, err
}
}
}
req := objectstorage.CreateMultipartUploadRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
}
req.Object = common.String(bucketPath)
if o.fs.opt.StorageTier != "" {
storageTier, ok := objectstorage.GetMappingStorageTierEnum(o.fs.opt.StorageTier)
if !ok {
return "", nil, fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
}
req.StorageTier = storageTier
}
o.applyMultipartUploadOptions(putReq, &req)
var resp objectstorage.CreateMultipartUploadResponse
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CreateMultipartUpload(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
return "", existingParts, err
}
existingParts = make(map[int]objectstorage.MultipartUploadPartSummary)
uploadID = *resp.UploadId
fs.Debugf(o, "created new upload id: %v", uploadID)
return uploadID, existingParts, err
}

Some files were not shown because too many files have changed in this diff.