mirror of https://github.com/rclone/rclone.git synced 2026-01-05 01:53:14 +00:00

Compare commits


1 Commit

Author SHA1 Message Date
Nick Craig-Wood
c917d2d5b4 s3: fix --s3-versions when copying a single object
Before this change, if --s3-versions was enabled, then copying a
single object from a subdirectory would fail.

This was due to an incorrect comparison in the NewFs code.

This fixes the comparison and introduces a new unit test.
2022-09-05 18:56:11 +01:00
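
The gist of the failure mode: with --s3-versions enabled, listed object names carry a version suffix, so a naive equality check between the requested leaf name and the listed remotes never matches when NewFs points at a single object in a subdirectory. Below is a minimal, illustrative Go sketch of a version-aware comparison; the suffix format and the helper name are assumptions for illustration, not the actual rclone fix.

package main

import (
	"fmt"
	"regexp"
)

// versionSuffix approximates the "-vYYYY-MM-DD-HHMMSS-mmm" suffix that rclone
// inserts before the extension when --s3-versions is in effect (assumed format).
var versionSuffix = regexp.MustCompile(`-v\d{4}-\d{2}-\d{2}-\d{6}-\d{3}`)

// sameObject reports whether a listed, possibly versioned remote refers to the
// requested leaf name (hypothetical helper, not rclone's API).
func sameObject(requested, listed string) bool {
	return requested == versionSuffix.ReplaceAllString(listed, "")
}

func main() {
	fmt.Println(sameObject("subdir/file.txt", "subdir/file-v2022-09-05-185611-000.txt")) // true
	fmt.Println(sameObject("subdir/file.txt", "subdir/other.txt"))                       // false
}
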
656 changed files with 17938 additions and 63205 deletions

.github/FUNDING.yml (vendored, new file, 4 lines changed)
View File

@@ -0,0 +1,4 @@
github: [ncw]
patreon: njcw
liberapay: ncw
custom: ["https://rclone.org/donate/"]

View File

@@ -1,6 +0,0 @@
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"

View File

@@ -8,31 +8,29 @@ name: build
on: on:
push: push:
branches: branches:
- '**' - '*'
tags: tags:
- '**' - '*'
pull_request: pull_request:
workflow_dispatch: workflow_dispatch:
inputs: inputs:
manual: manual:
description: Manual run (bypass default conditions)
type: boolean
required: true required: true
default: true default: true
jobs: jobs:
build: build:
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }} if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
timeout-minutes: 60 timeout-minutes: 60
strategy: strategy:
fail-fast: false fail-fast: false
matrix: matrix:
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.19', 'go1.20'] job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.17', 'go1.18']
include: include:
- job_name: linux - job_name: linux
os: ubuntu-latest os: ubuntu-latest
go: '1.21.0-rc.4' go: '1.19.x'
gotags: cmount gotags: cmount
build_flags: '-include "^linux/"' build_flags: '-include "^linux/"'
check: true check: true
@@ -43,14 +41,14 @@ jobs:
- job_name: linux_386 - job_name: linux_386
os: ubuntu-latest os: ubuntu-latest
go: '1.21.0-rc.4' go: '1.19.x'
goarch: 386 goarch: 386
gotags: cmount gotags: cmount
quicktest: true quicktest: true
- job_name: mac_amd64 - job_name: mac_amd64
os: macos-11 os: macos-11
go: '1.21.0-rc.4' go: '1.19.x'
gotags: 'cmount' gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo' build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true quicktest: true
@@ -59,14 +57,14 @@ jobs:
- job_name: mac_arm64 - job_name: mac_arm64
os: macos-11 os: macos-11
go: '1.21.0-rc.4' go: '1.19.x'
gotags: 'cmount' gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib' build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true deploy: true
- job_name: windows - job_name: windows
os: windows-latest os: windows-latest
go: '1.21.0-rc.4' go: '1.19.x'
gotags: cmount gotags: cmount
cgo: '0' cgo: '0'
build_flags: '-include "^windows/"' build_flags: '-include "^windows/"'
@@ -76,20 +74,20 @@ jobs:
- job_name: other_os - job_name: other_os
os: ubuntu-latest os: ubuntu-latest
go: '1.21.0-rc.4' go: '1.19.x'
build_flags: '-exclude "^(windows/|darwin/|linux/)"' build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true compile_all: true
deploy: true deploy: true
- job_name: go1.19 - job_name: go1.17
os: ubuntu-latest os: ubuntu-latest
go: '1.19' go: '1.17.x'
quicktest: true quicktest: true
racequicktest: true racequicktest: true
- job_name: go1.20 - job_name: go1.18
os: ubuntu-latest os: ubuntu-latest
go: '1.20' go: '1.18.x'
quicktest: true quicktest: true
racequicktest: true racequicktest: true
@@ -99,13 +97,14 @@ jobs:
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v3 uses: actions/checkout@v2
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Install Go - name: Install Go
uses: actions/setup-go@v4 uses: actions/setup-go@v2
with: with:
stable: 'false'
go-version: ${{ matrix.go }} go-version: ${{ matrix.go }}
check-latest: true check-latest: true
@@ -124,17 +123,12 @@ jobs:
sudo modprobe fuse sudo modprobe fuse
sudo chmod 666 /dev/fuse sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf sudo chown root:$USER /etc/fuse.conf
sudo apt-get install fuse3 libfuse-dev rpm pkg-config sudo apt-get install fuse libfuse-dev rpm pkg-config
if: matrix.os == 'ubuntu-latest' if: matrix.os == 'ubuntu-latest'
- name: Install Libraries on macOS - name: Install Libraries on macOS
shell: bash shell: bash
run: | run: |
# https://github.com/Homebrew/brew/issues/15621#issuecomment-1619266788
# https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6319008
unset HOMEBREW_NO_INSTALL_FROM_API
brew untap --force homebrew/core
brew untap --force homebrew/cask
brew update brew update
brew install --cask macfuse brew install --cask macfuse
if: matrix.os == 'macos-11' if: matrix.os == 'macos-11'
@@ -168,7 +162,7 @@ jobs:
env env
- name: Go module cache - name: Go module cache
uses: actions/cache@v3 uses: actions/cache@v2
with: with:
path: ~/go/pkg/mod path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -222,17 +216,17 @@ jobs:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }} RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# working-directory: '$(modulePath)' # working-directory: '$(modulePath)'
# Deploy binaries if enabled in config && not a PR && not a fork # Deploy binaries if enabled in config && not a PR && not a fork
if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone' if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
lint: lint:
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }} if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
timeout-minutes: 30 timeout-minutes: 30
name: "lint" name: "lint"
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v3 uses: actions/checkout@v2
- name: Code quality test - name: Code quality test
uses: golangci/golangci-lint-action@v3 uses: golangci/golangci-lint-action@v3
@@ -240,39 +234,26 @@ jobs:
# Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
version: latest version: latest
# Run govulncheck on the latest go version, the one we build binaries with
- name: Install Go
uses: actions/setup-go@v4
with:
go-version: '1.21.0-rc.4'
check-latest: true
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest
- name: Scan for vulnerabilities
run: govulncheck ./...
android: android:
if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }} if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
timeout-minutes: 30 timeout-minutes: 30
name: "android-all" name: "android-all"
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v3 uses: actions/checkout@v2
with: with:
fetch-depth: 0 fetch-depth: 0
# Upgrade together with NDK version # Upgrade together with NDK version
- name: Set up Go - name: Set up Go
uses: actions/setup-go@v4 uses: actions/setup-go@v1
with: with:
go-version: '1.21.0-rc.4' go-version: 1.19.x
- name: Go module cache - name: Go module cache
uses: actions/cache@v3 uses: actions/cache@v2
with: with:
path: ~/go/pkg/mod path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -357,4 +338,4 @@ jobs:
env: env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }} RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# Upload artifacts if not a PR && not a fork # Upload artifacts if not a PR && not a fork
if: env.RCLONE_CONFIG_PASS != '' && github.head_ref == '' && github.repository == 'rclone/rclone' if: github.head_ref == '' && github.repository == 'rclone/rclone'

View File

@@ -1,61 +0,0 @@
name: Docker beta build
on:
push:
branches:
- master
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v4
with:
images: ghcr.io/${{ github.repository }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
# This is the user that triggered the Workflow. In this case, it will
# either be the user who created the Release or manually triggered
# the workflow_dispatch.
username: ${{ github.actor }}
# `secrets.GITHUB_TOKEN` is a secret that's automatically generated by
# GitHub Actions at the start of a workflow run to identify the job.
# This is used to authenticate against GitHub Container Registry.
# See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
# for more detailed information.
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and publish image
uses: docker/build-push-action@v4
with:
file: Dockerfile
context: .
push: true # push the image to ghcr
tags: |
ghcr.io/rclone/rclone:beta
rclone/rclone:beta
labels: ${{ steps.meta.outputs.labels }}
platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
cache-from: type=gha
cache-to: type=gha,mode=max
provenance: false
# Eventually the cache will need to be cleared if builds are more frequent than once a week
# https://github.com/docker/build-push-action/issues/252

View File

@@ -0,0 +1,26 @@
name: Docker beta build
on:
push:
branches:
- master
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Build and publish image
uses: ilteoood/docker_buildx@1.1.0
with:
tag: beta
imageName: rclone/rclone
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}

View File

@@ -11,7 +11,7 @@ jobs:
name: Build image job name: Build image job
steps: steps:
- name: Checkout master - name: Checkout master
uses: actions/checkout@v3 uses: actions/checkout@v2
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Get actual patch version - name: Get actual patch version
@@ -40,7 +40,7 @@ jobs:
name: Build docker plugin job name: Build docker plugin job
steps: steps:
- name: Checkout master - name: Checkout master
uses: actions/checkout@v3 uses: actions/checkout@v2
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Build and publish docker plugin - name: Build and publish docker plugin

View File

@@ -1,14 +0,0 @@
name: Publish to Winget
on:
release:
types: [released]
jobs:
publish:
runs-on: windows-latest # Action can only run on Windows
steps:
- uses: vedantmgoyal2009/winget-releaser@v2
with:
identifier: Rclone.Rclone
installers-regex: '-windows-\w+\.zip$'
token: ${{ secrets.WINGET_TOKEN }}

View File

@@ -2,17 +2,15 @@
linters: linters:
enable: enable:
- deadcode
- errcheck - errcheck
- goimports - goimports
- revive - revive
- ineffassign - ineffassign
- structcheck
- varcheck
- govet - govet
- unconvert - unconvert
- staticcheck
- gosimple
- stylecheck
- unused
- misspell
#- prealloc #- prealloc
#- maligned #- maligned
disable-all: true disable-all: true
@@ -22,35 +20,11 @@ issues:
exclude-use-default: false exclude-use-default: false
# Maximum issues count per one linter. Set to 0 to disable. Default is 50. # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
max-issues-per-linter: 0 max-per-linter: 0
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3. # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0 max-same-issues: 0
exclude-rules:
- linters:
- staticcheck
text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'
run: run:
# timeout for analysis, e.g. 30s, 5m, default is 1m # timeout for analysis, e.g. 30s, 5m, default is 1m
timeout: 10m timeout: 10m
linters-settings:
revive:
rules:
- name: unreachable-code
disabled: true
- name: unused-parameter
disabled: true
- name: empty-block
disabled: true
- name: redefines-builtin-id
disabled: true
- name: superfluous-else
disabled: true
stylecheck:
# Only enable the checks performed by the staticcheck stand-alone tool,
# as documented here: https://staticcheck.io/docs/configuration/options/#checks
checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]

View File

@@ -419,7 +419,7 @@ remote or an fs.
Research Research
* Look at the interfaces defined in `fs/types.go` * Look at the interfaces defined in `fs/fs.go`
* Study one or more of the existing remotes * Study one or more of the existing remotes
Getting going Getting going
@@ -428,19 +428,14 @@ Getting going
* box is a good one to start from if you have a directory-based remote * box is a good one to start from if you have a directory-based remote
* b2 is a good one to start from if you have a bucket-based remote * b2 is a good one to start from if you have a bucket-based remote
* Add your remote to the imports in `backend/all/all.go` * Add your remote to the imports in `backend/all/all.go`
* HTTP based remotes are easiest to maintain if they use rclone's [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but if there is a really good go SDK then use that instead. * HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
* Try to implement as many optional methods as possible as it makes the remote more usable. * Try to implement as many optional methods as possible as it makes the remote more usable.
* Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to make sure we can encode any path name and `rclone info` to help determine the encodings needed * Use lib/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
* `rclone purge -v TestRemote:rclone-info` * `rclone purge -v TestRemote:rclone-info`
* `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info` * `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
* `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json` * `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
* open `remote.csv` in a spreadsheet and examine * open `remote.csv` in a spreadsheet and examine
Important:
* Please use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) if you are implementing a REST like backend and parsing XML/JSON in the backend. It makes maintenance much easier.
* If your backend is HTTP based then please use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp) - this adds features like `--dump bodies`, `--tpslimit`, `--user-agent` without you having to code anything!
Unit tests Unit tests
* Create a config entry called `TestRemote` for the unit tests to use * Create a config entry called `TestRemote` for the unit tests to use
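
Since the notes above point contributors at lib/rest and fs/fshttp, here is a minimal sketch of that pattern: build the HTTP client with fshttp (so --dump bodies, --tpslimit and --user-agent keep working) and drive a JSON REST API through lib/rest. The endpoint, path and response struct are invented for illustration; the calls used (fshttp.NewClient, rest.NewClient, SetRoot, CallJSON) are the ones rclone backends typically rely on.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/lib/rest"
)

// itemList is a hypothetical response shape for the example endpoint.
type itemList struct {
	Items []struct {
		Name string `json:"name"`
	} `json:"items"`
}

func main() {
	ctx := context.Background()
	// fshttp.NewClient wires rclone's global HTTP options into the client.
	srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot("https://api.example.com")
	opts := rest.Opts{
		Method: "GET",
		Path:   "/v1/items",
	}
	var result itemList
	// CallJSON sends the request and decodes the JSON response body into result.
	if _, err := srv.CallJSON(ctx, &opts, nil, &result); err != nil {
		log.Fatal(err)
	}
	for _, item := range result.Items {
		fmt.Println(item.Name)
	}
}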

View File

@@ -11,7 +11,7 @@ RUN ./rclone version
# Begin final image # Begin final image
FROM alpine:latest FROM alpine:latest
RUN apk --no-cache add ca-certificates fuse3 tzdata && \ RUN apk --no-cache add ca-certificates fuse tzdata && \
echo "user_allow_other" >> /etc/fuse.conf echo "user_allow_other" >> /etc/fuse.conf
COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/ COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/

View File

@@ -16,9 +16,6 @@ Current active maintainers of rclone are:
| Max Sum | @Max-Sum | union backend | | Max Sum | @Max-Sum | union backend |
| Fred | @creativeprojects | seafile backend | | Fred | @creativeprojects | seafile backend |
| Caleb Case | @calebcase | storj backend | | Caleb Case | @calebcase | storj backend |
| wiserain | @wiserain | pikpak backend |
| albertony | @albertony | |
| Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
**This is a work in progress Draft** **This is a work in progress Draft**

MANUAL.html (generated, 7712 lines changed)

File diff suppressed because it is too large.

MANUAL.md (generated, 8168 lines changed)

File diff suppressed because it is too large.

MANUAL.txt (generated, 8558 lines changed)

File diff suppressed because it is too large.

View File

@@ -81,9 +81,6 @@ quicktest:
racequicktest: racequicktest:
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./... RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...
compiletest:
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -run XXX ./...
# Do source code quality checks # Do source code quality checks
check: rclone check: rclone
@echo "-- START CODE QUALITY REPORT -------------------------------" @echo "-- START CODE QUALITY REPORT -------------------------------"
@@ -96,7 +93,7 @@ build_dep:
# Get the release dependencies we only install on linux # Get the release dependencies we only install on linux
release_dep_linux: release_dep_linux:
go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64\.tar\.gz'
# Get the release dependencies we only install on Windows # Get the release dependencies we only install on Windows
release_dep_windows: release_dep_windows:

View File

@@ -25,19 +25,18 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss) * Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status)) * Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/) * Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/) * Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
* Box [:page_facing_up:](https://rclone.org/box/) * Box [:page_facing_up:](https://rclone.org/box/)
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph) * Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
* China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos) * China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
* Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2) * Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
* Arvan Cloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/) * Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces) * DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage) * Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost) * Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/) * Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/) * Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
* FTP [:page_facing_up:](https://rclone.org/ftp/) * FTP [:page_facing_up:](https://rclone.org/ftp/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/) * Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/) * Google Drive [:page_facing_up:](https://rclone.org/drive/)
@@ -46,13 +45,11 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* HiDrive [:page_facing_up:](https://rclone.org/hidrive/) * HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
* HTTP [:page_facing_up:](https://rclone.org/http/) * HTTP [:page_facing_up:](https://rclone.org/http/)
* Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs) * Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/) * Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/) * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3) * IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
* Koofr [:page_facing_up:](https://rclone.org/koofr/) * Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/) * Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/) * Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/) * Mega [:page_facing_up:](https://rclone.org/mega/)
@@ -62,30 +59,23 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Minio [:page_facing_up:](https://rclone.org/s3/#minio) * Minio [:page_facing_up:](https://rclone.org/s3/#minio)
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud) * Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
* OVH [:page_facing_up:](https://rclone.org/swift/) * OVH [:page_facing_up:](https://rclone.org/swift/)
* Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/) * OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/) * OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/) * Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud) * ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/) * pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
* PikPak [:page_facing_up:](https://rclone.org/pikpak/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/) * premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/) * put.io [:page_facing_up:](https://rclone.org/putio/)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/) * QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/) * Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp) * RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway) * Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/) * Seafile [:page_facing_up:](https://rclone.org/seafile/)
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs) * SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* SFTP [:page_facing_up:](https://rclone.org/sftp/) * SFTP [:page_facing_up:](https://rclone.org/sftp/)
* SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath) * StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* Storj [:page_facing_up:](https://rclone.org/storj/) * Storj [:page_facing_up:](https://rclone.org/storj/)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/) * SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos) * Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi) * Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/) * WebDAV [:page_facing_up:](https://rclone.org/webdav/)

View File

@@ -10,7 +10,7 @@ This file describes how to make the various kinds of releases
## Making a release ## Making a release
* git checkout master # see below for stable branch * git checkout master # see below for stable branch
* git pull # IMPORTANT * git pull
* git status - make sure everything is checked in * git status - make sure everything is checked in
* Check GitHub actions build for master is Green * Check GitHub actions build for master is Green
* make test # see integration test server or run locally * make test # see integration test server or run locally
@@ -21,7 +21,6 @@ This file describes how to make the various kinds of releases
* git status - to check for new man pages - git add them * git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX.0" * git commit -a -v -m "Version v1.XX.0"
* make retag * make retag
* git push origin # without --follow-tags so it doesn't push the tag if it fails
* git push --follow-tags origin * git push --follow-tags origin
* # Wait for the GitHub builds to complete then... * # Wait for the GitHub builds to complete then...
* make fetch_binaries * make fetch_binaries
@@ -54,14 +53,6 @@ doing that so it may be necessary to roll back dependencies to the
version specified by `make updatedirect` in order to get rclone to version specified by `make updatedirect` in order to get rclone to
build. build.
## Tidy beta
At some point after the release run
bin/tidy-beta v1.55
where the version number is that of a couple ago to remove old beta binaries.
## Making a point release ## Making a point release
If rclone needs a point release due to some horrendous bug: If rclone needs a point release due to some horrendous bug:
@@ -75,7 +66,8 @@ Set vars
First make the release branch. If this is a second point release then First make the release branch. If this is a second point release then
this will be done already. this will be done already.
* git co -b ${BASE_TAG}-stable ${BASE_TAG}.0 * git branch ${BASE_TAG} ${BASE_TAG}-stable
* git co ${BASE_TAG}-stable
* make startstable * make startstable
Now Now

View File

@@ -1 +1 @@
v1.64.0 v1.60.0

View File

@@ -24,6 +24,7 @@ import (
_ "github.com/rclone/rclone/backend/hdfs" _ "github.com/rclone/rclone/backend/hdfs"
_ "github.com/rclone/rclone/backend/hidrive" _ "github.com/rclone/rclone/backend/hidrive"
_ "github.com/rclone/rclone/backend/http" _ "github.com/rclone/rclone/backend/http"
_ "github.com/rclone/rclone/backend/hubic"
_ "github.com/rclone/rclone/backend/internetarchive" _ "github.com/rclone/rclone/backend/internetarchive"
_ "github.com/rclone/rclone/backend/jottacloud" _ "github.com/rclone/rclone/backend/jottacloud"
_ "github.com/rclone/rclone/backend/koofr" _ "github.com/rclone/rclone/backend/koofr"
@@ -34,11 +35,8 @@ import (
_ "github.com/rclone/rclone/backend/netstorage" _ "github.com/rclone/rclone/backend/netstorage"
_ "github.com/rclone/rclone/backend/onedrive" _ "github.com/rclone/rclone/backend/onedrive"
_ "github.com/rclone/rclone/backend/opendrive" _ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/oracleobjectstorage"
_ "github.com/rclone/rclone/backend/pcloud" _ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/pikpak"
_ "github.com/rclone/rclone/backend/premiumizeme" _ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/protondrive"
_ "github.com/rclone/rclone/backend/putio" _ "github.com/rclone/rclone/backend/putio"
_ "github.com/rclone/rclone/backend/qingstor" _ "github.com/rclone/rclone/backend/qingstor"
_ "github.com/rclone/rclone/backend/s3" _ "github.com/rclone/rclone/backend/s3"
@@ -46,7 +44,6 @@ import (
_ "github.com/rclone/rclone/backend/sftp" _ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/sharefile" _ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/sia" _ "github.com/rclone/rclone/backend/sia"
_ "github.com/rclone/rclone/backend/smb"
_ "github.com/rclone/rclone/backend/storj" _ "github.com/rclone/rclone/backend/storj"
_ "github.com/rclone/rclone/backend/sugarsync" _ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift" _ "github.com/rclone/rclone/backend/swift"
@@ -54,6 +51,5 @@ import (
_ "github.com/rclone/rclone/backend/uptobox" _ "github.com/rclone/rclone/backend/uptobox"
_ "github.com/rclone/rclone/backend/webdav" _ "github.com/rclone/rclone/backend/webdav"
_ "github.com/rclone/rclone/backend/yandex" _ "github.com/rclone/rclone/backend/yandex"
_ "github.com/rclone/rclone/backend/zip"
_ "github.com/rclone/rclone/backend/zoho" _ "github.com/rclone/rclone/backend/zoho"
) )

File diff suppressed because it is too large.

View File

@@ -6,10 +6,10 @@
package azureblob package azureblob
import ( import (
"context"
"testing" "testing"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests" "github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
@@ -17,31 +17,10 @@ import (
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) { func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
RemoteName: "TestAzureBlob:", RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil), NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"}, TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{ ChunkedUpload: fstests.ChunkedUploadConfig{},
MinChunkSize: defaultChunkSize,
},
})
}
// TestIntegration2 runs integration tests against the remote
func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
name := "TestAzureBlob:"
fstests.Run(t, &fstests.Opt{
RemoteName: name,
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize,
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "directory_markers", Value: "true"},
},
}) })
} }
@@ -53,6 +32,36 @@ var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil) _ fstests.SetUploadChunkSizer = (*Fs)(nil)
) )
// TestServicePrincipalFileSuccess checks that, given a proper JSON file, we can create a token.
func TestServicePrincipalFileSuccess(t *testing.T) {
ctx := context.TODO()
credentials := `
{
"appId": "my application (client) ID",
"password": "my secret",
"tenant": "my active directory tenant ID"
}
`
tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
if assert.NoError(t, err) {
assert.NotNil(t, tokenRefresher)
}
}
// TestServicePrincipalFileFailure checks that, given a JSON file with a missing secret, it returns an error.
func TestServicePrincipalFileFailure(t *testing.T) {
ctx := context.TODO()
credentials := `
{
"appId": "my application (client) ID",
"tenant": "my active directory tenant ID"
}
`
_, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
assert.Error(t, err)
assert.EqualError(t, err, "error creating service principal token: parameter 'secret' cannot be empty")
}
func TestValidateAccessTier(t *testing.T) { func TestValidateAccessTier(t *testing.T) {
tests := map[string]struct { tests := map[string]struct {
accessTier string accessTier string

backend/azureblob/imds.go (new file, 137 lines changed)
View File

@@ -0,0 +1,137 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fshttp"
)
const (
azureResource = "https://storage.azure.com"
imdsAPIVersion = "2018-02-01"
msiEndpointDefault = "http://169.254.169.254/metadata/identity/oauth2/token"
)
// This custom type is used to add the port the test server has bound to
// to the request context.
type testPortKey string
type msiIdentifierType int
const (
msiClientID msiIdentifierType = iota
msiObjectID
msiResourceID
)
type userMSI struct {
Type msiIdentifierType
Value string
}
type httpError struct {
Response *http.Response
}
func (e httpError) Error() string {
return fmt.Sprintf("HTTP error %v (%v)", e.Response.StatusCode, e.Response.Status)
}
// GetMSIToken attempts to obtain an MSI token from the Azure Instance
// Metadata Service.
func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
// Attempt to get an MSI token; silently continue if unsuccessful.
// This code has been lovingly stolen from azcopy's OAuthTokenManager.
result := adal.Token{}
req, err := http.NewRequestWithContext(ctx, "GET", msiEndpointDefault, nil)
if err != nil {
fs.Debugf(nil, "Failed to create request: %v", err)
return result, err
}
params := req.URL.Query()
params.Set("resource", azureResource)
params.Set("api-version", imdsAPIVersion)
// Specify user-assigned identity if requested.
if identity != nil {
switch identity.Type {
case msiClientID:
params.Set("client_id", identity.Value)
case msiObjectID:
params.Set("object_id", identity.Value)
case msiResourceID:
params.Set("mi_res_id", identity.Value)
default:
// If this happens, the calling function and this one don't agree on
// what valid ID types exist.
return result, fmt.Errorf("unknown MSI identity type specified")
}
}
req.URL.RawQuery = params.Encode()
// The Metadata header is required by all calls to IMDS.
req.Header.Set("Metadata", "true")
// If this function is run in a test, query the test server instead of IMDS.
testPort, isTest := ctx.Value(testPortKey("testPort")).(int)
if isTest {
req.URL.Host = fmt.Sprintf("localhost:%d", testPort)
req.Host = req.URL.Host
}
// Send request
httpClient := fshttp.NewClient(ctx)
resp, err := httpClient.Do(req)
if err != nil {
return result, fmt.Errorf("MSI is not enabled on this VM: %w", err)
}
defer func() { // resp and Body should not be nil
_, err = io.Copy(ioutil.Discard, resp.Body)
if err != nil {
fs.Debugf(nil, "Unable to drain IMDS response: %v", err)
}
err = resp.Body.Close()
if err != nil {
fs.Debugf(nil, "Unable to close IMDS response: %v", err)
}
}()
// Check if the status code indicates success
// The request returns 200 currently, add 201 and 202 as well for possible extension.
switch resp.StatusCode {
case 200, 201, 202:
break
default:
body, _ := ioutil.ReadAll(resp.Body)
fs.Errorf(nil, "Couldn't obtain OAuth token from IMDS; server returned status code %d and body: %v", resp.StatusCode, string(body))
return result, httpError{Response: resp}
}
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return result, fmt.Errorf("couldn't read IMDS response: %w", err)
}
// Remove BOM, if any. azcopy does this so I'm following along.
b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
// This would be a good place to persist the token if a large number of rclone
// invocations are being made in a short amount of time. If the token is
// persisted, the azureblob code will need to check for expiry before every
// storage API call.
err = json.Unmarshal(b, &result)
if err != nil {
return result, fmt.Errorf("couldn't unmarshal IMDS response: %w", err)
}
return result, nil
}

View File

@@ -0,0 +1,118 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob
import (
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func handler(t *testing.T, actual *map[string]string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
require.NoError(t, err)
parameters := r.URL.Query()
(*actual)["path"] = r.URL.Path
(*actual)["Metadata"] = r.Header.Get("Metadata")
(*actual)["method"] = r.Method
for paramName := range parameters {
(*actual)[paramName] = parameters.Get(paramName)
}
// Make response.
response := adal.Token{}
responseBytes, err := json.Marshal(response)
require.NoError(t, err)
_, err = w.Write(responseBytes)
require.NoError(t, err)
}
}
func TestManagedIdentity(t *testing.T) {
// test user-assigned identity specifiers to use
testMSIClientID := "d859b29f-5c9c-42f8-a327-ec1bc6408d79"
testMSIObjectID := "9ffeb650-3ca0-4278-962b-5a38d520591a"
testMSIResourceID := "/subscriptions/fe714c49-b8a4-4d49-9388-96a20daa318f/resourceGroups/somerg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/someidentity"
tests := []struct {
identity *userMSI
identityParameterName string
expectedAbsent []string
}{
{&userMSI{msiClientID, testMSIClientID}, "client_id", []string{"object_id", "mi_res_id"}},
{&userMSI{msiObjectID, testMSIObjectID}, "object_id", []string{"client_id", "mi_res_id"}},
{&userMSI{msiResourceID, testMSIResourceID}, "mi_res_id", []string{"object_id", "client_id"}},
{nil, "(default)", []string{"object_id", "client_id", "mi_res_id"}},
}
alwaysExpected := map[string]string{
"path": "/metadata/identity/oauth2/token",
"resource": "https://storage.azure.com",
"Metadata": "true",
"api-version": "2018-02-01",
"method": "GET",
}
for _, test := range tests {
actual := make(map[string]string, 10)
testServer := httptest.NewServer(handler(t, &actual))
defer testServer.Close()
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
_, err = GetMSIToken(ctx, test.identity)
require.NoError(t, err)
// Validate expected query parameters present
expected := make(map[string]string)
for k, v := range alwaysExpected {
expected[k] = v
}
if test.identity != nil {
expected[test.identityParameterName] = test.identity.Value
}
for key := range expected {
value, exists := actual[key]
if assert.Truef(t, exists, "test of %s: query parameter %s was not passed",
test.identityParameterName, key) {
assert.Equalf(t, expected[key], value,
"test of %s: parameter %s has incorrect value", test.identityParameterName, key)
}
}
// Validate unexpected query parameters absent
for _, key := range test.expectedAbsent {
_, exists := actual[key]
assert.Falsef(t, exists, "query parameter %s was unexpectedly passed", key)
}
}
}
func errorHandler(resultCode int) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Test error generated", resultCode)
}
}
func TestIMDSErrors(t *testing.T) {
errorCodes := []int{404, 429, 500}
for _, code := range errorCodes {
testServer := httptest.NewServer(errorHandler(code))
defer testServer.Close()
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
_, err = GetMSIToken(ctx, nil)
require.Error(t, err)
httpErr, ok := err.(httpError)
require.Truef(t, ok, "HTTP error %d did not result in an httpError object", code)
assert.Equalf(t, httpErr.Response.StatusCode, code, "desired error %d but didn't get it", code)
}
}

View File

@@ -75,15 +75,13 @@ func init() {
Description: "Backblaze B2", Description: "Backblaze B2",
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "account", Name: "account",
Help: "Account ID or Application Key ID.", Help: "Account ID or Application Key ID.",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "key", Name: "key",
Help: "Application Key.", Help: "Application Key.",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "endpoint", Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.", Help: "Endpoint for the service.\n\nLeave blank normally.",
@@ -1223,7 +1221,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
fs.Errorf(object.Name, "Can't create object %v", err) fs.Errorf(object.Name, "Can't create object %v", err)
continue continue
} }
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "deleting") tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
err = f.deleteByID(ctx, object.ID, object.Name) err = f.deleteByID(ctx, object.ID, object.Name)
checkErr(err) checkErr(err)
tr.Done(ctx, err) tr.Done(ctx, err)
@@ -1237,7 +1235,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
if err != nil { if err != nil {
fs.Errorf(object, "Can't create object %+v", err) fs.Errorf(object, "Can't create object %+v", err)
} }
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking") tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
if oldOnly && last != remote { if oldOnly && last != remote {
// Check current version of the file // Check current version of the file
if object.Action == "hide" { if object.Action == "hide" {

View File

@@ -14,7 +14,6 @@ import (
"io" "io"
"strings" "strings"
"sync" "sync"
"time"
"github.com/rclone/rclone/backend/b2/api" "github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
@@ -22,7 +21,6 @@ import (
"github.com/rclone/rclone/fs/chunksize" "github.com/rclone/rclone/fs/chunksize"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit" "github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/rest" "github.com/rclone/rclone/lib/rest"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )
@@ -430,47 +428,18 @@ func (up *largeUpload) Upload(ctx context.Context) (err error) {
defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })() defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id) fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
var ( var (
g, gCtx = errgroup.WithContext(ctx) g, gCtx = errgroup.WithContext(ctx)
remaining = up.size remaining = up.size
uploadPool *pool.Pool
ci = fs.GetConfig(ctx)
) )
// If using large chunk size then make a temporary pool
if up.chunkSize <= int64(up.f.opt.ChunkSize) {
uploadPool = up.f.pool
} else {
uploadPool = pool.New(
time.Duration(up.f.opt.MemoryPoolFlushTime),
int(up.chunkSize),
ci.Transfers,
up.f.opt.MemoryPoolUseMmap,
)
defer uploadPool.Flush()
}
// Get an upload token and a buffer
getBuf := func() (buf []byte) {
up.f.getBuf(true)
if !up.doCopy {
buf = uploadPool.Get()
}
return buf
}
// Put an upload token and a buffer
putBuf := func(buf []byte) {
if !up.doCopy {
uploadPool.Put(buf)
}
up.f.putBuf(nil, true)
}
g.Go(func() error { g.Go(func() error {
for part := int64(1); part <= up.parts; part++ { for part := int64(1); part <= up.parts; part++ {
// Get a block of memory from the pool and token which limits concurrency. // Get a block of memory from the pool and token which limits concurrency.
buf := getBuf() buf := up.f.getBuf(up.doCopy)
// Fail fast, in case an errgroup managed function returns an error // Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts. // gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil { if gCtx.Err() != nil {
putBuf(buf) up.f.putBuf(buf, up.doCopy)
return nil return nil
} }
@@ -484,14 +453,14 @@ func (up *largeUpload) Upload(ctx context.Context) (err error) {
buf = buf[:reqSize] buf = buf[:reqSize]
_, err = io.ReadFull(up.in, buf) _, err = io.ReadFull(up.in, buf)
if err != nil { if err != nil {
putBuf(buf) up.f.putBuf(buf, up.doCopy)
return err return err
} }
} }
part := part // for the closure part := part // for the closure
g.Go(func() (err error) { g.Go(func() (err error) {
defer putBuf(buf) defer up.f.putBuf(buf, up.doCopy)
if !up.doCopy { if !up.doCopy {
err = up.transferChunk(gCtx, part, buf) err = up.transferChunk(gCtx, part, buf)
} else { } else {

View File

@@ -17,9 +17,9 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
"os"
"path" "path"
"strconv" "strconv"
"strings" "strings"
@@ -27,7 +27,6 @@ import (
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/golang-jwt/jwt/v4"
"github.com/rclone/rclone/backend/box/api" "github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config"
@@ -46,6 +45,7 @@ import (
"github.com/rclone/rclone/lib/rest" "github.com/rclone/rclone/lib/rest"
"github.com/youmark/pkcs8" "github.com/youmark/pkcs8"
"golang.org/x/oauth2" "golang.org/x/oauth2"
"golang.org/x/oauth2/jws"
) )
const ( const (
@@ -76,11 +76,6 @@ var (
} }
) )
type boxCustomClaims struct {
jwt.StandardClaims
BoxSubType string `json:"box_sub_type,omitempty"`
}
// Register with Fs // Register with Fs
func init() { func init() {
fs.Register(&fs.RegInfo{ fs.Register(&fs.RegInfo{
@@ -107,18 +102,16 @@ func init() {
return nil, nil return nil, nil
}, },
Options: append(oauthutil.SharedOptions, []fs.Option{{ Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "root_folder_id", Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.", Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "0", Default: "0",
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "box_config_file", Name: "box_config_file",
Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp, Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
}, { }, {
Name: "access_token", Name: "access_token",
Help: "Box App Primary Access Token\n\nLeave blank normally.", Help: "Box App Primary Access Token\n\nLeave blank normally.",
Sensitive: true,
}, { }, {
Name: "box_sub_type", Name: "box_sub_type",
Default: "user", Default: "user",
@@ -185,12 +178,12 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
signingHeaders := getSigningHeaders(boxConfig) signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig) queryParams := getQueryParams(boxConfig)
client := fshttp.NewClient(ctx) client := fshttp.NewClient(ctx)
err = jwtutil.Config("box", name, tokenURL, *claims, signingHeaders, queryParams, privateKey, m, client) err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
return err return err
} }
func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) { func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
file, err := os.ReadFile(configFile) file, err := ioutil.ReadFile(configFile)
if err != nil { if err != nil {
return nil, fmt.Errorf("box: failed to read Box config: %w", err) return nil, fmt.Errorf("box: failed to read Box config: %w", err)
} }
@@ -201,31 +194,34 @@ func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
return boxConfig, nil return boxConfig, nil
} }
func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomClaims, err error) { func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
val, err := jwtutil.RandomHex(20) val, err := jwtutil.RandomHex(20)
if err != nil { if err != nil {
return nil, fmt.Errorf("box: failed to generate random string for jti: %w", err) return nil, fmt.Errorf("box: failed to generate random string for jti: %w", err)
} }
claims = &boxCustomClaims{ claims = &jws.ClaimSet{
//lint:ignore SA1019 since we need to use jwt.StandardClaims even if deprecated in jwt-go v4 until a more permanent solution is ready in time before jwt-go v5 where it is removed entirely Iss: boxConfig.BoxAppSettings.ClientID,
//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1019 Sub: boxConfig.EnterpriseID,
StandardClaims: jwt.StandardClaims{ Aud: tokenURL,
Id: val, Exp: time.Now().Add(time.Second * 45).Unix(),
Issuer: boxConfig.BoxAppSettings.ClientID, PrivateClaims: map[string]interface{}{
Subject: boxConfig.EnterpriseID, "box_sub_type": boxSubType,
Audience: tokenURL, "aud": tokenURL,
ExpiresAt: time.Now().Add(time.Second * 45).Unix(), "jti": val,
}, },
BoxSubType: boxSubType,
} }
return claims, nil return claims, nil
} }
func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]interface{} { func getSigningHeaders(boxConfig *api.ConfigJSON) *jws.Header {
signingHeaders := map[string]interface{}{ signingHeaders := &jws.Header{
"kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID, Algorithm: "RS256",
Typ: "JWT",
KeyID: boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
} }
return signingHeaders return signingHeaders
} }

View File

@@ -76,19 +76,17 @@ func init() {
Name: "plex_url", Name: "plex_url",
Help: "The URL of the Plex server.", Help: "The URL of the Plex server.",
}, { }, {
Name: "plex_username", Name: "plex_username",
Help: "The username of the Plex user.", Help: "The username of the Plex user.",
Sensitive: true,
}, { }, {
Name: "plex_password", Name: "plex_password",
Help: "The password of the Plex user.", Help: "The password of the Plex user.",
IsPassword: true, IsPassword: true,
}, { }, {
Name: "plex_token", Name: "plex_token",
Help: "The plex token for authentication - auto set normally.", Help: "The plex token for authentication - auto set normally.",
Hide: fs.OptionHideBoth, Hide: fs.OptionHideBoth,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "plex_insecure", Name: "plex_insecure",
Help: "Skip all certificate verification when connecting to the Plex server.", Help: "Skip all certificate verification when connecting to the Plex server.",
@@ -1040,7 +1038,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
} }
fs.Debugf(dir, "list: remove entry: %v", entryRemote) fs.Debugf(dir, "list: remove entry: %v", entryRemote)
} }
entries = nil //nolint:ineffassign entries = nil
// and then iterate over the ones from source (temp Objects will override source ones) // and then iterate over the ones from source (temp Objects will override source ones)
var batchDirectories []*Directory var batchDirectories []*Directory
@@ -1789,7 +1787,7 @@ func (f *Fs) CleanUpCache(ignoreLastTs bool) {
} }
} }
// StopBackgroundRunners will signal all the runners to stop their work // StopBackgroundRunners will signall all the runners to stop their work
// can be triggered from a terminate signal or from testing between runs // can be triggered from a terminate signal or from testing between runs
func (f *Fs) StopBackgroundRunners() { func (f *Fs) StopBackgroundRunners() {
f.cleanupChan <- false f.cleanupChan <- false

View File

@@ -11,6 +11,7 @@ import (
goflag "flag" goflag "flag"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"log" "log"
"math/rand" "math/rand"
"os" "os"
@@ -101,12 +102,14 @@ func TestMain(m *testing.M) {
func TestInternalListRootAndInnerRemotes(t *testing.T) { func TestInternalListRootAndInnerRemotes(t *testing.T) {
id := fmt.Sprintf("tilrair%v", time.Now().Unix()) id := fmt.Sprintf("tilrair%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
// Instantiate inner fs // Instantiate inner fs
innerFolder := "inner" innerFolder := "inner"
runInstance.mkdir(t, rootFs, innerFolder) runInstance.mkdir(t, rootFs, innerFolder)
rootFs2, _ := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil) rootFs2, boltDb2 := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs2, boltDb2)
runInstance.writeObjectString(t, rootFs2, "one", "content") runInstance.writeObjectString(t, rootFs2, "one", "content")
listRoot, err := runInstance.list(t, rootFs, "") listRoot, err := runInstance.list(t, rootFs, "")
@@ -164,7 +167,7 @@ func TestInternalVfsCache(t *testing.T) {
li2 := [2]string{path.Join("test", "one"), path.Join("test", "second")} li2 := [2]string{path.Join("test", "one"), path.Join("test", "second")}
for _, r := range li2 { for _, r := range li2 {
var err error var err error
ci, err := os.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r)))) ci, err := ioutil.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
if err != nil || len(ci) == 0 { if err != nil || len(ci) == 0 {
log.Printf("========== '%v' not in cache", r) log.Printf("========== '%v' not in cache", r)
} else { } else {
@@ -223,7 +226,8 @@ func TestInternalVfsCache(t *testing.T) {
func TestInternalObjWrapFsFound(t *testing.T) { func TestInternalObjWrapFsFound(t *testing.T) {
id := fmt.Sprintf("tiowff%v", time.Now().Unix()) id := fmt.Sprintf("tiowff%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs) cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err) require.NoError(t, err)
@@ -255,7 +259,8 @@ func TestInternalObjWrapFsFound(t *testing.T) {
func TestInternalObjNotFound(t *testing.T) { func TestInternalObjNotFound(t *testing.T) {
id := fmt.Sprintf("tionf%v", time.Now().Unix()) id := fmt.Sprintf("tionf%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
obj, err := rootFs.NewObject(context.Background(), "404") obj, err := rootFs.NewObject(context.Background(), "404")
require.Error(t, err) require.Error(t, err)
@@ -265,7 +270,8 @@ func TestInternalObjNotFound(t *testing.T) {
func TestInternalCachedWrittenContentMatches(t *testing.T) { func TestInternalCachedWrittenContentMatches(t *testing.T) {
testy.SkipUnreliable(t) testy.SkipUnreliable(t)
id := fmt.Sprintf("ticwcm%v", time.Now().Unix()) id := fmt.Sprintf("ticwcm%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs) cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err) require.NoError(t, err)
@@ -292,7 +298,8 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
t.Skip("Skip test on windows/386") t.Skip("Skip test on windows/386")
} }
id := fmt.Sprintf("tidwcm%v", time.Now().Unix()) id := fmt.Sprintf("tidwcm%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
// write the object // write the object
runInstance.writeRemoteString(t, rootFs, "one", "one content") runInstance.writeRemoteString(t, rootFs, "one", "one content")
@@ -310,7 +317,8 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
func TestInternalCachedUpdatedContentMatches(t *testing.T) { func TestInternalCachedUpdatedContentMatches(t *testing.T) {
testy.SkipUnreliable(t) testy.SkipUnreliable(t)
id := fmt.Sprintf("ticucm%v", time.Now().Unix()) id := fmt.Sprintf("ticucm%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
var err error var err error
// create some rand test data // create some rand test data
@@ -339,7 +347,8 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
func TestInternalWrappedWrittenContentMatches(t *testing.T) { func TestInternalWrappedWrittenContentMatches(t *testing.T) {
id := fmt.Sprintf("tiwwcm%v", time.Now().Unix()) id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
vfsflags.Opt.DirCacheTime = time.Second vfsflags.Opt.DirCacheTime = time.Second
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
if runInstance.rootIsCrypt { if runInstance.rootIsCrypt {
t.Skip("test skipped with crypt remote") t.Skip("test skipped with crypt remote")
} }
@@ -369,7 +378,8 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
func TestInternalLargeWrittenContentMatches(t *testing.T) { func TestInternalLargeWrittenContentMatches(t *testing.T) {
id := fmt.Sprintf("tilwcm%v", time.Now().Unix()) id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
vfsflags.Opt.DirCacheTime = time.Second vfsflags.Opt.DirCacheTime = time.Second
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
if runInstance.rootIsCrypt { if runInstance.rootIsCrypt {
t.Skip("test skipped with crypt remote") t.Skip("test skipped with crypt remote")
} }
@@ -395,7 +405,8 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {
func TestInternalWrappedFsChangeNotSeen(t *testing.T) { func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
id := fmt.Sprintf("tiwfcns%v", time.Now().Unix()) id := fmt.Sprintf("tiwfcns%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs) cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err) require.NoError(t, err)
@@ -449,7 +460,8 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
func TestInternalMoveWithNotify(t *testing.T) { func TestInternalMoveWithNotify(t *testing.T) {
id := fmt.Sprintf("timwn%v", time.Now().Unix()) id := fmt.Sprintf("timwn%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
if !runInstance.wrappedIsExternal { if !runInstance.wrappedIsExternal {
t.Skipf("Not external") t.Skipf("Not external")
} }
@@ -535,7 +547,8 @@ func TestInternalMoveWithNotify(t *testing.T) {
func TestInternalNotifyCreatesEmptyParts(t *testing.T) { func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
id := fmt.Sprintf("tincep%v", time.Now().Unix()) id := fmt.Sprintf("tincep%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
if !runInstance.wrappedIsExternal { if !runInstance.wrappedIsExternal {
t.Skipf("Not external") t.Skipf("Not external")
} }
@@ -621,7 +634,8 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) { func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
id := fmt.Sprintf("ticsadcf%v", time.Now().Unix()) id := fmt.Sprintf("ticsadcf%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs) cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err) require.NoError(t, err)
@@ -653,7 +667,8 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
func TestInternalCacheWrites(t *testing.T) { func TestInternalCacheWrites(t *testing.T) {
id := "ticw" id := "ticw"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"writes": "true"}) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs) cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err) require.NoError(t, err)
@@ -674,7 +689,8 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
t.Skip("Skip test on windows/386") t.Skip("Skip test on windows/386")
} }
id := fmt.Sprintf("timcsr%v", time.Now().Unix()) id := fmt.Sprintf("timcsr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"workers": "1"}) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs) cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err) require.NoError(t, err)
@@ -709,7 +725,8 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
func TestInternalExpiredEntriesRemoved(t *testing.T) { func TestInternalExpiredEntriesRemoved(t *testing.T) {
id := fmt.Sprintf("tieer%v", time.Now().Unix()) id := fmt.Sprintf("tieer%v", time.Now().Unix())
vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, map[string]string{"info_age": "5s"}, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs) cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err) require.NoError(t, err)
@@ -746,7 +763,9 @@ func TestInternalBug2117(t *testing.T) {
vfsflags.Opt.DirCacheTime = time.Second * 10 vfsflags.Opt.DirCacheTime = time.Second * 10
id := fmt.Sprintf("tib2117%v", time.Now().Unix()) id := fmt.Sprintf("tib2117%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"}) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil,
map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
if runInstance.rootIsCrypt { if runInstance.rootIsCrypt {
t.Skipf("skipping crypt") t.Skipf("skipping crypt")
@@ -822,7 +841,7 @@ func newRun() *run {
} }
if uploadDir == "" { if uploadDir == "" {
r.tmpUploadDir, err = os.MkdirTemp("", "rclonecache-tmp") r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
if err != nil { if err != nil {
panic(fmt.Sprintf("Failed to create temp dir: %v", err)) panic(fmt.Sprintf("Failed to create temp dir: %v", err))
} }
@@ -847,7 +866,7 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
return enc return enc
} }
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) { func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, cfg map[string]string, flags map[string]string) (fs.Fs, *cache.Persistent) {
fstest.Initialise() fstest.Initialise()
remoteExists := false remoteExists := false
for _, s := range config.FileSections() { for _, s := range config.FileSections() {
@@ -940,15 +959,10 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
} }
err = f.Mkdir(context.Background(), "") err = f.Mkdir(context.Background(), "")
require.NoError(t, err) require.NoError(t, err)
t.Cleanup(func() {
runInstance.cleanupFs(t, f)
})
return f, boltDb return f, boltDb
} }
func (r *run) cleanupFs(t *testing.T, f fs.Fs) { func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
err := f.Features().Purge(context.Background(), "") err := f.Features().Purge(context.Background(), "")
require.NoError(t, err) require.NoError(t, err)
cfs, err := r.getCacheFs(f) cfs, err := r.getCacheFs(f)
@@ -970,7 +984,7 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
chunk := int64(1024) chunk := int64(1024)
cnt := size / chunk cnt := size / chunk
left := size % chunk left := size % chunk
f, err := os.CreateTemp("", "rclonecache-tempfile") f, err := ioutil.TempFile("", "rclonecache-tempfile")
require.NoError(t, err) require.NoError(t, err)
for i := 0; i < int(cnt); i++ { for i := 0; i < int(cnt); i++ {
@@ -1098,6 +1112,27 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error)
return l, err return l, err
} }
func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
in, err := os.Open(src)
if err != nil {
return err
}
defer func() {
_ = in.Close()
}()
out, err := os.Create(dst)
if err != nil {
return err
}
defer func() {
_ = out.Close()
}()
_, err = io.Copy(out, in)
return err
}
func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error { func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
var err error var err error
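
A note on the two cleanup styles this file's hunks move between: registering teardown inside the test helper with t.Cleanup means callers need no explicit defer, while the defer style keeps teardown visible in each test. A minimal, self-contained sketch — the fakeFs type and names below are hypothetical, not rclone's:

    package example

    import "testing"

    // fakeFs stands in for the cache Fs handle used by these tests (hypothetical).
    type fakeFs struct{ closed bool }

    func (f *fakeFs) close() { f.closed = true }

    // newFakeFs owns teardown: t.Cleanup runs after the test and its subtests
    // finish, so callers do not need to remember a defer.
    func newFakeFs(t *testing.T) *fakeFs {
        f := &fakeFs{}
        t.Cleanup(func() { f.close() })
        return f
    }

    // TestDeferStyle shows the alternative where each test defers its own cleanup.
    func TestDeferStyle(t *testing.T) {
        f := &fakeFs{}
        defer f.close()
        _ = f
    }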


@@ -21,8 +21,10 @@ import (
func TestInternalUploadTempDirCreated(t *testing.T) { func TestInternalUploadTempDirCreated(t *testing.T) {
id := fmt.Sprintf("tiutdc%v", time.Now().Unix()) id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
runInstance.newCacheFs(t, remoteName, id, false, true, rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)}) map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)})
defer runInstance.cleanupFs(t, rootFs, boltDb)
_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id)) _, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
require.NoError(t, err) require.NoError(t, err)
@@ -61,7 +63,9 @@ func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltD
func TestInternalUploadQueueOneFileNoRest(t *testing.T) { func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix()) id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"}) map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb) testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
} }
@@ -69,15 +73,19 @@ func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
func TestInternalUploadQueueOneFileWithRest(t *testing.T) { func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix()) id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"}) map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb) testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
} }
func TestInternalUploadMoveExistingFile(t *testing.T) { func TestInternalUploadMoveExistingFile(t *testing.T) {
id := fmt.Sprintf("tiumef%v", time.Now().Unix()) id := fmt.Sprintf("tiumef%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"}) map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "one") err := rootFs.Mkdir(context.Background(), "one")
require.NoError(t, err) require.NoError(t, err)
@@ -111,8 +119,10 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {
func TestInternalUploadTempPathCleaned(t *testing.T) { func TestInternalUploadTempPathCleaned(t *testing.T) {
id := fmt.Sprintf("tiutpc%v", time.Now().Unix()) id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"}) map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "one") err := rootFs.Mkdir(context.Background(), "one")
require.NoError(t, err) require.NoError(t, err)
@@ -152,19 +162,21 @@ func TestInternalUploadTempPathCleaned(t *testing.T) {
func TestInternalUploadQueueMoreFiles(t *testing.T) { func TestInternalUploadQueueMoreFiles(t *testing.T) {
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix()) id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"}) map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "test") err := rootFs.Mkdir(context.Background(), "test")
require.NoError(t, err) require.NoError(t, err)
minSize := 5242880 minSize := 5242880
maxSize := 10485760 maxSize := 10485760
totalFiles := 10 totalFiles := 10
randInstance := rand.New(rand.NewSource(time.Now().Unix())) rand.Seed(time.Now().Unix())
lastFile := "" lastFile := ""
for i := 0; i < totalFiles; i++ { for i := 0; i < totalFiles; i++ {
size := int64(randInstance.Intn(maxSize-minSize) + minSize) size := int64(rand.Intn(maxSize-minSize) + minSize)
testReader := runInstance.randomReader(t, size) testReader := runInstance.randomReader(t, size)
remote := "test/" + strconv.Itoa(i) + ".bin" remote := "test/" + strconv.Itoa(i) + ".bin"
runInstance.writeRemoteReader(t, rootFs, remote, testReader) runInstance.writeRemoteReader(t, rootFs, remote, testReader)
@@ -201,7 +213,9 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
func TestInternalUploadTempFileOperations(t *testing.T) { func TestInternalUploadTempFileOperations(t *testing.T) {
id := "tiutfo" id := "tiutfo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"}) map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads() boltDb.PurgeTempUploads()
@@ -329,7 +343,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
func TestInternalUploadUploadingFileOperations(t *testing.T) { func TestInternalUploadUploadingFileOperations(t *testing.T) {
id := "tiuufo" id := "tiuufo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"}) map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads() boltDb.PurgeTempUploads()


@@ -8,7 +8,7 @@ import (
"crypto/tls" "crypto/tls"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
"strings" "strings"
@@ -167,7 +167,7 @@ func (p *plexConnector) listenWebsocket() {
continue continue
} }
var data []byte var data []byte
data, err = io.ReadAll(resp.Body) data, err = ioutil.ReadAll(resp.Body)
if err != nil { if err != nil {
continue continue
} }


@@ -9,6 +9,7 @@ import (
"encoding/binary" "encoding/binary"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io/ioutil"
"os" "os"
"path" "path"
"strconv" "strconv"
@@ -472,7 +473,7 @@ func (b *Persistent) GetChunk(cachedObject *Object, offset int64) ([]byte, error
var data []byte var data []byte
fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10)) fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10))
data, err := os.ReadFile(fp) data, err := ioutil.ReadFile(fp)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -485,7 +486,7 @@ func (b *Persistent) AddChunk(fp string, data []byte, offset int64) error {
_ = os.MkdirAll(path.Join(b.dataPath, fp), os.ModePerm) _ = os.MkdirAll(path.Join(b.dataPath, fp), os.ModePerm)
filePath := path.Join(b.dataPath, fp, strconv.FormatInt(offset, 10)) filePath := path.Join(b.dataPath, fp, strconv.FormatInt(offset, 10))
err := os.WriteFile(filePath, data, os.ModePerm) err := ioutil.WriteFile(filePath, data, os.ModePerm)
if err != nil { if err != nil {
return err return err
} }
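
The GetChunk/AddChunk hunks above read and write one file per cached chunk, named by its byte offset under the cached object's directory. A minimal sketch of that mapping — the example paths are made up:

    package example

    import (
        "path"
        "strconv"
    )

    // chunkPath mirrors the fp construction above:
    // <dataPath>/<object abs path>/<offset>.
    func chunkPath(dataPath, objectAbs string, offset int64) string {
        return path.Join(dataPath, objectAbs, strconv.FormatInt(offset, 10))
    }

    // chunkPath("/cache/data", "remote/dir/file.bin", 5242880)
    // returns "/cache/data/remote/dir/file.bin/5242880".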


@@ -12,6 +12,7 @@ import (
"fmt" "fmt"
gohash "hash" gohash "hash"
"io" "io"
"io/ioutil"
"math/rand" "math/rand"
"path" "path"
"regexp" "regexp"
@@ -1037,7 +1038,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
if err != nil { if err != nil {
return err return err
} }
metadata, err := io.ReadAll(reader) metadata, err := ioutil.ReadAll(reader)
_ = reader.Close() // ensure file handle is freed on windows _ = reader.Close() // ensure file handle is freed on windows
if err != nil { if err != nil {
return err return err
@@ -1096,7 +1097,7 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
if err != nil { if err != nil {
return "", err return "", err
} }
data, err := io.ReadAll(reader) data, err := ioutil.ReadAll(reader)
_ = reader.Close() // ensure file handle is freed on windows _ = reader.Close() // ensure file handle is freed on windows
if err != nil { if err != nil {
return "", err return "", err


@@ -5,7 +5,7 @@ import (
"context" "context"
"flag" "flag"
"fmt" "fmt"
"io" "io/ioutil"
"path" "path"
"regexp" "regexp"
"strings" "strings"
@@ -413,7 +413,7 @@ func testSmallFileInternals(t *testing.T, f *Fs) {
if r == nil { if r == nil {
return return
} }
data, err := io.ReadAll(r) data, err := ioutil.ReadAll(r)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, contents, string(data)) assert.Equal(t, contents, string(data))
_ = r.Close() _ = r.Close()
@@ -538,7 +538,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
assert.NoError(t, err) assert.NoError(t, err)
var chunkContents []byte var chunkContents []byte
assert.NotPanics(t, func() { assert.NotPanics(t, func() {
chunkContents, err = io.ReadAll(r) chunkContents, err = ioutil.ReadAll(r)
_ = r.Close() _ = r.Close()
}) })
assert.NoError(t, err) assert.NoError(t, err)
@@ -573,7 +573,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
r, err = willyChunk.Open(ctx) r, err = willyChunk.Open(ctx)
assert.NoError(t, err) assert.NoError(t, err)
assert.NotPanics(t, func() { assert.NotPanics(t, func() {
_, err = io.ReadAll(r) _, err = ioutil.ReadAll(r)
_ = r.Close() _ = r.Close()
}) })
assert.NoError(t, err) assert.NoError(t, err)
@@ -672,7 +672,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
assert.NoError(t, err, "open "+description) assert.NoError(t, err, "open "+description)
assert.NotNil(t, r, "open stream of "+description) assert.NotNil(t, r, "open stream of "+description)
if err == nil && r != nil { if err == nil && r != nil {
data, err := io.ReadAll(r) data, err := ioutil.ReadAll(r)
assert.NoError(t, err, "read all of "+description) assert.NoError(t, err, "read all of "+description)
assert.Equal(t, contents, string(data), description+" contents is ok") assert.Equal(t, contents, string(data), description+" contents is ok")
_ = r.Close() _ = r.Close()
@@ -758,8 +758,8 @@ func testFutureProof(t *testing.T, f *Fs) {
assert.Error(t, err) assert.Error(t, err)
// Rcat must fail // Rcat must fail
in := io.NopCloser(bytes.NewBufferString("abc")) in := ioutil.NopCloser(bytes.NewBufferString("abc"))
robj, err := operations.Rcat(ctx, f, file, in, modTime, nil) robj, err := operations.Rcat(ctx, f, file, in, modTime)
assert.Nil(t, robj) assert.Nil(t, robj)
assert.NotNil(t, err) assert.NotNil(t, err)
if err != nil { if err != nil {
@@ -854,7 +854,7 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
r, err := dstFile.Open(ctx) r, err := dstFile.Open(ctx)
assert.NoError(t, err) assert.NoError(t, err)
assert.NotNil(t, r) assert.NotNil(t, r)
data, err := io.ReadAll(r) data, err := ioutil.ReadAll(r)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, contents, string(data)) assert.Equal(t, contents, string(data))
_ = r.Close() _ = r.Close()


@@ -1,4 +1,4 @@
// Package combine implements a backend to combine multiple remotes in a directory tree // Package combine implents a backend to combine multiple remotes in a directory tree
package combine package combine
/* /*
@@ -233,7 +233,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
ReadMetadata: true, ReadMetadata: true,
WriteMetadata: true, WriteMetadata: true,
UserMetadata: true, UserMetadata: true,
PartialUploads: true,
}).Fill(ctx, f) }).Fill(ctx, f)
canMove := true canMove := true
for _, u := range f.upstreams { for _, u := range f.upstreams {
@@ -290,16 +289,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
} }
} }
// Enable CleanUp when any upstreams support it
if features.CleanUp == nil {
for _, u := range f.upstreams {
if u.f.Features().CleanUp != nil {
features.CleanUp = f.CleanUp
break
}
}
}
// Enable ChangeNotify when any upstreams support it // Enable ChangeNotify when any upstreams support it
if features.ChangeNotify == nil { if features.ChangeNotify == nil {
for _, u := range f.upstreams { for _, u := range f.upstreams {
@@ -310,9 +299,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
} }
} }
// show that we wrap other backends
features.Overlay = true
f.features = features f.features = features
// Get common intersection of hashes // Get common intersection of hashes
@@ -365,7 +351,7 @@ func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream
return g.Wait() return g.Wait()
} }
// join the elements together but unlike path.Join return empty string // join the elements together but unline path.Join return empty string
func join(elem ...string) string { func join(elem ...string) string {
result := path.Join(elem...) result := path.Join(elem...)
if result == "." { if result == "." {
@@ -645,7 +631,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
if err != nil { if err != nil {
return nil, err return nil, err
} }
uSrc := fs.NewOverrideRemote(src, uRemote) uSrc := operations.NewOverrideRemote(src, uRemote)
var o fs.Object var o fs.Object
if stream { if stream {
o, err = u.f.Features().PutStream(ctx, in, uSrc, options...) o, err = u.f.Features().PutStream(ctx, in, uSrc, options...)
@@ -901,100 +887,6 @@ func (f *Fs) Shutdown(ctx context.Context) error {
}) })
} }
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
u, uRemote, err := f.findUpstream(remote)
if err != nil {
return "", err
}
do := u.f.Features().PublicLink
if do == nil {
return "", fs.ErrorNotImplemented
}
return do(ctx, uRemote, expire, unlink)
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
//
// May create duplicates or return errors if src already
// exists.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
srcPath := src.Remote()
u, uRemote, err := f.findUpstream(srcPath)
if err != nil {
return nil, err
}
do := u.f.Features().PutUnchecked
if do == nil {
return nil, fs.ErrorNotImplemented
}
uSrc := fs.NewOverrideRemote(src, uRemote)
return do(ctx, in, uSrc, options...)
}
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
if len(dirs) == 0 {
return nil
}
var (
u *upstream
uDirs []fs.Directory
)
for _, dir := range dirs {
uNew, uDir, err := f.findUpstream(dir.Remote())
if err != nil {
return err
}
if u == nil {
u = uNew
} else if u != uNew {
return fmt.Errorf("can't merge directories from different upstreams")
}
uDirs = append(uDirs, fs.NewOverrideDirectory(dir, uDir))
}
do := u.f.Features().MergeDirs
if do == nil {
return fs.ErrorNotImplemented
}
return do(ctx, uDirs)
}
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
func (f *Fs) CleanUp(ctx context.Context) error {
return f.multithread(ctx, func(ctx context.Context, u *upstream) error {
if do := u.f.Features().CleanUp; do != nil {
return do(ctx)
}
return nil
})
}
// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
u, uRemote, err := f.findUpstream(remote)
if err != nil {
return nil, err
}
do := u.f.Features().OpenWriterAt
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx, uRemote, size)
}
// Object describes a wrapped Object // Object describes a wrapped Object
// //
// This is a wrapped Object which knows its path prefix // This is a wrapped Object which knows its path prefix
@@ -1024,7 +916,7 @@ func (o *Object) String() string {
func (o *Object) Remote() string { func (o *Object) Remote() string {
newPath, err := o.u.pathAdjustment.do(o.Object.String()) newPath, err := o.u.pathAdjustment.do(o.Object.String())
if err != nil { if err != nil {
fs.Errorf(o.Object, "Bad object: %v", err) fs.Errorf(o, "Bad object: %v", err)
return err.Error() return err.Error()
} }
return newPath return newPath
@@ -1096,10 +988,5 @@ var (
_ fs.Abouter = (*Fs)(nil) _ fs.Abouter = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil) _ fs.ListRer = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil) _ fs.Shutdowner = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.OpenWriterAter = (*Fs)(nil)
_ fs.FullObject = (*Object)(nil) _ fs.FullObject = (*Object)(nil)
) )

View File

@@ -10,11 +10,6 @@ import (
"github.com/rclone/rclone/fstest/fstests" "github.com/rclone/rclone/fstest/fstests"
) )
var (
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect"}
unimplementableObjectMethods = []string{}
)
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) { func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" { if *fstest.RemoteName == "" {
@@ -22,8 +17,8 @@ func TestIntegration(t *testing.T) {
} }
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName, RemoteName: *fstest.RemoteName,
UnimplementableFsMethods: unimplementableFsMethods, UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: unimplementableObjectMethods, UnimplementableObjectMethods: []string{"MimeType"},
}) })
} }
@@ -40,9 +35,7 @@ func TestLocal(t *testing.T) {
{Name: name, Key: "type", Value: "combine"}, {Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams}, {Name: name, Key: "upstreams", Value: upstreams},
}, },
QuickTestOK: true, QuickTestOK: true,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
}) })
} }
@@ -58,9 +51,7 @@ func TestMemory(t *testing.T) {
{Name: name, Key: "type", Value: "combine"}, {Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams}, {Name: name, Key: "upstreams", Value: upstreams},
}, },
QuickTestOK: true, QuickTestOK: true,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
}) })
} }
@@ -77,8 +68,6 @@ func TestMixed(t *testing.T) {
{Name: name, Key: "type", Value: "combine"}, {Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams}, {Name: name, Key: "upstreams", Value: upstreams},
}, },
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
}) })
} }


@@ -13,6 +13,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"os" "os"
"regexp" "regexp"
"strings" "strings"
@@ -28,7 +29,6 @@ import (
"github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/operations"
) )
@@ -186,7 +186,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
ReadMetadata: true, ReadMetadata: true,
WriteMetadata: true, WriteMetadata: true,
UserMetadata: true, UserMetadata: true,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs) }).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
// We support reading MIME types no matter the wrapped fs // We support reading MIME types no matter the wrapped fs
f.features.ReadMimeType = true f.features.ReadMimeType = true
@@ -368,16 +367,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
meta, err := readMetadata(ctx, mo) meta := readMetadata(ctx, mo)
if err != nil { if meta == nil {
return nil, fmt.Errorf("error decoding metadata: %w", err) return nil, errors.New("error decoding metadata")
} }
// Create our Object // Create our Object
o, err := f.Fs.NewObject(ctx, makeDataName(remote, meta.CompressionMetadata.Size, meta.Mode)) o, err := f.Fs.NewObject(ctx, makeDataName(remote, meta.CompressionMetadata.Size, meta.Mode))
if err != nil { return f.newObject(o, mo, meta), err
return nil, err
}
return f.newObject(o, mo, meta), nil
} }
// checkCompressAndType checks if an object is compressible and determines its MIME type	// checkCompressAndType checks if an object is compressible and determines its MIME type
@@ -468,7 +464,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
} }
fs.Debugf(f, "Target remote doesn't support streaming uploads, creating temporary local file") fs.Debugf(f, "Target remote doesn't support streaming uploads, creating temporary local file")
tempFile, err := os.CreateTemp("", "rclone-press-") tempFile, err := ioutil.TempFile("", "rclone-press-")
defer func() { defer func() {
// these errors should be relatively uncritical and the upload should've succeeded so it's okay-ish // these errors should be relatively uncritical and the upload should've succeeded so it's okay-ish
// to ignore them // to ignore them
@@ -546,8 +542,8 @@ func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, o
} }
// Transfer the data // Transfer the data
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options) o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx), options)
//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx)) //o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx))
if err != nil { if err != nil {
if o != nil { if o != nil {
removeErr := o.Remove(ctx) removeErr := o.Remove(ctx)
@@ -681,7 +677,7 @@ func (f *Fs) putWithCustomFunctions(ctx context.Context, in io.Reader, src fs.Ob
} }
return nil, err return nil, err
} }
return f.newObject(dataObject, mo, meta), nil return f.newObject(dataObject, mo, meta), err
} }
// Put in to the remote path with the modTime given of the given size // Put in to the remote path with the modTime given of the given size
@@ -1044,19 +1040,24 @@ func newMetadata(size int64, mode int, cmeta sgzip.GzipMetadata, md5 string, mim
} }
// This function will read the metadata from a metadata object. // This function will read the metadata from a metadata object.
func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata, err error) { func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata) {
// Open our metadata object	// Open our metadata object
rc, err := mo.Open(ctx) rc, err := mo.Open(ctx)
if err != nil { if err != nil {
return nil, err return nil
} }
defer fs.CheckClose(rc, &err) defer func() {
err := rc.Close()
if err != nil {
fs.Errorf(mo, "Error closing object: %v", err)
}
}()
jr := json.NewDecoder(rc) jr := json.NewDecoder(rc)
meta = new(ObjectMetadata) meta = new(ObjectMetadata)
if err = jr.Decode(meta); err != nil { if err = jr.Decode(meta); err != nil {
return nil, err return nil
} }
return meta, nil return meta
} }
// Remove removes this object // Remove removes this object
@@ -1101,9 +1102,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
origName := o.Remote() origName := o.Remote()
if o.meta.Mode != Uncompressed || compressible { if o.meta.Mode != Uncompressed || compressible {
newObject, err = o.f.putWithCustomFunctions(ctx, in, o.f.wrapInfo(src, origName, src.Size()), options, o.f.Fs.Put, updateMeta, compressible, mimeType) newObject, err = o.f.putWithCustomFunctions(ctx, in, o.f.wrapInfo(src, origName, src.Size()), options, o.f.Fs.Put, updateMeta, compressible, mimeType)
if err != nil {
return err
}
if newObject.Object.Remote() != o.Object.Remote() { if newObject.Object.Remote() != o.Object.Remote() {
if removeErr := o.Object.Remove(ctx); removeErr != nil { if removeErr := o.Object.Remove(ctx); removeErr != nil {
return removeErr return removeErr
@@ -1117,9 +1115,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} }
// If we are, just update the object and metadata // If we are, just update the object and metadata
newObject, err = o.f.putWithCustomFunctions(ctx, in, src, options, update, updateMeta, compressible, mimeType) newObject, err = o.f.putWithCustomFunctions(ctx, in, src, options, update, updateMeta, compressible, mimeType)
if err != nil { }
return err if err != nil {
} return err
} }
// Update object metadata and return // Update object metadata and return
o.Object = newObject.Object o.Object = newObject.Object
@@ -1130,9 +1128,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// This will initialize the variables of a new press Object. The metadata object, mo, and metadata struct, meta, must be specified. // This will initialize the variables of a new press Object. The metadata object, mo, and metadata struct, meta, must be specified.
func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object { func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object {
if o == nil {
log.Trace(nil, "newObject(%#v, %#v, %#v) called with nil o", o, mo, meta)
}
return &Object{ return &Object{
Object: o, Object: o,
f: f, f: f,
@@ -1145,9 +1140,6 @@ func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object
// This initializes the variables of a press Object with only the size. The metadata will be loaded later on demand. // This initializes the variables of a press Object with only the size. The metadata will be loaded later on demand.
func (f *Fs) newObjectSizeAndNameOnly(o fs.Object, moName string, size int64) *Object { func (f *Fs) newObjectSizeAndNameOnly(o fs.Object, moName string, size int64) *Object {
if o == nil {
log.Trace(nil, "newObjectSizeAndNameOnly(%#v, %#v, %#v) called with nil o", o, moName, size)
}
return &Object{ return &Object{
Object: o, Object: o,
f: f, f: f,
@@ -1175,7 +1167,7 @@ func (o *Object) loadMetadataIfNotLoaded(ctx context.Context) (err error) {
return err return err
} }
if o.meta == nil { if o.meta == nil {
o.meta, err = readMetadata(ctx, o.mo) o.meta = readMetadata(ctx, o.mo)
} }
return err return err
} }


@@ -21,7 +21,6 @@ import (
"github.com/rclone/rclone/backend/crypt/pkcs7" "github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/version" "github.com/rclone/rclone/lib/version"
"github.com/rfjakob/eme" "github.com/rfjakob/eme"
"golang.org/x/crypto/nacl/secretbox" "golang.org/x/crypto/nacl/secretbox"
@@ -38,6 +37,7 @@ const (
blockHeaderSize = secretbox.Overhead blockHeaderSize = secretbox.Overhead
blockDataSize = 64 * 1024 blockDataSize = 64 * 1024
blockSize = blockHeaderSize + blockDataSize blockSize = blockHeaderSize + blockDataSize
encryptedSuffix = ".bin" // when file name encryption is off we add this suffix to make sure the cloud provider doesn't process the file
) )
// Errors returned by cipher // Errors returned by cipher
@@ -53,9 +53,8 @@ var (
ErrorEncryptedBadBlock = errors.New("failed to authenticate decrypted block - bad password?") ErrorEncryptedBadBlock = errors.New("failed to authenticate decrypted block - bad password?")
ErrorBadBase32Encoding = errors.New("bad base32 filename encoding") ErrorBadBase32Encoding = errors.New("bad base32 filename encoding")
ErrorFileClosed = errors.New("file already closed") ErrorFileClosed = errors.New("file already closed")
ErrorNotAnEncryptedFile = errors.New("not an encrypted file - does not match suffix") ErrorNotAnEncryptedFile = errors.New("not an encrypted file - no \"" + encryptedSuffix + "\" suffix")
ErrorBadSeek = errors.New("Seek beyond end of file") ErrorBadSeek = errors.New("Seek beyond end of file")
ErrorSuffixMissingDot = errors.New("suffix config setting should include a '.'")
defaultSalt = []byte{0xA8, 0x0D, 0xF4, 0x3A, 0x8F, 0xBD, 0x03, 0x08, 0xA7, 0xCA, 0xB8, 0x3E, 0x58, 0x1F, 0x86, 0xB1} defaultSalt = []byte{0xA8, 0x0D, 0xF4, 0x3A, 0x8F, 0xBD, 0x03, 0x08, 0xA7, 0xCA, 0xB8, 0x3E, 0x58, 0x1F, 0x86, 0xB1}
obfuscQuoteRune = '!' obfuscQuoteRune = '!'
) )
@@ -170,30 +169,27 @@ func NewNameEncoding(s string) (enc fileNameEncoding, err error) {
// Cipher defines an encoding and decoding cipher for the crypt backend // Cipher defines an encoding and decoding cipher for the crypt backend
type Cipher struct { type Cipher struct {
dataKey [32]byte // Key for secretbox dataKey [32]byte // Key for secretbox
nameKey [32]byte // 16,24 or 32 bytes nameKey [32]byte // 16,24 or 32 bytes
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
block gocipher.Block block gocipher.Block
mode NameEncryptionMode mode NameEncryptionMode
fileNameEnc fileNameEncoding fileNameEnc fileNameEncoding
buffers sync.Pool // encrypt/decrypt buffers buffers sync.Pool // encrypt/decrypt buffers
cryptoRand io.Reader // read crypto random numbers from here cryptoRand io.Reader // read crypto random numbers from here
dirNameEncrypt bool dirNameEncrypt bool
passBadBlocks bool // if set passed bad blocks as zeroed blocks
encryptedSuffix string
} }
// newCipher initialises the cipher. If salt is "" then it uses a built in salt val // newCipher initialises the cipher. If salt is "" then it uses a built in salt val
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool, enc fileNameEncoding) (*Cipher, error) { func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool, enc fileNameEncoding) (*Cipher, error) {
c := &Cipher{ c := &Cipher{
mode: mode, mode: mode,
fileNameEnc: enc, fileNameEnc: enc,
cryptoRand: rand.Reader, cryptoRand: rand.Reader,
dirNameEncrypt: dirNameEncrypt, dirNameEncrypt: dirNameEncrypt,
encryptedSuffix: ".bin",
} }
c.buffers.New = func() interface{} { c.buffers.New = func() interface{} {
return new([blockSize]byte) return make([]byte, blockSize)
} }
err := c.Key(password, salt) err := c.Key(password, salt)
if err != nil { if err != nil {
@@ -202,29 +198,11 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
return c, nil return c, nil
} }
// setEncryptedSuffix set suffix, or an empty string
func (c *Cipher) setEncryptedSuffix(suffix string) {
if strings.EqualFold(suffix, "none") {
c.encryptedSuffix = ""
return
}
if !strings.HasPrefix(suffix, ".") {
fs.Errorf(nil, "crypt: bad suffix: %v", ErrorSuffixMissingDot)
suffix = "." + suffix
}
c.encryptedSuffix = suffix
}
// Call to set bad block pass through
func (c *Cipher) setPassBadBlocks(passBadBlocks bool) {
c.passBadBlocks = passBadBlocks
}
// Key creates all the internal keys from the password passed in using // Key creates all the internal keys from the password passed in using
// scrypt. // scrypt.
// //
// If salt is "" we use a fixed salt just to make attackers lives // If salt is "" we use a fixed salt just to make attackers lives
// slightly harder than using no salt. // slighty harder than using no salt.
// //
// Note that empty password makes all 0x00 keys which is used in the // Note that empty password makes all 0x00 keys which is used in the
// tests. // tests.
@@ -252,12 +230,15 @@ func (c *Cipher) Key(password, salt string) (err error) {
} }
// getBlock gets a block from the pool of size blockSize // getBlock gets a block from the pool of size blockSize
func (c *Cipher) getBlock() *[blockSize]byte { func (c *Cipher) getBlock() []byte {
return c.buffers.Get().(*[blockSize]byte) return c.buffers.Get().([]byte)
} }
// putBlock returns a block to the pool of size blockSize // putBlock returns a block to the pool of size blockSize
func (c *Cipher) putBlock(buf *[blockSize]byte) { func (c *Cipher) putBlock(buf []byte) {
if len(buf) != blockSize {
panic("bad blocksize returned to pool")
}
c.buffers.Put(buf) c.buffers.Put(buf)
} }
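
A note on the two pool element types this hunk switches between: pooling *[blockSize]byte rather than []byte avoids the per-Put allocation that boxing a slice header in interface{} typically incurs, at the cost of the pointer/slice conversions seen in the surrounding code. A minimal sketch of the pointer-to-array style, under those assumptions:

    package example

    import "sync"

    const blockSize = 16 + 64*1024 // header + data, matching the constants above

    // bufPool hands out fixed-size buffers; storing *[blockSize]byte keeps the
    // value a single pointer when it is placed back into the pool.
    var bufPool = sync.Pool{
        New: func() interface{} { return new([blockSize]byte) },
    }

    // withBlock borrows a buffer, passes it to f as a slice, then returns it.
    func withBlock(f func(buf []byte)) {
        buf := bufPool.Get().(*[blockSize]byte)
        defer bufPool.Put(buf)
        f((*buf)[:])
    }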
@@ -527,7 +508,7 @@ func (c *Cipher) encryptFileName(in string) string {
// EncryptFileName encrypts a file path // EncryptFileName encrypts a file path
func (c *Cipher) EncryptFileName(in string) string { func (c *Cipher) EncryptFileName(in string) string {
if c.mode == NameEncryptionOff { if c.mode == NameEncryptionOff {
return in + c.encryptedSuffix return in + encryptedSuffix
} }
return c.encryptFileName(in) return c.encryptFileName(in)
} }
@@ -587,8 +568,8 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
// DecryptFileName decrypts a file path // DecryptFileName decrypts a file path
func (c *Cipher) DecryptFileName(in string) (string, error) { func (c *Cipher) DecryptFileName(in string) (string, error) {
if c.mode == NameEncryptionOff { if c.mode == NameEncryptionOff {
remainingLength := len(in) - len(c.encryptedSuffix) remainingLength := len(in) - len(encryptedSuffix)
if remainingLength == 0 || !strings.HasSuffix(in, c.encryptedSuffix) { if remainingLength == 0 || !strings.HasSuffix(in, encryptedSuffix) {
return "", ErrorNotAnEncryptedFile return "", ErrorNotAnEncryptedFile
} }
decrypted := in[:remainingLength] decrypted := in[:remainingLength]
@@ -628,7 +609,7 @@ func (n *nonce) pointer() *[fileNonceSize]byte {
// fromReader fills the nonce from an io.Reader - normally the OSes // fromReader fills the nonce from an io.Reader - normally the OSes
// crypto random number generator // crypto random number generator
func (n *nonce) fromReader(in io.Reader) error { func (n *nonce) fromReader(in io.Reader) error {
read, err := readers.ReadFill(in, (*n)[:]) read, err := io.ReadFull(in, (*n)[:])
if read != fileNonceSize { if read != fileNonceSize {
return fmt.Errorf("short read of nonce: %w", err) return fmt.Errorf("short read of nonce: %w", err)
} }
@@ -683,8 +664,8 @@ type encrypter struct {
in io.Reader in io.Reader
c *Cipher c *Cipher
nonce nonce nonce nonce
buf *[blockSize]byte buf []byte
readBuf *[blockSize]byte readBuf []byte
bufIndex int bufIndex int
bufSize int bufSize int
err error err error
@@ -709,9 +690,9 @@ func (c *Cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
} }
} }
// Copy magic into buffer // Copy magic into buffer
copy((*fh.buf)[:], fileMagicBytes) copy(fh.buf, fileMagicBytes)
// Copy nonce into buffer // Copy nonce into buffer
copy((*fh.buf)[fileMagicSize:], fh.nonce[:]) copy(fh.buf[fileMagicSize:], fh.nonce[:])
return fh, nil return fh, nil
} }
@@ -726,20 +707,22 @@ func (fh *encrypter) Read(p []byte) (n int, err error) {
if fh.bufIndex >= fh.bufSize { if fh.bufIndex >= fh.bufSize {
// Read data // Read data
// FIXME should overlap the reads with a go-routine and 2 buffers? // FIXME should overlap the reads with a go-routine and 2 buffers?
readBuf := (*fh.readBuf)[:blockDataSize] readBuf := fh.readBuf[:blockDataSize]
n, err = readers.ReadFill(fh.in, readBuf) n, err = io.ReadFull(fh.in, readBuf)
if n == 0 { if n == 0 {
// err can't be nil since:
// n == len(buf) if and only if err == nil.
return fh.finish(err) return fh.finish(err)
} }
// possibly err != nil here, but we will process the // possibly err != nil here, but we will process the
// data and the next call to ReadFill will return 0, err // data and the next call to ReadFull will return 0, err
// Encrypt the block using the nonce // Encrypt the block using the nonce
secretbox.Seal((*fh.buf)[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey) secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
fh.bufIndex = 0 fh.bufIndex = 0
fh.bufSize = blockHeaderSize + n fh.bufSize = blockHeaderSize + n
fh.nonce.increment() fh.nonce.increment()
} }
n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufSize]) n = copy(p, fh.buf[fh.bufIndex:fh.bufSize])
fh.bufIndex += n fh.bufIndex += n
return n, nil return n, nil
} }
@@ -780,8 +763,8 @@ type decrypter struct {
nonce nonce nonce nonce
initialNonce nonce initialNonce nonce
c *Cipher c *Cipher
buf *[blockSize]byte buf []byte
readBuf *[blockSize]byte readBuf []byte
bufIndex int bufIndex int
bufSize int bufSize int
err error err error
@@ -799,12 +782,12 @@ func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
limit: -1, limit: -1,
} }
// Read file header (magic + nonce) // Read file header (magic + nonce)
readBuf := (*fh.readBuf)[:fileHeaderSize] readBuf := fh.readBuf[:fileHeaderSize]
n, err := readers.ReadFill(fh.rc, readBuf) _, err := io.ReadFull(fh.rc, readBuf)
if n < fileHeaderSize && err == io.EOF { if err == io.EOF || err == io.ErrUnexpectedEOF {
// This read from 0..fileHeaderSize-1 bytes // This read from 0..fileHeaderSize-1 bytes
return nil, fh.finishAndClose(ErrorEncryptedFileTooShort) return nil, fh.finishAndClose(ErrorEncryptedFileTooShort)
} else if err != io.EOF && err != nil { } else if err != nil {
return nil, fh.finishAndClose(err) return nil, fh.finishAndClose(err)
} }
// check the magic // check the magic
@@ -862,8 +845,10 @@ func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offse
func (fh *decrypter) fillBuffer() (err error) { func (fh *decrypter) fillBuffer() (err error) {
// FIXME should overlap the reads with a go-routine and 2 buffers? // FIXME should overlap the reads with a go-routine and 2 buffers?
readBuf := fh.readBuf readBuf := fh.readBuf
n, err := readers.ReadFill(fh.rc, (*readBuf)[:]) n, err := io.ReadFull(fh.rc, readBuf)
if n == 0 { if n == 0 {
// err can't be nil since:
// n == len(buf) if and only if err == nil.
return err return err
} }
// possibly err != nil here, but we will process the data and // possibly err != nil here, but we will process the data and
@@ -871,25 +856,18 @@ func (fh *decrypter) fillBuffer() (err error) {
// Check header + 1 byte exists // Check header + 1 byte exists
if n <= blockHeaderSize { if n <= blockHeaderSize {
if err != nil && err != io.EOF { if err != nil {
return err // return pending error as it is likely more accurate return err // return pending error as it is likely more accurate
} }
return ErrorEncryptedFileBadHeader return ErrorEncryptedFileBadHeader
} }
// Decrypt the block using the nonce // Decrypt the block using the nonce
_, ok := secretbox.Open((*fh.buf)[:0], (*readBuf)[:n], fh.nonce.pointer(), &fh.c.dataKey) _, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
if !ok { if !ok {
if err != nil && err != io.EOF { if err != nil {
return err // return pending error as it is likely more accurate return err // return pending error as it is likely more accurate
} }
if !fh.c.passBadBlocks { return ErrorEncryptedBadBlock
return ErrorEncryptedBadBlock
}
fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
// Zero out the bad block and continue
for i := range (*fh.buf)[:n] {
(*fh.buf)[i] = 0
}
} }
fh.bufIndex = 0 fh.bufIndex = 0
fh.bufSize = n - blockHeaderSize fh.bufSize = n - blockHeaderSize
@@ -915,7 +893,7 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
if fh.limit >= 0 && fh.limit < int64(toCopy) { if fh.limit >= 0 && fh.limit < int64(toCopy) {
toCopy = int(fh.limit) toCopy = int(fh.limit)
} }
n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufIndex+toCopy]) n = copy(p, fh.buf[fh.bufIndex:fh.bufIndex+toCopy])
fh.bufIndex += n fh.bufIndex += n
if fh.limit >= 0 { if fh.limit >= 0 {
fh.limit -= int64(n) fh.limit -= int64(n)
@@ -926,8 +904,9 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
return n, nil return n, nil
} }
// calculateUnderlying converts an (offset, limit) in an encrypted file // calculateUnderlying converts an (offset, limit) in a crypted file
// into an (underlyingOffset, underlyingLimit) for the underlying file. // into an (underlyingOffset, underlyingLimit) for the underlying
// file.
// //
// It also returns number of bytes to discard after reading the first // It also returns number of bytes to discard after reading the first
// block and number of blocks this is from the start so the nonce can // block and number of blocks this is from the start so the nonce can
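
The calculateUnderlying comment above describes mapping a plaintext (offset, limit) onto the encrypted file, whose layout in this file is a small header followed by blocks of blockHeaderSize+blockDataSize bytes. A rough sketch of that arithmetic — not rclone's actual helper, with fileHeaderSize assumed to be 32 bytes (file magic plus 24-byte nonce) purely for illustration:

    package example

    const (
        blockHeaderSize = 16        // secretbox.Overhead
        blockDataSize   = 64 * 1024 // plaintext bytes per block
        blockSize       = blockHeaderSize + blockDataSize
        fileHeaderSize  = 32 // assumed: file magic + 24-byte nonce
    )

    // underlying maps a plaintext offset to the offset of the enclosing block in
    // the encrypted file, the plaintext bytes to discard after decrypting that
    // block, and the block index (used to advance the nonce before decrypting).
    func underlying(offset int64) (underlyingOffset, discard, blocks int64) {
        blocks = offset / blockDataSize
        discard = offset % blockDataSize
        underlyingOffset = fileHeaderSize + blocks*blockSize
        return underlyingOffset, discard, blocks
    }

    // e.g. underlying(70000) -> blocks=1, discard=4464, underlyingOffset=65584.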


@@ -8,6 +8,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"strings" "strings"
"testing" "testing"
@@ -27,14 +28,14 @@ func TestNewNameEncryptionMode(t *testing.T) {
{"off", NameEncryptionOff, ""}, {"off", NameEncryptionOff, ""},
{"standard", NameEncryptionStandard, ""}, {"standard", NameEncryptionStandard, ""},
{"obfuscate", NameEncryptionObfuscated, ""}, {"obfuscate", NameEncryptionObfuscated, ""},
{"potato", NameEncryptionOff, "unknown file name encryption mode \"potato\""}, {"potato", NameEncryptionOff, "Unknown file name encryption mode \"potato\""},
} { } {
actual, actualErr := NewNameEncryptionMode(test.in) actual, actualErr := NewNameEncryptionMode(test.in)
assert.Equal(t, actual, test.expected) assert.Equal(t, actual, test.expected)
if test.expectedErr == "" { if test.expectedErr == "" {
assert.NoError(t, actualErr) assert.NoError(t, actualErr)
} else { } else {
assert.EqualError(t, actualErr, test.expectedErr) assert.Error(t, actualErr, test.expectedErr)
} }
} }
} }
@@ -405,13 +406,6 @@ func TestNonStandardEncryptFileName(t *testing.T) {
// Off mode // Off mode
c, _ := newCipher(NameEncryptionOff, "", "", true, nil) c, _ := newCipher(NameEncryptionOff, "", "", true, nil)
assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123")) assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
// Off mode with custom suffix
c, _ = newCipher(NameEncryptionOff, "", "", true, nil)
c.setEncryptedSuffix(".jpg")
assert.Equal(t, "1/12/123.jpg", c.EncryptFileName("1/12/123"))
// Off mode with empty suffix
c.setEncryptedSuffix("none")
assert.Equal(t, "1/12/123", c.EncryptFileName("1/12/123"))
// Obfuscation mode // Obfuscation mode
c, _ = newCipher(NameEncryptionObfuscated, "", "", true, nil) c, _ = newCipher(NameEncryptionObfuscated, "", "", true, nil)
assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello")) assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
@@ -490,27 +484,21 @@ func TestNonStandardDecryptFileName(t *testing.T) {
in string in string
expected string expected string
expectedErr error expectedErr error
customSuffix string
}{ }{
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil, ""}, {NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile, ""}, {NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile, ""}, {NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil, ""}, {NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil, ""}, {NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil, ""}, {NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
{NameEncryptionOff, true, "1/12/123.jpg", "1/12/123", nil, ".jpg"}, {NameEncryptionObfuscated, true, "!.hello", "hello", nil},
{NameEncryptionOff, true, "1/12/123", "1/12/123", nil, "none"}, {NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
{NameEncryptionObfuscated, true, "!.hello", "hello", nil, ""}, {NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile, ""}, {NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil, ""}, {NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil, ""}, {NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil, ""},
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil, ""},
} { } {
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc) c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
if test.customSuffix != "" {
c.setEncryptedSuffix(test.customSuffix)
}
actual, actualErr := c.DecryptFileName(test.in) actual, actualErr := c.DecryptFileName(test.in)
what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode) what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
assert.Equal(t, test.expected, actual, what) assert.Equal(t, test.expected, actual, what)
@@ -739,7 +727,7 @@ func TestNonceFromReader(t *testing.T) {
assert.Equal(t, nonce{'1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o'}, x) assert.Equal(t, nonce{'1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o'}, x)
buf = bytes.NewBufferString("123456789abcdefghijklmn") buf = bytes.NewBufferString("123456789abcdefghijklmn")
err = x.fromReader(buf) err = x.fromReader(buf)
assert.EqualError(t, err, "short read of nonce: EOF") assert.Error(t, err, "short read of nonce")
} }
func TestNonceFromBuf(t *testing.T) { func TestNonceFromBuf(t *testing.T) {
@@ -1063,7 +1051,7 @@ func TestRandomSource(t *testing.T) {
_, _ = source.Read(buf) _, _ = source.Read(buf)
sink = newRandomSource(1e8) sink = newRandomSource(1e8)
_, err = io.Copy(sink, source) _, err = io.Copy(sink, source)
assert.EqualError(t, err, "Error in stream at 1") assert.Error(t, err, "Error in stream")
} }
type zeroes struct{} type zeroes struct{}
@@ -1085,7 +1073,7 @@ func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
source := newRandomSource(copySize) source := newRandomSource(copySize)
encrypted, err := c.newEncrypter(source, nil) encrypted, err := c.newEncrypter(source, nil)
assert.NoError(t, err) assert.NoError(t, err)
decrypted, err := c.newDecrypter(io.NopCloser(encrypted)) decrypted, err := c.newDecrypter(ioutil.NopCloser(encrypted))
assert.NoError(t, err) assert.NoError(t, err)
sink := newRandomSource(copySize) sink := newRandomSource(copySize)
n, err := io.CopyBuffer(sink, decrypted, buf) n, err := io.CopyBuffer(sink, decrypted, buf)
@@ -1156,15 +1144,15 @@ func TestEncryptData(t *testing.T) {
buf := bytes.NewBuffer(test.in) buf := bytes.NewBuffer(test.in)
encrypted, err := c.EncryptData(buf) encrypted, err := c.EncryptData(buf)
assert.NoError(t, err) assert.NoError(t, err)
out, err := io.ReadAll(encrypted) out, err := ioutil.ReadAll(encrypted)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, test.expected, out) assert.Equal(t, test.expected, out)
// Check we can decode the data properly too... // Check we can decode the data properly too...
buf = bytes.NewBuffer(out) buf = bytes.NewBuffer(out)
decrypted, err := c.DecryptData(io.NopCloser(buf)) decrypted, err := c.DecryptData(ioutil.NopCloser(buf))
assert.NoError(t, err) assert.NoError(t, err)
out, err = io.ReadAll(decrypted) out, err = ioutil.ReadAll(decrypted)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, test.in, out) assert.Equal(t, test.in, out)
} }
@@ -1180,13 +1168,13 @@ func TestNewEncrypter(t *testing.T) {
fh, err := c.newEncrypter(z, nil) fh, err := c.newEncrypter(z, nil)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, nonce{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.nonce) assert.Equal(t, nonce{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.nonce)
assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, (*fh.buf)[:32]) assert.Equal(t, []byte{'R', 'C', 'L', 'O', 'N', 'E', 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}, fh.buf[:32])
// Test error path // Test error path
c.cryptoRand = bytes.NewBufferString("123456789abcdefghijklmn") c.cryptoRand = bytes.NewBufferString("123456789abcdefghijklmn")
fh, err = c.newEncrypter(z, nil) fh, err = c.newEncrypter(z, nil)
assert.Nil(t, fh) assert.Nil(t, fh)
assert.EqualError(t, err, "short read of nonce: EOF") assert.Error(t, err, "short read of nonce")
} }
// Test the stream returning 0, io.ErrUnexpectedEOF - this used to // Test the stream returning 0, io.ErrUnexpectedEOF - this used to
@@ -1199,7 +1187,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
fh, err := c.newEncrypter(in, nil) fh, err := c.newEncrypter(in, nil)
assert.NoError(t, err) assert.NoError(t, err)
n, err := io.CopyN(io.Discard, fh, 1e6) n, err := io.CopyN(ioutil.Discard, fh, 1e6)
assert.Equal(t, io.ErrUnexpectedEOF, err) assert.Equal(t, io.ErrUnexpectedEOF, err)
assert.Equal(t, int64(32), n) assert.Equal(t, int64(32), n)
} }
@@ -1237,7 +1225,7 @@ func TestNewDecrypter(t *testing.T) {
cd := newCloseDetector(bytes.NewBuffer(file0[:i])) cd := newCloseDetector(bytes.NewBuffer(file0[:i]))
fh, err = c.newDecrypter(cd) fh, err = c.newDecrypter(cd)
assert.Nil(t, fh) assert.Nil(t, fh)
assert.EqualError(t, err, ErrorEncryptedFileTooShort.Error()) assert.Error(t, err, ErrorEncryptedFileTooShort.Error())
assert.Equal(t, 1, cd.closed) assert.Equal(t, 1, cd.closed)
} }
@@ -1245,7 +1233,7 @@ func TestNewDecrypter(t *testing.T) {
cd = newCloseDetector(er) cd = newCloseDetector(er)
fh, err = c.newDecrypter(cd) fh, err = c.newDecrypter(cd)
assert.Nil(t, fh) assert.Nil(t, fh)
assert.EqualError(t, err, "potato") assert.Error(t, err, "potato")
assert.Equal(t, 1, cd.closed) assert.Equal(t, 1, cd.closed)
// bad magic // bad magic
@@ -1256,7 +1244,7 @@ func TestNewDecrypter(t *testing.T) {
cd := newCloseDetector(bytes.NewBuffer(file0copy)) cd := newCloseDetector(bytes.NewBuffer(file0copy))
fh, err := c.newDecrypter(cd) fh, err := c.newDecrypter(cd)
assert.Nil(t, fh) assert.Nil(t, fh)
assert.EqualError(t, err, ErrorEncryptedBadMagic.Error()) assert.Error(t, err, ErrorEncryptedBadMagic.Error())
file0copy[i] ^= 0x1 file0copy[i] ^= 0x1
assert.Equal(t, 1, cd.closed) assert.Equal(t, 1, cd.closed)
} }
@@ -1269,12 +1257,12 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF} in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
in1 := bytes.NewBuffer(file16) in1 := bytes.NewBuffer(file16)
in := io.NopCloser(io.MultiReader(in1, in2)) in := ioutil.NopCloser(io.MultiReader(in1, in2))
fh, err := c.newDecrypter(in) fh, err := c.newDecrypter(in)
assert.NoError(t, err) assert.NoError(t, err)
n, err := io.CopyN(io.Discard, fh, 1e6) n, err := io.CopyN(ioutil.Discard, fh, 1e6)
assert.Equal(t, io.ErrUnexpectedEOF, err) assert.Equal(t, io.ErrUnexpectedEOF, err)
assert.Equal(t, int64(16), n) assert.Equal(t, int64(16), n)
} }
@@ -1286,14 +1274,14 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
// Make random data // Make random data
const dataSize = 150000 const dataSize = 150000
plaintext, err := io.ReadAll(newRandomSource(dataSize)) plaintext, err := ioutil.ReadAll(newRandomSource(dataSize))
assert.NoError(t, err) assert.NoError(t, err)
// Encrypt the data // Encrypt the data
buf := bytes.NewBuffer(plaintext) buf := bytes.NewBuffer(plaintext)
encrypted, err := c.EncryptData(buf) encrypted, err := c.EncryptData(buf)
assert.NoError(t, err) assert.NoError(t, err)
ciphertext, err := io.ReadAll(encrypted) ciphertext, err := ioutil.ReadAll(encrypted)
assert.NoError(t, err) assert.NoError(t, err)
trials := []int{0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65, trials := []int{0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65,
@@ -1312,7 +1300,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
end = len(ciphertext) end = len(ciphertext)
} }
} }
reader = io.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end])) reader = ioutil.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
return reader, nil return reader, nil
} }
@@ -1502,16 +1490,14 @@ func TestDecrypterRead(t *testing.T) {
assert.NoError(t, err, what) assert.NoError(t, err, what)
continue continue
} }
_, err = io.ReadAll(fh) _, err = ioutil.ReadAll(fh)
var expectedErr error var expectedErr error
switch { switch {
case i == fileHeaderSize: case i == fileHeaderSize:
// This would normally produce an error *except* on the first block // This would normally produce an error *except* on the first block
expectedErr = nil expectedErr = nil
case i <= fileHeaderSize+blockHeaderSize:
expectedErr = ErrorEncryptedFileBadHeader
default: default:
expectedErr = ErrorEncryptedBadBlock expectedErr = io.ErrUnexpectedEOF
} }
if expectedErr != nil { if expectedErr != nil {
assert.EqualError(t, err, expectedErr.Error(), what) assert.EqualError(t, err, expectedErr.Error(), what)
@@ -1528,8 +1514,8 @@ func TestDecrypterRead(t *testing.T) {
cd := newCloseDetector(in) cd := newCloseDetector(in)
fh, err := c.newDecrypter(cd) fh, err := c.newDecrypter(cd)
assert.NoError(t, err) assert.NoError(t, err)
_, err = io.ReadAll(fh) _, err = ioutil.ReadAll(fh)
assert.EqualError(t, err, "potato") assert.Error(t, err, "potato")
assert.Equal(t, 0, cd.closed) assert.Equal(t, 0, cd.closed)
// Test corrupting the input // Test corrupting the input
@@ -1538,28 +1524,17 @@ func TestDecrypterRead(t *testing.T) {
copy(file16copy, file16) copy(file16copy, file16)
for i := range file16copy { for i := range file16copy {
file16copy[i] ^= 0xFF file16copy[i] ^= 0xFF
fh, err := c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy))) fh, err := c.newDecrypter(ioutil.NopCloser(bytes.NewBuffer(file16copy)))
if i < fileMagicSize { if i < fileMagicSize {
assert.EqualError(t, err, ErrorEncryptedBadMagic.Error()) assert.Error(t, err, ErrorEncryptedBadMagic.Error())
assert.Nil(t, fh) assert.Nil(t, fh)
} else { } else {
assert.NoError(t, err) assert.NoError(t, err)
_, err = io.ReadAll(fh) _, err = ioutil.ReadAll(fh)
assert.EqualError(t, err, ErrorEncryptedBadBlock.Error()) assert.Error(t, err, ErrorEncryptedFileBadHeader.Error())
} }
file16copy[i] ^= 0xFF file16copy[i] ^= 0xFF
} }
// Test that we can corrupt a byte and read zeroes if
// passBadBlocks is set
copy(file16copy, file16)
file16copy[len(file16copy)-1] ^= 0xFF
c.passBadBlocks = true
fh, err = c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
assert.NoError(t, err)
buf, err := io.ReadAll(fh)
assert.NoError(t, err)
assert.Equal(t, make([]byte, 16), buf)
} }
func TestDecrypterClose(t *testing.T) { func TestDecrypterClose(t *testing.T) {
@@ -1580,7 +1555,7 @@ func TestDecrypterClose(t *testing.T) {
// double close // double close
err = fh.Close() err = fh.Close()
assert.EqualError(t, err, ErrorFileClosed.Error()) assert.Error(t, err, ErrorFileClosed.Error())
assert.Equal(t, 1, cd.closed) assert.Equal(t, 1, cd.closed)
// try again reading the file this time // try again reading the file this time
@@ -1590,7 +1565,7 @@ func TestDecrypterClose(t *testing.T) {
assert.Equal(t, 0, cd.closed) assert.Equal(t, 0, cd.closed)
// close after reading // close after reading
out, err := io.ReadAll(fh) out, err := ioutil.ReadAll(fh)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, []byte{1}, out) assert.Equal(t, []byte{1}, out)
assert.Equal(t, io.EOF, fh.err) assert.Equal(t, io.EOF, fh.err)
@@ -1607,6 +1582,8 @@ func TestPutGetBlock(t *testing.T) {
block := c.getBlock() block := c.getBlock()
c.putBlock(block) c.putBlock(block)
c.putBlock(block) c.putBlock(block)
assert.Panics(t, func() { c.putBlock(block[:len(block)-1]) })
} }
func TestKey(t *testing.T) { func TestKey(t *testing.T) {


@@ -48,7 +48,7 @@ func init() {
Help: "Very simple filename obfuscation.", Help: "Very simple filename obfuscation.",
}, { }, {
Value: "off", Value: "off",
Help: "Don't encrypt the file names.\nAdds a \".bin\", or \"suffix\" extension only.", Help: "Don't encrypt the file names.\nAdds a \".bin\" extension only.",
}, },
}, },
}, { }, {
@@ -79,9 +79,7 @@ NB If filename_encryption is "off" then this option will do nothing.`,
}, { }, {
Name: "server_side_across_configs", Name: "server_side_across_configs",
Default: false, Default: false,
Help: `Deprecated: use --server-side-across-configs instead. Help: `Allow server-side operations (e.g. copy) to work across different crypt configs.
Allow server-side operations (e.g. copy) to work across different crypt configs.
Normally this option is not what you want, but if you have two crypts Normally this option is not what you want, but if you have two crypts
pointing to the same backend you can use it. pointing to the same backend you can use it.
@@ -121,15 +119,6 @@ names, or for debugging purposes.`,
Help: "Encrypt file data.", Help: "Encrypt file data.",
}, },
}, },
}, {
Name: "pass_bad_blocks",
Help: `If set this will pass bad blocks through as all 0.
This should not be set in normal operation, it should only be set if
trying to recover an encrypted file with errors and it is desired to
recover as much of the file as possible.`,
Default: false,
Advanced: true,
}, { }, {
Name: "filename_encoding", Name: "filename_encoding",
Help: `How to encode the encrypted filename to text string. Help: `How to encode the encrypted filename to text string.
@@ -149,18 +138,10 @@ length and if it's case sensitive.`,
}, },
{ {
Value: "base32768", Value: "base32768",
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive, Dropbox)", Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive)",
}, },
}, },
Advanced: true, Advanced: true,
}, {
Name: "suffix",
Help: `If this is set it will override the default suffix of ".bin".
Setting suffix to "none" will result in an empty suffix. This may be useful
when the path length is critical.`,
Default: ".bin",
Advanced: true,
}}, }},
}) })
} }
@@ -193,8 +174,6 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to make cipher: %w", err) return nil, fmt.Errorf("failed to make cipher: %w", err)
} }
cipher.setEncryptedSuffix(opt.Suffix)
cipher.setPassBadBlocks(opt.PassBadBlocks)
return cipher, nil return cipher, nil
} }
@@ -256,7 +235,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
// the features here are ones we could support, and they are // the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs // ANDed with the ones from wrappedFs
f.features = (&fs.Features{ f.features = (&fs.Features{
CaseInsensitive: !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff, CaseInsensitive: cipher.NameEncryptionMode() == NameEncryptionOff,
DuplicateFiles: true, DuplicateFiles: true,
ReadMimeType: false, // MimeTypes not supported with crypt ReadMimeType: false, // MimeTypes not supported with crypt
WriteMimeType: false, WriteMimeType: false,
@@ -268,7 +247,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
ReadMetadata: true, ReadMetadata: true,
WriteMetadata: true, WriteMetadata: true,
UserMetadata: true, UserMetadata: true,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs) }).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
return f, err return f, err
@@ -284,9 +262,7 @@ type Options struct {
Password2 string `config:"password2"` Password2 string `config:"password2"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"` ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
ShowMapping bool `config:"show_mapping"` ShowMapping bool `config:"show_mapping"`
PassBadBlocks bool `config:"pass_bad_blocks"`
FilenameEncoding string `config:"filename_encoding"` FilenameEncoding string `config:"filename_encoding"`
Suffix string `config:"suffix"`
} }
// Fs represents a wrapped fs.Fs // Fs represents a wrapped fs.Fs
@@ -420,8 +396,6 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
// put implements Put or PutStream // put implements Put or PutStream
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) { func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
ci := fs.GetConfig(ctx)
if f.opt.NoDataEncryption { if f.opt.NoDataEncryption {
o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...) o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
if err == nil && o != nil { if err == nil && o != nil {
@@ -439,9 +413,6 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
// Find a hash the destination supports to compute a hash of // Find a hash the destination supports to compute a hash of
// the encrypted data // the encrypted data
ht := f.Fs.Hashes().GetOne() ht := f.Fs.Hashes().GetOne()
if ci.IgnoreChecksum {
ht = hash.None
}
var hasher *hash.MultiHasher var hasher *hash.MultiHasher
if ht != hash.None { if ht != hash.None {
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht)) hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
@@ -478,7 +449,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
if err != nil { if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err) fs.Errorf(o, "Failed to remove corrupted object: %v", err)
} }
return nil, fmt.Errorf("corrupted on transfer: %v encrypted hash differ src %q vs dst %q", ht, srcHash, dstHash) return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
} }
fs.Debugf(src, "%v = %s OK", ht, srcHash) fs.Debugf(src, "%v = %s OK", ht, srcHash)
} }
@@ -1076,11 +1047,10 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
// Get the underlying object if there is one // Get the underlying object if there is one
if srcObj, ok = o.ObjectInfo.(fs.Object); ok { if srcObj, ok = o.ObjectInfo.(fs.Object); ok {
// Prefer direct interface assertion // Prefer direct interface assertion
} else if do, ok := o.ObjectInfo.(*fs.OverrideRemote); ok { } else if do, ok := o.ObjectInfo.(fs.ObjectUnWrapper); ok {
// Unwrap if it is an operations.OverrideRemote // Otherwise likely is an operations.OverrideRemote
srcObj = do.UnWrap() srcObj = do.UnWrap()
} else { } else {
// Otherwise don't unwrap any further
return "", nil return "", nil
} }
// if this is wrapping a local object then we work out the hash // if this is wrapping a local object then we work out the hash


@@ -17,28 +17,41 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
type testWrapper struct {
fs.ObjectInfo
}
// UnWrap returns the Object that this Object is wrapping or nil if it
// isn't wrapping anything
func (o testWrapper) UnWrap() fs.Object {
if o, ok := o.ObjectInfo.(fs.Object); ok {
return o
}
return nil
}
// Create a temporary local fs to upload things from // Create a temporary local fs to upload things from
func makeTempLocalFs(t *testing.T) (localFs fs.Fs) { func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
localFs, err := fs.TemporaryLocalFs(context.Background()) localFs, err := fs.TemporaryLocalFs(context.Background())
require.NoError(t, err) require.NoError(t, err)
t.Cleanup(func() { cleanup = func() {
require.NoError(t, localFs.Rmdir(context.Background(), "")) require.NoError(t, localFs.Rmdir(context.Background(), ""))
}) }
return localFs return localFs, cleanup
} }
// Upload a file to a remote // Upload a file to a remote
func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object) { func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object, cleanup func()) {
inBuf := bytes.NewBufferString(contents) inBuf := bytes.NewBufferString(contents)
t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC) t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC)
upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil) upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil)
obj, err := f.Put(context.Background(), inBuf, upSrc) obj, err := f.Put(context.Background(), inBuf, upSrc)
require.NoError(t, err) require.NoError(t, err)
t.Cleanup(func() { cleanup = func() {
require.NoError(t, obj.Remove(context.Background())) require.NoError(t, obj.Remove(context.Background()))
}) }
return obj return obj, cleanup
} }
// Test the ObjectInfo // Test the ObjectInfo
@@ -52,9 +65,11 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
path = "_wrap" path = "_wrap"
} }
localFs := makeTempLocalFs(t) localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
obj := uploadFile(t, localFs, path, contents) obj, cleanupObj := uploadFile(t, localFs, path, contents)
defer cleanupObj()
// encrypt the data // encrypt the data
inBuf := bytes.NewBufferString(contents) inBuf := bytes.NewBufferString(contents)
@@ -68,7 +83,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
var oi fs.ObjectInfo = obj var oi fs.ObjectInfo = obj
if wrap { if wrap {
// wrap the object in an fs.ObjectUnwrapper if required // wrap the object in an fs.ObjectUnwrapper if required
oi = fs.NewOverrideRemote(oi, "new_remote") oi = testWrapper{oi}
} }
// wrap the object in a crypt for upload using the nonce we // wrap the object in a crypt for upload using the nonce we
@@ -101,13 +116,16 @@ func testComputeHash(t *testing.T, f *Fs) {
t.Skipf("%v: does not support hashes", f.Fs) t.Skipf("%v: does not support hashes", f.Fs)
} }
localFs := makeTempLocalFs(t) localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
// Upload a file to localFs as a test object // Upload a file to localFs as a test object
localObj := uploadFile(t, localFs, path, contents) localObj, cleanupLocalObj := uploadFile(t, localFs, path, contents)
defer cleanupLocalObj()
// Upload the same data to the remote Fs also // Upload the same data to the remote Fs also
remoteObj := uploadFile(t, f, path, contents) remoteObj, cleanupRemoteObj := uploadFile(t, f, path, contents)
defer cleanupRemoteObj()
// Calculate the expected Hash of the remote object // Calculate the expected Hash of the remote object
computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType) computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType)


@@ -14,10 +14,11 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"mime" "mime"
"net/http" "net/http"
"os"
"path" "path"
"regexp"
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
@@ -202,7 +203,7 @@ func init() {
m.Set("root_folder_id", "appDataFolder") m.Set("root_folder_id", "appDataFolder")
} }
if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" && !opt.EnvAuth { if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" {
return oauthutil.ConfigOut("teamdrive", &oauthutil.Options{ return oauthutil.ConfigOut("teamdrive", &oauthutil.Options{
OAuth2Config: driveConfig, OAuth2Config: driveConfig,
}) })
@@ -277,23 +278,20 @@ Leave blank normally.
Fill in to access "Computers" folders (see docs), or for rclone to use Fill in to access "Computers" folders (see docs), or for rclone to use
a non root folder as its starting point. a non root folder as its starting point.
`, `,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "service_account_file", Name: "service_account_file",
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp, Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
}, { }, {
Name: "service_account_credentials", Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.", Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideConfigurator, Hide: fs.OptionHideConfigurator,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "team_drive", Name: "team_drive",
Help: "ID of the Shared Drive (Team Drive).", Help: "ID of the Shared Drive (Team Drive).",
Hide: fs.OptionHideConfigurator, Hide: fs.OptionHideConfigurator,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "auth_owner_only", Name: "auth_owner_only",
Default: false, Default: false,
@@ -419,11 +417,10 @@ date is used.`,
Help: "Size of listing chunk 100-1000, 0 to disable.", Help: "Size of listing chunk 100-1000, 0 to disable.",
Advanced: true, Advanced: true,
}, { }, {
Name: "impersonate", Name: "impersonate",
Default: "", Default: "",
Help: `Impersonate this user when using a service account.`, Help: `Impersonate this user when using a service account.`,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "alternate_export", Name: "alternate_export",
Default: false, Default: false,
@@ -455,11 +452,7 @@ If downloading a file returns the error "This file has been identified
as malware or spam and cannot be downloaded" with the error code as malware or spam and cannot be downloaded" with the error code
"cannotDownloadAbusiveFile" then supply this flag to rclone to "cannotDownloadAbusiveFile" then supply this flag to rclone to
indicate you acknowledge the risks of downloading the file and rclone indicate you acknowledge the risks of downloading the file and rclone
will download it anyway. will download it anyway.`,
Note that if you are using service account it will need Manager
permission (not Content Manager) to for this flag to work. If the SA
does not have the right permission, Google will just ignore the flag.`,
Advanced: true, Advanced: true,
}, { }, {
Name: "keep_revision_forever", Name: "keep_revision_forever",
@@ -503,9 +496,7 @@ need to use --ignore size also.`,
}, { }, {
Name: "server_side_across_configs", Name: "server_side_across_configs",
Default: false, Default: false,
Help: `Deprecated: use --server-side-across-configs instead. Help: `Allow server-side operations (e.g. copy) to work across different drive configs.
Allow server-side operations (e.g. copy) to work across different drive configs.
This can be useful if you wish to do a server-side copy between two This can be useful if you wish to do a server-side copy between two
different Google drives. Note that this isn't enabled by default different Google drives. Note that this isn't enabled by default
@@ -596,8 +587,7 @@ Note also that opening the folder once in the web interface (with the
user you've authenticated rclone with) seems to be enough so that the user you've authenticated rclone with) seems to be enough so that the
resource key is no needed. resource key is no needed.
`, `,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,
@@ -605,18 +595,6 @@ resource key is no needed.
// Encode invalid UTF-8 bytes as json doesn't handle them properly. // Encode invalid UTF-8 bytes as json doesn't handle them properly.
// Don't encode / as it's a valid name character in drive. // Don't encode / as it's a valid name character in drive.
Default: encoder.EncodeInvalidUtf8, Default: encoder.EncodeInvalidUtf8,
}, {
Name: "env_auth",
Help: "Get IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
Default: false,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "false",
Help: "Enter credentials in the next step.",
}, {
Value: "true",
Help: "Get GCP IAM credentials from the environment (env vars or IAM).",
}},
}}...), }}...),
}) })
@@ -673,7 +651,6 @@ type Options struct {
SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"` SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"`
ResourceKey string `config:"resource_key"` ResourceKey string `config:"resource_key"`
Enc encoder.MultiEncoder `config:"encoding"` Enc encoder.MultiEncoder `config:"encoding"`
EnvAuth bool `config:"env_auth"`
} }
// Fs represents a remote drive server // Fs represents a remote drive server
@@ -781,7 +758,7 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
} else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" { } else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" {
fs.Errorf(f, "Received download limit error: %v", err) fs.Errorf(f, "Received download limit error: %v", err)
return false, fserrors.FatalError(err) return false, fserrors.FatalError(err)
} else if f.opt.StopOnUploadLimit && (reason == "quotaExceeded" || reason == "storageQuotaExceeded") { } else if f.opt.StopOnUploadLimit && reason == "quotaExceeded" {
fs.Errorf(f, "Received upload limit error: %v", err) fs.Errorf(f, "Received upload limit error: %v", err)
return false, fserrors.FatalError(err) return false, fserrors.FatalError(err)
} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" { } else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
@@ -1131,7 +1108,7 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
// try loading service account credentials from env variable, then from a file // try loading service account credentials from env variable, then from a file
if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" { if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServiceAccountFile)) loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
if err != nil { if err != nil {
return nil, fmt.Errorf("error opening service account credentials file: %w", err) return nil, fmt.Errorf("error opening service account credentials file: %w", err)
} }
@@ -1142,12 +1119,6 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create oauth client from service account: %w", err) return nil, fmt.Errorf("failed to create oauth client from service account: %w", err)
} }
} else if opt.EnvAuth {
scopes := driveScopes(opt.Scope)
oAuthClient, err = google.DefaultClient(ctx, scopes...)
if err != nil {
return nil, fmt.Errorf("failed to create client from environment: %w", err)
}
} else { } else {
oAuthClient, _, err = oauthutil.NewClientWithBaseClient(ctx, name, m, driveConfig, getClient(ctx, opt)) oAuthClient, _, err = oauthutil.NewClientWithBaseClient(ctx, name, m, driveConfig, getClient(ctx, opt))
if err != nil { if err != nil {
@@ -1239,7 +1210,6 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
WriteMimeType: true, WriteMimeType: true,
CanHaveEmptyDirectories: true, CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs, ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
FilterAware: true,
}).Fill(ctx, f) }).Fill(ctx, f)
// Create a new authorized Drive client. // Create a new authorized Drive client.
@@ -1519,9 +1489,6 @@ func (f *Fs) newObjectWithExportInfo(
// NewObject finds the Object at remote. If it can't be found // NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound. // it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if strings.HasSuffix(remote, "/") {
return nil, fs.ErrorIsDir
}
info, extension, exportName, exportMimeType, isDocument, err := f.getRemoteInfoWithExport(ctx, remote) info, extension, exportName, exportMimeType, isDocument, err := f.getRemoteInfoWithExport(ctx, remote)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -2909,7 +2876,6 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
if f.rootFolderID == "appDataFolder" { if f.rootFolderID == "appDataFolder" {
changesCall.Spaces("appDataFolder") changesCall.Spaces("appDataFolder")
} }
changesCall.RestrictToMyDrive(!f.opt.SharedWithMe)
changeList, err = changesCall.Context(ctx).Do() changeList, err = changesCall.Context(ctx).Do()
return f.shouldRetry(ctx, err) return f.shouldRetry(ctx, err)
}) })
@@ -3356,9 +3322,9 @@ This takes an optional directory to trash which make this easier to
use via the API. use via the API.
rclone backend untrash drive:directory rclone backend untrash drive:directory
rclone backend --interactive untrash drive:directory subdir rclone backend -i untrash drive:directory subdir
Use the --interactive/-i or --dry-run flag to see what would be restored before restoring it. Use the -i flag to see what would be restored before restoring it.
Result: Result:
@@ -3388,7 +3354,7 @@ component will be used as the file name.
If the destination is a drive backend then server-side copying will be If the destination is a drive backend then server-side copying will be
attempted if possible. attempted if possible.
Use the --interactive/-i or --dry-run flag to see what would be copied before copying. Use the -i flag to see what would be copied before copying.
`, `,
}, { }, {
Name: "exportformats", Name: "exportformats",
@@ -3464,12 +3430,13 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
if err != nil { if err != nil {
return nil, err return nil, err
} }
re := regexp.MustCompile(`[^\w_. -]+`)
if _, ok := opt["config"]; ok { if _, ok := opt["config"]; ok {
lines := []string{} lines := []string{}
upstreams := []string{} upstreams := []string{}
names := make(map[string]struct{}, len(drives)) names := make(map[string]struct{}, len(drives))
for i, drive := range drives { for i, drive := range drives {
name := fspath.MakeConfigName(drive.Name) name := re.ReplaceAllString(drive.Name, "_")
for { for {
if _, found := names[name]; !found { if _, found := names[name]; !found {
break break
@@ -3832,7 +3799,7 @@ func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.
data = data[:limit] data = data[:limit]
} }
return io.NopCloser(bytes.NewReader(data)), nil return ioutil.NopCloser(bytes.NewReader(data)), nil
} }
func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader, func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
@@ -3891,7 +3858,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil { if err != nil {
return err return err
} }
newO, err := o.fs.newObjectWithInfo(ctx, o.remote, info) newO, err := o.fs.newObjectWithInfo(ctx, src.Remote(), info)
if err != nil { if err != nil {
return err return err
} }


@@ -7,6 +7,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"mime" "mime"
"os" "os"
"path" "path"
@@ -77,7 +78,7 @@ var additionalMimeTypes = map[string]string{
// Load the example export formats into exportFormats for testing // Load the example export formats into exportFormats for testing
func TestInternalLoadExampleFormats(t *testing.T) { func TestInternalLoadExampleFormats(t *testing.T) {
fetchFormatsOnce.Do(func() {}) fetchFormatsOnce.Do(func() {})
buf, err := os.ReadFile(filepath.FromSlash("test/about.json")) buf, err := ioutil.ReadFile(filepath.FromSlash("test/about.json"))
var about struct { var about struct {
ExportFormats map[string][]string `json:"exportFormats,omitempty"` ExportFormats map[string][]string `json:"exportFormats,omitempty"`
ImportFormats map[string][]string `json:"importFormats,omitempty"` ImportFormats map[string][]string `json:"importFormats,omitempty"`
@@ -243,15 +244,6 @@ func (f *Fs) InternalTestShouldRetry(t *testing.T) {
quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403) quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403)
assert.False(t, quotaExceededRetry) assert.False(t, quotaExceededRetry)
assert.Equal(t, quotaExceededError, expectedQuotaError) assert.Equal(t, quotaExceededError, expectedQuotaError)
sqEItem := googleapi.ErrorItem{
Reason: "storageQuotaExceeded",
}
generic403.Errors[0] = sqEItem
expectedStorageQuotaError := fserrors.FatalError(&generic403)
storageQuotaExceededRetry, storageQuotaExceededError := f.shouldRetry(ctx, &generic403)
assert.False(t, storageQuotaExceededRetry)
assert.Equal(t, storageQuotaExceededError, expectedStorageQuotaError)
} }
func (f *Fs) InternalTestDocumentImport(t *testing.T) { func (f *Fs) InternalTestDocumentImport(t *testing.T) {
@@ -526,9 +518,6 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
// TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery // TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
func (f *Fs) InternalTestAgeQuery(t *testing.T) { func (f *Fs) InternalTestAgeQuery(t *testing.T) {
// Check set up for filtering
assert.True(t, f.Features().FilterAware)
opt := &filter.Opt{} opt := &filter.Opt{}
err := opt.MaxAge.Set("1h") err := opt.MaxAge.Set("1h")
assert.NoError(t, err) assert.NoError(t, err)


@@ -13,6 +13,7 @@ import (
"sync" "sync"
"time" "time"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/async"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files" "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fserrors"
@@ -139,12 +140,55 @@ func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionF
return complete, nil return complete, nil
} }
// finishBatchJobStatus waits for the batch to complete returning completed entries
func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *files.UploadSessionFinishBatchLaunch) (complete *files.UploadSessionFinishBatchResult, err error) {
if launchBatchStatus.AsyncJobId == "" {
return nil, errors.New("wait for batch completion: empty job ID")
}
var batchStatus *files.UploadSessionFinishBatchJobStatus
sleepTime := 100 * time.Millisecond
const maxSleepTime = 1 * time.Second
startTime := time.Now()
try := 1
for {
remaining := time.Duration(b.f.opt.BatchCommitTimeout) - time.Since(startTime)
if remaining < 0 {
break
}
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
AsyncJobId: launchBatchStatus.AsyncJobId,
})
return shouldRetry(ctx, err)
})
if err != nil {
fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d remaining %v", sleepTime, err, try, remaining)
} else {
if batchStatus.Tag == "complete" {
fs.Debugf(b.f, "Upload batch completed in %v", time.Since(startTime))
return batchStatus.Complete, nil
}
fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d remaining %v", sleepTime, batchStatus.Tag, try, remaining)
}
time.Sleep(sleepTime)
sleepTime *= 2
if sleepTime > maxSleepTime {
sleepTime = maxSleepTime
}
try++
}
if err == nil {
err = errors.New("batch didn't complete")
}
return nil, fmt.Errorf("wait for batch failed after %d tries in %v: %w", try, time.Since(startTime), err)
}
// commit a batch // commit a batch
func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) { func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
// If commit fails then signal clients if sync // If commit fails then signal clients if sync
var signalled = b.async var signalled = b.async
defer func() { defer func() {
if err != nil && !signalled { if err != nil && signalled {
// Signal to clients that there was an error // Signal to clients that there was an error
for _, result := range results { for _, result := range results {
result <- batcherResponse{err: err} result <- batcherResponse{err: err}


@@ -58,7 +58,7 @@ import (
const ( const (
rcloneClientID = "5jcck7diasz0rqy" rcloneClientID = "5jcck7diasz0rqy"
rcloneEncryptedClientSecret = "fRS5vVLr2v6FbyXYnIgjwBuUAt0osq_QZTXAEcmZ7g" rcloneEncryptedClientSecret = "fRS5vVLr2v6FbyXYnIgjwBuUAt0osq_QZTXAEcmZ7g"
defaultMinSleep = fs.Duration(10 * time.Millisecond) minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential decayConstant = 2 // bigger for slower decay, exponential
// Upload chunk size - setting too small makes uploads slow. // Upload chunk size - setting too small makes uploads slow.
@@ -182,9 +182,8 @@ client_secret) to use this option as currently rclone's default set of
permissions doesn't include "members.read". This can be added once permissions doesn't include "members.read". This can be added once
v1.55 or later is in use everywhere. v1.55 or later is in use everywhere.
`, `,
Default: "", Default: "",
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "shared_files", Name: "shared_files",
Help: `Instructs rclone to work on individual shared files. Help: `Instructs rclone to work on individual shared files.
@@ -261,8 +260,8 @@ uploaded.
The default for this is 0 which means rclone will choose a sensible The default for this is 0 which means rclone will choose a sensible
default based on the batch_mode in use. default based on the batch_mode in use.
- batch_mode: async - default batch_timeout is 10s - batch_mode: async - default batch_timeout is 500ms
- batch_mode: sync - default batch_timeout is 500ms - batch_mode: sync - default batch_timeout is 10s
- batch_mode: off - not in use - batch_mode: off - not in use
`, `,
Default: fs.Duration(0), Default: fs.Duration(0),
@@ -272,11 +271,6 @@ default based on the batch_mode in use.
Help: `Max time to wait for a batch to finish committing`, Help: `Max time to wait for a batch to finish committing`,
Default: fs.Duration(10 * time.Minute), Default: fs.Duration(10 * time.Minute),
Advanced: true, Advanced: true,
}, {
Name: "pacer_min_sleep",
Default: defaultMinSleep,
Help: "Minimum time to sleep between API calls.",
Advanced: true,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,
@@ -305,7 +299,6 @@ type Options struct {
BatchTimeout fs.Duration `config:"batch_timeout"` BatchTimeout fs.Duration `config:"batch_timeout"`
BatchCommitTimeout fs.Duration `config:"batch_commit_timeout"` BatchCommitTimeout fs.Duration `config:"batch_commit_timeout"`
AsyncBatch bool `config:"async_batch"` AsyncBatch bool `config:"async_batch"`
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
Enc encoder.MultiEncoder `config:"encoding"` Enc encoder.MultiEncoder `config:"encoding"`
} }
@@ -449,7 +442,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
name: name, name: name,
opt: *opt, opt: *opt,
ci: ci, ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
} }
f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout)) f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout))
if err != nil { if err != nil {
@@ -543,7 +536,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
default: default:
return nil, err return nil, err
} }
// if the mount failed we have to abort here // if the moint failed we have to abort here
} }
// if the mount succeeded it's now a normal folder in the users root namespace // if the mount succeeded it's now a normal folder in the users root namespace
// we disable shared folder mode and proceed normally // we disable shared folder mode and proceed normally
@@ -726,7 +719,7 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
} }
for _, entry := range res.Entries { for _, entry := range res.Entries {
leaf := f.opt.Enc.ToStandardName(entry.Name) leaf := f.opt.Enc.ToStandardName(entry.Name)
d := fs.NewDir(leaf, time.Time{}).SetID(entry.SharedFolderId) d := fs.NewDir(leaf, time.Now()).SetID(entry.SharedFolderId)
entries = append(entries, d) entries = append(entries, d)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -913,7 +906,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
leaf := f.opt.Enc.ToStandardName(path.Base(entryPath)) leaf := f.opt.Enc.ToStandardName(path.Base(entryPath))
remote := path.Join(dir, leaf) remote := path.Join(dir, leaf)
if folderInfo != nil { if folderInfo != nil {
d := fs.NewDir(remote, time.Time{}).SetID(folderInfo.Id) d := fs.NewDir(remote, time.Now()).SetID(folderInfo.Id)
entries = append(entries, d) entries = append(entries, d)
} else if fileInfo != nil { } else if fileInfo != nil {
o, err := f.newObjectWithInfo(ctx, remote, fileInfo) o, err := f.newObjectWithInfo(ctx, remote, fileInfo)


@@ -118,9 +118,6 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
Single: 1, Single: 1,
Pass: f.opt.FilePassword, Pass: f.opt.FilePassword,
} }
if f.opt.CDN {
request.CDN = 1
}
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
Path: "/download/get_token.cgi", Path: "/download/get_token.cgi",
@@ -408,32 +405,6 @@ func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename stri
return response, nil return response, nil
} }
func (f *Fs) moveDir(ctx context.Context, folderID int, newLeaf string, destinationFolderID int) (response *MoveDirResponse, err error) {
request := &MoveDirRequest{
FolderID: folderID,
DestinationFolderID: destinationFolderID,
Rename: newLeaf,
// DestinationUser: destinationUser,
}
opts := rest.Opts{
Method: "POST",
Path: "/folder/mv.cgi",
}
response = &MoveDirResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("couldn't move dir: %w", err)
}
return response, nil
}
func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename string) (response *CopyFileResponse, err error) { func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename string) (response *CopyFileResponse, err error) {
request := &CopyFileRequest{ request := &CopyFileRequest{
URLs: []string{url}, URLs: []string{url},
@@ -502,7 +473,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
return nil, fmt.Errorf("didn't get an upload node: %w", err) return nil, fmt.Errorf("didnt got an upload node: %w", err)
} }
// fs.Debugf(f, "Got Upload node") // fs.Debugf(f, "Got Upload node")


@@ -38,9 +38,8 @@ func init() {
Description: "1Fichier", Description: "1Fichier",
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Help: "Your API Key, get it from https://1fichier.com/console/params.pl.", Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
Name: "api_key", Name: "api_key",
Sensitive: true,
}, { }, {
Help: "If you want to download a shared folder, add this parameter.", Help: "If you want to download a shared folder, add this parameter.",
Name: "shared_folder", Name: "shared_folder",
@@ -55,11 +54,6 @@ func init() {
Name: "folder_password", Name: "folder_password",
Advanced: true, Advanced: true,
IsPassword: true, IsPassword: true,
}, {
Help: "Set if you wish to use CDN download links.",
Name: "cdn",
Default: false,
Advanced: true,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,
@@ -95,7 +89,6 @@ type Options struct {
SharedFolder string `config:"shared_folder"` SharedFolder string `config:"shared_folder"`
FilePassword string `config:"file_password"` FilePassword string `config:"file_password"`
FolderPassword string `config:"folder_password"` FolderPassword string `config:"folder_password"`
CDN bool `config:"cdn"`
Enc encoder.MultiEncoder `config:"encoding"` Enc encoder.MultiEncoder `config:"encoding"`
} }
@@ -340,7 +333,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// checking to see if there is one already - use Put() for that. // checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
if size > int64(300e9) { if size > int64(300e9) {
return nil, errors.New("File too big, can't upload") return nil, errors.New("File too big, cant upload")
} else if size == 0 { } else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles return nil, fs.ErrorCantUploadEmptyFiles
} }
@@ -488,51 +481,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil return dstObj, nil
} }
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove.
//
// If destination exists then return fs.ErrorDirExists.
//
// This is complicated by the fact that we can't use moveDir to move
// to a different directory AND rename at the same time as it can
// overwrite files in the source directory.
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
if err != nil {
return err
}
srcIDnumeric, err := strconv.Atoi(srcID)
if err != nil {
return err
}
dstDirectoryIDnumeric, err := strconv.Atoi(dstDirectoryID)
if err != nil {
return err
}
var resp *MoveDirResponse
resp, err = f.moveDir(ctx, srcIDnumeric, dstLeaf, dstDirectoryIDnumeric)
if err != nil {
return fmt.Errorf("couldn't rename leaf: %w", err)
}
if resp.Status != "OK" {
return fmt.Errorf("couldn't rename leaf: %s", resp.Message)
}
srcFs.dirCache.FlushDir(srcRemote)
return nil
}
// Copy src to this remote using server side move operations. // Copy src to this remote using server side move operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object) srcObj, ok := src.(*Object)
@@ -606,7 +554,6 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var ( var (
_ fs.Fs = (*Fs)(nil) _ fs.Fs = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil) _ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil) _ fs.Copier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil) _ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil) _ fs.PutUncheckeder = (*Fs)(nil)


@@ -20,7 +20,6 @@ type DownloadRequest struct {
URL string `json:"url"` URL string `json:"url"`
Single int `json:"single"` Single int `json:"single"`
Pass string `json:"pass,omitempty"` Pass string `json:"pass,omitempty"`
CDN int `json:"cdn,omitempty"`
} }
// RemoveFolderRequest is the request structure of the corresponding request // RemoveFolderRequest is the request structure of the corresponding request
@@ -70,22 +69,6 @@ type MoveFileResponse struct {
URLs []string `json:"urls"` URLs []string `json:"urls"`
} }
// MoveDirRequest is the request structure of the corresponding request
type MoveDirRequest struct {
FolderID int `json:"folder_id"`
DestinationFolderID int `json:"destination_folder_id,omitempty"`
DestinationUser string `json:"destination_user"`
Rename string `json:"rename,omitempty"`
}
// MoveDirResponse is the response structure of the corresponding request
type MoveDirResponse struct {
Status string `json:"status"`
Message string `json:"message"`
OldName string `json:"old_name"`
NewName string `json:"new_name"`
}
// CopyFileRequest is the request structure of the corresponding request // CopyFileRequest is the request structure of the corresponding request
type CopyFileRequest struct { type CopyFileRequest struct {
URLs []string `json:"urls"` URLs []string `json:"urls"`


@@ -20,6 +20,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
"path" "path"
@@ -84,7 +85,6 @@ Leave blank normally.
Fill in to make rclone start with directory of a given ID. Fill in to make rclone start with directory of a given ID.
`, `,
Sensitive: true,
}, { }, {
Name: "permanent_token", Name: "permanent_token",
Help: `Permanent Authentication Token. Help: `Permanent Authentication Token.
@@ -98,7 +98,6 @@ These tokens are normally valid for several years.
For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens
`, `,
Sensitive: true,
}, { }, {
Name: "token", Name: "token",
Help: `Session Token. Help: `Session Token.
@@ -108,8 +107,7 @@ usually valid for 1 hour.
Don't set this value - rclone will set it automatically. Don't set this value - rclone will set it automatically.
`, `,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "token_expiry", Name: "token_expiry",
Help: `Token expiry time. Help: `Token expiry time.
@@ -1188,7 +1186,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, errors.New("can't download - no id") return nil, errors.New("can't download - no id")
} }
if o.contentType == emptyMimeType { if o.contentType == emptyMimeType {
return io.NopCloser(bytes.NewReader([]byte{})), nil return ioutil.NopCloser(bytes.NewReader([]byte{})), nil
} }
fs.FixRangeOption(options, o.size) fs.FixRangeOption(options, o.size)
resp, err := o.fs.rpc(ctx, "getFile", params{ resp, err := o.fs.rpc(ctx, "getFile", params{


@@ -15,7 +15,7 @@ import (
"sync" "sync"
"time" "time"
"github.com/rclone/ftp" "github.com/jlaffaye/ftp"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config"
@@ -28,7 +28,6 @@ import (
"github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env" "github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/proxy"
"github.com/rclone/rclone/lib/readers" "github.com/rclone/rclone/lib/readers"
) )
@@ -49,15 +48,13 @@ func init() {
Description: "FTP", Description: "FTP",
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "host", Name: "host",
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".", Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "user", Name: "user",
Help: "FTP username.", Help: "FTP username.",
Default: currentUser, Default: currentUser,
Sensitive: true,
}, { }, {
Name: "port", Name: "port",
Help: "FTP port number.", Help: "FTP port number.",
@@ -73,7 +70,7 @@ func init() {
When using implicit FTP over TLS the client connects using TLS When using implicit FTP over TLS the client connects using TLS
right from the start which breaks compatibility with right from the start which breaks compatibility with
non-TLS-aware servers. This is usually served over port 990 rather non-TLS-aware servers. This is usually served over port 990 rather
than port 21. Cannot be used in combination with explicit FTPS.`, than port 21. Cannot be used in combination with explicit FTP.`,
Default: false, Default: false,
}, { }, {
Name: "explicit_tls", Name: "explicit_tls",
@@ -81,7 +78,7 @@ than port 21. Cannot be used in combination with explicit FTPS.`,
When using explicit FTP over TLS the client explicitly requests When using explicit FTP over TLS the client explicitly requests
security from the server in order to upgrade a plain text connection security from the server in order to upgrade a plain text connection
to an encrypted one. Cannot be used in combination with implicit FTPS.`, to an encrypted one. Cannot be used in combination with implicit FTP.`,
Default: false, Default: false,
}, { }, {
Name: "concurrency", Name: "concurrency",
@@ -127,11 +124,6 @@ So for |concurrency 3| you'd use |--checkers 2 --transfers 2
Help: "Use MDTM to set modification time (VsFtpd quirk)", Help: "Use MDTM to set modification time (VsFtpd quirk)",
Default: false, Default: false,
Advanced: true, Advanced: true,
}, {
Name: "force_list_hidden",
Help: "Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD.",
Default: false,
Advanced: true,
}, { }, {
Name: "idle_timeout", Name: "idle_timeout",
Default: fs.Duration(60 * time.Second), Default: fs.Duration(60 * time.Second),
@@ -175,18 +167,6 @@ Enabled by default. Use 0 to disable.`,
If this is set and no password is supplied then rclone will ask for a password If this is set and no password is supplied then rclone will ask for a password
`, `,
Advanced: true, Advanced: true,
}, {
Name: "socks_proxy",
Default: "",
Help: `Socks 5 proxy host.
Supports the format user:pass@host:port, user@host:port, host:port.
Example:
myUser:myPass@localhost:9005
`,
Advanced: true,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,
@@ -225,13 +205,11 @@ type Options struct {
DisableMLSD bool `config:"disable_mlsd"` DisableMLSD bool `config:"disable_mlsd"`
DisableUTF8 bool `config:"disable_utf8"` DisableUTF8 bool `config:"disable_utf8"`
WritingMDTM bool `config:"writing_mdtm"` WritingMDTM bool `config:"writing_mdtm"`
ForceListHidden bool `config:"force_list_hidden"`
IdleTimeout fs.Duration `config:"idle_timeout"` IdleTimeout fs.Duration `config:"idle_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"` CloseTimeout fs.Duration `config:"close_timeout"`
ShutTimeout fs.Duration `config:"shut_timeout"` ShutTimeout fs.Duration `config:"shut_timeout"`
AskPassword bool `config:"ask_password"` AskPassword bool `config:"ask_password"`
Enc encoder.MultiEncoder `config:"encoding"` Enc encoder.MultiEncoder `config:"encoding"`
SocksProxy string `config:"socks_proxy"`
} }
// Fs represents a remote FTP server // Fs represents a remote FTP server
@@ -331,33 +309,18 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
return len(p), nil return len(p), nil
} }
// Return a *textproto.Error if err contains one or nil otherwise
func textprotoError(err error) (errX *textproto.Error) {
if errors.As(err, &errX) {
return errX
}
return nil
}
// returns true if this FTP error should be retried
func isRetriableFtpError(err error) bool {
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusNotAvailable, ftp.StatusTransfertAborted:
return true
}
}
return false
}
// shouldRetry returns a boolean as to whether this err deserve to be // shouldRetry returns a boolean as to whether this err deserve to be
// retried. It returns the err as a convenience // retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) { func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) { if fserrors.ContextError(ctx, &err) {
return false, err return false, err
} }
if isRetriableFtpError(err) { switch errX := err.(type) {
return true, err case *textproto.Error:
switch errX.Code {
case ftp.StatusNotAvailable:
return true, err
}
} }
return fserrors.ShouldRetry(err), err return fserrors.ShouldRetry(err), err
} }
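The two versions of the retry logic above differ only in how a *textproto.Error is dug out of a possibly wrapped error (a type switch versus errors.As plus a helper). A minimal standalone sketch of the errors.As form follows; the status codes 421 and 426 match the usual FTP meanings but are hard-coded here purely for illustration:

package main

import (
	"errors"
	"fmt"
	"net/textproto"
)

// textprotoError returns the *textproto.Error wrapped inside err, or nil.
func textprotoError(err error) *textproto.Error {
	var tpErr *textproto.Error
	if errors.As(err, &tpErr) {
		return tpErr
	}
	return nil
}

// isRetriable reports whether err carries an FTP status code worth retrying.
// 421 (service not available) and 426 (transfer aborted) are example codes.
func isRetriable(err error) bool {
	if tpErr := textprotoError(err); tpErr != nil {
		switch tpErr.Code {
		case 421, 426:
			return true
		}
	}
	return false
}

func main() {
	err := fmt.Errorf("open: %w", &textproto.Error{Code: 421, Msg: "service not available"})
	fmt.Println(isRetriable(err)) // true
}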
@@ -367,49 +330,14 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
fs.Debugf(f, "Connecting to FTP server") fs.Debugf(f, "Connecting to FTP server")
// Make ftp library dial with fshttp dialer optionally using TLS // Make ftp library dial with fshttp dialer optionally using TLS
initialConnection := true
dial := func(network, address string) (conn net.Conn, err error) { dial := func(network, address string) (conn net.Conn, err error) {
fs.Debugf(f, "dial(%q,%q)", network, address) conn, err = fshttp.NewDialer(ctx).Dial(network, address)
defer func() { if f.tlsConf != nil && err == nil {
fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err) conn = tls.Client(conn, f.tlsConf)
}()
baseDialer := fshttp.NewDialer(ctx)
if f.opt.SocksProxy != "" {
conn, err = proxy.SOCKS5Dial(network, address, f.opt.SocksProxy, baseDialer)
} else {
conn, err = baseDialer.Dial(network, address)
} }
if err != nil { return
return nil, err
}
// Connect using cleartext only for non TLS
if f.tlsConf == nil {
return conn, nil
}
// Initial connection only needs to be cleartext for explicit TLS
if f.opt.ExplicitTLS && initialConnection {
initialConnection = false
return conn, nil
}
// Upgrade connection to TLS
tlsConn := tls.Client(conn, f.tlsConf)
// Do the initial handshake - tls.Client doesn't do it for us
// If we do this then connections to proftpd/pureftpd lock up
// See: https://github.com/rclone/rclone/issues/6426
// See: https://github.com/jlaffaye/ftp/issues/282
if false {
err = tlsConn.HandshakeContext(ctx)
if err != nil {
_ = conn.Close()
return nil, err
}
}
return tlsConn, nil
}
ftpConfig := []ftp.DialOption{
ftp.DialWithContext(ctx),
ftp.DialWithDialFunc(dial),
} }
ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dial)}
if f.opt.TLS { if f.opt.TLS {
// Our dialer takes care of TLS but ftp library also needs tlsConf // Our dialer takes care of TLS but ftp library also needs tlsConf
@@ -417,6 +345,12 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf)) ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
} else if f.opt.ExplicitTLS { } else if f.opt.ExplicitTLS {
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf)) ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
// Initial connection needs to be cleartext for explicit TLS
conn, err := fshttp.NewDialer(ctx).Dial("tcp", f.dialAddr)
if err != nil {
return nil, err
}
ftpConfig = append(ftpConfig, ftp.DialWithNetConn(conn))
} }
if f.opt.DisableEPSV { if f.opt.DisableEPSV {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true)) ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
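The dial hunk above contrasts two ways of handling explicit FTPS: wrapping every connection in TLS up front versus leaving the first (control) connection in cleartext and upgrading later connections. A minimal, standalone sketch of the latter pattern, assuming a plain net.Dial (the real backend routes dialing through fshttp and an optional SOCKS proxy, which is omitted here):

package main

import (
	"crypto/tls"
	"fmt"
	"net"
)

// newExplicitTLSDialer returns a dial function whose first connection stays
// cleartext (the FTP client negotiates AUTH TLS on it itself) while every
// later connection is wrapped in TLS immediately.
func newExplicitTLSDialer(tlsConf *tls.Config) func(network, address string) (net.Conn, error) {
	first := true
	return func(network, address string) (net.Conn, error) {
		conn, err := net.Dial(network, address)
		if err != nil {
			return nil, err
		}
		if first {
			first = false
			return conn, nil // control connection, upgraded later via AUTH TLS
		}
		return tls.Client(conn, tlsConf), nil
	}
}

func main() {
	dial := newExplicitTLSDialer(&tls.Config{InsecureSkipVerify: true})
	fmt.Printf("%T\n", dial) // func(string, string) (net.Conn, error)
}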
@@ -433,9 +367,6 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
if f.opt.WritingMDTM { if f.opt.WritingMDTM {
ftpConfig = append(ftpConfig, ftp.DialWithWritingMDTM(true)) ftpConfig = append(ftpConfig, ftp.DialWithWritingMDTM(true))
} }
if f.opt.ForceListHidden {
ftpConfig = append(ftpConfig, ftp.DialWithForceListHidden(true))
}
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 { if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0})) ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
} }
@@ -499,7 +430,8 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
*pc = nil *pc = nil
if err != nil { if err != nil {
// If not a regular FTP error code then check the connection // If not a regular FTP error code then check the connection
if tpErr := textprotoError(err); tpErr != nil { var tpErr *textproto.Error
if !errors.As(err, &tpErr) {
nopErr := c.NoOp() nopErr := c.NoOp()
if nopErr != nil { if nopErr != nil {
fs.Debugf(f, "Connection failed, closing: %v", nopErr) fs.Debugf(f, "Connection failed, closing: %v", nopErr)
@@ -601,7 +533,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
} }
f.features = (&fs.Features{ f.features = (&fs.Features{
CanHaveEmptyDirectories: true, CanHaveEmptyDirectories: true,
PartialUploads: true,
}).Fill(ctx, f) }).Fill(ctx, f)
// set the pool drainer timer going // set the pool drainer timer going
if f.opt.IdleTimeout > 0 { if f.opt.IdleTimeout > 0 {
@@ -649,7 +580,8 @@ func (f *Fs) Shutdown(ctx context.Context) error {
// translateErrorFile turns FTP errors into rclone errors if possible for a file // translateErrorFile turns FTP errors into rclone errors if possible for a file
func translateErrorFile(err error) error { func translateErrorFile(err error) error {
if errX := textprotoError(err); errX != nil { switch errX := err.(type) {
case *textproto.Error:
switch errX.Code { switch errX.Code {
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored: case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
err = fs.ErrorObjectNotFound err = fs.ErrorObjectNotFound
@@ -660,7 +592,8 @@ func translateErrorFile(err error) error {
// translateErrorDir turns FTP errors into rclone errors if possible for a directory // translateErrorDir turns FTP errors into rclone errors if possible for a directory
func translateErrorDir(err error) error { func translateErrorDir(err error) error {
if errX := textprotoError(err); errX != nil { switch errX := err.(type) {
case *textproto.Error:
switch errX.Code { switch errX.Code {
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored: case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
err = fs.ErrorDirNotFound err = fs.ErrorDirNotFound
@@ -691,7 +624,8 @@ func (f *Fs) dirFromStandardPath(dir string) string {
// findItem finds a directory entry for the name in its parent directory // findItem finds a directory entry for the name in its parent directory
func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err error) { func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err) // defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
if remote == "" || remote == "." || remote == "/" { fullPath := path.Join(f.root, remote)
if fullPath == "" || fullPath == "." || fullPath == "/" {
// if root, assume exists and synthesize an entry // if root, assume exists and synthesize an entry
return &ftp.Entry{ return &ftp.Entry{
Name: "", Name: "",
@@ -699,38 +633,13 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
Time: time.Now(), Time: time.Now(),
}, nil }, nil
} }
dir := path.Dir(fullPath)
base := path.Base(fullPath)
c, err := f.getFtpConnection(ctx) c, err := f.getFtpConnection(ctx)
if err != nil { if err != nil {
return nil, fmt.Errorf("findItem: %w", err) return nil, fmt.Errorf("findItem: %w", err)
} }
// returns TRUE if MLST is supported which is required to call GetEntry
if c.IsTimePreciseInList() {
entry, err := c.GetEntry(f.opt.Enc.FromStandardPath(remote))
f.putFtpConnection(&c, err)
if err != nil {
err = translateErrorFile(err)
if err == fs.ErrorObjectNotFound {
return nil, nil
}
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusBadArguments:
err = nil
}
}
return nil, err
}
if entry != nil {
f.entryToStandard(entry)
}
return entry, nil
}
dir := path.Dir(remote)
base := path.Base(remote)
files, err := c.List(f.dirFromStandardPath(dir)) files, err := c.List(f.dirFromStandardPath(dir))
f.putFtpConnection(&c, err) f.putFtpConnection(&c, err)
if err != nil { if err != nil {
@@ -749,7 +658,7 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
// it returns the error fs.ErrorObjectNotFound. // it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) { func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err) // defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
entry, err := f.findItem(ctx, path.Join(f.root, remote)) entry, err := f.findItem(ctx, remote)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -771,7 +680,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
// dirExists checks the directory pointed to by remote exists or not // dirExists checks the directory pointed to by remote exists or not
func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) { func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) {
entry, err := f.findItem(ctx, path.Join(f.root, remote)) entry, err := f.findItem(ctx, remote)
if err != nil { if err != nil {
return false, fmt.Errorf("dirExists: %w", err) return false, fmt.Errorf("dirExists: %w", err)
} }
@@ -915,18 +824,32 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// getInfo reads the FileInfo for a path // getInfo reads the FileInfo for a path
func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err error) { func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err error) {
// defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err) // defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err)
file, err := f.findItem(ctx, remote) dir := path.Dir(remote)
base := path.Base(remote)
c, err := f.getFtpConnection(ctx)
if err != nil { if err != nil {
return nil, err return nil, fmt.Errorf("getInfo: %w", err)
} else if file != nil { }
info := &FileInfo{ files, err := c.List(f.dirFromStandardPath(dir))
Name: remote, f.putFtpConnection(&c, err)
Size: file.Size, if err != nil {
ModTime: file.Time, return nil, translateErrorFile(err)
precise: f.fLstTime, }
IsDir: file.Type == ftp.EntryTypeFolder,
for i := range files {
file := files[i]
f.entryToStandard(file)
if file.Name == base {
info := &FileInfo{
Name: remote,
Size: file.Size,
ModTime: file.Time,
precise: f.fLstTime,
IsDir: file.Type == ftp.EntryTypeFolder,
}
return info, nil
} }
return info, nil
} }
return nil, fs.ErrorObjectNotFound return nil, fs.ErrorObjectNotFound
} }
@@ -957,7 +880,8 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
} }
err = c.MakeDir(f.dirFromStandardPath(abspath)) err = c.MakeDir(f.dirFromStandardPath(abspath))
f.putFtpConnection(&c, err) f.putFtpConnection(&c, err)
if errX := textprotoError(err); errX != nil { switch errX := err.(type) {
case *textproto.Error:
switch errX.Code { switch errX.Code {
case ftp.StatusFileUnavailable: // dir already exists: see issue #2181 case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
err = nil err = nil
@@ -1126,7 +1050,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// SetModTime sets the modification time of the object // SetModTime sets the modification time of the object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if !o.fs.fSetTime { if !o.fs.fSetTime {
fs.Debugf(o.fs, "SetModTime is not supported") fs.Errorf(o.fs, "SetModTime is not supported")
return nil return nil
} }
c, err := o.fs.getFtpConnection(ctx) c, err := o.fs.getFtpConnection(ctx)
@@ -1198,7 +1122,8 @@ func (f *ftpReadCloser) Close() error {
// mask the error if it was caused by a premature close // mask the error if it was caused by a premature close
// NB StatusAboutToSend is to work around a bug in pureftpd // NB StatusAboutToSend is to work around a bug in pureftpd
// See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257 // See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
if errX := textprotoError(err); errX != nil { switch errX := err.(type) {
case *textproto.Error:
switch errX.Code { switch errX.Code {
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend: case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend:
err = nil err = nil
@@ -1224,26 +1149,15 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
} }
} }
} }
c, err := o.fs.getFtpConnection(ctx)
var (
fd *ftp.Response
c *ftp.ServerConn
)
err = o.fs.pacer.Call(func() (bool, error) {
c, err = o.fs.getFtpConnection(ctx)
if err != nil {
return false, err // getFtpConnection has retries already
}
fd, err = c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
}
return shouldRetry(ctx, err)
})
if err != nil { if err != nil {
return nil, fmt.Errorf("open: %w", err) return nil, fmt.Errorf("open: %w", err)
} }
fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
return nil, fmt.Errorf("open: %w", err)
}
rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs} rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
return rc, nil return rc, nil
} }
@@ -1276,10 +1190,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} }
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in) err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
// Ignore error 250 here - send by some servers // Ignore error 250 here - send by some servers
if errX := textprotoError(err); errX != nil { if err != nil {
switch errX.Code { switch errX := err.(type) {
case ftp.StatusRequestedFileActionOK: case *textproto.Error:
err = nil switch errX.Code {
case ftp.StatusRequestedFileActionOK:
err = nil
}
} }
} }
if err != nil { if err != nil {


@@ -34,9 +34,9 @@ func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
// test that big file uploads do not cause network i/o timeout // test that big file uploads do not cause network i/o timeout
func (f *Fs) testUploadTimeout(t *testing.T) { func (f *Fs) testUploadTimeout(t *testing.T) {
const ( const (
fileSize = 100000000 // 100 MiB fileSize = 100000000 // 100 MiB
idleTimeout = 1 * time.Second // small because test server is local idleTimeout = 40 * time.Millisecond // small because test server is local
maxTime = 10 * time.Second // prevent test hangup maxTime = 10 * time.Second // prevent test hangup
) )
if testing.Short() { if testing.Short() {


@@ -19,8 +19,8 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"os"
"path" "path"
"strconv" "strconv"
"strings" "strings"
@@ -82,8 +82,7 @@ func init() {
saFile, _ := m.Get("service_account_file") saFile, _ := m.Get("service_account_file")
saCreds, _ := m.Get("service_account_credentials") saCreds, _ := m.Get("service_account_credentials")
anonymous, _ := m.Get("anonymous") anonymous, _ := m.Get("anonymous")
envAuth, _ := m.Get("env_auth") if saFile != "" || saCreds != "" || anonymous == "true" {
if saFile != "" || saCreds != "" || anonymous == "true" || envAuth == "true" {
return nil, nil return nil, nil
} }
return oauthutil.ConfigOut("", &oauthutil.Options{ return oauthutil.ConfigOut("", &oauthutil.Options{
@@ -91,21 +90,15 @@ func init() {
}) })
}, },
Options: append(oauthutil.SharedOptions, []fs.Option{{ Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "project_number", Name: "project_number",
Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.", Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
Sensitive: true,
}, {
Name: "user_project",
Help: "User project.\n\nOptional - needed only for requester pays.",
Sensitive: true,
}, { }, {
Name: "service_account_file", Name: "service_account_file",
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp, Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
}, { }, {
Name: "service_account_credentials", Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.", Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideBoth, Hide: fs.OptionHideBoth,
Sensitive: true,
}, { }, {
Name: "anonymous", Name: "anonymous",
Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.", Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
@@ -304,15 +297,6 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
Value: "DURABLE_REDUCED_AVAILABILITY", Value: "DURABLE_REDUCED_AVAILABILITY",
Help: "Durable reduced availability storage class", Help: "Durable reduced availability storage class",
}}, }},
}, {
Name: "directory_markers",
Default: false,
Advanced: true,
Help: `Upload an empty object with a trailing slash when a new directory is created
Empty folders are unsupported for bucket based remotes, this option creates an empty
object ending with "/", to persist the folder.
`,
}, { }, {
Name: "no_check_bucket", Name: "no_check_bucket",
Help: `If set, don't attempt to check the bucket exists or create it. Help: `If set, don't attempt to check the bucket exists or create it.
@@ -346,17 +330,6 @@ can't check the size and hash but the file contents will be decompressed.
Default: (encoder.Base | Default: (encoder.Base |
encoder.EncodeCrLf | encoder.EncodeCrLf |
encoder.EncodeInvalidUtf8), encoder.EncodeInvalidUtf8),
}, {
Name: "env_auth",
Help: "Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
Default: false,
Examples: []fs.OptionExample{{
Value: "false",
Help: "Enter credentials in the next step.",
}, {
Value: "true",
Help: "Get GCP IAM credentials from the environment (env vars or IAM).",
}},
}}...), }}...),
}) })
} }
@@ -364,7 +337,6 @@ can't check the size and hash but the file contents will be decompressed.
// Options defines the configuration for this backend // Options defines the configuration for this backend
type Options struct { type Options struct {
ProjectNumber string `config:"project_number"` ProjectNumber string `config:"project_number"`
UserProject string `config:"user_project"`
ServiceAccountFile string `config:"service_account_file"` ServiceAccountFile string `config:"service_account_file"`
ServiceAccountCredentials string `config:"service_account_credentials"` ServiceAccountCredentials string `config:"service_account_credentials"`
Anonymous bool `config:"anonymous"` Anonymous bool `config:"anonymous"`
@@ -377,8 +349,6 @@ type Options struct {
Decompress bool `config:"decompress"` Decompress bool `config:"decompress"`
Endpoint string `config:"endpoint"` Endpoint string `config:"endpoint"`
Enc encoder.MultiEncoder `config:"encoding"` Enc encoder.MultiEncoder `config:"encoding"`
EnvAuth bool `config:"env_auth"`
DirectoryMarkers bool `config:"directory_markers"`
} }
// Fs represents a remote storage server // Fs represents a remote storage server
@@ -474,7 +444,7 @@ func parsePath(path string) (root string) {
// split returns bucket and bucketPath from the rootRelativePath // split returns bucket and bucketPath from the rootRelativePath
// relative to f.root // relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) { func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath)) bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath) return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
} }
@@ -517,7 +487,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// try loading service account credentials from env variable, then from a file // try loading service account credentials from env variable, then from a file
if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" { if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServiceAccountFile)) loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
if err != nil { if err != nil {
return nil, fmt.Errorf("error opening service account credentials file: %w", err) return nil, fmt.Errorf("error opening service account credentials file: %w", err)
} }
@@ -530,11 +500,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil { if err != nil {
return nil, fmt.Errorf("failed configuring Google Cloud Storage Service Account: %w", err) return nil, fmt.Errorf("failed configuring Google Cloud Storage Service Account: %w", err)
} }
} else if opt.EnvAuth {
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
if err != nil {
return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
}
} else { } else {
oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig) oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
if err != nil { if err != nil {
@@ -560,9 +525,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
BucketBased: true, BucketBased: true,
BucketBasedRootOK: true, BucketBasedRootOK: true,
}).Fill(ctx, f) }).Fill(ctx, f)
if opt.DirectoryMarkers {
f.features.CanHaveEmptyDirectories = true
}
// Create a new authorized Drive client. // Create a new authorized Drive client.
f.client = oAuthClient f.client = oAuthClient
@@ -579,11 +541,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Check to see if the object exists // Check to see if the object exists
encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory) encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
get := f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx) _, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
if f.opt.UserProject != "" {
get = get.UserProject(f.opt.UserProject)
}
_, err = get.Do()
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err == nil { if err == nil {
@@ -643,13 +601,9 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
directory += "/" directory += "/"
} }
list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks) list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks)
if f.opt.UserProject != "" {
list = list.UserProject(f.opt.UserProject)
}
if !recurse { if !recurse {
list = list.Delimiter("/") list = list.Delimiter("/")
} }
foundItems := 0
for { for {
var objects *storage.Objects var objects *storage.Objects
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
@@ -665,7 +619,6 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
return err return err
} }
if !recurse { if !recurse {
foundItems += len(objects.Prefixes)
var object storage.Object var object storage.Object
for _, remote := range objects.Prefixes { for _, remote := range objects.Prefixes {
if !strings.HasSuffix(remote, "/") { if !strings.HasSuffix(remote, "/") {
@@ -686,29 +639,22 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
} }
} }
} }
foundItems += len(objects.Items)
for _, object := range objects.Items { for _, object := range objects.Items {
remote := f.opt.Enc.ToStandardPath(object.Name) remote := f.opt.Enc.ToStandardPath(object.Name)
if !strings.HasPrefix(remote, prefix) { if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", object.Name) fs.Logf(f, "Odd name received %q", object.Name)
continue continue
} }
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
// is this a directory marker?
if isDirectory {
// Don't insert the root directory
if remote == directory {
continue
}
// process directory markers as directories
remote = strings.TrimRight(remote, "/")
}
remote = remote[len(prefix):] remote = remote[len(prefix):]
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
if addBucket { if addBucket {
remote = path.Join(bucket, remote) remote = path.Join(bucket, remote)
} }
// is this a directory marker?
err = fn(remote, object, isDirectory) if isDirectory {
continue // skip directory marker
}
err = fn(remote, object, false)
if err != nil { if err != nil {
return err return err
} }
@@ -718,17 +664,6 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
} }
list.PageToken(objects.NextPageToken) list.PageToken(objects.NextPageToken)
} }
if f.opt.DirectoryMarkers && foundItems == 0 && directory != "" {
// Determine whether the directory exists or not by whether it has a marker
_, err := f.readObjectInfo(ctx, bucket, directory)
if err != nil {
if err == fs.ErrorObjectNotFound {
return fs.ErrorDirNotFound
}
return err
}
}
return nil return nil
} }
@@ -772,9 +707,6 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
return nil, errors.New("can't list buckets without project number") return nil, errors.New("can't list buckets without project number")
} }
listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks) listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks)
if f.opt.UserProject != "" {
listBuckets = listBuckets.UserProject(f.opt.UserProject)
}
for { for {
var buckets *storage.Buckets var buckets *storage.Buckets
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
@@ -892,69 +824,10 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
return f.Put(ctx, in, src, options...) return f.Put(ctx, in, src, options...)
} }
// Create directory marker file and parents
func (f *Fs) createDirectoryMarker(ctx context.Context, bucket, dir string) error {
if !f.opt.DirectoryMarkers || bucket == "" {
return nil
}
// Object to be uploaded
o := &Object{
fs: f,
modTime: time.Now(),
}
for {
_, bucketPath := f.split(dir)
// Don't create the directory marker if it is the bucket or at the very root
if bucketPath == "" {
break
}
o.remote = dir + "/"
// Check to see if object already exists
_, err := o.readObjectInfo(ctx)
if err == nil {
return nil
}
// Upload it if not
fs.Debugf(o, "Creating directory marker")
content := io.Reader(strings.NewReader(""))
err = o.Update(ctx, content, o)
if err != nil {
return fmt.Errorf("creating directory marker failed: %w", err)
}
// Now check parent directory exists
dir = path.Dir(dir)
if dir == "/" || dir == "." {
break
}
}
return nil
}
// Mkdir creates the bucket if it doesn't exist // Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) { func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
bucket, _ := f.split(dir) bucket, _ := f.split(dir)
e := f.checkBucket(ctx, bucket) return f.makeBucket(ctx, bucket)
if e != nil {
return e
}
return f.createDirectoryMarker(ctx, bucket, dir)
}
// mkdirParent creates the parent bucket/directory if it doesn't exist
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
remote = strings.TrimRight(remote, "/")
dir := path.Dir(remote)
if dir == "/" || dir == "." {
dir = ""
}
return f.Mkdir(ctx, dir)
} }
// makeBucket creates the bucket if it doesn't exist // makeBucket creates the bucket if it doesn't exist
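One side of the Mkdir changes above persists otherwise-empty directories by uploading zero-length marker objects whose names end in "/". A minimal sketch of that idea against the google.golang.org/api/storage/v1 client; bucket and directory names are placeholders and error handling is reduced to the essentials:

package main

import (
	"context"
	"log"
	"strings"

	storage "google.golang.org/api/storage/v1"
)

// createDirMarker uploads a zero-length object named "<dir>/" so that an
// otherwise empty directory remains visible in a bucket-based store.
func createDirMarker(ctx context.Context, svc *storage.Service, bucket, dir string) error {
	name := strings.TrimRight(dir, "/") + "/"
	_, err := svc.Objects.Insert(bucket, &storage.Object{Name: name}).
		Media(strings.NewReader("")).
		Context(ctx).
		Do()
	return err
}

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx) // uses application default credentials
	if err != nil {
		log.Fatal(err)
	}
	// "my-bucket" and "photos/2022" are placeholders.
	if err := createDirMarker(ctx, svc, "my-bucket", "photos/2022"); err != nil {
		log.Fatal(err)
	}
}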
@@ -963,11 +836,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
// List something from the bucket to see if it exists. Doing it like this enables the use of a // List something from the bucket to see if it exists. Doing it like this enables the use of a
// service account that only has the "Storage Object Admin" role. See #2193 for details. // service account that only has the "Storage Object Admin" role. See #2193 for details.
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
list := f.svc.Objects.List(bucket).MaxResults(1).Context(ctx) _, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
if f.opt.UserProject != "" {
list = list.UserProject(f.opt.UserProject)
}
_, err = list.Do()
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err == nil { if err == nil {
@@ -1002,11 +871,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
if !f.opt.BucketPolicyOnly { if !f.opt.BucketPolicyOnly {
insertBucket.PredefinedAcl(f.opt.BucketACL) insertBucket.PredefinedAcl(f.opt.BucketACL)
} }
insertBucket = insertBucket.Context(ctx) _, err = insertBucket.Context(ctx).Do()
if f.opt.UserProject != "" {
insertBucket = insertBucket.UserProject(f.opt.UserProject)
}
_, err = insertBucket.Do()
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
}, nil) }, nil)
@@ -1026,28 +891,12 @@ func (f *Fs) checkBucket(ctx context.Context, bucket string) error {
// to delete was not empty. // to delete was not empty.
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) { func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
bucket, directory := f.split(dir) bucket, directory := f.split(dir)
// Remove directory marker file
if f.opt.DirectoryMarkers && bucket != "" && dir != "" {
o := &Object{
fs: f,
remote: dir + "/",
}
fs.Debugf(o, "Removing directory marker")
err := o.Remove(ctx)
if err != nil {
return fmt.Errorf("removing directory marker failed: %w", err)
}
}
if bucket == "" || directory != "" { if bucket == "" || directory != "" {
return nil return nil
} }
return f.cache.Remove(bucket, func() error { return f.cache.Remove(bucket, func() error {
return f.pacer.Call(func() (bool, error) { return f.pacer.Call(func() (bool, error) {
deleteBucket := f.svc.Buckets.Delete(bucket).Context(ctx) err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
if f.opt.UserProject != "" {
deleteBucket = deleteBucket.UserProject(f.opt.UserProject)
}
err = deleteBucket.Do()
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
}) })
@@ -1069,7 +918,7 @@ func (f *Fs) Precision() time.Duration {
// If it isn't possible then return fs.ErrorCantCopy // If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstBucket, dstPath := f.split(remote) dstBucket, dstPath := f.split(remote)
err := f.mkdirParent(ctx, remote) err := f.checkBucket(ctx, dstBucket)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -1093,11 +942,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var rewriteResponse *storage.RewriteResponse var rewriteResponse *storage.RewriteResponse
for { for {
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
rewriteRequest = rewriteRequest.Context(ctx) rewriteResponse, err = rewriteRequest.Context(ctx).Do()
if f.opt.UserProject != "" {
rewriteRequest.UserProject(f.opt.UserProject)
}
rewriteResponse, err = rewriteRequest.Do()
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
@@ -1207,17 +1052,8 @@ func (o *Object) setMetaData(info *storage.Object) {
// readObjectInfo reads the definition for an object // readObjectInfo reads the definition for an object
func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) { func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) {
bucket, bucketPath := o.split() bucket, bucketPath := o.split()
return o.fs.readObjectInfo(ctx, bucket, bucketPath) err = o.fs.pacer.Call(func() (bool, error) {
} object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
// readObjectInfo reads the definition for an object
func (f *Fs) readObjectInfo(ctx context.Context, bucket, bucketPath string) (object *storage.Object, err error) {
err = f.pacer.Call(func() (bool, error) {
get := f.svc.Objects.Get(bucket, bucketPath).Context(ctx)
if f.opt.UserProject != "" {
get = get.UserProject(f.opt.UserProject)
}
object, err = get.Do()
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
@@ -1289,11 +1125,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
if !o.fs.opt.BucketPolicyOnly { if !o.fs.opt.BucketPolicyOnly {
copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL) copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
} }
copyObject = copyObject.Context(ctx) newObject, err = copyObject.Context(ctx).Do()
if o.fs.opt.UserProject != "" {
copyObject = copyObject.UserProject(o.fs.opt.UserProject)
}
newObject, err = copyObject.Do()
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
@@ -1310,9 +1142,6 @@ func (o *Object) Storable() bool {
// Open an object for read // Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
if o.fs.opt.UserProject != "" {
o.url = o.url + "&userProject=" + o.fs.opt.UserProject
}
req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil) req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -1356,14 +1185,11 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Update the object with the contents of the io.Reader, modTime and size // Update the object with the contents of the io.Reader, modTime and size
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
bucket, bucketPath := o.split() bucket, bucketPath := o.split()
// Create parent dir/bucket if not saving directory marker err := o.fs.checkBucket(ctx, bucket)
if !strings.HasSuffix(o.remote, "/") { if err != nil {
err = o.fs.mkdirParent(ctx, o.remote) return err
if err != nil {
return err
}
} }
modTime := src.ModTime(ctx) modTime := src.ModTime(ctx)
@@ -1408,11 +1234,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if !o.fs.opt.BucketPolicyOnly { if !o.fs.opt.BucketPolicyOnly {
insertObject.PredefinedAcl(o.fs.opt.ObjectACL) insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
} }
insertObject = insertObject.Context(ctx) newObject, err = insertObject.Context(ctx).Do()
if o.fs.opt.UserProject != "" {
insertObject = insertObject.UserProject(o.fs.opt.UserProject)
}
newObject, err = insertObject.Do()
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
@@ -1427,11 +1249,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
func (o *Object) Remove(ctx context.Context) (err error) { func (o *Object) Remove(ctx context.Context) (err error) {
bucket, bucketPath := o.split() bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
deleteBucket := o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx) err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
if o.fs.opt.UserProject != "" {
deleteBucket = deleteBucket.UserProject(o.fs.opt.UserProject)
}
err = deleteBucket.Do()
return shouldRetry(ctx, err) return shouldRetry(ctx, err)
}) })
return err return err
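Several hunks in this file thread an optional user project through every storage call, which is how requester-pays buckets bill the caller instead of the bucket owner. A minimal sketch of conditionally attaching it to a list call with the google.golang.org/api/storage/v1 client; the project and bucket names are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	storage "google.golang.org/api/storage/v1"
)

// listObjects lists a bucket, billing the request to userProject when the
// bucket has requester-pays enabled (pass "" otherwise).
func listObjects(ctx context.Context, svc *storage.Service, bucket, userProject string) error {
	call := svc.Objects.List(bucket).MaxResults(100).Context(ctx)
	if userProject != "" {
		call = call.UserProject(userProject)
	}
	objects, err := call.Do()
	if err != nil {
		return err
	}
	for _, o := range objects.Items {
		fmt.Println(o.Name)
	}
	return nil
}

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	if err := listObjects(ctx, svc, "my-requester-pays-bucket", "my-billing-project"); err != nil {
		log.Fatal(err)
	}
}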


@@ -6,7 +6,6 @@ import (
"testing" "testing"
"github.com/rclone/rclone/backend/googlecloudstorage" "github.com/rclone/rclone/backend/googlecloudstorage"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests" "github.com/rclone/rclone/fstest/fstests"
) )
@@ -17,17 +16,3 @@ func TestIntegration(t *testing.T) {
NilObject: (*googlecloudstorage.Object)(nil), NilObject: (*googlecloudstorage.Object)(nil),
}) })
} }
func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
name := "TestGoogleCloudStorage"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*googlecloudstorage.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "directory_markers", Value: "true"},
},
})
}


@@ -3,7 +3,7 @@ package googlephotos
import ( import (
"context" "context"
"fmt" "fmt"
"io" "io/ioutil"
"net/http" "net/http"
"path" "path"
"testing" "testing"
@@ -12,6 +12,7 @@ import (
_ "github.com/rclone/rclone/backend/local" _ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@@ -55,7 +56,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
in, err := srcObj.Open(ctx) in, err := srcObj.Open(ctx)
require.NoError(t, err) require.NoError(t, err)
dstObj, err := f.Put(ctx, in, fs.NewOverrideRemote(srcObj, remote)) dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote()) assert.Equal(t, remote, dstObj.Remote())
_ = in.Close() _ = in.Close()
@@ -98,7 +99,7 @@ func TestIntegration(t *testing.T) {
t.Run("ObjectOpen", func(t *testing.T) { t.Run("ObjectOpen", func(t *testing.T) {
in, err := dstObj.Open(ctx) in, err := dstObj.Open(ctx)
require.NoError(t, err) require.NoError(t, err)
buf, err := io.ReadAll(in) buf, err := ioutil.ReadAll(in)
require.NoError(t, err) require.NoError(t, err)
require.NoError(t, in.Close()) require.NoError(t, in.Close())
assert.True(t, len(buf) > 1000) assert.True(t, len(buf) > 1000)
@@ -220,7 +221,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
in, err := srcObj.Open(ctx) in, err := srcObj.Open(ctx)
require.NoError(t, err) require.NoError(t, err)
dstObj, err := f.Put(ctx, in, fs.NewOverrideRemote(srcObj, remote)) dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote()) assert.Equal(t, remote, dstObj.Remote())
_ = in.Close() _ = in.Close()


@@ -161,7 +161,7 @@ func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bo
if err := o.putHashes(ctx, hashMap{hashType: hash}); err != nil { if err := o.putHashes(ctx, hashMap{hashType: hash}); err != nil {
fs.Errorf(nil, "%s: failed to import: %v", remote, err) fs.Errorf(nil, "%s: failed to import: %v", remote, err)
} }
accounting.Stats(ctx).NewCheckingTransfer(obj, "importing").Done(ctx, err) accounting.Stats(ctx).NewCheckingTransfer(obj).Done(ctx, err)
doneCount++ doneCount++
} }
}) })


@@ -166,7 +166,6 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
ReadMetadata: true, ReadMetadata: true,
WriteMetadata: true, WriteMetadata: true,
UserMetadata: true, UserMetadata: true,
PartialUploads: true,
} }
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs) f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)


@@ -5,6 +5,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"path" "path"
"time" "time"
@@ -117,7 +118,7 @@ func (o *Object) updateHashes(ctx context.Context) error {
defer func() { defer func() {
_ = r.Close() _ = r.Close()
}() }()
if _, err = io.Copy(io.Discard, r); err != nil { if _, err = io.Copy(ioutil.Discard, r); err != nil {
fs.Infof(o, "update failed (copy): %v", err) fs.Infof(o, "update failed (copy): %v", err)
return err return err
} }
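The hunk above drains the object through io.Copy purely so that a wrapping reader can observe every byte; the only change is io.Discard replacing ioutil.Discard. A minimal standard-library sketch of the same drain-and-hash idea, independent of rclone's hasher types:

package main

import (
	"crypto/md5"
	"crypto/sha1"
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader("some object contents")

	md5sum := md5.New()
	sha1sum := sha1.New()

	// TeeReader feeds the hashers as the data is drained into io.Discard.
	tee := io.TeeReader(src, io.MultiWriter(md5sum, sha1sum))
	if _, err := io.Copy(io.Discard, tee); err != nil {
		panic(err)
	}

	fmt.Printf("md5:  %x\n", md5sum.Sum(nil))
	fmt.Printf("sha1: %x\n", sha1sum.Sum(nil))
}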


@@ -19,10 +19,9 @@ func init() {
Description: "Hadoop distributed file system", Description: "Hadoop distributed file system",
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "namenode", Name: "namenode",
Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.", Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "username", Name: "username",
Help: "Hadoop user name.", Help: "Hadoop user name.",
@@ -30,7 +29,6 @@ func init() {
Value: "root", Value: "root",
Help: "Connect to hdfs as root.", Help: "Connect to hdfs as root.",
}}, }},
Sensitive: true,
}, { }, {
Name: "service_principal_name", Name: "service_principal_name",
Help: `Kerberos service principal name for the namenode. Help: `Kerberos service principal name for the namenode.
@@ -38,16 +36,15 @@ func init() {
Enables KERBEROS authentication. Specifies the Service Principal Name Enables KERBEROS authentication. Specifies the Service Principal Name
(SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\" (SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`, for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "data_transfer_protection", Name: "data_transfer_protection",
Help: `Kerberos data transfer protection: authentication|integrity|privacy. Help: `Kerberos data transfer protection: authentication|integrity|privacy.
Specifies whether or not authentication, data signature integrity Specifies whether or not authentication, data signature integrity
checks, and wire encryption are required when communicating with checks, and wire encryption is required when communicating the the
the datanodes. Possible values are 'authentication', 'integrity' datanodes. Possible values are 'authentication', 'integrity' and
and 'privacy'. Used only with KERBEROS enabled.`, 'privacy'. Used only with KERBEROS enabled.`,
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "privacy", Value: "privacy",
Help: "Ensure authentication, integrity and encryption enabled.", Help: "Ensure authentication, integrity and encryption enabled.",


@@ -294,6 +294,15 @@ func (f *Fs) copyOrMove(ctx context.Context, isDirectory bool, operationType Cop
return &result, nil return &result, nil
} }
// copyDirectory moves the directory at the source-path to the destination-path and
// returns the resulting api-object if successful.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
func (f *Fs) copyDirectory(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
return f.copyOrMove(ctx, true, CopyOriginalPreserveModTime, source, destination, onExist)
}
// moveDirectory moves the directory at the source-path to the destination-path and // moveDirectory moves the directory at the source-path to the destination-path and
// returns the resulting api-object if successful. // returns the resulting api-object if successful.
// //


@@ -2,7 +2,7 @@
package hidrive package hidrive
// FIXME HiDrive only supports file or folder names of 255 characters or less. // FIXME HiDrive only supports file or folder names of 255 characters or less.
// Operations that create files or folders with longer names will throw an HTTP error: // Operations that create files oder folder with longer names will throw a HTTP error:
// - 422 Unprocessable Entity // - 422 Unprocessable Entity
// A more graceful way for rclone to handle this may be desirable. // A more graceful way for rclone to handle this may be desirable.
@@ -338,7 +338,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, fmt.Errorf("could not access root-prefix: %w", err) return nil, fmt.Errorf("could not access root-prefix: %w", err)
} }
if item.Type != api.HiDriveObjectTypeDirectory { if item.Type != api.HiDriveObjectTypeDirectory {
return nil, errors.New("the root-prefix needs to point to a valid directory or be empty") return nil, errors.New("The root-prefix needs to point to a valid directory or be empty")
} }
} }


@@ -13,6 +13,7 @@ import (
"net/http" "net/http"
"net/url" "net/url"
"path" "path"
"strconv"
"strings" "strings"
"sync" "sync"
"time" "time"
@@ -304,7 +305,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
fs: f, fs: f,
remote: remote, remote: remote,
} }
err := o.head(ctx) err := o.stat(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -316,6 +317,15 @@ func (f *Fs) url(remote string) string {
return f.endpointURL + rest.URLPathEscape(remote) return f.endpointURL + rest.URLPathEscape(remote)
} }
// parse s into an int64, on failure return def
func parseInt64(s string, def int64) int64 {
n, e := strconv.ParseInt(s, 10, 64)
if e != nil {
return def
}
return n
}
// Errors returned by parseName // Errors returned by parseName
var ( var (
errURLJoinFailed = errors.New("URLJoin failed") errURLJoinFailed = errors.New("URLJoin failed")
@@ -490,12 +500,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
fs: f, fs: f,
remote: remote, remote: remote,
} }
switch err := file.head(ctx); err { switch err := file.stat(ctx); err {
case nil: case nil:
add(file) add(file)
case fs.ErrorNotAFile: case fs.ErrorNotAFile:
// ...found a directory not a file // ...found a directory not a file
add(fs.NewDir(remote, time.Time{})) add(fs.NewDir(remote, timeUnset))
default: default:
fs.Debugf(remote, "skipping because of error: %v", err) fs.Debugf(remote, "skipping because of error: %v", err)
} }
@@ -507,7 +517,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
name = strings.TrimRight(name, "/") name = strings.TrimRight(name, "/")
remote := path.Join(dir, name) remote := path.Join(dir, name)
if isDir { if isDir {
add(fs.NewDir(remote, time.Time{})) add(fs.NewDir(remote, timeUnset))
} else { } else {
in <- remote in <- remote
} }
@@ -569,8 +579,8 @@ func (o *Object) url() string {
return o.fs.url(o.remote) return o.fs.url(o.remote)
} }
// head sends a HEAD request to update info fields in the Object // stat updates the info field in the Object
func (o *Object) head(ctx context.Context) error { func (o *Object) stat(ctx context.Context) error {
if o.fs.opt.NoHead { if o.fs.opt.NoHead {
o.size = -1 o.size = -1
o.modTime = timeUnset o.modTime = timeUnset
@@ -591,19 +601,13 @@ func (o *Object) head(ctx context.Context) error {
if err != nil { if err != nil {
return fmt.Errorf("failed to stat: %w", err) return fmt.Errorf("failed to stat: %w", err)
} }
return o.decodeMetadata(ctx, res)
}
// decodeMetadata updates info fields in the Object according to HTTP response headers
func (o *Object) decodeMetadata(ctx context.Context, res *http.Response) error {
t, err := http.ParseTime(res.Header.Get("Last-Modified")) t, err := http.ParseTime(res.Header.Get("Last-Modified"))
if err != nil { if err != nil {
t = timeUnset t = timeUnset
} }
o.size = parseInt64(res.Header.Get("Content-Length"), -1)
o.modTime = t o.modTime = t
o.contentType = res.Header.Get("Content-Type") o.contentType = res.Header.Get("Content-Type")
o.size = rest.ParseSizeFromHeaders(res.Header)
// If NoSlash is set then check ContentType to see if it is a directory // If NoSlash is set then check ContentType to see if it is a directory
if o.fs.opt.NoSlash { if o.fs.opt.NoSlash {
mediaType, _, err := mime.ParseMediaType(o.contentType) mediaType, _, err := mime.ParseMediaType(o.contentType)
@@ -649,9 +653,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if err != nil { if err != nil {
return nil, fmt.Errorf("Open failed: %w", err) return nil, fmt.Errorf("Open failed: %w", err)
} }
if err = o.decodeMetadata(ctx, res); err != nil {
return nil, fmt.Errorf("decodeMetadata failed: %w", err)
}
return res.Body, nil return res.Body, nil
} }


@@ -3,7 +3,7 @@ package http
import ( import (
"context" "context"
"fmt" "fmt"
"io" "io/ioutil"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
"net/url" "net/url"
@@ -33,21 +33,20 @@ var (
lineEndSize = 1 lineEndSize = 1
) )
// prepareServer prepares the test server and shuts it down automatically // prepareServer the test server and return a function to tidy it up afterwards
// when the test completes. func prepareServer(t *testing.T) (configmap.Simple, func()) {
func prepareServer(t *testing.T) configmap.Simple {
// file server for test/files // file server for test/files
fileServer := http.FileServer(http.Dir(filesPath)) fileServer := http.FileServer(http.Dir(filesPath))
// verify the file path is correct, and also check which line endings // verify the file path is correct, and also check which line endings
// are used to get sizes right ("\n" except on Windows, but even there // are used to get sizes right ("\n" except on Windows, but even there
// we may have "\n" or "\r\n" depending on git crlf setting) // we may have "\n" or "\r\n" depending on git crlf setting)
fileList, err := os.ReadDir(filesPath) fileList, err := ioutil.ReadDir(filesPath)
require.NoError(t, err) require.NoError(t, err)
require.Greater(t, len(fileList), 0) require.Greater(t, len(fileList), 0)
for _, file := range fileList { for _, file := range fileList {
if !file.IsDir() { if !file.IsDir() {
data, _ := os.ReadFile(filepath.Join(filesPath, file.Name())) data, _ := ioutil.ReadFile(filepath.Join(filesPath, file.Name()))
if strings.HasSuffix(string(data), "\r\n") { if strings.HasSuffix(string(data), "\r\n") {
lineEndSize = 2 lineEndSize = 2
} }
@@ -79,21 +78,20 @@ func prepareServer(t *testing.T) configmap.Simple {
"url": ts.URL, "url": ts.URL,
"headers": strings.Join(headers, ","), "headers": strings.Join(headers, ","),
} }
t.Cleanup(ts.Close)
return m // return a function to tidy up
return m, ts.Close
} }
// prepare prepares the test server and shuts it down automatically // prepare the test server and return a function to tidy it up afterwards
// when the test completes. func prepare(t *testing.T) (fs.Fs, func()) {
func prepare(t *testing.T) fs.Fs { m, tidy := prepareServer(t)
m := prepareServer(t)
// Instantiate it // Instantiate it
f, err := NewFs(context.Background(), remoteName, "", m) f, err := NewFs(context.Background(), remoteName, "", m)
require.NoError(t, err) require.NoError(t, err)
return f return f, tidy
} }
func testListRoot(t *testing.T, f fs.Fs, noSlash bool) { func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
@@ -136,19 +134,22 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
} }
func TestListRoot(t *testing.T) { func TestListRoot(t *testing.T) {
f := prepare(t) f, tidy := prepare(t)
defer tidy()
testListRoot(t, f, false) testListRoot(t, f, false)
} }
func TestListRootNoSlash(t *testing.T) { func TestListRootNoSlash(t *testing.T) {
f := prepare(t) f, tidy := prepare(t)
f.(*Fs).opt.NoSlash = true f.(*Fs).opt.NoSlash = true
defer tidy()
testListRoot(t, f, true) testListRoot(t, f, true)
} }
func TestListSubDir(t *testing.T) { func TestListSubDir(t *testing.T) {
f := prepare(t) f, tidy := prepare(t)
defer tidy()
entries, err := f.List(context.Background(), "three") entries, err := f.List(context.Background(), "three")
require.NoError(t, err) require.NoError(t, err)
@@ -165,7 +166,8 @@ func TestListSubDir(t *testing.T) {
} }
func TestNewObject(t *testing.T) { func TestNewObject(t *testing.T) {
f := prepare(t) f, tidy := prepare(t)
defer tidy()
o, err := f.NewObject(context.Background(), "four/under four.txt") o, err := f.NewObject(context.Background(), "four/under four.txt")
require.NoError(t, err) require.NoError(t, err)
@@ -192,69 +194,36 @@ func TestNewObject(t *testing.T) {
} }
func TestOpen(t *testing.T) { func TestOpen(t *testing.T) {
m := prepareServer(t) f, tidy := prepare(t)
defer tidy()
for _, head := range []bool{false, true} { o, err := f.NewObject(context.Background(), "four/under four.txt")
if !head { require.NoError(t, err)
m.Set("no_head", "true")
}
f, err := NewFs(context.Background(), remoteName, "", m)
require.NoError(t, err)
for _, rangeRead := range []bool{false, true} { // Test normal read
o, err := f.NewObject(context.Background(), "four/under four.txt") fd, err := o.Open(context.Background())
require.NoError(t, err) require.NoError(t, err)
data, err := ioutil.ReadAll(fd)
if !head { require.NoError(t, err)
// Test mod time is still indeterminate require.NoError(t, fd.Close())
tObj := o.ModTime(context.Background()) if lineEndSize == 2 {
assert.Equal(t, time.Duration(0), time.Unix(0, 0).Sub(tObj)) assert.Equal(t, "beetroot\r\n", string(data))
} else {
// Test file size is still indeterminate assert.Equal(t, "beetroot\n", string(data))
assert.Equal(t, int64(-1), o.Size())
}
var data []byte
if !rangeRead {
// Test normal read
fd, err := o.Open(context.Background())
require.NoError(t, err)
data, err = io.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
if lineEndSize == 2 {
assert.Equal(t, "beetroot\r\n", string(data))
} else {
assert.Equal(t, "beetroot\n", string(data))
}
} else {
// Test with range request
fd, err := o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
require.NoError(t, err)
data, err = io.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, "eetro", string(data))
}
fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
require.NoError(t, err)
tFile := fi.ModTime()
// Test the time is always correct on the object after file open
tObj := o.ModTime(context.Background())
fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)
if !rangeRead {
// Test the file size
assert.Equal(t, int64(len(data)), o.Size())
}
}
} }
// Test with range request
fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
require.NoError(t, err)
data, err = ioutil.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, "eetro", string(data))
} }
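Both variants of TestOpen issue the same range request, &fs.RangeOption{Start: 1, End: 5}, against "beetroot" and expect "eetro" back, i.e. five bytes, so both bounds behave inclusively in this test. A minimal self-contained sketch of the slicing the test pins down; rangeSlice is a hypothetical helper, not rclone code:

package main

import "fmt"

// rangeSlice mirrors what the test above asserts for fs.RangeOption:
// Start and End are byte offsets and both ends are inclusive.
func rangeSlice(data []byte, start, end int64) []byte {
	return data[start : end+1]
}

func main() {
	// "beetroot\n" bytes 1..5 inclusive -> "eetro", matching the assertion.
	fmt.Println(string(rangeSlice([]byte("beetroot\n"), 1, 5)))
}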
func TestMimeType(t *testing.T) { func TestMimeType(t *testing.T) {
f := prepare(t) f, tidy := prepare(t)
defer tidy()
o, err := f.NewObject(context.Background(), "four/under four.txt") o, err := f.NewObject(context.Background(), "four/under four.txt")
require.NoError(t, err) require.NoError(t, err)
@@ -265,7 +234,8 @@ func TestMimeType(t *testing.T) {
} }
func TestIsAFileRoot(t *testing.T) { func TestIsAFileRoot(t *testing.T) {
m := prepareServer(t) m, tidy := prepareServer(t)
defer tidy()
f, err := NewFs(context.Background(), remoteName, "one%.txt", m) f, err := NewFs(context.Background(), remoteName, "one%.txt", m)
assert.Equal(t, err, fs.ErrorIsFile) assert.Equal(t, err, fs.ErrorIsFile)
@@ -274,7 +244,8 @@ func TestIsAFileRoot(t *testing.T) {
} }
func TestIsAFileSubDir(t *testing.T) { func TestIsAFileSubDir(t *testing.T) {
m := prepareServer(t) m, tidy := prepareServer(t)
defer tidy()
f, err := NewFs(context.Background(), remoteName, "three/underthree.txt", m) f, err := NewFs(context.Background(), remoteName, "three/underthree.txt", m)
assert.Equal(t, err, fs.ErrorIsFile) assert.Equal(t, err, fs.ErrorIsFile)

backend/hubic/auth.go (new file)

@@ -0,0 +1,62 @@
package hubic
import (
"context"
"net/http"
"time"
"github.com/ncw/swift/v2"
"github.com/rclone/rclone/fs"
)
// auth is an authenticator for swift
type auth struct {
f *Fs
}
// newAuth creates a swift authenticator
func newAuth(f *Fs) *auth {
return &auth{
f: f,
}
}
// Request constructs an http.Request for authentication
//
// returns nil for not needed
func (a *auth) Request(ctx context.Context, c *swift.Connection) (r *http.Request, err error) {
const retries = 10
for try := 1; try <= retries; try++ {
err = a.f.getCredentials(context.TODO())
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
fs.Debugf(a.f, "retrying auth request %d/%d: %v", try, retries, err)
}
return nil, err
}
// Response parses the result of an http request
func (a *auth) Response(ctx context.Context, resp *http.Response) error {
return nil
}
// The public storage URL - set Internal to true to read
// internal/service net URL
func (a *auth) StorageUrl(Internal bool) string { // nolint
return a.f.credentials.Endpoint
}
// The access token
func (a *auth) Token() string {
return a.f.credentials.Token
}
// The CDN url if available
func (a *auth) CdnUrl() string { // nolint
return ""
}
// Check the interfaces are satisfied
var _ swift.Authenticator = (*auth)(nil)
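auth.go only supplies the swift.Authenticator; hubic.go below hands it to the swift connection, and the library then drives Request/Token/StorageUrl itself. A minimal sketch of that wiring, assuming it lives in this package (exampleConnect is an illustrative name, not code from the diff):

package hubic

import (
	"context"

	swiftLib "github.com/ncw/swift/v2"
)

// exampleConnect shows how the swift library consumes an Authenticator such
// as *auth above; NewFs in hubic.go below does essentially this, with extra
// timeouts and a custom transport.
func exampleConnect(ctx context.Context, f *Fs) (*swiftLib.Connection, error) {
	c := &swiftLib.Connection{
		Auth: newAuth(f), // Request/Response/StorageUrl/Token are called by the library
	}
	// Authenticate triggers auth.Request, which fetches the OpenStack
	// credentials from the Hubic API via f.getCredentials.
	err := c.Authenticate(ctx)
	return c, err
}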

backend/hubic/hubic.go (new file)

@@ -0,0 +1,200 @@
// Package hubic provides an interface to the Hubic object storage
// system.
package hubic
// This uses the normal swift mechanism to update the credentials and
// ignores the expires field returned by the Hubic API. This may need
// to be revisited after some actual experience.
import (
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"strings"
"time"
swiftLib "github.com/ncw/swift/v2"
"github.com/rclone/rclone/backend/swift"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/lib/oauthutil"
"golang.org/x/oauth2"
)
const (
rcloneClientID = "api_hubic_svWP970PvSWbw5G3PzrAqZ6X2uHeZBPI"
rcloneEncryptedClientSecret = "leZKCcqy9movLhDWLVXX8cSLp_FzoiAPeEJOIOMRw1A5RuC4iLEPDYPWVF46adC_MVonnLdVEOTHVstfBOZ_lY4WNp8CK_YWlpRZ9diT5YI"
)
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauth2.Config{
Scopes: []string{
"credentials.r", // Read OpenStack credentials
},
Endpoint: oauth2.Endpoint{
AuthURL: "https://api.hubic.com/oauth/auth/",
TokenURL: "https://api.hubic.com/oauth/token/",
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "hubic",
Description: "Hubic",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
},
Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
})
}
// credentials is the JSON returned from the Hubic API to read the
// OpenStack credentials
type credentials struct {
Token string `json:"token"` // OpenStack token
Endpoint string `json:"endpoint"` // OpenStack endpoint
Expires string `json:"expires"` // Expires date - e.g. "2015-11-09T14:24:56+01:00"
}
// Fs represents a remote hubic
type Fs struct {
fs.Fs // wrapped Fs
features *fs.Features // optional features
client *http.Client // client for oauth api
credentials credentials // returned from the Hubic API
expires time.Time // time credentials expire
}
// Object describes a swift object
type Object struct {
*swift.Object
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Object.String()
}
// ------------------------------------------------------------
// String converts this Fs to a string
func (f *Fs) String() string {
if f.Fs == nil {
return "Hubic"
}
return fmt.Sprintf("Hubic %s", f.Fs.String())
}
// getCredentials reads the OpenStack Credentials using the Hubic API
//
// The credentials are read into the Fs
func (f *Fs) getCredentials(ctx context.Context) (err error) {
req, err := http.NewRequestWithContext(ctx, "GET", "https://api.hubic.com/1.0/account/credentials", nil)
if err != nil {
return err
}
resp, err := f.client.Do(req)
if err != nil {
return err
}
defer fs.CheckClose(resp.Body, &err)
if resp.StatusCode < 200 || resp.StatusCode > 299 {
body, _ := ioutil.ReadAll(resp.Body)
bodyStr := strings.TrimSpace(strings.ReplaceAll(string(body), "\n", " "))
return fmt.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
}
decoder := json.NewDecoder(resp.Body)
var result credentials
err = decoder.Decode(&result)
if err != nil {
return err
}
// fs.Debugf(f, "Got credentials %+v", result)
if result.Token == "" || result.Endpoint == "" || result.Expires == "" {
return errors.New("couldn't read token, result and expired from credentials")
}
f.credentials = result
expires, err := time.Parse(time.RFC3339, result.Expires)
if err != nil {
return err
}
f.expires = expires
fs.Debugf(f, "Got swift credentials (expiry %v in %v)", f.expires, time.Until(f.expires))
return nil
}
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
client, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, fmt.Errorf("failed to configure Hubic: %w", err)
}
f := &Fs{
client: client,
}
// Make the swift Connection
ci := fs.GetConfig(ctx)
c := &swiftLib.Connection{
Auth: newAuth(f),
ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * ci.Timeout, // Use the timeouts in the transport
Transport: fshttp.NewTransport(ctx),
}
err = c.Authenticate(ctx)
if err != nil {
return nil, fmt.Errorf("error authenticating swift connection: %w", err)
}
// Parse config into swift.Options struct
opt := new(swift.Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// Make inner swift Fs from the connection
swiftFs, err := swift.NewFsWithConnection(ctx, opt, name, root, c, true)
if err != nil && err != fs.ErrorIsFile {
return nil, err
}
f.Fs = swiftFs
f.features = f.Fs.Features().Wrap(f)
return f, err
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.Fs
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
)
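The Fs above is a thin wrapper: it embeds the swift-backed fs.Fs, so every method it does not override is delegated to swift, while String, Features and UnWrap are redefined. A small standalone illustration of that embedding behaviour, with Inner/Outer as hypothetical names:

package main

import "fmt"

type Inner struct{}

func (Inner) List() string   { return "inner list" }
func (Inner) String() string { return "inner" }

// Outer mirrors how hubic.Fs wraps the swift Fs: methods it does not override
// (List) fall through to the embedded value, while String is overridden.
type Outer struct{ Inner }

func (Outer) String() string { return "Hubic wrapper" }

func main() {
	o := Outer{}
	fmt.Println(o.List())   // "inner list" - delegated to the embedded type
	fmt.Println(o.String()) // "Hubic wrapper" - overridden by the wrapper
}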


@@ -0,0 +1,19 @@
// Test Hubic filesystem interface
package hubic_test
import (
"testing"
"github.com/rclone/rclone/backend/hubic"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestHubic:",
NilObject: (*hubic.Object)(nil),
SkipFsCheckWrap: true,
SkipObjectCheckWrap: true,
})
}


@@ -133,13 +133,11 @@ Owner is able to add custom keys. Metadata feature grabs all the keys including
}, },
Options: []fs.Option{{ Options: []fs.Option{{
Name: "access_key_id", Name: "access_key_id",
Help: "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php", Help: "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php",
Sensitive: true,
}, { }, {
Name: "secret_access_key", Name: "secret_access_key",
Help: "IAS3 Secret Key (password).\n\nLeave blank for anonymous access.", Help: "IAS3 Secret Key (password).\n\nLeave blank for anonymous access.",
Sensitive: true,
}, { }, {
// their official client (https://github.com/jjjake/internetarchive) hardcodes following the two // their official client (https://github.com/jjjake/internetarchive) hardcodes following the two
Name: "endpoint", Name: "endpoint",


@@ -12,6 +12,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"math/rand" "math/rand"
"net/http" "net/http"
"net/url" "net/url"
@@ -74,10 +75,6 @@ const (
tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token" tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token"
tele2CloudAuthURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth" tele2CloudAuthURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth"
tele2CloudClientID = "desktop" tele2CloudClientID = "desktop"
onlimeCloudTokenURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/token"
onlimeCloudAuthURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/auth"
onlimeCloudClientID = "desktop"
) )
// Register with Fs // Register with Fs
@@ -88,7 +85,7 @@ func init() {
Description: "Jottacloud", Description: "Jottacloud",
NewFs: NewFs, NewFs: NewFs,
Config: Config, Config: Config,
Options: append(oauthutil.SharedOptions, []fs.Option{{ Options: []fs.Option{{
Name: "md5_memory_limit", Name: "md5_memory_limit",
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.", Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
Default: fs.SizeSuffix(10 * 1024 * 1024), Default: fs.SizeSuffix(10 * 1024 * 1024),
@@ -123,7 +120,7 @@ func init() {
Default: (encoder.Display | Default: (encoder.Display |
encoder.EncodeWin | // :?"*<>| encoder.EncodeWin | // :?"*<>|
encoder.EncodeInvalidUtf8), encoder.EncodeInvalidUtf8),
}}...), }},
}) })
} }
@@ -143,9 +140,6 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
}, { }, {
Value: "tele2", Value: "tele2",
Help: "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.", Help: "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.",
}, {
Value: "onlime",
Help: "Onlime Cloud authentication.\nUse this if you are using Onlime Cloud.",
}}) }})
case "auth_type_done": case "auth_type_done":
// Jump to next state according to config chosen // Jump to next state according to config chosen
@@ -268,21 +262,6 @@ machines.`)
RedirectURL: oauthutil.RedirectLocalhostURL, RedirectURL: oauthutil.RedirectLocalhostURL,
}, },
}) })
case "onlime": // onlime cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, onlimeCloudClientID)
m.Set(configTokenURL, onlimeCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: onlimeCloudAuthURL,
TokenURL: onlimeCloudTokenURL,
},
ClientID: onlimeCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "choose_device": case "choose_device":
return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", `Use a non-standard device/mountpoint? return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", `Use a non-standard device/mountpoint?
Choosing no, the default, will let you access the storage used for the archive Choosing no, the default, will let you access the storage used for the archive
@@ -843,7 +822,7 @@ func (f *Fs) allocatePathRaw(file string, absolute bool) string {
func grantTypeFilter(req *http.Request) { func grantTypeFilter(req *http.Request) {
if legacyTokenURL == req.URL.String() { if legacyTokenURL == req.URL.String() {
// read the entire body // read the entire body
refreshBody, err := io.ReadAll(req.Body) refreshBody, err := ioutil.ReadAll(req.Body)
if err != nil { if err != nil {
return return
} }
@@ -853,7 +832,7 @@ func grantTypeFilter(req *http.Request) {
refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1)) refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))
// set the new ReadCloser (with a dummy Close()) // set the new ReadCloser (with a dummy Close())
req.Body = io.NopCloser(bytes.NewReader(refreshBody)) req.Body = ioutil.NopCloser(bytes.NewReader(refreshBody))
} }
} }
@@ -1810,7 +1789,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
var tempFile *os.File var tempFile *os.File
// create the cache file // create the cache file
tempFile, err = os.CreateTemp("", cachePrefix) tempFile, err = ioutil.TempFile("", cachePrefix)
if err != nil { if err != nil {
return return
} }
@@ -1838,7 +1817,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
} else { } else {
// that's a small file, just read it into memory // that's a small file, just read it into memory
var inData []byte var inData []byte
inData, err = io.ReadAll(teeReader) inData, err = ioutil.ReadAll(teeReader)
if err != nil { if err != nil {
return return
} }
@@ -1860,12 +1839,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err == nil { if err == nil {
// if the object exists delete it // if the object exists delete it
err = o.remove(ctx, true) err = o.remove(ctx, true)
if err != nil && err != fs.ErrorObjectNotFound { if err != nil {
// if delete failed then report that, unless it was because the file did not exist after all
return fmt.Errorf("failed to remove old object: %w", err) return fmt.Errorf("failed to remove old object: %w", err)
} }
} else if err != fs.ErrorObjectNotFound { }
// if the object does not exist we can just continue but if the error is something different we should report that // if the object does not exist we can just continue but if the error is something different we should report that
if err != fs.ErrorObjectNotFound {
return err return err
} }
} }
@@ -1935,7 +1914,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// copy the already uploaded bytes into the trash :) // copy the already uploaded bytes into the trash :)
var result api.UploadResponse var result api.UploadResponse
_, err = io.CopyN(io.Discard, in, response.ResumePos) _, err = io.CopyN(ioutil.Discard, in, response.ResumePos)
if err != nil { if err != nil {
return err return err
} }
@@ -1952,7 +1931,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
o.md5 = result.Md5 o.md5 = result.Md5
o.modTime = time.Unix(result.Modified/1000, 0) o.modTime = time.Unix(result.Modified/1000, 0)
} else { } else {
// If the file state is COMPLETE we don't need to upload it because the file was already found but we still need to update our metadata // If the file state is COMPLETE we don't need to upload it because the file was already found but we still ned to update our metadata
return o.readMetaData(ctx, true) return o.readMetaData(ctx, true)
} }
@@ -1973,17 +1952,10 @@ func (o *Object) remove(ctx context.Context, hard bool) error {
opts.Parameters.Set("dl", "true") opts.Parameters.Set("dl", "true")
} }
err := o.fs.pacer.Call(func() (bool, error) { return o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.jfsSrv.CallXML(ctx, &opts, nil, nil) resp, err := o.fs.jfsSrv.CallXML(ctx, &opts, nil, nil)
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if apiErr, ok := err.(*api.Error); ok {
// attempting to hard delete will fail if path does not exist, but standard delete will succeed
if apiErr.StatusCode == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
}
return err
} }
// Remove an object // Remove an object


@@ -61,10 +61,9 @@ func init() {
Default: true, Default: true,
Advanced: true, Advanced: true,
}, { }, {
Name: "user", Name: "user",
Help: "Your user name.", Help: "Your user name.",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "password", Name: "password",
Help: "Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).", Help: "Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
@@ -377,7 +376,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
for i, file := range files { for i, file := range files {
remote := path.Join(dir, f.opt.Enc.ToStandardName(file.Name)) remote := path.Join(dir, f.opt.Enc.ToStandardName(file.Name))
if file.Type == "dir" { if file.Type == "dir" {
entries[i] = fs.NewDir(remote, time.Time{}) entries[i] = fs.NewDir(remote, time.Unix(0, 0))
} else { } else {
entries[i] = &Object{ entries[i] = &Object{
fs: f, fs: f,


@@ -7,6 +7,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"os" "os"
"path" "path"
"path/filepath" "path/filepath"
@@ -123,8 +124,8 @@ routine so this flag shouldn't normally be used.`,
Help: `Don't check to see if the files change during upload. Help: `Don't check to see if the files change during upload.
Normally rclone checks the size and modification time of files as they Normally rclone checks the size and modification time of files as they
are being uploaded and aborts with a message which starts "can't copy - are being uploaded and aborts with a message which starts "can't copy
source file is being updated" if the file changes during upload. - source file is being updated" if the file changes during upload.
However on some file systems this modification time check may fail (e.g. However on some file systems this modification time check may fail (e.g.
[Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this [Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this
@@ -266,10 +267,7 @@ type Object struct {
// ------------------------------------------------------------ // ------------------------------------------------------------
var ( var errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
errLinksNeedsSuffix = errors.New("need \"" + linkSuffix + "\" suffix to refer to symlink when using -l/--links")
)
// NewFs constructs an Fs from the path // NewFs constructs an Fs from the path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
@@ -302,8 +300,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ReadMetadata: true, ReadMetadata: true,
WriteMetadata: true, WriteMetadata: true,
UserMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported UserMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported
FilterAware: true,
PartialUploads: true,
}).Fill(ctx, f) }).Fill(ctx, f)
if opt.FollowSymlinks { if opt.FollowSymlinks {
f.lstat = os.Stat f.lstat = os.Stat
@@ -314,16 +310,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err == nil { if err == nil {
f.dev = readDevice(fi, f.opt.OneFileSystem) f.dev = readDevice(fi, f.opt.OneFileSystem)
} }
// Check to see if this is a .rclonelink if not found
hasLinkSuffix := strings.HasSuffix(f.root, linkSuffix)
if hasLinkSuffix && opt.TranslateSymlinks && os.IsNotExist(err) {
fi, err = f.lstat(strings.TrimSuffix(f.root, linkSuffix))
}
if err == nil && f.isRegular(fi.Mode()) { if err == nil && f.isRegular(fi.Mode()) {
// Handle the odd case, that a symlink was specified by name without the link suffix
if !hasLinkSuffix && opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
return nil, errLinksNeedsSuffix
}
// It is a file, so use the parent as the root // It is a file, so use the parent as the root
f.root = filepath.Dir(f.root) f.root = filepath.Dir(f.root)
// return an error with an fs which points to the parent // return an error with an fs which points to the parent
@@ -516,7 +503,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
continue continue
} }
} }
fierr = fmt.Errorf("failed to get info about directory entry %q: %w", namepath, fierr) err = fmt.Errorf("failed to read directory %q: %w", namepath, err)
fs.Errorf(dir, "%v", fierr) fs.Errorf(dir, "%v", fierr)
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync _ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
continue continue
@@ -533,14 +520,15 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
name := fi.Name() name := fi.Name()
mode := fi.Mode() mode := fi.Mode()
newRemote := f.cleanRemote(dir, name) newRemote := f.cleanRemote(dir, name)
// Don't include non directory if not included
// we leave directory filtering to the layer above
if useFilter && !fi.IsDir() && !filter.IncludeRemote(newRemote) {
continue
}
// Follow symlinks if required // Follow symlinks if required
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 { if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
localPath := filepath.Join(fsDirPath, name) localPath := filepath.Join(fsDirPath, name)
fi, err = os.Stat(localPath) fi, err = os.Stat(localPath)
// Quietly skip errors on excluded files and directories
if err != nil && useFilter && !filter.IncludeRemote(newRemote) {
continue
}
if os.IsNotExist(err) || isCircularSymlinkError(err) { if os.IsNotExist(err) || isCircularSymlinkError(err) {
// Skip bad symlinks and circular symlinks // Skip bad symlinks and circular symlinks
err = fserrors.NoRetryError(fmt.Errorf("symlink: %w", err)) err = fserrors.NoRetryError(fmt.Errorf("symlink: %w", err))
@@ -565,11 +553,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 { if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
newRemote += linkSuffix newRemote += linkSuffix
} }
// Don't include non directory if not included
// we leave directory filtering to the layer above
if useFilter && !filter.IncludeRemote(newRemote) {
continue
}
fso, err := f.newObjectWithInfo(newRemote, fi) fso, err := f.newObjectWithInfo(newRemote, fi)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -662,7 +645,7 @@ func (f *Fs) readPrecision() (precision time.Duration) {
precision = time.Second precision = time.Second
// Create temporary file and test it // Create temporary file and test it
fd, err := os.CreateTemp("", "rclone") fd, err := ioutil.TempFile("", "rclone")
if err != nil { if err != nil {
// If failed return 1s // If failed return 1s
// fmt.Println("Failed to create temp file", err) // fmt.Println("Failed to create temp file", err)
@@ -1089,7 +1072,7 @@ func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err
if err != nil { if err != nil {
return nil, err return nil, err
} }
return readers.NewLimitedReadCloser(io.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil return readers.NewLimitedReadCloser(ioutil.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
} }
// Open an object for read // Open an object for read
@@ -1416,27 +1399,30 @@ func (o *Object) writeMetadata(metadata fs.Metadata) (err error) {
} }
func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string { func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
if runtime.GOOS != "windows" || !strings.HasPrefix(s, "\\") { if runtime.GOOS == "windows" {
if !filepath.IsAbs(s) { if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
s2, err := filepath.Abs(s) s2, err := filepath.Abs(s)
if err == nil { if err == nil {
s = s2 s = s2
} }
} else {
s = filepath.Clean(s)
} }
}
if runtime.GOOS == "windows" {
s = filepath.ToSlash(s) s = filepath.ToSlash(s)
vol := filepath.VolumeName(s) vol := filepath.VolumeName(s)
s = vol + enc.FromStandardPath(s[len(vol):]) s = vol + enc.FromStandardPath(s[len(vol):])
s = filepath.FromSlash(s) s = filepath.FromSlash(s)
if !noUNC { if !noUNC {
// Convert to UNC // Convert to UNC
s = file.UNCPath(s) s = file.UNCPath(s)
} }
return s return s
} }
if !filepath.IsAbs(s) {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
s = enc.FromStandardPath(s) s = enc.FromStandardPath(s)
return s return s
} }
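Both versions of cleanRootPath finish by pushing absolute Windows paths through file.UNCPath unless noUNC is set. As a rough illustration of what that conversion is for (the prefixing below is an assumption covering only the common drive-letter case; the real rules live in lib/file and also handle \\server\share paths), an extended-length path simply gains a \\?\ prefix so long names keep working:

package main

import (
	"fmt"
	"strings"
)

// toExtendedLength is a simplified stand-in for lib/file.UNCPath; it shows
// only the drive-letter case and is an illustration, not the real logic.
func toExtendedLength(p string) string {
	if strings.HasPrefix(p, `\\`) {
		return p // already a UNC path; the real code rewrites this form differently
	}
	return `\\?\` + p
}

func main() {
	fmt.Println(toExtendedLength(`C:\Users\me\very\long\path\file.txt`))
	// Output: \\?\C:\Users\me\very\long\path\file.txt
}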


@@ -4,7 +4,7 @@ import (
"bytes" "bytes"
"context" "context"
"fmt" "fmt"
"io" "io/ioutil"
"os" "os"
"path" "path"
"path/filepath" "path/filepath"
@@ -14,12 +14,10 @@ import (
"time" "time"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/filter" "github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/file" "github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/readers" "github.com/rclone/rclone/lib/readers"
@@ -35,6 +33,7 @@ func TestMain(m *testing.M) {
// Test copy with source file that's updating // Test copy with source file that's updating
func TestUpdatingCheck(t *testing.T) { func TestUpdatingCheck(t *testing.T) {
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise()
filePath := "sub dir/local test" filePath := "sub dir/local test"
r.WriteFile(filePath, "content", time.Now()) r.WriteFile(filePath, "content", time.Now())
@@ -79,6 +78,7 @@ func TestUpdatingCheck(t *testing.T) {
func TestSymlink(t *testing.T) { func TestSymlink(t *testing.T) {
ctx := context.Background() ctx := context.Background()
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise()
f := r.Flocal.(*Fs) f := r.Flocal.(*Fs)
dir := f.root dir := f.root
@@ -147,24 +147,10 @@ func TestSymlink(t *testing.T) {
_, err = r.Flocal.NewObject(ctx, "symlink2.txt") _, err = r.Flocal.NewObject(ctx, "symlink2.txt")
require.Equal(t, fs.ErrorObjectNotFound, err) require.Equal(t, fs.ErrorObjectNotFound, err)
// Check that NewFs works with the suffixed version and --links
f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+linkSuffix), configmap.Simple{
"links": "true",
})
require.Equal(t, fs.ErrorIsFile, err)
require.Equal(t, dir, f2.(*Fs).root)
// Check that NewFs doesn't see the non suffixed version with --links
f2, err = NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"), configmap.Simple{
"links": "true",
})
require.Equal(t, errLinksNeedsSuffix, err)
require.Nil(t, f2)
// Check reading the object // Check reading the object
in, err := o.Open(ctx) in, err := o.Open(ctx)
require.NoError(t, err) require.NoError(t, err)
contents, err := io.ReadAll(in) contents, err := ioutil.ReadAll(in)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, "file.txt", string(contents)) require.Equal(t, "file.txt", string(contents))
require.NoError(t, in.Close()) require.NoError(t, in.Close())
@@ -172,7 +158,7 @@ func TestSymlink(t *testing.T) {
// Check reading the object with range // Check reading the object with range
in, err = o.Open(ctx, &fs.RangeOption{Start: 2, End: 5}) in, err = o.Open(ctx, &fs.RangeOption{Start: 2, End: 5})
require.NoError(t, err) require.NoError(t, err)
contents, err = io.ReadAll(in) contents, err = ioutil.ReadAll(in)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, "file.txt"[2:5+1], string(contents)) require.Equal(t, "file.txt"[2:5+1], string(contents))
require.NoError(t, in.Close()) require.NoError(t, in.Close())
@@ -191,6 +177,7 @@ func TestSymlinkError(t *testing.T) {
func TestHashOnUpdate(t *testing.T) { func TestHashOnUpdate(t *testing.T) {
ctx := context.Background() ctx := context.Background()
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "file.txt" const filePath = "file.txt"
when := time.Now() when := time.Now()
r.WriteFile(filePath, "content", when) r.WriteFile(filePath, "content", when)
@@ -221,6 +208,7 @@ func TestHashOnUpdate(t *testing.T) {
func TestHashOnDelete(t *testing.T) { func TestHashOnDelete(t *testing.T) {
ctx := context.Background() ctx := context.Background()
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "file.txt" const filePath = "file.txt"
when := time.Now() when := time.Now()
r.WriteFile(filePath, "content", when) r.WriteFile(filePath, "content", when)
@@ -249,6 +237,7 @@ func TestHashOnDelete(t *testing.T) {
func TestMetadata(t *testing.T) { func TestMetadata(t *testing.T) {
ctx := context.Background() ctx := context.Background()
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "metafile.txt" const filePath = "metafile.txt"
when := time.Now() when := time.Now()
const dayLength = len("2001-01-01") const dayLength = len("2001-01-01")
@@ -383,14 +372,12 @@ func TestMetadata(t *testing.T) {
func TestFilter(t *testing.T) { func TestFilter(t *testing.T) {
ctx := context.Background() ctx := context.Background()
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise()
when := time.Now() when := time.Now()
r.WriteFile("included", "included file", when) r.WriteFile("included", "included file", when)
r.WriteFile("excluded", "excluded file", when) r.WriteFile("excluded", "excluded file", when)
f := r.Flocal.(*Fs) f := r.Flocal.(*Fs)
// Check set up for filtering
assert.True(t, f.Features().FilterAware)
// Add a filter // Add a filter
ctx, fi := filter.AddConfig(ctx) ctx, fi := filter.AddConfig(ctx)
require.NoError(t, fi.AddRule("+ included")) require.NoError(t, fi.AddRule("+ included"))
@@ -411,147 +398,3 @@ func TestFilter(t *testing.T) {
sort.Sort(entries) sort.Sort(entries)
require.Equal(t, "[included]", fmt.Sprint(entries)) require.Equal(t, "[included]", fmt.Sprint(entries))
} }
func testFilterSymlink(t *testing.T, copyLinks bool) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
when := time.Now()
f := r.Flocal.(*Fs)
// Create a file, a directory, a symlink to a file, a symlink to a directory and a dangling symlink
r.WriteFile("included.file", "included file", when)
r.WriteFile("included.dir/included.sub.file", "included sub file", when)
require.NoError(t, os.Symlink("included.file", filepath.Join(r.LocalName, "included.file.link")))
require.NoError(t, os.Symlink("included.dir", filepath.Join(r.LocalName, "included.dir.link")))
require.NoError(t, os.Symlink("dangling", filepath.Join(r.LocalName, "dangling.link")))
defer func() {
// Reset -L/-l mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = false
f.lstat = os.Lstat
}()
if copyLinks {
// Set fs into "-L" mode
f.opt.FollowSymlinks = true
f.opt.TranslateSymlinks = false
f.lstat = os.Stat
} else {
// Set fs into "-l" mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = true
f.lstat = os.Lstat
}
// Check set up for filtering
assert.True(t, f.Features().FilterAware)
// Reset global error count
accounting.Stats(ctx).ResetErrors()
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
// Add a filter
ctx, fi := filter.AddConfig(ctx)
require.NoError(t, fi.AddRule("+ included.file"))
require.NoError(t, fi.AddRule("+ included.dir/**"))
if copyLinks {
require.NoError(t, fi.AddRule("+ included.file.link"))
require.NoError(t, fi.AddRule("+ included.dir.link/**"))
} else {
require.NoError(t, fi.AddRule("+ included.file.link.rclonelink"))
require.NoError(t, fi.AddRule("+ included.dir.link.rclonelink"))
}
require.NoError(t, fi.AddRule("- *"))
// Check listing without use filter flag
entries, err := f.List(ctx, "")
require.NoError(t, err)
if copyLinks {
// Check 1 global errors one for each dangling symlink
assert.Equal(t, int64(1), accounting.Stats(ctx).GetErrors(), "global errors found")
} else {
// Check 0 global errors as dangling symlink copied properly
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
}
accounting.Stats(ctx).ResetErrors()
sort.Sort(entries)
if copyLinks {
require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
} else {
require.Equal(t, "[dangling.link.rclonelink included.dir included.dir.link.rclonelink included.file included.file.link.rclonelink]", fmt.Sprint(entries))
}
// Add user filter flag
ctx = filter.SetUseFilter(ctx, true)
// Check listing with use filter flag
entries, err = f.List(ctx, "")
require.NoError(t, err)
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
sort.Sort(entries)
if copyLinks {
require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
} else {
require.Equal(t, "[included.dir included.dir.link.rclonelink included.file included.file.link.rclonelink]", fmt.Sprint(entries))
}
// Check listing through a symlink still works
entries, err = f.List(ctx, "included.dir")
require.NoError(t, err)
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
sort.Sort(entries)
require.Equal(t, "[included.dir/included.sub.file]", fmt.Sprint(entries))
}
func TestFilterSymlinkCopyLinks(t *testing.T) {
testFilterSymlink(t, true)
}
func TestFilterSymlinkLinks(t *testing.T) {
testFilterSymlink(t, false)
}
func TestCopySymlink(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
when := time.Now()
f := r.Flocal.(*Fs)
// Create a file and a symlink to it
r.WriteFile("src/file.txt", "hello world", when)
require.NoError(t, os.Symlink("file.txt", filepath.Join(r.LocalName, "src", "link.txt")))
defer func() {
// Reset -L/-l mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = false
f.lstat = os.Lstat
}()
// Set fs into "-l/--links" mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = true
f.lstat = os.Lstat
// Create dst
require.NoError(t, f.Mkdir(ctx, "dst"))
// Do copy from src into dst
src, err := f.NewObject(ctx, "src/link.txt.rclonelink")
require.NoError(t, err)
require.NotNil(t, src)
dst, err := operations.Copy(ctx, f, nil, "dst/link.txt.rclonelink", src)
require.NoError(t, err)
require.NotNil(t, dst)
// Test that we made a symlink and it has the right contents
dstPath := filepath.Join(r.LocalName, "dst", "link.txt")
linkContents, err := os.Readlink(dstPath)
require.NoError(t, err)
assert.Equal(t, "file.txt", linkContents)
}


@@ -5,42 +5,19 @@ package local
import ( import (
"fmt" "fmt"
"runtime"
"sync"
"time" "time"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
) )
var (
statxCheckOnce sync.Once
readMetadataFromFileFn func(o *Object, m *fs.Metadata) (err error)
)
// Read the metadata from the file into metadata where possible // Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) { func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
statxCheckOnce.Do(func() {
// Check statx() is available as it was only introduced in kernel 4.11
// If not, fall back to fstatat() which was introduced in 2.6.16 which is guaranteed for all Go versions
var stat unix.Statx_t
if runtime.GOOS != "android" && unix.Statx(unix.AT_FDCWD, ".", 0, unix.STATX_ALL, &stat) != unix.ENOSYS {
readMetadataFromFileFn = readMetadataFromFileStatx
} else {
readMetadataFromFileFn = readMetadataFromFileFstatat
}
})
return readMetadataFromFileFn(o, m)
}
// Read the metadata from the file into metadata where possible
func readMetadataFromFileStatx(o *Object, m *fs.Metadata) (err error) {
flags := unix.AT_SYMLINK_NOFOLLOW flags := unix.AT_SYMLINK_NOFOLLOW
if o.fs.opt.FollowSymlinks { if o.fs.opt.FollowSymlinks {
flags = 0 flags = 0
} }
var stat unix.Statx_t var stat unix.Statx_t
// statx() was added to Linux in kernel 4.11
err = unix.Statx(unix.AT_FDCWD, o.path, flags, (0 | err = unix.Statx(unix.AT_FDCWD, o.path, flags, (0 |
unix.STATX_TYPE | // Want stx_mode & S_IFMT unix.STATX_TYPE | // Want stx_mode & S_IFMT
unix.STATX_MODE | // Want stx_mode & ~S_IFMT unix.STATX_MODE | // Want stx_mode & ~S_IFMT
@@ -68,36 +45,3 @@ func readMetadataFromFileStatx(o *Object, m *fs.Metadata) (err error) {
setTime("btime", stat.Btime) setTime("btime", stat.Btime)
return nil return nil
} }
// Read the metadata from the file into metadata where possible
func readMetadataFromFileFstatat(o *Object, m *fs.Metadata) (err error) {
flags := unix.AT_SYMLINK_NOFOLLOW
if o.fs.opt.FollowSymlinks {
flags = 0
}
var stat unix.Stat_t
// fstatat() was added to Linux in kernel 2.6.16
// Go only supports 2.6.32 or later
err = unix.Fstatat(unix.AT_FDCWD, o.path, &stat, flags)
if err != nil {
return err
}
m.Set("mode", fmt.Sprintf("%0o", stat.Mode))
m.Set("uid", fmt.Sprintf("%d", stat.Uid))
m.Set("gid", fmt.Sprintf("%d", stat.Gid))
if stat.Rdev != 0 {
m.Set("rdev", fmt.Sprintf("%x", stat.Rdev))
}
setTime := func(key string, t unix.Timespec) {
// The types of t.Sec and t.Nsec vary from int32 to int64 on
// different Linux architectures so we need to cast them to
// int64 here and hence need to quiet the linter about
// unnecessary casts.
//
// nolint: unconvert
m.Set(key, time.Unix(int64(t.Sec), int64(t.Nsec)).Format(metadataTimeFormat))
}
setTime("atime", stat.Atim)
setTime("mtime", stat.Mtim)
return nil
}
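The fuller version of this file (left column) selects between statx(2) and fstatat(2) exactly once per process: a sync.Once probes whether the running kernel implements statx (ENOSYS means it does not) and caches the chosen reader. A minimal sketch of that detect-once-then-dispatch pattern, with placeholder readers standing in for the real syscall wrappers:

package main

import (
	"fmt"
	"sync"
)

// Placeholder readers; in the backend these are the statx- and fstatat-based
// implementations shown in the diff above.
func readWithStatx(path string) (string, error)   { return "statx: " + path, nil }
func readWithFstatat(path string) (string, error) { return "fstatat: " + path, nil }

var (
	pickOnce sync.Once
	readFn   func(string) (string, error)
)

// readMetadata decides once which call to use and afterwards always dispatches
// through the cached function, mirroring statxCheckOnce in the code above.
func readMetadata(path string) (string, error) {
	pickOnce.Do(func() {
		statxAvailable := true // real code: probe unix.Statx and check for ENOSYS
		if statxAvailable {
			readFn = readWithStatx
		} else {
			readFn = readWithFstatat
		}
	})
	return readFn(path)
}

func main() {
	out, _ := readMetadata("/tmp/example")
	fmt.Println(out)
}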


@@ -1,6 +1,7 @@
package local package local
import ( import (
"io/ioutil"
"os" "os"
"sync" "sync"
"testing" "testing"
@@ -12,7 +13,7 @@ import (
// Check we can remove an open file // Check we can remove an open file
func TestRemove(t *testing.T) { func TestRemove(t *testing.T) {
fd, err := os.CreateTemp("", "rclone-remove-test") fd, err := ioutil.TempFile("", "rclone-remove-test")
require.NoError(t, err) require.NoError(t, err)
name := fd.Name() name := fd.Name()
defer func() { defer func() {


@@ -69,11 +69,6 @@ func (w *BinWriter) WritePu64(val int64) {
w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))]) w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
} }
// WriteP64 writes an signed long as unsigned varint
func (w *BinWriter) WriteP64(val int64) {
w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
}
// WriteString writes a zero-terminated string // WriteString writes a zero-terminated string
func (w *BinWriter) WriteString(str string) { func (w *BinWriter) WriteString(str string) {
buf := []byte(str) buf := []byte(str)
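WriteP64 and WritePu64 share the same body: the int64 is reinterpreted as a uint64 and emitted as an unsigned varint, so a negative value (for example a pre-1970 modification time) comes out as a huge ten-byte varint rather than a small number. A standalone sketch of that reinterpretation, which is also why the mailru listing code further down treats negative mtimes specially:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := make([]byte, binary.MaxVarintLen64)

	// A recent Unix timestamp encodes compactly.
	n := binary.PutUvarint(buf, uint64(int64(1662400000)))
	fmt.Println("positive value:", n, "bytes") // 5 bytes

	// A negative value wraps around to 2^64 - |v| and needs all 10 bytes.
	n = binary.PutUvarint(buf, uint64(int64(-1)))
	fmt.Println("negative value:", n, "bytes") // 10 bytes
}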


@@ -18,6 +18,7 @@ import (
"encoding/hex" "encoding/hex"
"encoding/json" "encoding/json"
"io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
@@ -85,19 +86,13 @@ func init() {
Name: "mailru", Name: "mailru",
Description: "Mail.ru Cloud", Description: "Mail.ru Cloud",
NewFs: NewFs, NewFs: NewFs,
Options: append(oauthutil.SharedOptions, []fs.Option{{ Options: []fs.Option{{
Name: "user", Name: "user",
Help: "User name (usually email).", Help: "User name (usually email).",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "pass", Name: "pass",
Help: `Password. Help: "Password.",
This must be an app password - rclone will not work with your normal
password. See the Configuration section in the docs for how to make an
app password.
`,
Required: true, Required: true,
IsPassword: true, IsPassword: true,
}, { }, {
@@ -214,7 +209,7 @@ Supported quirks: atomicmkdir binlist unknowndirs`,
encoder.EncodeWin | // :?"*<>| encoder.EncodeWin | // :?"*<>|
encoder.EncodeBackSlash | encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8), encoder.EncodeInvalidUtf8),
}}...), }},
}) })
} }
@@ -646,7 +641,12 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
return nil, -1, err return nil, -1, err
} }
modTime := time.Unix(int64(item.Mtime), 0) mTime := int64(item.Mtime)
if mTime < 0 {
fs.Debugf(f, "Fixing invalid timestamp %d on mailru file %q", mTime, remote)
mTime = 0
}
modTime := time.Unix(mTime, 0)
isDir, err := f.isDir(item.Kind, remote) isDir, err := f.isDir(item.Kind, remote)
if err != nil { if err != nil {
@@ -1660,7 +1660,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Attempt to put by calculating hash in memory // Attempt to put by calculating hash in memory
if trySpeedup && size <= int64(o.fs.opt.SpeedupMaxMem) { if trySpeedup && size <= int64(o.fs.opt.SpeedupMaxMem) {
fileBuf, err = io.ReadAll(in) fileBuf, err = ioutil.ReadAll(in)
if err != nil { if err != nil {
return err return err
} }
@@ -1703,7 +1703,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if size <= mrhash.Size { if size <= mrhash.Size {
// Optimize upload: skip extra request if data fits in the hash buffer. // Optimize upload: skip extra request if data fits in the hash buffer.
if fileBuf == nil { if fileBuf == nil {
fileBuf, err = io.ReadAll(wrapIn) fileBuf, err = ioutil.ReadAll(wrapIn)
} }
if fileHash == nil && err == nil { if fileHash == nil && err == nil {
fileHash = mrhash.Sum(fileBuf) fileHash = mrhash.Sum(fileBuf)
@@ -2058,7 +2058,7 @@ func (o *Object) addFileMetaData(ctx context.Context, overwrite bool) error {
req.WritePu16(0) // revision req.WritePu16(0) // revision
req.WriteString(o.fs.opt.Enc.FromStandardPath(o.absPath())) req.WriteString(o.fs.opt.Enc.FromStandardPath(o.absPath()))
req.WritePu64(o.size) req.WritePu64(o.size)
req.WriteP64(o.modTime.Unix()) req.WritePu64(o.modTime.Unix())
req.WritePu32(0) req.WritePu32(0)
req.Write(o.mrHash) req.Write(o.mrHash)
@@ -2214,7 +2214,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
fs.Debugf(o, "Server returned full content instead of range") fs.Debugf(o, "Server returned full content instead of range")
if start > 0 { if start > 0 {
// Discard the beginning of the data // Discard the beginning of the data
_, err = io.CopyN(io.Discard, wrapStream, start) _, err = io.CopyN(ioutil.Discard, wrapStream, start)
if err != nil { if err != nil {
closeBody(res) closeBody(res)
return nil, err return nil, err


@@ -58,10 +58,9 @@ func init() {
Description: "Mega", Description: "Mega",
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "user", Name: "user",
Help: "User name.", Help: "User name.",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "pass", Name: "pass",
Help: "Password.", Help: "Password.",
@@ -84,17 +83,6 @@ than permanently deleting them. If you specify this then rclone will
permanently delete objects instead.`, permanently delete objects instead.`,
Default: false, Default: false,
Advanced: true, Advanced: true,
}, {
Name: "use_https",
Help: `Use HTTPS for transfers.
MEGA uses plain text HTTP connections by default.
Some ISPs throttle HTTP connections, this causes transfers to become very slow.
Enabling this will force MEGA to use HTTPS for all transfers.
HTTPS is normally not necessary since all data is already encrypted anyway.
Enabling it will increase CPU usage and add network overhead.`,
Default: false,
Advanced: true,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,
@@ -112,7 +100,6 @@ type Options struct {
Pass string `config:"pass"` Pass string `config:"pass"`
Debug bool `config:"debug"` Debug bool `config:"debug"`
HardDelete bool `config:"hard_delete"` HardDelete bool `config:"hard_delete"`
UseHTTPS bool `config:"use_https"`
Enc encoder.MultiEncoder `config:"encoding"` Enc encoder.MultiEncoder `config:"encoding"`
} }
@@ -217,7 +204,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if srv == nil { if srv == nil {
srv = mega.New().SetClient(fshttp.NewClient(ctx)) srv = mega.New().SetClient(fshttp.NewClient(ctx))
srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
srv.SetHTTPS(opt.UseHTTPS)
srv.SetLogger(func(format string, v ...interface{}) { srv.SetLogger(func(format string, v ...interface{}) {
fs.Infof("*go-mega*", format, v...) fs.Infof("*go-mega*", format, v...)
}) })


@@ -8,6 +8,7 @@ import (
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"path" "path"
"strings" "strings"
"sync" "sync"
@@ -574,7 +575,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
} }
data = data[:limit] data = data[:limit]
} }
return io.NopCloser(bytes.NewBuffer(data)), nil return ioutil.NopCloser(bytes.NewBuffer(data)), nil
} }
// Update the object with the contents of the io.Reader, modTime and size // Update the object with the contents of the io.Reader, modTime and size
@@ -582,7 +583,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
bucket, bucketPath := o.split() bucket, bucketPath := o.split()
data, err := io.ReadAll(in) data, err := ioutil.ReadAll(in)
if err != nil { if err != nil {
return fmt.Errorf("failed to update memory object: %w", err) return fmt.Errorf("failed to update memory object: %w", err)
} }


@@ -12,6 +12,7 @@ import (
"fmt" "fmt"
gohash "hash" gohash "hash"
"io" "io"
"io/ioutil"
"math/rand" "math/rand"
"net/http" "net/http"
"net/url" "net/url"
@@ -65,13 +66,11 @@ HTTP is provided primarily for debugging purposes.`,
Help: `Domain+path of NetStorage host to connect to. Help: `Domain+path of NetStorage host to connect to.
Format should be ` + "`<domain>/<internal folders>`", Format should be ` + "`<domain>/<internal folders>`",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "account", Name: "account",
Help: "Set the NetStorage account name", Help: "Set the NetStorage account name",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "secret", Name: "secret",
Help: `Set the NetStorage account secret/G2O key for authentication. Help: `Set the NetStorage account secret/G2O key for authentication.
@@ -821,8 +820,6 @@ func (f *Fs) getAuth(req *http.Request) error {
// Set Authorization header // Set Authorization header
dataHeader := generateDataHeader(f) dataHeader := generateDataHeader(f)
path := req.URL.RequestURI() path := req.URL.RequestURI()
//lint:ignore SA1008 false positive when running staticcheck, the header name is according to docs even if not canonical
//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1008
actionHeader := req.Header["X-Akamai-ACS-Action"][0] actionHeader := req.Header["X-Akamai-ACS-Action"][0]
fs.Debugf(nil, "NetStorage API %s call %s for path %q", req.Method, actionHeader, path) fs.Debugf(nil, "NetStorage API %s call %s for path %q", req.Method, actionHeader, path)
req.Header.Set("X-Akamai-ACS-Auth-Data", dataHeader) req.Header.Set("X-Akamai-ACS-Auth-Data", dataHeader)
@@ -975,7 +972,7 @@ func (o *Object) netStorageUploadRequest(ctx context.Context, in io.Reader, src
URL = o.fs.url(src.Remote()) URL = o.fs.url(src.Remote())
} }
if strings.HasSuffix(URL, ".rclonelink") { if strings.HasSuffix(URL, ".rclonelink") {
bits, err := io.ReadAll(in) bits, err := ioutil.ReadAll(in)
if err != nil { if err != nil {
return err return err
} }
@@ -1061,7 +1058,7 @@ func (o *Object) netStorageDownloadRequest(ctx context.Context, options []fs.Ope
if strings.HasSuffix(URL, ".rclonelink") && o.target != "" { if strings.HasSuffix(URL, ".rclonelink") && o.target != "" {
fs.Infof(nil, "Converting a symlink to the rclonelink file on download %q", URL) fs.Infof(nil, "Converting a symlink to the rclonelink file on download %q", URL)
reader := strings.NewReader(o.target) reader := strings.NewReader(o.target)
readcloser := io.NopCloser(reader) readcloser := ioutil.NopCloser(reader)
return readcloser, nil return readcloser, nil
} }


@@ -126,7 +126,6 @@ type HashesType struct {
Sha1Hash string `json:"sha1Hash"` // hex encoded SHA1 hash for the contents of the file (if available) Sha1Hash string `json:"sha1Hash"` // hex encoded SHA1 hash for the contents of the file (if available)
Crc32Hash string `json:"crc32Hash"` // hex encoded CRC32 value of the file (if available) Crc32Hash string `json:"crc32Hash"` // hex encoded CRC32 value of the file (if available)
QuickXorHash string `json:"quickXorHash"` // base64 encoded QuickXorHash value of the file (if available) QuickXorHash string `json:"quickXorHash"` // base64 encoded QuickXorHash value of the file (if available)
Sha256Hash string `json:"sha256Hash"` // hex encoded SHA256 value of the file (if available)
} }
// FileFacet groups file-related data on OneDrive into a single structure. // FileFacet groups file-related data on OneDrive into a single structure.


@@ -131,11 +131,10 @@ Note that the chunks will be buffered into memory.`,
Default: defaultChunkSize, Default: defaultChunkSize,
Advanced: true, Advanced: true,
}, { }, {
Name: "drive_id", Name: "drive_id",
Help: "The ID of the drive to use.", Help: "The ID of the drive to use.",
Default: "", Default: "",
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "drive_type", Name: "drive_type",
Help: "The type of the drive (" + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + ").", Help: "The type of the drive (" + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + ").",
@@ -149,8 +148,7 @@ This isn't normally needed, but in special circumstances you might
know the folder ID that you wish to access but not be able to get know the folder ID that you wish to access but not be able to get
there through a path traversal. there through a path traversal.
`, `,
Advanced: true, Advanced: true,
Sensitive: true,
}, { }, {
Name: "access_scopes", Name: "access_scopes",
Help: `Set scopes to be requested by rclone. Help: `Set scopes to be requested by rclone.
@@ -198,9 +196,7 @@ listing, set this option.`,
}, { }, {
Name: "server_side_across_configs", Name: "server_side_across_configs",
Default: false, Default: false,
Help: `Deprecated: use --server-side-across-configs instead. Help: `Allow server-side operations (e.g. copy) to work across different onedrive configs.
Allow server-side operations (e.g. copy) to work across different onedrive configs.
This will only work if you are copying between two OneDrive *Personal* drives AND This will only work if you are copying between two OneDrive *Personal* drives AND
the files to copy are already shared between them. In other cases, rclone will the files to copy are already shared between them. In other cases, rclone will
@@ -261,67 +257,6 @@ this flag there.
Help: `Set the password for links created by the link command. Help: `Set the password for links created by the link command.
At the time of writing this only works with OneDrive personal paid accounts. At the time of writing this only works with OneDrive personal paid accounts.
`,
Advanced: true,
Sensitive: true,
}, {
Name: "hash_type",
Default: "auto",
Help: `Specify the hash in use for the backend.
This specifies the hash type in use. If set to "auto" it will use the
default hash which is QuickXorHash.
Before rclone 1.62 an SHA1 hash was used by default for Onedrive
Personal. For 1.62 and later the default is to use a QuickXorHash for
all onedrive types. If an SHA1 hash is desired then set this option
accordingly.
From July 2023 QuickXorHash will be the only available hash for
both OneDrive for Business and OneDriver Personal.
This can be set to "none" to not use any hashes.
If the hash requested does not exist on the object, it will be
returned as an empty string which is treated as a missing hash by
rclone.
`,
Examples: []fs.OptionExample{{
Value: "auto",
Help: "Rclone chooses the best hash",
}, {
Value: "quickxor",
Help: "QuickXor",
}, {
Value: "sha1",
Help: "SHA1",
}, {
Value: "sha256",
Help: "SHA256",
}, {
Value: "crc32",
Help: "CRC32",
}, {
Value: "none",
Help: "None - don't use any hashes",
}},
Advanced: true,
}, {
Name: "av_override",
Default: false,
Help: `Allows download of files the server thinks has a virus.
The onedrive/sharepoint server may check files uploaded with an Anti
Virus checker. If it detects any potential viruses or malware it will
block download of the file.
In this case you will see a message like this
server reports this file is infected with a virus - use --onedrive-av-override to download anyway: Infected (name of virus): 403 Forbidden:
If you are 100% sure you want to download this file anyway then use
the --onedrive-av-override flag, or av_override = true in the config
file.
`, `,
Advanced: true, Advanced: true,
}, { }, {
@@ -576,7 +511,7 @@ Example: "https://contoso.sharepoint.com/sites/mysite" or "mysite"
`) `)
case "url_end": case "url_end":
siteURL := config.Result siteURL := config.Result
re := regexp.MustCompile(`https://.*\.sharepoint\.com/sites/(.*)`) re := regexp.MustCompile(`https://.*\.sharepoint.com/sites/(.*)`)
match := re.FindStringSubmatch(siteURL) match := re.FindStringSubmatch(siteURL)
if len(match) == 2 { if len(match) == 2 {
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{ return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
@@ -662,8 +597,6 @@ type Options struct {
LinkScope string `config:"link_scope"` LinkScope string `config:"link_scope"`
LinkType string `config:"link_type"` LinkType string `config:"link_type"`
LinkPassword string `config:"link_password"` LinkPassword string `config:"link_password"`
HashType string `config:"hash_type"`
AVOverride bool `config:"av_override"`
Enc encoder.MultiEncoder `config:"encoding"` Enc encoder.MultiEncoder `config:"encoding"`
} }
@@ -680,7 +613,6 @@ type Fs struct {
tokenRenewer *oauthutil.Renew // renew the token on expiry tokenRenewer *oauthutil.Renew // renew the token on expiry
driveID string // ID to use for querying Microsoft Graph driveID string // ID to use for querying Microsoft Graph
driveType string // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive driveType string // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
hashType hash.Type // type of the hash we are using
} }
// Object describes a OneDrive object // Object describes a OneDrive object
@@ -694,7 +626,8 @@ type Object struct {
size int64 // size of the object size int64 // size of the object
modTime time.Time // modification time of the object modTime time.Time // modification time of the object
id string // ID of the object id string // ID of the object
hash string // Hash of the content, usually QuickXorHash but set as hash_type sha1 string // SHA-1 of the object content
quickxorhash string // QuickXorHash of the object content
mimeType string // Content-Type of object from server (may not be as uploaded) mimeType string // Content-Type of object from server (may not be as uploaded)
} }
@@ -949,7 +882,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
driveType: opt.DriveType, driveType: opt.DriveType,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL), srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
hashType: QuickXorHashType,
} }
f.features = (&fs.Features{ f.features = (&fs.Features{
CaseInsensitive: true, CaseInsensitive: true,
@@ -959,21 +891,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}).Fill(ctx, f) }).Fill(ctx, f)
f.srv.SetErrorHandler(errorHandler) f.srv.SetErrorHandler(errorHandler)
// Set the user defined hash
if opt.HashType == "auto" || opt.HashType == "" {
opt.HashType = QuickXorHashType.String()
}
err = f.hashType.Set(opt.HashType)
if err != nil {
return nil, err
}
// Disable change polling in China region
// See: https://github.com/rclone/rclone/issues/6444
if f.opt.Region == regionCN {
f.features.ChangeNotify = nil
}
// Renew the token in the background // Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error { f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, _, err := f.readMetaDataForPath(ctx, "") _, _, err := f.readMetaDataForPath(ctx, "")
@@ -1633,7 +1550,10 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
// Hashes returns the supported hash sets. // Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set { func (f *Fs) Hashes() hash.Set {
return hash.Set(f.hashType) if f.driveType == driveTypePersonal {
return hash.Set(hash.SHA1)
}
return hash.Set(QuickXorHashType)
} }
// PublicLink returns a link for downloading without account. // PublicLink returns a link for downloading without account.
@@ -1748,10 +1668,6 @@ func (f *Fs) CleanUp(ctx context.Context) error {
token := make(chan struct{}, f.ci.Checkers) token := make(chan struct{}, f.ci.Checkers)
var wg sync.WaitGroup var wg sync.WaitGroup
err := walk.Walk(ctx, f, "", true, -1, func(path string, entries fs.DirEntries, err error) error { err := walk.Walk(ctx, f, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
if err != nil {
fs.Errorf(f, "Failed to list %q: %v", path, err)
return nil
}
err = entries.ForObjectError(func(obj fs.Object) error { err = entries.ForObjectError(func(obj fs.Object) error {
o, ok := obj.(*Object) o, ok := obj.(*Object)
if !ok { if !ok {
@@ -1846,8 +1762,14 @@ func (o *Object) rootPath() string {
// Hash returns the SHA-1 of an object returning a lowercase hex string // Hash returns the SHA-1 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t == o.fs.hashType { if o.fs.driveType == driveTypePersonal {
return o.hash, nil if t == hash.SHA1 {
return o.sha1, nil
}
} else {
if t == QuickXorHashType {
return o.quickxorhash, nil
}
} }
return "", hash.ErrUnsupported return "", hash.ErrUnsupported
} }
@@ -1878,23 +1800,16 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
file := info.GetFile() file := info.GetFile()
if file != nil { if file != nil {
o.mimeType = file.MimeType o.mimeType = file.MimeType
o.hash = "" if file.Hashes.Sha1Hash != "" {
switch o.fs.hashType { o.sha1 = strings.ToLower(file.Hashes.Sha1Hash)
case QuickXorHashType: }
if file.Hashes.QuickXorHash != "" { if file.Hashes.QuickXorHash != "" {
h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash) h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash)
if err != nil { if err != nil {
fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err) fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err)
} else { } else {
o.hash = hex.EncodeToString(h) o.quickxorhash = hex.EncodeToString(h)
}
} }
case hash.SHA1:
o.hash = strings.ToLower(file.Hashes.Sha1Hash)
case hash.SHA256:
o.hash = strings.ToLower(file.Hashes.Sha256Hash)
case hash.CRC32:
o.hash = strings.ToLower(file.Hashes.Crc32Hash)
} }
} }
fileSystemInfo := info.GetFileSystemInfo() fileSystemInfo := info.GetFileSystemInfo()
@@ -1990,20 +1905,12 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
var resp *http.Response var resp *http.Response
opts := o.fs.newOptsCall(o.id, "GET", "/content") opts := o.fs.newOptsCall(o.id, "GET", "/content")
opts.Options = options opts.Options = options
if o.fs.opt.AVOverride {
opts.Parameters = url.Values{"AVOverride": {"1"}}
}
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts) resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
if resp != nil {
if virus := resp.Header.Get("X-Virus-Infected"); virus != "" {
err = fmt.Errorf("server reports this file is infected with a virus - use --onedrive-av-override to download anyway: %s: %w", virus, err)
}
}
return nil, err return nil, err
} }


@@ -7,40 +7,51 @@
// See: https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash // See: https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
package quickxorhash package quickxorhash
// This code was ported from a fast C-implementation from // This code was ported from the code snippet linked from
// https://github.com/namazso/QuickXorHash // https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
// which has licenced as BSD Zero Clause License // Which has the copyright
//
// BSD Zero Clause License
//
// Copyright (c) 2022 namazso <admin@namazso.eu>
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
// AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
// OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
import "hash" // ------------------------------------------------------------------------------
// Copyright (c) 2016 Microsoft Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// ------------------------------------------------------------------------------
import (
"hash"
)
const ( const (
// BlockSize is the preferred size for hashing // BlockSize is the preferred size for hashing
BlockSize = 64 BlockSize = 64
// Size of the output checksum // Size of the output checksum
Size = 20 Size = 20
shift = 11 bitsInLastCell = 32
widthInBits = 8 * Size shift = 11
dataSize = shift * widthInBits widthInBits = 8 * Size
dataSize = (widthInBits-1)/64 + 1
) )
type quickXorHash struct { type quickXorHash struct {
data [dataSize]byte data [dataSize]uint64
size uint64 lengthSoFar uint64
shiftSoFar int
} }
// New returns a new hash.Hash computing the quickXorHash checksum. // New returns a new hash.Hash computing the quickXorHash checksum.
@@ -59,37 +70,94 @@ func New() hash.Hash {
// //
// Implementations must not retain p. // Implementations must not retain p.
func (q *quickXorHash) Write(p []byte) (n int, err error) { func (q *quickXorHash) Write(p []byte) (n int, err error) {
var i int currentshift := q.shiftSoFar
// fill last remain
lastRemain := q.size % dataSize // The bitvector where we'll start xoring
if lastRemain != 0 { vectorArrayIndex := currentshift / 64
i += xorBytes(q.data[lastRemain:], p)
// The position within the bit vector at which we begin xoring
vectorOffset := currentshift % 64
iterations := len(p)
if iterations > widthInBits {
iterations = widthInBits
} }
if i != len(p) { for i := 0; i < iterations; i++ {
for len(p)-i >= dataSize { isLastCell := vectorArrayIndex == len(q.data)-1
i += xorBytes(q.data[:], p[i:]) var bitsInVectorCell int
if isLastCell {
bitsInVectorCell = bitsInLastCell
} else {
bitsInVectorCell = 64
}
// There's at least 2 bitvectors before we reach the end of the array
if vectorOffset <= bitsInVectorCell-8 {
for j := i; j < len(p); j += widthInBits {
q.data[vectorArrayIndex] ^= uint64(p[j]) << uint(vectorOffset)
}
} else {
index1 := vectorArrayIndex
var index2 int
if isLastCell {
index2 = 0
} else {
index2 = vectorArrayIndex + 1
}
low := byte(bitsInVectorCell - vectorOffset)
xoredByte := byte(0)
for j := i; j < len(p); j += widthInBits {
xoredByte ^= p[j]
}
q.data[index1] ^= uint64(xoredByte) << uint(vectorOffset)
q.data[index2] ^= uint64(xoredByte) >> low
}
vectorOffset += shift
for vectorOffset >= bitsInVectorCell {
if isLastCell {
vectorArrayIndex = 0
} else {
vectorArrayIndex = vectorArrayIndex + 1
}
vectorOffset -= bitsInVectorCell
} }
xorBytes(q.data[:], p[i:])
} }
q.size += uint64(len(p))
// Update the starting position in a circular shift pattern
q.shiftSoFar = (q.shiftSoFar + shift*(len(p)%widthInBits)) % widthInBits
q.lengthSoFar += uint64(len(p))
return len(p), nil return len(p), nil
} }
// Calculate the current checksum // Calculate the current checksum
func (q *quickXorHash) checkSum() (h [Size + 1]byte) { func (q *quickXorHash) checkSum() (h [Size]byte) {
for i := 0; i < dataSize; i++ { // Output the data as little endian bytes
shift := (i * 11) % 160 ph := 0
shiftBytes := shift / 8 for i := 0; i < len(q.data)-1; i++ {
shiftBits := shift % 8 d := q.data[i]
shifted := int(q.data[i]) << shiftBits _ = h[ph+7] // bounds check
h[shiftBytes] ^= byte(shifted) h[ph+0] = byte(d >> (8 * 0))
h[shiftBytes+1] ^= byte(shifted >> 8) h[ph+1] = byte(d >> (8 * 1))
h[ph+2] = byte(d >> (8 * 2))
h[ph+3] = byte(d >> (8 * 3))
h[ph+4] = byte(d >> (8 * 4))
h[ph+5] = byte(d >> (8 * 5))
h[ph+6] = byte(d >> (8 * 6))
h[ph+7] = byte(d >> (8 * 7))
ph += 8
} }
h[0] ^= h[20] // remaining 32 bits
d := q.data[len(q.data)-1]
h[Size-4] = byte(d >> (8 * 0))
h[Size-3] = byte(d >> (8 * 1))
h[Size-2] = byte(d >> (8 * 2))
h[Size-1] = byte(d >> (8 * 3))
// XOR the file length with the least significant bits in little endian format // XOR the file length with the least significant bits in little endian format
d := q.size d = q.lengthSoFar
h[Size-8] ^= byte(d >> (8 * 0)) h[Size-8] ^= byte(d >> (8 * 0))
h[Size-7] ^= byte(d >> (8 * 1)) h[Size-7] ^= byte(d >> (8 * 1))
h[Size-6] ^= byte(d >> (8 * 2)) h[Size-6] ^= byte(d >> (8 * 2))
@@ -106,7 +174,7 @@ func (q *quickXorHash) checkSum() (h [Size + 1]byte) {
// It does not change the underlying hash state. // It does not change the underlying hash state.
func (q *quickXorHash) Sum(b []byte) []byte { func (q *quickXorHash) Sum(b []byte) []byte {
hash := q.checkSum() hash := q.checkSum()
return append(b, hash[:Size]...) return append(b, hash[:]...)
} }
// Reset resets the Hash to its initial state. // Reset resets the Hash to its initial state.
@@ -128,10 +196,8 @@ func (q *quickXorHash) BlockSize() int {
} }
// Sum returns the quickXorHash checksum of the data. // Sum returns the quickXorHash checksum of the data.
func Sum(data []byte) (h [Size]byte) { func Sum(data []byte) [Size]byte {
var d quickXorHash var d quickXorHash
_, _ = d.Write(data) _, _ = d.Write(data)
s := d.checkSum() return d.checkSum()
copy(h[:], s[:])
return h
} }


@@ -4,7 +4,6 @@ import (
"encoding/base64" "encoding/base64"
"fmt" "fmt"
"hash" "hash"
"math/rand"
"testing" "testing"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@@ -167,16 +166,3 @@ func TestReset(t *testing.T) {
// check interface // check interface
var _ hash.Hash = (*quickXorHash)(nil) var _ hash.Hash = (*quickXorHash)(nil)
func BenchmarkQuickXorHash(b *testing.B) {
b.SetBytes(1 << 20)
buf := make([]byte, 1<<20)
rand.Read(buf)
h := New()
b.ResetTimer()
for i := 0; i < b.N; i++ {
h.Reset()
h.Write(buf)
h.Sum(nil)
}
}


@@ -1,20 +0,0 @@
//go:build !go1.20
package quickxorhash
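// xorBytes XORs src into dst byte by byte, stopping at the end of the
// shorter slice, and returns the number of bytes processed. For example,
// with dst = {0x0f, 0xf0} and src = {0xff}, xorBytes(dst, src) leaves dst
// as {0xf0, 0xf0} and returns 1. This is the fallback used on Go versions
// before 1.20; later versions use the crypto/subtle based variant instead.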
func xorBytes(dst, src []byte) int {
n := len(dst)
if len(src) < n {
n = len(src)
}
if n == 0 {
return 0
}
dst = dst[:n]
//src = src[:n]
src = src[:len(dst)] // remove bounds check in loop
for i := range dst {
dst[i] ^= src[i]
}
return n
}


@@ -1,9 +0,0 @@
//go:build go1.20
package quickxorhash
import "crypto/subtle"
func xorBytes(dst, src []byte) int {
return subtle.XORBytes(dst, src, dst)
}


@@ -42,10 +42,9 @@ func init() {
Description: "OpenDrive", Description: "OpenDrive",
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "username", Name: "username",
Help: "Username.", Help: "Username.",
Required: true, Required: true,
Sensitive: true,
}, { }, {
Name: "password", Name: "password",
Help: "Password.", Help: "Password.",


@@ -1,145 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"crypto/sha256"
"encoding/base64"
"errors"
"fmt"
"os"
"strings"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/oracle/oci-go-sdk/v65/objectstorage/transfer"
)
const (
sseDefaultAlgorithm = "AES256"
)
func getSha256(p []byte) []byte {
h := sha256.New()
h.Write(p)
return h.Sum(nil)
}
func validateSSECustomerKeyOptions(opt *Options) error {
if opt.SSEKMSKeyID != "" && (opt.SSECustomerKeyFile != "" || opt.SSECustomerKey != "") {
return errors.New("oos: can't use vault sse_kms_key_id and local sse_customer_key at the same time")
}
if opt.SSECustomerKey != "" && opt.SSECustomerKeyFile != "" {
return errors.New("oos: can't use sse_customer_key and sse_customer_key_file at the same time")
}
if opt.SSEKMSKeyID != "" {
return nil
}
err := populateSSECustomerKeys(opt)
if err != nil {
return err
}
return nil
}
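// populateSSECustomerKeys fills in the SSE-C related options: if
// sse_customer_key_file is set its contents become sse_customer_key, the
// key's SHA256 checksum is computed (and checked against any configured
// sse_customer_key_sha256), and sse_customer_algorithm defaults to AES256.
// For example (illustrative values only), sse_customer_key_file = ~/oos.key
// pointing at a base64-encoded AES-256 key ends up with sse_customer_key,
// sse_customer_key_sha256 and sse_customer_algorithm all populated.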
func populateSSECustomerKeys(opt *Options) error {
if opt.SSECustomerKeyFile != "" {
// Reads the base64-encoded AES key data from the specified file and computes its SHA256 checksum
data, err := os.ReadFile(expandPath(opt.SSECustomerKeyFile))
if err != nil {
return fmt.Errorf("oos: error reading sse_customer_key_file: %v", err)
}
opt.SSECustomerKey = strings.TrimSpace(string(data))
}
if opt.SSECustomerKey != "" {
decoded, err := base64.StdEncoding.DecodeString(opt.SSECustomerKey)
if err != nil {
return fmt.Errorf("oos: Could not decode sse_customer_key_file: %w", err)
}
sha256Checksum := base64.StdEncoding.EncodeToString(getSha256(decoded))
if opt.SSECustomerKeySha256 == "" {
opt.SSECustomerKeySha256 = sha256Checksum
} else {
if opt.SSECustomerKeySha256 != sha256Checksum {
return fmt.Errorf("the computed SHA256 checksum "+
"(%v) of the key doesn't match the config entry sse_customer_key_sha256=(%v)",
sha256Checksum, opt.SSECustomerKeySha256)
}
}
if opt.SSECustomerAlgorithm == "" {
opt.SSECustomerAlgorithm = sseDefaultAlgorithm
}
}
return nil
}
// https://docs.oracle.com/en-us/iaas/Content/Object/Tasks/usingyourencryptionkeys.htm
func useBYOKPutObject(fs *Fs, request *objectstorage.PutObjectRequest) {
if fs.opt.SSEKMSKeyID != "" {
request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
}
if fs.opt.SSECustomerAlgorithm != "" {
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
}
if fs.opt.SSECustomerKey != "" {
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
}
if fs.opt.SSECustomerKeySha256 != "" {
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
}
}
func useBYOKHeadObject(fs *Fs, request *objectstorage.HeadObjectRequest) {
if fs.opt.SSECustomerAlgorithm != "" {
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
}
if fs.opt.SSECustomerKey != "" {
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
}
if fs.opt.SSECustomerKeySha256 != "" {
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
}
}
func useBYOKGetObject(fs *Fs, request *objectstorage.GetObjectRequest) {
if fs.opt.SSECustomerAlgorithm != "" {
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
}
if fs.opt.SSECustomerKey != "" {
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
}
if fs.opt.SSECustomerKeySha256 != "" {
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
}
}
func useBYOKCopyObject(fs *Fs, request *objectstorage.CopyObjectRequest) {
if fs.opt.SSEKMSKeyID != "" {
request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
}
if fs.opt.SSECustomerAlgorithm != "" {
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
}
if fs.opt.SSECustomerKey != "" {
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
}
if fs.opt.SSECustomerKeySha256 != "" {
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
}
}
func useBYOKUpload(fs *Fs, request *transfer.UploadRequest) {
if fs.opt.SSEKMSKeyID != "" {
request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
}
if fs.opt.SSECustomerAlgorithm != "" {
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
}
if fs.opt.SSECustomerKey != "" {
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
}
if fs.opt.SSECustomerKeySha256 != "" {
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
}
}


@@ -1,178 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"context"
"crypto/rsa"
"errors"
"net/http"
"os"
"path"
"strings"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/common/auth"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
)
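// expandPath cleans the given path and expands a leading "~" to the user's
// home directory, e.g. "~/.oci/config" becomes "/home/<user>/.oci/config" on
// Linux. If the home directory can't be determined the cleaned path is
// returned as-is.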
func expandPath(filepath string) (expandedPath string) {
if filepath == "" {
return filepath
}
cleanedPath := path.Clean(filepath)
expandedPath = cleanedPath
if strings.HasPrefix(cleanedPath, "~") {
rest := cleanedPath[2:]
home, err := os.UserHomeDir()
if err != nil {
return expandedPath
}
expandedPath = path.Join(home, rest)
}
return
}
func getConfigurationProvider(opt *Options) (common.ConfigurationProvider, error) {
switch opt.Provider {
case instancePrincipal:
return auth.InstancePrincipalConfigurationProvider()
case userPrincipal:
expandConfigFilePath := expandPath(opt.ConfigFile)
if expandConfigFilePath != "" && !fileExists(expandConfigFilePath) {
fs.Errorf(userPrincipal, "oci config file doesn't exist at %v", expandConfigFilePath)
}
return common.CustomProfileConfigProvider(expandConfigFilePath, opt.ConfigProfile), nil
case resourcePrincipal:
return auth.ResourcePrincipalConfigurationProvider()
case noAuth:
fs.Infof("client", "using no auth provider")
return getNoAuthConfiguration()
default:
}
return common.DefaultConfigProvider(), nil
}
func newObjectStorageClient(ctx context.Context, opt *Options) (*objectstorage.ObjectStorageClient, error) {
p, err := getConfigurationProvider(opt)
if err != nil {
return nil, err
}
client, err := objectstorage.NewObjectStorageClientWithConfigurationProvider(p)
if err != nil {
fs.Errorf(opt.Provider, "failed to create object storage client, %v", err)
return nil, err
}
if opt.Region != "" {
client.SetRegion(opt.Region)
}
modifyClient(ctx, opt, &client.BaseClient)
return &client, err
}
func fileExists(filePath string) bool {
if _, err := os.Stat(filePath); errors.Is(err, os.ErrNotExist) {
return false
}
return true
}
func modifyClient(ctx context.Context, opt *Options, client *common.BaseClient) {
client.HTTPClient = getHTTPClient(ctx)
if opt.Provider == noAuth {
client.Signer = getNoAuthSigner()
}
}
// getClient makes http client according to the global options
// this has rclone specific options support like dump headers, body etc.
func getHTTPClient(ctx context.Context) *http.Client {
return fshttp.NewClient(ctx)
}
var retryErrorCodes = []int{
408, // Request Timeout
429, // Rate exceeded.
500, // Get occasional 500 Internal Server Error
503, // Service Unavailable
504, // Gateway Time-out
}
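// shouldRetry decides whether a failed request should be retried: context
// cancellation is fatal, OCI service errors are retried when they are
// generically retryable or report a "RequestTimeout" code, and anything else
// falls back to rclone's generic checks plus the HTTP status codes listed in
// retryErrorCodes above.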
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
// If this is an ocierr object, try and extract more useful information to determine if we should retry
if ociError, ok := err.(common.ServiceError); ok {
// Simple case, check the original embedded error in case it's generically retryable
if fserrors.ShouldRetry(err) {
return true, err
}
// If it is a timeout then we want to retry that
if ociError.GetCode() == "RequestTimeout" {
return true, err
}
}
// Ok, not an oci error, check for generic failure conditions
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
func getNoAuthConfiguration() (common.ConfigurationProvider, error) {
return &noAuthConfigurator{}, nil
}
func getNoAuthSigner() common.HTTPRequestSigner {
return &noAuthSigner{}
}
type noAuthConfigurator struct {
}
type noAuthSigner struct {
}
func (n *noAuthSigner) Sign(*http.Request) error {
return nil
}
func (n *noAuthConfigurator) PrivateRSAKey() (*rsa.PrivateKey, error) {
return nil, nil
}
func (n *noAuthConfigurator) KeyID() (string, error) {
return "", nil
}
func (n *noAuthConfigurator) TenancyOCID() (string, error) {
return "", nil
}
func (n *noAuthConfigurator) UserOCID() (string, error) {
return "", nil
}
func (n *noAuthConfigurator) KeyFingerprint() (string, error) {
return "", nil
}
func (n *noAuthConfigurator) Region() (string, error) {
return "", nil
}
func (n *noAuthConfigurator) AuthType() (common.AuthConfig, error) {
return common.AuthConfig{
AuthType: common.UnknownAuthenticationType,
IsFromConfigFile: false,
OboToken: nil,
}, nil
}
// Check the interfaces are satisfied
var (
_ common.ConfigurationProvider = &noAuthConfigurator{}
_ common.HTTPRequestSigner = &noAuthSigner{}
)


@@ -1,228 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"context"
"fmt"
"strings"
"time"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
)
// ------------------------------------------------------------
// Command Interface Implementation
// ------------------------------------------------------------
const (
operationRename = "rename"
operationListMultiPart = "list-multipart-uploads"
operationCleanup = "cleanup"
)
var commandHelp = []fs.CommandHelp{{
Name: operationRename,
Short: "change the name of an object",
Long: `This command can be used to rename a object.
Usage Examples:
rclone backend rename oos:bucket relative-object-path-under-bucket object-new-name
`,
Opts: nil,
}, {
Name: operationListMultiPart,
Short: "List the unfinished multipart uploads",
Long: `This command lists the unfinished multipart uploads in JSON format.
rclone backend list-multipart-uploads oos:bucket/path/to/object
It returns a dictionary of buckets with values as lists of unfinished
multipart uploads.
You can call it with no bucket in which case it lists all buckets, with
a bucket or with a bucket and path.
{
"test-bucket": [
{
"namespace": "test-namespace",
"bucket": "test-bucket",
"object": "600m.bin",
"uploadId": "51dd8114-52a4-b2f2-c42f-5291f05eb3c8",
"timeCreated": "2022-07-29T06:21:16.595Z",
"storageTier": "Standard"
}
    ]
}
`,
}, {
Name: operationCleanup,
Short: "Remove unfinished multipart uploads.",
Long: `This command removes unfinished multipart uploads of age greater than
max-age which defaults to 24 hours.
Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.
rclone backend cleanup oos:bucket/path/to/object
rclone backend cleanup -o max-age=7w oos:bucket/path/to/object
Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
`,
Opts: map[string]string{
"max-age": "Max age of upload to delete",
},
},
}
/*
Command the backend to run a named command
The command run is name
args may be used to read arguments from
opts may be used to read optional arguments from
The result should be capable of being JSON encoded
If it is a string or a []string it will be shown to the user
otherwise it will be JSON encoded and shown to the user like that
*/
func (f *Fs) Command(ctx context.Context, commandName string, args []string,
opt map[string]string) (result interface{}, err error) {
// fs.Debugf(f, "command %v, args: %v, opts:%v", commandName, args, opt)
switch commandName {
case operationRename:
if len(args) < 2 {
return nil, fmt.Errorf("path to object or its new name to rename is empty")
}
remote := args[0]
newName := args[1]
return f.rename(ctx, remote, newName)
case operationListMultiPart:
return f.listMultipartUploadsAll(ctx)
case operationCleanup:
maxAge := 24 * time.Hour
if opt["max-age"] != "" {
maxAge, err = fs.ParseDuration(opt["max-age"])
if err != nil {
return nil, fmt.Errorf("bad max-age: %w", err)
}
}
return nil, f.cleanUp(ctx, maxAge)
default:
return nil, fs.ErrorCommandNotFound
}
}
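// rename renames the object at remote (a path relative to the bucket) to
// newName via the RenameObject API, e.g. "rclone backend rename oos:bucket
// dir/old.bin new.bin" reaches here with remote = "dir/old.bin" and
// newName = "new.bin". Note that newName is the full new object name.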
func (f *Fs) rename(ctx context.Context, remote, newName string) (interface{}, error) {
if remote == "" {
return nil, fmt.Errorf("path to object file cannot be empty")
}
if newName == "" {
return nil, fmt.Errorf("the object's new name cannot be empty")
}
o := &Object{
fs: f,
remote: remote,
}
bucketName, objectPath := o.split()
err := o.readMetaData(ctx)
if err != nil {
fs.Errorf(f, "failed to read object:%v %v ", objectPath, err)
if strings.HasPrefix(objectPath, bucketName) {
fs.Errorf(f, "warn: ensure object path: %v is relative to bucket:%v and doesn't include the bucket name",
objectPath, bucketName)
}
return nil, fs.ErrorNotAFile
}
details := objectstorage.RenameObjectDetails{
SourceName: common.String(objectPath),
NewName: common.String(newName),
}
request := objectstorage.RenameObjectRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: common.String(bucketName),
RenameObjectDetails: details,
OpcClientRequestId: nil,
RequestMetadata: common.RequestMetadata{},
}
var response objectstorage.RenameObjectResponse
err = f.pacer.Call(func() (bool, error) {
response, err = f.srv.RenameObject(ctx, request)
return shouldRetry(ctx, response.HTTPResponse(), err)
})
if err != nil {
return nil, err
}
fs.Infof(f, "success: renamed object-path: %v to %v", objectPath, newName)
return "renamed successfully", nil
}
func (f *Fs) listMultipartUploadsAll(ctx context.Context) (uploadsMap map[string][]*objectstorage.MultipartUpload,
err error) {
uploadsMap = make(map[string][]*objectstorage.MultipartUpload)
bucket, directory := f.split("")
if bucket != "" {
uploads, err := f.listMultipartUploads(ctx, bucket, directory)
if err != nil {
return uploadsMap, err
}
uploadsMap[bucket] = uploads
return uploadsMap, nil
}
entries, err := f.listBuckets(ctx)
if err != nil {
return uploadsMap, err
}
for _, entry := range entries {
bucket := entry.Remote()
uploads, listErr := f.listMultipartUploads(ctx, bucket, "")
if listErr != nil {
err = listErr
fs.Errorf(f, "%v", err)
}
uploadsMap[bucket] = uploads
}
return uploadsMap, err
}
// listMultipartUploads lists all outstanding multipart uploads for (bucket, key)
//
// Note that rather lazily we treat key as a prefix, so it matches
// directories and objects. This could surprise the user if they ask
// for "dir" and it returns "dirKey"
func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory string) (
uploads []*objectstorage.MultipartUpload, err error) {
uploads = []*objectstorage.MultipartUpload{}
req := objectstorage.ListMultipartUploadsRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: common.String(bucketName),
}
var response objectstorage.ListMultipartUploadsResponse
for {
err = f.pacer.Call(func() (bool, error) {
response, err = f.srv.ListMultipartUploads(ctx, req)
return shouldRetry(ctx, response.HTTPResponse(), err)
})
if err != nil {
// fs.Debugf(f, "failed to list multi part uploads %v", err)
return uploads, err
}
for index, item := range response.Items {
if directory != "" && item.Object != nil && !strings.HasPrefix(*item.Object, directory) {
continue
}
uploads = append(uploads, &response.Items[index])
}
if response.OpcNextPage == nil {
break
}
req.Page = response.OpcNextPage
}
return uploads, nil
}


@@ -1,156 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"context"
"fmt"
"strings"
"time"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
)
// ------------------------------------------------------------
// Implement Copier is an optional interfaces for Fs
//------------------------------------------------------------
// Copy src to this remote using server-side copy operations.
// This is stored with the remote path given
// It returns the destination Object and a possible error
// Will only be called if src.Fs().Name() == f.Name()
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
// fs.Debugf(f, "copying %v to %v", src.Remote(), remote)
srcObj, ok := src.(*Object)
if !ok {
// fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
// Temporary Object under construction
dstObj := &Object{
fs: f,
remote: remote,
}
err := f.copy(ctx, dstObj, srcObj)
if err != nil {
return nil, err
}
return f.NewObject(ctx, remote)
}
// copy does a server-side copy from dstObj <- srcObj
//
// The copy is asynchronous: CopyObject only queues a work request, so we
// poll that work request until it completes or copy_timeout expires.
func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object) (err error) {
srcBucket, srcPath := srcObj.split()
dstBucket, dstPath := dstObj.split()
if dstBucket != srcBucket {
exists, err := f.bucketExists(ctx, dstBucket)
if err != nil {
return err
}
if !exists {
err = f.makeBucket(ctx, dstBucket)
if err != nil {
return err
}
}
}
copyObjectDetails := objectstorage.CopyObjectDetails{
SourceObjectName: common.String(srcPath),
DestinationRegion: common.String(dstObj.fs.opt.Region),
DestinationNamespace: common.String(dstObj.fs.opt.Namespace),
DestinationBucket: common.String(dstBucket),
DestinationObjectName: common.String(dstPath),
DestinationObjectMetadata: metadataWithOpcPrefix(srcObj.meta),
}
req := objectstorage.CopyObjectRequest{
NamespaceName: common.String(srcObj.fs.opt.Namespace),
BucketName: common.String(srcBucket),
CopyObjectDetails: copyObjectDetails,
}
useBYOKCopyObject(f, &req)
var resp objectstorage.CopyObjectResponse
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CopyObject(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
return err
}
workRequestID := resp.OpcWorkRequestId
timeout := time.Duration(f.opt.CopyTimeout)
dstName := dstObj.String()
// https://docs.oracle.com/en-us/iaas/Content/Object/Tasks/copyingobjects.htm
// To enable server side copy object, customers will have to
// grant policy to objectstorage service to manage object-family
// Allow service objectstorage-<region_identifier> to manage object-family in tenancy
// Another option to avoid the policy is to download and reupload the file.
// This download upload will work for maximum file size limit of 5GB
err = copyObjectWaitForWorkRequest(ctx, workRequestID, dstName, timeout, f.srv)
if err != nil {
return err
}
return err
}
func copyObjectWaitForWorkRequest(ctx context.Context, wID *string, entityType string, timeout time.Duration,
client *objectstorage.ObjectStorageClient) error {
stateConf := &StateChangeConf{
Pending: []string{
string(objectstorage.WorkRequestStatusAccepted),
string(objectstorage.WorkRequestStatusInProgress),
string(objectstorage.WorkRequestStatusCanceling),
},
Target: []string{
string(objectstorage.WorkRequestSummaryStatusCompleted),
string(objectstorage.WorkRequestSummaryStatusCanceled),
string(objectstorage.WorkRequestStatusFailed),
},
Refresh: func() (interface{}, string, error) {
getWorkRequestRequest := objectstorage.GetWorkRequestRequest{}
getWorkRequestRequest.WorkRequestId = wID
workRequestResponse, err := client.GetWorkRequest(context.Background(), getWorkRequestRequest)
wr := &workRequestResponse.WorkRequest
return workRequestResponse, string(wr.Status), err
},
Timeout: timeout,
}
wrr, e := stateConf.WaitForStateContext(ctx, entityType)
if e != nil {
return fmt.Errorf("work request did not succeed, workId: %s, entity: %s. Message: %s", *wID, entityType, e)
}
wr := wrr.(objectstorage.GetWorkRequestResponse).WorkRequest
if wr.Status == objectstorage.WorkRequestStatusFailed {
errorMessage, _ := getObjectStorageErrorFromWorkRequest(ctx, wID, client)
return fmt.Errorf("work request did not succeed, workId: %s, entity: %s. Message: %s", *wID, entityType, errorMessage)
}
return nil
}
func getObjectStorageErrorFromWorkRequest(ctx context.Context, workRequestID *string, client *objectstorage.ObjectStorageClient) (string, error) {
req := objectstorage.ListWorkRequestErrorsRequest{}
req.WorkRequestId = workRequestID
res, err := client.ListWorkRequestErrors(ctx, req)
if err != nil {
return "", err
}
allErrs := make([]string, 0)
for _, errs := range res.Items {
allErrs = append(allErrs, *errs.Message)
}
errorMessage := strings.Join(allErrs, "\n")
return errorMessage, nil
}


@@ -1,626 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"context"
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"net/http"
"regexp"
"strconv"
"strings"
"time"
"github.com/ncw/swift/v2"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/oracle/oci-go-sdk/v65/objectstorage/transfer"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
)
// ------------------------------------------------------------
// Object Interface Implementation
// ------------------------------------------------------------
const (
metaMtime = "mtime" // the meta key to store mtime in - e.g. X-Amz-Meta-Mtime
metaMD5Hash = "md5chksum" // the meta key to store md5hash in
// StandardTier object storage tier
ociMetaPrefix = "opc-meta-"
)
var archive = "archive"
var infrequentAccess = "infrequentaccess"
var standard = "standard"
var storageTierMap = map[string]*string{
archive: &archive,
infrequentAccess: &infrequentAccess,
standard: &standard,
}
var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
// Object describes an OCI bucket object
type Object struct {
fs *Fs // what this object is part of
remote string // The remote path
md5 string // MD5 hash if known
bytes int64 // Size of the object
lastModified time.Time // The modified time of the object if known
meta map[string]string // The object metadata if known - may be nil
mimeType string // Content-Type of the object
// Metadata as pointers to strings as they often won't be present
storageTier *string // e.g. Standard
}
// split returns bucket and bucketPath from the object
func (o *Object) split() (bucket, bucketPath string) {
return o.fs.split(o.remote)
}
// readMetaData gets the metadata if it hasn't already been fetched
func (o *Object) readMetaData(ctx context.Context) (err error) {
fs.Debugf(o, "trying to read metadata %v", o.remote)
if o.meta != nil {
return nil
}
info, err := o.headObject(ctx)
if err != nil {
return err
}
return o.decodeMetaDataHead(info)
}
// headObject gets the metadata from the object unconditionally
func (o *Object) headObject(ctx context.Context) (info *objectstorage.HeadObjectResponse, err error) {
bucketName, objectPath := o.split()
req := objectstorage.HeadObjectRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(objectPath),
}
useBYOKHeadObject(o.fs, &req)
var response objectstorage.HeadObjectResponse
err = o.fs.pacer.Call(func() (bool, error) {
var err error
response, err = o.fs.srv.HeadObject(ctx, req)
return shouldRetry(ctx, response.HTTPResponse(), err)
})
if err != nil {
if svcErr, ok := err.(common.ServiceError); ok {
if svcErr.GetHTTPStatusCode() == http.StatusNotFound {
return nil, fs.ErrorObjectNotFound
}
}
fs.Errorf(o, "Failed to head object: %v", err)
return nil, err
}
o.fs.cache.MarkOK(bucketName)
return &response, err
}
func (o *Object) decodeMetaDataHead(info *objectstorage.HeadObjectResponse) (err error) {
return o.setMetaData(
info.ContentLength,
info.ContentMd5,
info.ContentType,
info.LastModified,
info.StorageTier,
info.OpcMeta)
}
func (o *Object) decodeMetaDataObject(info *objectstorage.GetObjectResponse) (err error) {
return o.setMetaData(
info.ContentLength,
info.ContentMd5,
info.ContentType,
info.LastModified,
info.StorageTier,
info.OpcMeta)
}
func (o *Object) setMetaData(
contentLength *int64,
contentMd5 *string,
contentType *string,
lastModified *common.SDKTime,
storageTier interface{},
meta map[string]string) error {
if contentLength != nil {
o.bytes = *contentLength
}
if contentMd5 != nil {
md5, err := o.base64ToMd5(*contentMd5)
if err == nil {
o.md5 = md5
}
}
o.meta = meta
if o.meta == nil {
o.meta = map[string]string{}
}
// Read MD5 from metadata if present
if md5sumBase64, ok := o.meta[metaMD5Hash]; ok {
md5, err := o.base64ToMd5(md5sumBase64)
if err == nil { // only keep the MD5 if it decoded cleanly
o.md5 = md5
}
}
if lastModified == nil {
o.lastModified = time.Now()
fs.Logf(o, "Failed to read last modified")
} else {
o.lastModified = lastModified.Time
}
if contentType != nil {
o.mimeType = *contentType
}
if storageTier == nil || storageTier == "" {
o.storageTier = storageTierMap[standard]
} else {
tier := strings.ToLower(fmt.Sprintf("%v", storageTier))
o.storageTier = storageTierMap[tier]
}
return nil
}
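// base64ToMd5 converts a base64-encoded MD5 (as stored in object metadata)
// into the lowercase hex form rclone uses elsewhere, e.g.
// "1B2M2Y8AsgTpgAmY7PhCfg==" (the MD5 of the empty string) becomes
// "d41d8cd98f00b204e9800998ecf8427e". Values that don't decode to exactly
// 16 bytes are rejected with an error.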
func (o *Object) base64ToMd5(md5sumBase64 string) (md5 string, err error) {
md5sumBytes, err := base64.StdEncoding.DecodeString(md5sumBase64)
if err != nil {
fs.Debugf(o, "Failed to read md5sum from metadata %q: %v", md5sumBase64, err)
return "", err
} else if len(md5sumBytes) != 16 {
fs.Debugf(o, "failed to read md5sum from metadata %q: wrong length", md5sumBase64)
return "", fmt.Errorf("failed to read md5sum from metadata %q: wrong length", md5sumBase64)
}
return hex.EncodeToString(md5sumBytes), nil
}
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.bytes
}
// GetTier returns storage class as string
func (o *Object) GetTier() string {
if o.storageTier == nil || *o.storageTier == "" {
return standard
}
return *o.storageTier
}
// SetTier performs changing storage class
func (o *Object) SetTier(tier string) (err error) {
ctx := context.TODO()
tier = strings.ToLower(tier)
bucketName, bucketPath := o.split()
tierEnum, ok := objectstorage.GetMappingStorageTierEnum(tier)
if !ok {
return fmt.Errorf("not a valid storage tier %v ", tier)
}
req := objectstorage.UpdateObjectStorageTierRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
UpdateObjectStorageTierDetails: objectstorage.UpdateObjectStorageTierDetails{
ObjectName: common.String(bucketPath),
StorageTier: tierEnum,
},
}
_, err = o.fs.srv.UpdateObjectStorageTier(ctx, req)
if err != nil {
return err
}
o.storageTier = storageTierMap[tier]
return err
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
err := o.readMetaData(ctx)
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return ""
}
return o.mimeType
}
// Hash returns the MD5 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
// Convert base64 encoded md5 into lower case hex
if o.md5 == "" {
err := o.readMetaData(ctx)
if err != nil {
return "", err
}
}
return o.md5, nil
}
// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned to the http headers
func (o *Object) ModTime(ctx context.Context) (result time.Time) {
if o.fs.ci.UseServerModTime {
return o.lastModified
}
err := o.readMetaData(ctx)
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
}
// read mtime out of metadata if available
d, ok := o.meta[metaMtime]
if !ok || d == "" {
return o.lastModified
}
modTime, err := swift.FloatStringToTime(d)
if err != nil {
fs.Logf(o, "Failed to read mtime from object: %v", err)
return o.lastModified
}
return modTime
}
// SetModTime sets the modification time of the local fs object
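// Object storage has no way to update metadata in place, so this works by
// server-side copying the object onto itself with the new mtime stored in
// the "mtime" (opc-meta-mtime) metadata key.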
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
err := o.readMetaData(ctx)
if err != nil {
return err
}
o.meta[metaMtime] = swift.TimeToFloatString(modTime)
_, err = o.fs.Copy(ctx, o, o.remote)
return err
}
// Storable returns if this object is storable
func (o *Object) Storable() bool {
return true
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
bucketName, bucketPath := o.split()
req := objectstorage.DeleteObjectRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
}
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.DeleteObject(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
return err
}
// Open object file
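// The object size is refreshed from Content-Length, or from the total in a
// Content-Range header when a ranged read was requested (e.g.
// "bytes 0-9/4096" gives a size of 4096).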
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
bucketName, bucketPath := o.split()
req := objectstorage.GetObjectRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
}
o.applyGetObjectOptions(&req, options...)
useBYOKGetObject(o.fs, &req)
var resp objectstorage.GetObjectResponse
err := o.fs.pacer.Call(func() (bool, error) {
var err error
resp, err = o.fs.srv.GetObject(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
return nil, err
}
// read size from ContentLength or ContentRange
bytes := resp.ContentLength
if resp.ContentRange != nil {
var contentRange = *resp.ContentRange
slash := strings.IndexRune(contentRange, '/')
if slash >= 0 {
i, err := strconv.ParseInt(contentRange[slash+1:], 10, 64)
if err == nil {
bytes = &i
} else {
fs.Debugf(o, "Failed to find parse integer from in %q: %v", contentRange, err)
}
} else {
fs.Debugf(o, "Failed to find length in %q", contentRange)
}
}
err = o.decodeMetaDataObject(&resp)
if err != nil {
return nil, err
}
o.bytes = *bytes
return resp.HTTPResponse().Body, nil
}
// Update an object if it has changed
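// Uploads smaller than upload_cutoff go via a single PutObject call;
// anything at or above it is handed to the OCI transfer manager, which does
// a multipart upload in chunk_size parts with upload_concurrency parallel
// goroutines.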
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
bucketName, bucketPath := o.split()
err = o.fs.makeBucket(ctx, bucketName)
if err != nil {
return err
}
// determine if we like upload single or multipart.
size := src.Size()
multipart := size >= int64(o.fs.opt.UploadCutoff)
// Set the mtime in the metadata
modTime := src.ModTime(ctx)
metadata := map[string]string{
metaMtime: swift.TimeToFloatString(modTime),
}
// read the md5sum if available
// - for non-multipart
// - so we can add a ContentMD5
// - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
// - for multipart provided checksums aren't disabled
// - so we can add the md5sum in the metadata as metaMD5Hash
var md5sumBase64 string
var md5sumHex string
if !multipart || !o.fs.opt.DisableChecksum {
md5sumHex, err = src.Hash(ctx, hash.MD5)
if err == nil && matchMd5.MatchString(md5sumHex) {
hashBytes, err := hex.DecodeString(md5sumHex)
if err == nil {
md5sumBase64 = base64.StdEncoding.EncodeToString(hashBytes)
if multipart && !o.fs.opt.DisableChecksum {
// Set the md5sum as metadata on the object if
// - a multipart upload
// - the ETag is not an MD5, e.g. when using SSE/SSE-C
// provided checksums aren't disabled
metadata[metaMD5Hash] = md5sumBase64
}
}
}
}
// Guess the content type
mimeType := fs.MimeType(ctx, src)
if multipart {
chunkSize := int64(o.fs.opt.ChunkSize)
uploadRequest := transfer.UploadRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
ContentType: common.String(mimeType),
PartSize: common.Int64(chunkSize),
AllowMultipartUploads: common.Bool(true),
AllowParrallelUploads: common.Bool(true),
ObjectStorageClient: o.fs.srv,
EnableMultipartChecksumVerification: common.Bool(!o.fs.opt.DisableChecksum),
NumberOfGoroutines: common.Int(o.fs.opt.UploadConcurrency),
Metadata: metadataWithOpcPrefix(metadata),
}
if o.fs.opt.StorageTier != "" {
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
if !ok {
return fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
}
uploadRequest.StorageTier = storageTier
}
o.applyMultiPutOptions(&uploadRequest, options...)
useBYOKUpload(o.fs, &uploadRequest)
uploadStreamRequest := transfer.UploadStreamRequest{
UploadRequest: uploadRequest,
StreamReader: in,
}
uploadMgr := transfer.NewUploadManager()
var uploadID = ""
defer atexit.OnError(&err, func() {
if uploadID == "" {
return
}
if o.fs.opt.LeavePartsOnError {
return
}
fs.Debugf(o, "Cancelling multipart upload")
errCancel := o.fs.abortMultiPartUpload(
context.Background(),
bucketName,
bucketPath,
uploadID)
if errCancel != nil {
fs.Debugf(o, "Failed to cancel multipart upload: %v", errCancel)
}
})()
err = o.fs.pacer.Call(func() (bool, error) {
uploadResponse, err := uploadMgr.UploadStream(ctx, uploadStreamRequest)
var httpResponse *http.Response
if err == nil {
if uploadResponse.Type == transfer.MultipartUpload {
if uploadResponse.MultipartUploadResponse != nil {
httpResponse = uploadResponse.MultipartUploadResponse.HTTPResponse()
}
} else {
if uploadResponse.SinglepartUploadResponse != nil {
httpResponse = uploadResponse.SinglepartUploadResponse.HTTPResponse()
}
}
}
if err != nil {
uploadID := ""
if uploadResponse.MultipartUploadResponse != nil && uploadResponse.MultipartUploadResponse.UploadID != nil {
uploadID = *uploadResponse.MultipartUploadResponse.UploadID
fs.Debugf(o, "multipart streaming upload failed, aborting uploadID: %v, may retry", uploadID)
_ = o.fs.abortMultiPartUpload(ctx, bucketName, bucketPath, uploadID)
}
}
return shouldRetry(ctx, httpResponse, err)
})
if err != nil {
fs.Errorf(o, "multipart streaming upload failed %v", err)
return err
}
} else {
req := objectstorage.PutObjectRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
ContentType: common.String(mimeType),
PutObjectBody: io.NopCloser(in),
OpcMeta: metadata,
}
if size >= 0 {
req.ContentLength = common.Int64(size)
}
if o.fs.opt.StorageTier != "" {
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
if !ok {
return fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
}
req.StorageTier = storageTier
}
o.applyPutOptions(&req, options...)
useBYOKPutObject(o.fs, &req)
var resp objectstorage.PutObjectResponse
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.PutObject(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
fs.Errorf(o, "put object failed %v", err)
return err
}
}
// Read the metadata from the newly created object
o.meta = nil // wipe old metadata
return o.readMetaData(ctx)
}
func (o *Object) applyPutOptions(req *objectstorage.PutObjectRequest, options ...fs.OpenOption) {
// Apply upload options
for _, option := range options {
key, value := option.Header()
lowerKey := strings.ToLower(key)
switch lowerKey {
case "":
// ignore
case "cache-control":
req.CacheControl = common.String(value)
case "content-disposition":
req.ContentDisposition = common.String(value)
case "content-encoding":
req.ContentEncoding = common.String(value)
case "content-language":
req.ContentLanguage = common.String(value)
case "content-type":
req.ContentType = common.String(value)
default:
if strings.HasPrefix(lowerKey, ociMetaPrefix) {
req.OpcMeta[lowerKey] = value
} else {
fs.Errorf(o, "Don't know how to set key %q on upload", key)
}
}
}
}
func (o *Object) applyGetObjectOptions(req *objectstorage.GetObjectRequest, options ...fs.OpenOption) {
fs.FixRangeOption(options, o.bytes)
for _, option := range options {
switch option.(type) {
case *fs.RangeOption, *fs.SeekOption:
_, value := option.Header()
req.Range = &value
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
// Apply download options (response header overrides)
for _, option := range options {
key, value := option.Header()
lowerKey := strings.ToLower(key)
switch lowerKey {
case "":
// ignore
case "cache-control":
req.HttpResponseCacheControl = common.String(value)
case "content-disposition":
req.HttpResponseContentDisposition = common.String(value)
case "content-encoding":
req.HttpResponseContentEncoding = common.String(value)
case "content-language":
req.HttpResponseContentLanguage = common.String(value)
case "content-type":
req.HttpResponseContentType = common.String(value)
case "range":
// do nothing
default:
fs.Errorf(o, "Don't know how to set key %q on upload", key)
}
}
}
func (o *Object) applyMultiPutOptions(req *transfer.UploadRequest, options ...fs.OpenOption) {
// Apply upload options
for _, option := range options {
key, value := option.Header()
lowerKey := strings.ToLower(key)
switch lowerKey {
case "":
// ignore
case "content-encoding":
req.ContentEncoding = common.String(value)
case "content-language":
req.ContentLanguage = common.String(value)
case "content-type":
req.ContentType = common.String(value)
default:
if strings.HasPrefix(lowerKey, ociMetaPrefix) {
req.Metadata[lowerKey] = value
} else {
fs.Errorf(o, "Don't know how to set key %q on upload", key)
}
}
}
}
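// metadataWithOpcPrefix copies each user metadata key with the "opc-meta-"
// prefix the OCI SDK expects prepended, e.g. {"mtime": "1660000000.0"}
// becomes {"opc-meta-mtime": "1660000000.0"}. Keys that already carry the
// prefix are left out of the result.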
func metadataWithOpcPrefix(src map[string]string) map[string]string {
dst := make(map[string]string)
for lowerKey, value := range src {
if !strings.HasPrefix(lowerKey, ociMetaPrefix) {
dst[ociMetaPrefix+lowerKey] = value
}
}
return dst
}


@@ -1,317 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/lib/encoder"
)
const (
maxSizeForCopy = 4768 * 1024 * 1024
minChunkSize = fs.SizeSuffix(1024 * 1024 * 5)
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
defaultUploadConcurrency = 10
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
minSleep = 10 * time.Millisecond
defaultCopyTimeoutDuration = fs.Duration(time.Minute)
)
const (
userPrincipal = "user_principal_auth"
instancePrincipal = "instance_principal_auth"
resourcePrincipal = "resource_principal_auth"
environmentAuth = "env_auth"
noAuth = "no_auth"
userPrincipalHelpText = `use an OCI user and an API key for authentication.
you'll need to put your tenancy OCID, user OCID, region, and the path to and fingerprint of an API key in a config file.
https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm`
instancePrincipalHelpText = `use instance principals to authorize an instance to make API calls.
each instance has its own identity, and authenticates using the certificates that are read from instance metadata.
https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/callingservicesfrominstances.htm`
resourcePrincipalHelpText = `use resource principals to make API calls`
environmentAuthHelpText = `automatically pick up the credentials from the runtime (env), first one to provide auth wins`
noAuthHelpText = `no credentials needed, this is typically for reading public buckets`
)
// Options defines the configuration for this backend
type Options struct {
Provider string `config:"provider"`
Compartment string `config:"compartment"`
Namespace string `config:"namespace"`
Region string `config:"region"`
Endpoint string `config:"endpoint"`
Enc encoder.MultiEncoder `config:"encoding"`
ConfigFile string `config:"config_file"`
ConfigProfile string `config:"config_profile"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
DisableChecksum bool `config:"disable_checksum"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
CopyTimeout fs.Duration `config:"copy_timeout"`
StorageTier string `config:"storage_tier"`
LeavePartsOnError bool `config:"leave_parts_on_error"`
NoCheckBucket bool `config:"no_check_bucket"`
SSEKMSKeyID string `config:"sse_kms_key_id"`
SSECustomerAlgorithm string `config:"sse_customer_algorithm"`
SSECustomerKey string `config:"sse_customer_key"`
SSECustomerKeyFile string `config:"sse_customer_key_file"`
SSECustomerKeySha256 string `config:"sse_customer_key_sha256"`
}
func newOptions() []fs.Option {
return []fs.Option{{
Name: fs.ConfigProvider,
Help: "Choose your Auth Provider",
Required: true,
Default: environmentAuth,
Examples: []fs.OptionExample{{
Value: environmentAuth,
Help: environmentAuthHelpText,
}, {
Value: userPrincipal,
Help: userPrincipalHelpText,
}, {
Value: instancePrincipal,
Help: instancePrincipalHelpText,
}, {
Value: resourcePrincipal,
Help: resourcePrincipalHelpText,
}, {
Value: noAuth,
Help: noAuthHelpText,
}},
}, {
Name: "namespace",
Help: "Object storage namespace",
Required: true,
Sensitive: true,
}, {
Name: "compartment",
Help: "Object storage compartment OCID",
Provider: "!no_auth",
Required: true,
Sensitive: true,
}, {
Name: "region",
Help: "Object storage Region",
Required: true,
}, {
Name: "endpoint",
Help: "Endpoint for Object storage API.\n\nLeave blank to use the default endpoint for the region.",
Required: false,
}, {
Name: "config_file",
Help: "Path to OCI config file",
Provider: userPrincipal,
Default: "~/.oci/config",
Examples: []fs.OptionExample{{
Value: "~/.oci/config",
Help: "oci configuration file location",
}},
}, {
Name: "config_profile",
Help: "Profile name inside the oci config file",
Provider: userPrincipal,
Default: "Default",
Examples: []fs.OptionExample{{
Value: "Default",
Help: "Use the default profile",
}},
}, {
// Mapping from here: https://github.com/oracle/oci-go-sdk/blob/master/objectstorage/storage_tier.go
Name: "storage_tier",
Help: "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm",
Default: "Standard",
Advanced: true,
Examples: []fs.OptionExample{{
Value: "Standard",
Help: "Standard storage tier, this is the default tier",
}, {
Value: "InfrequentAccess",
Help: "InfrequentAccess storage tier",
}, {
Value: "Archive",
Help: "Archive storage tier",
}},
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload.
Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5 GiB.`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Chunk size to use for uploading.
When uploading files larger than upload_cutoff or files with unknown
size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google
photos or google docs) they will be uploaded as multipart uploads
using this chunk size.
Note that "upload_concurrency" chunks of this size are buffered
in memory per transfer.
If you are transferring large files over high-speed links and you have
enough memory, then increasing this will speed up the transfers.
Rclone will automatically increase the chunk size when uploading a
large file of known size to stay below the 10,000 chunks limit.
Files of unknown size are uploaded with the configured
chunk_size. Since the default chunk size is 5 MiB and there can be at
most 10,000 chunks, this means that by default the maximum size of
a file you can stream upload is 48 GiB. If you wish to stream upload
larger files then you will need to increase chunk_size.
Increasing the chunk size decreases the accuracy of the progress
statistics displayed with "-P" flag.
`,
Default: minChunkSize,
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.
This is the number of chunks of the same file that are uploaded
concurrently.
If you are uploading small numbers of large files over high-speed links
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
Default: defaultUploadConcurrency,
Advanced: true,
}, {
Name: "copy_cutoff",
Help: `Cutoff for switching to multipart copy.
Any files larger than this that need to be server-side copied will be
copied in chunks of this size.
The minimum is 0 and the maximum is 5 GiB.`,
Default: fs.SizeSuffix(maxSizeForCopy),
Advanced: true,
}, {
Name: "copy_timeout",
Help: `Timeout for copy.
Copy is an asynchronous operation, specify a timeout to wait for the copy to succeed
`,
Default: defaultCopyTimeoutDuration,
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Don't store MD5 checksum with object metadata.
Normally rclone will calculate the MD5 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
for data integrity checking but can cause long delays for large files
to start uploading.`,
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// Any UTF-8 character is valid in a key, however it can't handle
// invalid UTF-8, and / has a special meaning.
//
// The SDK can't seem to handle uploading files called '.'
// - initial / encoding
// - doubled / encoding
// - trailing / encoding
// so that OSS keys are always valid file names
Default: encoder.EncodeInvalidUtf8 |
encoder.EncodeSlash |
encoder.EncodeDot,
}, {
Name: "leave_parts_on_error",
Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on object storage for manual recovery.
It should be set to true for resuming uploads across different sessions.
WARNING: Storing parts of an incomplete multipart upload counts towards space usage on object storage and will add
additional costs if not cleaned up.
`,
Default: false,
Advanced: true,
}, {
Name: "no_check_bucket",
Help: `If set, don't attempt to check the bucket exists or create it.
This can be useful when trying to minimise the number of transactions
rclone does if you know the bucket exists already.
It can also be needed if the user you are using does not have bucket
creation permissions.
`,
Default: false,
Advanced: true,
}, {
Name: "sse_customer_key_file",
Help: `To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated
with the object. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.`,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}},
}, {
Name: "sse_customer_key",
Help: `To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to
encrypt or decrypt the data. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is
needed. For more information, see Using Your Own Keys for Server-Side Encryption
(https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm)`,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}},
}, {
Name: "sse_customer_key_sha256",
Help: `If using SSE-C, the optional header that specifies the base64-encoded SHA256 hash of the encryption
key. This value is used to check the integrity of the encryption key. See Using Your Own Keys for
Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm).`,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}},
}, {
Name: "sse_kms_key_id",
Help: `If using your own master key in vault, this header specifies the
OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call
the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key.
Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.`,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}},
}, {
Name: "sse_customer_algorithm",
Help: `If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm.
Object Storage supports "AES256" as the encryption algorithm. For more information, see
Using Your Own Keys for Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm).`,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}, {
Value: sseDefaultAlgorithm,
Help: sseDefaultAlgorithm,
}},
}}
}
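// The chunk_size help above encodes a simple piece of arithmetic: with the
// default 5 MiB chunks and the 10,000 part ceiling, the largest upload of
// unknown size is 5 MiB * 10,000, i.e. roughly 48 GiB. A minimal sketch of
// scaling the chunk size for a file of known size follows - an illustration
// of the idea only, not the backend's actual scaling code.
func suggestChunkSize(fileSize, defaultChunk int64) int64 {
	const maxParts = 10000
	chunk := defaultChunk
	for fileSize/chunk >= maxParts {
		chunk *= 2 // double until the resulting part count fits
	}
	return chunk
}
// e.g. suggestChunkSize(200<<30, 5<<20) == 40<<20: a 200 GiB file gets 40 MiB chunks.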

View File

@@ -1,698 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
// Package oracleobjectstorage provides an interface to the OCI object storage system.
package oracleobjectstorage
import (
"context"
"fmt"
"io"
"net/http"
"path"
"strings"
"time"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/pacer"
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "oracleobjectstorage",
Description: "Oracle Cloud Infrastructure Object Storage",
Prefix: "oos",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: newOptions(),
})
}
// Fs represents a remote object storage server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed config options
ci *fs.ConfigInfo // global config
features *fs.Features // optional features
srv *objectstorage.ObjectStorageClient // the connection to the object storage
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache for bucket creation status
pacer *fs.Pacer // To pace the API calls
}
// NewFs Initialize backend
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
err = validateSSECustomerKeyOptions(opt)
if err != nil {
return nil, err
}
ci := fs.GetConfig(ctx)
objectStorageClient, err := newObjectStorageClient(ctx, opt)
if err != nil {
return nil, err
}
pc := fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep)))
// Set pacer retries to 2 (1 try and 1 retry) because we are
// relying on SDK retry mechanism, but we allow 2 attempts to
// retry directory listings after XMLSyntaxError
pc.SetRetries(2)
f := &Fs{
name: name,
opt: *opt,
ci: ci,
srv: objectStorageClient,
cache: bucket.NewCache(),
pacer: pc,
}
f.setRoot(root)
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
SetTier: true,
GetTier: true,
SlowModTime: true,
}).Fill(ctx, f)
if f.rootBucket != "" && f.rootDirectory != "" && !strings.HasSuffix(root, "/") {
// Check to see if the (bucket,directory) is actually an existing file
oldRoot := f.root
newRoot, leaf := path.Split(oldRoot)
f.setRoot(newRoot)
_, err := f.NewObject(ctx, leaf)
if err != nil {
// File doesn't exist or is a directory so return old f
f.setRoot(oldRoot)
return f, nil
}
// return an error with fs which points to the parent
return f, fs.ErrorIsFile
}
return f, err
}
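// The tail of NewFs above is the usual rclone pattern for pointing a remote
// directly at an object: the root is split into parent and leaf, and if the
// leaf turns out to be an existing object the Fs is returned rooted at the
// parent together with fs.ErrorIsFile. A hedged caller-side sketch, assuming
// an "oos:" remote is configured; illustrative only.
func isSingleObject(ctx context.Context, remote string) (bool, error) {
	f, err := fs.NewFs(ctx, remote) // e.g. "oos:bucket/dir/file.txt"
	if err == fs.ErrorIsFile {
		_ = f // f is rooted at the parent, e.g. "oos:bucket/dir"
		return true, nil
	}
	return false, err
}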
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
}
return
}
func checkUploadCutoff(cs fs.SizeSuffix) error {
if cs > maxUploadCutoff {
return fmt.Errorf("%s is greater than %s", cs, maxUploadCutoff)
}
return nil
}
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadCutoff(cs)
if err == nil {
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
}
return
}
// ------------------------------------------------------------
// Implement backend that represents a remote object storage server
// Fs is the interface a cloud storage system must provide
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
if f.rootBucket == "" {
return "oos:root"
}
if f.rootDirectory == "" {
return fmt.Sprintf("oos:bucket %s", f.rootBucket)
}
return fmt.Sprintf("oos:bucket %s, path %s", f.rootBucket, f.rootDirectory)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Precision of the remote
func (f *Fs) Precision() time.Duration {
return time.Millisecond
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
f.root = parsePath(root)
f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}
// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
}
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
bucketName, directory := f.split(dir)
fs.Debugf(f, "listing: bucket : %v, directory: %v", bucketName, dir)
if bucketName == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
}
return f.listDir(ctx, bucketName, directory, f.rootDirectory, f.rootBucket == "")
}
// listFn is called from list to handle an object.
type listFn func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error
// list the objects into the function supplied from
// the bucket and root supplied
// (bucket, directory) is the starting directory
// If prefix is set then it is removed from all file names
// If addBucket is set then it adds the bucket to the start of the remotes generated
// If recurse is set the function will recursively list
// If limit is > 0 then it limits to that many files (must be less than 1000)
// If hidden is set then it will list the hidden (deleted) files too.
// if findFile is set it will look for files called (bucket, directory)
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, limit int,
fn listFn) (err error) {
if prefix != "" {
prefix += "/"
}
if directory != "" {
directory += "/"
}
delimiter := ""
if !recurse {
delimiter = "/"
}
chunkSize := 1000
if limit > 0 {
chunkSize = limit
}
var request = objectstorage.ListObjectsRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: common.String(bucket),
Prefix: common.String(directory),
Limit: common.Int(chunkSize),
Fields: common.String("name,size,etag,timeCreated,md5,timeModified,storageTier,archivalState"),
}
if delimiter != "" {
request.Delimiter = common.String(delimiter)
}
for {
var resp objectstorage.ListObjectsResponse
err = f.pacer.Call(func() (bool, error) {
var err error
resp, err = f.srv.ListObjects(ctx, request)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
if ociError, ok := err.(common.ServiceError); ok {
// If it is a timeout then we want to retry that
if ociError.GetHTTPStatusCode() == http.StatusNotFound {
err = fs.ErrorDirNotFound
}
}
if f.rootBucket == "" {
// if listing from the root ignore wrong region requests returning
// empty directory
if reqErr, ok := err.(common.ServiceError); ok {
// 301 if wrong region for bucket
if reqErr.GetHTTPStatusCode() == http.StatusMovedPermanently {
fs.Errorf(f, "Can't change region for bucket %q with no bucket specified", bucket)
return nil
}
}
}
return err
}
if !recurse {
for _, commonPrefix := range resp.ListObjects.Prefixes {
if commonPrefix == "" {
fs.Logf(f, "Nil common prefix received")
continue
}
remote := commonPrefix
remote = f.opt.Enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", remote)
continue
}
remote = remote[len(prefix):]
if addBucket {
remote = path.Join(bucket, remote)
}
remote = strings.TrimSuffix(remote, "/")
err = fn(remote, &objectstorage.ObjectSummary{Name: &remote}, true)
if err != nil {
return err
}
}
}
for i := range resp.Objects {
object := &resp.Objects[i]
// Finish if file name no longer has prefix
//if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
// return nil
//}
remote := *object.Name
remote = f.opt.Enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) {
// fs.Debugf(f, "Odd name received %v", object.Name)
continue
}
remote = remote[len(prefix):]
// Check for directory
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
if addBucket {
remote = path.Join(bucket, remote)
}
// is this a directory marker?
if isDirectory && object.Size != nil && *object.Size == 0 {
continue // skip directory marker
}
if isDirectory && len(remote) > 1 {
remote = remote[:len(remote)-1]
}
err = fn(remote, object, isDirectory)
if err != nil {
return err
}
}
// end if no NextFileName
if resp.NextStartWith == nil {
break
}
request.Start = resp.NextStartWith
}
return nil
}
// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *objectstorage.ObjectSummary, isDirectory bool) (fs.DirEntry, error) {
if isDirectory {
size := int64(0)
if object.Size != nil {
size = *object.Size
}
d := fs.NewDir(remote, time.Time{}).SetSize(size)
return d, nil
}
o, err := f.newObjectWithInfo(ctx, remote, object)
if err != nil {
return nil, err
}
return o, nil
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
fn := func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
if err != nil {
return err
}
if entry != nil {
entries = append(entries, entry)
}
return nil
}
err = f.list(ctx, bucket, directory, prefix, addBucket, false, 0, fn)
if err != nil {
return nil, err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
return entries, nil
}
// listBuckets returns all the buckets to out
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
if f.opt.Provider == noAuth {
return nil, fmt.Errorf("can't list buckets with %v provider, use a valid auth provider in config file", noAuth)
}
var request = objectstorage.ListBucketsRequest{
NamespaceName: common.String(f.opt.Namespace),
CompartmentId: common.String(f.opt.Compartment),
}
var resp objectstorage.ListBucketsResponse
for {
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.ListBuckets(ctx, request)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
return nil, err
}
for _, item := range resp.Items {
bucketName := f.opt.Enc.ToStandardName(*item.Name)
f.cache.MarkOK(bucketName)
d := fs.NewDir(bucketName, item.TimeCreated.Time)
entries = append(entries, d)
}
if resp.OpcNextPage == nil {
break
}
request.Page = resp.OpcNextPage
}
return entries, nil
}
// Return an Object from a path
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *objectstorage.ObjectSummary) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
if info != nil {
// Set info but not meta
if info.TimeModified == nil {
fs.Logf(o, "Failed to read last modified")
o.lastModified = time.Now()
} else {
o.lastModified = info.TimeModified.Time
}
if info.Md5 != nil {
md5, err := o.base64ToMd5(*info.Md5)
if err == nil { // only use the MD5 if it decoded cleanly
o.md5 = md5
}
}
o.bytes = *info.Size
o.storageTier = storageTierMap[strings.ToLower(string(info.StorageTier))]
} else {
err := o.readMetaData(ctx) // reads info and headers, returning an error
if err != nil {
return nil, err
}
}
return o, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
}
// Put the object into the bucket
// Copy the reader in to the new object which is returned
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: src.Remote(),
}
return o, o.Update(ctx, in, src, options...)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
}
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
bucketName, _ := f.split(dir)
return f.makeBucket(ctx, bucketName)
}
// makeBucket creates the bucket if it doesn't exist
func (f *Fs) makeBucket(ctx context.Context, bucketName string) error {
if f.opt.NoCheckBucket {
return nil
}
return f.cache.Create(bucketName, func() error {
details := objectstorage.CreateBucketDetails{
Name: common.String(bucketName),
CompartmentId: common.String(f.opt.Compartment),
PublicAccessType: objectstorage.CreateBucketDetailsPublicAccessTypeNopublicaccess,
}
req := objectstorage.CreateBucketRequest{
NamespaceName: common.String(f.opt.Namespace),
CreateBucketDetails: details,
}
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CreateBucket(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err == nil {
fs.Infof(f, "Bucket %q created with accessType %q", bucketName,
objectstorage.CreateBucketDetailsPublicAccessTypeNopublicaccess)
}
if svcErr, ok := err.(common.ServiceError); ok {
if code := svcErr.GetCode(); code == "BucketAlreadyOwnedByYou" || code == "BucketAlreadyExists" {
err = nil
}
}
return err
}, func() (bool, error) {
return f.bucketExists(ctx, bucketName)
})
}
// Check if the bucket exists
//
// NB this can return incorrect results if called immediately after bucket deletion
func (f *Fs) bucketExists(ctx context.Context, bucketName string) (bool, error) {
req := objectstorage.HeadBucketRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: common.String(bucketName),
}
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.HeadBucket(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err == nil {
return true, nil
}
if err, ok := err.(common.ServiceError); ok {
if err.GetHTTPStatusCode() == http.StatusNotFound {
return false, nil
}
}
return false, err
}
// Rmdir deletes an empty bucket. If the bucket is not empty this will fail with an appropriate error
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
bucketName, directory := f.split(dir)
if bucketName == "" || directory != "" {
return nil
}
return f.cache.Remove(bucketName, func() error {
req := objectstorage.DeleteBucketRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: common.String(bucketName),
}
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.DeleteBucket(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err == nil {
fs.Infof(f, "Bucket %q deleted", bucketName)
}
return err
})
}
func (f *Fs) abortMultiPartUpload(ctx context.Context, bucketName, bucketPath, uploadID string) (err error) {
if uploadID == "" {
return nil
}
request := objectstorage.AbortMultipartUploadRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
UploadId: common.String(uploadID),
}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.AbortMultipartUpload(ctx, request)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
return err
}
// cleanUpBucket removes all pending multipart uploads for a given bucket over the age of maxAge
func (f *Fs) cleanUpBucket(ctx context.Context, bucket string, maxAge time.Duration,
uploads []*objectstorage.MultipartUpload) (err error) {
fs.Infof(f, "cleaning bucket %q of pending multipart uploads older than %v", bucket, maxAge)
for _, upload := range uploads {
if upload.TimeCreated != nil && upload.Object != nil && upload.UploadId != nil {
age := time.Since(upload.TimeCreated.Time)
what := fmt.Sprintf("pending multipart upload for bucket %q key %q dated %v (%v ago)", bucket, *upload.Object,
upload.TimeCreated, age)
if age > maxAge {
fs.Infof(f, "removing %s", what)
if operations.SkipDestructive(ctx, what, "remove pending upload") {
continue
}
_ = f.abortMultiPartUpload(ctx, *upload.Bucket, *upload.Object, *upload.UploadId)
}
} else {
fs.Infof(f, "MultipartUpload doesn't have sufficient details to abort.")
}
}
return err
}
// cleanUp removes all pending multipart uploads older than maxAge
func (f *Fs) cleanUp(ctx context.Context, maxAge time.Duration) (err error) {
uploadsMap, err := f.listMultipartUploadsAll(ctx)
if err != nil {
return err
}
for bucketName, uploads := range uploadsMap {
cleanErr := f.cleanUpBucket(ctx, bucketName, maxAge, uploads)
if cleanErr != nil {
fs.Errorf(f, "Failed to cleanup bucket %q: %v", bucketName, cleanErr)
err = cleanErr
}
}
return err
}
// CleanUp removes all pending multipart uploads older than 24 hours
func (f *Fs) CleanUp(ctx context.Context) (err error) {
return f.cleanUp(ctx, 24*time.Hour)
}
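// CleanUp plugs the backend into rclone's generic cleanup support, so
// "rclone cleanup remote:bucket" aborts multipart uploads older than 24 hours.
// A minimal programmatic sketch of the same call through the optional
// features table (assumes a configured remote; illustrative only).
func abortStaleUploads(ctx context.Context, remote string) error {
	f, err := fs.NewFs(ctx, remote) // e.g. "oos:bucket"
	if err != nil {
		return err
	}
	if do := f.Features().CleanUp; do != nil {
		return do(ctx) // ends up in cleanUp(ctx, 24*time.Hour) above
	}
	return fmt.Errorf("%v: backend does not support cleanup", f)
}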
// ------------------------------------------------------------
// Implement ListRer, an optional interface for Fs
// ------------------------------------------------------------
/*
ListR lists the objects and directories of the Fs starting
from dir recursively into out.
dir should be "" to start from the root, and should not
have trailing slashes.
This should return ErrDirNotFound if the directory isn't
found.
It should call callback for each tranche of entries read.
These need not be returned in any particular order. If
callback returns an error then the listing will stop
immediately.
Don't implement this unless you have a more efficient way
of listing recursively than doing a directory traversal.
*/
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucketName, directory := f.split(dir)
list := walk.NewListRHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
if err != nil {
return err
}
return list.Add(entry)
})
}
if bucketName == "" {
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
bucketName := entry.Remote()
err = listR(bucketName, "", f.rootDirectory, true)
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucketName)
}
} else {
err = listR(bucketName, directory, f.rootDirectory, f.rootBucket == "")
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucketName)
}
return list.Flush()
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.Commander = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.GetTierer = &Object{}
_ fs.SetTierer = &Object{}
)

View File

@@ -1,33 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestOracleObjectStorage:",
TiersToTest: []string{"standard", "archive"},
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
},
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)

View File

@@ -1,7 +0,0 @@
// Build for oracleobjectstorage for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || solaris || js
// +build plan9 solaris js
package oracleobjectstorage

View File

@@ -1,362 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"context"
"fmt"
"strings"
"time"
"github.com/rclone/rclone/fs"
)
var refreshGracePeriod = 30 * time.Second
// StateRefreshFunc is a function type used for StateChangeConf that is
// responsible for refreshing the item being watched for a state change.
//
// It returns three results. `result` is any object that will be returned
// as the final object after waiting for state change. This allows you to
// return the final updated object, for example an EC2 instance after refreshing
// it. A nil result represents not found.
//
// `state` is the latest state of that object. And `err` is any error that
// may have happened while refreshing the state.
type StateRefreshFunc func() (result interface{}, state string, err error)
// StateChangeConf is the configuration struct used for `WaitForState`.
type StateChangeConf struct {
Delay time.Duration // Wait this time before starting checks
Pending []string // States that are "allowed" and will continue trying
Refresh StateRefreshFunc // Refreshes the current state
Target []string // Target state
Timeout time.Duration // The amount of time to wait before timeout
MinTimeout time.Duration // Smallest time to wait before refreshes
PollInterval time.Duration // Override MinTimeout/backoff and only poll this often
NotFoundChecks int // Number of times to allow not found (nil result from Refresh)
// This is to work around inconsistent APIs
ContinuousTargetOccurrence int // Number of times the Target state has to occur continuously
}
// WaitForStateContext watches an object and waits for it to achieve the state
// specified in the configuration using the specified Refresh() func,
// waiting the number of seconds specified in the timeout configuration.
//
// If the Refresh function returns an error, exit immediately with that error.
//
// If the Refresh function returns a state other than the Target state or one
// listed in Pending, return immediately with an error.
//
// If the Timeout is exceeded before reaching the Target state, return an
// error.
//
// Otherwise, the result is the result of the first call to the Refresh function to
// reach the target state.
//
// Cancellation from the passed in context will cancel the refresh loop
func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType string) (interface{}, error) {
// fs.Debugf(entityType, "Waiting for state to become: %s", conf.Target)
notfoundTick := 0
targetOccurrence := 0
// Set a default for times to check for not found
if conf.NotFoundChecks == 0 {
conf.NotFoundChecks = 20
}
if conf.ContinuousTargetOccurrence == 0 {
conf.ContinuousTargetOccurrence = 1
}
type Result struct {
Result interface{}
State string
Error error
Done bool
}
// Read every result from the refresh loop, waiting for a positive result.Done.
resCh := make(chan Result, 1)
// cancellation channel for the refresh loop
cancelCh := make(chan struct{})
result := Result{}
go func() {
defer close(resCh)
select {
case <-time.After(conf.Delay):
case <-cancelCh:
return
}
// start with 0 delay for the first loop
var wait time.Duration
for {
// store the last result
resCh <- result
// wait and watch for cancellation
select {
case <-cancelCh:
return
case <-time.After(wait):
// first round had no wait
if wait == 0 {
wait = 100 * time.Millisecond
}
}
res, currentState, err := conf.Refresh()
result = Result{
Result: res,
State: currentState,
Error: err,
}
if err != nil {
resCh <- result
return
}
// If we're waiting for the absence of a thing, then return
if res == nil && len(conf.Target) == 0 {
targetOccurrence++
if conf.ContinuousTargetOccurrence == targetOccurrence {
result.Done = true
resCh <- result
return
}
continue
}
if res == nil {
// If we didn't find the resource, check if we have been
// not finding it for a while, and if so, report an error.
notfoundTick++
if notfoundTick > conf.NotFoundChecks {
result.Error = &NotFoundError{
LastError: err,
Retries: notfoundTick,
}
resCh <- result
return
}
} else {
// Reset the counter for when a resource isn't found
notfoundTick = 0
found := false
for _, allowed := range conf.Target {
if currentState == allowed {
found = true
targetOccurrence++
if conf.ContinuousTargetOccurrence == targetOccurrence {
result.Done = true
resCh <- result
return
}
continue
}
}
for _, allowed := range conf.Pending {
if currentState == allowed {
found = true
targetOccurrence = 0
break
}
}
if !found && len(conf.Pending) > 0 {
result.Error = &UnexpectedStateError{
LastError: err,
State: result.State,
ExpectedState: conf.Target,
}
resCh <- result
return
}
}
// Wait between refreshes using exponential backoff, except when
// waiting for the target state to reoccur.
if targetOccurrence == 0 {
wait *= 2
}
// If a poll interval has been specified, choose that interval.
// Otherwise, bound the default value.
if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second {
wait = conf.PollInterval
} else {
if wait < conf.MinTimeout {
wait = conf.MinTimeout
} else if wait > 10*time.Second {
wait = 10 * time.Second
}
}
// fs.Debugf(entityType, "[TRACE] Waiting %s before next try", wait)
}
}()
// store the last value result from the refresh loop
lastResult := Result{}
timeout := time.After(conf.Timeout)
for {
select {
case r, ok := <-resCh:
// channel closed, so return the last result
if !ok {
return lastResult.Result, lastResult.Error
}
// we reached the intended state
if r.Done {
return r.Result, r.Error
}
// still waiting, store the last result
lastResult = r
case <-ctx.Done():
close(cancelCh)
return nil, ctx.Err()
case <-timeout:
// fs.Debugf(entityType, "[WARN] WaitForState timeout after %s", conf.Timeout)
// fs.Debugf(entityType, "[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod)
// cancel the goroutine and start our grace period timer
close(cancelCh)
timeout := time.After(refreshGracePeriod)
// we need a for loop and a label to break on, because we may have
// an extra response value to read, but still want to wait for the
// channel to close.
forSelect:
for {
select {
case r, ok := <-resCh:
if r.Done {
// the last refresh loop reached the desired state
return r.Result, r.Error
}
if !ok {
// the goroutine returned
break forSelect
}
// target state not reached, save the result for the
// TimeoutError and wait for the channel to close
lastResult = r
case <-ctx.Done():
fs.Errorf(entityType, "Context cancellation detected, abandoning grace period")
break forSelect
case <-timeout:
fs.Errorf(entityType, "WaitForState exceeded refresh grace period")
break forSelect
}
}
return nil, &TimeoutError{
LastError: lastResult.Error,
LastState: lastResult.State,
Timeout: conf.Timeout,
ExpectedState: conf.Target,
}
}
}
}
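// A hedged sketch of how a StateChangeConf is driven, e.g. for the
// asynchronous server-side copy mentioned in the copy_timeout option. The
// state names and the getState callback are illustrative stand-ins, not the
// backend's exact values.
func waitForCopyExample(ctx context.Context, getState func() (string, error)) error {
	conf := &StateChangeConf{
		Delay:      5 * time.Second,
		Pending:    []string{"ACCEPTED", "IN_PROGRESS"},
		Target:     []string{"COMPLETED"},
		Timeout:    10 * time.Minute,
		MinTimeout: 2 * time.Second,
		Refresh: func() (interface{}, string, error) {
			state, err := getState() // e.g. read the copy work request status
			return state, state, err
		},
	}
	_, err := conf.WaitForStateContext(ctx, "copy work request")
	return err
}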
// NotFoundError resource not found error
type NotFoundError struct {
LastError error
LastRequest interface{}
LastResponse interface{}
Message string
Retries int
}
func (e *NotFoundError) Error() string {
if e.Message != "" {
return e.Message
}
if e.Retries > 0 {
return fmt.Sprintf("couldn't find resource (%d retries)", e.Retries)
}
return "couldn't find resource"
}
func (e *NotFoundError) Unwrap() error {
return e.LastError
}
// UnexpectedStateError is returned when Refresh returns a state that's neither in Target nor Pending
type UnexpectedStateError struct {
LastError error
State string
ExpectedState []string
}
func (e *UnexpectedStateError) Error() string {
return fmt.Sprintf(
"unexpected state '%s', wanted target '%s'. last error: %s",
e.State,
strings.Join(e.ExpectedState, ", "),
e.LastError,
)
}
func (e *UnexpectedStateError) Unwrap() error {
return e.LastError
}
// TimeoutError is returned when WaitForState times out
type TimeoutError struct {
LastError error
LastState string
Timeout time.Duration
ExpectedState []string
}
func (e *TimeoutError) Error() string {
expectedState := "resource to be gone"
if len(e.ExpectedState) > 0 {
expectedState = fmt.Sprintf("state to become '%s'", strings.Join(e.ExpectedState, ", "))
}
extraInfo := make([]string, 0)
if e.LastState != "" {
extraInfo = append(extraInfo, fmt.Sprintf("last state: '%s'", e.LastState))
}
if e.Timeout > 0 {
extraInfo = append(extraInfo, fmt.Sprintf("timeout: %s", e.Timeout.String()))
}
suffix := ""
if len(extraInfo) > 0 {
suffix = fmt.Sprintf(" (%s)", strings.Join(extraInfo, ", "))
}
if e.LastError != nil {
return fmt.Sprintf("timeout while waiting for %s%s: %s",
expectedState, suffix, e.LastError)
}
return fmt.Sprintf("timeout while waiting for %s%s",
expectedState, suffix)
}
func (e *TimeoutError) Unwrap() error {
return e.LastError
}

View File

@@ -110,11 +110,10 @@ func init() {
encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8),
}, {
Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "d0",
Advanced: true,
-Sensitive: true,
}, {
Name: "hostname",
Help: `Hostname to connect to.
@@ -139,8 +138,7 @@ with rclone authorize.
This is only required when you want to use the cleanup command. Due to a bug
in the pcloud API the required API does not support OAuth authentication so
we have to rely on user password authentication for it.`,
Advanced: true,
-Sensitive: true,
}, {
Name: "password",
Help: "Your pcloud password.",

View File

@@ -1,536 +0,0 @@
// Package api has type definitions for pikpak
//
// Manually obtained from the API responses using the browser dev tools and https://mholt.github.io/json-to-go/
package api
import (
"fmt"
"reflect"
"strconv"
"time"
)
const (
// "2022-09-17T14:31:06.056+08:00"
timeFormat = `"` + time.RFC3339 + `"`
)
// Time represents date and time information for the pikpak API, by using RFC3339
type Time time.Time
// MarshalJSON turns a Time into JSON (in UTC)
func (t *Time) MarshalJSON() (out []byte, err error) {
timeString := (*time.Time)(t).Format(timeFormat)
return []byte(timeString), nil
}
// UnmarshalJSON turns JSON into a Time
func (t *Time) UnmarshalJSON(data []byte) error {
if string(data) == "null" || string(data) == `""` {
return nil
}
newT, err := time.Parse(timeFormat, string(data))
if err != nil {
return err
}
*t = Time(newT)
return nil
}
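// Because timeFormat includes the surrounding quotes, Time marshals to and
// from a JSON string directly. A small round-trip sketch; illustrative only,
// and it would additionally need encoding/json imported.
func exampleTimeRoundTrip() (string, error) {
	var v struct {
		Created Time `json:"created_time"`
	}
	in := []byte(`{"created_time":"2022-09-17T14:31:06.056+08:00"}`)
	if err := json.Unmarshal(in, &v); err != nil {
		return "", err
	}
	// marshalling &v keeps the field addressable so (*Time).MarshalJSON is used
	out, err := json.Marshal(&v)
	return string(out), err
}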
// Types of things in Item
const (
KindOfFolder = "drive#folder"
KindOfFile = "drive#file"
KindOfFileList = "drive#fileList"
KindOfResumable = "drive#resumable"
KindOfForm = "drive#form"
ThumbnailSizeS = "SIZE_SMALL"
ThumbnailSizeM = "SIZE_MEDIUM"
ThumbnailSizeL = "SIZE_LARGE"
PhaseTypeComplete = "PHASE_TYPE_COMPLETE"
PhaseTypeRunning = "PHASE_TYPE_RUNNING"
PhaseTypeError = "PHASE_TYPE_ERROR"
PhaseTypePending = "PHASE_TYPE_PENDING"
UploadTypeForm = "UPLOAD_TYPE_FORM"
UploadTypeResumable = "UPLOAD_TYPE_RESUMABLE"
ListLimit = 100
)
// ------------------------------------------------------------
// Error details api error from pikpak
type Error struct {
Reason string `json:"error"` // short description of the reason, e.g. "file_name_empty" "invalid_request"
Code int `json:"error_code"`
URL string `json:"error_url,omitempty"`
Message string `json:"error_description,omitempty"`
// can have either of `error_details` or `details``
ErrorDetails []*ErrorDetails `json:"error_details,omitempty"`
Details []*ErrorDetails `json:"details,omitempty"`
}
// ErrorDetails contains further details of api error
type ErrorDetails struct {
Type string `json:"@type,omitempty"`
Reason string `json:"reason,omitempty"`
Domain string `json:"domain,omitempty"`
Metadata struct {
} `json:"metadata,omitempty"` // TODO: undiscovered yet
Locale string `json:"locale,omitempty"` // e.g. "en"
Message string `json:"message,omitempty"`
StackEntries []interface{} `json:"stack_entries,omitempty"` // TODO: undiscovered yet
Detail string `json:"detail,omitempty"`
}
// Error returns a string for the error and satisfies the error interface
func (e *Error) Error() string {
out := fmt.Sprintf("Error %q (%d)", e.Reason, e.Code)
if e.Message != "" {
out += ": " + e.Message
}
return out
}
// Check Error satisfies the error interface
var _ error = (*Error)(nil)
// ------------------------------------------------------------
// Filters contains parameters for filters when listing.
//
// possible operators
// * in: a list of comma-separated string
// * eq: "true" or "false"
// * gt or lt: time format string, e.g. "2023-01-28T10:56:49.757+08:00"
type Filters struct {
Phase map[string]string `json:"phase,omitempty"` // "in" or "eq"
Trashed map[string]bool `json:"trashed,omitempty"` // "eq"
Kind map[string]string `json:"kind,omitempty"` // "eq"
Starred map[string]bool `json:"starred,omitempty"` // "eq"
ModifiedTime map[string]string `json:"modified_time,omitempty"` // "gt" or "lt"
}
// Set sets filter values using field name, operator and corresponding value
func (f *Filters) Set(field, operator, value string) {
if value == "" {
// UNSET for empty values
return
}
r := reflect.ValueOf(f)
fd := reflect.Indirect(r).FieldByName(field)
if v, err := strconv.ParseBool(value); err == nil {
fd.Set(reflect.ValueOf(map[string]bool{operator: v}))
} else {
fd.Set(reflect.ValueOf(map[string]string{operator: value}))
}
}
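// Set uses reflection so the same helper can fill both the bool-valued and
// the string-valued filter maps. A short usage sketch with illustrative values.
func exampleFilters() *Filters {
	f := &Filters{}
	f.Set("Trashed", "eq", "false")         // parses as bool -> {"trashed":{"eq":false}}
	f.Set("Phase", "eq", PhaseTypeComplete) // stays a string -> {"phase":{"eq":"PHASE_TYPE_COMPLETE"}}
	f.Set("ModifiedTime", "gt", "2023-01-28T10:56:49.757+08:00")
	return f
}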
// ------------------------------------------------------------
// Common Elements
// Link contains a download URL for opening files
type Link struct {
URL string `json:"url"`
Token string `json:"token"`
Expire Time `json:"expire"`
Type string `json:"type,omitempty"`
}
// Valid reports whether l is non-nil, has a URL, and is not expired.
func (l *Link) Valid() bool {
return l != nil && l.URL != "" && time.Now().Add(10*time.Second).Before(time.Time(l.Expire))
}
// URL is a basic form of URL
type URL struct {
Kind string `json:"kind,omitempty"` // e.g. "upload#url"
URL string `json:"url,omitempty"`
}
// ------------------------------------------------------------
// Base Elements
// FileList contains a list of File elements
type FileList struct {
Kind string `json:"kind,omitempty"` // drive#fileList
Files []*File `json:"files,omitempty"`
NextPageToken string `json:"next_page_token"`
Version string `json:"version,omitempty"`
VersionOutdated bool `json:"version_outdated,omitempty"`
}
// File is a basic element representing a single file object
//
// There are two types of download links,
// 1) one from File.WebContentLink or File.Links.ApplicationOctetStream.URL and
// 2) the other from File.Medias[].Link.URL.
// Empirically, 2) is less restrictive to multiple concurrent range requests
// for a single file, i.e. it supports a higher `--multi-thread-streams=N`.
// However, it is not generally applicable as it is only available for media.
type File struct {
Apps []*FileApp `json:"apps,omitempty"`
Audit *FileAudit `json:"audit,omitempty"`
Collection string `json:"collection,omitempty"` // TODO
CreatedTime Time `json:"created_time,omitempty"`
DeleteTime Time `json:"delete_time,omitempty"`
FileCategory string `json:"file_category,omitempty"`
FileExtension string `json:"file_extension,omitempty"`
FolderType string `json:"folder_type,omitempty"`
Hash string `json:"hash,omitempty"` // sha1 but NOT a valid file hash. looks like a torrent hash
IconLink string `json:"icon_link,omitempty"`
ID string `json:"id,omitempty"`
Kind string `json:"kind,omitempty"` // "drive#file"
Links *FileLinks `json:"links,omitempty"`
Md5Checksum string `json:"md5_checksum,omitempty"`
Medias []*Media `json:"medias,omitempty"`
MimeType string `json:"mime_type,omitempty"`
ModifiedTime Time `json:"modified_time,omitempty"` // updated when renamed or moved
Name string `json:"name,omitempty"`
OriginalFileIndex int `json:"original_file_index,omitempty"` // TODO
OriginalURL string `json:"original_url,omitempty"`
Params *FileParams `json:"params,omitempty"`
ParentID string `json:"parent_id,omitempty"`
Phase string `json:"phase,omitempty"`
Revision int `json:"revision,omitempty,string"`
Size int64 `json:"size,omitempty,string"`
SortName string `json:"sort_name,omitempty"`
Space string `json:"space,omitempty"`
SpellName []interface{} `json:"spell_name,omitempty"` // TODO maybe list of something?
Starred bool `json:"starred,omitempty"`
ThumbnailLink string `json:"thumbnail_link,omitempty"`
Trashed bool `json:"trashed,omitempty"`
UserID string `json:"user_id,omitempty"`
UserModifiedTime Time `json:"user_modified_time,omitempty"`
WebContentLink string `json:"web_content_link,omitempty"`
Writable bool `json:"writable,omitempty"`
}
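// A hedged helper expressing the preference described in the File doc comment
// above: favour a still-valid per-media link, then fall back to the generic
// links. Not the backend's actual selection logic.
func pickDownloadURL(f *File) string {
	for _, m := range f.Medias {
		if m.Link.Valid() { // media links tolerate concurrent range requests better
			return m.Link.URL
		}
	}
	if f.Links != nil && f.Links.ApplicationOctetStream.Valid() {
		return f.Links.ApplicationOctetStream.URL
	}
	return f.WebContentLink
}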
// FileLinks includes links to file at backend
type FileLinks struct {
ApplicationOctetStream *Link `json:"application/octet-stream,omitempty"`
}
// FileAudit contains audit information for the file
type FileAudit struct {
Status string `json:"status,omitempty"` // "STATUS_OK"
Message string `json:"message,omitempty"`
Title string `json:"title,omitempty"`
}
// Media contains info about supported version of media, e.g. original, transcoded, etc
type Media struct {
MediaID string `json:"media_id,omitempty"`
MediaName string `json:"media_name,omitempty"`
Video struct {
Height int `json:"height,omitempty"`
Width int `json:"width,omitempty"`
Duration int64 `json:"duration,omitempty"`
BitRate int `json:"bit_rate,omitempty"`
FrameRate int `json:"frame_rate,omitempty"`
VideoCodec string `json:"video_codec,omitempty"` // "h264", "hevc"
AudioCodec string `json:"audio_codec,omitempty"` // "pcm_bluray", "aac"
VideoType string `json:"video_type,omitempty"` // "mpegts"
HdrType string `json:"hdr_type,omitempty"`
} `json:"video,omitempty"`
Link *Link `json:"link,omitempty"`
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
VipTypes []interface{} `json:"vip_types,omitempty"` // TODO maybe list of something?
RedirectLink string `json:"redirect_link,omitempty"`
IconLink string `json:"icon_link,omitempty"`
IsDefault bool `json:"is_default,omitempty"`
Priority int `json:"priority,omitempty"`
IsOrigin bool `json:"is_origin,omitempty"`
ResolutionName string `json:"resolution_name,omitempty"`
IsVisible bool `json:"is_visible,omitempty"`
Category string `json:"category,omitempty"`
}
// FileParams includes parameters for instant open
type FileParams struct {
Duration int64 `json:"duration,omitempty,string"` // in seconds
Height int `json:"height,omitempty,string"`
Platform string `json:"platform,omitempty"` // "Upload"
PlatformIcon string `json:"platform_icon,omitempty"`
URL string `json:"url,omitempty"`
Width int `json:"width,omitempty,string"`
}
// FileApp includes parameters for instant open
type FileApp struct {
ID string `json:"id,omitempty"` // "decompress" for rar files
Name string `json:"name,omitempty"` // "decompress" for rar files
Access []interface{} `json:"access,omitempty"`
Link string `json:"link,omitempty"` // "https://mypikpak.com/drive/decompression/{File.Id}?gcid={File.Hash}\u0026wv-style=topbar%3Ahide"
RedirectLink string `json:"redirect_link,omitempty"`
VipTypes []interface{} `json:"vip_types,omitempty"`
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
IconLink string `json:"icon_link,omitempty"`
IsDefault bool `json:"is_default,omitempty"`
Params struct {
} `json:"params,omitempty"` // TODO
CategoryIds []interface{} `json:"category_ids,omitempty"`
AdSceneType int `json:"ad_scene_type,omitempty"`
Space string `json:"space,omitempty"`
Links struct {
} `json:"links,omitempty"` // TODO
}
// ------------------------------------------------------------
// TaskList contains a list of Task elements
type TaskList struct {
Tasks []*Task `json:"tasks,omitempty"` // "drive#task"
NextPageToken string `json:"next_page_token"`
ExpiresIn int `json:"expires_in,omitempty"`
}
// Task is a basic element representing a single task such as offline download and upload
type Task struct {
Kind string `json:"kind,omitempty"` // "drive#task"
ID string `json:"id,omitempty"` // task id?
Name string `json:"name,omitempty"` // torrent name?
Type string `json:"type,omitempty"` // "offline"
UserID string `json:"user_id,omitempty"`
Statuses []interface{} `json:"statuses,omitempty"` // TODO
StatusSize int `json:"status_size,omitempty"` // TODO
Params *TaskParams `json:"params,omitempty"` // TODO
FileID string `json:"file_id,omitempty"`
FileName string `json:"file_name,omitempty"`
FileSize string `json:"file_size,omitempty"`
Message string `json:"message,omitempty"` // e.g. "Saving"
CreatedTime Time `json:"created_time,omitempty"`
UpdatedTime Time `json:"updated_time,omitempty"`
ThirdTaskID string `json:"third_task_id,omitempty"` // TODO
Phase string `json:"phase,omitempty"` // e.g. "PHASE_TYPE_RUNNING"
Progress int `json:"progress,omitempty"`
IconLink string `json:"icon_link,omitempty"`
Callback string `json:"callback,omitempty"`
ReferenceResource interface{} `json:"reference_resource,omitempty"` // TODO
Space string `json:"space,omitempty"`
}
// TaskParams includes parameters informing status of Task
type TaskParams struct {
Age string `json:"age,omitempty"`
PredictSpeed string `json:"predict_speed,omitempty"`
PredictType string `json:"predict_type,omitempty"`
URL string `json:"url,omitempty"`
}
// Form contains parameters for upload by multipart/form-data
type Form struct {
Headers struct{} `json:"headers"`
Kind string `json:"kind"` // "drive#form"
Method string `json:"method"` // "POST"
MultiParts struct {
OSSAccessKeyID string `json:"OSSAccessKeyId"`
Signature string `json:"Signature"`
Callback string `json:"callback"`
Key string `json:"key"`
Policy string `json:"policy"`
XUserData string `json:"x:user_data"`
} `json:"multi_parts"`
URL string `json:"url"`
}
// Resumable contains parameters for upload by resumable
type Resumable struct {
Kind string `json:"kind,omitempty"` // "drive#resumable"
Provider string `json:"provider,omitempty"` // e.g. "PROVIDER_ALIYUN"
Params *ResumableParams `json:"params,omitempty"`
}
// ResumableParams specifies resumable parameters
type ResumableParams struct {
AccessKeyID string `json:"access_key_id,omitempty"`
AccessKeySecret string `json:"access_key_secret,omitempty"`
Bucket string `json:"bucket,omitempty"`
Endpoint string `json:"endpoint,omitempty"`
Expiration Time `json:"expiration,omitempty"`
Key string `json:"key,omitempty"`
SecurityToken string `json:"security_token,omitempty"`
}
// FileInArchive is a basic element in archive
type FileInArchive struct {
Index int `json:"index,omitempty"`
Filename string `json:"filename,omitempty"`
Filesize string `json:"filesize,omitempty"`
MimeType string `json:"mime_type,omitempty"`
Gcid string `json:"gcid,omitempty"`
Kind string `json:"kind,omitempty"`
IconLink string `json:"icon_link,omitempty"`
Path string `json:"path,omitempty"`
}
// ------------------------------------------------------------
// NewFile is a response to RequestNewFile
type NewFile struct {
File *File `json:"file,omitempty"`
Form *Form `json:"form,omitempty"`
Resumable *Resumable `json:"resumable,omitempty"`
Task *Task `json:"task,omitempty"` // null in this case
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_FORM" or "UPLOAD_TYPE_RESUMABLE"
}
// NewTask is a response to RequestNewTask
type NewTask struct {
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_URL"
File *File `json:"file,omitempty"` // null in this case
Task *Task `json:"task,omitempty"`
URL *URL `json:"url,omitempty"` // {"kind": "upload#url"}
}
// About informs drive status
type About struct {
Kind string `json:"kind,omitempty"` // "drive#about"
Quota *Quota `json:"quota,omitempty"`
ExpiresAt string `json:"expires_at,omitempty"`
Quotas struct {
} `json:"quotas,omitempty"` // maybe []*Quota?
}
// Quota informs drive quota
type Quota struct {
Kind string `json:"kind,omitempty"` // "drive#quota"
Limit int64 `json:"limit,omitempty,string"` // limit in bytes
Usage int64 `json:"usage,omitempty,string"` // bytes in use
UsageInTrash int64 `json:"usage_in_trash,omitempty,string"` // bytes in trash but this seems not working
PlayTimesLimit string `json:"play_times_limit,omitempty"` // maybe in seconds
PlayTimesUsage string `json:"play_times_usage,omitempty"` // maybe in seconds
}
// Share is a response to RequestShare
//
// used in PublicLink()
type Share struct {
ShareID string `json:"share_id,omitempty"`
ShareURL string `json:"share_url,omitempty"`
PassCode string `json:"pass_code,omitempty"`
ShareText string `json:"share_text,omitempty"`
}
// User contains user account information
//
// GET https://user.mypikpak.com/v1/user/me
type User struct {
Sub string `json:"sub,omitempty"` // userid for internal use
Name string `json:"name,omitempty"` // Username
Picture string `json:"picture,omitempty"` // URL to Avatar image
Email string `json:"email,omitempty"` // redacted email address
Providers *[]UserProvider `json:"providers,omitempty"` // OAuth provider
PhoneNumber string `json:"phone_number,omitempty"`
Password string `json:"password,omitempty"` // "SET" if configured
Status string `json:"status,omitempty"` // "ACTIVE"
CreatedAt Time `json:"created_at,omitempty"`
PasswordUpdatedAt Time `json:"password_updated_at,omitempty"`
}
// UserProvider details third-party authentication
type UserProvider struct {
ID string `json:"id,omitempty"` // e.g. "google.com"
ProviderUserID string `json:"provider_user_id,omitempty"`
Name string `json:"name,omitempty"` // username
}
// VIP includes subscription details about premium account
//
// GET https://api-drive.mypikpak.com/drive/v1/privilege/vip
type VIP struct {
Result string `json:"result,omitempty"` // "ACCEPTED"
Message string `json:"message,omitempty"`
RedirectURI string `json:"redirect_uri,omitempty"`
Data struct {
Expire Time `json:"expire,omitempty"`
Status string `json:"status,omitempty"` // "invalid" or "ok"
Type string `json:"type,omitempty"` // "novip" or "platinum"
UserID string `json:"user_id,omitempty"` // same as User.Sub
} `json:"data,omitempty"`
}
// DecompressResult is a response to RequestDecompress
type DecompressResult struct {
Status string `json:"status,omitempty"` // "OK"
StatusText string `json:"status_text,omitempty"`
TaskID string `json:"task_id,omitempty"` // same as File.Id
FilesNum int `json:"files_num,omitempty"` // number of files in archive
RedirectLink string `json:"redirect_link,omitempty"`
}
// ------------------------------------------------------------
// RequestShare is to request for file share
type RequestShare struct {
FileIds []string `json:"file_ids,omitempty"`
ShareTo string `json:"share_to,omitempty"` // "publiclink",
ExpirationDays int `json:"expiration_days,omitempty"` // -1 = 'forever'
PassCodeOption string `json:"pass_code_option,omitempty"` // "NOT_REQUIRED"
}
// RequestBatch is to request for batch actions
type RequestBatch struct {
Ids []string `json:"ids,omitempty"`
To map[string]string `json:"to,omitempty"`
}
// RequestNewFile is to request for creating a new `drive#folder` or `drive#file`
type RequestNewFile struct {
// always required
Kind string `json:"kind"` // "drive#folder" or "drive#file"
Name string `json:"name"`
ParentID string `json:"parent_id"`
FolderType string `json:"folder_type"`
// only when uploading a new file
Hash string `json:"hash,omitempty"` // sha1sum
Resumable map[string]string `json:"resumable,omitempty"` // {"provider": "PROVIDER_ALIYUN"}
Size int64 `json:"size,omitempty"`
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_FORM" or "UPLOAD_TYPE_RESUMABLE"
}
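// A hedged sketch of a folder-creation request built from this struct; only
// the always-required fields are filled and the values are placeholders.
func newFolderRequest(name, parentID string) *RequestNewFile {
	return &RequestNewFile{
		Kind:     KindOfFolder, // "drive#folder"
		Name:     name,
		ParentID: parentID,
		// FolderType and the upload-only fields are left at their zero values here
	}
}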
// RequestNewTask is to request for creating a new task like offline downloads
//
// Name and ParentID can be left empty.
type RequestNewTask struct {
Kind string `json:"kind,omitempty"` // "drive#file"
Name string `json:"name,omitempty"`
ParentID string `json:"parent_id,omitempty"`
UploadType string `json:"upload_type,omitempty"` // "UPLOAD_TYPE_URL"
URL *URL `json:"url,omitempty"` // {"url": downloadUrl}
FolderType string `json:"folder_type,omitempty"` // "" if parent_id else "DOWNLOAD"
}
// RequestDecompress is to request for decompress of archive files
type RequestDecompress struct {
Gcid string `json:"gcid,omitempty"` // same as File.Hash
Password string `json:"password,omitempty"` // ""
FileID string `json:"file_id,omitempty"`
Files []*FileInArchive `json:"files,omitempty"` // can request selected files to be decompressed
DefaultParent bool `json:"default_parent,omitempty"`
}
// ------------------------------------------------------------
// NOT implemented YET
// RequestArchiveFileList is to request for a list of files in archive
//
// POST https://api-drive.mypikpak.com/decompress/v1/list
type RequestArchiveFileList struct {
Gcid string `json:"gcid,omitempty"` // same as api.File.Hash
Path string `json:"path,omitempty"` // "" by default
Password string `json:"password,omitempty"` // "" by default
FileID string `json:"file_id,omitempty"`
}
// ArchiveFileList is a response to RequestArchiveFileList
type ArchiveFileList struct {
Status string `json:"status,omitempty"` // "OK"
StatusText string `json:"status_text,omitempty"` // ""
TaskID string `json:"task_id,omitempty"` // ""
CurrentPath string `json:"current_path,omitempty"` // ""
Title string `json:"title,omitempty"`
FileSize int64 `json:"file_size,omitempty"`
Gcid string `json:"gcid,omitempty"` // same as File.Hash
Files []*FileInArchive `json:"files,omitempty"`
}

View File

@@ -1,253 +0,0 @@
package pikpak
import (
"bytes"
"context"
"crypto/sha1"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"os"
"github.com/rclone/rclone/backend/pikpak/api"
"github.com/rclone/rclone/lib/rest"
)
// Globals
const (
cachePrefix = "rclone-pikpak-sha1sum-"
)
// requestDecompress requests decompress of compressed files
func (f *Fs) requestDecompress(ctx context.Context, file *api.File, password string) (info *api.DecompressResult, err error) {
req := &api.RequestDecompress{
Gcid: file.Hash,
Password: password,
FileID: file.ID,
Files: []*api.FileInArchive{},
DefaultParent: true,
}
opts := rest.Opts{
Method: "POST",
Path: "/decompress/v1/decompress",
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
return f.shouldRetry(ctx, resp, err)
})
return
}
// getUserInfo gets UserInfo from API
func (f *Fs) getUserInfo(ctx context.Context) (info *api.User, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: "https://user.mypikpak.com/v1/user/me",
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to get userinfo: %w", err)
}
return
}
// getVIPInfo gets VIPInfo from API
func (f *Fs) getVIPInfo(ctx context.Context) (info *api.VIP, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: "https://api-drive.mypikpak.com/drive/v1/privilege/vip",
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to get vip info: %w", err)
}
return
}
// requestBatchAction requests batch actions to API
//
// action can be one of batch{Copy,Delete,Trash,Untrash}
func (f *Fs) requestBatchAction(ctx context.Context, action string, req *api.RequestBatch) (err error) {
opts := rest.Opts{
Method: "POST",
Path: "/drive/v1/files:" + action,
NoResponse: true, // Only returns {"task_id":""}
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, &req, nil)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return fmt.Errorf("batch action %q failed: %w", action, err)
}
return nil
}
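// Example (not from the original source): a hypothetical in-package helper
// that moves several files to the trash in one batch call. The action name
// "batchTrash" follows the batch{Copy,Delete,Trash,Untrash} naming
// described above.
func (f *Fs) trashFiles(ctx context.Context, ids []string) error {
	return f.requestBatchAction(ctx, "batchTrash", &api.RequestBatch{Ids: ids})
}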
// requestNewTask posts a new task request and returns the api.Task from the api.NewTask response
func (f *Fs) requestNewTask(ctx context.Context, req *api.RequestNewTask) (info *api.Task, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/drive/v1/files",
}
var newTask api.NewTask
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, &req, &newTask)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
}
return newTask.Task, nil
}
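// Example (not from the original source): a hypothetical in-package helper
// that queues an offline download of a URL into an existing parent folder,
// following the field comments on api.RequestNewTask. The api.URL literal
// assumes that type has a URL field, matching the {"url": downloadUrl} note
// above; FolderType is left empty because a parent_id is supplied.
func (f *Fs) addOfflineDownload(ctx context.Context, downloadURL, parentID string) (*api.Task, error) {
	return f.requestNewTask(ctx, &api.RequestNewTask{
		Kind:       "drive#file",
		ParentID:   parentID,
		UploadType: "UPLOAD_TYPE_URL",
		URL:        &api.URL{URL: downloadURL},
	})
}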
// requestNewFile posts a new file request and returns the resulting api.NewFile
func (f *Fs) requestNewFile(ctx context.Context, req *api.RequestNewFile) (info *api.NewFile, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/drive/v1/files",
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
return f.shouldRetry(ctx, resp, err)
})
return
}
// getFile gets an api.File from the API for the ID passed,
// returning rich information containing the additional fields below
// * web_content_link
// * thumbnail_link
// * links
// * medias
func (f *Fs) getFile(ctx context.Context, ID string) (info *api.File, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/drive/v1/files/" + ID,
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
if err == nil && info.Phase != api.PhaseTypeComplete {
// could be pending right after file is created/uploaded.
return true, errors.New("not PHASE_TYPE_COMPLETE")
}
return f.shouldRetry(ctx, resp, err)
})
return
}
// patchFile updates attributes of the file by ID
//
// currently known patchable fields are
// * name
func (f *Fs) patchFile(ctx context.Context, ID string, req *api.File) (info *api.File, err error) {
opts := rest.Opts{
Method: "PATCH",
Path: "/drive/v1/files/" + ID,
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
return f.shouldRetry(ctx, resp, err)
})
return
}
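// Example (not from the original source): a hypothetical in-package helper
// that renames a file by patching only the name, the single attribute noted
// above as patchable (assuming api.File exposes it as a Name field).
func (f *Fs) renameFile(ctx context.Context, ID, newName string) (*api.File, error) {
	return f.patchFile(ctx, ID, &api.File{Name: newName})
}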
// getAbout gets drive#quota information from server
func (f *Fs) getAbout(ctx context.Context) (info *api.About, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/drive/v1/about",
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
return f.shouldRetry(ctx, resp, err)
})
return
}
// requestShare returns information about shareable links
func (f *Fs) requestShare(ctx context.Context, req *api.RequestShare) (info *api.Share, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/drive/v1/share",
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
return f.shouldRetry(ctx, resp, err)
})
return
}
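// Example (not from the original source): a hypothetical in-package helper
// that publishes a permanent, passcode-free public link for a single file,
// using the constant values documented on api.RequestShare ("publiclink",
// -1 for forever, "NOT_REQUIRED").
func (f *Fs) sharePublic(ctx context.Context, fileID string) (*api.Share, error) {
	return f.requestShare(ctx, &api.RequestShare{
		FileIds:        []string{fileID},
		ShareTo:        "publiclink",
		ExpirationDays: -1,
		PassCodeOption: "NOT_REQUIRED",
	})
}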
// readSHA1 reads the SHA1 of in, returning a reader which will read the same contents
//
// The cleanup function should be called when out is finished with
// regardless of whether this function returned an error or not.
func readSHA1(in io.Reader, size, threshold int64) (sha1sum string, out io.Reader, cleanup func(), err error) {
// we need an SHA1
hash := sha1.New()
// use the teeReader to write to the local file AND calculate the SHA1 while doing so
teeReader := io.TeeReader(in, hash)
// nothing to clean up by default
cleanup = func() {}
// don't cache small files on disk to reduce wear of the disk
if size > threshold {
var tempFile *os.File
// create the cache file
tempFile, err = os.CreateTemp("", cachePrefix)
if err != nil {
return
}
_ = os.Remove(tempFile.Name()) // Delete the file - may not work on Windows
// clean up the file after we are done downloading
cleanup = func() {
// the file should normally already be closed, but just to make sure
_ = tempFile.Close()
_ = os.Remove(tempFile.Name()) // delete the cache file after we are done - may be deleted already
}
// copy the ENTIRE file to disk and calculate the SHA1 in the process
if _, err = io.Copy(tempFile, teeReader); err != nil {
return
}
// jump to the start of the local file so we can pass it along
if _, err = tempFile.Seek(0, 0); err != nil {
return
}
// replace the already read source with a reader of our cached file
out = tempFile
} else {
// that's a small file, just read it into memory
var inData []byte
inData, err = io.ReadAll(teeReader)
if err != nil {
return
}
// set the reader to our read memory block
out = bytes.NewReader(inData)
}
return hex.EncodeToString(hash.Sum(nil)), out, cleanup, nil
}
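// Example (not from the original source): a hypothetical use of readSHA1
// when preparing an upload. cleanup is deferred unconditionally, as the
// comment above requires, and the 10 MiB spill-to-disk threshold is an
// assumed value, not one taken from this file.
func hashForUpload(in io.Reader, size int64) (string, error) {
	sha1sum, out, cleanup, err := readSHA1(in, size, 10*1024*1024)
	defer cleanup()
	if err != nil {
		return "", err
	}
	_ = out // in real use this reader would replace "in" for the upload
	return sha1sum, nil
}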

File diff suppressed because it is too large


@@ -1,17 +0,0 @@
// Test PikPak filesystem interface
package pikpak_test
import (
"testing"
"github.com/rclone/rclone/backend/pikpak"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestPikPak:",
NilObject: (*pikpak.Object)(nil),
})
}
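// Example (not from the original source): the integration tests are
// normally run against a configured remote with something along the lines
// of
//
//	go test -v -remote TestPikPak:
//
// assuming a remote named "TestPikPak" exists in the rclone config.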


@@ -82,15 +82,14 @@ func init() {
 OAuth2Config: oauthConfig,
 })
 },
-Options: append(oauthutil.SharedOptions, []fs.Option{{
+Options: []fs.Option{{
 Name: "api_key",
 Help: `API Key.

 This is not normally used - use oauth instead.
 `,
 Hide: fs.OptionHideBoth,
 Default: "",
-Sensitive: true,
 }, {
 Name: config.ConfigEncoding,
 Help: config.ConfigEncodingHelp,
@@ -100,7 +99,7 @@ This is not normally used - use oauth instead.
 encoder.EncodeBackSlash |
 encoder.EncodeDoubleQuote |
 encoder.EncodeInvalidUtf8),
-}}...),
+}},
 })
 }

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff