mirror of https://github.com/rclone/rclone.git synced 2025-12-26 05:03:43 +00:00

Compare commits


1 commit

717 changed files with 18741 additions and 69836 deletions


@@ -15,24 +15,22 @@ on:
   workflow_dispatch:
     inputs:
       manual:
-        description: Manual run (bypass default conditions)
-        type: boolean
         required: true
         default: true
 jobs:
   build:
-    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
+    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
     timeout-minutes: 60
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.18', 'go1.19']
+        job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.16', 'go1.17']
         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '1.20'
+            go: '1.18.x'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -41,16 +39,9 @@ jobs:
             librclonetest: true
             deploy: true
-          - job_name: linux_386
-            os: ubuntu-latest
-            go: '1.20'
-            goarch: 386
-            gotags: cmount
-            quicktest: true
           - job_name: mac_amd64
             os: macos-11
-            go: '1.20'
+            go: '1.18.x'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -59,14 +50,14 @@ jobs:
           - job_name: mac_arm64
             os: macos-11
-            go: '1.20'
+            go: '1.18.x'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true
           - job_name: windows
             os: windows-latest
-            go: '1.20'
+            go: '1.18.x'
             gotags: cmount
             cgo: '0'
             build_flags: '-include "^windows/"'
@@ -76,20 +67,20 @@ jobs:
           - job_name: other_os
             os: ubuntu-latest
-            go: '1.20'
+            go: '1.18.x'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true
-          - job_name: go1.18
+          - job_name: go1.16
             os: ubuntu-latest
-            go: '1.18'
+            go: '1.16.x'
             quicktest: true
             racequicktest: true
-          - job_name: go1.19
+          - job_name: go1.17
             os: ubuntu-latest
-            go: '1.19'
+            go: '1.17.x'
             quicktest: true
             racequicktest: true
@@ -99,13 +90,14 @@ jobs:
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2
         with:
           fetch-depth: 0
       - name: Install Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v2
         with:
+          stable: 'false'
           go-version: ${{ matrix.go }}
           check-latest: true
@@ -124,7 +116,7 @@ jobs:
           sudo modprobe fuse
           sudo chmod 666 /dev/fuse
           sudo chown root:$USER /etc/fuse.conf
-          sudo apt-get install fuse3 libfuse-dev rpm pkg-config
+          sudo apt-get install fuse libfuse-dev rpm pkg-config
         if: matrix.os == 'ubuntu-latest'
       - name: Install Libraries on macOS
@@ -163,7 +155,7 @@ jobs:
           env
       - name: Go module cache
-        uses: actions/cache@v3
+        uses: actions/cache@v2
         with:
           path: ~/go/pkg/mod
           key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -220,54 +212,45 @@ jobs:
         if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
   lint:
-    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
+    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
     timeout-minutes: 30
     name: "lint"
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2
      - name: Code quality test
-        uses: golangci/golangci-lint-action@v3
+        uses: golangci/golangci-lint-action@v2
         with:
           # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
           version: latest
-      # Run govulncheck on the latest go version, the one we build binaries with
-      - name: Install Go
-        uses: actions/setup-go@v3
-        with:
-          go-version: '1.20'
-          check-latest: true
-      - name: Install govulncheck
-        run: go install golang.org/x/vuln/cmd/govulncheck@latest
-      - name: Scan for vulnerabilities
-        run: govulncheck ./...
   android:
-    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
+    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
     timeout-minutes: 30
     name: "android-all"
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2
         with:
           fetch-depth: 0
       # Upgrade together with NDK version
       - name: Set up Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v1
         with:
-          go-version: '1.20'
+          go-version: 1.18.x
+      # Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
+      - name: Force NDK version
+        run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;23.1.7779620" | grep -v = || true
       - name: Go module cache
-        uses: actions/cache@v3
+        uses: actions/cache@v2
         with:
           path: ~/go/pkg/mod
           key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -288,29 +271,27 @@ jobs:
           go install golang.org/x/mobile/cmd/gobind@latest
           go install golang.org/x/mobile/cmd/gomobile@latest
           env PATH=$PATH:~/go/bin gomobile init
-          echo "RCLONE_NDK_VERSION=21" >> $GITHUB_ENV
       - name: arm-v7a gomobile build
-        run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
+        run: env PATH=$PATH:~/go/bin gomobile bind -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
       - name: arm-v7a Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=arm' >> $GITHUB_ENV
           echo 'GOARM=7' >> $GITHUB_ENV
           echo 'CGO_ENABLED=1' >> $GITHUB_ENV
           echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
       - name: arm-v7a build
-        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a .
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-armv7a .
       - name: arm64-v8a Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=arm64' >> $GITHUB_ENV
@@ -318,12 +299,12 @@ jobs:
           echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
       - name: arm64-v8a build
-        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-armv8a .
       - name: x86 Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=386' >> $GITHUB_ENV
@@ -331,12 +312,12 @@ jobs:
           echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
       - name: x86 build
-        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 .
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-x86 .
       - name: x64 Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=amd64' >> $GITHUB_ENV
@@ -344,7 +325,7 @@ jobs:
           echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
       - name: x64 build
-        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x64 .
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-x64 .
       - name: Upload artifacts
         run: |
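Both sides of the workflow above stamp the binary with -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION}. As a minimal standalone sketch of the -X linker mechanism (a toy main package, not rclone's actual fs.Version variable):

    // Build with:  go build -ldflags "-X main.version=v1.59.0" .
    package main

    import "fmt"

    // version must be a plain package-level string variable so the
    // linker's -X flag can overwrite it at build time; a plain
    // `go build` leaves the "dev" default in place.
    var version = "dev"

    func main() {
        fmt.Println("toy build version:", version)
    }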


@@ -12,7 +12,7 @@ jobs:
     name: Build image job
     steps:
       - name: Checkout master
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2
         with:
           fetch-depth: 0
       - name: Build and publish image


@@ -11,7 +11,7 @@ jobs:
     name: Build image job
     steps:
       - name: Checkout master
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2
         with:
           fetch-depth: 0
       - name: Get actual patch version
@@ -40,7 +40,7 @@ jobs:
     name: Build docker plugin job
     steps:
       - name: Checkout master
-        uses: actions/checkout@v3
+        uses: actions/checkout@v2
         with:
           fetch-depth: 0
       - name: Build and publish docker plugin


@@ -20,7 +20,7 @@ issues:
   exclude-use-default: false
   # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
-  max-issues-per-linter: 0
+  max-per-linter: 0
   # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
   max-same-issues: 0


@@ -77,7 +77,7 @@ Make sure you
 * Add [documentation](#writing-documentation) for a new feature.
 * [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages).
-When you are done with that push your changes to GitHub:
+When you are done with that push your changes to Github:
     git push -u origin my-new-feature
@@ -88,7 +88,7 @@ Your changes will then get reviewed and you might get asked to fix some stuff. I
 You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
-## Using Git and GitHub ##
+## Using Git and Github ##
 ### Committing your changes ###

MANUAL.html (generated, 10281 lines changed)

File diff suppressed because it is too large

MANUAL.md (generated, 9971 lines changed)

File diff suppressed because it is too large

MANUAL.txt (generated, 10225 lines changed)

File diff suppressed because it is too large


@@ -81,9 +81,6 @@ quicktest:
 racequicktest:
	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...
-compiletest:
-	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -run XXX ./...
 # Do source code quality checks
 check: rclone
	@echo "-- START CODE QUALITY REPORT -------------------------------"


@@ -42,15 +42,12 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
   * Google Drive [:page_facing_up:](https://rclone.org/drive/)
   * Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
   * HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
-  * HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
   * HTTP [:page_facing_up:](https://rclone.org/http/)
-  * Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
+  * Hubic [:page_facing_up:](https://rclone.org/hubic/)
   * Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
   * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
   * IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
-  * IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
   * Koofr [:page_facing_up:](https://rclone.org/koofr/)
-  * Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
   * Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
   * Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
   * Mega [:page_facing_up:](https://rclone.org/mega/)
@@ -63,20 +60,17 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
   * OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
   * OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
   * Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
-  * Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
   * ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
   * pCloud [:page_facing_up:](https://rclone.org/pcloud/)
   * premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
   * put.io [:page_facing_up:](https://rclone.org/putio/)
   * QingStor [:page_facing_up:](https://rclone.org/qingstor/)
-  * Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
   * Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
   * RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
   * Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
   * Seafile [:page_facing_up:](https://rclone.org/seafile/)
   * SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
   * SFTP [:page_facing_up:](https://rclone.org/sftp/)
-  * SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
   * StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
   * Storj [:page_facing_up:](https://rclone.org/storj/)
   * SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
@@ -89,19 +83,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
-### Virtual storage providers
-
-These backends adapt or modify other storage providers
-
-* Alias: rename existing remotes [:page_facing_up:](https://rclone.org/alias/)
-* Cache: cache remotes (DEPRECATED) [:page_facing_up:](https://rclone.org/cache/)
-* Chunker: split large files [:page_facing_up:](https://rclone.org/chunker/)
-* Combine: combine multiple remotes into a directory tree [:page_facing_up:](https://rclone.org/combine/)
-* Compress: compress files [:page_facing_up:](https://rclone.org/compress/)
-* Crypt: encrypt files [:page_facing_up:](https://rclone.org/crypt/)
-* Hasher: hash files [:page_facing_up:](https://rclone.org/hasher/)
-* Union: join multiple remotes to work together [:page_facing_up:](https://rclone.org/union/)
 ## Features
   * MD5/SHA-1 hashes checked at all times for file integrity
@@ -116,7 +97,7 @@ These backends adapt or modify other storage providers
   * Optional encryption ([Crypt](https://rclone.org/crypt/))
   * Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
   * Multi-threaded downloads to local disk
-  * Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDAV/FTP/SFTP/DLNA
+  * Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna
 ## Installation & documentation
@@ -137,5 +118,5 @@ Please see the [rclone website](https://rclone.org/) for:
 License
 -------
-This is free software under the terms of the MIT license (check the
+This is free software under the terms of MIT the license (check the
 [COPYING file](/COPYING) included in this package).


@@ -53,14 +53,6 @@ doing that so it may be necessary to roll back dependencies to the
 version specified by `make updatedirect` in order to get rclone to
 build.
-## Tidy beta
-
-At some point after the release run
-
-    bin/tidy-beta v1.55
-
-where the version number is that of a couple ago to remove old beta binaries.
 ## Making a point release
 If rclone needs a point release due to some horrendous bug:
@@ -74,7 +66,8 @@ Set vars
 First make the release branch. If this is a second point release then
 this will be done already.
-  * git co -b ${BASE_TAG}-stable ${BASE_TAG}.0
+  * git branch ${BASE_TAG} ${BASE_TAG}-stable
+  * git co ${BASE_TAG}-stable
   * make startstable
 Now


@@ -1 +1 @@
-v1.62.0
+v1.59.0


@@ -1,4 +1,3 @@
-// Package alias implements a virtual provider to rename existing remotes.
 package alias
 import (


@@ -1,4 +1,3 @@
-// Package all imports all the backends
 package all
 import (
@@ -10,7 +9,6 @@ import (
    _ "github.com/rclone/rclone/backend/box"
    _ "github.com/rclone/rclone/backend/cache"
    _ "github.com/rclone/rclone/backend/chunker"
-   _ "github.com/rclone/rclone/backend/combine"
    _ "github.com/rclone/rclone/backend/compress"
    _ "github.com/rclone/rclone/backend/crypt"
    _ "github.com/rclone/rclone/backend/drive"
@@ -22,8 +20,8 @@ import (
    _ "github.com/rclone/rclone/backend/googlephotos"
    _ "github.com/rclone/rclone/backend/hasher"
    _ "github.com/rclone/rclone/backend/hdfs"
-   _ "github.com/rclone/rclone/backend/hidrive"
    _ "github.com/rclone/rclone/backend/http"
+   _ "github.com/rclone/rclone/backend/hubic"
    _ "github.com/rclone/rclone/backend/internetarchive"
    _ "github.com/rclone/rclone/backend/jottacloud"
    _ "github.com/rclone/rclone/backend/koofr"
@@ -34,7 +32,6 @@ import (
    _ "github.com/rclone/rclone/backend/netstorage"
    _ "github.com/rclone/rclone/backend/onedrive"
    _ "github.com/rclone/rclone/backend/opendrive"
-   _ "github.com/rclone/rclone/backend/oracleobjectstorage"
    _ "github.com/rclone/rclone/backend/pcloud"
    _ "github.com/rclone/rclone/backend/premiumizeme"
    _ "github.com/rclone/rclone/backend/putio"
@@ -44,7 +41,6 @@ import (
    _ "github.com/rclone/rclone/backend/sftp"
    _ "github.com/rclone/rclone/backend/sharefile"
    _ "github.com/rclone/rclone/backend/sia"
-   _ "github.com/rclone/rclone/backend/smb"
    _ "github.com/rclone/rclone/backend/storj"
    _ "github.com/rclone/rclone/backend/sugarsync"
    _ "github.com/rclone/rclone/backend/swift"


@@ -435,7 +435,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, title string, directorie
        query += " AND kind:" + folderKind
    } else if filesOnly {
        query += " AND kind:" + fileKind
-   //} else {
+   } else {
        // FIXME none of these work
        //query += " AND kind:(" + fileKind + " OR " + folderKind + ")"
        //query += " AND (kind:" + fileKind + " OR kind:" + folderKind + ")"
@@ -556,9 +556,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 //
 // This is a workaround for Amazon sometimes returning
 //
-//   - 408 REQUEST_TIMEOUT
-//   - 504 GATEWAY_TIMEOUT
-//   - 500 Internal server error
+// * 408 REQUEST_TIMEOUT
+// * 504 GATEWAY_TIMEOUT
+// * 500 Internal server error
 //
 // At the end of large uploads. The speculation is that the timeout
 // is waiting for the sha1 hashing to complete and the file may well
@@ -626,7 +626,7 @@ func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader,
 // Put the object into the container
 //
-// Copy the reader in to the new object which is returned.
+// Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -685,9 +685,9 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 // Move src to this remote using server-side move operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -1002,6 +1002,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 // ModTime returns the modification time of the object
 //
+//
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
 func (o *Object) ModTime(ctx context.Context) time.Time {

File diff suppressed because it is too large


@@ -1,5 +1,5 @@
-//go:build !plan9 && !solaris && !js && go1.18
-// +build !plan9,!solaris,!js,go1.18
+//go:build !plan9 && !solaris && !js
+// +build !plan9,!solaris,!js
 package azureblob


@@ -1,11 +1,12 @@
 // Test AzureBlob filesystem interface
-//go:build !plan9 && !solaris && !js && go1.18
-// +build !plan9,!solaris,!js,go1.18
+//go:build !plan9 && !solaris && !js
+// +build !plan9,!solaris,!js
 package azureblob
 import (
+    "context"
     "testing"
     "github.com/rclone/rclone/fs"
@@ -16,12 +17,10 @@ import (
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
     fstests.Run(t, &fstests.Opt{
         RemoteName:  "TestAzureBlob:",
         NilObject:   (*Object)(nil),
         TiersToTest: []string{"Hot", "Cool"},
-        ChunkedUpload: fstests.ChunkedUploadConfig{
-            MinChunkSize: defaultChunkSize,
-        },
+        ChunkedUpload: fstests.ChunkedUploadConfig{},
     })
 }
@@ -33,6 +32,36 @@ var (
     _ fstests.SetUploadChunkSizer = (*Fs)(nil)
 )
+// TestServicePrincipalFileSuccess checks that, given a proper JSON file, we can create a token.
+func TestServicePrincipalFileSuccess(t *testing.T) {
+    ctx := context.TODO()
+    credentials := `
+{
+    "appId": "my application (client) ID",
+    "password": "my secret",
+    "tenant": "my active directory tenant ID"
+}
+`
+    tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
+    if assert.NoError(t, err) {
+        assert.NotNil(t, tokenRefresher)
+    }
+}
+
+// TestServicePrincipalFileFailure checks that, given a JSON file with a missing secret, it returns an error.
+func TestServicePrincipalFileFailure(t *testing.T) {
+    ctx := context.TODO()
+    credentials := `
+{
+    "appId": "my application (client) ID",
+    "tenant": "my active directory tenant ID"
+}
+`
+    _, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
+    assert.Error(t, err)
+    assert.EqualError(t, err, "error creating service principal token: parameter 'secret' cannot be empty")
+}
+
 func TestValidateAccessTier(t *testing.T) {
     tests := map[string]struct {
         accessTier string


@@ -1,7 +1,7 @@
 // Build for azureblob for unsupported platforms to stop go complaining
 // about "no buildable Go source files "
-//go:build plan9 || solaris || js || !go1.18
-// +build plan9 solaris js !go1.18
+//go:build plan9 || solaris || js
+// +build plan9 solaris js
 package azureblob

backend/azureblob/imds.go (new file, 137 lines)

@@ -0,0 +1,137 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package azureblob

import (
    "bytes"
    "context"
    "encoding/json"
    "fmt"
    "io"
    "io/ioutil"
    "net/http"

    "github.com/Azure/go-autorest/autorest/adal"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/fshttp"
)

const (
    azureResource      = "https://storage.azure.com"
    imdsAPIVersion     = "2018-02-01"
    msiEndpointDefault = "http://169.254.169.254/metadata/identity/oauth2/token"
)

// This custom type is used to add the port the test server has bound to
// to the request context.
type testPortKey string

type msiIdentifierType int

const (
    msiClientID msiIdentifierType = iota
    msiObjectID
    msiResourceID
)

type userMSI struct {
    Type  msiIdentifierType
    Value string
}

type httpError struct {
    Response *http.Response
}

func (e httpError) Error() string {
    return fmt.Sprintf("HTTP error %v (%v)", e.Response.StatusCode, e.Response.Status)
}

// GetMSIToken attempts to obtain an MSI token from the Azure Instance
// Metadata Service.
func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
    // Attempt to get an MSI token; silently continue if unsuccessful.
    // This code has been lovingly stolen from azcopy's OAuthTokenManager.
    result := adal.Token{}
    req, err := http.NewRequestWithContext(ctx, "GET", msiEndpointDefault, nil)
    if err != nil {
        fs.Debugf(nil, "Failed to create request: %v", err)
        return result, err
    }
    params := req.URL.Query()
    params.Set("resource", azureResource)
    params.Set("api-version", imdsAPIVersion)

    // Specify user-assigned identity if requested.
    if identity != nil {
        switch identity.Type {
        case msiClientID:
            params.Set("client_id", identity.Value)
        case msiObjectID:
            params.Set("object_id", identity.Value)
        case msiResourceID:
            params.Set("mi_res_id", identity.Value)
        default:
            // If this happens, the calling function and this one don't agree on
            // what valid ID types exist.
            return result, fmt.Errorf("unknown MSI identity type specified")
        }
    }
    req.URL.RawQuery = params.Encode()

    // The Metadata header is required by all calls to IMDS.
    req.Header.Set("Metadata", "true")

    // If this function is run in a test, query the test server instead of IMDS.
    testPort, isTest := ctx.Value(testPortKey("testPort")).(int)
    if isTest {
        req.URL.Host = fmt.Sprintf("localhost:%d", testPort)
        req.Host = req.URL.Host
    }

    // Send request
    httpClient := fshttp.NewClient(ctx)
    resp, err := httpClient.Do(req)
    if err != nil {
        return result, fmt.Errorf("MSI is not enabled on this VM: %w", err)
    }
    defer func() { // resp and Body should not be nil
        _, err = io.Copy(ioutil.Discard, resp.Body)
        if err != nil {
            fs.Debugf(nil, "Unable to drain IMDS response: %v", err)
        }
        err = resp.Body.Close()
        if err != nil {
            fs.Debugf(nil, "Unable to close IMDS response: %v", err)
        }
    }()
    // Check if the status code indicates success
    // The request returns 200 currently, add 201 and 202 as well for possible extension.
    switch resp.StatusCode {
    case 200, 201, 202:
        break
    default:
        body, _ := ioutil.ReadAll(resp.Body)
        fs.Errorf(nil, "Couldn't obtain OAuth token from IMDS; server returned status code %d and body: %v", resp.StatusCode, string(body))
        return result, httpError{Response: resp}
    }

    b, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        return result, fmt.Errorf("Couldn't read IMDS response: %w", err)
    }
    // Remove BOM, if any. azcopy does this so I'm following along.
    b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))

    // This would be a good place to persist the token if a large number of rclone
    // invocations are being made in a short amount of time. If the token is
    // persisted, the azureblob code will need to check for expiry before every
    // storage API call.
    err = json.Unmarshal(b, &result)
    if err != nil {
        return result, fmt.Errorf("Couldn't unmarshal IMDS response: %w", err)
    }

    return result, nil
}
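A hedged usage sketch for the GetMSIToken helper restored above (hypothetical exampleMSIAuth wrapper, not part of the diff; assumes the code runs on an Azure VM with a managed identity enabled, where nil selects the system-assigned identity):

    // exampleMSIAuth is illustrative only.
    func exampleMSIAuth(ctx context.Context) (string, error) {
        token, err := GetMSIToken(ctx, nil) // nil => system-assigned identity
        if err != nil {
            return "", err // typically "MSI is not enabled on this VM"
        }
        // adal.Token exposes the raw bearer string for Authorization headers.
        return "Bearer " + token.OAuthToken(), nil
    }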


@@ -0,0 +1,118 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package azureblob

import (
    "context"
    "encoding/json"
    "net/http"
    "net/http/httptest"
    "strconv"
    "strings"
    "testing"

    "github.com/Azure/go-autorest/autorest/adal"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func handler(t *testing.T, actual *map[string]string) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        err := r.ParseForm()
        require.NoError(t, err)
        parameters := r.URL.Query()
        (*actual)["path"] = r.URL.Path
        (*actual)["Metadata"] = r.Header.Get("Metadata")
        (*actual)["method"] = r.Method
        for paramName := range parameters {
            (*actual)[paramName] = parameters.Get(paramName)
        }
        // Make response.
        response := adal.Token{}
        responseBytes, err := json.Marshal(response)
        require.NoError(t, err)
        _, err = w.Write(responseBytes)
        require.NoError(t, err)
    }
}

func TestManagedIdentity(t *testing.T) {
    // test user-assigned identity specifiers to use
    testMSIClientID := "d859b29f-5c9c-42f8-a327-ec1bc6408d79"
    testMSIObjectID := "9ffeb650-3ca0-4278-962b-5a38d520591a"
    testMSIResourceID := "/subscriptions/fe714c49-b8a4-4d49-9388-96a20daa318f/resourceGroups/somerg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/someidentity"
    tests := []struct {
        identity              *userMSI
        identityParameterName string
        expectedAbsent        []string
    }{
        {&userMSI{msiClientID, testMSIClientID}, "client_id", []string{"object_id", "mi_res_id"}},
        {&userMSI{msiObjectID, testMSIObjectID}, "object_id", []string{"client_id", "mi_res_id"}},
        {&userMSI{msiResourceID, testMSIResourceID}, "mi_res_id", []string{"object_id", "client_id"}},
        {nil, "(default)", []string{"object_id", "client_id", "mi_res_id"}},
    }
    alwaysExpected := map[string]string{
        "path":        "/metadata/identity/oauth2/token",
        "resource":    "https://storage.azure.com",
        "Metadata":    "true",
        "api-version": "2018-02-01",
        "method":      "GET",
    }
    for _, test := range tests {
        actual := make(map[string]string, 10)
        testServer := httptest.NewServer(handler(t, &actual))
        defer testServer.Close()
        testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
        require.NoError(t, err)
        ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
        _, err = GetMSIToken(ctx, test.identity)
        require.NoError(t, err)

        // Validate expected query parameters present
        expected := make(map[string]string)
        for k, v := range alwaysExpected {
            expected[k] = v
        }
        if test.identity != nil {
            expected[test.identityParameterName] = test.identity.Value
        }
        for key := range expected {
            value, exists := actual[key]
            if assert.Truef(t, exists, "test of %s: query parameter %s was not passed",
                test.identityParameterName, key) {
                assert.Equalf(t, expected[key], value,
                    "test of %s: parameter %s has incorrect value", test.identityParameterName, key)
            }
        }

        // Validate unexpected query parameters absent
        for _, key := range test.expectedAbsent {
            _, exists := actual[key]
            assert.Falsef(t, exists, "query parameter %s was unexpectedly passed")
        }
    }
}

func errorHandler(resultCode int) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        http.Error(w, "Test error generated", resultCode)
    }
}

func TestIMDSErrors(t *testing.T) {
    errorCodes := []int{404, 429, 500}
    for _, code := range errorCodes {
        testServer := httptest.NewServer(errorHandler(code))
        defer testServer.Close()
        testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
        require.NoError(t, err)
        ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
        _, err = GetMSIToken(ctx, nil)
        require.Error(t, err)
        httpErr, ok := err.(httpError)
        require.Truef(t, ok, "HTTP error %d did not result in an httpError object", code)
        assert.Equalf(t, httpErr.Response.StatusCode, code, "desired error %d but didn't get it", code)
    }
}


@@ -1,4 +1,3 @@
-// Package api provides types used by the Backblaze B2 API.
 package api
 import (
@@ -239,7 +238,7 @@ type GetFileInfoRequest struct {
 // If the original source of the file being uploaded has a last
 // modified time concept, Backblaze recommends using
 // src_last_modified_millis as the name, and a string holding the base
-// 10 number of milliseconds since midnight, January 1, 1970
+// 10 number number of milliseconds since midnight, January 1, 1970
 // UTC. This fits in a 64 bit integer such as the type "long" in the
 // programming language Java. It is intended to be compatible with
 // Java's time long. For example, it can be passed directly into the


@@ -1,4 +1,4 @@
-// Package b2 provides an interface to the Backblaze B2 object storage system.
+// Package b2 provides an interface to the Backblaze B2 object storage system
 package b2
 // FIXME should we remove sha1 checks from here as rclone now supports
@@ -280,7 +280,7 @@ func (f *Fs) Root() string {
 // String converts this Fs to a string
 func (f *Fs) String() string {
    if f.rootBucket == "" {
-       return "B2 root"
+       return fmt.Sprintf("B2 root")
    }
    if f.rootDirectory == "" {
        return fmt.Sprintf("B2 bucket %s", f.rootBucket)
@@ -656,15 +656,15 @@ var errEndList = errors.New("end list")
 //
 // (bucket, directory) is the starting directory
 //
-// If prefix is set then it is removed from all file names.
+// If prefix is set then it is removed from all file names
 //
 // If addBucket is set then it adds the bucket to the start of the
-// remotes generated.
+// remotes generated
 //
-// If recurse is set the function will recursively list.
+// If recurse is set the function will recursively list
 //
 // If limit is > 0 then it limits to that many files (must be less
-// than 1000).
+// than 1000)
 //
 // If hidden is set then it will list the hidden (deleted) files too.
 //
@@ -1025,7 +1025,7 @@ func (f *Fs) clearBucketID(bucket string) {
 // Put the object into the bucket
 //
-// Copy the reader in to the new object which is returned.
+// Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -1205,7 +1205,10 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
        }
    }
    var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
-       return time.Since(time.Time(timestamp)).Hours() > 24
+       if time.Since(time.Time(timestamp)).Hours() > 24 {
+           return true
+       }
+       return false
    }
    // Delete Config.Transfers in parallel
@@ -1221,7 +1224,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
                fs.Errorf(object.Name, "Can't create object %v", err)
                continue
            }
-           tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "deleting")
+           tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
            err = f.deleteByID(ctx, object.ID, object.Name)
            checkErr(err)
            tr.Done(ctx, err)
@@ -1235,7 +1238,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
            if err != nil {
                fs.Errorf(object, "Can't create object %+v", err)
            }
-           tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
+           tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
            if oldOnly && last != remote {
                // Check current version of the file
                if object.Action == "hide" {
@@ -1334,9 +1337,9 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
 // Copy src to this remote using server-side copy operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -1478,23 +1481,26 @@ func (o *Object) Size() int64 {
 // Clean the SHA1
 //
-// Make sure it is lower case.
+// Make sure it is lower case
 //
 // Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
 // Some tools (e.g. Cyberduck) use this
-func cleanSHA1(sha1 string) string {
+func cleanSHA1(sha1 string) (out string) {
+   out = strings.ToLower(sha1)
    const unverified = "unverified:"
-   return strings.TrimPrefix(strings.ToLower(sha1), unverified)
+   if strings.HasPrefix(out, unverified) {
+       out = out[len(unverified):]
+   }
+   return out
 }
 // decodeMetaDataRaw sets the metadata from the data passed in
 //
 // Sets
-//
-//  o.id
-//  o.modTime
-//  o.size
-//  o.sha1
+// o.id
+// o.modTime
+// o.size
+// o.sha1
 func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp api.Timestamp, Info map[string]string, mimeType string) (err error) {
    o.id = ID
    o.sha1 = SHA1
@@ -1513,11 +1519,10 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
 // decodeMetaData sets the metadata in the object from an api.File
 //
 // Sets
-//
-//  o.id
-//  o.modTime
-//  o.size
-//  o.sha1
+// o.id
+// o.modTime
+// o.size
+// o.sha1
 func (o *Object) decodeMetaData(info *api.File) (err error) {
    return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
 }
@@ -1525,11 +1530,10 @@ func (o *Object) decodeMetaData(info *api.File) (err error) {
 // decodeMetaDataFileInfo sets the metadata in the object from an api.FileInfo
 //
 // Sets
-//
-//  o.id
-//  o.modTime
-//  o.size
-//  o.sha1
+// o.id
+// o.modTime
+// o.size
+// o.sha1
 func (o *Object) decodeMetaDataFileInfo(info *api.FileInfo) (err error) {
    return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
 }
@@ -1587,11 +1591,10 @@ func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
 // readMetaData gets the metadata if it hasn't already been fetched
 //
 // Sets
-//
-//  o.id
-//  o.modTime
-//  o.size
-//  o.sha1
+// o.id
+// o.modTime
+// o.size
+// o.sha1
 func (o *Object) readMetaData(ctx context.Context) (err error) {
    if o.id != "" {
        return nil
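The cleanSHA1 hunk above trades a manual HasPrefix/slice sequence for a single strings.TrimPrefix call. A standalone toy program (not rclone code) showing why the two forms agree, including the no-prefix case:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        const unverified = "unverified:"
        // Lower-casing first makes the prefix match case-insensitive;
        // TrimPrefix returns its input unchanged when the prefix is absent.
        for _, in := range []string{
            "UNVERIFIED:3F786850E387550FDAB836ED7E6DC881DE23001B",
            "3F786850E387550FDAB836ED7E6DC881DE23001B",
        } {
            fmt.Println(strings.TrimPrefix(strings.ToLower(in), unverified))
        }
    }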


@@ -14,7 +14,6 @@ import (
    "io"
    "strings"
    "sync"
-   "time"
    "github.com/rclone/rclone/backend/b2/api"
    "github.com/rclone/rclone/fs"
@@ -22,7 +21,6 @@ import (
    "github.com/rclone/rclone/fs/chunksize"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/lib/atexit"
-   "github.com/rclone/rclone/lib/pool"
    "github.com/rclone/rclone/lib/rest"
    "golang.org/x/sync/errgroup"
 )
@@ -99,7 +97,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
    if size == -1 {
        fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
    } else {
-       chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize)
+       chunkSize = chunksize.Calculator(src, maxParts, defaultChunkSize)
        parts = size / int64(chunkSize)
        if size%int64(chunkSize) != 0 {
            parts++
@@ -430,47 +428,18 @@ func (up *largeUpload) Upload(ctx context.Context) (err error) {
    defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
    fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
    var (
        g, gCtx   = errgroup.WithContext(ctx)
        remaining = up.size
-       uploadPool *pool.Pool
-       ci         = fs.GetConfig(ctx)
    )
-   // If using large chunk size then make a temporary pool
-   if up.chunkSize <= int64(up.f.opt.ChunkSize) {
-       uploadPool = up.f.pool
-   } else {
-       uploadPool = pool.New(
-           time.Duration(up.f.opt.MemoryPoolFlushTime),
-           int(up.chunkSize),
-           ci.Transfers,
-           up.f.opt.MemoryPoolUseMmap,
-       )
-       defer uploadPool.Flush()
-   }
-   // Get an upload token and a buffer
-   getBuf := func() (buf []byte) {
-       up.f.getBuf(true)
-       if !up.doCopy {
-           buf = uploadPool.Get()
-       }
-       return buf
-   }
-   // Put an upload token and a buffer
-   putBuf := func(buf []byte) {
-       if !up.doCopy {
-           uploadPool.Put(buf)
-       }
-       up.f.putBuf(nil, true)
-   }
    g.Go(func() error {
        for part := int64(1); part <= up.parts; part++ {
            // Get a block of memory from the pool and token which limits concurrency.
-           buf := getBuf()
+           buf := up.f.getBuf(up.doCopy)
            // Fail fast, in case an errgroup managed function returns an error
            // gCtx is cancelled. There is no point in uploading all the other parts.
            if gCtx.Err() != nil {
-               putBuf(buf)
+               up.f.putBuf(buf, up.doCopy)
                return nil
            }
@@ -484,14 +453,14 @@ func (up *largeUpload) Upload(ctx context.Context) (err error) {
                buf = buf[:reqSize]
                _, err = io.ReadFull(up.in, buf)
                if err != nil {
-                   putBuf(buf)
+                   up.f.putBuf(buf, up.doCopy)
                    return err
                }
            }
            part := part // for the closure
            g.Go(func() (err error) {
-               defer putBuf(buf)
+               defer up.f.putBuf(buf, up.doCopy)
                if !up.doCopy {
                    err = up.transferChunk(gCtx, part, buf)
                } else {
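The Upload hunk above is built around the errgroup fan-out pattern: a producer loop claims a buffer per part, hands the part to a goroutine, and the first error cancels the derived context so remaining parts are skipped. A minimal isolated sketch of that pattern (toy part loop, no real buffers or uploads):

    package main

    import (
        "context"
        "fmt"

        "golang.org/x/sync/errgroup"
    )

    func main() {
        g, gCtx := errgroup.WithContext(context.Background())
        for part := int64(1); part <= 4; part++ {
            part := part // capture per iteration, as in the diff
            g.Go(func() error {
                // Fail fast: once any part fails, gCtx is cancelled and
                // the remaining parts return without doing work.
                if gCtx.Err() != nil {
                    return nil
                }
                fmt.Println("transferring part", part)
                return nil
            })
        }
        if err := g.Wait(); err != nil {
            fmt.Println("upload failed:", err)
        }
    }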


@@ -14,7 +14,7 @@ const (
    timeFormat = `"` + time.RFC3339 + `"`
 )
-// Time represents date and time information for the
+// Time represents represents date and time information for the
 // box API, by using RFC3339
 type Time time.Time


@@ -17,9 +17,9 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
"os"
"path" "path"
"strconv" "strconv"
"strings" "strings"
@@ -183,7 +183,7 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
} }
func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) { func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
file, err := os.ReadFile(configFile) file, err := ioutil.ReadFile(configFile)
if err != nil { if err != nil {
return nil, fmt.Errorf("box: failed to read Box config: %w", err) return nil, fmt.Errorf("box: failed to read Box config: %w", err)
} }
@@ -266,7 +266,7 @@ type Fs struct {
root string // the path we are working on root string // the path we are working on
opt Options // parsed options opt Options // parsed options
features *fs.Features // optional features features *fs.Features // optional features
srv *rest.Client // the connection to the server srv *rest.Client // the connection to the one drive server
dirCache *dircache.DirCache // Map of directory path to directory id dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry tokenRenewer *oauthutil.Renew // renew the token on expiry
@@ -692,7 +692,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Creates from the parameters passed in a half finished Object which // Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it // must have setMetaData called on it
// //
// Returns the object, leaf, directoryID and error. // Returns the object, leaf, directoryID and error
// //
// Used to create new objects // Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) { func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
@@ -752,7 +752,7 @@ func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size
// Put the object // Put the object
// //
// Copy the reader in to the new object which is returned. // Copy the reader in to the new object which is returned
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -792,9 +792,9 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// PutUnchecked the object into the container // PutUnchecked the object into the container
// //
// This will produce an error if the object already exists. // This will produce an error if the object already exists
// //
// Copy the reader in to the new object which is returned. // Copy the reader in to the new object which is returned
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -877,9 +877,9 @@ func (f *Fs) Precision() time.Duration {
// Copy src to this remote using server-side copy operations. // Copy src to this remote using server-side copy operations.
// //
// This is stored with the remote path given. // This is stored with the remote path given
// //
// It returns the destination Object and a possible error. // It returns the destination Object and a possible error
// //
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
@@ -897,7 +897,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
srcPath := srcObj.fs.rootSlash() + srcObj.remote srcPath := srcObj.fs.rootSlash() + srcObj.remote
dstPath := f.rootSlash() + remote dstPath := f.rootSlash() + remote
if strings.EqualFold(srcPath, dstPath) { if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath) return nil, fmt.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
} }
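
The strings.EqualFold change above swaps two throwaway lowercased copies for a single Unicode-aware comparison that allocates nothing. A minimal runnable sketch of the two forms:

package main

import (
	"fmt"
	"strings"
)

func main() {
	srcPath, dstPath := "/Backup/Photo.JPG", "/backup/photo.jpg"
	// EqualFold folds case rune by rune, building no temporary strings.
	fmt.Println(strings.EqualFold(srcPath, dstPath)) // true
	// The older form allocates two lowercased copies before comparing.
	fmt.Println(strings.ToLower(srcPath) == strings.ToLower(dstPath)) // true
}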
@@ -995,9 +995,9 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
// Move src to this remote using server-side move operations. // Move src to this remote using server-side move operations.
// //
// This is stored with the remote path given. // This is stored with the remote path given
// //
// It returns the destination Object and a possible error. // It returns the destination Object and a possible error
// //
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
@@ -1235,6 +1235,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// ModTime returns the modification time of the object // ModTime returns the modification time of the object
// //
//
// It attempts to read the objects mtime and if that isn't present the // It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers // LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time { func (o *Object) ModTime(ctx context.Context) time.Time {
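
A hedged sketch of the fallback order the comment above describes: prefer the object's stored mtime, then the Last-Modified HTTP header. modTime below is a hypothetical helper, not the backend's actual method:

package main

import (
	"fmt"
	"net/http"
	"time"
)

func modTime(stored time.Time, lastModified string) time.Time {
	if !stored.IsZero() {
		return stored // the object's own mtime metadata wins
	}
	if t, err := http.ParseTime(lastModified); err == nil {
		return t // fall back to the HTTP header
	}
	return time.Now() // last resort
}

func main() {
	fmt.Println(modTime(time.Time{}, "Mon, 02 Jan 2006 15:04:05 GMT"))
}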
@@ -1345,9 +1346,9 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
// Update the object with the contents of the io.Reader, modTime and size // Update the object with the contents of the io.Reader, modTime and size
// //
// If existing is set then it updates the object rather than creating a new one. // If existing is set then it updates the object rather than creating a new one
// //
// The new object may have been created if an error is returned. // The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
if o.fs.tokenRenewer != nil { if o.fs.tokenRenewer != nil {
o.fs.tokenRenewer.Start() o.fs.tokenRenewer.Start()


@@ -1,7 +1,6 @@
//go:build !plan9 && !js //go:build !plan9 && !js
// +build !plan9,!js // +build !plan9,!js
// Package cache implements a virtual provider to cache existing remotes.
package cache package cache
import ( import (
@@ -1038,7 +1037,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
} }
fs.Debugf(dir, "list: remove entry: %v", entryRemote) fs.Debugf(dir, "list: remove entry: %v", entryRemote)
} }
entries = nil //nolint:ineffassign entries = nil
// and then iterate over the ones from source (temp Objects will override source ones) // and then iterate over the ones from source (temp Objects will override source ones)
var batchDirectories []*Directory var batchDirectories []*Directory
@@ -1129,7 +1128,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
case fs.Directory: case fs.Directory:
_ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o)) _ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o))
default: default:
return fmt.Errorf("unknown object type %T", entry) return fmt.Errorf("Unknown object type %T", entry)
} }
} }


@@ -11,6 +11,7 @@ import (
goflag "flag" goflag "flag"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"log" "log"
"math/rand" "math/rand"
"os" "os"
@@ -101,12 +102,14 @@ func TestMain(m *testing.M) {
func TestInternalListRootAndInnerRemotes(t *testing.T) { func TestInternalListRootAndInnerRemotes(t *testing.T) {
id := fmt.Sprintf("tilrair%v", time.Now().Unix()) id := fmt.Sprintf("tilrair%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
// Instantiate inner fs // Instantiate inner fs
innerFolder := "inner" innerFolder := "inner"
runInstance.mkdir(t, rootFs, innerFolder) runInstance.mkdir(t, rootFs, innerFolder)
rootFs2, _ := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil) rootFs2, boltDb2 := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs2, boltDb2)
runInstance.writeObjectString(t, rootFs2, "one", "content") runInstance.writeObjectString(t, rootFs2, "one", "content")
listRoot, err := runInstance.list(t, rootFs, "") listRoot, err := runInstance.list(t, rootFs, "")
@@ -164,7 +167,7 @@ func TestInternalVfsCache(t *testing.T) {
li2 := [2]string{path.Join("test", "one"), path.Join("test", "second")} li2 := [2]string{path.Join("test", "one"), path.Join("test", "second")}
for _, r := range li2 { for _, r := range li2 {
var err error var err error
ci, err := os.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r)))) ci, err := ioutil.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
if err != nil || len(ci) == 0 { if err != nil || len(ci) == 0 {
log.Printf("========== '%v' not in cache", r) log.Printf("========== '%v' not in cache", r)
} else { } else {
@@ -223,7 +226,8 @@ func TestInternalVfsCache(t *testing.T) {
func TestInternalObjWrapFsFound(t *testing.T) { func TestInternalObjWrapFsFound(t *testing.T) {
id := fmt.Sprintf("tiowff%v", time.Now().Unix()) id := fmt.Sprintf("tiowff%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs) cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err) require.NoError(t, err)
@@ -255,7 +259,8 @@ func TestInternalObjWrapFsFound(t *testing.T) {
func TestInternalObjNotFound(t *testing.T) { func TestInternalObjNotFound(t *testing.T) {
id := fmt.Sprintf("tionf%v", time.Now().Unix()) id := fmt.Sprintf("tionf%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
obj, err := rootFs.NewObject(context.Background(), "404") obj, err := rootFs.NewObject(context.Background(), "404")
require.Error(t, err) require.Error(t, err)
@@ -265,7 +270,8 @@ func TestInternalObjNotFound(t *testing.T) {
func TestInternalCachedWrittenContentMatches(t *testing.T) { func TestInternalCachedWrittenContentMatches(t *testing.T) {
testy.SkipUnreliable(t) testy.SkipUnreliable(t)
id := fmt.Sprintf("ticwcm%v", time.Now().Unix()) id := fmt.Sprintf("ticwcm%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs) cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err) require.NoError(t, err)
@@ -292,7 +298,8 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
t.Skip("Skip test on windows/386") t.Skip("Skip test on windows/386")
} }
id := fmt.Sprintf("tidwcm%v", time.Now().Unix()) id := fmt.Sprintf("tidwcm%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
// write the object // write the object
runInstance.writeRemoteString(t, rootFs, "one", "one content") runInstance.writeRemoteString(t, rootFs, "one", "one content")
@@ -310,7 +317,8 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
func TestInternalCachedUpdatedContentMatches(t *testing.T) { func TestInternalCachedUpdatedContentMatches(t *testing.T) {
testy.SkipUnreliable(t) testy.SkipUnreliable(t)
id := fmt.Sprintf("ticucm%v", time.Now().Unix()) id := fmt.Sprintf("ticucm%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
var err error var err error
// create some rand test data // create some rand test data
@@ -339,7 +347,8 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
func TestInternalWrappedWrittenContentMatches(t *testing.T) { func TestInternalWrappedWrittenContentMatches(t *testing.T) {
id := fmt.Sprintf("tiwwcm%v", time.Now().Unix()) id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
vfsflags.Opt.DirCacheTime = time.Second vfsflags.Opt.DirCacheTime = time.Second
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
if runInstance.rootIsCrypt { if runInstance.rootIsCrypt {
t.Skip("test skipped with crypt remote") t.Skip("test skipped with crypt remote")
} }
@@ -369,7 +378,8 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
func TestInternalLargeWrittenContentMatches(t *testing.T) { func TestInternalLargeWrittenContentMatches(t *testing.T) {
id := fmt.Sprintf("tilwcm%v", time.Now().Unix()) id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
vfsflags.Opt.DirCacheTime = time.Second vfsflags.Opt.DirCacheTime = time.Second
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
if runInstance.rootIsCrypt { if runInstance.rootIsCrypt {
t.Skip("test skipped with crypt remote") t.Skip("test skipped with crypt remote")
} }
@@ -395,7 +405,8 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {
func TestInternalWrappedFsChangeNotSeen(t *testing.T) { func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
id := fmt.Sprintf("tiwfcns%v", time.Now().Unix()) id := fmt.Sprintf("tiwfcns%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs) cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err) require.NoError(t, err)
@@ -449,7 +460,8 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
func TestInternalMoveWithNotify(t *testing.T) { func TestInternalMoveWithNotify(t *testing.T) {
id := fmt.Sprintf("timwn%v", time.Now().Unix()) id := fmt.Sprintf("timwn%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
if !runInstance.wrappedIsExternal { if !runInstance.wrappedIsExternal {
t.Skipf("Not external") t.Skipf("Not external")
} }
@@ -535,7 +547,8 @@ func TestInternalMoveWithNotify(t *testing.T) {
func TestInternalNotifyCreatesEmptyParts(t *testing.T) { func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
id := fmt.Sprintf("tincep%v", time.Now().Unix()) id := fmt.Sprintf("tincep%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
if !runInstance.wrappedIsExternal { if !runInstance.wrappedIsExternal {
t.Skipf("Not external") t.Skipf("Not external")
} }
@@ -621,7 +634,8 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) { func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
id := fmt.Sprintf("ticsadcf%v", time.Now().Unix()) id := fmt.Sprintf("ticsadcf%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs) cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err) require.NoError(t, err)
@@ -653,7 +667,8 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
func TestInternalCacheWrites(t *testing.T) { func TestInternalCacheWrites(t *testing.T) {
id := "ticw" id := "ticw"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"writes": "true"}) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs) cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err) require.NoError(t, err)
@@ -674,7 +689,8 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
t.Skip("Skip test on windows/386") t.Skip("Skip test on windows/386")
} }
id := fmt.Sprintf("timcsr%v", time.Now().Unix()) id := fmt.Sprintf("timcsr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"workers": "1"}) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs) cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err) require.NoError(t, err)
@@ -709,7 +725,8 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
func TestInternalExpiredEntriesRemoved(t *testing.T) { func TestInternalExpiredEntriesRemoved(t *testing.T) {
id := fmt.Sprintf("tieer%v", time.Now().Unix()) id := fmt.Sprintf("tieer%v", time.Now().Unix())
vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, map[string]string{"info_age": "5s"}, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs) cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err) require.NoError(t, err)
@@ -746,7 +763,9 @@ func TestInternalBug2117(t *testing.T) {
vfsflags.Opt.DirCacheTime = time.Second * 10 vfsflags.Opt.DirCacheTime = time.Second * 10
id := fmt.Sprintf("tib2117%v", time.Now().Unix()) id := fmt.Sprintf("tib2117%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"}) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil,
map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
if runInstance.rootIsCrypt { if runInstance.rootIsCrypt {
t.Skipf("skipping crypt") t.Skipf("skipping crypt")
@@ -822,7 +841,7 @@ func newRun() *run {
} }
if uploadDir == "" { if uploadDir == "" {
r.tmpUploadDir, err = os.MkdirTemp("", "rclonecache-tmp") r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
if err != nil { if err != nil {
panic(fmt.Sprintf("Failed to create temp dir: %v", err)) panic(fmt.Sprintf("Failed to create temp dir: %v", err))
} }
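
The ioutil changes scattered through these hunks follow the standard mapping for the io/ioutil package, which was deprecated in Go 1.16:

	ioutil.ReadAll   -> io.ReadAll
	ioutil.ReadDir   -> os.ReadDir
	ioutil.ReadFile  -> os.ReadFile
	ioutil.WriteFile -> os.WriteFile
	ioutil.TempDir   -> os.MkdirTemp
	ioutil.TempFile  -> os.CreateTemp
	ioutil.NopCloser -> io.NopCloser

A runnable sketch of the replacement used in newRun:

package main

import (
	"fmt"
	"os"
)

func main() {
	dir, err := os.MkdirTemp("", "rclonecache-tmp") // was ioutil.TempDir
	if err != nil {
		panic(fmt.Sprintf("Failed to create temp dir: %v", err))
	}
	defer os.RemoveAll(dir)
	fmt.Println(dir)
}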
@@ -847,7 +866,7 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
return enc return enc
} }
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) { func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, cfg map[string]string, flags map[string]string) (fs.Fs, *cache.Persistent) {
fstest.Initialise() fstest.Initialise()
remoteExists := false remoteExists := false
for _, s := range config.FileSections() { for _, s := range config.FileSections() {
@@ -940,15 +959,10 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
} }
err = f.Mkdir(context.Background(), "") err = f.Mkdir(context.Background(), "")
require.NoError(t, err) require.NoError(t, err)
t.Cleanup(func() {
runInstance.cleanupFs(t, f)
})
return f, boltDb return f, boltDb
} }
func (r *run) cleanupFs(t *testing.T, f fs.Fs) { func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
err := f.Features().Purge(context.Background(), "") err := f.Features().Purge(context.Background(), "")
require.NoError(t, err) require.NoError(t, err)
cfs, err := r.getCacheFs(f) cfs, err := r.getCacheFs(f)
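
The newCacheFs/cleanupFs change above moves teardown into the helper with t.Cleanup, so call sites can drop their defer lines. A minimal sketch of that pattern, using a hypothetical helper:

package cache_test

import (
	"os"
	"testing"
)

// newTempStore registers its own teardown, mirroring how newCacheFs
// now calls t.Cleanup internally instead of returning state to defer.
func newTempStore(t *testing.T) string {
	t.Helper()
	dir, err := os.MkdirTemp("", "store")
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() { os.RemoveAll(dir) })
	return dir
}

func TestUsesStore(t *testing.T) {
	dir := newTempStore(t) // no defer needed at the call site
	if _, err := os.Stat(dir); err != nil {
		t.Fatal(err)
	}
}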
@@ -970,7 +984,7 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
chunk := int64(1024) chunk := int64(1024)
cnt := size / chunk cnt := size / chunk
left := size % chunk left := size % chunk
f, err := os.CreateTemp("", "rclonecache-tempfile") f, err := ioutil.TempFile("", "rclonecache-tempfile")
require.NoError(t, err) require.NoError(t, err)
for i := 0; i < int(cnt); i++ { for i := 0; i < int(cnt); i++ {


@@ -19,7 +19,7 @@ func TestIntegration(t *testing.T) {
RemoteName: "TestCache:", RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil), NilObject: (*cache.Object)(nil),
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt"}, UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"}, UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
}) })
} }


@@ -21,8 +21,10 @@ import (
func TestInternalUploadTempDirCreated(t *testing.T) { func TestInternalUploadTempDirCreated(t *testing.T) {
id := fmt.Sprintf("tiutdc%v", time.Now().Unix()) id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
runInstance.newCacheFs(t, remoteName, id, false, true, rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)}) map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)})
defer runInstance.cleanupFs(t, rootFs, boltDb)
_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id)) _, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
require.NoError(t, err) require.NoError(t, err)
@@ -61,7 +63,9 @@ func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltD
func TestInternalUploadQueueOneFileNoRest(t *testing.T) { func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix()) id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"}) map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb) testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
} }
@@ -69,15 +73,19 @@ func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
func TestInternalUploadQueueOneFileWithRest(t *testing.T) { func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix()) id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"}) map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb) testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
} }
func TestInternalUploadMoveExistingFile(t *testing.T) { func TestInternalUploadMoveExistingFile(t *testing.T) {
id := fmt.Sprintf("tiumef%v", time.Now().Unix()) id := fmt.Sprintf("tiumef%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"}) map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "one") err := rootFs.Mkdir(context.Background(), "one")
require.NoError(t, err) require.NoError(t, err)
@@ -111,8 +119,10 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {
func TestInternalUploadTempPathCleaned(t *testing.T) { func TestInternalUploadTempPathCleaned(t *testing.T) {
id := fmt.Sprintf("tiutpc%v", time.Now().Unix()) id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"}) map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "one") err := rootFs.Mkdir(context.Background(), "one")
require.NoError(t, err) require.NoError(t, err)
@@ -152,8 +162,10 @@ func TestInternalUploadTempPathCleaned(t *testing.T) {
func TestInternalUploadQueueMoreFiles(t *testing.T) { func TestInternalUploadQueueMoreFiles(t *testing.T) {
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix()) id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"}) map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "test") err := rootFs.Mkdir(context.Background(), "test")
require.NoError(t, err) require.NoError(t, err)
@@ -201,7 +213,9 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
func TestInternalUploadTempFileOperations(t *testing.T) { func TestInternalUploadTempFileOperations(t *testing.T) {
id := "tiutfo" id := "tiutfo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"}) map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads() boltDb.PurgeTempUploads()
@@ -329,7 +343,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
func TestInternalUploadUploadingFileOperations(t *testing.T) { func TestInternalUploadUploadingFileOperations(t *testing.T) {
id := "tiuufo" id := "tiuufo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"}) map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads() boltDb.PurgeTempUploads()


@@ -8,7 +8,7 @@ import (
"crypto/tls" "crypto/tls"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
"strings" "strings"
@@ -167,7 +167,7 @@ func (p *plexConnector) listenWebsocket() {
continue continue
} }
var data []byte var data []byte
data, err = io.ReadAll(resp.Body) data, err = ioutil.ReadAll(resp.Body)
if err != nil { if err != nil {
continue continue
} }
@@ -213,7 +213,7 @@ func (p *plexConnector) authenticate() error {
var data map[string]interface{} var data map[string]interface{}
err = json.NewDecoder(resp.Body).Decode(&data) err = json.NewDecoder(resp.Body).Decode(&data)
if err != nil { if err != nil {
return fmt.Errorf("failed to obtain token: %w", err) return fmt.Errorf("failed to obtain token: %v", err)
} }
tokenGen, ok := get(data, "user", "authToken") tokenGen, ok := get(data, "user", "authToken")
if !ok { if !ok {
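
Several hunks in this file and in db.go below toggle between the %w and %v error verbs. The practical difference is whether the cause stays inspectable with errors.Is and errors.As, as this sketch shows:

package main

import (
	"errors"
	"fmt"
	"io/fs"
)

func main() {
	wrapped := fmt.Errorf("failed to obtain token: %w", fs.ErrNotExist)
	plain := fmt.Errorf("failed to obtain token: %v", fs.ErrNotExist)
	fmt.Println(errors.Is(wrapped, fs.ErrNotExist)) // true: cause preserved
	fmt.Println(errors.Is(plain, fs.ErrNotExist))   // false: only the text remains
}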


@@ -76,7 +76,10 @@ func (m *Memory) CleanChunksByAge(chunkAge time.Duration) {
// CleanChunksByNeed will cleanup chunks after the FS passes a specific chunk // CleanChunksByNeed will cleanup chunks after the FS passes a specific chunk
func (m *Memory) CleanChunksByNeed(offset int64) { func (m *Memory) CleanChunksByNeed(offset int64) {
for key := range m.db.Items() { var items map[string]cache.Item
items = m.db.Items()
for key := range items {
sepIdx := strings.LastIndex(key, "-") sepIdx := strings.LastIndex(key, "-")
keyOffset, err := strconv.ParseInt(key[sepIdx+1:], 10, 64) keyOffset, err := strconv.ParseInt(key[sepIdx+1:], 10, 64)
if err != nil { if err != nil {
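
The CleanChunksByNeed simplification is safe because Go evaluates a range expression exactly once before iteration starts, so the intermediate items variable added nothing. A sketch:

package main

import "fmt"

func getItems() map[string]int {
	fmt.Println("getItems called") // prints once, not per iteration
	return map[string]int{"chunk-0": 1, "chunk-1024": 2}
}

func main() {
	for key := range getItems() {
		fmt.Println(key)
	}
}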


@@ -9,6 +9,7 @@ import (
"encoding/binary" "encoding/binary"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io/ioutil"
"os" "os"
"path" "path"
"strconv" "strconv"
@@ -249,7 +250,7 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
if val != nil { if val != nil {
err := json.Unmarshal(val, cachedDir) err := json.Unmarshal(val, cachedDir)
if err != nil { if err != nil {
return fmt.Errorf("error during unmarshalling obj: %w", err) return fmt.Errorf("error during unmarshalling obj: %v", err)
} }
} else { } else {
return fmt.Errorf("missing cached dir: %v", cachedDir) return fmt.Errorf("missing cached dir: %v", cachedDir)
@@ -455,7 +456,10 @@ func (b *Persistent) HasEntry(remote string) bool {
return fmt.Errorf("couldn't find object (%v)", remote) return fmt.Errorf("couldn't find object (%v)", remote)
}) })
return err == nil if err == nil {
return true
}
return false
} }
// HasChunk confirms the existence of a single chunk of an object // HasChunk confirms the existence of a single chunk of an object
@@ -472,7 +476,7 @@ func (b *Persistent) GetChunk(cachedObject *Object, offset int64) ([]byte, error
var data []byte var data []byte
fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10)) fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10))
data, err := os.ReadFile(fp) data, err := ioutil.ReadFile(fp)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -485,7 +489,7 @@ func (b *Persistent) AddChunk(fp string, data []byte, offset int64) error {
_ = os.MkdirAll(path.Join(b.dataPath, fp), os.ModePerm) _ = os.MkdirAll(path.Join(b.dataPath, fp), os.ModePerm)
filePath := path.Join(b.dataPath, fp, strconv.FormatInt(offset, 10)) filePath := path.Join(b.dataPath, fp, strconv.FormatInt(offset, 10))
err := os.WriteFile(filePath, data, os.ModePerm) err := ioutil.WriteFile(filePath, data, os.ModePerm)
if err != nil { if err != nil {
return err return err
} }
@@ -550,7 +554,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
err := b.db.Update(func(tx *bolt.Tx) error { err := b.db.Update(func(tx *bolt.Tx) error {
dataTsBucket := tx.Bucket([]byte(DataTsBucket)) dataTsBucket := tx.Bucket([]byte(DataTsBucket))
if dataTsBucket == nil { if dataTsBucket == nil {
return fmt.Errorf("couldn't open (%v) bucket", DataTsBucket) return fmt.Errorf("Couldn't open (%v) bucket", DataTsBucket)
} }
// iterate through ts // iterate through ts
c := dataTsBucket.Cursor() c := dataTsBucket.Cursor()
@@ -900,16 +904,16 @@ func (b *Persistent) rollbackPendingUpload(remote string) error {
v := bucket.Get([]byte(remote)) v := bucket.Get([]byte(remote))
err = json.Unmarshal(v, tempObj) err = json.Unmarshal(v, tempObj)
if err != nil { if err != nil {
return fmt.Errorf("pending upload (%v) not found: %w", remote, err) return fmt.Errorf("pending upload (%v) not found %v", remote, err)
} }
tempObj.Started = false tempObj.Started = false
v2, err := json.Marshal(tempObj) v2, err := json.Marshal(tempObj)
if err != nil { if err != nil {
return fmt.Errorf("pending upload not updated: %w", err) return fmt.Errorf("pending upload not updated %v", err)
} }
err = bucket.Put([]byte(tempObj.DestPath), v2) err = bucket.Put([]byte(tempObj.DestPath), v2)
if err != nil { if err != nil {
return fmt.Errorf("pending upload not updated: %w", err) return fmt.Errorf("pending upload not updated %v", err)
} }
return nil return nil
}) })
@@ -965,11 +969,11 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
} }
v2, err := json.Marshal(tempObj) v2, err := json.Marshal(tempObj)
if err != nil { if err != nil {
return fmt.Errorf("pending upload not updated: %w", err) return fmt.Errorf("pending upload not updated %v", err)
} }
err = bucket.Put([]byte(tempObj.DestPath), v2) err = bucket.Put([]byte(tempObj.DestPath), v2)
if err != nil { if err != nil {
return fmt.Errorf("pending upload not updated: %w", err) return fmt.Errorf("pending upload not updated %v", err)
} }
return nil return nil


@@ -12,6 +12,7 @@ import (
"fmt" "fmt"
gohash "hash" gohash "hash"
"io" "io"
"io/ioutil"
"math/rand" "math/rand"
"path" "path"
"regexp" "regexp"
@@ -31,6 +32,7 @@ import (
"github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/operations"
) )
//
// Chunker's composite files have one or more chunks // Chunker's composite files have one or more chunks
// and optional metadata object. If it's present, // and optional metadata object. If it's present,
// meta object is named after the original file. // meta object is named after the original file.
@@ -63,7 +65,7 @@ import (
// length of 13 decimals it makes a 7-digit base-36 number. // length of 13 decimals it makes a 7-digit base-36 number.
// //
// When transactions is set to the norename style, data chunks will // When transactions is set to the norename style, data chunks will
// keep their temporary chunk names (with the transaction identifier // keep their temporary chunk names (with the transacion identifier
// suffix). To distinguish them from temporary chunks, the txn field // suffix). To distinguish them from temporary chunks, the txn field
// of the metadata file is set to match the transaction identifier of // of the metadata file is set to match the transaction identifier of
// the data chunks. // the data chunks.
@@ -77,6 +79,7 @@ import (
// Metadata format v1 does not define any control chunk types, // Metadata format v1 does not define any control chunk types,
// they are currently ignored aka reserved. // they are currently ignored aka reserved.
// In future they can be used to implement resumable uploads etc. // In future they can be used to implement resumable uploads etc.
//
const ( const (
ctrlTypeRegStr = `[a-z][a-z0-9]{2,6}` ctrlTypeRegStr = `[a-z][a-z0-9]{2,6}`
tempSuffixFormat = `_%04s` tempSuffixFormat = `_%04s`
@@ -539,6 +542,7 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
// //
// xactID is a transaction identifier. Empty xactID denotes active chunk, // xactID is a transaction identifier. Empty xactID denotes active chunk,
// otherwise temporary chunk name is produced. // otherwise temporary chunk name is produced.
//
func (f *Fs) makeChunkName(filePath string, chunkNo int, ctrlType, xactID string) string { func (f *Fs) makeChunkName(filePath string, chunkNo int, ctrlType, xactID string) string {
dir, parentName := path.Split(filePath) dir, parentName := path.Split(filePath)
var name, tempSuffix string var name, tempSuffix string
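
For orientation, a hypothetical illustration of the naming scheme the comments above describe, assuming the default *.rclone_chunk.### pattern and the _%04s temporary suffix from the const block; chunkName is a simplified stand-in for makeChunkName, not the backend's implementation:

package main

import (
	"fmt"
	"strconv"
	"time"
)

// chunkName sketches the convention: active chunks carry only a number,
// temporary chunks also carry a base-36 transaction identifier.
func chunkName(filePath string, chunkNo int, xactID string) string {
	name := fmt.Sprintf("%s.rclone_chunk.%03d", filePath, chunkNo+1)
	if xactID != "" {
		name += fmt.Sprintf("_%04s", xactID) // temporary chunk suffix
	}
	return name
}

func main() {
	fmt.Println(chunkName("dir/file.txt", 0, "")) // dir/file.txt.rclone_chunk.001
	txn := strconv.FormatInt(time.Now().UnixMilli(), 36)
	fmt.Println(chunkName("dir/file.txt", 0, txn)) // temporary variant
}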
@@ -704,6 +708,7 @@ func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err
// directory together with dead chunks. // directory together with dead chunks.
// In future a flag named like `--chunker-list-hidden` may be added to // In future a flag named like `--chunker-list-hidden` may be added to
// rclone that will tell List to reveal hidden chunks. // rclone that will tell List to reveal hidden chunks.
//
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
entries, err = f.base.List(ctx, dir) entries, err = f.base.List(ctx, dir)
if err != nil { if err != nil {
@@ -863,6 +868,7 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
// Note that chunker prefers analyzing file names rather than reading // Note that chunker prefers analyzing file names rather than reading
// the content of meta object assuming that directory scans are fast // the content of meta object assuming that directory scans are fast
// but opening even a small file can be slow on some backends. // but opening even a small file can be slow on some backends.
//
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.scanObject(ctx, remote, false) return f.scanObject(ctx, remote, false)
} }
@@ -1037,7 +1043,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
if err != nil { if err != nil {
return err return err
} }
metadata, err := io.ReadAll(reader) metadata, err := ioutil.ReadAll(reader)
_ = reader.Close() // ensure file handle is freed on windows _ = reader.Close() // ensure file handle is freed on windows
if err != nil { if err != nil {
return err return err
@@ -1078,7 +1084,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
// readXactID returns the transaction ID stored in the passed metadata object // readXactID returns the transaction ID stored in the passed metadata object
func (o *Object) readXactID(ctx context.Context) (xactID string, err error) { func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
// if xactID has already been read and cached return it now // if xactID has already been read and cahced return it now
if o.xIDCached { if o.xIDCached {
return o.xactID, nil return o.xactID, nil
} }
@@ -1096,7 +1102,7 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
if err != nil { if err != nil {
return "", err return "", err
} }
data, err := io.ReadAll(reader) data, err := ioutil.ReadAll(reader)
_ = reader.Close() // ensure file handle is freed on windows _ = reader.Close() // ensure file handle is freed on windows
if err != nil { if err != nil {
return "", err return "", err
@@ -1580,6 +1586,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// This command will chain to `purge` from wrapped remote. // This command will chain to `purge` from wrapped remote.
// As a result it removes not only composite chunker files with their // As a result it removes not only composite chunker files with their
// active chunks but also all hidden temporary chunks in the directory. // active chunks but also all hidden temporary chunks in the directory.
//
func (f *Fs) Purge(ctx context.Context, dir string) error { func (f *Fs) Purge(ctx context.Context, dir string) error {
do := f.base.Features().Purge do := f.base.Features().Purge
if do == nil { if do == nil {
@@ -1621,6 +1628,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// Unsupported control chunks will get re-picked by a more recent // Unsupported control chunks will get re-picked by a more recent
// rclone version with unexpected results. This can be helped by // rclone version with unexpected results. This can be helped by
// the `delete hidden` flag above or at least the user has been warned. // the `delete hidden` flag above or at least the user has been warned.
//
func (o *Object) Remove(ctx context.Context) (err error) { func (o *Object) Remove(ctx context.Context) (err error) {
if err := o.f.forbidChunk(o, o.Remote()); err != nil { if err := o.f.forbidChunk(o, o.Remote()); err != nil {
// operations.Move can still call Remove if chunker's Move refuses // operations.Move can still call Remove if chunker's Move refuses
@@ -1796,9 +1804,9 @@ func (f *Fs) okForServerSide(ctx context.Context, src fs.Object, opName string)
// Copy src to this remote using server-side copy operations. // Copy src to this remote using server-side copy operations.
// //
// This is stored with the remote path given. // This is stored with the remote path given
// //
// It returns the destination Object and a possible error. // It returns the destination Object and a possible error
// //
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
@@ -1817,9 +1825,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Move src to this remote using server-side move operations. // Move src to this remote using server-side move operations.
// //
// This is stored with the remote path given. // This is stored with the remote path given
// //
// It returns the destination Object and a possible error. // It returns the destination Object and a possible error
// //
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
@@ -2117,6 +2125,7 @@ func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
// file, then tries to read it from metadata. This in theory // file, then tries to read it from metadata. This in theory
// handles the unusual case when a small file has been tampered // handles the unusual case when a small file has been tampered
// on the level of wrapped remote but chunker is unaware of that. // on the level of wrapped remote but chunker is unaware of that.
//
func (o *Object) Hash(ctx context.Context, hashType hash.Type) (string, error) { func (o *Object) Hash(ctx context.Context, hashType hash.Type) (string, error) {
if err := o.readMetadata(ctx); err != nil { if err := o.readMetadata(ctx); err != nil {
return "", err // valid metadata is required to get hash, abort return "", err // valid metadata is required to get hash, abort
@@ -2405,6 +2414,7 @@ type metaSimpleJSON struct {
// - for files larger than chunk size // - for files larger than chunk size
// - if file contents can be mistaken as meta object // - if file contents can be mistaken as meta object
// - if consistent hashing is On but wrapped remote can't provide given hash // - if consistent hashing is On but wrapped remote can't provide given hash
//
func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1, xactID string) ([]byte, error) { func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1, xactID string) ([]byte, error) {
version := metadataVersion version := metadataVersion
if xactID == "" && version == 2 { if xactID == "" && version == 2 {
@@ -2437,6 +2447,7 @@ func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1,
// New format will have a higher version number and cannot be correctly // New format will have a higher version number and cannot be correctly
// handled by current implementation. // handled by current implementation.
// The version check below will then explicitly ask user to upgrade rclone. // The version check below will then explicitly ask user to upgrade rclone.
//
func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte) (info *ObjectInfo, madeByChunker bool, err error) { func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte) (info *ObjectInfo, madeByChunker bool, err error) {
// Be strict about JSON format // Be strict about JSON format
// to reduce possibility that a random small file resembles metadata. // to reduce possibility that a random small file resembles metadata.
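
For context, a hedged sketch of what the simple JSON metadata handled in these hunks could look like, using the field names visible around metaSimpleJSON; the authoritative wire format is defined by the backend, not by this sketch:

package main

import (
	"encoding/json"
	"fmt"
)

type metaSimpleJSON struct {
	Version  int    `json:"ver"`
	Size     int64  `json:"size"`
	ChunkNum int    `json:"nchunks"`
	MD5      string `json:"md5"`
	SHA1     string `json:"sha1"`
	XactID   string `json:"txn,omitempty"` // only meaningful in format v2
}

func main() {
	data, _ := json.Marshal(metaSimpleJSON{
		Version: 2, Size: 1234567, ChunkNum: 2,
		MD5:    "9e107d9d372bb6826bd81d3542a419d6",
		SHA1:   "2fd4e1c67a2d28fced849ee1bb76e7391b93eb12",
		XactID: "0001",
	})
	fmt.Println(string(data))
}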


@@ -5,7 +5,7 @@ import (
"context" "context"
"flag" "flag"
"fmt" "fmt"
"io" "io/ioutil"
"path" "path"
"regexp" "regexp"
"strings" "strings"
@@ -59,7 +59,7 @@ var mtime1 = fstest.Time("2001-02-03T04:05:06.499999999Z")
func testPutFile(ctx context.Context, t *testing.T, f fs.Fs, name, contents, message string, check bool) fs.Object { func testPutFile(ctx context.Context, t *testing.T, f fs.Fs, name, contents, message string, check bool) fs.Object {
item := fstest.Item{Path: name, ModTime: mtime1} item := fstest.Item{Path: name, ModTime: mtime1}
obj := fstests.PutTestContents(ctx, t, f, &item, contents, check) _, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
assert.NotNil(t, obj, message) assert.NotNil(t, obj, message)
return obj return obj
} }
@@ -413,7 +413,7 @@ func testSmallFileInternals(t *testing.T, f *Fs) {
if r == nil { if r == nil {
return return
} }
data, err := io.ReadAll(r) data, err := ioutil.ReadAll(r)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, contents, string(data)) assert.Equal(t, contents, string(data))
_ = r.Close() _ = r.Close()
@@ -440,7 +440,7 @@ func testSmallFileInternals(t *testing.T, f *Fs) {
checkSmallFile := func(name, contents string) { checkSmallFile := func(name, contents string) {
filename := path.Join(dir, name) filename := path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime} item := fstest.Item{Path: filename, ModTime: modTime}
put := fstests.PutTestContents(ctx, t, f, &item, contents, false) _, put := fstests.PutTestContents(ctx, t, f, &item, contents, false)
assert.NotNil(t, put) assert.NotNil(t, put)
checkSmallFileInternals(put) checkSmallFileInternals(put)
checkContents(put, contents) checkContents(put, contents)
@@ -489,7 +489,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
newFile := func(name string) fs.Object { newFile := func(name string) fs.Object {
item := fstest.Item{Path: path.Join(dir, name), ModTime: modTime} item := fstest.Item{Path: path.Join(dir, name), ModTime: modTime}
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true) _, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj) require.NotNil(t, obj)
return obj return obj
} }
@@ -538,7 +538,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
assert.NoError(t, err) assert.NoError(t, err)
var chunkContents []byte var chunkContents []byte
assert.NotPanics(t, func() { assert.NotPanics(t, func() {
chunkContents, err = io.ReadAll(r) chunkContents, err = ioutil.ReadAll(r)
_ = r.Close() _ = r.Close()
}) })
assert.NoError(t, err) assert.NoError(t, err)
@@ -573,7 +573,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
r, err = willyChunk.Open(ctx) r, err = willyChunk.Open(ctx)
assert.NoError(t, err) assert.NoError(t, err)
assert.NotPanics(t, func() { assert.NotPanics(t, func() {
_, err = io.ReadAll(r) _, err = ioutil.ReadAll(r)
_ = r.Close() _ = r.Close()
}) })
assert.NoError(t, err) assert.NoError(t, err)
@@ -599,7 +599,7 @@ func testChunkNumberOverflow(t *testing.T, f *Fs) {
newFile := func(f fs.Fs, name string) (obj fs.Object, filename string, txnID string) { newFile := func(f fs.Fs, name string) (obj fs.Object, filename string, txnID string) {
filename = path.Join(dir, name) filename = path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime} item := fstest.Item{Path: filename, ModTime: modTime}
obj = fstests.PutTestContents(ctx, t, f, &item, contents, true) _, obj = fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj) require.NotNil(t, obj)
if chunkObj, isChunkObj := obj.(*Object); isChunkObj { if chunkObj, isChunkObj := obj.(*Object); isChunkObj {
txnID = chunkObj.xactID txnID = chunkObj.xactID
@@ -672,7 +672,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
assert.NoError(t, err, "open "+description) assert.NoError(t, err, "open "+description)
assert.NotNil(t, r, "open stream of "+description) assert.NotNil(t, r, "open stream of "+description)
if err == nil && r != nil { if err == nil && r != nil {
data, err := io.ReadAll(r) data, err := ioutil.ReadAll(r)
assert.NoError(t, err, "read all of "+description) assert.NoError(t, err, "read all of "+description)
assert.Equal(t, contents, string(data), description+" contents is ok") assert.Equal(t, contents, string(data), description+" contents is ok")
_ = r.Close() _ = r.Close()
@@ -716,7 +716,7 @@ func testFutureProof(t *testing.T, f *Fs) {
name = f.makeChunkName(name, part-1, "", "") name = f.makeChunkName(name, part-1, "", "")
} }
item := fstest.Item{Path: name, ModTime: modTime} item := fstest.Item{Path: name, ModTime: modTime}
obj := fstests.PutTestContents(ctx, t, f.base, &item, data, true) _, obj := fstests.PutTestContents(ctx, t, f.base, &item, data, true)
assert.NotNil(t, obj, msg) assert.NotNil(t, obj, msg)
} }
@@ -758,8 +758,8 @@ func testFutureProof(t *testing.T, f *Fs) {
assert.Error(t, err) assert.Error(t, err)
// Rcat must fail // Rcat must fail
in := io.NopCloser(bytes.NewBufferString("abc")) in := ioutil.NopCloser(bytes.NewBufferString("abc"))
robj, err := operations.Rcat(ctx, f, file, in, modTime, nil) robj, err := operations.Rcat(ctx, f, file, in, modTime)
assert.Nil(t, robj) assert.Nil(t, robj)
assert.NotNil(t, err) assert.NotNil(t, err)
if err != nil { if err != nil {
@@ -790,7 +790,7 @@ func testBackwardsCompatibility(t *testing.T, f *Fs) {
newFile := func(f fs.Fs, name string) (fs.Object, string) { newFile := func(f fs.Fs, name string) (fs.Object, string) {
filename := path.Join(dir, name) filename := path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime} item := fstest.Item{Path: filename, ModTime: modTime}
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true) _, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj) require.NotNil(t, obj)
return obj, filename return obj, filename
} }
@@ -844,7 +844,7 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z") modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
item := fstest.Item{Path: "movefile", ModTime: modTime} item := fstest.Item{Path: "movefile", ModTime: modTime}
contents := "abcdef" contents := "abcdef"
file := fstests.PutTestContents(ctx, t, fs1, &item, contents, true) _, file := fstests.PutTestContents(ctx, t, fs1, &item, contents, true)
dstOverwritten, _ := fs2.NewObject(ctx, "movefile") dstOverwritten, _ := fs2.NewObject(ctx, "movefile")
dstFile, err := operations.Move(ctx, fs2, dstOverwritten, "movefile", file) dstFile, err := operations.Move(ctx, fs2, dstOverwritten, "movefile", file)
@@ -854,7 +854,7 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
r, err := dstFile.Open(ctx) r, err := dstFile.Open(ctx)
assert.NoError(t, err) assert.NoError(t, err)
assert.NotNil(t, r) assert.NotNil(t, r)
data, err := io.ReadAll(r) data, err := ioutil.ReadAll(r)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, contents, string(data)) assert.Equal(t, contents, string(data))
_ = r.Close() _ = r.Close()


@@ -35,7 +35,6 @@ func TestIntegration(t *testing.T) {
"MimeType", "MimeType",
"GetTier", "GetTier",
"SetTier", "SetTier",
"Metadata",
}, },
UnimplementableFsMethods: []string{ UnimplementableFsMethods: []string{
"PublicLink", "PublicLink",
@@ -54,7 +53,6 @@ func TestIntegration(t *testing.T) {
{Name: name, Key: "type", Value: "chunker"}, {Name: name, Key: "type", Value: "chunker"},
{Name: name, Key: "remote", Value: tempDir}, {Name: name, Key: "remote", Value: tempDir},
} }
opt.QuickTestOK = true
} }
fstests.Run(t, &opt) fstests.Run(t, &opt)
} }


@@ -1,992 +0,0 @@
// Package combine implements a backend to combine multiple remotes in a directory tree
package combine
/*
Have API to add/remove branches in the combine
*/
import (
"context"
"errors"
"fmt"
"io"
"path"
"strings"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"golang.org/x/sync/errgroup"
)
// Register with Fs
func init() {
fsi := &fs.RegInfo{
Name: "combine",
Description: "Combine several remotes into one",
NewFs: NewFs,
MetadataInfo: &fs.MetadataInfo{
Help: `Any metadata supported by the underlying remote is read and written.`,
},
Options: []fs.Option{{
Name: "upstreams",
Help: `Upstreams for combining
These should be in the form
dir=remote:path dir2=remote2:path
Where before the = is specified the root directory and after is the remote to
put there.
Embedded spaces can be added using quotes
"dir=remote:path with space" "dir2=remote2:path with space"
`,
Required: true,
Default: fs.SpaceSepList(nil),
}},
}
fs.Register(fsi)
}
// Options defines the configuration for this backend
type Options struct {
Upstreams fs.SpaceSepList `config:"upstreams"`
}
// Fs represents a combine of upstreams
type Fs struct {
name string // name of this remote
features *fs.Features // optional features
opt Options // options for this Fs
root string // the path we are working on
hashSet hash.Set // common hashes
when time.Time // directory times
upstreams map[string]*upstream // map of upstreams
}
// adjustment stores the info to add a prefix to a path or chop characters off
type adjustment struct {
root string
rootSlash string
mountpoint string
mountpointSlash string
}
// newAdjustment makes a new path adjustment adjusting between mountpoint and root
//
// mountpoint is the point the upstream is mounted and root is the combine root
func newAdjustment(root, mountpoint string) (a adjustment) {
return adjustment{
root: root,
rootSlash: root + "/",
mountpoint: mountpoint,
mountpointSlash: mountpoint + "/",
}
}
var errNotUnderRoot = errors.New("file not under root")
// do makes the adjustment on s, mapping an upstream path into a combine path
func (a *adjustment) do(s string) (string, error) {
absPath := join(a.mountpoint, s)
if a.root == "" {
return absPath, nil
}
if absPath == a.root {
return "", nil
}
if !strings.HasPrefix(absPath, a.rootSlash) {
return "", errNotUnderRoot
}
return absPath[len(a.rootSlash):], nil
}
// undo makes the adjustment on s, mapping a combine path into an upstream path
func (a *adjustment) undo(s string) (string, error) {
absPath := join(a.root, s)
if absPath == a.mountpoint {
return "", nil
}
if !strings.HasPrefix(absPath, a.mountpointSlash) {
return "", errNotUnderRoot
}
return absPath[len(a.mountpointSlash):], nil
}
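
A standalone sketch of the adjustment above, reduced to the mountpoint-only case (root ""): do maps an upstream path into the combined view and undo maps it back. Error handling and the root prefix are omitted here:

package main

import (
	"fmt"
	"path"
	"strings"
)

func do(mountpoint, s string) string { return path.Join(mountpoint, s) }

func undo(mountpoint, s string) string {
	if s == mountpoint {
		return ""
	}
	return strings.TrimPrefix(s, mountpoint+"/")
}

func main() {
	fmt.Println(do("photos", "2021/a.jpg"))          // photos/2021/a.jpg
	fmt.Println(undo("photos", "photos/2021/a.jpg")) // 2021/a.jpg
}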
// upstream represents an upstream Fs
type upstream struct {
f fs.Fs
parent *Fs
dir string // directory the upstream is mounted
pathAdjustment adjustment // how to fiddle with the path
}
// Create an upstream from the directory it is mounted on and the remote
func (f *Fs) newUpstream(ctx context.Context, dir, remote string) (*upstream, error) {
uFs, err := cache.Get(ctx, remote)
if err == fs.ErrorIsFile {
return nil, fmt.Errorf("can't combine files yet, only directories %q: %w", remote, err)
}
if err != nil {
return nil, fmt.Errorf("failed to create upstream %q: %w", remote, err)
}
u := &upstream{
f: uFs,
parent: f,
dir: dir,
pathAdjustment: newAdjustment(f.root, dir),
}
cache.PinUntilFinalized(u.f, u)
return u, nil
}
// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs.Fs, err error) {
// defer log.Trace(nil, "name=%q, root=%q, m=%v", name, root, m)("f=%+v, err=%v", &outFs, &err)
// Parse config into Options struct
opt := new(Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// Backward compatible to old config
if len(opt.Upstreams) == 0 {
return nil, errors.New("combine can't point to an empty upstream - check the value of the upstreams setting")
}
for _, u := range opt.Upstreams {
if strings.HasPrefix(u, name+":") {
return nil, errors.New("can't point combine remote at itself - check the value of the upstreams setting")
}
}
isDir := false
for strings.HasSuffix(root, "/") {
root = root[:len(root)-1]
isDir = true
}
f := &Fs{
name: name,
root: root,
opt: *opt,
upstreams: make(map[string]*upstream, len(opt.Upstreams)),
when: time.Now(),
}
g, gCtx := errgroup.WithContext(ctx)
var mu sync.Mutex
for _, upstream := range opt.Upstreams {
upstream := upstream
g.Go(func() (err error) {
equal := strings.IndexRune(upstream, '=')
if equal < 0 {
return fmt.Errorf("no \"=\" in upstream definition %q", upstream)
}
dir, remote := upstream[:equal], upstream[equal+1:]
if dir == "" {
return fmt.Errorf("empty dir in upstream definition %q", upstream)
}
if remote == "" {
return fmt.Errorf("empty remote in upstream definition %q", upstream)
}
if strings.ContainsRune(dir, '/') {
return fmt.Errorf("dirs can't contain / (yet): %q", dir)
}
u, err := f.newUpstream(gCtx, dir, remote)
if err != nil {
return err
}
mu.Lock()
if _, found := f.upstreams[dir]; found {
err = fmt.Errorf("duplicate directory name %q", dir)
} else {
f.upstreams[dir] = u
}
mu.Unlock()
return err
})
}
err = g.Wait()
if err != nil {
return nil, err
}
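
The parsing rule enforced in the loop above, isolated as a runnable sketch (parseUpstream is a hypothetical helper): each definition splits at the first '=', and both halves must be non-empty:

package main

import (
	"fmt"
	"strings"
)

func parseUpstream(def string) (dir, remote string, err error) {
	equal := strings.IndexRune(def, '=')
	if equal < 0 {
		return "", "", fmt.Errorf("no \"=\" in upstream definition %q", def)
	}
	dir, remote = def[:equal], def[equal+1:]
	if dir == "" || remote == "" {
		return "", "", fmt.Errorf("empty dir or remote in %q", def)
	}
	return dir, remote, nil
}

func main() {
	dir, remote, _ := parseUpstream("documents=drive:Documents")
	fmt.Println(dir, remote) // documents drive:Documents
}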
// check features
var features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: false,
ReadMimeType: true,
WriteMimeType: true,
CanHaveEmptyDirectories: true,
BucketBased: true,
SetTier: true,
GetTier: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
}).Fill(ctx, f)
canMove := true
for _, u := range f.upstreams {
features = features.Mask(ctx, u.f) // Mask all upstream fs
if !operations.CanServerSideMove(u.f) {
canMove = false
}
}
// We can move if all remotes support Move or Copy
if canMove {
features.Move = f.Move
}
// Enable ListR when upstreams either support ListR or are local,
// but not when all upstreams are local
if features.ListR == nil {
for _, u := range f.upstreams {
if u.f.Features().ListR != nil {
features.ListR = f.ListR
} else if !u.f.Features().IsLocal {
features.ListR = nil
break
}
}
}
// Enable Purge when any upstreams support it
if features.Purge == nil {
for _, u := range f.upstreams {
if u.f.Features().Purge != nil {
features.Purge = f.Purge
break
}
}
}
// Enable Shutdown when any upstreams support it
if features.Shutdown == nil {
for _, u := range f.upstreams {
if u.f.Features().Shutdown != nil {
features.Shutdown = f.Shutdown
break
}
}
}
// Enable DirCacheFlush when any upstreams support it
if features.DirCacheFlush == nil {
for _, u := range f.upstreams {
if u.f.Features().DirCacheFlush != nil {
features.DirCacheFlush = f.DirCacheFlush
break
}
}
}
// Enable ChangeNotify when any upstreams support it
if features.ChangeNotify == nil {
for _, u := range f.upstreams {
if u.f.Features().ChangeNotify != nil {
features.ChangeNotify = f.ChangeNotify
break
}
}
}
f.features = features
// Get common intersection of hashes
var hashSet hash.Set
var first = true
for _, u := range f.upstreams {
if first {
hashSet = u.f.Hashes()
first = false
} else {
hashSet = hashSet.Overlap(u.f.Hashes())
}
}
f.hashSet = hashSet
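
The loop above keeps only the hashes every upstream supports. rclone's hash.Set is a bitmask, so Overlap behaves like the bitwise AND in this sketch:

package main

import "fmt"

const (
	MD5 uint = 1 << iota
	SHA1
	CRC32
)

func main() {
	upstreams := []uint{MD5 | SHA1, MD5 | CRC32, MD5 | SHA1 | CRC32}
	common := upstreams[0]
	for _, s := range upstreams[1:] {
		common &= s // intersection, like Set.Overlap
	}
	fmt.Println(common == MD5) // true: only MD5 is common to all
}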
// Check to see if the root is actually a file
if f.root != "" && !isDir {
_, err := f.NewObject(ctx, "")
if err != nil {
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile || err == fs.ErrorIsDir {
// File doesn't exist or is a directory so return old f
return f, nil
}
return nil, err
}
// Check to see if the root path is actually an existing file
f.root = path.Dir(f.root)
if f.root == "." {
f.root = ""
}
// Adjust path adjustment to remove leaf
for _, u := range f.upstreams {
u.pathAdjustment = newAdjustment(f.root, u.dir)
}
return f, fs.ErrorIsFile
}
return f, nil
}
// Run a function over all the upstreams in parallel
func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream) error) error {
g, gCtx := errgroup.WithContext(ctx)
for _, u := range f.upstreams {
u := u
g.Go(func() (err error) {
return fn(gCtx, u)
})
}
return g.Wait()
}
// join the elements together but, unlike path.Join, return an empty string
func join(elem ...string) string {
result := path.Join(elem...)
if result == "." {
return ""
}
if len(result) > 0 && result[0] == '/' {
result = result[1:]
}
return result
}
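
A quick demonstration of the two ways this helper differs from path.Join, reusing it verbatim:

package main

import (
	"fmt"
	"path"
)

func join(elem ...string) string {
	result := path.Join(elem...)
	if result == "." {
		return ""
	}
	if len(result) > 0 && result[0] == '/' {
		result = result[1:]
	}
	return result
}

func main() {
	fmt.Printf("%q\n", path.Join("a", "..")) // "."
	fmt.Printf("%q\n", join("a", ".."))      // "" for the combine root
	fmt.Printf("%q\n", join("/a", "b"))      // "a/b", leading slash stripped
}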
// find the upstream for the remote passed in, returning the upstream and the adjusted path
func (f *Fs) findUpstream(remote string) (u *upstream, uRemote string, err error) {
// defer log.Trace(remote, "")("f=%v, uRemote=%q, err=%v", &u, &uRemote, &err)
for _, u := range f.upstreams {
uRemote, err = u.pathAdjustment.undo(remote)
if err == nil {
return u, uRemote, nil
}
}
return nil, "", fmt.Errorf("combine for remote %q: %w", remote, fs.ErrorDirNotFound)
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("combine root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Rmdir removes the directory dir if it is empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// The root always exists
if f.root == "" && dir == "" {
return nil
}
u, uRemote, err := f.findUpstream(dir)
if err != nil {
return err
}
return u.f.Rmdir(ctx, uRemote)
}
// Hashes returns the intersection of the hash types supported by all upstreams
func (f *Fs) Hashes() hash.Set {
return f.hashSet
}
// Mkdir makes the directory dir
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// The root always exists
if f.root == "" && dir == "" {
return nil
}
u, uRemote, err := f.findUpstream(dir)
if err != nil {
return err
}
return u.f.Mkdir(ctx, uRemote)
}
// purge the upstream or fall back to a slower method
func (u *upstream) purge(ctx context.Context, dir string) (err error) {
if do := u.f.Features().Purge; do != nil {
err = do(ctx, dir)
} else {
err = operations.Purge(ctx, u.f, dir)
}
return err
}
// Purge all files in the directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error {
if f.root == "" && dir == "" {
return f.multithread(ctx, func(ctx context.Context, u *upstream) error {
return u.purge(ctx, "")
})
}
u, uRemote, err := f.findUpstream(dir)
if err != nil {
return err
}
return u.purge(ctx, uRemote)
}
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
dstU, dstRemote, err := f.findUpstream(remote)
if err != nil {
return nil, err
}
do := dstU.f.Features().Copy
if do == nil {
return nil, fs.ErrorCantCopy
}
o, err := do(ctx, srcObj.Object, dstRemote)
if err != nil {
return nil, err
}
return dstU.newObject(o), nil
}
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
dstU, dstRemote, err := f.findUpstream(remote)
if err != nil {
return nil, err
}
do := dstU.f.Features().Move
useCopy := false
if do == nil {
do = dstU.f.Features().Copy
if do == nil {
return nil, fs.ErrorCantMove
}
useCopy = true
}
o, err := do(ctx, srcObj.Object, dstRemote)
if err != nil {
return nil, err
}
// If we used Copy then remove the source object
if useCopy {
err = srcObj.Remove(ctx)
if err != nil {
return nil, err
}
}
return dstU.newObject(o), nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
// defer log.Trace(f, "src=%v, srcRemote=%q, dstRemote=%q", src, srcRemote, dstRemote)("err=%v", &err)
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(src, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
dstU, dstURemote, err := f.findUpstream(dstRemote)
if err != nil {
return err
}
srcU, srcURemote, err := srcFs.findUpstream(srcRemote)
if err != nil {
return err
}
do := dstU.f.Features().DirMove
if do == nil {
return fs.ErrorCantDirMove
}
fs.Debugf(dstU.f, "srcU.f=%v, srcURemote=%q, dstURemote=%q", srcU.f, srcURemote, dstURemote)
return do(ctx, srcU.f, srcURemote, dstURemote)
}
// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
// At least one value will be written to the channel,
// specifying the initial value and updated values might
// follow. A 0 Duration should pause the polling.
// The ChangeNotify implementation must empty the channel
// regularly. When the channel gets closed, the implementation
// should stop polling and release resources.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), ch <-chan time.Duration) {
var uChans []chan time.Duration
for _, u := range f.upstreams {
u := u
if do := u.f.Features().ChangeNotify; do != nil {
ch := make(chan time.Duration)
uChans = append(uChans, ch)
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
newPath, err := u.pathAdjustment.do(path)
if err != nil {
fs.Logf(f, "ChangeNotify: unable to process %q: %s", path, err)
return
}
fs.Debugf(f, "ChangeNotify: path %q entryType %d", newPath, entryType)
notifyFunc(newPath, entryType)
}
do(ctx, wrappedNotifyFunc, ch)
}
}
go func() {
for i := range ch {
for _, c := range uChans {
c <- i
}
}
for _, c := range uChans {
close(c)
}
}()
}
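// Illustrative sketch (assumed usage, not part of the original source):
// a single interval written to ch is fanned out to every upstream that
// supports ChangeNotify, and closing ch stops all of them:
//
//	ch := make(chan time.Duration)
//	f.ChangeNotify(ctx, func(path string, entryType fs.EntryType) {
//		fs.Infof(nil, "changed: %s", path)
//	}, ch)
//	ch <- 10 * time.Second // set the polling interval everywhere
//	// ... later
//	close(ch) // stop polling and release resources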
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
ctx := context.Background()
_ = f.multithread(ctx, func(ctx context.Context, u *upstream) error {
if do := u.f.Features().DirCacheFlush; do != nil {
do()
}
return nil
})
}
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bool, options ...fs.OpenOption) (fs.Object, error) {
srcPath := src.Remote()
u, uRemote, err := f.findUpstream(srcPath)
if err != nil {
return nil, err
}
uSrc := fs.NewOverrideRemote(src, uRemote)
var o fs.Object
if stream {
o, err = u.f.Features().PutStream(ctx, in, uSrc, options...)
} else {
o, err = u.f.Put(ctx, in, uSrc, options...)
}
if err != nil {
return nil, err
}
return u.newObject(o), nil
}
// Put uploads to the remote path with the given modTime and size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return o, o.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
return f.put(ctx, in, src, false, options...)
default:
return nil, err
}
}
// PutStream uploads to the remote path with the given modTime and an indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return o, o.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
return f.put(ctx, in, src, true, options...)
default:
return nil, err
}
}
// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
usage := &fs.Usage{
Total: new(int64),
Used: new(int64),
Trashed: new(int64),
Other: new(int64),
Free: new(int64),
Objects: new(int64),
}
for _, u := range f.upstreams {
doAbout := u.f.Features().About
if doAbout == nil {
continue
}
usg, err := doAbout(ctx)
if errors.Is(err, fs.ErrorDirNotFound) {
continue
}
if err != nil {
return nil, err
}
if usg.Total != nil && usage.Total != nil {
*usage.Total += *usg.Total
} else {
usage.Total = nil
}
if usg.Used != nil && usage.Used != nil {
*usage.Used += *usg.Used
} else {
usage.Used = nil
}
if usg.Trashed != nil && usage.Trashed != nil {
*usage.Trashed += *usg.Trashed
} else {
usage.Trashed = nil
}
if usg.Other != nil && usage.Other != nil {
*usage.Other += *usg.Other
} else {
usage.Other = nil
}
if usg.Free != nil && usage.Free != nil {
*usage.Free += *usg.Free
} else {
usage.Free = nil
}
if usg.Objects != nil && usage.Objects != nil {
*usage.Objects += *usg.Objects
} else {
usage.Objects = nil
}
}
return usage, nil
}
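// Illustrative note (not part of the original source): a field is summed
// only while every upstream reports it; one upstream returning a nil field
// makes the combined field nil rather than a misleading partial sum, e.g.
//
//	upstream A: Total=100 GiB, upstream B: Total=nil => combined Total=nil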
// Wraps entries for this upstream
func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.DirEntries, error) {
for i, entry := range entries {
switch x := entry.(type) {
case fs.Object:
entries[i] = u.newObject(x)
case fs.Directory:
newDir := fs.NewDirCopy(ctx, x)
newPath, err := u.pathAdjustment.do(newDir.Remote())
if err != nil {
return nil, err
}
newDir.SetRemote(newPath)
entries[i] = newDir
default:
return nil, fmt.Errorf("unknown entry type %T", entry)
}
}
return entries, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
if f.root == "" && dir == "" {
entries = make(fs.DirEntries, 0, len(f.upstreams))
for combineDir := range f.upstreams {
d := fs.NewDir(combineDir, f.when)
entries = append(entries, d)
}
return entries, nil
}
u, uRemote, err := f.findUpstream(dir)
if err != nil {
return nil, err
}
entries, err = u.f.List(ctx, uRemote)
if err != nil {
return nil, err
}
return u.wrapEntries(ctx, entries)
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
// defer log.Trace(f, "dir=%q, callback=%v", dir, callback)("err=%v", &err)
if f.root == "" && dir == "" {
rootEntries, err := f.List(ctx, "")
if err != nil {
return err
}
err = callback(rootEntries)
if err != nil {
return err
}
var mu sync.Mutex
syncCallback := func(entries fs.DirEntries) error {
mu.Lock()
defer mu.Unlock()
return callback(entries)
}
err = f.multithread(ctx, func(ctx context.Context, u *upstream) error {
return f.ListR(ctx, u.dir, syncCallback)
})
if err != nil {
return err
}
return nil
}
u, uRemote, err := f.findUpstream(dir)
if err != nil {
return err
}
wrapCallback := func(entries fs.DirEntries) error {
entries, err := u.wrapEntries(ctx, entries)
if err != nil {
return err
}
return callback(entries)
}
if do := u.f.Features().ListR; do != nil {
err = do(ctx, uRemote, wrapCallback)
} else {
err = walk.ListR(ctx, u.f, uRemote, true, -1, walk.ListAll, wrapCallback)
}
if err == fs.ErrorDirNotFound {
err = nil
}
return err
}
// NewObject creates a new remote combine file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
u, uRemote, err := f.findUpstream(remote)
if err != nil {
return nil, err
}
if uRemote == "" || strings.HasSuffix(uRemote, "/") {
return nil, fs.ErrorIsDir
}
o, err := u.f.NewObject(ctx, uRemote)
if err != nil {
return nil, err
}
return u.newObject(o), nil
}
// Precision is the greatest Precision of all upstreams
func (f *Fs) Precision() time.Duration {
var greatestPrecision time.Duration
for _, u := range f.upstreams {
uPrecision := u.f.Precision()
if uPrecision > greatestPrecision {
greatestPrecision = uPrecision
}
}
return greatestPrecision
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
return f.multithread(ctx, func(ctx context.Context, u *upstream) error {
if do := u.f.Features().Shutdown; do != nil {
return do(ctx)
}
return nil
})
}
// Object describes a wrapped Object
//
// This is a wrapped Object which knows its path prefix
type Object struct {
fs.Object
u *upstream
}
func (u *upstream) newObject(o fs.Object) *Object {
return &Object{
Object: o,
u: u,
}
}
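// Illustrative note (not part of the original source): because Object embeds
// fs.Object, every method not overridden here (Size, ModTime, Open, ...) is
// forwarded to the wrapped object; only the path-related methods such as
// Remote are re-mapped through the upstream's pathAdjustment.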
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.u.parent
}
// String returns the remote path
func (o *Object) String() string {
return o.Remote()
}
// Remote returns the remote path
func (o *Object) Remote() string {
newPath, err := o.u.pathAdjustment.do(o.Object.String())
if err != nil {
fs.Errorf(o, "Bad object: %v", err)
return err.Error()
}
return newPath
}
// MimeType returns the content type of the Object if known
func (o *Object) MimeType(ctx context.Context) (mimeType string) {
if do, ok := o.Object.(fs.MimeTyper); ok {
mimeType = do.MimeType(ctx)
}
return mimeType
}
// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *Object) UnWrap() fs.Object {
return o.Object
}
// GetTier returns storage tier or class of the Object
func (o *Object) GetTier() string {
do, ok := o.Object.(fs.GetTierer)
if !ok {
return ""
}
return do.GetTier()
}
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
do, ok := o.Object.(fs.IDer)
if !ok {
return ""
}
return do.ID()
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
do, ok := o.Object.(fs.Metadataer)
if !ok {
return nil, nil
}
return do.Metadata(ctx)
}
// SetTier performs changing storage tier of the Object if
// multiple storage classes supported
func (o *Object) SetTier(tier string) error {
do, ok := o.Object.(fs.SetTierer)
if !ok {
return errors.New("underlying remote does not support SetTier")
}
return do.SetTier(tier)
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.FullObject = (*Object)(nil)
)


@@ -1,94 +0,0 @@
package combine
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func TestAdjustmentDo(t *testing.T) {
for _, test := range []struct {
root string
mountpoint string
in string
want string
wantErr error
}{
{
root: "",
mountpoint: "mountpoint",
in: "path/to/file.txt",
want: "mountpoint/path/to/file.txt",
},
{
root: "mountpoint",
mountpoint: "mountpoint",
in: "path/to/file.txt",
want: "path/to/file.txt",
},
{
root: "mountpoint/path",
mountpoint: "mountpoint",
in: "path/to/file.txt",
want: "to/file.txt",
},
{
root: "mountpoint/path",
mountpoint: "mountpoint",
in: "wrongpath/to/file.txt",
want: "",
wantErr: errNotUnderRoot,
},
} {
what := fmt.Sprintf("%+v", test)
a := newAdjustment(test.root, test.mountpoint)
got, gotErr := a.do(test.in)
assert.Equal(t, test.wantErr, gotErr)
assert.Equal(t, test.want, got, what)
}
}
func TestAdjustmentUndo(t *testing.T) {
for _, test := range []struct {
root string
mountpoint string
in string
want string
wantErr error
}{
{
root: "",
mountpoint: "mountpoint",
in: "mountpoint/path/to/file.txt",
want: "path/to/file.txt",
},
{
root: "mountpoint",
mountpoint: "mountpoint",
in: "path/to/file.txt",
want: "path/to/file.txt",
},
{
root: "mountpoint/path",
mountpoint: "mountpoint",
in: "to/file.txt",
want: "path/to/file.txt",
},
{
root: "wrongmountpoint/path",
mountpoint: "mountpoint",
in: "to/file.txt",
want: "",
wantErr: errNotUnderRoot,
},
} {
what := fmt.Sprintf("%+v", test)
a := newAdjustment(test.root, test.mountpoint)
got, gotErr := a.undo(test.in)
assert.Equal(t, test.wantErr, gotErr)
assert.Equal(t, test.want, got, what)
}
}


@@ -1,81 +0,0 @@
// Test Combine filesystem interface
package combine_test
import (
"testing"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/memory"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
t.Skip("Skipping as -remote not set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}
func TestLocal(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs := MakeTestDirs(t, 3)
upstreams := "dir1=" + dirs[0] + " dir2=" + dirs[1] + " dir3=" + dirs[2]
name := "TestCombineLocal"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":dir1",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
QuickTestOK: true,
})
}
func TestMemory(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
upstreams := "dir1=:memory:dir1 dir2=:memory:dir2 dir3=:memory:dir3"
name := "TestCombineMemory"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":dir1",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
QuickTestOK: true,
})
}
func TestMixed(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs := MakeTestDirs(t, 2)
upstreams := "dir1=" + dirs[0] + " dir2=" + dirs[1] + " dir3=:memory:dir3"
name := "TestCombineMixed"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":dir1",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "combine"},
{Name: name, Key: "upstreams", Value: upstreams},
},
})
}
// MakeTestDirs makes temporary directories for testing
func MakeTestDirs(t *testing.T, n int) (dirs []string) {
for i := 1; i <= n; i++ {
dir := t.TempDir()
dirs = append(dirs, dir)
}
return dirs
}
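// Illustrative invocation (assumed, from the repository root):
//
//	go test ./backend/combine -run 'TestLocal|TestMemory|TestMixed' -v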


@@ -13,6 +13,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"os" "os"
"regexp" "regexp"
"strings" "strings"
@@ -28,7 +29,6 @@ import (
"github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/operations"
) )
@@ -53,7 +53,7 @@ const (
Gzip = 2 Gzip = 2
) )
var nameRegexp = regexp.MustCompile(`^(.+?)\.([A-Za-z0-9-_]{11})$`) var nameRegexp = regexp.MustCompile("^(.+?)\\.([A-Za-z0-9-_]{11})$")
// Register with Fs // Register with Fs
func init() { func init() {
@@ -70,9 +70,6 @@ func init() {
Name: "compress", Name: "compress",
Description: "Compress a remote", Description: "Compress a remote",
NewFs: NewFs, NewFs: NewFs,
MetadataInfo: &fs.MetadataInfo{
Help: `Any metadata supported by the underlying remote is read and written.`,
},
Options: []fs.Option{{ Options: []fs.Option{{
Name: "remote", Name: "remote",
Help: "Remote to compress.", Help: "Remote to compress.",
@@ -90,7 +87,7 @@ Generally -1 (default, equivalent to 5) is recommended.
Levels 1 to 9 increase compression at the cost of speed. Going past 6 Levels 1 to 9 increase compression at the cost of speed. Going past 6
generally offers very little return. generally offers very little return.
Level -2 uses Huffman encoding only. Only use if you know what you Level -2 uses Huffmann encoding only. Only use if you know what you
are doing. are doing.
Level 0 turns off compression.`, Level 0 turns off compression.`,
Default: sgzip.DefaultCompression, Default: sgzip.DefaultCompression,
@@ -130,7 +127,7 @@ type Fs struct {
features *fs.Features // optional features features *fs.Features // optional features
} }
// NewFs constructs an Fs from the path, container:path // NewFs contstructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) { func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct // Parse config into Options struct
opt := new(Options) opt := new(Options)
@@ -183,9 +180,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
SetTier: true, SetTier: true,
BucketBased: true, BucketBased: true,
CanHaveEmptyDirectories: true, CanHaveEmptyDirectories: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs) }).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
// We support reading MIME types no matter the wrapped fs // We support reading MIME types no matter the wrapped fs
f.features.ReadMimeType = true f.features.ReadMimeType = true
@@ -228,7 +222,7 @@ func processFileName(compressedFileName string) (origFileName string, extension
// Separate the filename and size from the extension // Separate the filename and size from the extension
extensionPos := strings.LastIndex(compressedFileName, ".") extensionPos := strings.LastIndex(compressedFileName, ".")
if extensionPos == -1 { if extensionPos == -1 {
return "", "", 0, errors.New("file name has no extension") return "", "", 0, errors.New("File name has no extension")
} }
extension = compressedFileName[extensionPos:] extension = compressedFileName[extensionPos:]
nameWithSize := compressedFileName[:extensionPos] nameWithSize := compressedFileName[:extensionPos]
@@ -237,11 +231,11 @@ func processFileName(compressedFileName string) (origFileName string, extension
} }
match := nameRegexp.FindStringSubmatch(nameWithSize) match := nameRegexp.FindStringSubmatch(nameWithSize)
if match == nil || len(match) != 3 { if match == nil || len(match) != 3 {
return "", "", 0, errors.New("invalid filename") return "", "", 0, errors.New("Invalid filename")
} }
size, err := base64ToInt64(match[2]) size, err := base64ToInt64(match[2])
if err != nil { if err != nil {
return "", "", 0, errors.New("could not decode size") return "", "", 0, errors.New("Could not decode size")
} }
return match[1], gzFileExt, size, nil return match[1], gzFileExt, size, nil
} }
@@ -310,7 +304,7 @@ func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
case fs.Directory: case fs.Directory:
f.addDir(&newEntries, x) f.addDir(&newEntries, x)
default: default:
return nil, fmt.Errorf("unknown object type %T", entry) return nil, fmt.Errorf("Unknown object type %T", entry)
} }
} }
return newEntries, nil return newEntries, nil
@@ -367,16 +361,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
meta, err := readMetadata(ctx, mo) meta := readMetadata(ctx, mo)
if err != nil { if meta == nil {
return nil, fmt.Errorf("error decoding metadata: %w", err) return nil, errors.New("error decoding metadata")
} }
// Create our Object // Create our Object
o, err := f.Fs.NewObject(ctx, makeDataName(remote, meta.CompressionMetadata.Size, meta.Mode)) o, err := f.Fs.NewObject(ctx, makeDataName(remote, meta.CompressionMetadata.Size, meta.Mode))
if err != nil { return f.newObject(o, mo, meta), err
return nil, err
}
return f.newObject(o, mo, meta), nil
} }
// checkCompressAndType checks if an object is compressible and determines it's mime type // checkCompressAndType checks if an object is compressible and determines it's mime type
@@ -454,7 +445,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
return f.Fs.Put(ctx, bytes.NewBuffer(buf[:n]), src, options...) return f.Fs.Put(ctx, bytes.NewBuffer(buf[:n]), src, options...)
} }
// Need to include what we already read // Need to include what we allready read
in = &ReadCloserWrapper{ in = &ReadCloserWrapper{
Reader: io.MultiReader(bytes.NewReader(buf), in), Reader: io.MultiReader(bytes.NewReader(buf), in),
Closer: in, Closer: in,
@@ -467,7 +458,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
} }
fs.Debugf(f, "Target remote doesn't support streaming uploads, creating temporary local file") fs.Debugf(f, "Target remote doesn't support streaming uploads, creating temporary local file")
tempFile, err := os.CreateTemp("", "rclone-press-") tempFile, err := ioutil.TempFile("", "rclone-press-")
defer func() { defer func() {
// these errors should be relatively uncritical and the upload should've succeeded so it's okay-ish // these errors should be relatively uncritical and the upload should've succeeded so it's okay-ish
// to ignore them // to ignore them
@@ -475,10 +466,10 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
_ = os.Remove(tempFile.Name()) _ = os.Remove(tempFile.Name())
}() }()
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create temporary local FS to spool file: %w", err) return nil, fmt.Errorf("Failed to create temporary local FS to spool file: %w", err)
} }
if _, err = io.Copy(tempFile, in); err != nil { if _, err = io.Copy(tempFile, in); err != nil {
return nil, fmt.Errorf("failed to write temporary local file: %w", err) return nil, fmt.Errorf("Failed to write temporary local file: %w", err)
} }
if _, err = tempFile.Seek(0, 0); err != nil { if _, err = tempFile.Seek(0, 0); err != nil {
return nil, err return nil, err
@@ -545,8 +536,8 @@ func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, o
} }
// Transfer the data // Transfer the data
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options) o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx), options)
//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx)) //o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx))
if err != nil { if err != nil {
if o != nil { if o != nil {
removeErr := o.Remove(ctx) removeErr := o.Remove(ctx)
@@ -680,7 +671,7 @@ func (f *Fs) putWithCustomFunctions(ctx context.Context, in io.Reader, src fs.Ob
} }
return nil, err return nil, err
} }
return f.newObject(dataObject, mo, meta), nil return f.newObject(dataObject, mo, meta), err
} }
// Put in to the remote path with the modTime given of the given size // Put in to the remote path with the modTime given of the given size
@@ -729,23 +720,23 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
if found && (oldObj.(*Object).meta.Mode != Uncompressed || compressible) { if found && (oldObj.(*Object).meta.Mode != Uncompressed || compressible) {
err = oldObj.(*Object).Object.Remove(ctx) err = oldObj.(*Object).Object.Remove(ctx)
if err != nil { if err != nil {
return nil, fmt.Errorf("couldn't remove original object: %w", err) return nil, fmt.Errorf("Could remove original object: %w", err)
} }
} }
// If our new object is compressed we have to rename it with the correct size. // If our new object is compressed we have to rename it with the correct size.
// Uncompressed objects don't store the size in the name so we they'll already have the correct name. // Uncompressed objects don't store the size in the name so we they'll allready have the correct name.
if compressible { if compressible {
wrapObj, err := operations.Move(ctx, f.Fs, nil, f.dataName(src.Remote(), newObj.size, compressible), newObj.Object) wrapObj, err := operations.Move(ctx, f.Fs, nil, f.dataName(src.Remote(), newObj.size, compressible), newObj.Object)
if err != nil { if err != nil {
return nil, fmt.Errorf("couldn't rename streamed object: %w", err) return nil, fmt.Errorf("Couldn't rename streamed Object.: %w", err)
} }
newObj.Object = wrapObj newObj.Object = wrapObj
} }
return newObj, nil return newObj, nil
} }
// Temporarily disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects // Temporarely disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects
// will break stuff. Right no I can't think of a way to make this work. // will break stuff. Right no I can't think of a way to make this work.
// PutUnchecked uploads the object // PutUnchecked uploads the object
@@ -788,9 +779,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// Copy src to this remote using server side copy operations. // Copy src to this remote using server side copy operations.
// //
// This is stored with the remote path given. // This is stored with the remote path given
// //
// It returns the destination Object and a possible error. // It returns the destination Object and a possible error
// //
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
@@ -838,9 +829,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Move src to this remote using server side move operations. // Move src to this remote using server side move operations.
// //
// This is stored with the remote path given. // This is stored with the remote path given
// //
// It returns the destination Object and a possible error. // It returns the destination Object and a possible error
// //
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
@@ -1043,19 +1034,24 @@ func newMetadata(size int64, mode int, cmeta sgzip.GzipMetadata, md5 string, mim
} }
// This function will read the metadata from a metadata object. // This function will read the metadata from a metadata object.
func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata, err error) { func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata) {
// Open our meradata object // Open our meradata object
rc, err := mo.Open(ctx) rc, err := mo.Open(ctx)
if err != nil { if err != nil {
return nil, err return nil
} }
defer fs.CheckClose(rc, &err) defer func() {
err := rc.Close()
if err != nil {
fs.Errorf(mo, "Error closing object: %v", err)
}
}()
jr := json.NewDecoder(rc) jr := json.NewDecoder(rc)
meta = new(ObjectMetadata) meta = new(ObjectMetadata)
if err = jr.Decode(meta); err != nil { if err = jr.Decode(meta); err != nil {
return nil, err return nil
} }
return meta, nil return meta
} }
// Remove removes this object // Remove removes this object
@@ -1100,9 +1096,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
origName := o.Remote() origName := o.Remote()
if o.meta.Mode != Uncompressed || compressible { if o.meta.Mode != Uncompressed || compressible {
newObject, err = o.f.putWithCustomFunctions(ctx, in, o.f.wrapInfo(src, origName, src.Size()), options, o.f.Fs.Put, updateMeta, compressible, mimeType) newObject, err = o.f.putWithCustomFunctions(ctx, in, o.f.wrapInfo(src, origName, src.Size()), options, o.f.Fs.Put, updateMeta, compressible, mimeType)
if err != nil {
return err
}
if newObject.Object.Remote() != o.Object.Remote() { if newObject.Object.Remote() != o.Object.Remote() {
if removeErr := o.Object.Remove(ctx); removeErr != nil { if removeErr := o.Object.Remove(ctx); removeErr != nil {
return removeErr return removeErr
@@ -1116,9 +1109,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} }
// If we are, just update the object and metadata // If we are, just update the object and metadata
newObject, err = o.f.putWithCustomFunctions(ctx, in, src, options, update, updateMeta, compressible, mimeType) newObject, err = o.f.putWithCustomFunctions(ctx, in, src, options, update, updateMeta, compressible, mimeType)
if err != nil { }
return err if err != nil {
} return err
} }
// Update object metadata and return // Update object metadata and return
o.Object = newObject.Object o.Object = newObject.Object
@@ -1129,9 +1122,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// This will initialize the variables of a new press Object. The metadata object, mo, and metadata struct, meta, must be specified. // This will initialize the variables of a new press Object. The metadata object, mo, and metadata struct, meta, must be specified.
func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object { func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object {
if o == nil {
log.Trace(nil, "newObject(%#v, %#v, %#v) called with nil o", o, mo, meta)
}
return &Object{ return &Object{
Object: o, Object: o,
f: f, f: f,
@@ -1144,9 +1134,6 @@ func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object
// This initializes the variables of a press Object with only the size. The metadata will be loaded later on demand. // This initializes the variables of a press Object with only the size. The metadata will be loaded later on demand.
func (f *Fs) newObjectSizeAndNameOnly(o fs.Object, moName string, size int64) *Object { func (f *Fs) newObjectSizeAndNameOnly(o fs.Object, moName string, size int64) *Object {
if o == nil {
log.Trace(nil, "newObjectSizeAndNameOnly(%#v, %#v, %#v) called with nil o", o, moName, size)
}
return &Object{ return &Object{
Object: o, Object: o,
f: f, f: f,
@@ -1174,7 +1161,7 @@ func (o *Object) loadMetadataIfNotLoaded(ctx context.Context) (err error) {
return err return err
} }
if o.meta == nil { if o.meta == nil {
o.meta, err = readMetadata(ctx, o.mo) o.meta = readMetadata(ctx, o.mo)
} }
return err return err
} }
@@ -1227,21 +1214,6 @@ func (o *Object) MimeType(ctx context.Context) string {
return o.meta.MimeType return o.meta.MimeType
} }
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
err := o.loadMetadataIfNotLoaded(ctx)
if err != nil {
return nil, err
}
do, ok := o.mo.(fs.Metadataer)
if !ok {
return nil, nil
}
return do.Metadata(ctx)
}
// Hash returns the selected checksum of the file // Hash returns the selected checksum of the file
// If no checksum is available it returns "" // If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) { func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
@@ -1388,51 +1360,6 @@ func (o *ObjectInfo) Hash(ctx context.Context, ht hash.Type) (string, error) {
return "", nil // cannot know the checksum return "", nil // cannot know the checksum
} }
// ID returns the ID of the Object if known, or "" if not
func (o *ObjectInfo) ID() string {
do, ok := o.src.(fs.IDer)
if !ok {
return ""
}
return do.ID()
}
// MimeType returns the content type of the Object if
// known, or "" if not
func (o *ObjectInfo) MimeType(ctx context.Context) string {
do, ok := o.src.(fs.MimeTyper)
if !ok {
return ""
}
return do.MimeType(ctx)
}
// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *ObjectInfo) UnWrap() fs.Object {
return fs.UnWrapObjectInfo(o.src)
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *ObjectInfo) Metadata(ctx context.Context) (fs.Metadata, error) {
do, ok := o.src.(fs.Metadataer)
if !ok {
return nil, nil
}
return do.Metadata(ctx)
}
// GetTier returns storage tier or class of the Object
func (o *ObjectInfo) GetTier() string {
do, ok := o.src.(fs.GetTierer)
if !ok {
return ""
}
return do.GetTier()
}
// ID returns the ID of the Object if known, or "" if not // ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string { func (o *Object) ID() string {
do, ok := o.Object.(fs.IDer) do, ok := o.Object.(fs.IDer)
@@ -1485,6 +1412,11 @@ var (
_ fs.ChangeNotifier = (*Fs)(nil) _ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil) _ fs.PublicLinker = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil) _ fs.Shutdowner = (*Fs)(nil)
_ fs.FullObjectInfo = (*ObjectInfo)(nil) _ fs.ObjectInfo = (*ObjectInfo)(nil)
_ fs.FullObject = (*Object)(nil) _ fs.GetTierer = (*Object)(nil)
_ fs.SetTierer = (*Object)(nil)
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
) )


@@ -61,6 +61,5 @@ func TestRemoteGzip(t *testing.T) {
{Name: name, Key: "remote", Value: tempdir}, {Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "compression_mode", Value: "gzip"}, {Name: name, Key: "compression_mode", Value: "gzip"},
}, },
QuickTestOK: true,
}) })
} }


@@ -96,7 +96,7 @@ func NewNameEncryptionMode(s string) (mode NameEncryptionMode, err error) {
case "obfuscate": case "obfuscate":
mode = NameEncryptionObfuscated mode = NameEncryptionObfuscated
default: default:
err = fmt.Errorf("unknown file name encryption mode %q", s) err = fmt.Errorf("Unknown file name encryption mode %q", s)
} }
return mode, err return mode, err
} }
@@ -127,11 +127,11 @@ type fileNameEncoding interface {
// RFC4648 // RFC4648
// //
// The standard encoding is modified in two ways // The standard encoding is modified in two ways
// - it becomes lower case (no-one likes upper case filenames!) // * it becomes lower case (no-one likes upper case filenames!)
// - we strip the padding character `=` // * we strip the padding character `=`
type caseInsensitiveBase32Encoding struct{} type caseInsensitiveBase32Encoding struct{}
// EncodeToString encodes a string using the modified version of // EncodeToString encodes a strign using the modified version of
// base32 encoding. // base32 encoding.
func (caseInsensitiveBase32Encoding) EncodeToString(src []byte) string { func (caseInsensitiveBase32Encoding) EncodeToString(src []byte) string {
encoded := base32.HexEncoding.EncodeToString(src) encoded := base32.HexEncoding.EncodeToString(src)
@@ -162,7 +162,7 @@ func NewNameEncoding(s string) (enc fileNameEncoding, err error) {
case "base32768": case "base32768":
enc = base32768.SafeEncoding enc = base32768.SafeEncoding
default: default:
err = fmt.Errorf("unknown file name encoding mode %q", s) err = fmt.Errorf("Unknown file name encoding mode %q", s)
} }
return enc, err return enc, err
} }
@@ -244,7 +244,7 @@ func (c *Cipher) putBlock(buf []byte) {
// encryptSegment encrypts a path segment // encryptSegment encrypts a path segment
// //
// This uses EME with AES. // This uses EME with AES
// //
// EME (ECB-Mix-ECB) is a wide-block encryption mode presented in the // EME (ECB-Mix-ECB) is a wide-block encryption mode presented in the
// 2003 paper "A Parallelizable Enciphering Mode" by Halevi and // 2003 paper "A Parallelizable Enciphering Mode" by Halevi and
@@ -254,8 +254,8 @@ func (c *Cipher) putBlock(buf []byte) {
// same filename must encrypt to the same thing. // same filename must encrypt to the same thing.
// //
// This means that // This means that
// - filenames with the same name will encrypt the same // * filenames with the same name will encrypt the same
// - filenames which start the same won't have a common prefix // * filenames which start the same won't have a common prefix
func (c *Cipher) encryptSegment(plaintext string) string { func (c *Cipher) encryptSegment(plaintext string) string {
if plaintext == "" { if plaintext == "" {
return "" return ""
@@ -1085,7 +1085,7 @@ func (c *Cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
// DecryptDataSeek decrypts the data stream from offset // DecryptDataSeek decrypts the data stream from offset
// //
// The open function must return a ReadCloser opened to the offset supplied. // The open function must return a ReadCloser opened to the offset supplied
// //
// You must use this form of DecryptData if you might want to Seek the file handle // You must use this form of DecryptData if you might want to Seek the file handle
func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) { func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {


@@ -8,6 +8,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"strings" "strings"
"testing" "testing"
@@ -1072,7 +1073,7 @@ func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
source := newRandomSource(copySize) source := newRandomSource(copySize)
encrypted, err := c.newEncrypter(source, nil) encrypted, err := c.newEncrypter(source, nil)
assert.NoError(t, err) assert.NoError(t, err)
decrypted, err := c.newDecrypter(io.NopCloser(encrypted)) decrypted, err := c.newDecrypter(ioutil.NopCloser(encrypted))
assert.NoError(t, err) assert.NoError(t, err)
sink := newRandomSource(copySize) sink := newRandomSource(copySize)
n, err := io.CopyBuffer(sink, decrypted, buf) n, err := io.CopyBuffer(sink, decrypted, buf)
@@ -1143,15 +1144,15 @@ func TestEncryptData(t *testing.T) {
buf := bytes.NewBuffer(test.in) buf := bytes.NewBuffer(test.in)
encrypted, err := c.EncryptData(buf) encrypted, err := c.EncryptData(buf)
assert.NoError(t, err) assert.NoError(t, err)
out, err := io.ReadAll(encrypted) out, err := ioutil.ReadAll(encrypted)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, test.expected, out) assert.Equal(t, test.expected, out)
// Check we can decode the data properly too... // Check we can decode the data properly too...
buf = bytes.NewBuffer(out) buf = bytes.NewBuffer(out)
decrypted, err := c.DecryptData(io.NopCloser(buf)) decrypted, err := c.DecryptData(ioutil.NopCloser(buf))
assert.NoError(t, err) assert.NoError(t, err)
out, err = io.ReadAll(decrypted) out, err = ioutil.ReadAll(decrypted)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, test.in, out) assert.Equal(t, test.in, out)
} }
@@ -1186,7 +1187,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
fh, err := c.newEncrypter(in, nil) fh, err := c.newEncrypter(in, nil)
assert.NoError(t, err) assert.NoError(t, err)
n, err := io.CopyN(io.Discard, fh, 1e6) n, err := io.CopyN(ioutil.Discard, fh, 1e6)
assert.Equal(t, io.ErrUnexpectedEOF, err) assert.Equal(t, io.ErrUnexpectedEOF, err)
assert.Equal(t, int64(32), n) assert.Equal(t, int64(32), n)
} }
@@ -1256,12 +1257,12 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF} in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
in1 := bytes.NewBuffer(file16) in1 := bytes.NewBuffer(file16)
in := io.NopCloser(io.MultiReader(in1, in2)) in := ioutil.NopCloser(io.MultiReader(in1, in2))
fh, err := c.newDecrypter(in) fh, err := c.newDecrypter(in)
assert.NoError(t, err) assert.NoError(t, err)
n, err := io.CopyN(io.Discard, fh, 1e6) n, err := io.CopyN(ioutil.Discard, fh, 1e6)
assert.Equal(t, io.ErrUnexpectedEOF, err) assert.Equal(t, io.ErrUnexpectedEOF, err)
assert.Equal(t, int64(16), n) assert.Equal(t, int64(16), n)
} }
@@ -1273,14 +1274,14 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
// Make random data // Make random data
const dataSize = 150000 const dataSize = 150000
plaintext, err := io.ReadAll(newRandomSource(dataSize)) plaintext, err := ioutil.ReadAll(newRandomSource(dataSize))
assert.NoError(t, err) assert.NoError(t, err)
// Encrypt the data // Encrypt the data
buf := bytes.NewBuffer(plaintext) buf := bytes.NewBuffer(plaintext)
encrypted, err := c.EncryptData(buf) encrypted, err := c.EncryptData(buf)
assert.NoError(t, err) assert.NoError(t, err)
ciphertext, err := io.ReadAll(encrypted) ciphertext, err := ioutil.ReadAll(encrypted)
assert.NoError(t, err) assert.NoError(t, err)
trials := []int{0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65, trials := []int{0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65,
@@ -1299,7 +1300,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
end = len(ciphertext) end = len(ciphertext)
} }
} }
reader = io.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end])) reader = ioutil.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
return reader, nil return reader, nil
} }
@@ -1489,7 +1490,7 @@ func TestDecrypterRead(t *testing.T) {
assert.NoError(t, err, what) assert.NoError(t, err, what)
continue continue
} }
_, err = io.ReadAll(fh) _, err = ioutil.ReadAll(fh)
var expectedErr error var expectedErr error
switch { switch {
case i == fileHeaderSize: case i == fileHeaderSize:
@@ -1513,7 +1514,7 @@ func TestDecrypterRead(t *testing.T) {
cd := newCloseDetector(in) cd := newCloseDetector(in)
fh, err := c.newDecrypter(cd) fh, err := c.newDecrypter(cd)
assert.NoError(t, err) assert.NoError(t, err)
_, err = io.ReadAll(fh) _, err = ioutil.ReadAll(fh)
assert.Error(t, err, "potato") assert.Error(t, err, "potato")
assert.Equal(t, 0, cd.closed) assert.Equal(t, 0, cd.closed)
@@ -1523,13 +1524,13 @@ func TestDecrypterRead(t *testing.T) {
copy(file16copy, file16) copy(file16copy, file16)
for i := range file16copy { for i := range file16copy {
file16copy[i] ^= 0xFF file16copy[i] ^= 0xFF
fh, err := c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy))) fh, err := c.newDecrypter(ioutil.NopCloser(bytes.NewBuffer(file16copy)))
if i < fileMagicSize { if i < fileMagicSize {
assert.Error(t, err, ErrorEncryptedBadMagic.Error()) assert.Error(t, err, ErrorEncryptedBadMagic.Error())
assert.Nil(t, fh) assert.Nil(t, fh)
} else { } else {
assert.NoError(t, err) assert.NoError(t, err)
_, err = io.ReadAll(fh) _, err = ioutil.ReadAll(fh)
assert.Error(t, err, ErrorEncryptedFileBadHeader.Error()) assert.Error(t, err, ErrorEncryptedFileBadHeader.Error())
} }
file16copy[i] ^= 0xFF file16copy[i] ^= 0xFF
@@ -1564,7 +1565,7 @@ func TestDecrypterClose(t *testing.T) {
assert.Equal(t, 0, cd.closed) assert.Equal(t, 0, cd.closed)
// close after reading // close after reading
out, err := io.ReadAll(fh) out, err := ioutil.ReadAll(fh)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, []byte{1}, out) assert.Equal(t, []byte{1}, out)
assert.Equal(t, io.EOF, fh.err) assert.Equal(t, io.EOF, fh.err)


@@ -28,9 +28,6 @@ func init() {
Description: "Encrypt/Decrypt a remote", Description: "Encrypt/Decrypt a remote",
NewFs: NewFs, NewFs: NewFs,
CommandHelp: commandHelp, CommandHelp: commandHelp,
MetadataInfo: &fs.MetadataInfo{
Help: `Any metadata supported by the underlying remote is read and written.`,
},
Options: []fs.Option{{ Options: []fs.Option{{
Name: "remote", Name: "remote",
Help: "Remote to encrypt/decrypt.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).", Help: "Remote to encrypt/decrypt.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
@@ -125,7 +122,7 @@ names, or for debugging purposes.`,
This option could help with shortening the encrypted filename. The This option could help with shortening the encrypted filename. The
suitable option would depend on the way your remote count the filename suitable option would depend on the way your remote count the filename
length and if it's case sensitive.`, length and if it's case sensitve.`,
Default: "base32", Default: "base32",
Examples: []fs.OptionExample{ Examples: []fs.OptionExample{
{ {
@@ -235,7 +232,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
// the features here are ones we could support, and they are // the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs // ANDed with the ones from wrappedFs
f.features = (&fs.Features{ f.features = (&fs.Features{
CaseInsensitive: !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff, CaseInsensitive: cipher.NameEncryptionMode() == NameEncryptionOff,
DuplicateFiles: true, DuplicateFiles: true,
ReadMimeType: false, // MimeTypes not supported with crypt ReadMimeType: false, // MimeTypes not supported with crypt
WriteMimeType: false, WriteMimeType: false,
@@ -244,9 +241,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
SetTier: true, SetTier: true,
GetTier: true, GetTier: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs, ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs) }).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
return f, err return f, err
@@ -334,7 +328,7 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
case fs.Directory: case fs.Directory:
f.addDir(ctx, &newEntries, x) f.addDir(ctx, &newEntries, x)
default: default:
return nil, fmt.Errorf("unknown object type %T", entry) return nil, fmt.Errorf("Unknown object type %T", entry)
} }
} }
return newEntries, nil return newEntries, nil
@@ -396,8 +390,6 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
// put implements Put or PutStream // put implements Put or PutStream
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) { func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
ci := fs.GetConfig(ctx)
if f.opt.NoDataEncryption { if f.opt.NoDataEncryption {
o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...) o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
if err == nil && o != nil { if err == nil && o != nil {
@@ -415,9 +407,6 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
// Find a hash the destination supports to compute a hash of // Find a hash the destination supports to compute a hash of
// the encrypted data // the encrypted data
ht := f.Fs.Hashes().GetOne() ht := f.Fs.Hashes().GetOne()
if ci.IgnoreChecksum {
ht = hash.None
}
var hasher *hash.MultiHasher var hasher *hash.MultiHasher
if ht != hash.None { if ht != hash.None {
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht)) hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
@@ -512,9 +501,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// Copy src to this remote using server-side copy operations. // Copy src to this remote using server-side copy operations.
// //
// This is stored with the remote path given. // This is stored with the remote path given
// //
// It returns the destination Object and a possible error. // It returns the destination Object and a possible error
// //
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
@@ -537,9 +526,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Move src to this remote using server-side move operations. // Move src to this remote using server-side move operations.
// //
// This is stored with the remote path given. // This is stored with the remote path given
// //
// It returns the destination Object and a possible error. // It returns the destination Object and a possible error
// //
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
@@ -1052,11 +1041,10 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
// Get the underlying object if there is one // Get the underlying object if there is one
if srcObj, ok = o.ObjectInfo.(fs.Object); ok { if srcObj, ok = o.ObjectInfo.(fs.Object); ok {
// Prefer direct interface assertion // Prefer direct interface assertion
} else if do, ok := o.ObjectInfo.(*fs.OverrideRemote); ok { } else if do, ok := o.ObjectInfo.(fs.ObjectUnWrapper); ok {
// Unwrap if it is an operations.OverrideRemote // Otherwise likely is an operations.OverrideRemote
srcObj = do.UnWrap() srcObj = do.UnWrap()
} else { } else {
// Otherwise don't unwrap any further
return "", nil return "", nil
} }
// if this is wrapping a local object then we work out the hash // if this is wrapping a local object then we work out the hash
@@ -1068,50 +1056,6 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
return "", nil return "", nil
} }
// GetTier returns storage tier or class of the Object
func (o *ObjectInfo) GetTier() string {
do, ok := o.ObjectInfo.(fs.GetTierer)
if !ok {
return ""
}
return do.GetTier()
}
// ID returns the ID of the Object if known, or "" if not
func (o *ObjectInfo) ID() string {
do, ok := o.ObjectInfo.(fs.IDer)
if !ok {
return ""
}
return do.ID()
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *ObjectInfo) Metadata(ctx context.Context) (fs.Metadata, error) {
do, ok := o.ObjectInfo.(fs.Metadataer)
if !ok {
return nil, nil
}
return do.Metadata(ctx)
}
// MimeType returns the content type of the Object if
// known, or "" if not
//
// This is deliberately unsupported so we don't leak mime type info by
// default.
func (o *ObjectInfo) MimeType(ctx context.Context) string {
return ""
}
// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *ObjectInfo) UnWrap() fs.Object {
return fs.UnWrapObjectInfo(o.ObjectInfo)
}
// ID returns the ID of the Object if known, or "" if not // ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string { func (o *Object) ID() string {
do, ok := o.Object.(fs.IDer) do, ok := o.Object.(fs.IDer)
@@ -1140,26 +1084,6 @@ func (o *Object) GetTier() string {
return do.GetTier() return do.GetTier()
} }
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
do, ok := o.Object.(fs.Metadataer)
if !ok {
return nil, nil
}
return do.Metadata(ctx)
}
// MimeType returns the content type of the Object if
// known, or "" if not
//
// This is deliberately unsupported so we don't leak mime type info by
// default.
func (o *Object) MimeType(ctx context.Context) string {
return ""
}
// Check the interfaces are satisfied // Check the interfaces are satisfied
var ( var (
_ fs.Fs = (*Fs)(nil) _ fs.Fs = (*Fs)(nil)
@@ -1182,6 +1106,10 @@ var (
_ fs.UserInfoer = (*Fs)(nil) _ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil) _ fs.Disconnecter = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil) _ fs.Shutdowner = (*Fs)(nil)
_ fs.FullObjectInfo = (*ObjectInfo)(nil) _ fs.ObjectInfo = (*ObjectInfo)(nil)
_ fs.FullObject = (*Object)(nil) _ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
_ fs.SetTierer = (*Object)(nil)
_ fs.GetTierer = (*Object)(nil)
) )


@@ -17,28 +17,41 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
type testWrapper struct {
fs.ObjectInfo
}
// UnWrap returns the Object that this Object is wrapping or nil if it
// isn't wrapping anything
func (o testWrapper) UnWrap() fs.Object {
if o, ok := o.ObjectInfo.(fs.Object); ok {
return o
}
return nil
}
// Create a temporary local fs to upload things from // Create a temporary local fs to upload things from
func makeTempLocalFs(t *testing.T) (localFs fs.Fs) { func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
localFs, err := fs.TemporaryLocalFs(context.Background()) localFs, err := fs.TemporaryLocalFs(context.Background())
require.NoError(t, err) require.NoError(t, err)
t.Cleanup(func() { cleanup = func() {
require.NoError(t, localFs.Rmdir(context.Background(), "")) require.NoError(t, localFs.Rmdir(context.Background(), ""))
}) }
return localFs return localFs, cleanup
} }
// Upload a file to a remote // Upload a file to a remote
func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object) { func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object, cleanup func()) {
inBuf := bytes.NewBufferString(contents) inBuf := bytes.NewBufferString(contents)
t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC) t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC)
upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil) upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil)
obj, err := f.Put(context.Background(), inBuf, upSrc) obj, err := f.Put(context.Background(), inBuf, upSrc)
require.NoError(t, err) require.NoError(t, err)
t.Cleanup(func() { cleanup = func() {
require.NoError(t, obj.Remove(context.Background())) require.NoError(t, obj.Remove(context.Background()))
}) }
return obj return obj, cleanup
} }
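Both sides of this hunk tear down the fixtures they create; the t.Cleanup form lets each helper register its own teardown, and registered functions run last-in, first-out after the test body finishes. A tiny self-contained illustration (standard library only):

package example

import "testing"

// TestCleanupOrder shows the LIFO ordering that makes nested
// helpers safe: the object is removed before the Fs it lives in.
func TestCleanupOrder(t *testing.T) {
	t.Cleanup(func() { t.Log("registered first, runs last") })
	t.Cleanup(func() { t.Log("registered second, runs first") })
	t.Log("test body runs before any cleanup")
}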
// Test the ObjectInfo // Test the ObjectInfo
@@ -52,9 +65,11 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
path = "_wrap" path = "_wrap"
} }
localFs := makeTempLocalFs(t) localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
obj := uploadFile(t, localFs, path, contents) obj, cleanupObj := uploadFile(t, localFs, path, contents)
defer cleanupObj()
// encrypt the data // encrypt the data
inBuf := bytes.NewBufferString(contents) inBuf := bytes.NewBufferString(contents)
@@ -68,7 +83,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
var oi fs.ObjectInfo = obj var oi fs.ObjectInfo = obj
if wrap { if wrap {
// wrap the object in an fs.ObjectUnwrapper if required // wrap the object in an fs.ObjectUnwrapper if required
oi = fs.NewOverrideRemote(oi, "new_remote") oi = testWrapper{oi}
} }
// wrap the object in a crypt for upload using the nonce we // wrap the object in a crypt for upload using the nonce we
@@ -76,9 +91,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
src := f.newObjectInfo(oi, nonce) src := f.newObjectInfo(oi, nonce)
// Test ObjectInfo methods // Test ObjectInfo methods
if !f.opt.NoDataEncryption { assert.Equal(t, int64(outBuf.Len()), src.Size())
assert.Equal(t, int64(outBuf.Len()), src.Size())
}
assert.Equal(t, f, src.Fs()) assert.Equal(t, f, src.Fs())
assert.NotEqual(t, path, src.Remote()) assert.NotEqual(t, path, src.Remote())
@@ -101,13 +114,16 @@ func testComputeHash(t *testing.T, f *Fs) {
t.Skipf("%v: does not support hashes", f.Fs) t.Skipf("%v: does not support hashes", f.Fs)
} }
localFs := makeTempLocalFs(t) localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
// Upload a file to localFs as a test object // Upload a file to localFs as a test object
localObj := uploadFile(t, localFs, path, contents) localObj, cleanupLocalObj := uploadFile(t, localFs, path, contents)
defer cleanupLocalObj()
// Upload the same data to the remote Fs also // Upload the same data to the remote Fs also
remoteObj := uploadFile(t, f, path, contents) remoteObj, cleanupRemoteObj := uploadFile(t, f, path, contents)
defer cleanupRemoteObj()
// Calculate the expected Hash of the remote object // Calculate the expected Hash of the remote object
computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType) computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType)

@@ -4,7 +4,6 @@ package crypt_test
import ( import (
"os" "os"
"path/filepath" "path/filepath"
"runtime"
"testing" "testing"
"github.com/rclone/rclone/backend/crypt" "github.com/rclone/rclone/backend/crypt"
@@ -47,7 +46,6 @@ func TestStandardBase32(t *testing.T) {
}, },
UnimplementableFsMethods: []string{"OpenWriterAt"}, UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"}, UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
}) })
} }
@@ -69,7 +67,6 @@ func TestStandardBase64(t *testing.T) {
}, },
UnimplementableFsMethods: []string{"OpenWriterAt"}, UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"}, UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
}) })
} }
@@ -91,7 +88,6 @@ func TestStandardBase32768(t *testing.T) {
}, },
UnimplementableFsMethods: []string{"OpenWriterAt"}, UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"}, UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
}) })
} }
@@ -113,7 +109,6 @@ func TestOff(t *testing.T) {
}, },
UnimplementableFsMethods: []string{"OpenWriterAt"}, UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"}, UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
}) })
} }
@@ -122,9 +117,6 @@ func TestObfuscate(t *testing.T) {
if *fstest.RemoteName != "" { if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set") t.Skip("Skipping as -remote set")
} }
if runtime.GOOS == "darwin" {
t.Skip("Skipping on macOS as obfuscating control characters makes filenames macOS can't cope with")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate") tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
name := "TestCrypt3" name := "TestCrypt3"
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
@@ -139,7 +131,6 @@ func TestObfuscate(t *testing.T) {
SkipBadWindowsCharacters: true, SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt"}, UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"}, UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
}) })
} }
@@ -148,9 +139,6 @@ func TestNoDataObfuscate(t *testing.T) {
if *fstest.RemoteName != "" { if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set") t.Skip("Skipping as -remote set")
} }
if runtime.GOOS == "darwin" {
t.Skip("Skipping on macOS as obfuscating control characters makes filenames macOS can't cope with")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate") tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
name := "TestCrypt4" name := "TestCrypt4"
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
@@ -166,6 +154,5 @@ func TestNoDataObfuscate(t *testing.T) {
SkipBadWindowsCharacters: true, SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt"}, UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"}, UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
}) })
} }

@@ -8,11 +8,11 @@ import "errors"
// Errors Unpad can return // Errors Unpad can return
var ( var (
ErrorPaddingNotFound = errors.New("bad PKCS#7 padding - not padded") ErrorPaddingNotFound = errors.New("Bad PKCS#7 padding - not padded")
ErrorPaddingNotAMultiple = errors.New("bad PKCS#7 padding - not a multiple of blocksize") ErrorPaddingNotAMultiple = errors.New("Bad PKCS#7 padding - not a multiple of blocksize")
ErrorPaddingTooLong = errors.New("bad PKCS#7 padding - too long") ErrorPaddingTooLong = errors.New("Bad PKCS#7 padding - too long")
ErrorPaddingTooShort = errors.New("bad PKCS#7 padding - too short") ErrorPaddingTooShort = errors.New("Bad PKCS#7 padding - too short")
ErrorPaddingNotAllTheSame = errors.New("bad PKCS#7 padding - not all the same") ErrorPaddingNotAllTheSame = errors.New("Bad PKCS#7 padding - not all the same")
) )
// Pad buf using PKCS#7 to a multiple of n. // Pad buf using PKCS#7 to a multiple of n.
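PKCS#7 pads with 1..n copies of the pad length itself, which is exactly why unpadding can fail in each of the ways enumerated above. A self-contained sketch of pad and unpad (not rclone's implementation, just the scheme):

package main

import (
	"bytes"
	"errors"
	"fmt"
)

var errBadPadding = errors.New("bad PKCS#7 padding")

// pad appends 1..n bytes, each equal to the pad count, so the
// result is a whole multiple of n.
func pad(n int, buf []byte) []byte {
	c := n - len(buf)%n
	return append(buf, bytes.Repeat([]byte{byte(c)}, c)...)
}

// unpad validates and strips padding added by pad.
func unpad(n int, buf []byte) ([]byte, error) {
	if len(buf) == 0 || len(buf)%n != 0 {
		return nil, errBadPadding // not padded / not a multiple
	}
	c := int(buf[len(buf)-1])
	if c == 0 || c > n {
		return nil, errBadPadding // too long / too short
	}
	for _, b := range buf[len(buf)-c:] {
		if int(b) != c {
			return nil, errBadPadding // not all the same
		}
	}
	return buf[:len(buf)-c], nil
}

func main() {
	padded := pad(16, []byte("hello"))
	fmt.Println(len(padded)) // 16
	orig, err := unpad(16, padded)
	fmt.Println(string(orig), err) // hello <nil>
}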

@@ -14,9 +14,9 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"mime" "mime"
"net/http" "net/http"
"os"
"path" "path"
"sort" "sort"
"strconv" "strconv"
@@ -50,7 +50,6 @@ import (
drive_v2 "google.golang.org/api/drive/v2" drive_v2 "google.golang.org/api/drive/v2"
drive "google.golang.org/api/drive/v3" drive "google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi" "google.golang.org/api/googleapi"
"google.golang.org/api/option"
) )
// Constants // Constants
@@ -277,7 +276,6 @@ Leave blank normally.
Fill in to access "Computers" folders (see docs), or for rclone to use Fill in to access "Computers" folders (see docs), or for rclone to use
a non root folder as its starting point. a non root folder as its starting point.
`, `,
Advanced: true,
}, { }, {
Name: "service_account_file", Name: "service_account_file",
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp, Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
@@ -451,11 +449,7 @@ If downloading a file returns the error "This file has been identified
as malware or spam and cannot be downloaded" with the error code as malware or spam and cannot be downloaded" with the error code
"cannotDownloadAbusiveFile" then supply this flag to rclone to "cannotDownloadAbusiveFile" then supply this flag to rclone to
indicate you acknowledge the risks of downloading the file and rclone indicate you acknowledge the risks of downloading the file and rclone
will download it anyway. will download it anyway.`,
Note that if you are using a service account it will need Manager
permission (not Content Manager) for this flag to work. If the SA
does not have the right permission, Google will just ignore the flag.`,
Advanced: true, Advanced: true,
}, { }, {
Name: "keep_revision_forever", Name: "keep_revision_forever",
@@ -570,27 +564,6 @@ If this is set then rclone will not show any dangling shortcuts in listings.
`, `,
Advanced: true, Advanced: true,
Default: false, Default: false,
}, {
Name: "resource_key",
Help: `Resource key for accessing a link-shared file.
If you need to access files shared with a link like this
https://drive.google.com/drive/folders/XXX?resourcekey=YYY&usp=sharing
Then you will need to use the first part "XXX" as the "root_folder_id"
and the second part "YYY" as the "resource_key" otherwise you will get
404 not found errors when trying to access the directory.
See: https://developers.google.com/drive/api/guides/resource-keys
This resource key requirement only applies to a subset of old files.
Note also that opening the folder once in the web interface (with the
user you've authenticated rclone with) seems to be enough so that the
resource key is not needed.
`,
Advanced: true,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,
@@ -652,7 +625,6 @@ type Options struct {
StopOnDownloadLimit bool `config:"stop_on_download_limit"` StopOnDownloadLimit bool `config:"stop_on_download_limit"`
SkipShortcuts bool `config:"skip_shortcuts"` SkipShortcuts bool `config:"skip_shortcuts"`
SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"` SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"`
ResourceKey string `config:"resource_key"`
Enc encoder.MultiEncoder `config:"encoding"` Enc encoder.MultiEncoder `config:"encoding"`
} }
@@ -678,7 +650,6 @@ type Fs struct {
grouping int32 // number of IDs to search at once in ListR - read with atomic grouping int32 // number of IDs to search at once in ListR - read with atomic
listRmu *sync.Mutex // protects listRempties listRmu *sync.Mutex // protects listRempties
listRempties map[string]struct{} // IDs of supposedly empty directories which triggered grouping disable listRempties map[string]struct{} // IDs of supposedly empty directories which triggered grouping disable
dirResourceKeys *sync.Map // map directory ID to resource key
} }
type baseObject struct { type baseObject struct {
@@ -761,9 +732,6 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
} else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" { } else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" {
fs.Errorf(f, "Received download limit error: %v", err) fs.Errorf(f, "Received download limit error: %v", err)
return false, fserrors.FatalError(err) return false, fserrors.FatalError(err)
} else if f.opt.StopOnUploadLimit && reason == "quotaExceeded" {
fs.Errorf(f, "Received upload limit error: %v", err)
return false, fserrors.FatalError(err)
} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" { } else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
fs.Errorf(f, "Received Shared Drive file limit error: %v", err) fs.Errorf(f, "Received Shared Drive file limit error: %v", err)
return false, fserrors.FatalError(err) return false, fserrors.FatalError(err)
@@ -833,7 +801,6 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
// We must not filter with parent when we try list "ROOT" with drive-shared-with-me // We must not filter with parent when we try list "ROOT" with drive-shared-with-me
// If we need to list file inside those shared folders, we must search it without sharedWithMe // If we need to list file inside those shared folders, we must search it without sharedWithMe
parentsQuery := bytes.NewBufferString("(") parentsQuery := bytes.NewBufferString("(")
var resourceKeys []string
for _, dirID := range dirIDs { for _, dirID := range dirIDs {
if dirID == "" { if dirID == "" {
continue continue
@@ -854,12 +821,7 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
} else { } else {
_, _ = fmt.Fprintf(parentsQuery, "'%s' in parents", dirID) _, _ = fmt.Fprintf(parentsQuery, "'%s' in parents", dirID)
} }
resourceKey, hasResourceKey := f.dirResourceKeys.Load(dirID)
if hasResourceKey {
resourceKeys = append(resourceKeys, fmt.Sprintf("%s/%s", dirID, resourceKey))
}
} }
resourceKeysHeader := strings.Join(resourceKeys, ",")
if parentsQuery.Len() > 1 { if parentsQuery.Len() > 1 {
_ = parentsQuery.WriteByte(')') _ = parentsQuery.WriteByte(')')
query = append(query, parentsQuery.String()) query = append(query, parentsQuery.String())
@@ -923,7 +885,7 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
} }
list.SupportsAllDrives(true) list.SupportsAllDrives(true)
list.IncludeItemsFromAllDrives(true) list.IncludeItemsFromAllDrives(true)
if f.isTeamDrive && !f.opt.SharedWithMe { if f.isTeamDrive {
list.DriveId(f.opt.TeamDriveID) list.DriveId(f.opt.TeamDriveID)
list.Corpora("drive") list.Corpora("drive")
} }
@@ -931,10 +893,6 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
if f.rootFolderID == "appDataFolder" { if f.rootFolderID == "appDataFolder" {
list.Spaces("appDataFolder") list.Spaces("appDataFolder")
} }
// Add resource Keys if necessary
if resourceKeysHeader != "" {
list.Header().Add("X-Goog-Drive-Resource-Keys", resourceKeysHeader)
}
fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.fileFields) fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.fileFields)
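The removed lines assemble every cached directory-ID/resource-key pair into one comma-separated value for the X-Goog-Drive-Resource-Keys request header. Just that assembly step, runnable, with invented IDs and keys:

package main

import (
	"fmt"
	"strings"
	"sync"
)

func main() {
	// Stand-in for the Fs's dirResourceKeys cache.
	var dirResourceKeys sync.Map
	dirResourceKeys.Store("dirA", "keyA")
	dirResourceKeys.Store("dirB", "keyB")

	var resourceKeys []string
	for _, dirID := range []string{"dirA", "dirB", "dirC"} {
		if key, ok := dirResourceKeys.Load(dirID); ok {
			resourceKeys = append(resourceKeys, fmt.Sprintf("%s/%s", dirID, key))
		}
	}
	// This joined string is what would be sent as the header value.
	fmt.Println(strings.Join(resourceKeys, ",")) // dirA/keyA,dirB/keyB
}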
@@ -1111,7 +1069,7 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
// try loading service account credentials from env variable, then from a file // try loading service account credentials from env variable, then from a file
if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" { if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServiceAccountFile)) loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
if err != nil { if err != nil {
return nil, fmt.Errorf("error opening service account credentials file: %w", err) return nil, fmt.Errorf("error opening service account credentials file: %w", err)
} }
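The os.ReadFile/ioutil.ReadFile swap is part of the Go 1.16 io/ioutil deprecation: each ioutil helper now has a preferred equivalent in io or os. The common replacements in one runnable sketch:

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

func main() {
	rc := io.NopCloser(bytes.NewReader([]byte("data"))) // was ioutil.NopCloser
	defer rc.Close()

	buf, err := io.ReadAll(rc) // was ioutil.ReadAll
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Println(string(buf)) // data

	_, err = os.ReadFile("does-not-exist") // was ioutil.ReadFile
	fmt.Println(err != nil)                // true
}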
@@ -1194,16 +1152,15 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
ci := fs.GetConfig(ctx) ci := fs.GetConfig(ctx)
f := &Fs{ f := &Fs{
name: name, name: name,
root: root, root: root,
opt: *opt, opt: *opt,
ci: ci, ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst))), pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst))),
m: m, m: m,
grouping: listRGrouping, grouping: listRGrouping,
listRmu: new(sync.Mutex), listRmu: new(sync.Mutex),
listRempties: make(map[string]struct{}), listRempties: make(map[string]struct{}),
dirResourceKeys: new(sync.Map),
} }
f.isTeamDrive = opt.TeamDriveID != "" f.isTeamDrive = opt.TeamDriveID != ""
f.fileFields = f.getFileFields() f.fileFields = f.getFileFields()
@@ -1213,18 +1170,17 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
WriteMimeType: true, WriteMimeType: true,
CanHaveEmptyDirectories: true, CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs, ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
FilterAware: true,
}).Fill(ctx, f) }).Fill(ctx, f)
// Create a new authorized Drive client. // Create a new authorized Drive client.
f.client = oAuthClient f.client = oAuthClient
f.svc, err = drive.NewService(context.Background(), option.WithHTTPClient(f.client)) f.svc, err = drive.New(f.client)
if err != nil { if err != nil {
return nil, fmt.Errorf("couldn't create Drive client: %w", err) return nil, fmt.Errorf("couldn't create Drive client: %w", err)
} }
if f.opt.V2DownloadMinSize >= 0 { if f.opt.V2DownloadMinSize >= 0 {
f.v2Svc, err = drive_v2.NewService(context.Background(), option.WithHTTPClient(f.client)) f.v2Svc, err = drive_v2.New(f.client)
if err != nil { if err != nil {
return nil, fmt.Errorf("couldn't create Drive v2 client: %w", err) return nil, fmt.Errorf("couldn't create Drive v2 client: %w", err)
} }
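drive.New and drive_v2.New are the deprecated constructors; drive.NewService with option.WithHTTPClient is the supported replacement this hunk toggles between. A minimal sketch, assuming client is an OAuth2-authorized *http.Client obtained elsewhere:

package main

import (
	"context"
	"log"
	"net/http"

	drive "google.golang.org/api/drive/v3"
	"google.golang.org/api/option"
)

func main() {
	client := &http.Client{} // placeholder; real code passes an authorized client
	svc, err := drive.NewService(context.Background(), option.WithHTTPClient(client))
	if err != nil {
		log.Fatalf("couldn't create Drive client: %v", err)
	}
	_ = svc // replaces the deprecated drive.New(client)
}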
@@ -1266,11 +1222,6 @@ func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, e
f.dirCache = dircache.New(f.root, f.rootFolderID, f) f.dirCache = dircache.New(f.root, f.rootFolderID, f)
// If resource key is set then cache it for the root folder id
if f.opt.ResourceKey != "" {
f.dirResourceKeys.Store(f.rootFolderID, f.opt.ResourceKey)
}
// Parse extensions // Parse extensions
if f.opt.Extensions != "" { if f.opt.Extensions != "" {
if f.opt.ExportExtensions != defaultExportExtensions { if f.opt.ExportExtensions != defaultExportExtensions {
@@ -2068,7 +2019,7 @@ func splitID(compositeID string) (actualID, shortcutID string) {
// isShortcutID returns true if compositeID refers to a shortcut // isShortcutID returns true if compositeID refers to a shortcut
func isShortcutID(compositeID string) bool { func isShortcutID(compositeID string) bool {
return strings.ContainsRune(compositeID, shortcutSeparator) return strings.IndexRune(compositeID, shortcutSeparator) >= 0
} }
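strings.ContainsRune(s, r) is defined as strings.IndexRune(s, r) >= 0, so this change is purely a readability rewrite. Demonstrated with a made-up separator rune:

package main

import (
	"fmt"
	"strings"
)

// shortcutSeparator is a stand-in for the backend's separator rune.
const shortcutSeparator = 0x1F

func main() {
	composite := "actualID" + string(rune(shortcutSeparator)) + "shortcutID"
	fmt.Println(strings.ContainsRune(composite, shortcutSeparator))   // true
	fmt.Println(strings.IndexRune(composite, shortcutSeparator) >= 0) // true, equivalent
}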
// actualID returns an actual ID from a composite ID // actualID returns an actual ID from a composite ID
@@ -2139,10 +2090,6 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, item *drive.File
case item.MimeType == driveFolderType: case item.MimeType == driveFolderType:
// cache the directory ID for later lookups // cache the directory ID for later lookups
f.dirCache.Put(remote, item.Id) f.dirCache.Put(remote, item.Id)
// cache the resource key for later lookups
if item.ResourceKey != "" {
f.dirResourceKeys.Store(item.Id, item.ResourceKey)
}
when, _ := time.Parse(timeFormatIn, item.ModifiedTime) when, _ := time.Parse(timeFormatIn, item.ModifiedTime)
d := fs.NewDir(remote, when).SetID(item.Id) d := fs.NewDir(remote, when).SetID(item.Id)
if len(item.Parents) > 0 { if len(item.Parents) > 0 {
@@ -2184,7 +2131,7 @@ func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Tim
// Put the object // Put the object
// //
// Copy the reader in to the new object which is returned. // Copy the reader in to the new object which is returned
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -2226,10 +2173,10 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
exportExt, _, _ = f.findExportFormatByMimeType(ctx, importMimeType) exportExt, _, _ = f.findExportFormatByMimeType(ctx, importMimeType)
if exportExt == "" { if exportExt == "" {
return nil, fmt.Errorf("no export format found for %q", importMimeType) return nil, fmt.Errorf("No export format found for %q", importMimeType)
} }
if exportExt != srcExt && !f.opt.AllowImportNameChange { if exportExt != srcExt && !f.opt.AllowImportNameChange {
return nil, fmt.Errorf("can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt) return nil, fmt.Errorf("Can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt)
} }
} }
} }
@@ -2418,9 +2365,9 @@ func (f *Fs) Precision() time.Duration {
// Copy src to this remote using server-side copy operations. // Copy src to this remote using server-side copy operations.
// //
// This is stored with the remote path given. // This is stored with the remote path given
// //
// It returns the destination Object and a possible error. // It returns the destination Object and a possible error
// //
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
@@ -2534,7 +2481,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// result of List() // result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error { func (f *Fs) Purge(ctx context.Context, dir string) error {
if f.opt.TrashedOnly { if f.opt.TrashedOnly {
return errors.New("can't purge with --drive-trashed-only, use delete if you want to selectively delete files") return errors.New("Can't purge with --drive-trashed-only. Use delete if you want to selectively delete files")
} }
return f.purgeCheck(ctx, dir, false) return f.purgeCheck(ctx, dir, false)
} }
@@ -2653,9 +2600,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
// Move src to this remote using server-side move operations. // Move src to this remote using server-side move operations.
// //
// This is stored with the remote path given. // This is stored with the remote path given
// //
// It returns the destination Object and a possible error. // It returns the destination Object and a possible error
// //
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
@@ -3000,12 +2947,12 @@ func (f *Fs) changeServiceAccountFile(ctx context.Context, file string) (err err
return fmt.Errorf("drive: failed when making oauth client: %w", err) return fmt.Errorf("drive: failed when making oauth client: %w", err)
} }
f.client = oAuthClient f.client = oAuthClient
f.svc, err = drive.NewService(context.Background(), option.WithHTTPClient(f.client)) f.svc, err = drive.New(f.client)
if err != nil { if err != nil {
return fmt.Errorf("couldn't create Drive client: %w", err) return fmt.Errorf("couldn't create Drive client: %w", err)
} }
if f.opt.V2DownloadMinSize >= 0 { if f.opt.V2DownloadMinSize >= 0 {
f.v2Svc, err = drive_v2.NewService(context.Background(), option.WithHTTPClient(f.client)) f.v2Svc, err = drive_v2.New(f.client)
if err != nil { if err != nil {
return fmt.Errorf("couldn't create Drive v2 client: %w", err) return fmt.Errorf("couldn't create Drive v2 client: %w", err)
} }
@@ -3294,7 +3241,7 @@ This will return a JSON list of objects like this
With the -o config parameter it will output the list in a format With the -o config parameter it will output the list in a format
suitable for adding to a config file to make aliases for all the suitable for adding to a config file to make aliases for all the
drives found and a combined drive. drives found.
[My Drive] [My Drive]
type = alias type = alias
@@ -3304,15 +3251,10 @@ drives found and a combined drive.
type = alias type = alias
remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=: remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:
[AllDrives]
type = combine
upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
Adding this to the rclone config file will cause those team drives to Adding this to the rclone config file will cause those team drives to
be accessible with the aliases shown. Any illegal characters will be be accessible with the aliases shown. This may require manual editing
substituted with "_" and duplicate names will have numbers suffixed. of the names.
It will also add a remote called AllDrives which shows all the shared
drives combined into one directory tree.
`, `,
}, { }, {
Name: "untrash", Name: "untrash",
@@ -3326,9 +3268,9 @@ This takes an optional directory to trash which make this easier to
use via the API. use via the API.
rclone backend untrash drive:directory rclone backend untrash drive:directory
rclone backend --interactive untrash drive:directory subdir rclone backend -i untrash drive:directory subdir
Use the --interactive/-i or --dry-run flag to see what would be restored before restoring it. Use the -i flag to see what would be restored before restoring it.
Result: Result:
@@ -3358,14 +3300,8 @@ component will be used as the file name.
If the destination is a drive backend then server-side copying will be If the destination is a drive backend then server-side copying will be
attempted if possible. attempted if possible.
Use the --interactive/-i or --dry-run flag to see what would be copied before copying. Use the -i flag to see what would be copied before copying.
`, `,
}, {
Name: "exportformats",
Short: "Dump the export formats for debug purposes",
}, {
Name: "importformats",
Short: "Dump the import formats for debug purposes",
}} }}
// Command the backend to run a named command // Command the backend to run a named command
@@ -3385,7 +3321,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
out["service_account_file"] = f.opt.ServiceAccountFile out["service_account_file"] = f.opt.ServiceAccountFile
} }
if _, ok := opt["chunk_size"]; ok { if _, ok := opt["chunk_size"]; ok {
out["chunk_size"] = f.opt.ChunkSize.String() out["chunk_size"] = fmt.Sprintf("%s", f.opt.ChunkSize)
} }
return out, nil return out, nil
case "set": case "set":
@@ -3402,11 +3338,11 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
} }
if chunkSize, ok := opt["chunk_size"]; ok { if chunkSize, ok := opt["chunk_size"]; ok {
chunkSizeMap := make(map[string]string) chunkSizeMap := make(map[string]string)
chunkSizeMap["previous"] = f.opt.ChunkSize.String() chunkSizeMap["previous"] = fmt.Sprintf("%s", f.opt.ChunkSize)
if err = f.changeChunkSize(chunkSize); err != nil { if err = f.changeChunkSize(chunkSize); err != nil {
return out, err return out, err
} }
chunkSizeString := f.opt.ChunkSize.String() chunkSizeString := fmt.Sprintf("%s", f.opt.ChunkSize)
f.m.Set("chunk_size", chunkSizeString) f.m.Set("chunk_size", chunkSizeString)
chunkSizeMap["current"] = chunkSizeString chunkSizeMap["current"] = chunkSizeString
out["chunk_size"] = chunkSizeMap out["chunk_size"] = chunkSizeMap
@@ -3436,27 +3372,12 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
} }
if _, ok := opt["config"]; ok { if _, ok := opt["config"]; ok {
lines := []string{} lines := []string{}
upstreams := []string{} for _, drive := range drives {
names := make(map[string]struct{}, len(drives))
for i, drive := range drives {
name := fspath.MakeConfigName(drive.Name)
for {
if _, found := names[name]; !found {
break
}
name += fmt.Sprintf("-%d", i)
}
names[name] = struct{}{}
lines = append(lines, "") lines = append(lines, "")
lines = append(lines, fmt.Sprintf("[%s]", name)) lines = append(lines, fmt.Sprintf("[%s]", drive.Name))
lines = append(lines, "type = alias") lines = append(lines, fmt.Sprintf("type = alias"))
lines = append(lines, fmt.Sprintf("remote = %s,team_drive=%s,root_folder_id=:", f.name, drive.Id)) lines = append(lines, fmt.Sprintf("remote = %s,team_drive=%s,root_folder_id=:", f.name, drive.Id))
upstreams = append(upstreams, fmt.Sprintf(`"%s=%s:"`, name, name))
} }
lines = append(lines, "")
lines = append(lines, "[AllDrives]")
lines = append(lines, "type = combine")
lines = append(lines, fmt.Sprintf("upstreams = %s", strings.Join(upstreams, " ")))
return lines, nil return lines, nil
} }
return drives, nil return drives, nil
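The rewritten loop makes each generated alias name unique by appending the drive's index until there is no collision (the character sanitisation done by fspath.MakeConfigName is omitted here). The dedup step in isolation, with invented names:

package main

import "fmt"

func main() {
	driveNames := []string{"My Drive", "Test Drive", "My Drive"}

	names := make(map[string]struct{}, len(driveNames))
	for i, name := range driveNames {
		for {
			if _, found := names[name]; !found {
				break
			}
			name += fmt.Sprintf("-%d", i) // suffix until unique
		}
		names[name] = struct{}{}
		fmt.Println(name)
	}
	// Output:
	// My Drive
	// Test Drive
	// My Drive-2
}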
@@ -3479,10 +3400,6 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
} }
} }
return nil, nil return nil, nil
case "exportformats":
return f.exportFormats(ctx), nil
case "importformats":
return f.importFormats(ctx), nil
default: default:
return nil, fs.ErrorCommandNotFound return nil, fs.ErrorCommandNotFound
} }
@@ -3532,6 +3449,12 @@ func (o *baseObject) Size() int64 {
return o.bytes return o.bytes
} }
// getRemoteInfo returns a drive.File for the remote
func (f *Fs) getRemoteInfo(ctx context.Context, remote string) (info *drive.File, err error) {
info, _, _, _, _, err = f.getRemoteInfoWithExport(ctx, remote)
return
}
// getRemoteInfoWithExport returns a drive.File and the export settings for the remote // getRemoteInfoWithExport returns a drive.File and the export settings for the remote
func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) ( func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) { info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) {
@@ -3572,6 +3495,7 @@ func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
// ModTime returns the modification time of the object // ModTime returns the modification time of the object
// //
//
// It attempts to read the objects mtime and if that isn't present the // It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers // LastModified returned in the http headers
func (o *baseObject) ModTime(ctx context.Context) time.Time { func (o *baseObject) ModTime(ctx context.Context) time.Time {
@@ -3715,7 +3639,7 @@ func (o *baseObject) open(ctx context.Context, url string, options ...fs.OpenOpt
url += "acknowledgeAbuse=true" url += "acknowledgeAbuse=true"
_, res, err = o.httpResponse(ctx, url, "GET", options) _, res, err = o.httpResponse(ctx, url, "GET", options)
} else { } else {
err = fmt.Errorf("use the --drive-acknowledge-abuse flag to download this file: %w", err) err = fmt.Errorf("Use the --drive-acknowledge-abuse flag to download this file: %w", err)
} }
} }
if err != nil { if err != nil {
@@ -3802,7 +3726,7 @@ func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.
data = data[:limit] data = data[:limit]
} }
return io.NopCloser(bytes.NewReader(data)), nil return ioutil.NopCloser(bytes.NewReader(data)), nil
} }
func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader, func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
@@ -3828,7 +3752,7 @@ func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadM
// Update the already existing object // Update the already existing object
// //
// Copy the reader into the object updating modTime and size. // Copy the reader into the object updating modTime and size
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {

@@ -7,6 +7,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"mime" "mime"
"os" "os"
"path" "path"
@@ -18,7 +19,6 @@ import (
_ "github.com/rclone/rclone/backend/local" _ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter" "github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/sync" "github.com/rclone/rclone/fs/sync"
@@ -28,7 +28,6 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"google.golang.org/api/drive/v3" "google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi"
) )
func TestDriveScopes(t *testing.T) { func TestDriveScopes(t *testing.T) {
@@ -77,7 +76,7 @@ var additionalMimeTypes = map[string]string{
// Load the example export formats into exportFormats for testing // Load the example export formats into exportFormats for testing
func TestInternalLoadExampleFormats(t *testing.T) { func TestInternalLoadExampleFormats(t *testing.T) {
fetchFormatsOnce.Do(func() {}) fetchFormatsOnce.Do(func() {})
buf, err := os.ReadFile(filepath.FromSlash("test/about.json")) buf, err := ioutil.ReadFile(filepath.FromSlash("test/about.json"))
var about struct { var about struct {
ExportFormats map[string][]string `json:"exportFormats,omitempty"` ExportFormats map[string][]string `json:"exportFormats,omitempty"`
ImportFormats map[string][]string `json:"importFormats,omitempty"` ImportFormats map[string][]string `json:"importFormats,omitempty"`
@@ -191,60 +190,6 @@ func TestExtensionsForImportFormats(t *testing.T) {
} }
} }
func (f *Fs) InternalTestShouldRetry(t *testing.T) {
ctx := context.Background()
gatewayTimeout := googleapi.Error{
Code: 503,
}
timeoutRetry, timeoutError := f.shouldRetry(ctx, &gatewayTimeout)
assert.True(t, timeoutRetry)
assert.Equal(t, &gatewayTimeout, timeoutError)
generic403 := googleapi.Error{
Code: 403,
}
rLEItem := googleapi.ErrorItem{
Reason: "rateLimitExceeded",
Message: "User rate limit exceeded.",
}
generic403.Errors = append(generic403.Errors, rLEItem)
oldStopUpload := f.opt.StopOnUploadLimit
oldStopDownload := f.opt.StopOnDownloadLimit
f.opt.StopOnUploadLimit = true
f.opt.StopOnDownloadLimit = true
defer func() {
f.opt.StopOnUploadLimit = oldStopUpload
f.opt.StopOnDownloadLimit = oldStopDownload
}()
expectedRLError := fserrors.FatalError(&generic403)
rateLimitRetry, rateLimitErr := f.shouldRetry(ctx, &generic403)
assert.False(t, rateLimitRetry)
assert.Equal(t, rateLimitErr, expectedRLError)
dQEItem := googleapi.ErrorItem{
Reason: "downloadQuotaExceeded",
}
generic403.Errors[0] = dQEItem
expectedDQError := fserrors.FatalError(&generic403)
downloadQuotaRetry, downloadQuotaError := f.shouldRetry(ctx, &generic403)
assert.False(t, downloadQuotaRetry)
assert.Equal(t, downloadQuotaError, expectedDQError)
tDFLEItem := googleapi.ErrorItem{
Reason: "teamDriveFileLimitExceeded",
}
generic403.Errors[0] = tDFLEItem
expectedTDFLError := fserrors.FatalError(&generic403)
teamDriveFileLimitRetry, teamDriveFileLimitError := f.shouldRetry(ctx, &generic403)
assert.False(t, teamDriveFileLimitRetry)
assert.Equal(t, teamDriveFileLimitError, expectedTDFLError)
qEItem := googleapi.ErrorItem{
Reason: "quotaExceeded",
}
generic403.Errors[0] = qEItem
expectedQuotaError := fserrors.FatalError(&generic403)
quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403)
assert.False(t, quotaExceededRetry)
assert.Equal(t, quotaExceededError, expectedQuotaError)
}
func (f *Fs) InternalTestDocumentImport(t *testing.T) { func (f *Fs) InternalTestDocumentImport(t *testing.T) {
oldAllow := f.opt.AllowImportNameChange oldAllow := f.opt.AllowImportNameChange
f.opt.AllowImportNameChange = true f.opt.AllowImportNameChange = true
@@ -433,9 +378,9 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
// Make some objects, one in a subdir // Make some objects, one in a subdir
contents := random.String(100) contents := random.String(100)
file1 := fstest.NewItem("trashDir/toBeTrashed", contents, time.Now()) file1 := fstest.NewItem("trashDir/toBeTrashed", contents, time.Now())
obj1 := fstests.PutTestContents(ctx, t, f, &file1, contents, false) _, obj1 := fstests.PutTestContents(ctx, t, f, &file1, contents, false)
file2 := fstest.NewItem("trashDir/subdir/toBeTrashed", contents, time.Now()) file2 := fstest.NewItem("trashDir/subdir/toBeTrashed", contents, time.Now())
_ = fstests.PutTestContents(ctx, t, f, &file2, contents, false) _, _ = fstests.PutTestContents(ctx, t, f, &file2, contents, false)
// Check objects // Check objects
checkObjects := func() { checkObjects := func() {
@@ -517,9 +462,6 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
// TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery // TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
func (f *Fs) InternalTestAgeQuery(t *testing.T) { func (f *Fs) InternalTestAgeQuery(t *testing.T) {
// Check set up for filtering
assert.True(t, f.Features().FilterAware)
opt := &filter.Opt{} opt := &filter.Opt{}
err := opt.MaxAge.Set("1h") err := opt.MaxAge.Set("1h")
assert.NoError(t, err) assert.NoError(t, err)
@@ -554,7 +496,7 @@ func (f *Fs) InternalTestAgeQuery(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
file1 := fstest.Item{ModTime: time.Now(), Path: "agequery.txt"} file1 := fstest.Item{ModTime: time.Now(), Path: "agequery.txt"}
_ = fstests.PutTestContents(defCtx, t, tempFs1, &file1, "abcxyz", true) _, _ = fstests.PutTestContents(defCtx, t, tempFs1, &file1, "abcxyz", true)
// validate sync/copy // validate sync/copy
const timeQuery = "(modifiedTime >= '" const timeQuery = "(modifiedTime >= '"
@@ -603,7 +545,6 @@ func (f *Fs) InternalTest(t *testing.T) {
t.Run("UnTrash", f.InternalTestUnTrash) t.Run("UnTrash", f.InternalTestUnTrash)
t.Run("CopyID", f.InternalTestCopyID) t.Run("CopyID", f.InternalTestCopyID)
t.Run("AgeQuery", f.InternalTestAgeQuery) t.Run("AgeQuery", f.InternalTestAgeQuery)
t.Run("ShouldRetry", f.InternalTestShouldRetry)
} }
var _ fstests.InternalTester = (*Fs)(nil) var _ fstests.InternalTester = (*Fs)(nil)

@@ -304,12 +304,9 @@ outer:
// //
// Can be called from atexit handler // Can be called from atexit handler
func (b *batcher) Shutdown() { func (b *batcher) Shutdown() {
if !b.Batching() {
return
}
b.shutOnce.Do(func() { b.shutOnce.Do(func() {
atexit.Unregister(b.atexit) atexit.Unregister(b.atexit)
fs.Infof(b.f, "Committing uploads - please wait...") fs.Infof(b.f, "Commiting uploads - please wait...")
// show that batcher is shutting down // show that batcher is shutting down
close(b.closed) close(b.closed)
// quit the commitLoop by sending a quitRequest message // quit the commitLoop by sending a quitRequest message
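Shutdown leans on two standard concurrency idioms: sync.Once makes the shutdown path idempotent, and closing a channel broadcasts completion to every goroutine that receives on it. Compactly:

package main

import (
	"fmt"
	"sync"
)

type batcher struct {
	shutOnce sync.Once
	closed   chan struct{}
}

func (b *batcher) Shutdown() {
	b.shutOnce.Do(func() {
		fmt.Println("committing uploads - please wait...")
		close(b.closed) // happens at most once, so never panics
	})
}

func main() {
	b := &batcher{closed: make(chan struct{})}
	b.Shutdown()
	b.Shutdown() // no-op: the Once already fired
	<-b.closed   // receives immediately once closed
	fmt.Println("shut down")
}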

@@ -268,7 +268,7 @@ default based on the batch_mode in use.
Advanced: true, Advanced: true,
}, { }, {
Name: "batch_commit_timeout", Name: "batch_commit_timeout",
Help: `Max time to wait for a batch to finish committing`, Help: `Max time to wait for a batch to finish comitting`,
Default: fs.Duration(10 * time.Minute), Default: fs.Duration(10 * time.Minute),
Advanced: true, Advanced: true,
}, { }, {
@@ -472,12 +472,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
args := team.NewMembersGetInfoArgs(members) args := team.NewMembersGetInfoArgs(members)
memberIds, err := f.team.MembersGetInfo(args) memberIds, err := f.team.MembersGetInfo(args)
if err != nil { if err != nil {
return nil, fmt.Errorf("invalid dropbox team member: %q: %w", opt.Impersonate, err) return nil, fmt.Errorf("invalid dropbox team member: %q: %w", opt.Impersonate, err)
} }
if len(memberIds) == 0 || memberIds[0].MemberInfo == nil || memberIds[0].MemberInfo.Profile == nil {
return nil, fmt.Errorf("dropbox team member not found: %q", opt.Impersonate)
}
cfg.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId cfg.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
} }
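The added guard matters because any link in memberIds[0].MemberInfo.Profile can be missing, and an unchecked dereference panics instead of returning a useful error. The same defensive chain with hypothetical types:

package main

import "fmt"

type profile struct{ TeamMemberID string }
type memberInfo struct{ Profile *profile }
type member struct{ MemberInfo *memberInfo }

// memberID validates every pointer on the path before using it.
func memberID(members []member, impersonate string) (string, error) {
	if len(members) == 0 || members[0].MemberInfo == nil || members[0].MemberInfo.Profile == nil {
		return "", fmt.Errorf("dropbox team member not found: %q", impersonate)
	}
	return members[0].MemberInfo.Profile.TeamMemberID, nil
}

func main() {
	_, err := memberID(nil, "user@example.com")
	fmt.Println(err) // dropbox team member not found: "user@example.com"
}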
@@ -925,7 +923,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Put the object // Put the object
// //
// Copy the reader in to the new object which is returned. // Copy the reader in to the new object which is returned
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -1044,9 +1042,9 @@ func (f *Fs) Precision() time.Duration {
// Copy src to this remote using server-side copy operations. // Copy src to this remote using server-side copy operations.
// //
// This is stored with the remote path given. // This is stored with the remote path given
// //
// It returns the destination Object and a possible error. // It returns the destination Object and a possible error
// //
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
@@ -1105,9 +1103,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
// Move src to this remote using server-side move operations. // Move src to this remote using server-side move operations.
// //
// This is stored with the remote path given. // This is stored with the remote path given
// //
// It returns the destination Object and a possible error. // It returns the destination Object and a possible error
// //
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
@@ -1199,7 +1197,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return return
} }
if len(listRes.Links) == 0 { if len(listRes.Links) == 0 {
err = errors.New("sharing link already exists, but list came back empty") err = errors.New("Dropbox says the sharing link already exists, but list came back empty")
return return
} }
linkRes = listRes.Links[0] linkRes = listRes.Links[0]
@@ -1211,7 +1209,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
case *sharing.FolderLinkMetadata: case *sharing.FolderLinkMetadata:
link = res.Url link = res.Url
default: default:
err = fmt.Errorf("don't know how to extract link, response has unknown format: %T", res) err = fmt.Errorf("Don't know how to extract link, response has unknown format: %T", res)
} }
} }
return return
@@ -1435,7 +1433,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
} }
if entryPath != "" { if entryPath != "" {
notifyFunc(f.opt.Enc.ToStandardPath(entryPath), entryType) notifyFunc(entryPath, entryType)
} }
} }
if !changeList.HasMore { if !changeList.HasMore {
@@ -1669,7 +1667,7 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
correctOffset := uErr.EndpointError.IncorrectOffset.CorrectOffset correctOffset := uErr.EndpointError.IncorrectOffset.CorrectOffset
delta := int64(correctOffset) - int64(cursor.Offset) delta := int64(correctOffset) - int64(cursor.Offset)
skip += delta skip += delta
what := fmt.Sprintf("incorrect offset error received: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip) what := fmt.Sprintf("incorrect offset error receved: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
if skip < 0 { if skip < 0 {
return false, fmt.Errorf("can't seek backwards to correct offset: %s", what) return false, fmt.Errorf("can't seek backwards to correct offset: %s", what)
} else if skip == chunkSize { } else if skip == chunkSize {
@@ -1697,9 +1695,6 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
if size > 0 { if size > 0 {
// if size is known, check if next chunk is final // if size is known, check if next chunk is final
appendArg.Close = uint64(size)-in.BytesRead() <= uint64(chunkSize) appendArg.Close = uint64(size)-in.BytesRead() <= uint64(chunkSize)
if in.BytesRead() > uint64(size) {
return nil, fmt.Errorf("expected %d bytes in input, but have read %d so far", size, in.BytesRead())
}
} else { } else {
// if size is unknown, upload as long as we can read full chunks from the reader // if size is unknown, upload as long as we can read full chunks from the reader
appendArg.Close = in.BytesRead()-cursor.Offset < uint64(chunkSize) appendArg.Close = in.BytesRead()-cursor.Offset < uint64(chunkSize)
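On an incorrect-offset error the uploader realigns with the server by computing how many already-sent bytes to skip. The arithmetic in isolation, with invented offsets:

package main

import "fmt"

func main() {
	var (
		sentOffset    uint64 = 4194304 // where the client's cursor is
		correctOffset uint64 = 2097152 // where the server says it should be
		skip          int64
	)
	delta := int64(correctOffset) - int64(sentOffset)
	skip += delta
	fmt.Printf("sent %d, need %d, skip %d\n", sentOffset, correctOffset, skip)
	if skip < 0 {
		// The reader has no way to rewind, so this case is fatal.
		fmt.Println("can't seek backwards to correct offset")
	}
}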
@@ -1763,7 +1758,7 @@ func checkPathLength(name string) (err error) {
// Update the already existing object // Update the already existing object
// //
// Copy the reader into the object updating modTime and size. // Copy the reader into the object updating modTime and size
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {

@@ -28,44 +28,25 @@ var retryErrorCodes = []int{
509, // Bandwidth Limit Exceeded 509, // Bandwidth Limit Exceeded
} }
var errorRegex = regexp.MustCompile(`#\d{1,3}`)
func parseFichierError(err error) int {
matches := errorRegex.FindStringSubmatch(err.Error())
if len(matches) == 0 {
return 0
}
code, err := strconv.Atoi(matches[0][1:]) // strip the leading '#' so Atoi can parse
if err != nil {
fs.Debugf(nil, "failed parsing fichier error: %v", err)
return 0
}
return code
}
// shouldRetry returns a boolean as to whether this resp and err // shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience // deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) { if fserrors.ContextError(ctx, &err) {
return false, err return false, err
} }
// 1Fichier uses HTTP error code 403 (Forbidden) for all kinds of errors with // Detect this error which the integration tests provoke
// responses looking like this: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}" // error HTTP error 403 (403 Forbidden) returned body: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"
// //
// We attempt to parse the actual 1Fichier error code from this body and handle it accordingly // https://1fichier.com/api.html
// Most importantly #374 (Flood detected: IP locked) which the integration tests provoke //
// The list below is far from complete and should be expanded if we see any more error codes. // file/ls.cgi is limited :
if err != nil { //
switch parseFichierError(err) { // Warning (can be changed in case of abuses) :
case 93: // List all files of the account is limited to 1 request per hour.
return false, err // No such user // List folders is limited to 5 000 results and 1 request per folder per 30s.
case 186: if err != nil && strings.Contains(err.Error(), "Flood detected") {
return false, err // IP blocked? fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
case 374: time.Sleep(30 * time.Second)
fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
time.Sleep(30 * time.Second)
default:
}
} }
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
} }
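parseFichierError pulls the numeric code out of message text such as "Flood detected: IP Locked #374"; note the leading '#' has to be stripped before strconv.Atoi will accept the match. Standalone:

package main

import (
	"errors"
	"fmt"
	"regexp"
	"strconv"
)

var errorRegex = regexp.MustCompile(`#\d{1,3}`)

func parseCode(err error) int {
	match := errorRegex.FindString(err.Error())
	if match == "" {
		return 0
	}
	code, convErr := strconv.Atoi(match[1:]) // drop the '#'
	if convErr != nil {
		return 0
	}
	return code
}

func main() {
	err := errors.New(`Flood detected: IP Locked #374`)
	fmt.Println(parseCode(err)) // 374
}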
@@ -487,7 +468,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
fileName = f.opt.Enc.FromStandardName(fileName) fileName = f.opt.Enc.FromStandardName(fileName)
if len(uploadID) > 10 || !isAlphaNumeric(uploadID) { if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
return nil, errors.New("invalid UploadID") return nil, errors.New("Invalid UploadID")
} }
opts := rest.Opts{ opts := rest.Opts{
@@ -529,7 +510,7 @@ func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (re
// fs.Debugf(f, "Ending File Upload `%s`", uploadID) // fs.Debugf(f, "Ending File Upload `%s`", uploadID)
if len(uploadID) > 10 || !isAlphaNumeric(uploadID) { if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
return nil, errors.New("invalid UploadID") return nil, errors.New("Invalid UploadID")
} }
opts := rest.Opts{ opts := rest.Opts{

@@ -1,4 +1,3 @@
// Package fichier provides an interface to the 1Fichier storage system.
package fichier package fichier
import ( import (
@@ -295,7 +294,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
path, ok := f.dirCache.GetInv(directoryID) path, ok := f.dirCache.GetInv(directoryID)
if !ok { if !ok {
return nil, errors.New("cannot find dir in dircache") return nil, errors.New("Cannot find dir in dircache")
} }
return f.newObjectFromFile(ctx, path, file), nil return f.newObjectFromFile(ctx, path, file), nil

@@ -84,7 +84,7 @@ type CopyFileResponse struct {
URLs []FileCopy `json:"urls"` URLs []FileCopy `json:"urls"`
} }
// FileCopy is used in the CopyFileResponse // FileCopy is used in the the CopyFileResponse
type FileCopy struct { type FileCopy struct {
FromURL string `json:"from_url"` FromURL string `json:"from_url"`
ToURL string `json:"to_url"` ToURL string `json:"to_url"`

@@ -19,7 +19,7 @@ const (
timeFormatJSON = `"` + timeFormatParameters + `"` timeFormatJSON = `"` + timeFormatParameters + `"`
) )
// Time represents date and time information for the // Time represents represents date and time information for the
// filefabric API // filefabric API
type Time time.Time type Time time.Time
@@ -95,7 +95,7 @@ type Status struct {
// Warning string `json:"warning"` // obsolete // Warning string `json:"warning"` // obsolete
} }
// Status satisfies the error interface // Status statisfies the error interface
func (e *Status) Error() string { func (e *Status) Error() string {
return fmt.Sprintf("%s (%s)", e.Message, e.Code) return fmt.Sprintf("%s (%s)", e.Message, e.Code)
} }

@@ -20,6 +20,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"net/url" "net/url"
"path" "path"
@@ -149,7 +150,7 @@ type Fs struct {
opt Options // parsed options opt Options // parsed options
features *fs.Features // optional features features *fs.Features // optional features
m configmap.Mapper // to save config m configmap.Mapper // to save config
srv *rest.Client // the connection to the server srv *rest.Client // the connection to the one drive server
dirCache *dircache.DirCache // Map of directory path to directory id dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls pacer *fs.Pacer // pacer for API calls
tokenMu sync.Mutex // hold when reading the token tokenMu sync.Mutex // hold when reading the token
@@ -372,7 +373,7 @@ type params map[string]interface{}
// rpc calls the rpc.php method of the SME file fabric // rpc calls the rpc.php method of the SME file fabric
// //
// This is an entry point to all the method calls. // This is an entry point to all the method calls
// //
// If result is nil then resp.Body will need closing // If result is nil then resp.Body will need closing
func (f *Fs) rpc(ctx context.Context, function string, p params, result api.OKError, options []fs.OpenOption) (resp *http.Response, err error) { func (f *Fs) rpc(ctx context.Context, function string, p params, result api.OKError, options []fs.OpenOption) (resp *http.Response, err error) {
@@ -489,7 +490,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Root is a dir - cache its ID // Root is a dir - cache its ID
f.dirCache.Put(f.root, info.ID) f.dirCache.Put(f.root, info.ID)
} }
//} else { } else {
// Root is not found so a directory // Root is not found so a directory
} }
} }
@@ -677,7 +678,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Creates from the parameters passed in a half finished Object which // Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it // must have setMetaData called on it
// //
// Returns the object, leaf, directoryID and error. // Returns the object, leaf, directoryID and error
// //
// Used to create new objects // Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) { func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
@@ -696,7 +697,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
// Put the object // Put the object
// //
// Copy the reader in to the new object which is returned. // Copy the reader in to the new object which is returned
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -782,9 +783,9 @@ func (f *Fs) Precision() time.Duration {
// Copy src to this remote using server side copy operations. // Copy src to this remote using server side copy operations.
// //
// This is stored with the remote path given. // This is stored with the remote path given
// //
// It returns the destination Object and a possible error. // It returns the destination Object and a possible error
// //
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
@@ -842,7 +843,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false) return f.purgeCheck(ctx, dir, false)
} }
// Wait for the background task to complete if necessary // Wait for the the background task to complete if necessary
func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID api.String) (err error) { func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID api.String) (err error) {
if taskID == "" || taskID == "0" { if taskID == "" || taskID == "0" {
// No task to wait for // No task to wait for
@@ -955,9 +956,9 @@ func (f *Fs) move(ctx context.Context, isDir bool, id, oldLeaf, newLeaf, oldDire
// Move src to this remote using server side move operations. // Move src to this remote using server side move operations.
// //
// This is stored with the remote path given. // This is stored with the remote path given
// //
// It returns the destination Object and a possible error. // It returns the destination Object and a possible error
// //
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
@@ -1134,6 +1135,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// ModTime returns the modification time of the object // ModTime returns the modification time of the object
// //
//
// It attempts to read the objects mtime and if that isn't present the // It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers // LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time { func (o *Object) ModTime(ctx context.Context) time.Time {
@@ -1185,7 +1187,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, errors.New("can't download - no id") return nil, errors.New("can't download - no id")
} }
if o.contentType == emptyMimeType { if o.contentType == emptyMimeType {
return io.NopCloser(bytes.NewReader([]byte{})), nil return ioutil.NopCloser(bytes.NewReader([]byte{})), nil
} }
fs.FixRangeOption(options, o.size) fs.FixRangeOption(options, o.size)
resp, err := o.fs.rpc(ctx, "getFile", params{ resp, err := o.fs.rpc(ctx, "getFile", params{
@@ -1199,7 +1201,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Update the object with the contents of the io.Reader, modTime and size // Update the object with the contents of the io.Reader, modTime and size
// //
// If existing is set then it updates the object rather than creating a new one. // If existing is set then it updates the object rather than creating a new one
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {

@@ -15,7 +15,7 @@ import (
"sync" "sync"
"time" "time"
"github.com/rclone/ftp" "github.com/jlaffaye/ftp"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config"
@@ -45,7 +45,7 @@ const (
func init() { func init() {
fs.Register(&fs.RegInfo{ fs.Register(&fs.RegInfo{
Name: "ftp", Name: "ftp",
Description: "FTP", Description: "FTP Connection",
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "host", Name: "host",
@@ -70,7 +70,7 @@ func init() {
When using implicit FTP over TLS the client connects using TLS When using implicit FTP over TLS the client connects using TLS
right from the start which breaks compatibility with right from the start which breaks compatibility with
non-TLS-aware servers. This is usually served over port 990 rather non-TLS-aware servers. This is usually served over port 990 rather
than port 21. Cannot be used in combination with explicit FTPS.`, than port 21. Cannot be used in combination with explicit FTP.`,
Default: false, Default: false,
}, { }, {
Name: "explicit_tls", Name: "explicit_tls",
@@ -78,25 +78,11 @@ than port 21. Cannot be used in combination with explicit FTPS.`,
When using explicit FTP over TLS the client explicitly requests When using explicit FTP over TLS the client explicitly requests
security from the server in order to upgrade a plain text connection security from the server in order to upgrade a plain text connection
to an encrypted one. Cannot be used in combination with implicit FTPS.`, to an encrypted one. Cannot be used in combination with implicit FTP.`,
Default: false, Default: false,
}, { }, {
Name: "concurrency", Name: "concurrency",
Help: strings.Replace(`Maximum number of FTP simultaneous connections, 0 for unlimited. Help: "Maximum number of FTP simultaneous connections, 0 for unlimited.",
Note that setting this is very likely to cause deadlocks so it should
be used with care.
If you are doing a sync or copy then make sure concurrency is one more
than the sum of |--transfers| and |--checkers|.
If you use |--check-first| then it just needs to be one more than the
maximum of |--checkers| and |--transfers|.
So for |concurrency 3| you'd use |--checkers 2 --transfers 2
--check-first| or |--checkers 1 --transfers 1|.
`, "|", "`", -1),
Default: 0, Default: 0,
Advanced: true, Advanced: true,
}, { }, {
@@ -124,11 +110,6 @@ So for |concurrency 3| you'd use |--checkers 2 --transfers 2
Help: "Use MDTM to set modification time (VsFtpd quirk)", Help: "Use MDTM to set modification time (VsFtpd quirk)",
Default: false, Default: false,
Advanced: true, Advanced: true,
}, {
Name: "force_list_hidden",
Help: "Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD.",
Default: false,
Advanced: true,
}, { }, {
Name: "idle_timeout", Name: "idle_timeout",
Default: fs.Duration(60 * time.Second), Default: fs.Duration(60 * time.Second),
@@ -210,7 +191,6 @@ type Options struct {
DisableMLSD bool `config:"disable_mlsd"` DisableMLSD bool `config:"disable_mlsd"`
DisableUTF8 bool `config:"disable_utf8"` DisableUTF8 bool `config:"disable_utf8"`
WritingMDTM bool `config:"writing_mdtm"` WritingMDTM bool `config:"writing_mdtm"`
ForceListHidden bool `config:"force_list_hidden"`
IdleTimeout fs.Duration `config:"idle_timeout"` IdleTimeout fs.Duration `config:"idle_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"` CloseTimeout fs.Duration `config:"close_timeout"`
ShutTimeout fs.Duration `config:"shut_timeout"` ShutTimeout fs.Duration `config:"shut_timeout"`
@@ -336,44 +316,14 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
fs.Debugf(f, "Connecting to FTP server") fs.Debugf(f, "Connecting to FTP server")
// Make ftp library dial with fshttp dialer optionally using TLS // Make ftp library dial with fshttp dialer optionally using TLS
initialConnection := true
dial := func(network, address string) (conn net.Conn, err error) { dial := func(network, address string) (conn net.Conn, err error) {
fs.Debugf(f, "dial(%q,%q)", network, address)
defer func() {
fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
}()
conn, err = fshttp.NewDialer(ctx).Dial(network, address) conn, err = fshttp.NewDialer(ctx).Dial(network, address)
if err != nil { if f.tlsConf != nil && err == nil {
return nil, err conn = tls.Client(conn, f.tlsConf)
} }
// Connect using cleartext only for non TLS return
if f.tlsConf == nil {
return conn, nil
}
// Initial connection only needs to be cleartext for explicit TLS
if f.opt.ExplicitTLS && initialConnection {
initialConnection = false
return conn, nil
}
// Upgrade connection to TLS
tlsConn := tls.Client(conn, f.tlsConf)
// Do the initial handshake - tls.Client doesn't do it for us
// If we do this then connections to proftpd/pureftpd lock up
// See: https://github.com/rclone/rclone/issues/6426
// See: https://github.com/jlaffaye/ftp/issues/282
if false {
err = tlsConn.HandshakeContext(ctx)
if err != nil {
_ = conn.Close()
return nil, err
}
}
return tlsConn, nil
}
ftpConfig := []ftp.DialOption{
ftp.DialWithContext(ctx),
ftp.DialWithDialFunc(dial),
} }
ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dial)}
if f.opt.TLS { if f.opt.TLS {
// Our dialer takes care of TLS but ftp library also needs tlsConf // Our dialer takes care of TLS but ftp library also needs tlsConf
@@ -381,6 +331,12 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf)) ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
} else if f.opt.ExplicitTLS { } else if f.opt.ExplicitTLS {
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf)) ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
// Initial connection needs to be cleartext for explicit TLS
conn, err := fshttp.NewDialer(ctx).Dial("tcp", f.dialAddr)
if err != nil {
return nil, err
}
ftpConfig = append(ftpConfig, ftp.DialWithNetConn(conn))
} }
if f.opt.DisableEPSV { if f.opt.DisableEPSV {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true)) ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
@@ -397,9 +353,6 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
if f.opt.WritingMDTM { if f.opt.WritingMDTM {
ftpConfig = append(ftpConfig, ftp.DialWithWritingMDTM(true)) ftpConfig = append(ftpConfig, ftp.DialWithWritingMDTM(true))
} }
if f.opt.ForceListHidden {
ftpConfig = append(ftpConfig, ftp.DialWithForceListHidden(true))
}
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 { if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0})) ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
} }
@@ -534,7 +487,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
protocol = "ftps://" protocol = "ftps://"
} }
if opt.TLS && opt.ExplicitTLS { if opt.TLS && opt.ExplicitTLS {
return nil, errors.New("implicit TLS and explicit TLS are mutually incompatible, please revise your config") return nil, errors.New("Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
} }
var tlsConfig *tls.Config var tlsConfig *tls.Config
if opt.TLS || opt.ExplicitTLS { if opt.TLS || opt.ExplicitTLS {
@@ -657,7 +610,8 @@ func (f *Fs) dirFromStandardPath(dir string) string {
// findItem finds a directory entry for the name in its parent directory // findItem finds a directory entry for the name in its parent directory
func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err error) { func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err) // defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
if remote == "" || remote == "." || remote == "/" { fullPath := path.Join(f.root, remote)
if fullPath == "" || fullPath == "." || fullPath == "/" {
// if root, assume exists and synthesize an entry // if root, assume exists and synthesize an entry
return &ftp.Entry{ return &ftp.Entry{
Name: "", Name: "",
@@ -665,32 +619,13 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
Time: time.Now(), Time: time.Now(),
}, nil }, nil
} }
dir := path.Dir(fullPath)
base := path.Base(fullPath)
c, err := f.getFtpConnection(ctx) c, err := f.getFtpConnection(ctx)
if err != nil { if err != nil {
return nil, fmt.Errorf("findItem: %w", err) return nil, fmt.Errorf("findItem: %w", err)
} }
// returns TRUE if MLST is supported which is required to call GetEntry
if c.IsTimePreciseInList() {
entry, err := c.GetEntry(f.opt.Enc.FromStandardPath(remote))
f.putFtpConnection(&c, err)
if err != nil {
err = translateErrorFile(err)
if err == fs.ErrorObjectNotFound {
return nil, nil
}
return nil, err
}
if entry != nil {
f.entryToStandard(entry)
}
return entry, nil
}
dir := path.Dir(remote)
base := path.Base(remote)
files, err := c.List(f.dirFromStandardPath(dir)) files, err := c.List(f.dirFromStandardPath(dir))
f.putFtpConnection(&c, err) f.putFtpConnection(&c, err)
if err != nil { if err != nil {
@@ -709,7 +644,7 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
// it returns the error fs.ErrorObjectNotFound. // it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) { func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err) // defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
entry, err := f.findItem(ctx, path.Join(f.root, remote)) entry, err := f.findItem(ctx, remote)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -731,7 +666,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
// dirExists checks the directory pointed to by remote exists or not // dirExists checks the directory pointed to by remote exists or not
func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) { func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) {
entry, err := f.findItem(ctx, path.Join(f.root, remote)) entry, err := f.findItem(ctx, remote)
if err != nil { if err != nil {
return false, fmt.Errorf("dirExists: %w", err) return false, fmt.Errorf("dirExists: %w", err)
} }
@@ -783,7 +718,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
case <-timer.C: case <-timer.C:
// if timer fired assume no error but connection dead // if timer fired assume no error but connection dead
fs.Errorf(f, "Timeout when waiting for List") fs.Errorf(f, "Timeout when waiting for List")
return nil, errors.New("timeout when waiting for List") return nil, errors.New("Timeout when waiting for List")
} }
// Annoyingly FTP returns success for a directory which // Annoyingly FTP returns success for a directory which
@@ -834,12 +769,11 @@ func (f *Fs) Hashes() hash.Set {
// Precision shows whether modified time is supported or not depending on the // Precision shows whether modified time is supported or not depending on the
// FTP server capabilities, namely whether FTP server: // FTP server capabilities, namely whether FTP server:
// - accepts the MDTM command to get file time (fGetTime) // - accepts the MDTM command to get file time (fGetTime)
// or supports MLSD returning precise file time in the list (fLstTime) // or supports MLSD returning precise file time in the list (fLstTime)
// - accepts the MFMT command to set file time (fSetTime) // - accepts the MFMT command to set file time (fSetTime)
// or non-standard form of the MDTM command (fSetTime, too) // or non-standard form of the MDTM command (fSetTime, too)
// used by VsFtpd for the same purpose (WritingMDTM) // used by VsFtpd for the same purpose (WritingMDTM)
//
// See "mdtm_write" in https://security.appspot.com/vsftpd/vsftpd_conf.html // See "mdtm_write" in https://security.appspot.com/vsftpd/vsftpd_conf.html
func (f *Fs) Precision() time.Duration { func (f *Fs) Precision() time.Duration {
if (f.fGetTime || f.fLstTime) && f.fSetTime { if (f.fGetTime || f.fLstTime) && f.fSetTime {
@@ -875,18 +809,32 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// getInfo reads the FileInfo for a path // getInfo reads the FileInfo for a path
func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err error) { func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err error) {
// defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err) // defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err)
file, err := f.findItem(ctx, remote) dir := path.Dir(remote)
base := path.Base(remote)
c, err := f.getFtpConnection(ctx)
if err != nil { if err != nil {
return nil, err return nil, fmt.Errorf("getInfo: %w", err)
} else if file != nil { }
info := &FileInfo{ files, err := c.List(f.dirFromStandardPath(dir))
Name: remote, f.putFtpConnection(&c, err)
Size: file.Size, if err != nil {
ModTime: file.Time, return nil, translateErrorFile(err)
precise: f.fLstTime, }
IsDir: file.Type == ftp.EntryTypeFolder,
for i := range files {
file := files[i]
f.entryToStandard(file)
if file.Name == base {
info := &FileInfo{
Name: remote,
Size: file.Size,
ModTime: file.Time,
precise: f.fLstTime,
IsDir: file.Type == ftp.EntryTypeFolder,
}
return info, nil
} }
return info, nil
} }
return nil, fs.ErrorObjectNotFound return nil, fs.ErrorObjectNotFound
} }
@@ -1201,7 +1149,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
// Update the already existing object // Update the already existing object
// //
// Copy the reader into the object updating modTime and size. // Copy the reader into the object updating modTime and size
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {


@@ -34,9 +34,9 @@ func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
// test that big file uploads do not cause network i/o timeout // test that big file uploads do not cause network i/o timeout
func (f *Fs) testUploadTimeout(t *testing.T) { func (f *Fs) testUploadTimeout(t *testing.T) {
const ( const (
fileSize = 100000000 // 100 MB fileSize = 100000000 // 100 MB
idleTimeout = 1 * time.Second // small because test server is local idleTimeout = 40 * time.Millisecond // small because test server is local
maxTime = 10 * time.Second // prevent test hangup maxTime = 10 * time.Second // prevent test hangup
) )
if testing.Short() { if testing.Short() {


@@ -19,8 +19,8 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"os"
"path" "path"
"strconv" "strconv"
"strings" "strings"
@@ -44,7 +44,6 @@ import (
"golang.org/x/oauth2" "golang.org/x/oauth2"
"golang.org/x/oauth2/google" "golang.org/x/oauth2/google"
"google.golang.org/api/googleapi" "google.golang.org/api/googleapi"
option "google.golang.org/api/option"
// NOTE: This API is deprecated // NOTE: This API is deprecated
storage "google.golang.org/api/storage/v1" storage "google.golang.org/api/storage/v1"
@@ -307,22 +306,20 @@ rclone does if you know the bucket exists already.
Default: false, Default: false,
Advanced: true, Advanced: true,
}, { }, {
Name: "decompress", Name: "download_compressed",
Help: `If set this will decompress gzip encoded objects. Help: `If set this will download compressed objects as-is.
It is possible to upload objects to GCS with "Content-Encoding: gzip" It is possible to upload objects to GCS with "Content-Encoding: gzip"
set. Normally rclone will download these files as compressed objects. set. Normally rclone will transparently decompress these files on
download. This means that rclone can't check the hash or the size of
the file as both of these refer to the compressed object.
If this flag is set then rclone will decompress these files with If this flag is set then rclone will download files with
"Content-Encoding: gzip" as they are received. This means that rclone "Content-Encoding: gzip" as they are received. This means that rclone
can't check the size and hash but the file contents will be decompressed. can check the size and hash but the file contents will be compressed.
`, `,
Advanced: true, Advanced: true,
Default: false, Default: false,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.",
Advanced: true,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,
@@ -346,8 +343,7 @@ type Options struct {
Location string `config:"location"` Location string `config:"location"`
StorageClass string `config:"storage_class"` StorageClass string `config:"storage_class"`
NoCheckBucket bool `config:"no_check_bucket"` NoCheckBucket bool `config:"no_check_bucket"`
Decompress bool `config:"decompress"` DownloadCompressed bool `config:"download_compressed"`
Endpoint string `config:"endpoint"`
Enc encoder.MultiEncoder `config:"encoding"` Enc encoder.MultiEncoder `config:"encoding"`
} }
@@ -395,7 +391,7 @@ func (f *Fs) Root() string {
// String converts this Fs to a string // String converts this Fs to a string
func (f *Fs) String() string { func (f *Fs) String() string {
if f.rootBucket == "" { if f.rootBucket == "" {
return "GCS root" return fmt.Sprintf("GCS root")
} }
if f.rootDirectory == "" { if f.rootDirectory == "" {
return fmt.Sprintf("GCS bucket %s", f.rootBucket) return fmt.Sprintf("GCS bucket %s", f.rootBucket)
@@ -487,7 +483,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// try loading service account credentials from env variable, then from a file // try loading service account credentials from env variable, then from a file
if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" { if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServiceAccountFile)) loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
if err != nil { if err != nil {
return nil, fmt.Errorf("error opening service account credentials file: %w", err) return nil, fmt.Errorf("error opening service account credentials file: %w", err)
} }
@@ -528,11 +524,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Create a new authorized Drive client. // Create a new authorized Drive client.
f.client = oAuthClient f.client = oAuthClient
gcsOpts := []option.ClientOption{option.WithHTTPClient(f.client)} f.svc, err = storage.New(f.client)
if opt.Endpoint != "" {
gcsOpts = append(gcsOpts, option.WithEndpoint(opt.Endpoint))
}
f.svc, err = storage.NewService(context.Background(), gcsOpts...)
if err != nil { if err != nil {
return nil, fmt.Errorf("couldn't create Google Cloud Storage client: %w", err) return nil, fmt.Errorf("couldn't create Google Cloud Storage client: %w", err)
} }
@@ -589,7 +581,7 @@ type listFn func(remote string, object *storage.Object, isDirectory bool) error
// //
// dir is the starting directory, "" for root // dir is the starting directory, "" for root
// //
// Set recurse to read sub directories. // Set recurse to read sub directories
// //
// The remote has prefix removed from it and if addBucket is set // The remote has prefix removed from it and if addBucket is set
// then it adds the bucket to the start. // then it adds the bucket to the start.
@@ -807,7 +799,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
// Put the object into the bucket // Put the object into the bucket
// //
// Copy the reader in to the new object which is returned. // Copy the reader in to the new object which is returned
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -909,9 +901,9 @@ func (f *Fs) Precision() time.Duration {
// Copy src to this remote using server-side copy operations. // Copy src to this remote using server-side copy operations.
// //
// This is stored with the remote path given. // This is stored with the remote path given
// //
// It returns the destination Object and a possible error. // It returns the destination Object and a possible error
// //
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
@@ -1043,9 +1035,12 @@ func (o *Object) setMetaData(info *storage.Object) {
} }
// If gunzipping then size and md5sum are unknown // If gunzipping then size and md5sum are unknown
if o.gzipped && o.fs.opt.Decompress { if o.gzipped && !o.fs.opt.DownloadCompressed {
o.bytes = -1 o.bytes = -1
o.md5sum = "" o.md5sum = ""
o.fs.warnCompressed.Do(func() {
fs.Logf(o.fs, "Decompressing 'Content-Encoding: gzip' compressed file. Use --gcs-download-compressed to override")
})
} }
} }
@@ -1147,7 +1142,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, err return nil, err
} }
fs.FixRangeOption(options, o.bytes) fs.FixRangeOption(options, o.bytes)
if o.gzipped && !o.fs.opt.Decompress { if o.gzipped && o.fs.opt.DownloadCompressed {
// Allow files which are stored on the cloud storage system // Allow files which are stored on the cloud storage system
// compressed to be downloaded without being decompressed. Note // compressed to be downloaded without being decompressed. Note
// that setting this here overrides the automatic decompression // that setting this here overrides the automatic decompression
@@ -1155,9 +1150,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// //
// See: https://cloud.google.com/storage/docs/transcoding // See: https://cloud.google.com/storage/docs/transcoding
req.Header.Set("Accept-Encoding", "gzip") req.Header.Set("Accept-Encoding", "gzip")
o.fs.warnCompressed.Do(func() {
fs.Logf(o, "Not decompressing 'Content-Encoding: gzip' compressed file. Use --gcs-decompress to override")
})
} }
fs.OpenOptionAddHTTPHeaders(req.Header, options) fs.OpenOptionAddHTTPHeaders(req.Header, options)
var res *http.Response var res *http.Response


@@ -1,4 +1,3 @@
// Package api provides types used by the Google Photos API.
package api package api
import ( import (


@@ -178,7 +178,7 @@ type Fs struct {
opt Options // parsed options opt Options // parsed options
features *fs.Features // optional features features *fs.Features // optional features
unAuth *rest.Client // unauthenticated http client unAuth *rest.Client // unauthenticated http client
srv *rest.Client // the connection to the server srv *rest.Client // the connection to the one drive server
ts *oauthutil.TokenSource // token source for oauth2 ts *oauthutil.TokenSource // token source for oauth2
pacer *fs.Pacer // To pace the API calls pacer *fs.Pacer // To pace the API calls
startTime time.Time // time Fs was started - used for datestamps startTime time.Time // time Fs was started - used for datestamps
@@ -661,7 +661,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Put the object into the bucket // Put the object into the bucket
// //
// Copy the reader in to the new object which is returned. // Copy the reader in to the new object which is returned
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {


@@ -3,7 +3,7 @@ package googlephotos
import ( import (
"context" "context"
"fmt" "fmt"
"io" "io/ioutil"
"net/http" "net/http"
"path" "path"
"testing" "testing"
@@ -12,6 +12,7 @@ import (
_ "github.com/rclone/rclone/backend/local" _ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@@ -36,7 +37,7 @@ func TestIntegration(t *testing.T) {
} }
f, err := fs.NewFs(ctx, *fstest.RemoteName) f, err := fs.NewFs(ctx, *fstest.RemoteName)
if err == fs.ErrorNotFoundInConfigFile { if err == fs.ErrorNotFoundInConfigFile {
t.Skipf("Couldn't create google photos backend - skipping tests: %v", err) t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err))
} }
require.NoError(t, err) require.NoError(t, err)
@@ -55,7 +56,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
in, err := srcObj.Open(ctx) in, err := srcObj.Open(ctx)
require.NoError(t, err) require.NoError(t, err)
dstObj, err := f.Put(ctx, in, fs.NewOverrideRemote(srcObj, remote)) dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote()) assert.Equal(t, remote, dstObj.Remote())
_ = in.Close() _ = in.Close()
@@ -98,7 +99,7 @@ func TestIntegration(t *testing.T) {
t.Run("ObjectOpen", func(t *testing.T) { t.Run("ObjectOpen", func(t *testing.T) {
in, err := dstObj.Open(ctx) in, err := dstObj.Open(ctx)
require.NoError(t, err) require.NoError(t, err)
buf, err := io.ReadAll(in) buf, err := ioutil.ReadAll(in)
require.NoError(t, err) require.NoError(t, err)
require.NoError(t, in.Close()) require.NoError(t, in.Close())
assert.True(t, len(buf) > 1000) assert.True(t, len(buf) > 1000)
@@ -220,7 +221,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
in, err := srcObj.Open(ctx) in, err := srcObj.Open(ctx)
require.NoError(t, err) require.NoError(t, err)
dstObj, err := f.Put(ctx, in, fs.NewOverrideRemote(srcObj, remote)) dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote()) assert.Equal(t, remote, dstObj.Remote())
_ = in.Close() _ = in.Close()


@@ -315,7 +315,7 @@ func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.S
// featureFilter creates a filter for the Feature enum // featureFilter creates a filter for the Feature enum
// //
// The API only supports one feature, FAVORITES, so hardcode that feature. // The API only supports one feature, FAVORITES, so hardcode that feature
// //
// https://developers.google.com/photos/library/reference/rest/v1/mediaItems/search#FeatureFilter // https://developers.google.com/photos/library/reference/rest/v1/mediaItems/search#FeatureFilter
func featureFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter) { func featureFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter) {


@@ -50,7 +50,7 @@ func (f *testLister) listAlbums(ctx context.Context, shared bool) (all *albums,
// mock listUploads for testing // mock listUploads for testing
func (f *testLister) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *testLister) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
entries = f.uploaded[dir] entries, _ = f.uploaded[dir]
return entries, nil return entries, nil
} }


@@ -161,7 +161,7 @@ func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bo
if err := o.putHashes(ctx, hashMap{hashType: hash}); err != nil { if err := o.putHashes(ctx, hashMap{hashType: hash}); err != nil {
fs.Errorf(nil, "%s: failed to import: %v", remote, err) fs.Errorf(nil, "%s: failed to import: %v", remote, err)
} }
accounting.Stats(ctx).NewCheckingTransfer(obj, "importing").Done(ctx, err) accounting.Stats(ctx).NewCheckingTransfer(obj).Done(ctx, err)
doneCount++ doneCount++
} }
}) })


@@ -27,9 +27,6 @@ func init() {
Name: "hasher", Name: "hasher",
Description: "Better checksums for other remotes", Description: "Better checksums for other remotes",
NewFs: NewFs, NewFs: NewFs,
MetadataInfo: &fs.MetadataInfo{
Help: `Any metadata supported by the underlying remote is read and written.`,
},
CommandHelp: commandHelp, CommandHelp: commandHelp,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "remote", Name: "remote",
@@ -161,11 +158,6 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
IsLocal: true, IsLocal: true,
ReadMimeType: true, ReadMimeType: true,
WriteMimeType: true, WriteMimeType: true,
SetTier: true,
GetTier: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
} }
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs) f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)
@@ -493,17 +485,6 @@ func (o *Object) MimeType(ctx context.Context) string {
return "" return ""
} }
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
do, ok := o.Object.(fs.Metadataer)
if !ok {
return nil, nil
}
return do.Metadata(ctx)
}
// Check the interfaces are satisfied // Check the interfaces are satisfied
var ( var (
_ fs.Fs = (*Fs)(nil) _ fs.Fs = (*Fs)(nil)
@@ -526,5 +507,10 @@ var (
_ fs.UserInfoer = (*Fs)(nil) _ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil) _ fs.Disconnecter = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil) _ fs.Shutdowner = (*Fs)(nil)
_ fs.FullObject = (*Object)(nil) _ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
_ fs.SetTierer = (*Object)(nil)
_ fs.GetTierer = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
) )


@@ -19,7 +19,7 @@ import (
func putFile(ctx context.Context, t *testing.T, f fs.Fs, name, data string) fs.Object { func putFile(ctx context.Context, t *testing.T, f fs.Fs, name, data string) fs.Object {
mtime1 := fstest.Time("2001-02-03T04:05:06.499999999Z") mtime1 := fstest.Time("2001-02-03T04:05:06.499999999Z")
item := fstest.Item{Path: name, ModTime: mtime1} item := fstest.Item{Path: name, ModTime: mtime1}
o := fstests.PutTestContents(ctx, t, f, &item, data, true) _, o := fstests.PutTestContents(ctx, t, f, &item, data, true)
require.NotNil(t, o) require.NotNil(t, o)
return o return o
} }
@@ -35,7 +35,7 @@ func (f *Fs) testUploadFromCrypt(t *testing.T) {
// make a temporary crypt remote // make a temporary crypt remote
ctx := context.Background() ctx := context.Background()
pass := obscure.MustObscure("crypt") pass := obscure.MustObscure("crypt")
remote := fmt.Sprintf(`:crypt,remote="%s",password="%s":`, tempRoot, pass) remote := fmt.Sprintf(":crypt,remote=%s,password=%s:", tempRoot, pass)
cryptFs, err := fs.NewFs(ctx, remote) cryptFs, err := fs.NewFs(ctx, remote)
require.NoError(t, err) require.NoError(t, err)


@@ -33,7 +33,6 @@ func TestIntegration(t *testing.T) {
{Name: "TestHasher", Key: "remote", Value: tempDir}, {Name: "TestHasher", Key: "remote", Value: tempDir},
} }
opt.RemoteName = "TestHasher:" opt.RemoteName = "TestHasher:"
opt.QuickTestOK = true
} }
fstests.Run(t, &opt) fstests.Run(t, &opt)
} }


@@ -5,6 +5,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"path" "path"
"time" "time"
@@ -117,7 +118,7 @@ func (o *Object) updateHashes(ctx context.Context) error {
defer func() { defer func() {
_ = r.Close() _ = r.Close()
}() }()
if _, err = io.Copy(io.Discard, r); err != nil { if _, err = io.Copy(ioutil.Discard, r); err != nil {
fs.Infof(o, "update failed (copy): %v", err) fs.Infof(o, "update failed (copy): %v", err)
return err return err
} }


@@ -92,7 +92,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.ServicePrincipalName != "" { if opt.ServicePrincipalName != "" {
options.KerberosClient, err = getKerberosClient() options.KerberosClient, err = getKerberosClient()
if err != nil { if err != nil {
return nil, fmt.Errorf("problem with kerberos authentication: %w", err) return nil, fmt.Errorf("Problem with kerberos authentication: %s", err)
} }
options.KerberosServicePrincipleName = opt.ServicePrincipalName options.KerberosServicePrincipleName = opt.ServicePrincipalName
@@ -265,9 +265,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// Move src to this remote using server-side move operations. // Move src to this remote using server-side move operations.
// //
// This is stored with the remote path given. // This is stored with the remote path given
// //
// It returns the destination Object and a possible error. // It returns the destination Object and a possible error
// //
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //


@@ -1,7 +1,6 @@
//go:build !plan9 //go:build !plan9
// +build !plan9 // +build !plan9
// Package hdfs provides an interface to the HDFS storage system.
package hdfs package hdfs
import ( import (


@@ -115,7 +115,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err return err
} }
_, err = o.fs.client.Stat(realpath) info, err := o.fs.client.Stat(realpath)
if err == nil { if err == nil {
err = o.fs.client.Remove(realpath) err = o.fs.client.Remove(realpath)
if err != nil { if err != nil {
@@ -147,7 +147,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err return err
} }
info, err := o.fs.client.Stat(realpath) info, err = o.fs.client.Stat(realpath)
if err != nil { if err != nil {
return err return err
} }


@@ -1,81 +0,0 @@
package api
import (
"encoding/json"
"net/url"
"path"
"strings"
"time"
)
// Some presets for different amounts of information that can be requested for fields;
// it is recommended to only request the information that is actually needed.
var (
HiDriveObjectNoMetadataFields = []string{"name", "type"}
HiDriveObjectWithMetadataFields = append(HiDriveObjectNoMetadataFields, "id", "size", "mtime", "chash")
HiDriveObjectWithDirectoryMetadataFields = append(HiDriveObjectWithMetadataFields, "nmembers")
DirectoryContentFields = []string{"nmembers"}
)
// QueryParameters represents the parameters passed to an API-call.
type QueryParameters struct {
url.Values
}
// NewQueryParameters initializes an instance of QueryParameters and
// returns a pointer to it.
func NewQueryParameters() *QueryParameters {
return &QueryParameters{url.Values{}}
}
// SetFileInDirectory sets the appropriate parameters
// to specify a path to a file in a directory.
// This is used by requests that work with paths for files that do not exist yet.
// (For example when creating a file).
// Most requests use the format produced by SetPath(...).
func (p *QueryParameters) SetFileInDirectory(filePath string) {
directory, file := path.Split(path.Clean(filePath))
p.Set("dir", path.Clean(directory))
p.Set("name", file)
// NOTE: It would be possible to switch to pid-based requests
// by modifying this function.
}
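// A minimal usage sketch (hypothetical helper; the results follow directly
// from the path.Clean/path.Split calls above):
func exampleSetFileInDirectory() {
	p := NewQueryParameters()
	p.SetFileInDirectory("a/b/c.txt")
	// p.Get("dir") == "a/b" and p.Get("name") == "c.txt"
}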
// SetPath sets the appropriate parameters to access the given path.
func (p *QueryParameters) SetPath(objectPath string) {
p.Set("path", path.Clean(objectPath))
// NOTE: It would be possible to switch to pid-based requests
// by modifying this function.
}
// SetTime sets the key to the time-value. It replaces any existing values.
func (p *QueryParameters) SetTime(key string, value time.Time) error {
valueAPI := Time(value)
valueBytes, err := json.Marshal(&valueAPI)
if err != nil {
return err
}
p.Set(key, string(valueBytes))
return nil
}
// AddList adds the given values as a list
// with each value separated by the separator.
// It appends to any existing values associated with key.
func (p *QueryParameters) AddList(key string, separator string, values ...string) {
original := p.Get(key)
p.Set(key, strings.Join(values, separator))
if original != "" {
p.Set(key, original+separator+p.Get(key))
}
}
// AddFields sets the appropriate parameter to access the given fields.
// The given fields will be appended to any other existing fields.
func (p *QueryParameters) AddFields(prefix string, fields ...string) {
modifiedFields := make([]string, len(fields))
for i, field := range fields {
modifiedFields[i] = prefix + field
}
p.AddList("fields", ",", modifiedFields...)
}
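// An illustrative sketch of the append semantics of AddList and AddFields
// (hypothetical helper; the field presets are the ones defined above):
func exampleFieldParameters() {
	p := NewQueryParameters()
	p.AddFields("members.", HiDriveObjectNoMetadataFields...)
	// fields == "members.name,members.type"
	p.AddFields("", DirectoryContentFields...)
	// fields == "members.name,members.type,nmembers"
	p.AddList("sort", ",", "name")
	p.AddList("sort", ",", "-size")
	// sort == "name,-size"
}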


@@ -1,135 +0,0 @@
// Package api has type definitions and code related to API-calls for the HiDrive-API.
package api
import (
"encoding/json"
"fmt"
"net/url"
"strconv"
"time"
)
// Time represents date and time information for the API.
type Time time.Time
// MarshalJSON turns Time into JSON (in Unix-time/UTC).
func (t *Time) MarshalJSON() ([]byte, error) {
secs := time.Time(*t).Unix()
return []byte(strconv.FormatInt(secs, 10)), nil
}
// UnmarshalJSON turns JSON into Time.
func (t *Time) UnmarshalJSON(data []byte) error {
secs, err := strconv.ParseInt(string(data), 10, 64)
if err != nil {
return err
}
*t = Time(time.Unix(secs, 0))
return nil
}
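// A round-trip sketch (hypothetical helper; uses the encoding/json and fmt
// imports above):
func exampleTimeRoundTrip() {
	var t Time
	_ = json.Unmarshal([]byte("1257894000"), &t)
	// time.Time(t).UTC() is 2009-11-10 23:00:00 +0000 UTC
	out, _ := json.Marshal(&t)
	fmt.Println(string(out)) // prints "1257894000"
}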
// Error is returned from the API when things go wrong.
type Error struct {
Code json.Number `json:"code"`
ContextInfo json.RawMessage
Message string `json:"msg"`
}
// Error returns a string for the error and satisfies the error interface.
func (e *Error) Error() string {
out := fmt.Sprintf("Error %q", e.Code.String())
if e.Message != "" {
out += ": " + e.Message
}
if e.ContextInfo != nil {
out += fmt.Sprintf(" (%+v)", e.ContextInfo)
}
return out
}
// Check Error satisfies the error interface.
var _ error = (*Error)(nil)
// possible types for HiDriveObject
const (
HiDriveObjectTypeDirectory = "dir"
HiDriveObjectTypeFile = "file"
HiDriveObjectTypeSymlink = "symlink"
)
// HiDriveObject describes a folder, a symlink or a file.
// Depending on the type and content, not all fields are present.
type HiDriveObject struct {
Type string `json:"type"`
ID string `json:"id"`
ParentID string `json:"parent_id"`
Name string `json:"name"`
Path string `json:"path"`
Size int64 `json:"size"`
MemberCount int64 `json:"nmembers"`
ModifiedAt Time `json:"mtime"`
ChangedAt Time `json:"ctime"`
MetaHash string `json:"mhash"`
MetaOnlyHash string `json:"mohash"`
NameHash string `json:"nhash"`
ContentHash string `json:"chash"`
IsTeamfolder bool `json:"teamfolder"`
Readable bool `json:"readable"`
Writable bool `json:"writable"`
Shareable bool `json:"shareable"`
MIMEType string `json:"mime_type"`
}
// ModTime returns the modification time of the HiDriveObject.
func (i *HiDriveObject) ModTime() time.Time {
t := time.Time(i.ModifiedAt)
if t.IsZero() {
t = time.Time(i.ChangedAt)
}
return t
}
// UnmarshalJSON turns JSON into HiDriveObject and
// introduces specific default-values where necessary.
func (i *HiDriveObject) UnmarshalJSON(data []byte) error {
type objectAlias HiDriveObject
defaultObject := objectAlias{
Size: -1,
MemberCount: -1,
}
err := json.Unmarshal(data, &defaultObject)
if err != nil {
return err
}
name, err := url.PathUnescape(defaultObject.Name)
if err == nil {
defaultObject.Name = name
}
*i = HiDriveObject(defaultObject)
return nil
}
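// A sketch of the default handling (hypothetical helper): fields missing from
// the JSON keep the defaults set above, and the name is percent-unescaped.
func exampleHiDriveObjectDefaults() {
	var obj HiDriveObject
	_ = json.Unmarshal([]byte(`{"name":"some%20file","type":"file"}`), &obj)
	// obj.Name == "some file", obj.Size == -1, obj.MemberCount == -1
}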
// DirectoryContent describes the content of a directory.
type DirectoryContent struct {
TotalCount int64 `json:"nmembers"`
Entries []HiDriveObject `json:"members"`
}
// UnmarshalJSON turns JSON into DirectoryContent and
// introduces specific default-values where necessary.
func (d *DirectoryContent) UnmarshalJSON(data []byte) error {
type directoryContentAlias DirectoryContent
defaultDirectoryContent := directoryContentAlias{
TotalCount: -1,
}
err := json.Unmarshal(data, &defaultDirectoryContent)
if err != nil {
return err
}
*d = DirectoryContent(defaultDirectoryContent)
return nil
}


@@ -1,888 +0,0 @@
package hidrive
// This file is for helper-functions which may provide more general and
// specialized functionality than the generic interfaces.
// There are two sections:
// 1. methods bound to Fs
// 2. other functions independent from Fs used throughout the package
// NOTE: Functions accessing paths expect any relative paths
// to be resolved prior to execution with resolvePath(...).
import (
"bytes"
"context"
"errors"
"io"
"net/http"
"path"
"strconv"
"sync"
"time"
"github.com/rclone/rclone/backend/hidrive/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/ranges"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
)
const (
// MaximumUploadBytes represents the maximum amount of bytes
// a single upload-operation will support.
MaximumUploadBytes = 2147483647 // = 2GiB - 1
// iterationChunkSize represents the chunk size used to iterate directory contents.
iterationChunkSize = 5000
)
var (
// retryErrorCodes is a slice of error codes that we will always retry.
retryErrorCodes = []int{
429, // Too Many Requests
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// ErrorFileExists is returned when a query tries to create a file
// that already exists.
ErrorFileExists = errors.New("destination file already exists")
)
// MemberType represents the possible types of entries a directory can contain.
type MemberType string
// possible values for MemberType
const (
AllMembers MemberType = "all"
NoMembers MemberType = "none"
DirectoryMembers MemberType = api.HiDriveObjectTypeDirectory
FileMembers MemberType = api.HiDriveObjectTypeFile
SymlinkMembers MemberType = api.HiDriveObjectTypeSymlink
)
// SortByField represents possible fields to sort entries of a directory by.
type SortByField string
// possible values for SortByField
const (
descendingSort string = "-"
SortByName SortByField = "name"
SortByModTime SortByField = "mtime"
SortByObjectType SortByField = "type"
SortBySize SortByField = "size"
SortByNameDescending SortByField = SortByField(descendingSort) + SortByName
SortByModTimeDescending SortByField = SortByField(descendingSort) + SortByModTime
SortByObjectTypeDescending SortByField = SortByField(descendingSort) + SortByObjectType
SortBySizeDescending SortByField = SortByField(descendingSort) + SortBySize
)
var (
// Unsorted disables sorting and can therefore not be combined with other values.
Unsorted = []SortByField{"none"}
// DefaultSorted does not specify how to sort and
// therefore implies the default sort order.
DefaultSorted = []SortByField{}
)
// CopyOrMoveOperationType represents the possible types of copy- and move-operations.
type CopyOrMoveOperationType int
// possible values for CopyOrMoveOperationType
const (
MoveOriginal CopyOrMoveOperationType = iota
CopyOriginal
CopyOriginalPreserveModTime
)
// OnExistAction represents possible actions the API should take,
// when a request tries to create a path that already exists.
type OnExistAction string
// possible values for OnExistAction
const (
// IgnoreOnExist instructs the API not to execute
// the request in case of a conflict, but to return an error.
IgnoreOnExist OnExistAction = "ignore"
// AutoNameOnExist instructs the API to automatically rename
// any conflicting request-objects.
AutoNameOnExist OnExistAction = "autoname"
// OverwriteOnExist instructs the API to overwrite any conflicting files.
// This can only be used if the request operates on files directly.
// (For example when moving/copying a file.)
// For most requests this action will simply be ignored.
OverwriteOnExist OnExistAction = "overwrite"
)
// shouldRetry returns a boolean as to whether this resp and err deserve to be retried.
// It tries to expire/invalidate the token, if necessary.
// It returns the err as a convenience.
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
if resp != nil && (resp.StatusCode == 401 || isHTTPError(err, 401)) && len(resp.Header["Www-Authenticate"]) > 0 {
fs.Debugf(f, "Token might be invalid: %v", err)
if f.tokenRenewer != nil {
iErr := f.tokenRenewer.Expire()
if iErr == nil {
return true, err
}
}
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// resolvePath resolves the given (relative) path and
// returns a path suitable for API-calls.
// This will consider the root-path of the fs and any needed prefixes.
//
// Any relative paths passed to functions that access these paths should
// be resolved with this first!
func (f *Fs) resolvePath(objectPath string) string {
resolved := path.Join(f.opt.RootPrefix, f.root, f.opt.Enc.FromStandardPath(objectPath))
return resolved
}
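// For instance (hypothetical values): with f.opt.RootPrefix == "root",
// f.root == "backup" and an encoder that leaves the name unchanged,
// f.resolvePath("dir/file.txt") returns "root/backup/dir/file.txt".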
// iterateOverDirectory calls the given function callback
// on each item found in a given directory.
//
// If callback ever returns true then this exits early with found = true.
func (f *Fs) iterateOverDirectory(ctx context.Context, directory string, searchOnly MemberType, callback func(*api.HiDriveObject) bool, fields []string, sortBy []SortByField) (found bool, err error) {
parameters := api.NewQueryParameters()
parameters.SetPath(directory)
parameters.AddFields("members.", fields...)
parameters.AddFields("", api.DirectoryContentFields...)
parameters.Set("members", string(searchOnly))
for _, v := range sortBy {
// The explicit conversion is necessary for each element.
parameters.AddList("sort", ",", string(v))
}
opts := rest.Opts{
Method: "GET",
Path: "/dir",
Parameters: parameters.Values,
}
iterateContent := func(result *api.DirectoryContent, err error) (bool, error) {
if err != nil {
return false, err
}
for _, item := range result.Entries {
item.Name = f.opt.Enc.ToStandardName(item.Name)
if callback(&item) {
return true, nil
}
}
return false, nil
}
return f.paginateDirectoryAccess(ctx, &opts, iterationChunkSize, 0, iterateContent)
}
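// A hypothetical caller showing the early-exit contract (dir and leaf are
// placeholders; the fields and sort values are the presets defined earlier):
func (f *Fs) exampleFindEntry(ctx context.Context, dir, leaf string) (*api.HiDriveObject, error) {
	var match *api.HiDriveObject
	_, err := f.iterateOverDirectory(ctx, f.resolvePath(dir), AllMembers,
		func(obj *api.HiDriveObject) bool {
			if obj.Name == leaf {
				match = obj
				return true // stop iterating early
			}
			return false
		},
		api.HiDriveObjectWithMetadataFields, DefaultSorted)
	return match, err
}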
// paginateDirectoryAccess executes requests specified via ctx and opts
// which should produce api.DirectoryContent.
// This will paginate the requests using limit starting at the given offset.
//
// The given function callback is called on each api.DirectoryContent found
// along with any errors that occurred.
// If callback ever returns true then this exits early with found = true.
// If callback ever returns an error then this exits early with that error.
func (f *Fs) paginateDirectoryAccess(ctx context.Context, opts *rest.Opts, limit int64, offset int64, callback func(*api.DirectoryContent, error) (bool, error)) (found bool, err error) {
for {
opts.Parameters.Set("limit", strconv.FormatInt(offset, 10)+","+strconv.FormatInt(limit, 10))
var result api.DirectoryContent
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
})
found, err = callback(&result, err)
if found || err != nil {
return found, err
}
offset += int64(len(result.Entries))
if offset >= result.TotalCount || limit > int64(len(result.Entries)) {
break
}
}
return false, nil
}
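// The "limit" parameter carries the pagination window as "offset,count":
// for example, offset == 5000 and limit == 5000 produce "limit=5000,5000",
// i.e. skip the first 5000 entries and return at most the next 5000.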
// fetchMetadataForPath reads the metadata from the path.
func (f *Fs) fetchMetadataForPath(ctx context.Context, path string, fields []string) (*api.HiDriveObject, error) {
parameters := api.NewQueryParameters()
parameters.SetPath(path)
parameters.AddFields("", fields...)
opts := rest.Opts{
Method: "GET",
Path: "/meta",
Parameters: parameters.Values,
}
var result api.HiDriveObject
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
}
return &result, nil
}
// copyOrMove copies or moves a directory or file
// from the source-path to the destination-path.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
//
// NOTE: Use the explicit methods instead of directly invoking this method.
// (Those are: copyDirectory, moveDirectory, copyFile, moveFile.)
func (f *Fs) copyOrMove(ctx context.Context, isDirectory bool, operationType CopyOrMoveOperationType, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
parameters := api.NewQueryParameters()
parameters.Set("src", source)
parameters.Set("dst", destination)
if onExist == AutoNameOnExist ||
(onExist == OverwriteOnExist && !isDirectory) {
parameters.Set("on_exist", string(onExist))
}
endpoint := "/"
if isDirectory {
endpoint += "dir"
} else {
endpoint += "file"
}
switch operationType {
case MoveOriginal:
endpoint += "/move"
case CopyOriginalPreserveModTime:
parameters.Set("preserve_mtime", strconv.FormatBool(true))
fallthrough
case CopyOriginal:
endpoint += "/copy"
}
opts := rest.Opts{
Method: "POST",
Path: endpoint,
Parameters: parameters.Values,
}
var result api.HiDriveObject
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
}
return &result, nil
}
// copyDirectory moves the directory at the source-path to the destination-path and
// returns the resulting api-object if successful.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
func (f *Fs) copyDirectory(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
return f.copyOrMove(ctx, true, CopyOriginalPreserveModTime, source, destination, onExist)
}
// moveDirectory moves the directory at the source-path to the destination-path and
// returns the resulting api-object if successful.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
func (f *Fs) moveDirectory(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
return f.copyOrMove(ctx, true, MoveOriginal, source, destination, onExist)
}
// copyFile copies the file at the source-path to the destination-path and
// returns the resulting api-object if successful.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
//
// NOTE: This operation will expand sparse areas in the content of the source-file
// to blocks of 0-bytes in the destination-file.
func (f *Fs) copyFile(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
return f.copyOrMove(ctx, false, CopyOriginalPreserveModTime, source, destination, onExist)
}
// moveFile moves the file at the source-path to the destination-path and
// returns the resulting api-object if successful.
//
// The operation will only be successful
// if the parent-directory of the destination-path exists.
//
// NOTE: This operation may expand sparse areas in the content of the source-file
// to blocks of 0-bytes in the destination-file.
func (f *Fs) moveFile(ctx context.Context, source string, destination string, onExist OnExistAction) (*api.HiDriveObject, error) {
return f.copyOrMove(ctx, false, MoveOriginal, source, destination, onExist)
}
// createDirectory creates the directory at the given path and
// returns the resulting api-object if successful.
//
// The directory will only be created if its parent-directory exists.
// This returns fs.ErrorDirNotFound if the parent-directory is not found.
// This returns fs.ErrorDirExists if the directory already exists.
func (f *Fs) createDirectory(ctx context.Context, directory string, onExist OnExistAction) (*api.HiDriveObject, error) {
parameters := api.NewQueryParameters()
parameters.SetPath(directory)
if onExist == AutoNameOnExist {
parameters.Set("on_exist", string(onExist))
}
opts := rest.Opts{
Method: "POST",
Path: "/dir",
Parameters: parameters.Values,
}
var result api.HiDriveObject
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
})
switch {
case err == nil:
return &result, nil
case isHTTPError(err, 404):
return nil, fs.ErrorDirNotFound
case isHTTPError(err, 409):
return nil, fs.ErrorDirExists
}
return nil, err
}
// createDirectories creates the directory at the given path
// along with any missing parent directories and
// returns the resulting api-object (of the created directory) if successful.
//
// This returns fs.ErrorDirExists if the directory already exists.
//
// If an error occurs while the parent directories are being created,
// any directories already created will NOT be deleted again.
func (f *Fs) createDirectories(ctx context.Context, directory string, onExist OnExistAction) (*api.HiDriveObject, error) {
result, err := f.createDirectory(ctx, directory, onExist)
if err == nil {
return result, nil
}
if err != fs.ErrorDirNotFound {
return nil, err
}
parentDirectory := path.Dir(directory)
_, err = f.createDirectories(ctx, parentDirectory, onExist)
if err != nil && err != fs.ErrorDirExists {
return nil, err
}
// NOTE: Ignoring fs.ErrorDirExists does no harm,
// since it does not mean the child directory cannot be created.
return f.createDirectory(ctx, directory, onExist)
}
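// An illustrative trace, assuming only /a exists and onExist == IgnoreOnExist:
//   createDirectories("/a/b/c")
//     createDirectory("/a/b/c")  -> fs.ErrorDirNotFound (parent is missing)
//     createDirectories("/a/b")
//       createDirectory("/a/b")  -> fs.ErrorDirNotFound
//       createDirectories("/a")
//         createDirectory("/a")  -> fs.ErrorDirExists (ignored by the caller)
//       createDirectory("/a/b")  -> success
//     createDirectory("/a/b/c")  -> success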
// deleteDirectory deletes the directory at the given path.
//
// If recursive is false, the directory will only be deleted if it is empty.
// If recursive is true, the directory will be deleted regardless of its content.
// This returns fs.ErrorDirNotFound if the directory is not found.
// This returns fs.ErrorDirectoryNotEmpty if the directory is not empty and
// recursive is false.
func (f *Fs) deleteDirectory(ctx context.Context, directory string, recursive bool) error {
parameters := api.NewQueryParameters()
parameters.SetPath(directory)
parameters.Set("recursive", strconv.FormatBool(recursive))
opts := rest.Opts{
Method: "DELETE",
Path: "/dir",
Parameters: parameters.Values,
NoResponse: true,
}
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return f.shouldRetry(ctx, resp, err)
})
switch {
case isHTTPError(err, 404):
return fs.ErrorDirNotFound
case isHTTPError(err, 409):
return fs.ErrorDirectoryNotEmpty
}
return err
}
// deleteObject deletes the object/file at the given path.
//
// This returns fs.ErrorObjectNotFound if the object is not found.
func (f *Fs) deleteObject(ctx context.Context, path string) error {
parameters := api.NewQueryParameters()
parameters.SetPath(path)
opts := rest.Opts{
Method: "DELETE",
Path: "/file",
Parameters: parameters.Values,
NoResponse: true,
}
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return f.shouldRetry(ctx, resp, err)
})
if isHTTPError(err, 404) {
return fs.ErrorObjectNotFound
}
return err
}
// createFile creates a file at the given path
// with the content of the io.ReadSeeker.
// This guarantees that existing files will not be overwritten.
// The maximum size of the content is limited by MaximumUploadBytes.
// The io.ReadSeeker should be resettable by seeking to its start.
// If modTime is not the zero time instant,
// it will be set as the file's modification time after the operation.
//
// This returns fs.ErrorDirNotFound
// if the parent directory of the file is not found.
// This returns ErrorFileExists if a file already exists at the specified path.
func (f *Fs) createFile(ctx context.Context, path string, content io.ReadSeeker, modTime time.Time, onExist OnExistAction) (*api.HiDriveObject, error) {
parameters := api.NewQueryParameters()
parameters.SetFileInDirectory(path)
if onExist == AutoNameOnExist {
parameters.Set("on_exist", string(onExist))
}
var err error
if !modTime.IsZero() {
err = parameters.SetTime("mtime", modTime)
if err != nil {
return nil, err
}
}
opts := rest.Opts{
Method: "POST",
Path: "/file",
Body: content,
ContentType: "application/octet-stream",
Parameters: parameters.Values,
}
var result api.HiDriveObject
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
// Reset the reading index (in case this is a retry).
if _, err = content.Seek(0, io.SeekStart); err != nil {
return false, err
}
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
})
switch {
case err == nil:
return &result, nil
case isHTTPError(err, 404):
return nil, fs.ErrorDirNotFound
case isHTTPError(err, 409):
return nil, ErrorFileExists
}
return nil, err
}
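// Because the pacer may retry the request, the content must be rewindable;
// a bytes.Reader satisfies io.ReadSeeker. A hypothetical call (remote and
// data are placeholders):
func (f *Fs) exampleCreateFile(ctx context.Context, remote string, data []byte) error {
	_, err := f.createFile(ctx, f.resolvePath(remote),
		bytes.NewReader(data), time.Now(), IgnoreOnExist)
	if err == ErrorFileExists {
		// a file is already there - overwriteFile could be used instead
	}
	return err
}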
// overwriteFile updates the content of the file at the given path
// with the content of the io.ReadSeeker.
// If the file does not exist it will be created.
// The maximum size of the content is limited by MaximumUploadBytes.
// The io.ReadSeeker should be resettable by seeking to its start.
// If modTime is not the zero time instant,
// it will be set as the file's modification time after the operation.
//
// This returns fs.ErrorDirNotFound
// if the parent directory of the file is not found.
func (f *Fs) overwriteFile(ctx context.Context, path string, content io.ReadSeeker, modTime time.Time) (*api.HiDriveObject, error) {
parameters := api.NewQueryParameters()
parameters.SetFileInDirectory(path)
var err error
if !modTime.IsZero() {
err = parameters.SetTime("mtime", modTime)
if err != nil {
return nil, err
}
}
opts := rest.Opts{
Method: "PUT",
Path: "/file",
Body: content,
ContentType: "application/octet-stream",
Parameters: parameters.Values,
}
var result api.HiDriveObject
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
// Reset the reading index (in case this is a retry).
if _, err = content.Seek(0, io.SeekStart); err != nil {
return false, err
}
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
})
switch {
case err == nil:
return &result, nil
case isHTTPError(err, 404):
return nil, fs.ErrorDirNotFound
}
return nil, err
}
// uploadFileChunked updates the content of the existing file at the given path
// with the content of the io.Reader.
// Returns the position of the last successfully written byte, stopping before the first failed write.
// If nothing was written this will be 0.
// Returns the resulting api-object if successful.
//
// Replaces the file contents by uploading multiple chunks of the given size in parallel.
// Therefore this can be used to upload files of any size efficiently.
// The number of parallel transfers is limited by transferLimit, which should be larger than 0.
// If modTime is not the zero time instant,
// it will be set as the file's modification time after the operation.
//
// NOTE: This method uses updateFileChunked and may create sparse files
// if the upload of a chunk fails unexpectedly.
// See note about sparse files in patchFile.
// If any of the uploads fail, the process will be aborted and
// the first error that occurred will be returned.
// This is not an atomic operation,
// therefore if the upload fails the file may be partially modified.
//
// This returns fs.ErrorObjectNotFound if the object is not found.
func (f *Fs) uploadFileChunked(ctx context.Context, path string, content io.Reader, modTime time.Time, chunkSize int, transferLimit int64) (okSize uint64, info *api.HiDriveObject, err error) {
okSize, err = f.updateFileChunked(ctx, path, content, 0, chunkSize, transferLimit)
if err == nil {
info, err = f.resizeFile(ctx, path, okSize, modTime)
}
return okSize, info, err
}
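// An illustrative call of the above (an editor's sketch, not from the
// original source; the 48 MiB chunk size and limit of 4 transfers are
// example values only):
//
//	okSize, info, err := f.uploadFileChunked(ctx, "dir/file.bin", in, time.Now(), 48*1024*1024, 4)
//	if err != nil {
//		fs.Errorf(f, "upload failed, only %d bytes are known to be written: %v", okSize, err)
//	}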
// updateFileChunked updates the content of the existing file at the given path
// starting at the given offset.
// Returns the position of the last successfully written byte, stopping before the first failed write.
// If nothing was written this will be 0.
//
// Replaces the file contents starting from the given byte offset
// with the content of the io.Reader.
// If the offset is beyond the file end, the file is extended up to the offset.
//
// The upload is done in multiple chunks of the given size in parallel.
// Therefore this can be used to upload files of any size efficiently.
// The number of parallel transfers is limited by transferLimit, which should be larger than 0.
//
// NOTE: Because it is inefficient to set the modification time with every chunk,
// setting it to a specific value must be done in a separate request
// after this operation finishes.
//
// NOTE: This method uses patchFile and may create sparse files,
// especially if the upload of a chunk fails unexpectedly.
// See note about sparse files in patchFile.
// If any of the uploads fail, the process will be aborted and
// the first error that occurred will be returned.
// This is not an atomic operation,
// therefore if the upload fails the file may be partially modified.
//
// This returns fs.ErrorObjectNotFound if the object is not found.
func (f *Fs) updateFileChunked(ctx context.Context, path string, content io.Reader, offset uint64, chunkSize int, transferLimit int64) (okSize uint64, err error) {
var (
okChunksMu sync.Mutex // protects the variables below
okChunks []ranges.Range
)
g, gCtx := errgroup.WithContext(ctx)
transferSemaphore := semaphore.NewWeighted(transferLimit)
var readErr error
startMoreTransfers := true
zeroTime := time.Time{}
for chunk := uint64(0); startMoreTransfers; chunk++ {
// Acquire semaphore to limit number of transfers in parallel.
readErr = transferSemaphore.Acquire(gCtx, 1)
if readErr != nil {
break
}
// Read a chunk of data.
chunkReader, bytesRead, readErr := readerForChunk(content, chunkSize)
if bytesRead < chunkSize {
startMoreTransfers = false
}
if readErr != nil || bytesRead <= 0 {
break
}
// Transfer the chunk.
chunkOffset := uint64(chunkSize)*chunk + offset
g.Go(func() error {
// After this upload is done,
// signal that another transfer can be started.
defer transferSemaphore.Release(1)
uploadErr := f.patchFile(gCtx, path, cachedReader(chunkReader), chunkOffset, zeroTime)
if uploadErr == nil {
// Remember successfully written chunks.
okChunksMu.Lock()
okChunks = append(okChunks, ranges.Range{Pos: int64(chunkOffset), Size: int64(bytesRead)})
okChunksMu.Unlock()
fs.Debugf(f, "Done uploading chunk of size %v at offset %v.", bytesRead, chunkOffset)
} else {
fs.Infof(f, "Error while uploading chunk at offset %v. Error is %v.", chunkOffset, uploadErr)
}
return uploadErr
})
}
if readErr != nil {
// Log the error in case it is later ignored because of an upload-error.
fs.Infof(f, "Error while reading/preparing to upload a chunk. Error is %v.", readErr)
}
err = g.Wait()
// Compute the first continuous range of the file content,
// which does not contain any failed chunks.
// Do not forget to add the file content up to the starting offset,
// which is presumed to be already correct.
rs := ranges.Ranges{}
rs.Insert(ranges.Range{Pos: 0, Size: int64(offset)})
for _, chunkRange := range okChunks {
rs.Insert(chunkRange)
}
if len(rs) > 0 && rs[0].Pos == 0 {
okSize = uint64(rs[0].Size)
}
if err != nil {
return okSize, err
}
if readErr != nil {
return okSize, readErr
}
return okSize, nil
}
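// exampleBoundedParallelUpload is an illustrative sketch (added for
// explanation, not part of the original source) of the bounded-parallelism
// pattern used by updateFileChunked above: an errgroup collects the first
// error and cancels the shared context, while a weighted semaphore caps the
// number of transfers in flight. Chunk count and limit are example values.
func exampleBoundedParallelUpload(ctx context.Context) error {
	g, gCtx := errgroup.WithContext(ctx)
	transferSemaphore := semaphore.NewWeighted(4) // at most 4 transfers in parallel
	for chunk := 0; chunk < 16; chunk++ {
		if err := transferSemaphore.Acquire(gCtx, 1); err != nil {
			break // an earlier transfer failed and cancelled gCtx
		}
		chunk := chunk // capture the loop variable for the closure
		g.Go(func() error {
			defer transferSemaphore.Release(1)
			fs.Debugf(nil, "a real transfer of chunk %d would run here", chunk)
			return nil // a real implementation would call patchFile here
		})
	}
	// Wait blocks until all started transfers finish and
	// returns the first error encountered, if any.
	return g.Wait()
}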
// patchFile updates the content of the existing file at the given path
// starting at the given offset.
//
// Replaces the file contents starting from the given byte offset
// with the content of the io.ReadSeeker.
// If the offset is beyond the file end, the file is extended up to the offset.
// The maximum size of the update is limited by MaximumUploadBytes.
// The io.ReadSeeker should be resettable by seeking to its start.
// If modTime is not the zero time instant,
// it will be set as the file's modification time after the operation.
//
// NOTE: By extending the file up to the offset this may create sparse files,
// which allocate less space on the file system than their apparent size indicates,
// since holes between data chunks are "real" holes
// and not regions made up of consecutive 0-bytes.
// Subsequent operations (such as copying data)
// usually expand the holes into regions of 0-bytes.
//
// This returns fs.ErrorObjectNotFound if the object is not found.
func (f *Fs) patchFile(ctx context.Context, path string, content io.ReadSeeker, offset uint64, modTime time.Time) error {
parameters := api.NewQueryParameters()
parameters.SetPath(path)
parameters.Set("offset", strconv.FormatUint(offset, 10))
if !modTime.IsZero() {
err := parameters.SetTime("mtime", modTime)
if err != nil {
return err
}
}
opts := rest.Opts{
Method: "PATCH",
Path: "/file",
Body: content,
ContentType: "application/octet-stream",
Parameters: parameters.Values,
NoResponse: true,
}
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
// Reset the reading index (in case this is a retry).
_, err = content.Seek(0, io.SeekStart)
if err != nil {
return false, err
}
resp, err = f.srv.Call(ctx, &opts)
if isHTTPError(err, 423) {
return true, err
}
return f.shouldRetry(ctx, resp, err)
})
if isHTTPError(err, 404) {
return fs.ErrorObjectNotFound
}
return err
}
// resizeFile updates the existing file at the given path to be of the given size
// and returns the resulting api-object if successful.
//
// If the given size is smaller than the current filesize,
// the file is cut/truncated at that position.
// If the given size is larger, the file is extended up to that position.
// If modTime is not the zero time instant,
// it will be set as the file's modification time after the operation.
//
// NOTE: By extending the file this may create sparse files,
// which allocate less space on the file system than their apparent size indicates,
// since holes between data chunks are "real" holes
// and not regions made up of consecutive 0-bytes.
// Subsequent operations (such as copying data)
// usually expand the holes into regions of 0-bytes.
//
// This returns fs.ErrorObjectNotFound if the object is not found.
func (f *Fs) resizeFile(ctx context.Context, path string, size uint64, modTime time.Time) (*api.HiDriveObject, error) {
parameters := api.NewQueryParameters()
parameters.SetPath(path)
parameters.Set("size", strconv.FormatUint(size, 10))
if !modTime.IsZero() {
err := parameters.SetTime("mtime", modTime)
if err != nil {
return nil, err
}
}
opts := rest.Opts{
Method: "POST",
Path: "/file/truncate",
Parameters: parameters.Values,
}
var result api.HiDriveObject
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
})
switch {
case err == nil:
return &result, nil
case isHTTPError(err, 404):
return nil, fs.ErrorObjectNotFound
}
return nil, err
}
// ------------------------------------------------------------
// isHTTPError compares the numerical status code
// of an api.Error to the given HTTP status.
//
// If the given error is not an api.Error or
// a numerical status code could not be determined, this returns false.
// Otherwise this returns whether the status code of the error is equal to the given status.
func isHTTPError(err error, status int64) bool {
if apiErr, ok := err.(*api.Error); ok {
errStatus, decodeErr := apiErr.Code.Int64()
if decodeErr == nil && errStatus == status {
return true
}
}
return false
}
// createHiDriveScopes creates oauth-scopes
// from the given user-role and access-permissions.
//
// If the arguments are empty, they will not be included in the result.
func createHiDriveScopes(role string, access string) []string {
switch {
case role != "" && access != "":
return []string{access + "," + role}
case role != "":
return []string{role}
case access != "":
return []string{access}
}
return []string{}
}
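// For example (values are illustrative):
//
//	createHiDriveScopes("user", "rw") // []string{"rw,user"}
//	createHiDriveScopes("user", "")   // []string{"user"}
//	createHiDriveScopes("", "")       // []string{}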
// cachedReader returns a version of the reader that caches its contents and
// can therefore be reset using Seek.
func cachedReader(reader io.Reader) io.ReadSeeker {
bytesReader, ok := reader.(*bytes.Reader)
if ok {
return bytesReader
}
repeatableReader, ok := reader.(*readers.RepeatableReader)
if ok {
return repeatableReader
}
return readers.NewRepeatableReader(reader)
}
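// Illustrative use of cachedReader (an editor's sketch, not from the
// original source):
//
//	r := cachedReader(oneShotReader) // caches the bytes as they are read
//	_, _ = io.Copy(io.Discard, r)    // a first pass consumes the input
//	_, _ = r.Seek(0, io.SeekStart)   // a retry can now replay the cached bytes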
// readerForChunk reads a chunk of bytes from reader (after handling any accounting).
// Returns a new io.Reader (chunkReader) for that chunk
// and the number of bytes that have been read from reader.
func readerForChunk(reader io.Reader, length int) (chunkReader io.Reader, bytesRead int, err error) {
// Unwrap any accounting from the input if present.
reader, wrap := accounting.UnWrap(reader)
// Read a chunk of data.
buffer := make([]byte, length)
bytesRead, err = io.ReadFull(reader, buffer)
if err == io.EOF || err == io.ErrUnexpectedEOF {
err = nil
}
if err != nil {
return nil, bytesRead, err
}
// Truncate unused capacity.
buffer = buffer[:bytesRead]
// Use wrap to put any accounting back for chunkReader.
return wrap(bytes.NewReader(buffer)), bytesRead, nil
}

File diff suppressed because it is too large


@@ -1,45 +0,0 @@
// Test HiDrive filesystem interface
package hidrive
import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote.
func TestIntegration(t *testing.T) {
name := "TestHiDrive"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: 1,
MaxChunkSize: MaximumUploadBytes,
CeilChunkSize: nil,
NeedMultipleChunks: false,
},
})
}
// Change the configured UploadChunkSize.
// Will only be called while no transfer is in progress.
func (f *Fs) SetUploadChunkSize(chunksize fs.SizeSuffix) (fs.SizeSuffix, error) {
var old fs.SizeSuffix
old, f.opt.UploadChunkSize = f.opt.UploadChunkSize, chunksize
return old, nil
}
// Change the configured UploadCutoff.
// Will only be called while no transfer is in progress.
func (f *Fs) SetUploadCutoff(cutoff fs.SizeSuffix) (fs.SizeSuffix, error) {
var old fs.SizeSuffix
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cutoff
return old, nil
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
)


@@ -1,410 +0,0 @@
// Package hidrivehash implements the HiDrive hashing algorithm which combines SHA-1 hashes hierarchically to a single top-level hash.
//
// Note: This implementation does not grant access to any partial hashes generated.
//
// See: https://developer.hidrive.com/wp-content/uploads/2021/07/HiDrive_Synchronization-v3.3-rev28.pdf
// (link to newest version: https://static.hidrive.com/dev/0001)
package hidrivehash
import (
"bytes"
"crypto/sha1"
"encoding"
"encoding/binary"
"errors"
"fmt"
"hash"
"io"
"github.com/rclone/rclone/backend/hidrive/hidrivehash/internal"
)
const (
// BlockSize of the checksum in bytes.
BlockSize = 4096
// Size of the checksum in bytes.
Size = sha1.Size
// sumsPerLevel is the number of checksums aggregated into a single level-hash.
sumsPerLevel = 256
)
var (
// zeroSum is a special hash consisting of 20 null-bytes.
// This will be the hash of any empty file (or ones containing only null-bytes).
zeroSum = [Size]byte{}
// ErrorInvalidEncoding is returned when a hash should be decoded from a binary form that is invalid.
ErrorInvalidEncoding = errors.New("encoded binary form is invalid for this hash")
// ErrorHashFull is returned when a hash reached its capacity and cannot accept any more input.
ErrorHashFull = errors.New("hash reached its capacity")
)
// writeByBlock writes len(p) bytes from p to the io.Writer in blocks of size blockSize.
// It returns the number of bytes written from p (0 <= n <= len(p))
// and any error encountered that caused the write to stop early.
//
// A pointer bytesInBlock to a counter needs to be supplied,
// that is used to keep track how many bytes have been written to the writer already.
// A pointer onlyNullBytesInBlock to a boolean needs to be supplied,
// that is used to keep track whether the block so far only consists of null-bytes.
// The callback onBlockWritten is called whenever a full block has been written to the writer
// and is given as input the number of bytes that still need to be written.
func writeByBlock(p []byte, writer io.Writer, blockSize uint32, bytesInBlock *uint32, onlyNullBytesInBlock *bool, onBlockWritten func(remaining int) error) (n int, err error) {
total := len(p)
nullBytes := make([]byte, blockSize)
for len(p) > 0 {
toWrite := int(blockSize - *bytesInBlock)
if toWrite > len(p) {
toWrite = len(p)
}
c, err := writer.Write(p[:toWrite])
*bytesInBlock += uint32(c)
*onlyNullBytesInBlock = *onlyNullBytesInBlock && bytes.Equal(nullBytes[:toWrite], p[:toWrite])
// Discard data written through a reslice
p = p[c:]
if err != nil {
return total - len(p), err
}
if *bytesInBlock == blockSize {
err = onBlockWritten(len(p))
if err != nil {
return total - len(p), err
}
*bytesInBlock = 0
*onlyNullBytesInBlock = true
}
}
return total, nil
}
// level is a hash.Hash that is used to aggregate the checksums produced by the level hierarchically beneath it.
// It is used to represent any level-n hash, except for level-0.
type level struct {
checksum [Size]byte // aggregated checksum of this level
sumCount uint32 // number of sums contained in this level so far
bytesInHasher uint32 // number of bytes written into hasher so far
onlyNullBytesInHasher bool // whether the hasher only contains null-bytes so far
hasher hash.Hash
}
// NewLevel returns a new hash.Hash computing any level-n hash, except level-0.
func NewLevel() hash.Hash {
l := &level{}
l.Reset()
return l
}
// Add takes a position-embedded SHA-1 checksum and adds it to the level.
func (l *level) Add(sha1sum []byte) {
var tmp uint
var carry bool
for i := Size - 1; i >= 0; i-- {
tmp = uint(sha1sum[i]) + uint(l.checksum[i])
if carry {
tmp++
}
carry = tmp > 255
l.checksum[i] = byte(tmp)
}
}
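// For example (an illustrative note): adding the checksums 0x...01ff and
// 0x...0001 yields 0x...0200, because Add treats the two 20-byte sums as
// 160-bit big-endian integers and adds them with carry, modulo 2^160.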
// IsFull returns whether the number of checksums added to this level reached its capacity.
func (l *level) IsFull() bool {
return l.sumCount >= sumsPerLevel
}
// Write (via the embedded io.Writer interface) adds more data to the running hash.
// Contrary to the specification from hash.Hash, this DOES return an error,
// specifically ErrorHashFull if and only if IsFull() returns true.
func (l *level) Write(p []byte) (n int, err error) {
if l.IsFull() {
return 0, ErrorHashFull
}
onBlockWritten := func(remaining int) error {
if !l.onlyNullBytesInHasher {
c, err := l.hasher.Write([]byte{byte(l.sumCount)})
l.bytesInHasher += uint32(c)
if err != nil {
return err
}
l.Add(l.hasher.Sum(nil))
}
l.sumCount++
l.hasher.Reset()
if remaining > 0 && l.IsFull() {
return ErrorHashFull
}
return nil
}
return writeByBlock(p, l.hasher, uint32(l.BlockSize()), &l.bytesInHasher, &l.onlyNullBytesInHasher, onBlockWritten)
}
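// In other words (an explanatory note, not from the original source): every
// 20-byte checksum fed into a level gets its index within the level appended
// as a single byte before being re-hashed, which is what makes the
// aggregated sums "position-embedded" and therefore order-sensitive.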
// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (l *level) Sum(b []byte) []byte {
return append(b, l.checksum[:]...)
}
// Reset resets the Hash to its initial state.
func (l *level) Reset() {
l.checksum = zeroSum // clear the current checksum
l.sumCount = 0
l.bytesInHasher = 0
l.onlyNullBytesInHasher = true
l.hasher = sha1.New()
}
// Size returns the number of bytes Sum will return.
func (l *level) Size() int {
return Size
}
// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (l *level) BlockSize() int {
return Size
}
// MarshalBinary encodes the hash into a binary form and returns the result.
func (l *level) MarshalBinary() ([]byte, error) {
b := make([]byte, Size+4+4+1)
copy(b, l.checksum[:])
binary.BigEndian.PutUint32(b[Size:], l.sumCount)
binary.BigEndian.PutUint32(b[Size+4:], l.bytesInHasher)
if l.onlyNullBytesInHasher {
b[Size+4+4] = 1
}
encodedHasher, err := l.hasher.(encoding.BinaryMarshaler).MarshalBinary()
if err != nil {
return nil, err
}
b = append(b, encodedHasher...)
return b, nil
}
// UnmarshalBinary decodes the binary form generated by MarshalBinary.
// The hash will replace its internal state accordingly.
func (l *level) UnmarshalBinary(b []byte) error {
if len(b) < Size+4+4+1 {
return ErrorInvalidEncoding
}
copy(l.checksum[:], b)
l.sumCount = binary.BigEndian.Uint32(b[Size:])
l.bytesInHasher = binary.BigEndian.Uint32(b[Size+4:])
switch b[Size+4+4] {
case 0:
l.onlyNullBytesInHasher = false
case 1:
l.onlyNullBytesInHasher = true
default:
return ErrorInvalidEncoding
}
err := l.hasher.(encoding.BinaryUnmarshaler).UnmarshalBinary(b[Size+4+4+1:])
return err
}
// hidriveHash is the hash computing the actual checksum used by HiDrive by combining multiple level-hashes.
type hidriveHash struct {
levels []*level // collection of level-hashes, one for each level starting at level-1
lastSumWritten [Size]byte // the last checksum written to any of the levels
bytesInBlock uint32 // bytes written into blockHash so far
onlyNullBytesInBlock bool // whether the current block only contains null-bytes so far
blockHash hash.Hash
}
// New returns a new hash.Hash computing the HiDrive checksum.
func New() hash.Hash {
h := &hidriveHash{}
h.Reset()
return h
}
// aggregateToLevel writes the checksum to the level at the given index
// and if necessary propagates any changes to levels above.
func (h *hidriveHash) aggregateToLevel(index int, sum []byte) {
for i := index; ; i++ {
if i >= len(h.levels) {
h.levels = append(h.levels, NewLevel().(*level))
}
_, err := h.levels[i].Write(sum)
copy(h.lastSumWritten[:], sum)
if err != nil {
panic(fmt.Errorf("level-hash should not have produced an error: %w", err))
}
if !h.levels[i].IsFull() {
break
}
sum = h.levels[i].Sum(nil)
h.levels[i].Reset()
}
}
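// Illustrative scale (derived from BlockSize and sumsPerLevel): a full
// level-1 hash aggregates 256 block sums and therefore covers
// 4096 * 256 bytes = 1 MiB of input, a full level-2 hash covers 256 MiB,
// and each further level multiplies the covered range by 256 again.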
// Write (via the embedded io.Writer interface) adds more data to the running hash.
// It never returns an error.
func (h *hidriveHash) Write(p []byte) (n int, err error) {
onBlockWritten := func(remaining int) error {
var sum []byte
if h.onlyNullBytesInBlock {
sum = zeroSum[:]
} else {
sum = h.blockHash.Sum(nil)
}
h.blockHash.Reset()
h.aggregateToLevel(0, sum)
return nil
}
return writeByBlock(p, h.blockHash, uint32(BlockSize), &h.bytesInBlock, &h.onlyNullBytesInBlock, onBlockWritten)
}
// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (h *hidriveHash) Sum(b []byte) []byte {
// Save internal state.
state, err := h.MarshalBinary()
if err != nil {
panic(fmt.Errorf("saving the internal state should not have produced an error: %w", err))
}
if h.bytesInBlock > 0 {
// Fill remainder of block with null-bytes.
filler := make([]byte, h.BlockSize()-int(h.bytesInBlock))
_, err = h.Write(filler)
if err != nil {
panic(fmt.Errorf("filling with null-bytes should not have an error: %w", err))
}
}
checksum := zeroSum
for i := 0; i < len(h.levels); i++ {
level := h.levels[i]
if i < len(h.levels)-1 {
// Aggregate non-empty non-final levels.
if level.sumCount >= 1 {
h.aggregateToLevel(i+1, level.Sum(nil))
level.Reset()
}
} else {
// Determine sum of final level.
if level.sumCount > 1 {
copy(checksum[:], level.Sum(nil))
} else {
// This is needed, otherwise there is no way to return
// the non-position-embedded checksum.
checksum = h.lastSumWritten
}
}
}
// Restore internal state.
err = h.UnmarshalBinary(state)
if err != nil {
panic(fmt.Errorf("restoring the internal state should not have produced an error: %w", err))
}
return append(b, checksum[:]...)
}
// Reset resets the Hash to its initial state.
func (h *hidriveHash) Reset() {
h.levels = nil
h.lastSumWritten = zeroSum // clear the last written checksum
h.bytesInBlock = 0
h.onlyNullBytesInBlock = true
h.blockHash = sha1.New()
}
// Size returns the number of bytes Sum will return.
func (h *hidriveHash) Size() int {
return Size
}
// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (h *hidriveHash) BlockSize() int {
return BlockSize
}
// MarshalBinary encodes the hash into a binary form and returns the result.
func (h *hidriveHash) MarshalBinary() ([]byte, error) {
b := make([]byte, Size+4+1+8)
copy(b, h.lastSumWritten[:])
binary.BigEndian.PutUint32(b[Size:], h.bytesInBlock)
if h.onlyNullBytesInBlock {
b[Size+4] = 1
}
binary.BigEndian.PutUint64(b[Size+4+1:], uint64(len(h.levels)))
for _, level := range h.levels {
encodedLevel, err := level.MarshalBinary()
if err != nil {
return nil, err
}
encodedLength := make([]byte, 8)
binary.BigEndian.PutUint64(encodedLength, uint64(len(encodedLevel)))
b = append(b, encodedLength...)
b = append(b, encodedLevel...)
}
encodedBlockHash, err := h.blockHash.(encoding.BinaryMarshaler).MarshalBinary()
if err != nil {
return nil, err
}
b = append(b, encodedBlockHash...)
return b, nil
}
// UnmarshalBinary decodes the binary form generated by MarshalBinary.
// The hash will replace its internal state accordingly.
func (h *hidriveHash) UnmarshalBinary(b []byte) error {
if len(b) < Size+4+1+8 {
return ErrorInvalidEncoding
}
copy(h.lastSumWritten[:], b)
h.bytesInBlock = binary.BigEndian.Uint32(b[Size:])
switch b[Size+4] {
case 0:
h.onlyNullBytesInBlock = false
case 1:
h.onlyNullBytesInBlock = true
default:
return ErrorInvalidEncoding
}
amount := binary.BigEndian.Uint64(b[Size+4+1:])
h.levels = make([]*level, int(amount))
offset := Size + 4 + 1 + 8
for i := range h.levels {
length := int(binary.BigEndian.Uint64(b[offset:]))
offset += 8
h.levels[i] = NewLevel().(*level)
err := h.levels[i].UnmarshalBinary(b[offset : offset+length])
if err != nil {
return err
}
offset += length
}
err := h.blockHash.(encoding.BinaryUnmarshaler).UnmarshalBinary(b[offset:])
return err
}
// Sum returns the HiDrive checksum of the data.
func Sum(data []byte) [Size]byte {
h := New().(*hidriveHash)
_, _ = h.Write(data)
var result [Size]byte
copy(result[:], h.Sum(nil))
return result
}
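// Usage sketch (illustrative, not from the original source): the streaming
// interface and the package-level Sum produce the same 20-byte checksum.
//
//	h := hidrivehash.New()
//	_, _ = h.Write(data)            // h.Write never returns an error
//	streamed := h.Sum(nil)          // 20-byte HiDrive checksum
//	direct := hidrivehash.Sum(data) // direct[:] equals streamed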
// Check the interfaces are satisfied.
var (
_ hash.Hash = (*level)(nil)
_ encoding.BinaryMarshaler = (*level)(nil)
_ encoding.BinaryUnmarshaler = (*level)(nil)
_ internal.LevelHash = (*level)(nil)
_ hash.Hash = (*hidriveHash)(nil)
_ encoding.BinaryMarshaler = (*hidriveHash)(nil)
_ encoding.BinaryUnmarshaler = (*hidriveHash)(nil)
)


@@ -1,395 +0,0 @@
package hidrivehash_test
import (
"crypto/sha1"
"encoding"
"encoding/hex"
"fmt"
"io"
"testing"
"github.com/rclone/rclone/backend/hidrive/hidrivehash"
"github.com/rclone/rclone/backend/hidrive/hidrivehash/internal"
"github.com/stretchr/testify/assert"
)
// helper functions to set up test-tables
func sha1ArrayAsSlice(sum [sha1.Size]byte) []byte {
return sum[:]
}
func mustDecode(hexstring string) []byte {
result, err := hex.DecodeString(hexstring)
if err != nil {
panic(err)
}
return result
}
// ------------------------------------------------------------
var testTableLevelPositionEmbedded = []struct {
ins [][]byte
outs [][]byte
name string
}{
{
[][]byte{
sha1ArrayAsSlice([20]byte{245, 202, 195, 223, 121, 198, 189, 112, 138, 202, 222, 2, 146, 156, 127, 16, 208, 233, 98, 88}),
sha1ArrayAsSlice([20]byte{78, 188, 156, 219, 173, 54, 81, 55, 47, 220, 222, 207, 201, 21, 57, 252, 255, 239, 251, 186}),
},
[][]byte{
sha1ArrayAsSlice([20]byte{245, 202, 195, 223, 121, 198, 189, 112, 138, 202, 222, 2, 146, 156, 127, 16, 208, 233, 98, 88}),
sha1ArrayAsSlice([20]byte{68, 135, 96, 187, 38, 253, 14, 167, 186, 167, 188, 210, 91, 177, 185, 13, 208, 217, 94, 18}),
},
"documentation-v3.2rev27-example L0 (position-embedded)",
},
{
[][]byte{
sha1ArrayAsSlice([20]byte{68, 254, 92, 166, 52, 37, 104, 180, 22, 123, 249, 144, 182, 78, 64, 74, 57, 117, 225, 195}),
sha1ArrayAsSlice([20]byte{75, 211, 153, 190, 125, 179, 67, 49, 60, 149, 98, 246, 142, 20, 11, 254, 159, 162, 129, 237}),
sha1ArrayAsSlice([20]byte{150, 2, 9, 153, 97, 153, 189, 104, 147, 14, 77, 203, 244, 243, 25, 212, 67, 48, 111, 107}),
},
[][]byte{
sha1ArrayAsSlice([20]byte{68, 254, 92, 166, 52, 37, 104, 180, 22, 123, 249, 144, 182, 78, 64, 74, 57, 117, 225, 195}),
sha1ArrayAsSlice([20]byte{144, 209, 246, 100, 177, 216, 171, 229, 83, 17, 92, 135, 68, 98, 76, 72, 217, 24, 99, 176}),
sha1ArrayAsSlice([20]byte{38, 211, 255, 254, 19, 114, 105, 77, 230, 31, 170, 83, 57, 85, 102, 29, 28, 72, 211, 27}),
},
"documentation-example L0 (position-embedded)",
},
{
[][]byte{
sha1ArrayAsSlice([20]byte{173, 123, 132, 245, 176, 172, 43, 183, 121, 40, 66, 252, 101, 249, 188, 193, 160, 189, 2, 116}),
sha1ArrayAsSlice([20]byte{40, 34, 8, 238, 37, 5, 237, 184, 79, 105, 10, 167, 171, 254, 13, 229, 132, 112, 254, 8}),
sha1ArrayAsSlice([20]byte{39, 112, 26, 86, 190, 35, 100, 101, 28, 131, 122, 191, 254, 144, 239, 107, 253, 124, 104, 203}),
},
[][]byte{
sha1ArrayAsSlice([20]byte{173, 123, 132, 245, 176, 172, 43, 183, 121, 40, 66, 252, 101, 249, 188, 193, 160, 189, 2, 116}),
sha1ArrayAsSlice([20]byte{213, 157, 141, 227, 213, 178, 25, 111, 200, 145, 77, 164, 17, 247, 202, 167, 37, 46, 0, 124}),
sha1ArrayAsSlice([20]byte{253, 13, 168, 58, 147, 213, 125, 212, 229, 20, 200, 100, 16, 136, 186, 19, 34, 170, 105, 71}),
},
"documentation-example L1 (position-embedded)",
},
}
var testTableLevel = []struct {
ins [][]byte
outs [][]byte
name string
}{
{
[][]byte{
mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"),
mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"),
mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"),
},
[][]byte{
mustDecode("44fe5ca6342568b4167bf990b64e404a3975e1c3"),
mustDecode("90d1f664b1d8abe553115c8744624c48d91863b0"),
mustDecode("26d3fffe1372694de61faa533955661d1c48d31b"),
},
"documentation-example L0",
},
{
[][]byte{
mustDecode("75a9f88fb219ef1dd31adf41c93e2efaac8d0245"),
mustDecode("daedc425199501b1e86b5eaba5649cbde205e6ae"),
mustDecode("286ac5283f99c4e0f11683900a3e39661c375dd6"),
},
[][]byte{
mustDecode("ad7b84f5b0ac2bb7792842fc65f9bcc1a0bd0274"),
mustDecode("d59d8de3d5b2196fc8914da411f7caa7252e007c"),
mustDecode("fd0da83a93d57dd4e514c8641088ba1322aa6947"),
},
"documentation-example L1",
},
{
[][]byte{
mustDecode("0000000000000000000000000000000000000000"),
mustDecode("0000000000000000000000000000000000000000"),
mustDecode("75a9f88fb219ef1dd31adf41c93e2efaac8d0245"),
mustDecode("0000000000000000000000000000000000000000"),
mustDecode("daedc425199501b1e86b5eaba5649cbde205e6ae"),
mustDecode("0000000000000000000000000000000000000000"),
mustDecode("0000000000000000000000000000000000000000"),
mustDecode("0000000000000000000000000000000000000000"),
mustDecode("286ac5283f99c4e0f11683900a3e39661c375dd6"),
mustDecode("0000000000000000000000000000000000000000"),
},
[][]byte{
mustDecode("0000000000000000000000000000000000000000"),
mustDecode("0000000000000000000000000000000000000000"),
mustDecode("a197464ec19f2b2b2bc6b21f6c939c7e57772843"),
mustDecode("a197464ec19f2b2b2bc6b21f6c939c7e57772843"),
mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"),
mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"),
mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"),
mustDecode("b04769357aa4eb4b52cd5bec6935bc8f977fa3a1"),
mustDecode("8f56351897b4e1d100646fa122c924347721b2f5"),
mustDecode("8f56351897b4e1d100646fa122c924347721b2f5"),
},
"mixed-with-empties",
},
}
var testTable = []struct {
data []byte
// pattern describes how to use data to construct the hash-input.
// For every entry n at even indices this repeats the data n times.
// For every entry m at odd indices this repeats a null-byte m times.
// The input-data is constructed by concatenating the results in order.
pattern []int64
out []byte
name string
}{
{
[]byte("#ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz\n"),
[]int64{64},
mustDecode("09f077820a8a41f34a639f2172f1133b1eafe4e6"),
"documentation-example L0",
},
{
[]byte("#ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz\n"),
[]int64{64 * 256},
mustDecode("75a9f88fb219ef1dd31adf41c93e2efaac8d0245"),
"documentation-example L1",
},
{
[]byte("#ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz\n"),
[]int64{64 * 256, 0, 64 * 128, 4096 * 128, 64*2 + 32},
mustDecode("fd0da83a93d57dd4e514c8641088ba1322aa6947"),
"documentation-example L2",
},
{
[]byte("hello rclone\n"),
[]int64{316},
mustDecode("72370f9c18a2c20b31d71f3f4cee7a3cd2703737"),
"not-block-aligned",
},
{
[]byte("hello rclone\n"),
[]int64{13, 4096 * 3, 4},
mustDecode("a6990b81791f0d2db750b38f046df321c975aa60"),
"not-block-aligned-with-null-bytes",
},
{
[]byte{},
[]int64{},
mustDecode("0000000000000000000000000000000000000000"),
"empty",
},
{
[]byte{},
[]int64{0, 4096 * 256 * 256},
mustDecode("0000000000000000000000000000000000000000"),
"null-bytes",
},
}
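// For example (an explanatory note): the pattern []int64{13, 4096 * 3, 4}
// above expands to 13 copies of data, then 4096*3 null-bytes, then 4 more
// copies of data, concatenated in that order.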
// ------------------------------------------------------------
func TestLevelAdd(t *testing.T) {
for _, test := range testTableLevelPositionEmbedded {
l := hidrivehash.NewLevel().(internal.LevelHash)
t.Run(test.name, func(t *testing.T) {
for i := range test.ins {
l.Add(test.ins[i])
assert.Equal(t, test.outs[i], l.Sum(nil))
}
})
}
}
func TestLevelWrite(t *testing.T) {
for _, test := range testTableLevel {
l := hidrivehash.NewLevel()
t.Run(test.name, func(t *testing.T) {
for i := range test.ins {
l.Write(test.ins[i])
assert.Equal(t, test.outs[i], l.Sum(nil))
}
})
}
}
func TestLevelIsFull(t *testing.T) {
content := [hidrivehash.Size]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}
l := hidrivehash.NewLevel()
for i := 0; i < 256; i++ {
assert.False(t, l.(internal.LevelHash).IsFull())
written, err := l.Write(content[:])
assert.Equal(t, len(content), written)
if !assert.NoError(t, err) {
t.FailNow()
}
}
assert.True(t, l.(internal.LevelHash).IsFull())
written, err := l.Write(content[:])
assert.True(t, l.(internal.LevelHash).IsFull())
assert.Equal(t, 0, written)
assert.ErrorIs(t, err, hidrivehash.ErrorHashFull)
}
func TestLevelReset(t *testing.T) {
l := hidrivehash.NewLevel()
zeroHash := l.Sum(nil)
_, err := l.Write([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19})
if assert.NoError(t, err) {
assert.NotEqual(t, zeroHash, l.Sum(nil))
l.Reset()
assert.Equal(t, zeroHash, l.Sum(nil))
}
}
func TestLevelSize(t *testing.T) {
l := hidrivehash.NewLevel()
assert.Equal(t, 20, l.Size())
}
func TestLevelBlockSize(t *testing.T) {
l := hidrivehash.NewLevel()
assert.Equal(t, 20, l.BlockSize())
}
func TestLevelBinaryMarshaler(t *testing.T) {
content := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}
l := hidrivehash.NewLevel().(internal.LevelHash)
l.Write(content[:10])
encoded, err := l.MarshalBinary()
if assert.NoError(t, err) {
d := hidrivehash.NewLevel().(internal.LevelHash)
err = d.UnmarshalBinary(encoded)
if assert.NoError(t, err) {
assert.Equal(t, l.Sum(nil), d.Sum(nil))
l.Write(content[10:])
d.Write(content[10:])
assert.Equal(t, l.Sum(nil), d.Sum(nil))
}
}
}
func TestLevelInvalidEncoding(t *testing.T) {
l := hidrivehash.NewLevel().(internal.LevelHash)
err := l.UnmarshalBinary([]byte{})
assert.ErrorIs(t, err, hidrivehash.ErrorInvalidEncoding)
}
// ------------------------------------------------------------
type infiniteReader struct {
source []byte
offset int
}
func (m *infiniteReader) Read(b []byte) (int, error) {
count := copy(b, m.source[m.offset:])
m.offset += count
m.offset %= len(m.source)
return count, nil
}
func writeInChunks(writer io.Writer, chunkSize int64, data []byte, pattern []int64) error {
readers := make([]io.Reader, len(pattern))
nullBytes := [4096]byte{}
for i, n := range pattern {
if i%2 == 0 {
readers[i] = io.LimitReader(&infiniteReader{data, 0}, n*int64(len(data)))
} else {
readers[i] = io.LimitReader(&infiniteReader{nullBytes[:], 0}, n)
}
}
reader := io.MultiReader(readers...)
for {
_, err := io.CopyN(writer, reader, chunkSize)
if err != nil {
if err == io.EOF {
err = nil
}
return err
}
}
}
func TestWrite(t *testing.T) {
for _, test := range testTable {
t.Run(test.name, func(t *testing.T) {
h := hidrivehash.New()
err := writeInChunks(h, int64(h.BlockSize()), test.data, test.pattern)
if assert.NoError(t, err) {
normalSum := h.Sum(nil)
assert.Equal(t, test.out, normalSum)
// Test that different block-sizes produce the same result.
for _, blockSize := range []int64{397, 512, 4091, 8192, 10000} {
t.Run(fmt.Sprintf("block-size %v", blockSize), func(t *testing.T) {
h := hidrivehash.New()
err := writeInChunks(h, blockSize, test.data, test.pattern)
if assert.NoError(t, err) {
assert.Equal(t, normalSum, h.Sum(nil))
}
})
}
}
})
}
}
func TestReset(t *testing.T) {
h := hidrivehash.New()
zeroHash := h.Sum(nil)
_, err := h.Write([]byte{1})
if assert.NoError(t, err) {
assert.NotEqual(t, zeroHash, h.Sum(nil))
h.Reset()
assert.Equal(t, zeroHash, h.Sum(nil))
}
}
func TestSize(t *testing.T) {
h := hidrivehash.New()
assert.Equal(t, 20, h.Size())
}
func TestBlockSize(t *testing.T) {
h := hidrivehash.New()
assert.Equal(t, 4096, h.BlockSize())
}
func TestBinaryMarshaler(t *testing.T) {
for _, test := range testTable {
h := hidrivehash.New()
d := hidrivehash.New()
half := len(test.pattern) / 2
t.Run(test.name, func(t *testing.T) {
err := writeInChunks(h, int64(h.BlockSize()), test.data, test.pattern[:half])
assert.NoError(t, err)
encoded, err := h.(encoding.BinaryMarshaler).MarshalBinary()
if assert.NoError(t, err) {
err = d.(encoding.BinaryUnmarshaler).UnmarshalBinary(encoded)
if assert.NoError(t, err) {
assert.Equal(t, h.Sum(nil), d.Sum(nil))
err = writeInChunks(h, int64(h.BlockSize()), test.data, test.pattern[half:])
assert.NoError(t, err)
err = writeInChunks(d, int64(d.BlockSize()), test.data, test.pattern[half:])
assert.NoError(t, err)
assert.Equal(t, h.Sum(nil), d.Sum(nil))
}
}
})
}
}
func TestInvalidEncoding(t *testing.T) {
h := hidrivehash.New()
err := h.(encoding.BinaryUnmarshaler).UnmarshalBinary([]byte{})
assert.ErrorIs(t, err, hidrivehash.ErrorInvalidEncoding)
}
func TestSum(t *testing.T) {
assert.Equal(t, [hidrivehash.Size]byte{}, hidrivehash.Sum([]byte{}))
content := []byte{1}
h := hidrivehash.New()
h.Write(content)
sum := hidrivehash.Sum(content)
assert.Equal(t, h.Sum(nil), sum[:])
}


@@ -1,18 +0,0 @@
// Package internal provides utilities for HiDrive.
package internal
import (
"encoding"
"hash"
)
// LevelHash is an internal interface for level-hashes.
type LevelHash interface {
encoding.BinaryMarshaler
encoding.BinaryUnmarshaler
hash.Hash
// Add takes a position-embedded checksum and adds it to the level.
Add(sum []byte)
// IsFull returns whether the number of checksums added to this level reached its capacity.
IsFull() bool
}


@@ -13,6 +13,7 @@ import (
 	"net/http"
 	"net/url"
 	"path"
+	"strconv"
 	"strings"
 	"sync"
 	"time"
@@ -34,11 +35,11 @@ var (
 func init() {
 	fsi := &fs.RegInfo{
 		Name:        "http",
-		Description: "HTTP",
+		Description: "http Connection",
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name:     "url",
-			Help:     "URL of HTTP host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",
+			Help:     "URL of http host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",
 			Required: true,
 		}, {
 			Name: "headers",
@@ -304,7 +305,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 		fs:     f,
 		remote: remote,
 	}
-	err := o.head(ctx)
+	err := o.stat(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -316,6 +317,15 @@ func (f *Fs) url(remote string) string {
 	return f.endpointURL + rest.URLPathEscape(remote)
 }
 
+// parse s into an int64, on failure return def
+func parseInt64(s string, def int64) int64 {
+	n, e := strconv.ParseInt(s, 10, 64)
+	if e != nil {
+		return def
+	}
+	return n
+}
+
 // Errors returned by parseName
 var (
 	errURLJoinFailed = errors.New("URLJoin failed")
@@ -490,7 +500,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		fs:     f,
 		remote: remote,
 	}
-	switch err := file.head(ctx); err {
+	switch err := file.stat(ctx); err {
 	case nil:
 		add(file)
 	case fs.ErrorNotAFile:
@@ -569,8 +579,8 @@ func (o *Object) url() string {
 	return o.fs.url(o.remote)
 }
 
-// head sends a HEAD request to update info fields in the Object
-func (o *Object) head(ctx context.Context) error {
+// stat updates the info field in the Object
+func (o *Object) stat(ctx context.Context) error {
 	if o.fs.opt.NoHead {
 		o.size = -1
 		o.modTime = timeUnset
@@ -591,19 +601,13 @@
 	if err != nil {
 		return fmt.Errorf("failed to stat: %w", err)
 	}
-	return o.decodeMetadata(ctx, res)
-}
-
-// decodeMetadata updates info fields in the Object according to HTTP response headers
-func (o *Object) decodeMetadata(ctx context.Context, res *http.Response) error {
 	t, err := http.ParseTime(res.Header.Get("Last-Modified"))
 	if err != nil {
 		t = timeUnset
 	}
+	o.size = parseInt64(res.Header.Get("Content-Length"), -1)
 	o.modTime = t
 	o.contentType = res.Header.Get("Content-Type")
-	o.size = rest.ParseSizeFromHeaders(res.Header)
 	// If NoSlash is set then check ContentType to see if it is a directory
 	if o.fs.opt.NoSlash {
 		mediaType, _, err := mime.ParseMediaType(o.contentType)
@@ -649,9 +653,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	if err != nil {
 		return nil, fmt.Errorf("Open failed: %w", err)
 	}
-	if err = o.decodeMetadata(ctx, res); err != nil {
-		return nil, fmt.Errorf("decodeMetadata failed: %w", err)
-	}
 	return res.Body, nil
 }


@@ -3,7 +3,7 @@ package http
 import (
 	"context"
 	"fmt"
-	"io"
+	"io/ioutil"
 	"net/http"
 	"net/http/httptest"
 	"net/url"
@@ -33,21 +33,20 @@ var (
 	lineEndSize = 1
 )
 
-// prepareServer prepares the test server and shuts it down automatically
-// when the test completes.
-func prepareServer(t *testing.T) configmap.Simple {
+// prepareServer the test server and return a function to tidy it up afterwards
+func prepareServer(t *testing.T) (configmap.Simple, func()) {
 	// file server for test/files
 	fileServer := http.FileServer(http.Dir(filesPath))
 
 	// verify the file path is correct, and also check which line endings
 	// are used to get sizes right ("\n" except on Windows, but even there
 	// we may have "\n" or "\r\n" depending on git crlf setting)
-	fileList, err := os.ReadDir(filesPath)
+	fileList, err := ioutil.ReadDir(filesPath)
 	require.NoError(t, err)
 	require.Greater(t, len(fileList), 0)
 	for _, file := range fileList {
 		if !file.IsDir() {
-			data, _ := os.ReadFile(filepath.Join(filesPath, file.Name()))
+			data, _ := ioutil.ReadFile(filepath.Join(filesPath, file.Name()))
 			if strings.HasSuffix(string(data), "\r\n") {
 				lineEndSize = 2
 			}
@@ -79,21 +78,20 @@
 		"url":     ts.URL,
 		"headers": strings.Join(headers, ","),
 	}
 
-	t.Cleanup(ts.Close)
-
-	return m
+	// return a function to tidy up
+	return m, ts.Close
 }
 
-// prepare prepares the test server and shuts it down automatically
-// when the test completes.
-func prepare(t *testing.T) fs.Fs {
-	m := prepareServer(t)
+// prepare the test server and return a function to tidy it up afterwards
+func prepare(t *testing.T) (fs.Fs, func()) {
+	m, tidy := prepareServer(t)
 
 	// Instantiate it
 	f, err := NewFs(context.Background(), remoteName, "", m)
 	require.NoError(t, err)
 
-	return f
+	return f, tidy
 }
 
 func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
@@ -136,19 +134,22 @@
 }
 
 func TestListRoot(t *testing.T) {
-	f := prepare(t)
+	f, tidy := prepare(t)
+	defer tidy()
 	testListRoot(t, f, false)
 }
 
 func TestListRootNoSlash(t *testing.T) {
-	f := prepare(t)
+	f, tidy := prepare(t)
 	f.(*Fs).opt.NoSlash = true
+	defer tidy()
 	testListRoot(t, f, true)
 }
 
 func TestListSubDir(t *testing.T) {
-	f := prepare(t)
+	f, tidy := prepare(t)
+	defer tidy()
 
 	entries, err := f.List(context.Background(), "three")
 	require.NoError(t, err)
@@ -165,7 +166,8 @@
 }
 
 func TestNewObject(t *testing.T) {
-	f := prepare(t)
+	f, tidy := prepare(t)
+	defer tidy()
 
 	o, err := f.NewObject(context.Background(), "four/under four.txt")
 	require.NoError(t, err)
@@ -192,69 +194,36 @@
 }
 
 func TestOpen(t *testing.T) {
-	m := prepareServer(t)
-
-	for _, head := range []bool{false, true} {
-		if !head {
-			m.Set("no_head", "true")
-		}
-		f, err := NewFs(context.Background(), remoteName, "", m)
-		require.NoError(t, err)
-
-		for _, rangeRead := range []bool{false, true} {
-			o, err := f.NewObject(context.Background(), "four/under four.txt")
-			require.NoError(t, err)
-
-			if !head {
-				// Test mod time is still indeterminate
-				tObj := o.ModTime(context.Background())
-				assert.Equal(t, time.Duration(0), time.Unix(0, 0).Sub(tObj))
-
-				// Test file size is still indeterminate
-				assert.Equal(t, int64(-1), o.Size())
-			}
-
-			var data []byte
-			if !rangeRead {
-				// Test normal read
-				fd, err := o.Open(context.Background())
-				require.NoError(t, err)
-				data, err = io.ReadAll(fd)
-				require.NoError(t, err)
-				require.NoError(t, fd.Close())
-				if lineEndSize == 2 {
-					assert.Equal(t, "beetroot\r\n", string(data))
-				} else {
-					assert.Equal(t, "beetroot\n", string(data))
-				}
-			} else {
-				// Test with range request
-				fd, err := o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
-				require.NoError(t, err)
-				data, err = io.ReadAll(fd)
-				require.NoError(t, err)
-				require.NoError(t, fd.Close())
-				assert.Equal(t, "eetro", string(data))
-			}
-
-			fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
-			require.NoError(t, err)
-			tFile := fi.ModTime()
-			// Test the time is always correct on the object after file open
-			tObj := o.ModTime(context.Background())
-			fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)
-
-			if !rangeRead {
-				// Test the file size
-				assert.Equal(t, int64(len(data)), o.Size())
-			}
-		}
-	}
+	f, tidy := prepare(t)
+	defer tidy()
+
+	o, err := f.NewObject(context.Background(), "four/under four.txt")
+	require.NoError(t, err)
+
+	// Test normal read
+	fd, err := o.Open(context.Background())
+	require.NoError(t, err)
+	data, err := ioutil.ReadAll(fd)
+	require.NoError(t, err)
+	require.NoError(t, fd.Close())
+	if lineEndSize == 2 {
+		assert.Equal(t, "beetroot\r\n", string(data))
+	} else {
+		assert.Equal(t, "beetroot\n", string(data))
+	}
+
+	// Test with range request
+	fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
+	require.NoError(t, err)
+	data, err = ioutil.ReadAll(fd)
+	require.NoError(t, err)
+	require.NoError(t, fd.Close())
+	assert.Equal(t, "eetro", string(data))
 }
 
 func TestMimeType(t *testing.T) {
-	f := prepare(t)
+	f, tidy := prepare(t)
+	defer tidy()
 
 	o, err := f.NewObject(context.Background(), "four/under four.txt")
 	require.NoError(t, err)
@@ -265,7 +234,8 @@
 }
 
 func TestIsAFileRoot(t *testing.T) {
-	m := prepareServer(t)
+	m, tidy := prepareServer(t)
+	defer tidy()
 
 	f, err := NewFs(context.Background(), remoteName, "one%.txt", m)
 	assert.Equal(t, err, fs.ErrorIsFile)
@@ -274,7 +244,8 @@
 }
 
 func TestIsAFileSubDir(t *testing.T) {
-	m := prepareServer(t)
+	m, tidy := prepareServer(t)
+	defer tidy()
 
 	f, err := NewFs(context.Background(), remoteName, "three/underthree.txt", m)
 	assert.Equal(t, err, fs.ErrorIsFile)

backend/hubic/auth.go Normal file

@@ -0,0 +1,62 @@
package hubic
import (
"context"
"net/http"
"time"
"github.com/ncw/swift/v2"
"github.com/rclone/rclone/fs"
)
// auth is an authenticator for swift
type auth struct {
f *Fs
}
// newAuth creates a swift authenticator
func newAuth(f *Fs) *auth {
return &auth{
f: f,
}
}
// Request constructs an http.Request for authentication
//
// returns nil if no request is needed
func (a *auth) Request(ctx context.Context, c *swift.Connection) (r *http.Request, err error) {
const retries = 10
for try := 1; try <= retries; try++ {
err = a.f.getCredentials(context.TODO())
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
fs.Debugf(a.f, "retrying auth request %d/%d: %v", try, retries, err)
}
return nil, err
}
// Response parses the result of an http request
func (a *auth) Response(ctx context.Context, resp *http.Response) error {
return nil
}
// The public storage URL - set Internal to true to read
// internal/service net URL
func (a *auth) StorageUrl(Internal bool) string { // nolint
return a.f.credentials.Endpoint
}
// The access token
func (a *auth) Token() string {
return a.f.credentials.Token
}
// The CDN url if available
func (a *auth) CdnUrl() string { // nolint
return ""
}
// Check the interfaces are satisfied
var _ swift.Authenticator = (*auth)(nil)

backend/hubic/hubic.go Normal file

@@ -0,0 +1,200 @@
// Package hubic provides an interface to the Hubic object storage
// system.
package hubic
// This uses the normal swift mechanism to update the credentials and
// ignores the expires field returned by the Hubic API. This may need
// to be revisited after some actual experience.
import (
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"strings"
"time"
swiftLib "github.com/ncw/swift/v2"
"github.com/rclone/rclone/backend/swift"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/lib/oauthutil"
"golang.org/x/oauth2"
)
const (
rcloneClientID = "api_hubic_svWP970PvSWbw5G3PzrAqZ6X2uHeZBPI"
rcloneEncryptedClientSecret = "leZKCcqy9movLhDWLVXX8cSLp_FzoiAPeEJOIOMRw1A5RuC4iLEPDYPWVF46adC_MVonnLdVEOTHVstfBOZ_lY4WNp8CK_YWlpRZ9diT5YI"
)
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauth2.Config{
Scopes: []string{
"credentials.r", // Read OpenStack credentials
},
Endpoint: oauth2.Endpoint{
AuthURL: "https://api.hubic.com/oauth/auth/",
TokenURL: "https://api.hubic.com/oauth/token/",
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "hubic",
Description: "Hubic",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
},
Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
})
}
// credentials is the JSON returned from the Hubic API to read the
// OpenStack credentials
type credentials struct {
Token string `json:"token"` // OpenStack token
Endpoint string `json:"endpoint"` // OpenStack endpoint
Expires string `json:"expires"` // Expires date - e.g. "2015-11-09T14:24:56+01:00"
}
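// An illustrative response body for this struct (an editor's sketch; the
// field values are placeholders, not captured from the real API):
//
//	{
//	  "token":    "a1b2c3d4...",
//	  "endpoint": "https://storage.example.com/v1/AUTH_xxx",
//	  "expires":  "2015-11-09T14:24:56+01:00"
//	}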
// Fs represents a remote hubic
type Fs struct {
fs.Fs // wrapped Fs
features *fs.Features // optional features
client *http.Client // client for oauth api
credentials credentials // returned from the Hubic API
expires time.Time // time credentials expire
}
// Object describes a swift object
type Object struct {
*swift.Object
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Object.String()
}
// ------------------------------------------------------------
// String converts this Fs to a string
func (f *Fs) String() string {
if f.Fs == nil {
return "Hubic"
}
return fmt.Sprintf("Hubic %s", f.Fs.String())
}
// getCredentials reads the OpenStack Credentials using the Hubic API
//
// The credentials are read into the Fs
func (f *Fs) getCredentials(ctx context.Context) (err error) {
req, err := http.NewRequestWithContext(ctx, "GET", "https://api.hubic.com/1.0/account/credentials", nil)
if err != nil {
return err
}
resp, err := f.client.Do(req)
if err != nil {
return err
}
defer fs.CheckClose(resp.Body, &err)
if resp.StatusCode < 200 || resp.StatusCode > 299 {
body, _ := ioutil.ReadAll(resp.Body)
bodyStr := strings.TrimSpace(strings.ReplaceAll(string(body), "\n", " "))
return fmt.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
}
decoder := json.NewDecoder(resp.Body)
var result credentials
err = decoder.Decode(&result)
if err != nil {
return err
}
// fs.Debugf(f, "Got credentials %+v", result)
if result.Token == "" || result.Endpoint == "" || result.Expires == "" {
return errors.New("couldn't read token, result and expired from credentials")
}
f.credentials = result
expires, err := time.Parse(time.RFC3339, result.Expires)
if err != nil {
return err
}
f.expires = expires
fs.Debugf(f, "Got swift credentials (expiry %v in %v)", f.expires, f.expires.Sub(time.Now()))
return nil
}
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
client, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, fmt.Errorf("failed to configure Hubic: %w", err)
}
f := &Fs{
client: client,
}
// Make the swift Connection
ci := fs.GetConfig(ctx)
c := &swiftLib.Connection{
Auth: newAuth(f),
ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * ci.Timeout, // Use the timeouts in the transport
Transport: fshttp.NewTransport(ctx),
}
err = c.Authenticate(ctx)
if err != nil {
return nil, fmt.Errorf("error authenticating swift connection: %w", err)
}
// Parse config into swift.Options struct
opt := new(swift.Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// Make inner swift Fs from the connection
swiftFs, err := swift.NewFsWithConnection(ctx, opt, name, root, c, true)
if err != nil && err != fs.ErrorIsFile {
return nil, err
}
f.Fs = swiftFs
f.features = f.Fs.Features().Wrap(f)
return f, err
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.Fs
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
)


@@ -0,0 +1,19 @@
// Test Hubic filesystem interface
package hubic_test
import (
"testing"
"github.com/rclone/rclone/backend/hubic"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestHubic:",
NilObject: (*hubic.Object)(nil),
SkipFsCheckWrap: true,
SkipObjectCheckWrap: true,
})
}


@@ -38,100 +38,6 @@ func init() {
Name: "internetarchive", Name: "internetarchive",
Description: "Internet Archive", Description: "Internet Archive",
NewFs: NewFs, NewFs: NewFs,
MetadataInfo: &fs.MetadataInfo{
System: map[string]fs.MetadataHelp{
"name": {
Help: "Full file path, without the bucket part",
Type: "filename",
Example: "backend/internetarchive/internetarchive.go",
ReadOnly: true,
},
"source": {
Help: "The source of the file",
Type: "string",
Example: "original",
ReadOnly: true,
},
"mtime": {
Help: "Time of last modification, managed by Rclone",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999999999Z",
ReadOnly: true,
},
"size": {
Help: "File size in bytes",
Type: "decimal number",
Example: "123456",
ReadOnly: true,
},
"md5": {
Help: "MD5 hash calculated by Internet Archive",
Type: "string",
Example: "01234567012345670123456701234567",
ReadOnly: true,
},
"crc32": {
Help: "CRC32 calculated by Internet Archive",
Type: "string",
Example: "01234567",
ReadOnly: true,
},
"sha1": {
Help: "SHA1 hash calculated by Internet Archive",
Type: "string",
Example: "0123456701234567012345670123456701234567",
ReadOnly: true,
},
"format": {
Help: "Name of format identified by Internet Archive",
Type: "string",
Example: "Comma-Separated Values",
ReadOnly: true,
},
"old_version": {
Help: "Whether the file was replaced and moved by keep-old-version flag",
Type: "boolean",
Example: "true",
ReadOnly: true,
},
"viruscheck": {
Help: "The last time viruscheck process was run for the file (?)",
Type: "unixtime",
Example: "1654191352",
ReadOnly: true,
},
"summation": {
Help: "Check https://forum.rclone.org/t/31922 for how it is used",
Type: "string",
Example: "md5",
ReadOnly: true,
},
"rclone-ia-mtime": {
Help: "Time of last modification, managed by Internet Archive",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999999999Z",
},
"rclone-mtime": {
Help: "Time of last modification, managed by Rclone",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999999999Z",
},
"rclone-update-track": {
Help: "Random value used by Rclone for tracking changes inside Internet Archive",
Type: "string",
Example: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
},
},
Help: `Metadata fields provided by Internet Archive.
If there are multiple values for a key, only the first one is returned.
This is a limitation of Rclone, which supports only one value per key.
The owner is able to add custom keys; the metadata feature grabs all the keys, including custom ones.
`,
},
Options: []fs.Option{{ Options: []fs.Option{{
Name: "access_key_id", Name: "access_key_id",
Help: "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php", Help: "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php",
@@ -184,14 +90,6 @@ Only enable if you need to be guaranteed to be reflected after write operations.
// maximum size of an item. this is constant across all items // maximum size of an item. this is constant across all items
const iaItemMaxSize int64 = 1099511627776 const iaItemMaxSize int64 = 1099511627776
// metadata keys that are not writeable
var roMetadataKey = map[string]interface{}{
// do not add mtime here, it's a documented exception
"name": nil, "source": nil, "size": nil, "md5": nil,
"crc32": nil, "sha1": nil, "format": nil, "old_version": nil,
"viruscheck": nil, "summation": nil,
}
// Options defines the configuration for this backend // Options defines the configuration for this backend
type Options struct { type Options struct {
AccessKeyID string `config:"access_key_id"` AccessKeyID string `config:"access_key_id"`
@@ -224,10 +122,9 @@ type Object struct {
md5 string // md5 hash of the file presented by the server md5 string // md5 hash of the file presented by the server
sha1 string // sha1 hash of the file presented by the server sha1 string // sha1 hash of the file presented by the server
crc32 string // crc32 of the file presented by the server crc32 string // crc32 of the file presented by the server
rawData json.RawMessage
} }
// IAFile represents a subset of object in MetadataResponse.Files // IAFile reprensents a subset of object in MetadataResponse.Files
type IAFile struct { type IAFile struct {
Name string `json:"name"` Name string `json:"name"`
// Source string `json:"source"` // Source string `json:"source"`
@@ -238,23 +135,14 @@ type IAFile struct {
Md5 string `json:"md5"` Md5 string `json:"md5"`
Crc32 string `json:"crc32"` Crc32 string `json:"crc32"`
Sha1 string `json:"sha1"` Sha1 string `json:"sha1"`
Summation string `json:"summation"`
rawData json.RawMessage
} }
// MetadataResponse represents subset of the JSON object returned by (frontend)/metadata/ // MetadataResponse reprensents subset of the JSON object returned by (frontend)/metadata/
type MetadataResponse struct { type MetadataResponse struct {
Files []IAFile `json:"files"` Files []IAFile `json:"files"`
ItemSize int64 `json:"item_size"` ItemSize int64 `json:"item_size"`
} }
// MetadataResponseRaw is the form of MetadataResponse to deal with metadata
type MetadataResponseRaw struct {
Files []json.RawMessage `json:"files"`
ItemSize int64 `json:"item_size"`
}
// ModMetadataResponse represents response for amending metadata // ModMetadataResponse represents response for amending metadata
type ModMetadataResponse struct { type ModMetadataResponse struct {
// https://archive.org/services/docs/api/md-write.html#example // https://archive.org/services/docs/api/md-write.html#example
@@ -338,10 +226,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
} }
f.setRoot(root) f.setRoot(root)
f.features = (&fs.Features{ f.features = (&fs.Features{
BucketBased: true, BucketBased: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
}).Fill(ctx, f) }).Fill(ctx, f)
f.srv = rest.NewClient(fshttp.NewClient(ctx)) f.srv = rest.NewClient(fshttp.NewClient(ctx))
@@ -422,17 +307,18 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) (err error) {
} }
// https://archive.org/services/docs/api/md-write.html // https://archive.org/services/docs/api/md-write.html
// the following code might be useful for modifying metadata of an uploaded file var patch = []interface{}{
patch := []map[string]string{
// we should drop it first to clear all rclone-provided mtimes // we should drop it first to clear all rclone-provided mtimes
{ struct {
"op": "remove", Op string `json:"op"`
"path": "/rclone-mtime", Path string `json:"path"`
}, { }{"remove", "/rclone-mtime"},
"op": "add", struct {
"path": "/rclone-mtime", Op string `json:"op"`
"value": t.Format(time.RFC3339Nano), Path string `json:"path"`
}} Value string `json:"value"`
}{"add", "/rclone-mtime", t.Format(time.RFC3339Nano)},
}
res, err := json.Marshal(patch) res, err := json.Marshal(patch)
if err != nil { if err != nil {
return err return err
@@ -572,14 +458,14 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return "", err return "", err
} }
bucket, bucketPath := f.split(remote) bucket, bucketPath := f.split(remote)
return path.Join(f.opt.FrontEndpoint, "/download/", bucket, quotePath(bucketPath)), nil return path.Join(f.opt.FrontEndpoint, "/download/", bucket, bucketPath), nil
} }
// Copy src to this remote using server-side copy operations. // Copy src to this remote using server-side copy operations.
// //
// This is stored with the remote path given. // This is stored with the remote path given
// //
// It returns the destination Object and a possible error. // It returns the destination Object and a possible error
// //
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
@@ -760,7 +646,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// make a GET request to (frontend)/download/:item/:path // make a GET request to (frontend)/download/:item/:path
opts := rest.Opts{ opts := rest.Opts{
Method: "GET", Method: "GET",
Path: path.Join("/download/", o.fs.root, quotePath(o.fs.opt.Enc.FromStandardPath(o.remote))), Path: path.Join("/download/", o.fs.root, o.fs.opt.Enc.FromStandardPath(o.remote)),
Options: optionsFixed, Options: optionsFixed,
} }
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
@@ -799,23 +685,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
headers["Content-Length"] = fmt.Sprintf("%d", size) headers["Content-Length"] = fmt.Sprintf("%d", size)
headers["x-archive-size-hint"] = fmt.Sprintf("%d", size) headers["x-archive-size-hint"] = fmt.Sprintf("%d", size)
} }
var mdata fs.Metadata
mdata, err = fs.GetMetadataOptions(ctx, src, options)
if err == nil && mdata != nil {
for mk, mv := range mdata {
mk = strings.ToLower(mk)
if strings.HasPrefix(mk, "rclone-") {
fs.LogPrintf(fs.LogLevelWarning, o, "reserved metadata key %s is about to set", mk)
} else if _, ok := roMetadataKey[mk]; ok {
fs.LogPrintf(fs.LogLevelWarning, o, "setting or modifying read-only key %s is requested, skipping", mk)
continue
} else if mk == "mtime" {
// redirect to make it work
mk = "rclone-mtime"
}
headers[fmt.Sprintf("x-amz-filemeta-%s", mk)] = mv
}
}
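The loop above maps user metadata onto IAS3 request headers. A stdlib-only sketch of that mapping, under the assumption that the reserved-key set matches roMetadataKey shown earlier (keys with the reserved rclone- prefix would additionally trigger the warning above):
package main
import (
	"fmt"
	"strings"
)
// read-only keys, mirroring roMetadataKey above
var readOnly = map[string]struct{}{
	"name": {}, "source": {}, "size": {}, "md5": {},
	"crc32": {}, "sha1": {}, "format": {}, "old_version": {},
	"viruscheck": {}, "summation": {},
}
func metadataHeaders(meta map[string]string) map[string]string {
	headers := make(map[string]string)
	for k, v := range meta {
		k = strings.ToLower(k)
		if _, ok := readOnly[k]; ok {
			continue // read-only keys are skipped
		}
		if k == "mtime" {
			k = "rclone-mtime" // mtime is redirected to a writable alias
		}
		headers["x-amz-filemeta-"+k] = v
	}
	return headers
}
func main() {
	fmt.Println(metadataHeaders(map[string]string{
		"mtime": "2006-01-02T15:04:05Z", // becomes x-amz-filemeta-rclone-mtime
		"md5":   "ignored",              // read-only, skipped
		"title": "hello",                // custom key, passed through
	}))
}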
// read the md5sum if available // read the md5sum if available
var md5sumHex string var md5sumHex string
@@ -893,34 +762,6 @@ func (o *Object) String() string {
return o.remote return o.remote
} }
// Metadata returns all file metadata provided by Internet Archive
func (o *Object) Metadata(ctx context.Context) (m fs.Metadata, err error) {
if o.rawData == nil {
return nil, nil
}
raw := make(map[string]json.RawMessage)
err = json.Unmarshal(o.rawData, &raw)
if err != nil {
// fatal: json parsing failed
return
}
for k, v := range raw {
items, err := listOrString(v)
if len(items) == 0 || err != nil {
// skip: an entry failed to parse
continue
}
m.Set(k, items[0])
}
// move the old mtime to another key
if v, ok := m["mtime"]; ok {
m["rclone-ia-mtime"] = v
}
// overwrite with a correct mtime
m["mtime"] = o.modTime.Format(time.RFC3339Nano)
return
}
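listOrString is not shown in this hunk; a plausible shape, assuming a metadata value may arrive either as a JSON string or as an array of strings, with the first element taken as the value:
package main
import (
	"encoding/json"
	"fmt"
)
// listOrString decodes a value that may be a bare string or a string array.
func listOrString(raw json.RawMessage) ([]string, error) {
	var s string
	if err := json.Unmarshal(raw, &s); err == nil {
		return []string{s}, nil
	}
	var list []string
	err := json.Unmarshal(raw, &list)
	return list, err
}
func main() {
	a, _ := listOrString(json.RawMessage(`"one"`))
	b, _ := listOrString(json.RawMessage(`["one","two"]`))
	fmt.Println(a, b) // [one] [one two]
}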
func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) { func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
if resp != nil { if resp != nil {
for _, e := range retryErrorCodes { for _, e := range retryErrorCodes {
@@ -947,7 +788,7 @@ func (o *Object) split() (bucket, bucketPath string) {
return o.fs.split(o.remote) return o.fs.split(o.remote)
} }
func (f *Fs) requestMetadata(ctx context.Context, bucket string) (result *MetadataResponse, err error) { func (f *Fs) requestMetadata(ctx context.Context, bucket string) (result MetadataResponse, err error) {
var resp *http.Response var resp *http.Response
// make a GET request to (frontend)/metadata/:item/ // make a GET request to (frontend)/metadata/:item/
opts := rest.Opts{ opts := rest.Opts{
@@ -955,15 +796,12 @@ func (f *Fs) requestMetadata(ctx context.Context, bucket string) (result *Metada
Path: path.Join("/metadata/", bucket), Path: path.Join("/metadata/", bucket),
} }
var temp MetadataResponseRaw
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err = f.front.CallJSON(ctx, &opts, nil, &temp) resp, err = f.front.CallJSON(ctx, &opts, nil, &result)
return f.shouldRetry(resp, err) return f.shouldRetry(resp, err)
}) })
if err != nil {
return return result, err
}
return temp.unraw()
} }
// list up all files/directories without any filters // list up all files/directories without any filters
@@ -1152,21 +990,15 @@ func (f *Fs) waitDelete(ctx context.Context, bucket, bucketPath string) (err err
} }
func makeValidObject(f *Fs, remote string, file IAFile, mtime time.Time, size int64) *Object { func makeValidObject(f *Fs, remote string, file IAFile, mtime time.Time, size int64) *Object {
ret := &Object{ return &Object{
fs: f, fs: f,
remote: remote, remote: remote,
modTime: mtime, modTime: mtime,
size: size, size: size,
rawData: file.rawData, md5: file.Md5,
crc32: file.Crc32,
sha1: file.Sha1,
} }
// hashes from _files.xml (where summation != "") is different from one in other files
// https://forum.rclone.org/t/internet-archive-md5-tag-in-id-files-xml-interpreted-incorrectly/31922
if file.Summation == "" {
ret.md5 = file.Md5
ret.crc32 = file.Crc32
ret.sha1 = file.Sha1
}
return ret
} }
func makeValidObject2(f *Fs, file IAFile, bucket string) *Object { func makeValidObject2(f *Fs, file IAFile, bucket string) *Object {
@@ -1213,23 +1045,6 @@ func (file IAFile) parseMtime() (mtime time.Time) {
return mtime return mtime
} }
func (mrr *MetadataResponseRaw) unraw() (_ *MetadataResponse, err error) {
var files []IAFile
for _, raw := range mrr.Files {
var parsed IAFile
err = json.Unmarshal(raw, &parsed)
if err != nil {
return nil, err
}
parsed.rawData = raw
files = append(files, parsed)
}
return &MetadataResponse{
Files: files,
ItemSize: mrr.ItemSize,
}, nil
}
func compareSize(a, b int64) bool { func compareSize(a, b int64) bool {
if a < 0 || b < 0 { if a < 0 || b < 0 {
// we won't compare if any of them is not known // we won't compare if any of them is not known
@@ -1273,7 +1088,7 @@ func trimPathPrefix(s, prefix string, enc encoder.MultiEncoder) string {
return enc.ToStandardPath(strings.TrimPrefix(s, prefix+"/")) return enc.ToStandardPath(strings.TrimPrefix(s, prefix+"/"))
} }
// mimics urllib.parse.quote() on Python; exclude / from url.PathEscape // mimicks urllib.parse.quote() on Python; exclude / from url.PathEscape
func quotePath(s string) string { func quotePath(s string) string {
seg := strings.Split(s, "/") seg := strings.Split(s, "/")
newValues := []string{} newValues := []string{}
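The hunk above truncates quotePath; a sketch of the assumed completion, escaping each segment with url.PathEscape while leaving the / separators intact (mirroring Python's urllib.parse.quote):
package main
import (
	"fmt"
	"net/url"
	"strings"
)
// quotePath escapes every path segment but keeps the "/" separators.
func quotePath(s string) string {
	seg := strings.Split(s, "/")
	out := make([]string, 0, len(seg))
	for _, v := range seg {
		out = append(out, url.PathEscape(v))
	}
	return strings.Join(out, "/")
}
func main() {
	fmt.Println(quotePath("dir with space/file#1.txt")) // dir%20with%20space/file%231.txt
}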
@@ -1291,5 +1106,4 @@ var (
_ fs.PublicLinker = &Fs{} _ fs.PublicLinker = &Fs{}
_ fs.Abouter = &Fs{} _ fs.Abouter = &Fs{}
_ fs.Object = &Object{} _ fs.Object = &Object{}
_ fs.Metadataer = &Object{}
) )

View File

@@ -1,4 +1,3 @@
// Package api provides types used by the Jottacloud API.
package api package api
import ( import (

View File

@@ -1,4 +1,3 @@
// Package jottacloud provides an interface to the Jottacloud storage system.
package jottacloud package jottacloud
import ( import (
@@ -12,6 +11,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"math/rand" "math/rand"
"net/http" "net/http"
"net/url" "net/url"
@@ -46,9 +46,9 @@ const (
decayConstant = 2 // bigger for slower decay, exponential decayConstant = 2 // bigger for slower decay, exponential
defaultDevice = "Jotta" defaultDevice = "Jotta"
defaultMountpoint = "Archive" defaultMountpoint = "Archive"
jfsURL = "https://jfs.jottacloud.com/jfs/" rootURL = "https://jfs.jottacloud.com/jfs/"
apiURL = "https://api.jottacloud.com/" apiURL = "https://api.jottacloud.com/"
wwwURL = "https://www.jottacloud.com/" baseURL = "https://www.jottacloud.com/"
cachePrefix = "rclone-jcmd5-" cachePrefix = "rclone-jcmd5-"
configDevice = "device" configDevice = "device"
configMountpoint = "mountpoint" configMountpoint = "mountpoint"
@@ -127,7 +127,7 @@ func init() {
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) { func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
switch config.State { switch config.State {
case "": case "":
return fs.ConfigChooseExclusiveFixed("auth_type_done", "config_type", `Select authentication type.`, []fs.OptionExample{{ return fs.ConfigChooseFixed("auth_type_done", "config_type", `Authentication type.`, []fs.OptionExample{{
Value: "standard", Value: "standard",
Help: "Standard authentication.\nUse this if you're a normal Jottacloud user.", Help: "Standard authentication.\nUse this if you're a normal Jottacloud user.",
}, { }, {
@@ -145,7 +145,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
return fs.ConfigGoto(config.Result) return fs.ConfigGoto(config.Result)
case "standard": // configure a jottacloud backend using the modern JottaCli token based authentication case "standard": // configure a jottacloud backend using the modern JottaCli token based authentication
m.Set("configVersion", fmt.Sprint(configVersion)) m.Set("configVersion", fmt.Sprint(configVersion))
return fs.ConfigInput("standard_token", "config_login_token", "Personal login token.\nGenerate here: https://www.jottacloud.com/web/secure") return fs.ConfigInput("standard_token", "config_login_token", "Personal login token.\n\nGenerate here: https://www.jottacloud.com/web/secure")
case "standard_token": case "standard_token":
loginToken := config.Result loginToken := config.Result
m.Set(configClientID, defaultClientID) m.Set(configClientID, defaultClientID)
@@ -262,11 +262,7 @@ machines.`)
}, },
}) })
case "choose_device": case "choose_device":
return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", `Use a non-standard device/mountpoint? return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", "Use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?")
Choosing no, the default, will let you access the storage used for the archive
section of the official Jottacloud client. If you instead want to access the
sync or the backup section, for example, you must choose yes.`)
case "choose_device_query": case "choose_device_query":
if config.Result != "true" { if config.Result != "true" {
m.Set(configDevice, "") m.Set(configDevice, "")
@@ -277,139 +273,43 @@ sync or the backup section, for example, you must choose yes.`)
if err != nil { if err != nil {
return nil, err return nil, err
} }
jfsSrv := rest.NewClient(oAuthClient).SetRoot(jfsURL) srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL) apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
cust, err := getCustomerInfo(ctx, apiSrv) cust, err := getCustomerInfo(ctx, apiSrv)
if err != nil { if err != nil {
return nil, err return nil, err
} }
m.Set(configUsername, cust.Username)
acc, err := getDriveInfo(ctx, jfsSrv, cust.Username) acc, err := getDriveInfo(ctx, srv, cust.Username)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return fs.ConfigChoose("choose_device_result", "config_device", `Please select the device to use. Normally this will be Jotta`, len(acc.Devices), func(i int) (string, string) {
deviceNames := make([]string, len(acc.Devices)) return acc.Devices[i].Name, ""
for i, dev := range acc.Devices {
if i > 0 && dev.Name == defaultDevice {
// Insert the special Jotta device as first entry, making it the default choice.
copy(deviceNames[1:i+1], deviceNames[0:i])
deviceNames[0] = dev.Name
} else {
deviceNames[i] = dev.Name
}
}
help := fmt.Sprintf(`The device to use. In standard setup the built-in %s device is used,
which contains predefined mountpoints for archive, sync etc. All other devices
are treated as backup devices by the official Jottacloud client. You may create
a new one by entering a unique name.`, defaultDevice)
return fs.ConfigChoose("choose_device_result", "config_device", help, len(deviceNames), func(i int) (string, string) {
return deviceNames[i], ""
}) })
case "choose_device_result": case "choose_device_result":
device := config.Result device := config.Result
m.Set(configDevice, device)
oAuthClient, _, err := getOAuthClient(ctx, name, m) oAuthClient, _, err := getOAuthClient(ctx, name, m)
if err != nil { if err != nil {
return nil, err return nil, err
} }
jfsSrv := rest.NewClient(oAuthClient).SetRoot(jfsURL) srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
cust, err := getCustomerInfo(ctx, apiSrv) username, _ := m.Get(configUsername)
dev, err := getDeviceInfo(ctx, srv, path.Join(username, device))
if err != nil { if err != nil {
return nil, err return nil, err
} }
return fs.ConfigChoose("choose_device_mountpoint", "config_mountpoint", `Please select the mountpoint to use. Normally this will be Archive.`, len(dev.MountPoints), func(i int) (string, string) {
acc, err := getDriveInfo(ctx, jfsSrv, cust.Username)
if err != nil {
return nil, err
}
isNew := true
for _, dev := range acc.Devices {
if strings.EqualFold(dev.Name, device) { // If device name exists with different casing we prefer the existing (not sure if and how the api handles the opposite)
device = dev.Name // Prefer same casing as existing, e.g. if user entered "jotta" we use the standard casing "Jotta" instead
isNew = false
break
}
}
var dev *api.JottaDevice
if isNew {
fs.Debugf(nil, "Creating new device: %s", device)
dev, err = createDevice(ctx, jfsSrv, path.Join(cust.Username, device))
if err != nil {
return nil, err
}
}
m.Set(configDevice, device)
if !isNew {
dev, err = getDeviceInfo(ctx, jfsSrv, path.Join(cust.Username, device))
if err != nil {
return nil, err
}
}
var help string
if device == defaultDevice {
// With built-in Jotta device the mountpoint choice is exclusive,
// we do not want to risk any problems by creating new mountpoints on it.
help = fmt.Sprintf(`The mountpoint to use on the built-in device %s.
The standard setup is to use the %s mountpoint. Most other mountpoints
have very limited support in rclone and should generally be avoided.`, defaultDevice, defaultMountpoint)
return fs.ConfigChooseExclusive("choose_device_mountpoint", "config_mountpoint", help, len(dev.MountPoints), func(i int) (string, string) {
return dev.MountPoints[i].Name, ""
})
}
help = fmt.Sprintf(`The mountpoint to use on the non-standard device %s.
You may create a new one by entering a unique name.`, device)
return fs.ConfigChoose("choose_device_mountpoint", "config_mountpoint", help, len(dev.MountPoints), func(i int) (string, string) {
return dev.MountPoints[i].Name, "" return dev.MountPoints[i].Name, ""
}) })
case "choose_device_mountpoint": case "choose_device_mountpoint":
mountpoint := config.Result mountpoint := config.Result
oAuthClient, _, err := getOAuthClient(ctx, name, m)
if err != nil {
return nil, err
}
jfsSrv := rest.NewClient(oAuthClient).SetRoot(jfsURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
cust, err := getCustomerInfo(ctx, apiSrv)
if err != nil {
return nil, err
}
device, _ := m.Get(configDevice)
dev, err := getDeviceInfo(ctx, jfsSrv, path.Join(cust.Username, device))
if err != nil {
return nil, err
}
isNew := true
for _, mnt := range dev.MountPoints {
if strings.EqualFold(mnt.Name, mountpoint) {
mountpoint = mnt.Name
isNew = false
break
}
}
if isNew {
if device == defaultDevice {
return nil, fmt.Errorf("custom mountpoints not supported on built-in %s device: %w", defaultDevice, err)
}
fs.Debugf(nil, "Creating new mountpoint: %s", mountpoint)
_, err := createMountPoint(ctx, jfsSrv, path.Join(cust.Username, device, mountpoint))
if err != nil {
return nil, err
}
}
m.Set(configMountpoint, mountpoint) m.Set(configMountpoint, mountpoint)
return fs.ConfigGoto("end") return fs.ConfigGoto("end")
case "end": case "end":
// All the config flows end up here in case we need to carry on with something // All the config flows end up here in case we need to carry on with something
@@ -432,17 +332,16 @@ type Options struct {
// Fs represents a remote jottacloud // Fs represents a remote jottacloud
type Fs struct { type Fs struct {
name string name string
root string root string
user string user string
opt Options opt Options
features *fs.Features features *fs.Features
fileEndpoint string endpointURL string
allocateEndpoint string srv *rest.Client
jfsSrv *rest.Client apiSrv *rest.Client
apiSrv *rest.Client pacer *fs.Pacer
pacer *fs.Pacer tokenRenewer *oauthutil.Renew // renew the token on expiry
tokenRenewer *oauthutil.Renew // renew the token on expiry
} }
// Object describes a jottacloud object // Object describes a jottacloud object
@@ -689,47 +588,15 @@ func getDeviceInfo(ctx context.Context, srv *rest.Client, path string) (info *ap
return info, nil return info, nil
} }
// createDevice makes a device // setEndpointURL generates the API endpoint URL
func createDevice(ctx context.Context, srv *rest.Client, path string) (info *api.JottaDevice, err error) { func (f *Fs) setEndpointURL() {
opts := rest.Opts{
Method: "POST",
Path: urlPathEscape(path),
Parameters: url.Values{},
}
opts.Parameters.Set("type", "WORKSTATION")
_, err = srv.CallXML(ctx, &opts, nil, &info)
if err != nil {
return nil, fmt.Errorf("couldn't create device: %w", err)
}
return info, nil
}
// createMountPoint makes a mount point
func createMountPoint(ctx context.Context, srv *rest.Client, path string) (info *api.JottaMountPoint, err error) {
opts := rest.Opts{
Method: "POST",
Path: urlPathEscape(path),
}
_, err = srv.CallXML(ctx, &opts, nil, &info)
if err != nil {
return nil, fmt.Errorf("couldn't create mountpoint: %w", err)
}
return info, nil
}
// setEndpoints generates the API endpoints
func (f *Fs) setEndpoints() {
if f.opt.Device == "" { if f.opt.Device == "" {
f.opt.Device = defaultDevice f.opt.Device = defaultDevice
} }
if f.opt.Mountpoint == "" { if f.opt.Mountpoint == "" {
f.opt.Mountpoint = defaultMountpoint f.opt.Mountpoint = defaultMountpoint
} }
f.fileEndpoint = path.Join(f.user, f.opt.Device, f.opt.Mountpoint) f.endpointURL = path.Join(f.user, f.opt.Device, f.opt.Mountpoint)
f.allocateEndpoint = path.Join("/jfs", f.opt.Device, f.opt.Mountpoint)
} }
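A small illustration of the endpoint construction above: the file endpoint is rooted at the username, while the allocate endpoint is rooted at /jfs.
package main
import (
	"fmt"
	"path"
)
func main() {
	user, device, mountpoint := "user", "Jotta", "Archive"
	fileEndpoint := path.Join(user, device, mountpoint)
	allocateEndpoint := path.Join("/jfs", device, mountpoint)
	fmt.Println(fileEndpoint, allocateEndpoint) // user/Jotta/Archive /jfs/Jotta/Archive
}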
// readMetaDataForPath reads the metadata from the path // readMetaDataForPath reads the metadata from the path
@@ -741,7 +608,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Jo
var result api.JottaFile var result api.JottaFile
var resp *http.Response var resp *http.Response
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &result) resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
@@ -786,30 +653,13 @@ func urlPathEscape(in string) string {
} }
// filePathRaw returns an unescaped file path (f.root, file) // filePathRaw returns an unescaped file path (f.root, file)
// Optionally made absolute by prefixing with "/", typically required when used func (f *Fs) filePathRaw(file string) string {
// as request parameter instead of the path (which is relative to some root url). return path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(path.Join(f.root, file)))
func (f *Fs) filePathRaw(file string, absolute bool) string {
prefix := ""
if absolute {
prefix = "/"
}
return path.Join(prefix, f.fileEndpoint, f.opt.Enc.FromStandardPath(path.Join(f.root, file)))
} }
// filePath returns an escaped file path (f.root, file) // filePath returns an escaped file path (f.root, file)
func (f *Fs) filePath(file string) string { func (f *Fs) filePath(file string) string {
return urlPathEscape(f.filePathRaw(file, false)) return urlPathEscape(f.filePathRaw(file))
}
// allocatePathRaw returns an unescaped allocate file path (f.root, file)
// Optionally made absolute by prefixing with "/", typically required when used
// as request parameter instead of the path (which is relative to some root url).
func (f *Fs) allocatePathRaw(file string, absolute bool) string {
prefix := ""
if absolute {
prefix = "/"
}
return path.Join(prefix, f.allocateEndpoint, f.opt.Enc.FromStandardPath(path.Join(f.root, file)))
} }
// Jottacloud requires the grant_type 'refresh_token' string // Jottacloud requires the grant_type 'refresh_token' string
@@ -821,7 +671,7 @@ func (f *Fs) allocatePathRaw(file string, absolute bool) string {
func grantTypeFilter(req *http.Request) { func grantTypeFilter(req *http.Request) {
if legacyTokenURL == req.URL.String() { if legacyTokenURL == req.URL.String() {
// read the entire body // read the entire body
refreshBody, err := io.ReadAll(req.Body) refreshBody, err := ioutil.ReadAll(req.Body)
if err != nil { if err != nil {
return return
} }
@@ -831,7 +681,7 @@ func grantTypeFilter(req *http.Request) {
refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1)) refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))
// set the new ReadCloser (with a dummy Close()) // set the new ReadCloser (with a dummy Close())
req.Body = io.NopCloser(bytes.NewReader(refreshBody)) req.Body = ioutil.NopCloser(bytes.NewReader(refreshBody))
} }
} }
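A self-contained sketch of the body-rewriting filter above (not the rclone implementation): read the body, upper-case the grant_type value, and install a fresh ReadCloser so the request remains sendable. The sketch also resets ContentLength, though the replacement here preserves length, so the filter above can skip that.
package main
import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"strings"
)
// rewriteGrantType replaces grant_type=refresh_token with the upper-case form.
func rewriteGrantType(req *http.Request) {
	body, err := io.ReadAll(req.Body)
	if err != nil {
		return
	}
	_ = req.Body.Close()
	body = []byte(strings.Replace(string(body), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))
	req.Body = io.NopCloser(bytes.NewReader(body))
	req.ContentLength = int64(len(body))
}
func main() {
	req, _ := http.NewRequest("POST", "https://example.com/token", strings.NewReader("grant_type=refresh_token&x=1"))
	rewriteGrantType(req)
	b, _ := io.ReadAll(req.Body)
	fmt.Println(string(b)) // grant_type=REFRESH_TOKEN&x=1
}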
@@ -842,12 +692,12 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
if ok { if ok {
ver, err = strconv.Atoi(version) ver, err = strconv.Atoi(version)
if err != nil { if err != nil {
return nil, nil, errors.New("failed to parse config version") return nil, nil, errors.New("Failed to parse config version")
} }
ok = (ver == configVersion) || (ver == legacyConfigVersion) ok = (ver == configVersion) || (ver == legacyConfigVersion)
} }
if !ok { if !ok {
return nil, nil, errors.New("outdated config - please reconfigure this backend") return nil, nil, errors.New("Outdated config - please reconfigure this backend")
} }
baseClient := fshttp.NewClient(ctx) baseClient := fshttp.NewClient(ctx)
@@ -893,7 +743,7 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
// Create OAuth Client // Create OAuth Client
oAuthClient, ts, err = oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient) oAuthClient, ts, err = oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("failed to configure Jottacloud oauth client: %w", err) return nil, nil, fmt.Errorf("Failed to configure Jottacloud oauth client: %w", err)
} }
return oAuthClient, ts, nil return oAuthClient, ts, nil
} }
@@ -919,7 +769,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
name: name, name: name,
root: root, root: root,
opt: *opt, opt: *opt,
jfsSrv: rest.NewClient(oAuthClient).SetRoot(jfsURL), srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL), apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
} }
@@ -929,7 +779,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ReadMimeType: true, ReadMimeType: true,
WriteMimeType: false, WriteMimeType: false,
}).Fill(ctx, f) }).Fill(ctx, f)
f.jfsSrv.SetErrorHandler(errorHandler) f.srv.SetErrorHandler(errorHandler)
if opt.TrashedOnly { // we cannot support showing Trashed Files when using ListR right now if opt.TrashedOnly { // we cannot support showing Trashed Files when using ListR right now
f.features.ListR = nil f.features.ListR = nil
} }
@@ -948,7 +798,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, err return nil, err
} }
f.user = cust.Username f.user = cust.Username
f.setEndpoints() f.setEndpointURL()
if root != "" && !rootIsDir { if root != "" && !rootIsDir {
// Check to see if the root actually an existing file // Check to see if the root actually an existing file
@@ -1014,7 +864,7 @@ func (f *Fs) CreateDir(ctx context.Context, path string) (jf *api.JottaFolder, e
opts.Parameters.Set("mkDir", "true") opts.Parameters.Set("mkDir", "true")
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &jf) resp, err = f.srv.CallXML(ctx, &opts, nil, &jf)
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
@@ -1043,7 +893,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
var resp *http.Response var resp *http.Response
var result api.JottaFolder var result api.JottaFolder
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &result) resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
@@ -1180,7 +1030,7 @@ func parseListRStream(ctx context.Context, r io.Reader, filesystem *Fs, callback
if expected.Folders != actual.Folders || if expected.Folders != actual.Folders ||
expected.Files != actual.Files { expected.Files != actual.Files {
return fmt.Errorf("invalid result from listStream: expected[%#v] != actual[%#v]", expected, actual) return fmt.Errorf("Invalid result from listStream: expected[%#v] != actual[%#v]", expected, actual)
} }
return nil return nil
} }
@@ -1201,7 +1051,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
var resp *http.Response var resp *http.Response
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err = f.jfsSrv.Call(ctx, &opts) resp, err = f.srv.Call(ctx, &opts)
if err != nil { if err != nil {
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
} }
@@ -1247,10 +1097,13 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
// Put the object // Put the object
// //
// Copy the reader in to the new object which is returned. // Copy the reader in to the new object which is returned
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if f.opt.Device != "Jotta" {
return nil, errors.New("upload not supported for devices other than Jotta")
}
o := f.createObject(src.Remote(), src.ModTime(ctx), src.Size()) o := f.createObject(src.Remote(), src.ModTime(ctx), src.Size())
return o, o.Update(ctx, in, src, options...) return o, o.Update(ctx, in, src, options...)
} }
@@ -1260,7 +1113,10 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
func (f *Fs) mkParentDir(ctx context.Context, dirPath string) error { func (f *Fs) mkParentDir(ctx context.Context, dirPath string) error {
// defer log.Trace(dirPath, "")("") // defer log.Trace(dirPath, "")("")
// chop off trailing / if it exists // chop off trailing / if it exists
parent := path.Dir(strings.TrimSuffix(dirPath, "/")) if strings.HasSuffix(dirPath, "/") {
dirPath = dirPath[:len(dirPath)-1]
}
parent := path.Dir(dirPath)
if parent == "." { if parent == "." {
parent = "" parent = ""
} }
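A tiny illustration of the simplification above: strings.TrimSuffix plus path.Dir replaces the manual trailing-slash chopping.
package main
import (
	"fmt"
	"path"
	"strings"
)
// parentOf returns the parent directory, treating "." as no parent.
func parentOf(dirPath string) string {
	parent := path.Dir(strings.TrimSuffix(dirPath, "/"))
	if parent == "." {
		parent = ""
	}
	return parent
}
func main() {
	fmt.Println(parentOf("a/b/")) // a
	fmt.Println(parentOf("file")) // "" (no parent)
}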
@@ -1308,7 +1164,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
var resp *http.Response var resp *http.Response
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err = f.jfsSrv.Call(ctx, &opts) resp, err = f.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
@@ -1361,7 +1217,7 @@ func (f *Fs) createOrUpdate(ctx context.Context, file string, modTime time.Time,
var resp *http.Response var resp *http.Response
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &info) resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
@@ -1382,11 +1238,11 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
Parameters: url.Values{}, Parameters: url.Values{},
} }
opts.Parameters.Set(method, f.filePathRaw(dest, true)) opts.Parameters.Set(method, "/"+path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(path.Join(f.root, dest))))
var resp *http.Response var resp *http.Response
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &info) resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
@@ -1397,9 +1253,9 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
// Copy src to this remote using server-side copy operations. // Copy src to this remote using server-side copy operations.
// //
// This is stored with the remote path given. // This is stored with the remote path given
// //
// It returns the destination Object and a possible error. // It returns the destination Object and a possible error
// //
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
@@ -1417,7 +1273,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
} }
info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote) info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)
// if destination was a trashed file then after a successful copy the copied file is still in trash (bug in api?) // if destination was a trashed file then after a successfull copy the copied file is still in trash (bug in api?)
if err == nil && bool(info.Deleted) && !f.opt.TrashedOnly && info.State == "COMPLETED" { if err == nil && bool(info.Deleted) && !f.opt.TrashedOnly && info.State == "COMPLETED" {
fs.Debugf(src, "Server-side copied to trashed destination, restoring") fs.Debugf(src, "Server-side copied to trashed destination, restoring")
info, err = f.createOrUpdate(ctx, remote, srcObj.modTime, srcObj.size, srcObj.md5) info, err = f.createOrUpdate(ctx, remote, srcObj.modTime, srcObj.size, srcObj.md5)
@@ -1433,9 +1289,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Move src to this remote using server-side move operations. // Move src to this remote using server-side move operations.
// //
// This is stored with the remote path given. // This is stored with the remote path given
// //
// It returns the destination Object and a possible error. // It returns the destination Object and a possible error
// //
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
@@ -1495,7 +1351,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return fs.ErrorDirExists return fs.ErrorDirExists
} }
_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.fileEndpoint, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote) _, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)
if err != nil { if err != nil {
return fmt.Errorf("couldn't move directory: %w", err) return fmt.Errorf("couldn't move directory: %w", err)
@@ -1520,7 +1376,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var resp *http.Response var resp *http.Response
var result api.JottaFile var result api.JottaFile
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err = f.jfsSrv.CallXML(ctx, &opts, nil, &result) resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
@@ -1546,19 +1402,19 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return "", errors.New("couldn't create public link - no uri received") return "", errors.New("couldn't create public link - no uri received")
} }
if result.PublicSharePath != "" { if result.PublicSharePath != "" {
webLink := joinPath(wwwURL, result.PublicSharePath) webLink := joinPath(baseURL, result.PublicSharePath)
fs.Debugf(nil, "Web link: %s", webLink) fs.Debugf(nil, "Web link: %s", webLink)
} else { } else {
fs.Debugf(nil, "No web link received") fs.Debugf(nil, "No web link received")
} }
directLink := joinPath(wwwURL, fmt.Sprintf("opin/io/downloadPublic/%s/%s", f.user, result.PublicURI)) directLink := joinPath(baseURL, fmt.Sprintf("opin/io/downloadPublic/%s/%s", f.user, result.PublicURI))
fs.Debugf(nil, "Direct link: %s", directLink) fs.Debugf(nil, "Direct link: %s", directLink)
return directLink, nil return directLink, nil
} }
// About gets quota information // About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
info, err := getDriveInfo(ctx, f.jfsSrv, f.user) info, err := getDriveInfo(ctx, f.srv, f.user)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -1761,7 +1617,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
opts.Parameters.Set("mode", "bin") opts.Parameters.Set("mode", "bin")
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.jfsSrv.Call(ctx, &opts) resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
@@ -1788,7 +1644,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
var tempFile *os.File var tempFile *os.File
// create the cache file // create the cache file
tempFile, err = os.CreateTemp("", cachePrefix) tempFile, err = ioutil.TempFile("", cachePrefix)
if err != nil { if err != nil {
return return
} }
@@ -1816,7 +1672,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
} else { } else {
// that's a small file, just read it into memory // that's a small file, just read it into memory
var inData []byte var inData []byte
inData, err = io.ReadAll(teeReader) inData, err = ioutil.ReadAll(teeReader)
if err != nil { if err != nil {
return return
} }
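A stdlib-only sketch of the readMD5 pattern above (illustrative, not rclone's code): hash the stream while spooling it, to a temp file above the threshold or to memory below it, and return a replayable reader plus a cleanup func.
package main
import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"strings"
)
// readMD5 hashes in while buffering it, returning the hex digest and a
// reader positioned at the start of the buffered data.
func readMD5(in io.Reader, size, threshold int64) (string, io.Reader, func(), error) {
	hasher := md5.New()
	tee := io.TeeReader(in, hasher)
	cleanup := func() {}
	var out io.Reader
	if size > threshold {
		// big file: spool to disk
		f, err := os.CreateTemp("", "spool")
		if err != nil {
			return "", nil, cleanup, err
		}
		cleanup = func() { f.Close(); os.Remove(f.Name()) }
		if _, err := io.Copy(f, tee); err != nil {
			return "", nil, cleanup, err
		}
		if _, err := f.Seek(0, io.SeekStart); err != nil {
			return "", nil, cleanup, err
		}
		out = f
	} else {
		// small file: read it into memory
		data, err := io.ReadAll(tee)
		if err != nil {
			return "", nil, cleanup, err
		}
		out = bytes.NewReader(data)
	}
	return hex.EncodeToString(hasher.Sum(nil)), out, cleanup, nil
}
func main() {
	sum, r, cleanup, err := readMD5(strings.NewReader("hello"), 5, 1024)
	defer cleanup()
	data, _ := io.ReadAll(r)
	fmt.Println(sum, string(data), err) // 5d41402abc4b2a76b9719d911017c592 hello <nil>
}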
@@ -1829,7 +1685,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
// Update the object with the contents of the io.Reader, modTime and size // Update the object with the contents of the io.Reader, modTime and size
// //
// If existing is set then it updates the object rather than creating a new one. // If existing is set then it updates the object rather than creating a new one
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
@@ -1882,7 +1738,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Created: fileDate, Created: fileDate,
Modified: fileDate, Modified: fileDate,
Md5: md5String, Md5: md5String,
Path: o.fs.allocatePathRaw(o.remote, true), Path: path.Join(o.fs.opt.Mountpoint, o.fs.opt.Enc.FromStandardPath(path.Join(o.fs.root, o.remote))),
} }
// send it // send it
@@ -1913,7 +1769,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// copy the already uploaded bytes into the trash :) // copy the already uploaded bytes into the trash :)
var result api.UploadResponse var result api.UploadResponse
_, err = io.CopyN(io.Discard, in, response.ResumePos) _, err = io.CopyN(ioutil.Discard, in, response.ResumePos)
if err != nil { if err != nil {
return err return err
} }
@@ -1952,7 +1808,7 @@ func (o *Object) remove(ctx context.Context, hard bool) error {
} }
return o.fs.pacer.Call(func() (bool, error) { return o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.jfsSrv.CallXML(ctx, &opts, nil, nil) resp, err := o.fs.srv.CallXML(ctx, &opts, nil, nil)
return shouldRetry(ctx, resp, err) return shouldRetry(ctx, resp, err)
}) })
} }

View File

@@ -1,4 +1,3 @@
// Package koofr provides an interface to the Koofr storage system.
package koofr package koofr
import ( import (
@@ -352,9 +351,9 @@ func NewFsFromOptions(ctx context.Context, name, root string, opt *Options) (ff
} }
if f.mountID == "" { if f.mountID == "" {
if opt.MountID == "" { if opt.MountID == "" {
return nil, errors.New("failed to find primary mount") return nil, errors.New("Failed to find primary mount")
} }
return nil, errors.New("failed to find mount " + opt.MountID) return nil, errors.New("Failed to find mount " + opt.MountID)
} }
rootFile, err := f.client.FilesInfo(f.mountID, f.opt.Enc.FromStandardPath("/"+f.root)) rootFile, err := f.client.FilesInfo(f.mountID, f.opt.Enc.FromStandardPath("/"+f.root))
if err == nil && rootFile.Type != "dir" { if err == nil && rootFile.Type != "dir" {
@@ -668,7 +667,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
// //
// https://app.koofr.net/content/links/39a6cc01-3b23-477a-8059-c0fb3b0f15de/files/get?path=%2F // https://app.koofr.net/content/links/39a6cc01-3b23-477a-8059-c0fb3b0f15de/files/get?path=%2F
// //
// I am not sure about meaning of "path" parameter; in my experiments // I am not sure about meaning of "path" parameter; in my expriments
// it is always "%2F", and omitting it or putting any other value // it is always "%2F", and omitting it or putting any other value
// results in 404. // results in 404.
// //

View File

@@ -17,12 +17,8 @@ var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSp
// About gets quota information // About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var available, total, free int64 var available, total, free int64
root, e := syscall.UTF16PtrFromString(f.root)
if e != nil {
return nil, fmt.Errorf("failed to read disk usage: %w", e)
}
_, _, e1 := getFreeDiskSpace.Call( _, _, e1 := getFreeDiskSpace.Call(
uintptr(unsafe.Pointer(root)), uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(f.root))),
uintptr(unsafe.Pointer(&available)), // lpFreeBytesAvailable - for this user uintptr(unsafe.Pointer(&available)), // lpFreeBytesAvailable - for this user
uintptr(unsafe.Pointer(&total)), // lpTotalNumberOfBytes uintptr(unsafe.Pointer(&total)), // lpTotalNumberOfBytes
uintptr(unsafe.Pointer(&free)), // lpTotalNumberOfFreeBytes uintptr(unsafe.Pointer(&free)), // lpTotalNumberOfFreeBytes

View File

@@ -7,6 +7,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"os" "os"
"path" "path"
"path/filepath" "path/filepath"
@@ -21,7 +22,6 @@ import (
"github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/encoder"
@@ -42,22 +42,9 @@ func init() {
Description: "Local Disk", Description: "Local Disk",
NewFs: NewFs, NewFs: NewFs,
CommandHelp: commandHelp, CommandHelp: commandHelp,
MetadataInfo: &fs.MetadataInfo{
System: systemMetadataInfo,
Help: `Depending on which OS is in use the local backend may return only some
of the system metadata. Setting system metadata is supported on all
OSes but setting user metadata is only supported on linux, freebsd,
netbsd, macOS and Solaris. It is **not** supported on Windows yet
([see pkg/xattr#47](https://github.com/pkg/xattr/issues/47)).
User metadata is stored as extended attributes (which may not be
supported by all file systems) under the "user.*" prefix.
`,
},
Options: []fs.Option{{ Options: []fs.Option{{
Name: "nounc", Name: "nounc",
Help: "Disable UNC (long path names) conversion on Windows.", Help: "Disable UNC (long path names) conversion on Windows.",
Default: false,
Advanced: runtime.GOOS != "windows", Advanced: runtime.GOOS != "windows",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "true", Value: "true",
@@ -123,8 +110,8 @@ routine so this flag shouldn't normally be used.`,
Help: `Don't check to see if the files change during upload. Help: `Don't check to see if the files change during upload.
Normally rclone checks the size and modification time of files as they Normally rclone checks the size and modification time of files as they
are being uploaded and aborts with a message which starts "can't copy - are being uploaded and aborts with a message which starts "can't copy
source file is being updated" if the file changes during upload. - source file is being updated" if the file changes during upload.
However on some file systems this modification time check may fail (e.g. However on some file systems this modification time check may fail (e.g.
[Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this [Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this
@@ -234,16 +221,15 @@ type Options struct {
// Fs represents a local filesystem rooted at root // Fs represents a local filesystem rooted at root
type Fs struct { type Fs struct {
name string // the name of the remote name string // the name of the remote
root string // The root directory (OS path) root string // The root directory (OS path)
opt Options // parsed config options opt Options // parsed config options
features *fs.Features // optional features features *fs.Features // optional features
dev uint64 // device number of root node dev uint64 // device number of root node
precisionOk sync.Once // Whether we need to read the precision precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem precision time.Duration // precision of local filesystem
warnedMu sync.Mutex // used for locking access to 'warned'. warnedMu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string warned map[string]struct{} // whether we have warned about this string
xattrSupported int32 // whether xattrs are supported (atomic access)
// do os.Lstat or os.Stat // do os.Lstat or os.Stat
lstat func(name string) (os.FileInfo, error) lstat func(name string) (os.FileInfo, error)
@@ -287,19 +273,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
dev: devUnset, dev: devUnset,
lstat: os.Lstat, lstat: os.Lstat,
} }
if xattrSupported {
f.xattrSupported = 1
}
f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc) f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc)
f.features = (&fs.Features{ f.features = (&fs.Features{
CaseInsensitive: f.caseInsensitive(), CaseInsensitive: f.caseInsensitive(),
CanHaveEmptyDirectories: true, CanHaveEmptyDirectories: true,
IsLocal: true, IsLocal: true,
SlowHash: true, SlowHash: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported
FilterAware: true,
}).Fill(ctx, f) }).Fill(ctx, f)
if opt.FollowSymlinks { if opt.FollowSymlinks {
f.lstat = os.Stat f.lstat = os.Stat
@@ -444,8 +423,6 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// This should return ErrDirNotFound if the directory isn't // This should return ErrDirNotFound if the directory isn't
// found. // found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
filter, useFilter := filter.GetConfig(ctx), filter.GetUseFilter(ctx)
fsDirPath := f.localPath(dir) fsDirPath := f.localPath(dir)
_, err = os.Stat(fsDirPath) _, err = os.Stat(fsDirPath)
if err != nil { if err != nil {
@@ -496,14 +473,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
continue continue
} }
if fierr != nil { if fierr != nil {
// Don't report errors on any file names that are excluded err = fmt.Errorf("failed to read directory %q: %w", namepath, err)
if useFilter {
newRemote := f.cleanRemote(dir, name)
if !filter.IncludeRemote(newRemote) {
continue
}
}
err = fmt.Errorf("failed to read directory %q: %w", namepath, fierr)
fs.Errorf(dir, "%v", fierr) fs.Errorf(dir, "%v", fierr)
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync _ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
continue continue
@@ -536,11 +506,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
} }
mode = fi.Mode() mode = fi.Mode()
} }
// Don't include non directory if not included
// we leave directory filtering to the layer above
if useFilter && !fi.IsDir() && !filter.IncludeRemote(newRemote) {
continue
}
if fi.IsDir() { if fi.IsDir() {
// Ignore directories which are symlinks. These are junction points under windows which // Ignore directories which are symlinks. These are junction points under windows which
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks. // are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
@@ -645,7 +610,7 @@ func (f *Fs) readPrecision() (precision time.Duration) {
precision = time.Second precision = time.Second
// Create temporary file and test it // Create temporary file and test it
fd, err := os.CreateTemp("", "rclone") fd, err := ioutil.TempFile("", "rclone")
if err != nil { if err != nil {
// If failed return 1s // If failed return 1s
// fmt.Println("Failed to create temp file", err) // fmt.Println("Failed to create temp file", err)
@@ -714,9 +679,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// Move src to this remote using server-side move operations. // Move src to this remote using server-side move operations.
// //
// This is stored with the remote path given. // This is stored with the remote path given
// //
// It returns the destination Object and a possible error. // It returns the destination Object and a possible error
// //
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
@@ -938,7 +903,7 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
return "", fmt.Errorf("hash: failed to open: %w", err) return "", fmt.Errorf("hash: failed to open: %w", err)
} }
var hashes map[hash.Type]string var hashes map[hash.Type]string
hashes, err = hash.StreamTypes(readers.NewContextReader(ctx, in), hash.NewHashSet(r)) hashes, err = hash.StreamTypes(in, hash.NewHashSet(r))
closeErr := in.Close() closeErr := in.Close()
if err != nil { if err != nil {
return "", fmt.Errorf("hash: failed to read: %w", err) return "", fmt.Errorf("hash: failed to read: %w", err)
@@ -972,22 +937,17 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime return o.modTime
} }
// Set the atime and ltime of the object
func (o *Object) setTimes(atime, mtime time.Time) (err error) {
if o.translatedLink {
err = lChtimes(o.path, atime, mtime)
} else {
err = os.Chtimes(o.path, atime, mtime)
}
return err
}
// SetModTime sets the modification time of the local fs object // SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if o.fs.opt.NoSetModTime { if o.fs.opt.NoSetModTime {
return nil return nil
} }
err := o.setTimes(modTime, modTime) var err error
if o.translatedLink {
err = lChtimes(o.path, modTime, modTime)
} else {
err = os.Chtimes(o.path, modTime, modTime)
}
if err != nil { if err != nil {
return err return err
} }
@@ -1072,7 +1032,7 @@ func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err
if err != nil { if err != nil {
return nil, err return nil, err
} }
return readers.NewLimitedReadCloser(io.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil return readers.NewLimitedReadCloser(ioutil.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
} }
// Open an object for read // Open an object for read
@@ -1262,16 +1222,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err return err
} }
// Fetch and set metadata if --metadata is in use
meta, err := fs.GetMetadataOptions(ctx, src, options)
if err != nil {
return fmt.Errorf("failed to read metadata from source object: %w", err)
}
err = o.writeMetadata(meta)
if err != nil {
return fmt.Errorf("failed to set metadata: %w", err)
}
// ReRead info now that we have finished // ReRead info now that we have finished
return o.lstat() return o.lstat()
} }
@@ -1370,56 +1320,31 @@ func (o *Object) Remove(ctx context.Context) error {
return remove(o.path) return remove(o.path)
} }
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
metadata, err = o.getXattr()
if err != nil {
return nil, err
}
err = o.readMetadataFromFile(&metadata)
if err != nil {
return nil, err
}
return metadata, nil
}
// Write the metadata on the object
func (o *Object) writeMetadata(metadata fs.Metadata) (err error) {
err = o.setXattr(metadata)
if err != nil {
return err
}
err = o.writeMetadataToFile(metadata)
if err != nil {
return err
}
return err
}
func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string { func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
if runtime.GOOS != "windows" || !strings.HasPrefix(s, "\\") { if runtime.GOOS == "windows" {
if !filepath.IsAbs(s) { if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
s2, err := filepath.Abs(s) s2, err := filepath.Abs(s)
if err == nil { if err == nil {
s = s2 s = s2
} }
} else {
s = filepath.Clean(s)
} }
}
if runtime.GOOS == "windows" {
s = filepath.ToSlash(s) s = filepath.ToSlash(s)
vol := filepath.VolumeName(s) vol := filepath.VolumeName(s)
s = vol + enc.FromStandardPath(s[len(vol):]) s = vol + enc.FromStandardPath(s[len(vol):])
s = filepath.FromSlash(s) s = filepath.FromSlash(s)
if !noUNC { if !noUNC {
// Convert to UNC // Convert to UNC
s = file.UNCPath(s) s = file.UNCPath(s)
} }
return s return s
} }
if !filepath.IsAbs(s) {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
s = enc.FromStandardPath(s) s = enc.FromStandardPath(s)
return s return s
} }
@@ -1434,5 +1359,4 @@ var (
 	_ fs.Commander      = &Fs{}
 	_ fs.OpenWriterAter = &Fs{}
 	_ fs.Object         = &Object{}
-	_ fs.Metadataer     = &Object{}
 )
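The var block above is the usual Go compile-time interface check: assigning a value to the blank identifier of an interface type makes the build fail if the type stops satisfying the interface, which is why dropping the fs.Metadataer line is all it takes to stop advertising metadata support. A generic sketch of the idiom, with made-up types:

// Illustration of the compile-time interface assertion idiom; Reader and
// File are invented for the example.
package main

type Reader interface{ Read() string }

type File struct{}

func (File) Read() string { return "data" }

// This line fails to compile if File ever loses Read().
var _ Reader = File{}

func main() {}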


@@ -3,19 +3,15 @@ package local
 import (
 	"bytes"
 	"context"
-	"fmt"
-	"io"
+	"io/ioutil"
 	"os"
 	"path"
 	"path/filepath"
-	"runtime"
-	"sort"
 	"testing"
 	"time"
 
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config/configmap"
-	"github.com/rclone/rclone/fs/filter"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/object"
 	"github.com/rclone/rclone/fstest"
@@ -33,6 +29,7 @@ func TestMain(m *testing.M) {
 
 // Test copy with source file that's updating
 func TestUpdatingCheck(t *testing.T) {
 	r := fstest.NewRun(t)
+	defer r.Finalise()
 	filePath := "sub dir/local test"
 	r.WriteFile(filePath, "content", time.Now())
@@ -77,6 +74,7 @@ func TestUpdatingCheck(t *testing.T) {
 
 func TestSymlink(t *testing.T) {
 	ctx := context.Background()
 	r := fstest.NewRun(t)
+	defer r.Finalise()
 	f := r.Flocal.(*Fs)
 	dir := f.root
@@ -148,7 +146,7 @@ func TestSymlink(t *testing.T) {
 	// Check reading the object
 	in, err := o.Open(ctx)
 	require.NoError(t, err)
-	contents, err := io.ReadAll(in)
+	contents, err := ioutil.ReadAll(in)
 	require.NoError(t, err)
 	require.Equal(t, "file.txt", string(contents))
 	require.NoError(t, in.Close())
@@ -156,7 +154,7 @@ func TestSymlink(t *testing.T) {
 	// Check reading the object with range
 	in, err = o.Open(ctx, &fs.RangeOption{Start: 2, End: 5})
 	require.NoError(t, err)
-	contents, err = io.ReadAll(in)
+	contents, err = ioutil.ReadAll(in)
 	require.NoError(t, err)
 	require.Equal(t, "file.txt"[2:5+1], string(contents))
 	require.NoError(t, in.Close())
@@ -175,6 +173,7 @@ func TestSymlinkError(t *testing.T) {
 func TestHashOnUpdate(t *testing.T) {
 	ctx := context.Background()
 	r := fstest.NewRun(t)
+	defer r.Finalise()
 	const filePath = "file.txt"
 	when := time.Now()
 	r.WriteFile(filePath, "content", when)
@@ -189,7 +188,7 @@ func TestHashOnUpdate(t *testing.T) {
 	require.NoError(t, err)
 	assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)
 
-	// Reupload it with different contents but same size and timestamp
+	// Reupload it with diferent contents but same size and timestamp
 	var b = bytes.NewBufferString("CONTENT")
 	src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f)
 	err = o.Update(ctx, b, src)
@@ -205,6 +204,7 @@ func TestHashOnDelete(t *testing.T) {
 func TestHashOnDelete(t *testing.T) {
 	ctx := context.Background()
 	r := fstest.NewRun(t)
+	defer r.Finalise()
 	const filePath = "file.txt"
 	when := time.Now()
 	r.WriteFile(filePath, "content", when)
@@ -229,169 +229,3 @@ func TestHashOnDelete(t *testing.T) {
 	_, err = o.Hash(ctx, hash.MD5)
 	require.Error(t, err)
 }
-
-func TestMetadata(t *testing.T) {
-	ctx := context.Background()
-	r := fstest.NewRun(t)
-	const filePath = "metafile.txt"
-	when := time.Now()
-	const dayLength = len("2001-01-01")
-	whenRFC := when.Format(time.RFC3339Nano)
-	r.WriteFile(filePath, "metadata file contents", when)
-	f := r.Flocal.(*Fs)
-
-	// Get the object
-	obj, err := f.NewObject(ctx, filePath)
-	require.NoError(t, err)
-	o := obj.(*Object)
-
-	features := f.Features()
-
-	var hasXID, hasAtime, hasBtime bool
-	switch runtime.GOOS {
-	case "darwin", "freebsd", "netbsd", "linux":
-		hasXID, hasAtime, hasBtime = true, true, true
-	case "openbsd", "solaris":
-		hasXID, hasAtime = true, true
-	case "windows":
-		hasAtime, hasBtime = true, true
-	case "plan9", "js":
-		// nada
-	default:
-		t.Errorf("No test cases for OS %q", runtime.GOOS)
-	}
-
-	assert.True(t, features.ReadMetadata)
-	assert.True(t, features.WriteMetadata)
-	assert.Equal(t, xattrSupported, features.UserMetadata)
-
-	t.Run("Xattr", func(t *testing.T) {
-		if !xattrSupported {
-			t.Skip()
-		}
-		m, err := o.getXattr()
-		require.NoError(t, err)
-		assert.Nil(t, m)
-
-		inM := fs.Metadata{
-			"potato":  "chips",
-			"cabbage": "soup",
-		}
-		err = o.setXattr(inM)
-		require.NoError(t, err)
-
-		m, err = o.getXattr()
-		require.NoError(t, err)
-		assert.NotNil(t, m)
-		assert.Equal(t, inM, m)
-	})
-
-	checkTime := func(m fs.Metadata, key string, when time.Time) {
-		mt, ok := o.parseMetadataTime(m, key)
-		assert.True(t, ok)
-		dt := mt.Sub(when)
-		precision := time.Second
-		assert.True(t, dt >= -precision && dt <= precision, fmt.Sprintf("%s: dt %v outside +/- precision %v", key, dt, precision))
-	}
-
-	checkInt := func(m fs.Metadata, key string, base int) int {
-		value, ok := o.parseMetadataInt(m, key, base)
-		assert.True(t, ok)
-		return value
-	}
-
-	t.Run("Read", func(t *testing.T) {
-		m, err := o.Metadata(ctx)
-		require.NoError(t, err)
-		assert.NotNil(t, m)
-
-		// All OSes have these
-		checkInt(m, "mode", 8)
-		checkTime(m, "mtime", when)
-		assert.Equal(t, len(whenRFC), len(m["mtime"]))
-		assert.Equal(t, whenRFC[:dayLength], m["mtime"][:dayLength])
-
-		if hasAtime {
-			checkTime(m, "atime", when)
-		}
-		if hasBtime {
-			checkTime(m, "btime", when)
-		}
-		if hasXID {
-			checkInt(m, "uid", 10)
-			checkInt(m, "gid", 10)
-		}
-	})
-
-	t.Run("Write", func(t *testing.T) {
-		newAtimeString := "2011-12-13T14:15:16.999999999Z"
-		newAtime := fstest.Time(newAtimeString)
-		newMtimeString := "2011-12-12T14:15:16.999999999Z"
-		newMtime := fstest.Time(newMtimeString)
-		newBtimeString := "2011-12-11T14:15:16.999999999Z"
-		newBtime := fstest.Time(newBtimeString)
-		newM := fs.Metadata{
-			"mtime": newMtimeString,
-			"atime": newAtimeString,
-			"btime": newBtimeString,
-			// Can't test uid, gid without being root
-			"mode":   "0767",
-			"potato": "wedges",
-		}
-		err := o.writeMetadata(newM)
-		require.NoError(t, err)
-
-		m, err := o.Metadata(ctx)
-		require.NoError(t, err)
-		assert.NotNil(t, m)
-
-		mode := checkInt(m, "mode", 8)
-		if runtime.GOOS != "windows" {
-			assert.Equal(t, 0767, mode&0777, fmt.Sprintf("mode wrong - expecting 0767 got 0%o", mode&0777))
-		}
-		checkTime(m, "mtime", newMtime)
-		if hasAtime {
-			checkTime(m, "atime", newAtime)
-		}
-		if haveSetBTime {
-			checkTime(m, "btime", newBtime)
-		}
-		if xattrSupported {
-			assert.Equal(t, "wedges", m["potato"])
-		}
-	})
-}
-
-func TestFilter(t *testing.T) {
-	ctx := context.Background()
-	r := fstest.NewRun(t)
-	when := time.Now()
-	r.WriteFile("included", "included file", when)
-	r.WriteFile("excluded", "excluded file", when)
-	f := r.Flocal.(*Fs)
-
-	// Check set up for filtering
-	assert.True(t, f.Features().FilterAware)
-
-	// Add a filter
-	ctx, fi := filter.AddConfig(ctx)
-	require.NoError(t, fi.AddRule("+ included"))
-	require.NoError(t, fi.AddRule("- *"))
-
-	// Check listing without use filter flag
-	entries, err := f.List(ctx, "")
-	require.NoError(t, err)
-	sort.Sort(entries)
-	require.Equal(t, "[excluded included]", fmt.Sprint(entries))
-
-	// Add user filter flag
-	ctx = filter.SetUseFilter(ctx, true)
-
-	// Check listing with use filter flag
-	entries, err = f.List(ctx, "")
-	require.NoError(t, err)
-	sort.Sort(entries)
-	require.Equal(t, "[included]", fmt.Sprint(entries))
-}


@@ -11,8 +11,7 @@ import (
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
 		RemoteName:  "",
 		NilObject:   (*local.Object)(nil),
-		QuickTestOK: true,
 	})
 }


@@ -1,138 +0,0 @@
-package local
-
-import (
-	"fmt"
-	"os"
-	"runtime"
-	"strconv"
-	"time"
-
-	"github.com/rclone/rclone/fs"
-)
-
-const metadataTimeFormat = time.RFC3339Nano
-
-// system metadata keys which this backend owns
-//
-// not all values supported on all OSes
-var systemMetadataInfo = map[string]fs.MetadataHelp{
-	"mode": {
-		Help:    "File type and mode",
-		Type:    "octal, unix style",
-		Example: "0100664",
-	},
-	"uid": {
-		Help:    "User ID of owner",
-		Type:    "decimal number",
-		Example: "500",
-	},
-	"gid": {
-		Help:    "Group ID of owner",
-		Type:    "decimal number",
-		Example: "500",
-	},
-	"rdev": {
-		Help:    "Device ID (if special file)",
-		Type:    "hexadecimal",
-		Example: "1abc",
-	},
-	"atime": {
-		Help:    "Time of last access",
-		Type:    "RFC 3339",
-		Example: "2006-01-02T15:04:05.999999999Z07:00",
-	},
-	"mtime": {
-		Help:    "Time of last modification",
-		Type:    "RFC 3339",
-		Example: "2006-01-02T15:04:05.999999999Z07:00",
-	},
-	"btime": {
-		Help:    "Time of file birth (creation)",
-		Type:    "RFC 3339",
-		Example: "2006-01-02T15:04:05.999999999Z07:00",
-	},
-}
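For orientation while reading the helpers that follow: rclone metadata is a flat string-to-string map (fs.Metadata), so the keys documented in the table above travel as plain strings. A hypothetical local-file example, using a plain map in place of the rclone type:

// Sketch of the shape implied by the table above; values are illustrative.
package main

import "fmt"

func main() {
	metadata := map[string]string{ // fs.Metadata is a string-to-string map
		"mode":  "0100664",
		"uid":   "500",
		"gid":   "500",
		"atime": "2022-06-10T14:15:16.999999999Z",
		"mtime": "2022-06-10T14:15:16.999999999Z",
		"btime": "2022-06-10T14:15:16.999999999Z",
	}
	for k, v := range metadata {
		fmt.Printf("%s=%s\n", k, v)
	}
}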
-
-// parse a time string from metadata with key
-func (o *Object) parseMetadataTime(m fs.Metadata, key string) (t time.Time, ok bool) {
-	value, ok := m[key]
-	if ok {
-		var err error
-		t, err = time.Parse(metadataTimeFormat, value)
-		if err != nil {
-			fs.Debugf(o, "failed to parse metadata %s: %q: %v", key, value, err)
-			ok = false
-		}
-	}
-	return t, ok
-}
-
-// parse an int from metadata with key and base
-func (o *Object) parseMetadataInt(m fs.Metadata, key string, base int) (result int, ok bool) {
-	value, ok := m[key]
-	if ok {
-		var err error
-		result64, err := strconv.ParseInt(value, base, 64)
-		if err != nil {
-			fs.Debugf(o, "failed to parse metadata %s: %q: %v", key, value, err)
-			ok = false
-		}
-		result = int(result64)
-	}
-	return result, ok
-}
-
-// Write the metadata into the file
-//
-// It isn't possible to set the ctime and btime under Unix
-func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
-	var err error
-	atime, atimeOK := o.parseMetadataTime(m, "atime")
-	mtime, mtimeOK := o.parseMetadataTime(m, "mtime")
-	btime, btimeOK := o.parseMetadataTime(m, "btime")
-	if atimeOK || mtimeOK {
-		if atimeOK && !mtimeOK {
-			mtime = atime
-		}
-		if !atimeOK && mtimeOK {
-			atime = mtime
-		}
-		err = o.setTimes(atime, mtime)
-		if err != nil {
-			outErr = fmt.Errorf("failed to set times: %w", err)
-		}
-	}
-	if haveSetBTime {
-		if btimeOK {
-			err = setBTime(o.path, btime)
-			if err != nil {
-				outErr = fmt.Errorf("failed to set birth (creation) time: %w", err)
-			}
-		}
-	}
-	uid, hasUID := o.parseMetadataInt(m, "uid", 10)
-	gid, hasGID := o.parseMetadataInt(m, "gid", 10)
-	if hasUID {
-		// FIXME should read UID and GID of current user and only attempt to set it if different
-		if !hasGID {
-			gid = uid
-		}
-		if runtime.GOOS == "windows" || runtime.GOOS == "plan9" {
-			fs.Debugf(o, "Ignoring request to set ownership %o.%o on this OS", gid, uid)
-		} else {
-			err = os.Chown(o.path, uid, gid)
-			if err != nil {
-				outErr = fmt.Errorf("failed to change ownership: %w", err)
-			}
-		}
-	}
-	mode, hasMode := o.parseMetadataInt(m, "mode", 8)
-	if hasMode {
-		err = os.Chmod(o.path, os.FileMode(mode))
-		if err != nil {
-			outErr = fmt.Errorf("failed to change permissions: %w", err)
-		}
-	}
-	// FIXME not parsing rdev yet
-	return outErr
-}
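Everything writeMetadataToFile does ultimately reduces to parsing strings and calling into the os package. A self-contained sketch of that core, with a hypothetical path and values (the symlink, btime, and ownership handling above are omitted):

// Minimal sketch of the parse-then-apply flow: RFC 3339 times via
// time.Parse, an octal mode via strconv.ParseInt, applied with os.Chtimes
// and os.Chmod. The target path is made up.
package main

import (
	"log"
	"os"
	"strconv"
	"time"
)

func main() {
	const path = "/tmp/example.txt" // hypothetical target

	mtime, err := time.Parse(time.RFC3339Nano, "2011-12-12T14:15:16.999999999Z")
	if err != nil {
		log.Fatal(err)
	}
	// atime falls back to mtime when only one is supplied, as above
	if err := os.Chtimes(path, mtime, mtime); err != nil {
		log.Fatal(err)
	}

	mode, err := strconv.ParseInt("0767", 8, 64) // base 8, like the "mode" key
	if err != nil {
		log.Fatal(err)
	}
	if err := os.Chmod(path, os.FileMode(mode)); err != nil {
		log.Fatal(err)
	}
}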


@@ -1,38 +0,0 @@
-//go:build darwin || freebsd || netbsd
-// +build darwin freebsd netbsd
-
-package local
-
-import (
-	"fmt"
-	"syscall"
-	"time"
-
-	"github.com/rclone/rclone/fs"
-)
-
-// Read the metadata from the file into metadata where possible
-func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
-	info, err := o.fs.lstat(o.path)
-	if err != nil {
-		return err
-	}
-	stat, ok := info.Sys().(*syscall.Stat_t)
-	if !ok {
-		fs.Debugf(o, "didn't return Stat_t as expected")
-		return nil
-	}
-	m.Set("mode", fmt.Sprintf("%0o", stat.Mode))
-	m.Set("uid", fmt.Sprintf("%d", stat.Uid))
-	m.Set("gid", fmt.Sprintf("%d", stat.Gid))
-	if stat.Rdev != 0 {
-		m.Set("rdev", fmt.Sprintf("%x", stat.Rdev))
-	}
-	setTime := func(key string, t syscall.Timespec) {
-		m.Set(key, time.Unix(t.Unix()).Format(metadataTimeFormat))
-	}
-	setTime("atime", stat.Atimespec)
-	setTime("mtime", stat.Mtimespec)
-	setTime("btime", stat.Birthtimespec)
-	return nil
-}


@@ -1,102 +0,0 @@
-//go:build linux
-// +build linux
-
-package local
-
-import (
-	"fmt"
-	"sync"
-	"time"
-
-	"github.com/rclone/rclone/fs"
-	"golang.org/x/sys/unix"
-)
-
-var (
-	statxCheckOnce         sync.Once
-	readMetadataFromFileFn func(o *Object, m *fs.Metadata) (err error)
-)
-
-// Read the metadata from the file into metadata where possible
-func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
-	statxCheckOnce.Do(func() {
-		// Check statx() is available as it was only introduced in kernel 4.11
-		// If not, fall back to fstatat() which was introduced in 2.6.16 which is guaranteed for all Go versions
-		var stat unix.Statx_t
-		if unix.Statx(unix.AT_FDCWD, ".", 0, unix.STATX_ALL, &stat) != unix.ENOSYS {
-			readMetadataFromFileFn = readMetadataFromFileStatx
-		} else {
-			readMetadataFromFileFn = readMetadataFromFileFstatat
-		}
-	})
-	return readMetadataFromFileFn(o, m)
-}
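The function above is a probe-once pattern: a sync.Once runs a capability check the first time the code path is needed and caches a function pointer, so every later call dispatches without re-probing. A generic sketch of the same pattern, with a made-up probe:

// Generic feature-detection-once sketch; probeFastPathAvailable is a
// stand-in for a real runtime check such as the ENOSYS test above.
package main

import (
	"fmt"
	"sync"
)

var (
	initOnce sync.Once
	impl     func() string
)

func fastPath() string { return "fast" }
func slowPath() string { return "slow" }

func do() string {
	initOnce.Do(func() {
		if probeFastPathAvailable() {
			impl = fastPath
		} else {
			impl = slowPath
		}
	})
	return impl()
}

func probeFastPathAvailable() bool { return false } // hypothetical probe

func main() { fmt.Println(do()) }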
-
-// Read the metadata from the file into metadata where possible
-func readMetadataFromFileStatx(o *Object, m *fs.Metadata) (err error) {
-	flags := unix.AT_SYMLINK_NOFOLLOW
-	if o.fs.opt.FollowSymlinks {
-		flags = 0
-	}
-	var stat unix.Statx_t
-	// statx() was added to Linux in kernel 4.11
-	err = unix.Statx(unix.AT_FDCWD, o.path, flags, (0 |
-		unix.STATX_TYPE | // Want stx_mode & S_IFMT
-		unix.STATX_MODE | // Want stx_mode & ~S_IFMT
-		unix.STATX_UID | // Want stx_uid
-		unix.STATX_GID | // Want stx_gid
-		unix.STATX_ATIME | // Want stx_atime
-		unix.STATX_MTIME | // Want stx_mtime
-		unix.STATX_CTIME | // Want stx_ctime
-		unix.STATX_BTIME), // Want stx_btime
-		&stat)
-	if err != nil {
-		return err
-	}
-	m.Set("mode", fmt.Sprintf("%0o", stat.Mode))
-	m.Set("uid", fmt.Sprintf("%d", stat.Uid))
-	m.Set("gid", fmt.Sprintf("%d", stat.Gid))
-	if stat.Rdev_major != 0 || stat.Rdev_minor != 0 {
-		m.Set("rdev", fmt.Sprintf("%x", uint64(stat.Rdev_major)<<32|uint64(stat.Rdev_minor)))
-	}
-	setTime := func(key string, t unix.StatxTimestamp) {
-		m.Set(key, time.Unix(t.Sec, int64(t.Nsec)).Format(metadataTimeFormat))
-	}
-	setTime("atime", stat.Atime)
-	setTime("mtime", stat.Mtime)
-	setTime("btime", stat.Btime)
-	return nil
-}
-
-// Read the metadata from the file into metadata where possible
-func readMetadataFromFileFstatat(o *Object, m *fs.Metadata) (err error) {
-	flags := unix.AT_SYMLINK_NOFOLLOW
-	if o.fs.opt.FollowSymlinks {
-		flags = 0
-	}
-	var stat unix.Stat_t
-	// fstatat() was added to Linux in kernel 2.6.16
-	// Go only supports 2.6.32 or later
-	err = unix.Fstatat(unix.AT_FDCWD, o.path, &stat, flags)
-	if err != nil {
-		return err
-	}
-	m.Set("mode", fmt.Sprintf("%0o", stat.Mode))
-	m.Set("uid", fmt.Sprintf("%d", stat.Uid))
-	m.Set("gid", fmt.Sprintf("%d", stat.Gid))
-	if stat.Rdev != 0 {
-		m.Set("rdev", fmt.Sprintf("%x", stat.Rdev))
-	}
-	setTime := func(key string, t unix.Timespec) {
-		// The types of t.Sec and t.Nsec vary from int32 to int64 on
-		// different Linux architectures so we need to cast them to
-		// int64 here and hence need to quiet the linter about
-		// unnecessary casts.
-		//
-		// nolint: unconvert
-		m.Set(key, time.Unix(int64(t.Sec), int64(t.Nsec)).Format(metadataTimeFormat))
-	}
-	setTime("atime", stat.Atim)
-	setTime("mtime", stat.Mtim)
-	return nil
-}
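For comparison with the fallback path above, here is a Linux-only standalone sketch of unix.Fstatat with AT_SYMLINK_NOFOLLOW; the path is hypothetical and golang.org/x/sys/unix is required:

//go:build linux

// Stat a path without following symlinks and print mode/uid/gid the way
// the code above formats them.
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	var stat unix.Stat_t
	err := unix.Fstatat(unix.AT_FDCWD, "/etc/hostname", &stat, unix.AT_SYMLINK_NOFOLLOW)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("mode=%0o uid=%d gid=%d\n", stat.Mode, stat.Uid, stat.Gid)
}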


@@ -1,21 +0,0 @@
-//go:build plan9 || js
-// +build plan9 js
-
-package local
-
-import (
-	"fmt"
-
-	"github.com/rclone/rclone/fs"
-)
-
-// Read the metadata from the file into metadata where possible
-func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
-	info, err := o.fs.lstat(o.path)
-	if err != nil {
-		return err
-	}
-	m.Set("mode", fmt.Sprintf("%0o", info.Mode()))
-	m.Set("mtime", info.ModTime().Format(metadataTimeFormat))
-	return nil
-}


@@ -1,37 +0,0 @@
-//go:build openbsd || solaris
-// +build openbsd solaris
-
-package local
-
-import (
-	"fmt"
-	"syscall"
-	"time"
-
-	"github.com/rclone/rclone/fs"
-)
-
-// Read the metadata from the file into metadata where possible
-func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
-	info, err := o.fs.lstat(o.path)
-	if err != nil {
-		return err
-	}
-	stat, ok := info.Sys().(*syscall.Stat_t)
-	if !ok {
-		fs.Debugf(o, "didn't return Stat_t as expected")
-		return nil
-	}
-	m.Set("mode", fmt.Sprintf("%0o", stat.Mode))
-	m.Set("uid", fmt.Sprintf("%d", stat.Uid))
-	m.Set("gid", fmt.Sprintf("%d", stat.Gid))
-	if stat.Rdev != 0 {
-		m.Set("rdev", fmt.Sprintf("%x", stat.Rdev))
-	}
-	setTime := func(key string, t syscall.Timespec) {
-		m.Set(key, time.Unix(t.Unix()).Format(metadataTimeFormat))
-	}
-	setTime("atime", stat.Atim)
-	setTime("mtime", stat.Mtim)
-	return nil
-}


@@ -1,34 +0,0 @@
-//go:build windows
-// +build windows
-
-package local
-
-import (
-	"fmt"
-	"syscall"
-	"time"
-
-	"github.com/rclone/rclone/fs"
-)
-
-// Read the metadata from the file into metadata where possible
-func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
-	info, err := o.fs.lstat(o.path)
-	if err != nil {
-		return err
-	}
-	stat, ok := info.Sys().(*syscall.Win32FileAttributeData)
-	if !ok {
-		fs.Debugf(o, "didn't return Win32FileAttributeData as expected")
-		return nil
-	}
-	// FIXME do something with stat.FileAttributes ?
-	m.Set("mode", fmt.Sprintf("%0o", info.Mode()))
-	setTime := func(key string, t syscall.Filetime) {
-		m.Set(key, time.Unix(0, t.Nanoseconds()).Format(metadataTimeFormat))
-	}
-	setTime("atime", stat.LastAccessTime)
-	setTime("mtime", stat.LastWriteTime)
-	setTime("btime", stat.CreationTime)
-	return nil
-}
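The same lstat-then-assert pattern can be tried outside rclone. A Windows-only sketch with a hypothetical path, converting the native Filetime fields to time.Time via Nanoseconds():

//go:build windows

// os.Lstat exposes *syscall.Win32FileAttributeData via Sys() on Windows.
package main

import (
	"fmt"
	"log"
	"os"
	"syscall"
	"time"
)

func main() {
	info, err := os.Lstat(`C:\Windows\notepad.exe`) // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	stat, ok := info.Sys().(*syscall.Win32FileAttributeData)
	if !ok {
		log.Fatal("unexpected Sys() type")
	}
	fmt.Println("btime:", time.Unix(0, stat.CreationTime.Nanoseconds()))
	fmt.Println("atime:", time.Unix(0, stat.LastAccessTime.Nanoseconds()))
	fmt.Println("mtime:", time.Unix(0, stat.LastWriteTime.Nanoseconds()))
}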


@@ -1,6 +1,7 @@
 package local
 
 import (
+	"io/ioutil"
 	"os"
 	"sync"
 	"testing"
@@ -12,7 +13,7 @@ import (
 
 // Check we can remove an open file
 func TestRemove(t *testing.T) {
-	fd, err := os.CreateTemp("", "rclone-remove-test")
+	fd, err := ioutil.TempFile("", "rclone-remove-test")
 	require.NoError(t, err)
 	name := fd.Name()
 	defer func() {
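Many hunks in this compare are the same mechanical swap between io/ioutil and its Go 1.16 replacements: ioutil.ReadAll -> io.ReadAll, ioutil.NopCloser -> io.NopCloser, ioutil.TempFile -> os.CreateTemp. A small sketch using only the modern forms:

// Demonstrates the Go 1.16+ replacements for the deprecated io/ioutil
// helpers seen throughout this diff.
package main

import (
	"io"
	"log"
	"os"
	"strings"
)

func main() {
	fd, err := os.CreateTemp("", "rclone-example-") // was ioutil.TempFile
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(fd.Name()) // runs after the Close below
	defer fd.Close()

	rc := io.NopCloser(strings.NewReader("hello")) // was ioutil.NopCloser
	defer rc.Close()

	data, err := io.ReadAll(rc) // was ioutil.ReadAll
	if err != nil {
		log.Fatal(err)
	}
	if _, err := fd.Write(data); err != nil {
		log.Fatal(err)
	}
}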

Some files were not shown because too many files have changed in this diff.