Mirror of https://github.com/rclone/rclone.git, synced 2025-12-11 22:03:17 +00:00

Compare commits: v1.65.1...fix-connec (1 commit)

| Author | SHA1 | Date | |
|---|---|---|---|
| | bede3a5d48 | | |
4 .github/FUNDING.yml (vendored, Normal file)

@@ -0,0 +1,4 @@
+github: [ncw]
+patreon: njcw
+liberapay: ncw
+custom: ["https://rclone.org/donate/"]
11 .github/ISSUE_TEMPLATE/Bug.md (vendored)

@@ -9,7 +9,7 @@ We understand you are having a problem with rclone; we want to help you with tha

 **STOP and READ**
 **YOUR POST WILL BE REMOVED IF IT IS LOW QUALITY**:
-Please show the effort you've put into solving the problem and please be specific.
+Please show the effort you've put in to solving the problem and please be specific.
 People are volunteering their time to help! Low effort posts are not likely to get good answers!

 If you think you might have found a bug, try to replicate it with the latest beta (or stable).

@@ -37,6 +37,7 @@ The Rclone Developers

 -->


 #### The associated forum post URL from `https://forum.rclone.org`


@@ -64,11 +65,3 @@ The Rclone Developers
 #### A log from the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)



-<!--- Please keep the note below for others who read your bug report. -->

-#### How to use GitHub

-* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are affected by the same issue.
-* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
-* Subscribe to receive notifications on status change and new comments.
9 .github/ISSUE_TEMPLATE/Feature.md (vendored)

@@ -26,6 +26,7 @@ The Rclone Developers

 -->


 #### The associated forum post URL from `https://forum.rclone.org`


@@ -41,11 +42,3 @@ The Rclone Developers
 #### How do you think rclone should be changed to solve that?



-<!--- Please keep the note below for others who read your feature request. -->

-#### How to use GitHub

-* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are affected by the same issue.
-* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
-* Subscribe to receive notifications on status change and new comments.
2 .github/PULL_REQUEST_TEMPLATE.md (vendored)

@@ -22,7 +22,7 @@ Link issues and relevant forum posts here.

 #### Checklist

-- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-new-feature-or-bug-fix).
+- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
 - [ ] I have added tests for all changes in this PR if appropriate.
 - [ ] I have added documentation for the changes if appropriate.
 - [ ] All commit messages are in [house style](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#commit-messages).
6 .github/dependabot.yml (vendored)

@@ -1,6 +0,0 @@
-version: 2
-updates:
-- package-ecosystem: "github-actions"
-directory: "/"
-schedule:
-interval: "daily"
303 .github/workflows/build.yml (vendored)

@@ -8,49 +8,33 @@ name: build
 on:
 push:
 branches:
-- '**'
+- '*'
 tags:
-- '**'
+- '*'
 pull_request:
-workflow_dispatch:
-inputs:
-manual:
-description: Manual run (bypass default conditions)
-type: boolean
-required: true
-default: true

 jobs:
 build:
-if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
 timeout-minutes: 60
 strategy:
 fail-fast: false
 matrix:
-job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.19', 'go1.20']
+job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.13', 'go1.14', 'go1.15']

 include:
 - job_name: linux
 os: ubuntu-latest
-go: '1.21'
+go: '1.16.x'
 gotags: cmount
 build_flags: '-include "^linux/"'
 check: true
 quicktest: true
 racequicktest: true
-librclonetest: true
 deploy: true

-- job_name: linux_386
-os: ubuntu-latest
-go: '1.21'
-goarch: 386
-gotags: cmount
-quicktest: true

 - job_name: mac_amd64
-os: macos-11
+os: macOS-latest
-go: '1.21'
+go: '1.16.x'
 gotags: 'cmount'
 build_flags: '-include "^darwin/amd64" -cgo'
 quicktest: true
@@ -58,38 +42,54 @@ jobs:
 deploy: true

 - job_name: mac_arm64
-os: macos-11
+os: macOS-latest
-go: '1.21'
+go: '1.16.x'
 gotags: 'cmount'
-build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
+build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -macos-sdk macosx11.1 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
 deploy: true

-- job_name: windows
+- job_name: windows_amd64
 os: windows-latest
-go: '1.21'
+go: '1.16.x'
 gotags: cmount
-cgo: '0'
+build_flags: '-include "^windows/amd64" -cgo'
-build_flags: '-include "^windows/"'
+build_args: '-buildmode exe'
+quicktest: true
+racequicktest: true
+deploy: true

+- job_name: windows_386
+os: windows-latest
+go: '1.16.x'
+gotags: cmount
+goarch: '386'
+cgo: '1'
+build_flags: '-include "^windows/386" -cgo'
 build_args: '-buildmode exe'
 quicktest: true
 deploy: true

 - job_name: other_os
 os: ubuntu-latest
-go: '1.21'
+go: '1.16.x'
 build_flags: '-exclude "^(windows/|darwin/|linux/)"'
 compile_all: true
 deploy: true

-- job_name: go1.19
+- job_name: go1.13
 os: ubuntu-latest
-go: '1.19'
+go: '1.13.x'
+quicktest: true

+- job_name: go1.14
+os: ubuntu-latest
+go: '1.14.x'
 quicktest: true
 racequicktest: true

-- job_name: go1.20
+- job_name: go1.15
 os: ubuntu-latest
-go: '1.20'
+go: '1.15.x'
 quicktest: true
 racequicktest: true

@@ -99,15 +99,15 @@ jobs:

 steps:
 - name: Checkout
-uses: actions/checkout@v4
+uses: actions/checkout@v2
 with:
 fetch-depth: 0

 - name: Install Go
-uses: actions/setup-go@v5
+uses: actions/setup-go@v2
 with:
+stable: 'false'
 go-version: ${{ matrix.go }}
-check-latest: true

 - name: Set environment variables
 shell: bash
@@ -124,20 +124,15 @@ jobs:
 sudo modprobe fuse
 sudo chmod 666 /dev/fuse
 sudo chown root:$USER /etc/fuse.conf
-sudo apt-get install fuse3 libfuse-dev rpm pkg-config
+sudo apt-get install fuse libfuse-dev rpm pkg-config
 if: matrix.os == 'ubuntu-latest'

 - name: Install Libraries on macOS
 shell: bash
 run: |
-# https://github.com/Homebrew/brew/issues/15621#issuecomment-1619266788
-# https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6319008
-unset HOMEBREW_NO_INSTALL_FROM_API
-brew untap --force homebrew/core
-brew untap --force homebrew/cask
 brew update
 brew install --cask macfuse
-if: matrix.os == 'macos-11'
+if: matrix.os == 'macOS-latest'

 - name: Install Libraries on Windows
 shell: powershell
@@ -168,7 +163,7 @@ jobs:
 env

 - name: Go module cache
-uses: actions/cache@v3
+uses: actions/cache@v2
 with:
 path: ~/go/pkg/mod
 key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -180,11 +175,6 @@ jobs:
 run: |
 make

-- name: Rclone version
-shell: bash
-run: |
-rclone version

 - name: Run tests
 shell: bash
 run: |
@@ -197,13 +187,12 @@ jobs:
 make racequicktest
 if: matrix.racequicktest

-- name: Run librclone tests
+- name: Code quality test
 shell: bash
 run: |
-make -C librclone/ctest test
+make build_dep
-make -C librclone/ctest clean
+make check
-librclone/python/test_rclone.py
+if: matrix.check
-if: matrix.librclonetest

 - name: Compile all architectures test
 shell: bash
@@ -216,144 +205,106 @@ jobs:
 shell: bash
 run: |
 if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep_linux ; fi
+if [[ "${{ matrix.os }}" == "windows-latest" ]]; then make release_dep_windows ; fi
 make ci_beta
 env:
 RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
 # working-directory: '$(modulePath)'
 # Deploy binaries if enabled in config && not a PR && not a fork
-if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
+if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'

-lint:
-if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
-timeout-minutes: 30
-name: "lint"
-runs-on: ubuntu-latest

-steps:
-- name: Checkout
-uses: actions/checkout@v4

-- name: Code quality test
-uses: golangci/golangci-lint-action@v3
-with:
-# Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
-version: latest

-# Run govulncheck on the latest go version, the one we build binaries with
-- name: Install Go
-uses: actions/setup-go@v5
-with:
-go-version: '1.21'
-check-latest: true

-- name: Install govulncheck
-run: go install golang.org/x/vuln/cmd/govulncheck@latest

-- name: Scan for vulnerabilities
-run: govulncheck ./...

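The lint job shown above pairs golangci-lint with a vulnerability scan. A minimal local sketch of the same govulncheck step, assuming Go is installed and you are in a checkout of the repository (the commands are the ones the workflow runs):

```bash
#!/usr/bin/env bash
set -euo pipefail

# Install the scanner used by the workflow's "Scan for vulnerabilities" step.
go install golang.org/x/vuln/cmd/govulncheck@latest

# Scan every package in the module, as the workflow does with `govulncheck ./...`.
govulncheck ./...
```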
 android:
-if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
+timeout-minutes: 30
-timeout-minutes: 30
+name: "android-all"
-name: "android-all"
+runs-on: ubuntu-latest
-runs-on: ubuntu-latest
+steps:
+- name: Checkout
+uses: actions/checkout@v2

-steps:
+# Upgrade together with NDK version
-- name: Checkout
+- name: Set up Go 1.14
-uses: actions/checkout@v4
+uses: actions/setup-go@v1
 with:
-fetch-depth: 0
+go-version: 1.14

-# Upgrade together with NDK version
+# Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
-- name: Set up Go
+- name: Force NDK version
-uses: actions/setup-go@v5
+run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;21.4.7075529" | grep -v = || true
-with:
-go-version: '1.21'

 - name: Go module cache
-uses: actions/cache@v3
+uses: actions/cache@v2
 with:
 path: ~/go/pkg/mod
 key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
 restore-keys: |
 ${{ runner.os }}-go-

 - name: Set global environment variables
 shell: bash
 run: |
 echo "VERSION=$(make version)" >> $GITHUB_ENV

 - name: build native rclone
 run: |
 make

-- name: install gomobile
+- name: arm-v7a Set environment variables
-run: |
+shell: bash
-go install golang.org/x/mobile/cmd/gobind@latest
+run: |
-go install golang.org/x/mobile/cmd/gomobile@latest
+echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
-env PATH=$PATH:~/go/bin gomobile init
+echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
-echo "RCLONE_NDK_VERSION=21" >> $GITHUB_ENV
+echo 'GOOS=android' >> $GITHUB_ENV
+echo 'GOARCH=arm' >> $GITHUB_ENV
+echo 'GOARM=7' >> $GITHUB_ENV
+echo 'CGO_ENABLED=1' >> $GITHUB_ENV
+echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
+- name: arm-v7a build
+run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-armv7a .

-- name: arm-v7a gomobile build
+- name: arm64-v8a Set environment variables
-run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
+shell: bash
+run: |
+echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
+echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
+echo 'GOOS=android' >> $GITHUB_ENV
+echo 'GOARCH=arm64' >> $GITHUB_ENV
+echo 'CGO_ENABLED=1' >> $GITHUB_ENV
+echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

-- name: arm-v7a Set environment variables
+- name: arm64-v8a build
-shell: bash
+run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-armv8a .
-run: |
-echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
-echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
-echo 'GOOS=android' >> $GITHUB_ENV
-echo 'GOARCH=arm' >> $GITHUB_ENV
-echo 'GOARM=7' >> $GITHUB_ENV
-echo 'CGO_ENABLED=1' >> $GITHUB_ENV
-echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

-- name: arm-v7a build
+- name: x86 Set environment variables
-run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a .
+shell: bash
+run: |
+echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
+echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
+echo 'GOOS=android' >> $GITHUB_ENV
+echo 'GOARCH=386' >> $GITHUB_ENV
+echo 'CGO_ENABLED=1' >> $GITHUB_ENV
+echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

-- name: arm64-v8a Set environment variables
+- name: x86 build
-shell: bash
+run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-x86 .
-run: |
-echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
-echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
-echo 'GOOS=android' >> $GITHUB_ENV
-echo 'GOARCH=arm64' >> $GITHUB_ENV
-echo 'CGO_ENABLED=1' >> $GITHUB_ENV
-echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

-- name: arm64-v8a build
+- name: x64 Set environment variables
-run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .
+shell: bash
+run: |
+echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
+echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
+echo 'GOOS=android' >> $GITHUB_ENV
+echo 'GOARCH=amd64' >> $GITHUB_ENV
+echo 'CGO_ENABLED=1' >> $GITHUB_ENV
+echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

-- name: x86 Set environment variables
+- name: x64 build
-shell: bash
+run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-x64 .
-run: |
-echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
-echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
-echo 'GOOS=android' >> $GITHUB_ENV
-echo 'GOARCH=386' >> $GITHUB_ENV
-echo 'CGO_ENABLED=1' >> $GITHUB_ENV
-echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

-- name: x86 build
+- name: Upload artifacts
-run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 .
+run: |
+make ci_upload
-- name: x64 Set environment variables
+env:
-shell: bash
+RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
-run: |
+# Upload artifacts if not a PR && not a fork
-echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
+if: github.head_ref == '' && github.repository == 'rclone/rclone'
-echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
-echo 'GOOS=android' >> $GITHUB_ENV
-echo 'GOARCH=amd64' >> $GITHUB_ENV
-echo 'CGO_ENABLED=1' >> $GITHUB_ENV
-echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV

-- name: x64 build
-run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x64 .

-- name: Upload artifacts
-run: |
-make ci_upload
-env:
-RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
-# Upload artifacts if not a PR && not a fork
-if: env.RCLONE_CONFIG_PASS != '' && github.head_ref == '' && github.repository == 'rclone/rclone'
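Both versions of the android job amount to pointing cgo at an NDK clang and cross-compiling with Go. A minimal local sketch of the arm64 variant, assuming ANDROID_NDK points at an installed NDK whose API 21 toolchain has the clang name shown (the path and output name are illustrative):

```bash
#!/usr/bin/env bash
set -euo pipefail

# Assumed: $ANDROID_NDK points at an NDK install; adjust the clang name to your NDK/API level.
export CC="$ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang"
export CC_FOR_TARGET="$CC"
export GOOS=android
export GOARCH=arm64
export CGO_ENABLED=1
export CGO_LDFLAGS="-fuse-ld=lld -s -w"

# Same version stamp and go build invocation as the workflow's arm64-v8a build step.
VERSION=$(make version)
go build -v -tags android -trimpath \
  -ldflags "-s -X github.com/rclone/rclone/fs.Version=${VERSION}" \
  -o build/rclone-android-21-armv8a .
```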
@@ -1,77 +0,0 @@
-name: Docker beta build

-on:
-push:
-branches:
-- master
-jobs:
-build:
-if: github.repository == 'rclone/rclone'
-runs-on: ubuntu-latest
-name: Build image job
-steps:
-- name: Free some space
-shell: bash
-run: |
-df -h .
-# Remove android SDK
-sudo rm -rf /usr/local/lib/android || true
-# Remove .net runtime
-sudo rm -rf /usr/share/dotnet || true
-df -h .
-- name: Checkout master
-uses: actions/checkout@v4
-with:
-fetch-depth: 0
-- name: Login to Docker Hub
-uses: docker/login-action@v3
-with:
-username: ${{ secrets.DOCKERHUB_USERNAME }}
-password: ${{ secrets.DOCKERHUB_TOKEN }}
-- name: Extract metadata (tags, labels) for Docker
-id: meta
-uses: docker/metadata-action@v5
-with:
-images: ghcr.io/${{ github.repository }}
-- name: Set up QEMU
-uses: docker/setup-qemu-action@v3
-- name: Set up Docker Buildx
-uses: docker/setup-buildx-action@v3
-- name: Login to GitHub Container Registry
-uses: docker/login-action@v3
-with:
-registry: ghcr.io
-# This is the user that triggered the Workflow. In this case, it will
-# either be the user whom created the Release or manually triggered
-# the workflow_dispatch.
-username: ${{ github.actor }}
-# `secrets.GITHUB_TOKEN` is a secret that's automatically generated by
-# GitHub Actions at the start of a workflow run to identify the job.
-# This is used to authenticate against GitHub Container Registry.
-# See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
-# for more detailed information.
-password: ${{ secrets.GITHUB_TOKEN }}
-- name: Show disk usage
-shell: bash
-run: |
-df -h .
-- name: Build and publish image
-uses: docker/build-push-action@v5
-with:
-file: Dockerfile
-context: .
-push: true # push the image to ghcr
-tags: |
-ghcr.io/rclone/rclone:beta
-rclone/rclone:beta
-labels: ${{ steps.meta.outputs.labels }}
-platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
-cache-from: type=gha, scope=${{ github.workflow }}
-cache-to: type=gha, mode=max, scope=${{ github.workflow }}
-provenance: false
-# Eventually cache will need to be cleared if builds more frequent than once a week
-# https://github.com/docker/build-push-action/issues/252
-- name: Show disk usage
-shell: bash
-run: |
-df -h .
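The removed workflow drives docker/build-push-action. A rough local sketch of the same multi-arch beta build with buildx, assuming Docker with buildx and QEMU emulation is set up and you are already logged in to both registries (tags and platforms copied from the workflow):

```bash
#!/usr/bin/env bash
set -euo pipefail

# Multi-arch build roughly equivalent to the workflow's build-push-action step.
docker buildx build \
  --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 \
  --file Dockerfile \
  --tag ghcr.io/rclone/rclone:beta \
  --tag rclone/rclone:beta \
  --push \
  .
```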
25 .github/workflows/build_publish_docker_image.yml (vendored, Normal file)

@@ -0,0 +1,25 @@
+name: Docker beta build

+on:
+push:
+branches:
+- master

+jobs:
+build:
+runs-on: ubuntu-latest
+name: Build image job
+steps:
+- name: Checkout master
+uses: actions/checkout@v2
+with:
+fetch-depth: 0
+- name: Build and publish image
+uses: ilteoood/docker_buildx@1.1.0
+with:
+tag: beta
+imageName: rclone/rclone
+platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
+publish: true
+dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
+dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
@@ -6,21 +6,11 @@ on:

 jobs:
 build:
-if: github.repository == 'rclone/rclone'
 runs-on: ubuntu-latest
 name: Build image job
 steps:
-- name: Free some space
-shell: bash
-run: |
-df -h .
-# Remove android SDK
-sudo rm -rf /usr/local/lib/android || true
-# Remove .net runtime
-sudo rm -rf /usr/share/dotnet || true
-df -h .
 - name: Checkout master
-uses: actions/checkout@v4
+uses: actions/checkout@v2
 with:
 fetch-depth: 0
 - name: Get actual patch version
@@ -37,41 +27,7 @@ jobs:
 with:
 tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
 imageName: rclone/rclone
-platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
+platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
 publish: true
 dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
 dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}

-build_docker_volume_plugin:
-if: github.repository == 'rclone/rclone'
-needs: build
-runs-on: ubuntu-latest
-name: Build docker plugin job
-steps:
-- name: Free some space
-shell: bash
-run: |
-df -h .
-# Remove android SDK
-sudo rm -rf /usr/local/lib/android || true
-# Remove .net runtime
-sudo rm -rf /usr/share/dotnet || true
-df -h .
-- name: Checkout master
-uses: actions/checkout@v4
-with:
-fetch-depth: 0
-- name: Build and publish docker plugin
-shell: bash
-run: |
-VER=${GITHUB_REF#refs/tags/}
-PLUGIN_USER=rclone
-docker login --username ${{ secrets.DOCKER_HUB_USER }} \
---password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}"
-for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do
-export PLUGIN_USER PLUGIN_ARCH
-make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}
-make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v}
-done
-make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest
-make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v}
14 .github/workflows/winget.yml (vendored)

@@ -1,14 +0,0 @@
-name: Publish to Winget
-on:
-release:
-types: [released]

-jobs:
-publish:
-runs-on: ubuntu-latest
-steps:
-- uses: vedantmgoyal2009/winget-releaser@v2
-with:
-identifier: Rclone.Rclone
-installers-regex: '-windows-\w+\.zip$'
-token: ${{ secrets.WINGET_TOKEN }}
9 .gitignore (vendored)

@@ -8,13 +8,6 @@ rclone.iml
 .idea
 .history
 *.test
+*.log
 *.iml
 fuzz-build.zip
-*.orig
-*.rej
-Thumbs.db
-__pycache__
-.DS_Store
-/docs/static/img/logos/
-resource_windows_*.syso
-.devcontainer
@@ -2,17 +2,15 @@

 linters:
 enable:
+- deadcode
 - errcheck
 - goimports
-- revive
+- golint
 - ineffassign
+- structcheck
+- varcheck
 - govet
 - unconvert
-- staticcheck
-- gosimple
-- stylecheck
-- unused
-- misspell
 #- prealloc
 #- maligned
 disable-all: true
@@ -22,79 +20,7 @@ issues:
 exclude-use-default: false

 # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
-max-issues-per-linter: 0
+max-per-linter: 0

 # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
 max-same-issues: 0

-exclude-rules:

-- linters:
-- staticcheck
-text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'

-# don't disable the revive messages about comments on exported functions
-include:
-- EXC0012
-- EXC0013
-- EXC0014
-- EXC0015

-run:
-# timeout for analysis, e.g. 30s, 5m, default is 1m
-timeout: 10m

-linters-settings:
-revive:
-# setting rules seems to disable all the rules, so re-enable them here
-rules:
-- name: blank-imports
-disabled: false
-- name: context-as-argument
-disabled: false
-- name: context-keys-type
-disabled: false
-- name: dot-imports
-disabled: false
-- name: empty-block
-disabled: true
-- name: error-naming
-disabled: false
-- name: error-return
-disabled: false
-- name: error-strings
-disabled: false
-- name: errorf
-disabled: false
-- name: exported
-disabled: false
-- name: increment-decrement
-disabled: true
-- name: indent-error-flow
-disabled: false
-- name: package-comments
-disabled: false
-- name: range
-disabled: false
-- name: receiver-naming
-disabled: false
-- name: redefines-builtin-id
-disabled: true
-- name: superfluous-else
-disabled: true
-- name: time-naming
-disabled: false
-- name: unexported-return
-disabled: false
-- name: unreachable-code
-disabled: true
-- name: unused-parameter
-disabled: true
-- name: var-declaration
-disabled: false
-- name: var-naming
-disabled: false
-stylecheck:
-# Only enable the checks performed by the staticcheck stand-alone tool,
-# as documented here: https://staticcheck.io/docs/configuration/options/#checks
-checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
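Both versions of this linter configuration are driven through golangci-lint. A minimal sketch of running it locally against the repository, assuming golangci-lint is installed and you are in the project root (these are the commands CONTRIBUTING.md refers to):

```bash
#!/usr/bin/env bash
set -euo pipefail

# Run the configured linters over the whole module, as the CI code quality test does.
golangci-lint run ./...

# Or use the repository's make wrapper for the same checks.
make check
```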
454 CONTRIBUTING.md

@@ -1,8 +1,8 @@
-# Contributing to rclone
+# Contributing to rclone #

 This is a short guide on how to contribute things to rclone.

-## Reporting a bug
+## Reporting a bug ##

 If you've just got a question or aren't sure if you've found a bug
 then please use the [rclone forum](https://forum.rclone.org/) instead
@@ -12,172 +12,95 @@ When filing an issue, please include the following information if
 possible as well as a description of the problem. Make sure you test
 with the [latest beta of rclone](https://beta.rclone.org/):

-- Rclone version (e.g. output from `rclone version`)
+* Rclone version (e.g. output from `rclone -V`)
-- Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
+* Which OS you are using and how many bits (e.g. Windows 7, 64 bit)
-- The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
+* The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
-- A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
+* A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
-- if the log contains secrets then edit the file with a text editor first to obscure them
+* if the log contains secrets then edit the file with a text editor first to obscure them

-## Submitting a new feature or bug fix
+## Submitting a pull request ##

 If you find a bug that you'd like to fix, or a new feature that you'd
 like to implement then please submit a pull request via GitHub.

-If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues) first so it can be discussed.
+If it is a big feature then make an issue first so it can be discussed.

-To prepare your pull request first press the fork button on [rclone's GitHub
+You'll need a Go environment set up with GOPATH set. See [the Go
+getting started docs](https://golang.org/doc/install) for more info.

+First in your web browser press the fork button on [rclone's GitHub
 page](https://github.com/rclone/rclone).

-Then [install Git](https://git-scm.com/downloads) and set your public contribution [name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git) and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).
+Now in your terminal

-Next open your terminal, change directory to your preferred folder and initialise your local rclone project:

 git clone https://github.com/rclone/rclone.git
 cd rclone
 git remote rename origin upstream
-# if you have SSH keys setup in your GitHub account:
 git remote add origin git@github.com:YOURUSER/rclone.git
-# otherwise:
-git remote add origin https://github.com/YOURUSER/rclone.git

-Note that most of the terminal commands in the rest of this guide must be executed from the rclone folder created above.

-Now [install Go](https://golang.org/doc/install) and verify your installation:

-go version

-Great, you can now compile and execute your own version of rclone:

 go build
-./rclone version

-(Note that you can also replace `go build` with `make`, which will include a
+Make a branch to add your new feature
-more accurate version number in the executable as well as enable you to specify
-more build options.) Finally make a branch to add your new feature

 git checkout -b my-new-feature

 And get hacking.

-You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins) and a quick view on the rclone [code organisation](#code-organisation).
+When ready - run the unit tests for the code you changed

-When ready - test the affected functionality and run the unit tests for the code you changed

-cd folder/with/changed/files
 go test -v

 Note that you may need to make a test remote, e.g. `TestSwift` for some
 of the unit tests.

-This is typically enough if you made a simple bug fix, otherwise please read the rclone [testing](#testing) section too.
+Note the top level Makefile targets

+* make check
+* make test

+Both of these will be run by Travis when you make a pull request but
+you can do this yourself locally too. These require some extra go
+packages which you can install with

+* make build_dep

 Make sure you

-- Add [unit tests](#testing) for a new feature.
+* Add [documentation](#writing-documentation) for a new feature.
-- Add [documentation](#writing-documentation) for a new feature.
+* Follow the [commit message guidelines](#commit-messages).
-- [Commit your changes](#committing-your-changes) using the [commit message guidelines](#commit-messages).
+* Add [unit tests](#testing) for a new feature
+* squash commits down to one per feature
+* rebase to master with `git rebase master`

-When you are done with that push your changes to GitHub:
+When you are done with that

 git push -u origin my-new-feature

-and open the GitHub website to [create your pull
+Go to the GitHub website and click [Create pull
 request](https://help.github.com/articles/creating-a-pull-request/).

-Your changes will then get reviewed and you might get asked to fix some stuff. If so, then make the changes in the same branch, commit and push your updates to GitHub.
+You patch will get reviewed and you might get asked to fix some stuff.

-You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
+If so, then make the changes in the same branch, squash the commits (make multiple commits one commit) by running:
+```
+git log # See how many commits you want to squash
+git reset --soft HEAD~2 # This squashes the 2 latest commits together.
+git status # Check what will happen, if you made a mistake resetting, you can run git reset 'HEAD@{1}' to undo.
+git commit # Add a new commit message.
+git push --force # Push the squashed commit to your GitHub repo.
+# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also recommends wizardzines.com
+```

-## Using Git and GitHub
+## CI for your fork ##

-### Committing your changes

-Follow the guideline for [commit messages](#commit-messages) and then:

-git checkout my-new-feature # To switch to your branch
-git status # To see the new and changed files
-git add FILENAME # To select FILENAME for the commit
-git status # To verify the changes to be committed
-git commit # To do the commit
-git log # To verify the commit. Use q to quit the log

-You can modify the message or changes in the latest commit using:

-git commit --amend

-If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).

-### Replacing your previously pushed commits

-Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.

-Your previously pushed commits are replaced by:

-git push --force origin my-new-feature

-### Basing your changes on the latest master

-To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):

-git checkout master
-git fetch upstream
-git merge --ff-only
-git push origin --follow-tags # optional update of your fork in GitHub
-git checkout my-new-feature
-git rebase master

-If you rebase commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).

-### Squashing your commits ###

-To combine your commits into one commit:

-git log # To count the commits to squash, e.g. the last 2
-git reset --soft HEAD~2 # To undo the 2 latest commits
-git status # To check everything is as expected

-If everything is fine, then make the new combined commit:

-git commit # To commit the undone commits as one

-otherwise, you may roll back using:

-git reflog # To check that HEAD{1} is your previous state
-git reset --soft 'HEAD@{1}' # To roll back to your previous state

-If you squash commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).

-Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.

-### GitHub Continuous Integration

 rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.

-## Testing
+## Testing ##

-### Code quality tests

-If you install [golangci-lint](https://github.com/golangci/golangci-lint) then you can run the same tests as get run in the CI which can be very helpful.

-You can run them with `make check` or with `golangci-lint run ./...`.

-Using these tests ensures that the rclone codebase all uses the same coding standards. These tests also check for easy mistakes to make (like forgetting to check an error return).

-### Quick testing

 rclone's tests are run from the go testing framework, so at the top
 level you can run this to run all the tests.

 go test -v ./...

-You can also use `make`, if supported by your platform

-make quicktest

-The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.

-### Backend testing

 rclone contains a mixture of unit tests and integration tests.
 Because it is difficult (and in some respects pointless) to test cloud
 storage systems by mocking all their interfaces, rclone unit tests can
@@ -211,71 +134,62 @@ project root:
 go install github.com/rclone/rclone/fstest/test_all
 test_all -backend drive

-### Full integration testing

 If you want to run all the integration tests against all the remotes,
 then change into the project root and run

-make check
 make test

-The commands may require some extra go packages which you can install with
+This command is run daily on the integration test server. You can

-make build_dep

-The full integration tests are run daily on the integration test server. You can
 find the results at https://pub.rclone.org/integration-tests/

-## Code Organisation
+## Code Organisation ##

 Rclone code is organised into a small number of top level directories
 with modules beneath.

-- backend - the rclone backends for interfacing to cloud providers -
+* backend - the rclone backends for interfacing to cloud providers -
-- all - import this to load all the cloud providers
+* all - import this to load all the cloud providers
-- ...providers
+* ...providers
-- bin - scripts for use while building or maintaining rclone
+* bin - scripts for use while building or maintaining rclone
-- cmd - the rclone commands
+* cmd - the rclone commands
-- all - import this to load all the commands
+* all - import this to load all the commands
-- ...commands
+* ...commands
-- cmdtest - end-to-end tests of commands, flags, environment variables,...
+* docs - the documentation and website
-- docs - the documentation and website
+* content - adjust these docs only - everything else is autogenerated
-- content - adjust these docs only - everything else is autogenerated
+* command - these are auto generated - edit the corresponding .go file
-- command - these are auto-generated - edit the corresponding .go file
+* fs - main rclone definitions - minimal amount of code
-- fs - main rclone definitions - minimal amount of code
+* accounting - bandwidth limiting and statistics
-- accounting - bandwidth limiting and statistics
+* asyncreader - an io.Reader which reads ahead
-- asyncreader - an io.Reader which reads ahead
+* config - manage the config file and flags
-- config - manage the config file and flags
+* driveletter - detect if a name is a drive letter
-- driveletter - detect if a name is a drive letter
+* filter - implements include/exclude filtering
-- filter - implements include/exclude filtering
+* fserrors - rclone specific error handling
-- fserrors - rclone specific error handling
+* fshttp - http handling for rclone
-- fshttp - http handling for rclone
+* fspath - path handling for rclone
-- fspath - path handling for rclone
+* hash - defines rclone's hash types and functions
-- hash - defines rclone's hash types and functions
+* list - list a remote
-- list - list a remote
+* log - logging facilities
-- log - logging facilities
+* march - iterates directories in lock step
-- march - iterates directories in lock step
+* object - in memory Fs objects
-- object - in memory Fs objects
+* operations - primitives for sync, e.g. Copy, Move
-- operations - primitives for sync, e.g. Copy, Move
+* sync - sync directories
-- sync - sync directories
+* walk - walk a directory
-- walk - walk a directory
+* fstest - provides integration test framework
-- fstest - provides integration test framework
+* fstests - integration tests for the backends
-- fstests - integration tests for the backends
+* mockdir - mocks an fs.Directory
-- mockdir - mocks an fs.Directory
+* mockobject - mocks an fs.Object
-- mockobject - mocks an fs.Object
+* test_all - Runs integration tests for everything
-- test_all - Runs integration tests for everything
+* graphics - the images used in the website, etc.
-- graphics - the images used in the website, etc.
+* lib - libraries used by the backend
-- lib - libraries used by the backend
+* atexit - register functions to run when rclone exits
-- atexit - register functions to run when rclone exits
+* dircache - directory ID to name caching
-- dircache - directory ID to name caching
+* oauthutil - helpers for using oauth
-- oauthutil - helpers for using oauth
+* pacer - retries with backoff and paces operations
-- pacer - retries with backoff and paces operations
+* readers - a selection of useful io.Readers
-- readers - a selection of useful io.Readers
+* rest - a thin abstraction over net/http for REST
-- rest - a thin abstraction over net/http for REST
+* vfs - Virtual FileSystem layer for implementing rclone mount and similar
-- librclone - in memory interface to rclone's API for embedding rclone
-- vfs - Virtual FileSystem layer for implementing rclone mount and similar

-## Writing Documentation
+## Writing Documentation ##

 If you are adding a new feature then please update the documentation.

@@ -284,49 +198,28 @@ If you add a new general flag (not for a backend), then document it in
|
|||||||
alphabetical order.
|
alphabetical order.
|
||||||
|
|
||||||
If you add a new backend option/flag, then it should be documented in
|
If you add a new backend option/flag, then it should be documented in
|
||||||
the source file in the `Help:` field.
|
the source file in the `Help:` field. The first line of this is used
|
||||||
|
for the flag help, the remainder is shown to the user in `rclone
|
||||||
- Start with the most important information about the option,
|
config` and is added to the docs with `make backenddocs`.
|
||||||
as a single sentence on a single line.
|
|
||||||
- This text will be used for the command-line flag help.
|
|
||||||
- It will be combined with other information, such as any default value,
|
|
||||||
and the result will look odd if not written as a single sentence.
|
|
||||||
- It should end with a period/full stop character, which will be shown
|
|
||||||
in docs but automatically removed when producing the flag help.
|
|
||||||
- Try to keep it below 80 characters, to reduce text wrapping in the terminal.
|
|
||||||
- More details can be added in a new paragraph, after an empty line (`"\n\n"`).
|
|
||||||
- Like with docs generated from Markdown, a single line break is ignored
|
|
||||||
and two line breaks creates a new paragraph.
|
|
||||||
- This text will be shown to the user in `rclone config`
|
|
||||||
and in the docs (where it will be added by `make backenddocs`,
|
|
||||||
normally run some time before next release).
|
|
||||||
- To create options of enumeration type use the `Examples:` field.
|
|
||||||
- Each example value have their own `Help:` field, but they are treated
|
|
||||||
a bit different than the main option help text. They will be shown
|
|
||||||
as an unordered list, therefore a single line break is enough to
|
|
||||||
create a new list item. Also, for enumeration texts like name of
|
|
||||||
countries, it looks better without an ending period/full stop character.
|
|
||||||
|
|
||||||
The only documentation you need to edit are the `docs/content/*.md`
|
The only documentation you need to edit are the `docs/content/*.md`
|
||||||
files. The `MANUAL.*`, `rclone.1`, website, etc. are all auto-generated
|
files. The `MANUAL.*`, `rclone.1`, web site, etc. are all auto generated
|
||||||
from those during the release process. See the `make doc` and `make
|
from those during the release process. See the `make doc` and `make
|
||||||
website` targets in the Makefile if you are interested in how. You
|
website` targets in the Makefile if you are interested in how. You
|
||||||
don't need to run these when adding a feature.
|
don't need to run these when adding a feature.
|
||||||
|
|
||||||
Documentation for rclone sub commands is with their code, e.g.
`cmd/ls/ls.go`. Write flag help strings as a single sentence on a single
line, without a period/full stop character at the end, as it will be
combined unmodified with other information (such as any default value).

Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
for small changes in the docs which makes it very easy.

## Making a release

There are separate instructions for making a release in the RELEASE.md
file.

## Commit messages

Please make the first line of your commit message a summary of the
change that a user (not a developer) of rclone would like to read.

And here is an example of a longer one:

```
mount: fix hang on errored upload

In certain circumstances, if an upload failed then the mount could hang
indefinitely. This was fixed by closing the read pipe after the Put
completed. This will cause the write side to return a pipe closed
error fixing the hang.

Fixes #1498
```

## Adding a dependency

rclone uses the [go
modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
support to manage its dependencies.

To add a dependency `github.com/ncw/new_dependency` see the
instructions below. These will fetch the dependency and add it to
`go.mod` and `go.sum`.

    go get github.com/ncw/new_dependency

You can add constraints on that package when doing `go get` (see the
go docs linked above), but don't unless you really need to.

Please check in the changes generated by `go mod` including `go.mod`
and `go.sum` in the same commit as your other changes.

## Updating a dependency

If you need to update a dependency then run

    go get golang.org/x/crypto

Check in a single commit as above.

## Updating all the dependencies

In order to update all the dependencies then run `make update`. This
just uses the go modules to update all the modules to their latest
stable release. Check in the changes in a single commit as above.

This should be done early in the release cycle to pick up new versions
of packages in time for them to get some testing.

## Updating a backend

If you update a backend then please run the unit tests and the
integration tests for that backend.

The next section goes into more detail about the tests.

## Writing a new backend

Choose a name. The docs here will use `remote` as an example.

Note that in rclone terminology a file system backend is called a
remote or an fs.

### Research

- Look at the interfaces defined in `fs/types.go`
- Study one or more of the existing remotes

### Getting going

- Create `backend/remote/remote.go` (copy this from a similar remote - a trimmed skeleton is sketched after this list)
  - box is a good one to start from if you have a directory-based remote (and shows how to use the directory cache)
  - b2 is a good one to start from if you have a bucket-based remote
- Add your remote to the imports in `backend/all/all.go`
- HTTP based remotes are easiest to maintain if they use rclone's [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but if there is a really good Go SDK from the provider then use that instead.
- Try to implement as many optional methods as possible as it makes the remote more usable.
- Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to make sure we can encode any path name and `rclone info` to help determine the encodings needed
  - `rclone purge -v TestRemote:rclone-info`
  - `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
  - `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
  - open `remote.csv` in a spreadsheet and examine

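To show roughly what the copied `backend/remote/remote.go` starts from, here is a heavily trimmed, hypothetical registration skeleton; the backend name, description and the single option are placeholders, and a real backend goes on to implement `NewFs` and the `fs.Fs`/`fs.Object` methods:

```go
package remote

import (
	"context"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
)

// init registers the backend so that "type = remote" can be used in
// the rclone config file.
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "remote",
		Description: "Example remote",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name: "endpoint",
			Help: "Endpoint for the service.\n\nLeave blank to use the default.",
		}},
	})
}

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse the config from m and build the Fs here - copied and
	// adapted from whichever existing backend you started with.
	panic("not implemented")
}
```
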
### Guidelines for a speedy merge

- **Do** use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) if you are implementing a REST like backend and parsing XML/JSON in the backend.
- **Do** use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp) if your backend is HTTP based - this adds features like `--dump bodies`, `--tpslimit`, `--user-agent` without you having to code anything! (see the sketch below)
- **Do** follow your example backend exactly - use the same code order, function names, layout, structure. **Don't** move stuff around and **Don't** delete the comments.
- **Do not** split your backend up into `fs.go` and `object.go` (there are a few backends like that - don't follow them!)
- **Do** put your API type definitions in a separate file - by preference `api/types.go`
- **Remember** we have >50 backends to maintain so keeping them as similar as possible to each other is a high priority!

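For example, an HTTP based backend typically builds its REST client on top of `fs/fshttp` so the global HTTP flags work for free; a minimal sketch (the `endpoint` parameter is only for illustration) might look like this:

```go
package remote

import (
	"context"

	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/lib/rest"
)

// newClient returns a rest.Client that goes through rclone's HTTP
// machinery, so flags like --dump bodies, --tpslimit and --user-agent
// apply without any extra code in the backend.
func newClient(ctx context.Context, endpoint string) *rest.Client {
	httpClient := fshttp.NewClient(ctx)
	return rest.NewClient(httpClient).SetRoot(endpoint)
}
```
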
### Unit tests

- Create a config entry called `TestRemote` for the unit tests to use
- Create a `backend/remote/remote_test.go` - copy and adjust your example remote (sketched below)
- Make sure all tests pass with `go test -v`

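The copied `remote_test.go` usually boils down to handing control to rclone's standard backend test suite; a minimal sketch (the backend import path and `Object` type are placeholders for your backend) looks like this:

```go
package remote_test

import (
	"testing"

	"github.com/rclone/rclone/backend/remote" // placeholder backend path
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs rclone's standard backend test suite against
// the "TestRemote:" config entry created above.
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestRemote:",
		NilObject:  (*remote.Object)(nil),
	})
}
```
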
### Integration tests

- Add your backend to `fstest/test_all/config.yaml`
- Once you've done that then you can use the integration test framework from the project root:
  - go install ./...
  - test_all -backends remote

Or if you want to run the integration tests manually:

- Make sure integration tests pass with
  - `cd fs/operations`
  - `go test -v -remote TestRemote:`
  - `cd fs/sync`
  - `go test -v -remote TestRemote:`
- If your remote defines `ListR` check with this also
  - `go test -v -remote TestRemote: -fast-list`

See the [testing](#testing) section for more information on integration tests.

### Backend documentation

Add your backend to the docs - you'll need to pick an icon for it from
[fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
alphabetical order of full name of remote (e.g. `drive` is ordered as
`Google Drive`) but with the local file system last.

- `README.md` - main GitHub page
- `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
  - make sure this has the `autogenerated options` comments in (see your reference backend docs)
  - update them in your backend with `bin/make_backend_docs.py remote`
- `docs/content/overview.md` - overview docs
- `docs/content/docs.md` - list of remotes in config section
- `docs/content/_index.md` - front page of rclone.org
- `docs/layouts/chrome/navbar.html` - add it to the website navigation
- `bin/make_manual.py` - add the page to the `docs` constant

Once you've written the docs, run `make serve` and check they look OK
in the web browser and the links (internal and external) all work.

## Adding a new s3 provider

It is quite easy to add a new S3 provider to rclone.

You'll need to modify the following files

- `backend/s3/s3.go`
  - Add the provider to `providerOption` at the top of the file (see the sketch after this list)
  - Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
  - Exclude your provider from generic config questions (e.g. `region` and `endpoint`).
  - Add the provider to the `setQuirks` function - see the documentation there.
- `docs/content/s3.md`
  - Add the provider at the top of the page.
  - Add a section about the provider linked from there.
  - Add a transcript of a trial `rclone config` session
  - Edit the transcript to remove things which might change in subsequent versions
  - **Do not** alter or add to the autogenerated parts of `s3.md`
  - **Do not** run `make backenddocs` or `bin/make_backend_docs.py s3`
- `README.md` - this is the home page in github
  - Add the provider and a link to the section you wrote in `docs/content/s3.md`
- `docs/content/_index.md` - this is the home page of rclone.org
  - Add the provider and a link to the section you wrote in `docs/content/s3.md`

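To give an idea of what the first item means, adding a provider to the provider option is just one more example value, roughly like this (a sketch only - the new provider name and help text are invented, and the exact shape of `providerOption` in `s3.go` may differ):

```go
package s3

import "github.com/rclone/rclone/fs"

// A trimmed-down sketch of the "provider" option - the real one in
// backend/s3/s3.go has many more entries. New providers are kept in
// alphabetical order with AWS first and Other last.
var providerOption = fs.Option{
	Name: "provider",
	Help: "Choose your S3 provider.",
	Examples: []fs.OptionExample{{
		Value: "AWS",
		Help:  "Amazon Web Services (AWS) S3",
	}, {
		Value: "ExampleCloud", // hypothetical new provider
		Help:  "ExampleCloud Object Storage",
	}, {
		Value: "Other",
		Help:  "Any other S3 compatible provider",
	}},
}
```
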
When adding the provider, endpoints, quirks, docs etc keep them in
alphabetical order by `Provider` name, but with `AWS` first and
`Other` last.

Once you've written the docs, run `make serve` and check they look OK
in the web browser and the links (internal and external) all work.

Once you've written the code, test `rclone config` works to your
satisfaction, and check the integration tests work `go test -v -remote
NewS3Provider:`. You may need to adjust the quirks to get them to
pass. Some providers just can't pass the tests with control characters
in the names so if these fail and the provider doesn't support
`urlEncodeListings` in the quirks then ignore them. Note that the
`SetTier` test may also fail on non AWS providers.

For an example of adding an s3 provider see [eb3082a1](https://github.com/rclone/rclone/commit/eb3082a1ebdb76d5625f14cedec3f5154a5e7b10).

## Writing a plugin

New features (backends, commands) can also be added "out-of-tree", through Go plugins.
Changes will be kept in a dynamically loaded file instead of being compiled into the main binary.
This is useful if you can't merge your changes upstream or don't want to maintain a fork of rclone.

### Usage

- Naming
  - Plugin names must have the pattern `librcloneplugin_KIND_NAME.so`.
- Plugins must be compiled against the exact version of rclone to work.
  (The rclone used during building the plugin must be the same as the source of rclone)

### Building

To turn your existing additions into a Go plugin, move them to an external repository
and change the top-level package name to `main`.

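As a rough illustration of what such an out-of-tree plugin looks like (this is a sketch, not the documented build recipe, and the module path is invented), the entry point can be as small as this:

```go
// Package main is required for a Go plugin built with
// "go build -buildmode=plugin". The blank import runs the backend's
// init(), which registers it with rclone when the plugin is loaded.
package main

import (
	_ "github.com/example/rclone-plugin-backend/remote" // hypothetical out-of-tree backend
)

// main is never called when the file is loaded as a plugin.
func main() {}
```
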
Dockerfile

FROM golang:alpine AS builder

COPY . /go/src/github.com/rclone/rclone/
WORKDIR /go/src/github.com/rclone/rclone/

RUN apk add --no-cache make bash gawk git
RUN \
  CGO_ENABLED=0 \
  make

RUN ./rclone version

# Begin final image
FROM alpine:latest

RUN apk --no-cache add ca-certificates fuse3 tzdata && \
    echo "user_allow_other" >> /etc/fuse.conf

COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/

MAINTAINERS.md

Current active maintainers of rclone are:

| Ivan Andreev | @ivandeex | chunker & mailru backends |
| Max Sum | @Max-Sum | union backend |
| Fred | @creativeprojects | seafile backend |
| Caleb Case | @calebcase | storj backend |
| wiserain | @wiserain | pikpak backend |
| albertony | @albertony | |
| Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
| Hideo Aoyama | @boukendesho | snap packaging |
| nielash | @nielash | bisync |

**This is a work in progress Draft**

This is a guide for how to be an rclone maintainer. This is mostly a write-up of what I (@ncw) attempt to do.

## Triaging Tickets ##

When a ticket comes in it should be triaged. This means it should be classified by assigning labels and a milestone.

Rclone uses the labels like this:

* `bug` - a definitely verified bug
* `can't reproduce` - a problem which we can't reproduce
* `doc fix` - a bug in the documentation - if users need help understanding the docs add this label
* `duplicate` - normally close these and ask the user to subscribe to the original
* `enhancement: new remote` - a new rclone backend
* `enhancement` - a new feature
* `FUSE` - to do with `rclone mount` command
* `good first issue` - mark these if you find a small self-contained issue - these get shown to new visitors to the project
* `help wanted` - mark these if you find a self-contained issue - these get shown to new visitors to the project
* `IMPORTANT` - note to maintainers not to forget to fix this for the release
* `maintenance` - internal enhancement, code re-organisation, etc.
* `Needs Go 1.XX` - waiting for that version of Go to be released

The milestones have these meanings:

* v1.XX - stuff we would like to fit into this release
* v1.XX+1 - stuff we are leaving until the next release
* Soon - stuff we think is a good idea - waiting to be scheduled for a release
* Help wanted - blue sky stuff that might get moved up, or someone could help with
* Known bugs - bugs waiting on external factors or we aren't going to fix for the moment

Close tickets as soon as you can - make sure they are tagged with a release.

Try to process pull requests promptly!

Merging pull requests on GitHub itself works quite well nowadays so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.

After merging the commit, in your local master branch, do `git pull` then run `bin/update-authors.py` to update the authors file then `git push`.

Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer.

High impact regressions should be fixed before the next release.

Near the start of the release cycle, the dependencies should be updated with `make update` to give time for bugs to surface.

Towards the end of the release cycle try not to merge anything too big so let things settle down.

Follow the instructions in RELEASE.md for making the release. Note that the testing part is the most time-consuming, often needing several rounds of test and fix depending on exactly how many new features rclone has gained.

## Mailing list ##

There is now an invite-only mailing list for rclone developers `rclone-dev` on google groups.

## TODO ##

MANUAL.html (generated, 42976 changed lines) - diff suppressed because it is too large
MANUAL.txt (generated, 48144 changed lines) - diff suppressed because it is too large

Makefile (68 changed lines)

ifdef RELEASE_TAG
    TAG := $(RELEASE_TAG)
endif
GO_VERSION := $(shell go version)
GO_OS := $(shell go env GOOS)
ifdef BETA_SUBDIR
    BETA_SUBDIR := /$(BETA_SUBDIR)
endif

.PHONY: rclone test_all vars version

rclone:
ifeq ($(GO_OS),windows)
    go run bin/resource_windows.go -version $(TAG) -syso resource_windows_`go env GOARCH`.syso
endif
    go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS)
ifeq ($(GO_OS),windows)
    rm resource_windows_`go env GOARCH`.syso
endif
    mkdir -p `go env GOPATH`/bin/
    cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
    mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`

btest:
    @echo "[$(TAG)]($(BETA_URL)) on branch [$(BRANCH)](https://github.com/rclone/rclone/tree/$(BRANCH)) (uploaded in 15-30 mins)" | xclip -r -sel clip
    @echo "Copied markdown of beta release to clip board"

btesth:
    @echo "<a href="$(BETA_URL)">$(TAG)</a> on branch <a href="https://github.com/rclone/rclone/tree/$(BRANCH)">$(BRANCH)</a> (uploaded in 15-30 mins)" | xclip -r -sel clip -t text/html
    @echo "Copied beta release in HTML to clip board"

version:
    @echo '$(TAG)'

racequicktest:
    RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...

compiletest:
    RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -run XXX ./...

# Do source code quality checks
check: rclone
    @echo "-- START CODE QUALITY REPORT -------------------------------"

# Get the release dependencies we only install on linux
release_dep_linux:
    go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest

# Update dependencies
showupdates:
    @echo "*** Direct dependencies that could be updated ***"
    @GO111MODULE=on go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null

# Update direct dependencies only
updatedirect:
    GO111MODULE=on go get -d $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
    GO111MODULE=on go mod tidy

# Update direct and indirect dependencies and test dependencies
update:
    GO111MODULE=on go get -d -u -t ./...
    GO111MODULE=on go mod tidy

# Tidy the module dependencies

install: rclone
    install -d ${DESTDIR}/usr/bin
    install ${GOPATH}/bin/rclone ${DESTDIR}/usr/bin

clean:
    go clean ./...

startdev:
    @echo "Version is $(VERSION)"
    @echo "Next version is $(NEXT_VERSION)"
    echo -e "package fs\n\n// VersionTag of rclone\nvar VersionTag = \"$(NEXT_VERSION)\"\n" | gofmt > fs/versiontag.go
    echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
    echo "$(NEXT_VERSION)" > VERSION
    git commit -m "Start $(NEXT_VERSION)-DEV development" fs/versiontag.go VERSION docs/layouts/partials/version.html

startstable:
    @echo "Version is $(VERSION)"
    @echo "Next stable version is $(NEXT_PATCH_VERSION)"
    echo -e "package fs\n\n// VersionTag of rclone\nvar VersionTag = \"$(NEXT_PATCH_VERSION)\"\n" | gofmt > fs/versiontag.go
    echo -n "$(NEXT_PATCH_VERSION)" > docs/layouts/partials/version.html
    echo "$(NEXT_PATCH_VERSION)" > VERSION
    git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/versiontag.go VERSION docs/layouts/partials/version.html

winzip:
    zip -9 rclone-$(TAG).zip rclone.exe

# docker volume plugin
PLUGIN_USER ?= rclone
PLUGIN_TAG ?= latest
PLUGIN_BASE_TAG ?= latest
PLUGIN_ARCH ?= amd64
PLUGIN_IMAGE := $(PLUGIN_USER)/docker-volume-rclone:$(PLUGIN_TAG)
PLUGIN_BASE := $(PLUGIN_USER)/rclone:$(PLUGIN_BASE_TAG)
PLUGIN_BUILD_DIR := ./build/docker-plugin
PLUGIN_CONTRIB_DIR := ./contrib/docker-plugin/managed

docker-plugin-create:
    docker buildx inspect |grep -q /${PLUGIN_ARCH} || \
    docker run --rm --privileged tonistiigi/binfmt --install all
    rm -rf ${PLUGIN_BUILD_DIR}
    docker buildx build \
        --no-cache --pull \
        --build-arg BASE_IMAGE=${PLUGIN_BASE} \
        --platform linux/${PLUGIN_ARCH} \
        --output ${PLUGIN_BUILD_DIR}/rootfs \
        ${PLUGIN_CONTRIB_DIR}
    cp ${PLUGIN_CONTRIB_DIR}/config.json ${PLUGIN_BUILD_DIR}
    docker plugin rm --force ${PLUGIN_IMAGE} 2>/dev/null || true
    docker plugin create ${PLUGIN_IMAGE} ${PLUGIN_BUILD_DIR}

docker-plugin-push:
    docker plugin push ${PLUGIN_IMAGE}
    docker plugin rm ${PLUGIN_IMAGE}

docker-plugin: docker-plugin-create docker-plugin-push

README.md (59 changed lines)

[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)

[Website](https://rclone.org) |
[Documentation](https://rclone.org/docs/) |
[Download](https://rclone.org/downloads/) |
[Contributing](CONTRIBUTING.md) |
[Changelog](https://rclone.org/changelog/) |
[Installation](https://rclone.org/install/) |

[Build status](https://github.com/rclone/rclone/actions?query=workflow%3Abuild)
[Go report card](https://goreportcard.com/report/github.com/rclone/rclone)
[GoDoc](https://godoc.org/github.com/rclone/rclone)
[Docker pulls](https://hub.docker.com/r/rclone/rclone)

# Rclone

Rclone *("rsync for cloud storage")* is a command-line program to sync files and directories to and from different cloud storage providers.

## Storage providers

* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
* Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
* Box [:page_facing_up:](https://rclone.org/box/)
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
* China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
* Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
* HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
* HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
* Linkbox [:page_facing_up:](https://rclone.org/linkbox)
* Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
* Memory [:page_facing_up:](https://rclone.org/memory/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
* Microsoft Azure Files Storage [:page_facing_up:](https://rclone.org/azurefiles/)
* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
* OVH [:page_facing_up:](https://rclone.org/swift/)
* Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
* PikPak [:page_facing_up:](https://rclone.org/pikpak/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
* Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* Storj [:page_facing_up:](https://rclone.org/storj/)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
* Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
* The local filesystem [:page_facing_up:](https://rclone.org/local/)

Please see [the full list of all storage providers and their features](https://rclone.org/overview/)

### Virtual storage providers

These backends adapt or modify other storage providers

* Alias: rename existing remotes [:page_facing_up:](https://rclone.org/alias/)
* Cache: cache remotes (DEPRECATED) [:page_facing_up:](https://rclone.org/cache/)
* Chunker: split large files [:page_facing_up:](https://rclone.org/chunker/)
* Combine: combine multiple remotes into a directory tree [:page_facing_up:](https://rclone.org/combine/)
* Compress: compress files [:page_facing_up:](https://rclone.org/compress/)
* Crypt: encrypt files [:page_facing_up:](https://rclone.org/crypt/)
* Hasher: hash files [:page_facing_up:](https://rclone.org/hasher/)
* Union: join multiple remotes to work together [:page_facing_up:](https://rclone.org/union/)

## Features

* MD5/SHA-1 hashes checked at all times for file integrity
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
* Optional transparent compression ([Compress](https://rclone.org/compress/))
* Optional encryption ([Crypt](https://rclone.org/crypt/))
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
* Multi-threaded downloads to local disk
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDAV/FTP/SFTP/DLNA

## Installation & documentation

Please see the [rclone website](https://rclone.org/) for:

License
-------

This is free software under the terms of the MIT license (check the
[COPYING file](/COPYING) included in this package).

RELEASE.md (102 changed lines)

This file describes how to make the various kinds of releases

## Making a release

* git checkout master # see below for stable branch
* git pull # IMPORTANT
* git status - make sure everything is checked in
* Check GitHub actions build for master is Green
* make test # see integration test server or run locally
* git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX.0"
* make retag
* git push origin # without --follow-tags so it doesn't push the tag if it fails
* git push --follow-tags origin
* # Wait for the GitHub builds to complete then...
* make fetch_binaries
* make startdev # make startstable for stable branch
* # announce with forum post, twitter post, patreon post

## Update dependencies

Early in the next release cycle update the dependencies

* Review any pinned packages in go.mod and remove if possible
* make updatedirect
* make GOTAGS=cmount
* make compiletest
* git commit -a -v
* make update
* make GOTAGS=cmount
* make compiletest
* roll back any updates which didn't compile
* git commit -a -v --amend
* **NB** watch out for this changing the default go version in `go.mod`

Note that `make update` updates all direct and indirect dependencies
and there can occasionally be forwards compatibility problems with
doing that so it may be necessary to roll back dependencies to the
version specified by `make updatedirect` in order to get rclone to
build.

## Tidy beta

At some point after the release run

    bin/tidy-beta v1.55

where the version number is that of a couple ago to remove old beta binaries.

## Making a point release

First make the release branch. If this is a second point release then
this will be done already.

* git co -b ${BASE_TAG}-stable ${BASE_TAG}.0
* make startstable

Now

* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
* git push

## Sponsor logos

If updating the website note that the sponsor logos have been moved out of the main repository.

You will need to checkout `/docs/static/img/logos` from https://github.com/rclone/third-party-logos
which is a private repo containing artwork from sponsors.

## Update the website between releases

Create an update website branch based off the last release

    git co -b update-website

If the branch already exists, double check there are no commits that need saving.

Now reset the branch to the last release

    git reset --hard v1.64.0

Create the changes, check them in, test with `make serve` then

    make upload_test_website

Check out https://test.rclone.org and when happy

    make upload_website

Cherry pick any changes back to master and the stable branch if it is active.

## Making a manual build of docker

To do a basic build of rclone's docker image to debug builds locally:

```
docker buildx build --load -t rclone/rclone:testing --progress=plain .
docker run --rm rclone/rclone:testing version
```

To test the multiplatform build

```
docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 .
```

To make a full build then set the tags correctly and add `--push`

```
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
```

@@ -1,4 +1,3 @@
|
|||||||
// Package alias implements a virtual provider to rename existing remotes.
|
|
||||||
package alias
|
package alias
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -21,7 +20,7 @@ func init() {
|
|||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Options: []fs.Option{{
|
Options: []fs.Option{{
|
||||||
Name: "remote",
|
Name: "remote",
|
||||||
Help: "Remote or path to alias.\n\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
|
Help: "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
|
||||||
Required: true,
|
Required: true,
|
||||||
}},
|
}},
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -20,7 +20,7 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func prepare(t *testing.T, root string) {
|
func prepare(t *testing.T, root string) {
|
||||||
configfile.Install()
|
configfile.LoadConfig(context.Background())
|
||||||
|
|
||||||
// Configure the remote
|
// Configure the remote
|
||||||
config.FileSet(remoteName, "type", "alias")
|
config.FileSet(remoteName, "type", "alias")
|
||||||
@@ -1,4 +1,3 @@
-// Package all imports all the backends
 package all
 
 import (
@@ -6,12 +5,10 @@ import (
 	_ "github.com/rclone/rclone/backend/alias"
 	_ "github.com/rclone/rclone/backend/amazonclouddrive"
 	_ "github.com/rclone/rclone/backend/azureblob"
-	_ "github.com/rclone/rclone/backend/azurefiles"
 	_ "github.com/rclone/rclone/backend/b2"
 	_ "github.com/rclone/rclone/backend/box"
 	_ "github.com/rclone/rclone/backend/cache"
 	_ "github.com/rclone/rclone/backend/chunker"
-	_ "github.com/rclone/rclone/backend/combine"
 	_ "github.com/rclone/rclone/backend/compress"
 	_ "github.com/rclone/rclone/backend/crypt"
 	_ "github.com/rclone/rclone/backend/drive"
@@ -21,41 +18,29 @@ import (
 	_ "github.com/rclone/rclone/backend/ftp"
 	_ "github.com/rclone/rclone/backend/googlecloudstorage"
 	_ "github.com/rclone/rclone/backend/googlephotos"
-	_ "github.com/rclone/rclone/backend/hasher"
 	_ "github.com/rclone/rclone/backend/hdfs"
-	_ "github.com/rclone/rclone/backend/hidrive"
 	_ "github.com/rclone/rclone/backend/http"
-	_ "github.com/rclone/rclone/backend/imagekit"
-	_ "github.com/rclone/rclone/backend/internetarchive"
+	_ "github.com/rclone/rclone/backend/hubic"
 	_ "github.com/rclone/rclone/backend/jottacloud"
 	_ "github.com/rclone/rclone/backend/koofr"
-	_ "github.com/rclone/rclone/backend/linkbox"
 	_ "github.com/rclone/rclone/backend/local"
 	_ "github.com/rclone/rclone/backend/mailru"
 	_ "github.com/rclone/rclone/backend/mega"
 	_ "github.com/rclone/rclone/backend/memory"
-	_ "github.com/rclone/rclone/backend/netstorage"
 	_ "github.com/rclone/rclone/backend/onedrive"
 	_ "github.com/rclone/rclone/backend/opendrive"
-	_ "github.com/rclone/rclone/backend/oracleobjectstorage"
 	_ "github.com/rclone/rclone/backend/pcloud"
-	_ "github.com/rclone/rclone/backend/pikpak"
 	_ "github.com/rclone/rclone/backend/premiumizeme"
-	_ "github.com/rclone/rclone/backend/protondrive"
 	_ "github.com/rclone/rclone/backend/putio"
 	_ "github.com/rclone/rclone/backend/qingstor"
-	_ "github.com/rclone/rclone/backend/quatrix"
 	_ "github.com/rclone/rclone/backend/s3"
 	_ "github.com/rclone/rclone/backend/seafile"
 	_ "github.com/rclone/rclone/backend/sftp"
 	_ "github.com/rclone/rclone/backend/sharefile"
-	_ "github.com/rclone/rclone/backend/sia"
-	_ "github.com/rclone/rclone/backend/smb"
-	_ "github.com/rclone/rclone/backend/storj"
 	_ "github.com/rclone/rclone/backend/sugarsync"
 	_ "github.com/rclone/rclone/backend/swift"
+	_ "github.com/rclone/rclone/backend/tardigrade"
 	_ "github.com/rclone/rclone/backend/union"
-	_ "github.com/rclone/rclone/backend/uptobox"
 	_ "github.com/rclone/rclone/backend/webdav"
 	_ "github.com/rclone/rclone/backend/yandex"
 	_ "github.com/rclone/rclone/backend/zoho"
|||||||
@@ -14,15 +14,16 @@ we ignore assets completely!
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"log"
|
||||||
"net/http"
|
"net/http"
|
||||||
"path"
|
"path"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
acd "github.com/ncw/go-acd"
|
acd "github.com/ncw/go-acd"
|
||||||
|
"github.com/pkg/errors"
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fs/config"
|
"github.com/rclone/rclone/fs/config"
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
@@ -69,10 +70,11 @@ func init() {
|
|||||||
Prefix: "acd",
|
Prefix: "acd",
|
||||||
Description: "Amazon Drive",
|
Description: "Amazon Drive",
|
||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
Config: func(ctx context.Context, name string, m configmap.Mapper) {
|
||||||
return oauthutil.ConfigOut("", &oauthutil.Options{
|
err := oauthutil.Config(ctx, "amazon cloud drive", name, m, acdConfig, nil)
|
||||||
OAuth2Config: acdConfig,
|
if err != nil {
|
||||||
})
|
log.Fatalf("Failed to configure token: %v", err)
|
||||||
|
}
|
||||||
},
|
},
|
||||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||||
Name: "checkpoint",
|
Name: "checkpoint",
|
||||||
@@ -81,16 +83,16 @@ func init() {
|
|||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "upload_wait_per_gb",
|
Name: "upload_wait_per_gb",
|
||||||
Help: `Additional time per GiB to wait after a failed complete upload to see if it appears.
|
Help: `Additional time per GB to wait after a failed complete upload to see if it appears.
|
||||||
|
|
||||||
Sometimes Amazon Drive gives an error when a file has been fully
|
Sometimes Amazon Drive gives an error when a file has been fully
|
||||||
uploaded but the file appears anyway after a little while. This
|
uploaded but the file appears anyway after a little while. This
|
||||||
happens sometimes for files over 1 GiB in size and nearly every time for
|
happens sometimes for files over 1GB in size and nearly every time for
|
||||||
files bigger than 10 GiB. This parameter controls the time rclone waits
|
files bigger than 10GB. This parameter controls the time rclone waits
|
||||||
for the file to appear.
|
for the file to appear.
|
||||||
|
|
||||||
The default value for this parameter is 3 minutes per GiB, so by
|
The default value for this parameter is 3 minutes per GB, so by
|
||||||
default it will wait 3 minutes for every GiB uploaded to see if the
|
default it will wait 3 minutes for every GB uploaded to see if the
|
||||||
file appears.
|
file appears.
|
||||||
|
|
||||||
You can disable this feature by setting it to 0. This may cause
|
You can disable this feature by setting it to 0. This may cause
|
||||||
@@ -110,7 +112,7 @@ in this situation.`,
|
|||||||
|
|
||||||
Files this size or more will be downloaded via their "tempLink". This
|
Files this size or more will be downloaded via their "tempLink". This
|
||||||
is to work around a problem with Amazon Drive which blocks downloads
|
is to work around a problem with Amazon Drive which blocks downloads
|
||||||
of files bigger than about 10 GiB. The default for this is 9 GiB which
|
of files bigger than about 10GB. The default for this is 9GB which
|
||||||
shouldn't need to be changed.
|
shouldn't need to be changed.
|
||||||
|
|
||||||
To download files above this threshold, rclone requests a "tempLink"
|
To download files above this threshold, rclone requests a "tempLink"
|
||||||
@@ -259,7 +261,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
}
|
}
|
||||||
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, acdConfig, baseClient)
|
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, acdConfig, baseClient)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to configure Amazon Drive: %w", err)
|
return nil, errors.Wrap(err, "failed to configure Amazon Drive")
|
||||||
}
|
}
|
||||||
|
|
||||||
c := acd.NewClient(oAuthClient)
|
c := acd.NewClient(oAuthClient)
|
||||||
@@ -292,13 +294,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
return f.shouldRetry(ctx, resp, err)
|
return f.shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to get endpoints: %w", err)
|
return nil, errors.Wrap(err, "failed to get endpoints")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get rootID
|
// Get rootID
|
||||||
rootInfo, err := f.getRootInfo(ctx)
|
rootInfo, err := f.getRootInfo(ctx)
|
||||||
if err != nil || rootInfo.Id == nil {
|
if err != nil || rootInfo.Id == nil {
|
||||||
return nil, fmt.Errorf("failed to get root: %w", err)
|
return nil, errors.Wrap(err, "failed to get root")
|
||||||
}
|
}
|
||||||
f.trueRootID = *rootInfo.Id
|
f.trueRootID = *rootInfo.Id
|
||||||
|
|
||||||
@@ -435,7 +437,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, title string, directorie
|
|||||||
query += " AND kind:" + folderKind
|
query += " AND kind:" + folderKind
|
||||||
} else if filesOnly {
|
} else if filesOnly {
|
||||||
query += " AND kind:" + fileKind
|
query += " AND kind:" + fileKind
|
||||||
//} else {
|
} else {
|
||||||
// FIXME none of these work
|
// FIXME none of these work
|
||||||
//query += " AND kind:(" + fileKind + " OR " + folderKind + ")"
|
//query += " AND kind:(" + fileKind + " OR " + folderKind + ")"
|
||||||
//query += " AND (kind:" + fileKind + " OR kind:" + folderKind + ")"
|
//query += " AND (kind:" + fileKind + " OR kind:" + folderKind + ")"
|
||||||
@@ -556,9 +558,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
//
|
//
|
||||||
// This is a workaround for Amazon sometimes returning
|
// This is a workaround for Amazon sometimes returning
|
||||||
//
|
//
|
||||||
// - 408 REQUEST_TIMEOUT
|
// * 408 REQUEST_TIMEOUT
|
||||||
// - 504 GATEWAY_TIMEOUT
|
// * 504 GATEWAY_TIMEOUT
|
||||||
// - 500 Internal server error
|
// * 500 Internal server error
|
||||||
//
|
//
|
||||||
// At the end of large uploads. The speculation is that the timeout
|
// At the end of large uploads. The speculation is that the timeout
|
||||||
// is waiting for the sha1 hashing to complete and the file may well
|
// is waiting for the sha1 hashing to complete and the file may well
|
||||||
@@ -626,7 +628,7 @@ func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader,
|
|||||||
|
|
||||||
// Put the object into the container
|
// Put the object into the container
|
||||||
//
|
//
|
||||||
// Copy the reader in to the new object which is returned.
|
// Copy the reader in to the new object which is returned
|
||||||
//
|
//
|
||||||
// The new object may have been created if an error is returned
|
// The new object may have been created if an error is returned
|
||||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
@@ -685,9 +687,9 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
|||||||
|
|
||||||
// Move src to this remote using server-side move operations.
|
// Move src to this remote using server-side move operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given.
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
// It returns the destination Object and a possible error.
|
// It returns the destination Object and a possible error
|
||||||
//
|
//
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
@@ -1002,6 +1004,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
|
|||||||
|
|
||||||
// ModTime returns the modification time of the object
|
// ModTime returns the modification time of the object
|
||||||
//
|
//
|
||||||
|
//
|
||||||
// It attempts to read the objects mtime and if that isn't present the
|
// It attempts to read the objects mtime and if that isn't present the
|
||||||
// LastModified returned in the http headers
|
// LastModified returned in the http headers
|
||||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||||
@@ -1,6 +1,5 @@
 // Test AmazonCloudDrive filesystem interface
 
-//go:build acd
 // +build acd
 
 package amazonclouddrive_test
|
|||||||
(File diff suppressed because it is too large)
@@ -1,5 +1,4 @@
-//go:build !plan9 && !solaris && !js
-// +build !plan9,!solaris,!js
+// +build !plan9,!solaris,!js,go1.14
 
 package azureblob
 
@@ -20,18 +19,17 @@ func (f *Fs) InternalTest(t *testing.T) {
 
 func TestIncrement(t *testing.T) {
 	for _, test := range []struct {
-		in   [8]byte
-		want [8]byte
+		in   []byte
+		want []byte
 	}{
-		{[8]byte{0, 0, 0, 0}, [8]byte{1, 0, 0, 0}},
-		{[8]byte{0xFE, 0, 0, 0}, [8]byte{0xFF, 0, 0, 0}},
-		{[8]byte{0xFF, 0, 0, 0}, [8]byte{0, 1, 0, 0}},
-		{[8]byte{0, 1, 0, 0}, [8]byte{1, 1, 0, 0}},
-		{[8]byte{0xFF, 0xFF, 0xFF, 0xFE}, [8]byte{0, 0, 0, 0xFF}},
-		{[8]byte{0xFF, 0xFF, 0xFF, 0xFF}, [8]byte{0, 0, 0, 0, 1}},
-		{[8]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, [8]byte{0, 0, 0, 0, 0, 0, 0}},
+		{[]byte{0, 0, 0, 0}, []byte{1, 0, 0, 0}},
+		{[]byte{0xFE, 0, 0, 0}, []byte{0xFF, 0, 0, 0}},
+		{[]byte{0xFF, 0, 0, 0}, []byte{0, 1, 0, 0}},
+		{[]byte{0, 1, 0, 0}, []byte{1, 1, 0, 0}},
+		{[]byte{0xFF, 0xFF, 0xFF, 0xFE}, []byte{0, 0, 0, 0xFF}},
+		{[]byte{0xFF, 0xFF, 0xFF, 0xFF}, []byte{0, 0, 0, 0}},
 	} {
-		increment(&test.in)
+		increment(test.in)
 		assert.Equal(t, test.want, test.in)
 	}
 }
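The test above exercises azureblob's `increment` helper, which advances a little-endian nonce counter and wraps to zero when every byte overflows. A minimal standalone sketch consistent with the fixed-size test cases shown (an illustration, not the repository's exact code):

```go
package main

import "fmt"

// increment adds one to the little-endian counter in place, carrying into
// the next byte on overflow and wrapping to all zeros at the end.
func increment(xs *[8]byte) {
	for i, digit := range xs {
		newDigit := digit + 1
		xs[i] = newDigit
		if newDigit >= digit {
			// no carry needed
			break
		}
	}
}

func main() {
	counter := [8]byte{0xFF, 0xFF, 0xFF, 0xFE}
	increment(&counter)
	fmt.Println(counter) // [0 0 0 255 0 0 0 0]
}
```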
|
|||||||
@@ -1,15 +1,14 @@
|
|||||||
// Test AzureBlob filesystem interface
|
// Test AzureBlob filesystem interface
|
||||||
|
|
||||||
//go:build !plan9 && !solaris && !js
|
// +build !plan9,!solaris,!js,go1.14
|
||||||
// +build !plan9,!solaris,!js
|
|
||||||
|
|
||||||
package azureblob
|
package azureblob
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fstest"
|
|
||||||
"github.com/rclone/rclone/fstest/fstests"
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
@@ -19,28 +18,9 @@ func TestIntegration(t *testing.T) {
|
|||||||
fstests.Run(t, &fstests.Opt{
|
fstests.Run(t, &fstests.Opt{
|
||||||
RemoteName: "TestAzureBlob:",
|
RemoteName: "TestAzureBlob:",
|
||||||
NilObject: (*Object)(nil),
|
NilObject: (*Object)(nil),
|
||||||
TiersToTest: []string{"Hot", "Cool", "Cold"},
|
TiersToTest: []string{"Hot", "Cool"},
|
||||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||||
MinChunkSize: defaultChunkSize,
|
MaxChunkSize: maxChunkSize,
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestIntegration2 runs integration tests against the remote
|
|
||||||
func TestIntegration2(t *testing.T) {
|
|
||||||
if *fstest.RemoteName != "" {
|
|
||||||
t.Skip("Skipping as -remote set")
|
|
||||||
}
|
|
||||||
name := "TestAzureBlob"
|
|
||||||
fstests.Run(t, &fstests.Opt{
|
|
||||||
RemoteName: name + ":",
|
|
||||||
NilObject: (*Object)(nil),
|
|
||||||
TiersToTest: []string{"Hot", "Cool", "Cold"},
|
|
||||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
|
||||||
MinChunkSize: defaultChunkSize,
|
|
||||||
},
|
|
||||||
ExtraConfig: []fstests.ExtraConfigItem{
|
|
||||||
{Name: name, Key: "directory_markers", Value: "true"},
|
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -53,25 +33,32 @@ var (
|
|||||||
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestValidateAccessTier(t *testing.T) {
|
// TestServicePrincipalFileSuccess checks that, given a proper JSON file, we can create a token.
|
||||||
tests := map[string]struct {
|
func TestServicePrincipalFileSuccess(t *testing.T) {
|
||||||
accessTier string
|
ctx := context.TODO()
|
||||||
want bool
|
credentials := `
|
||||||
}{
|
{
|
||||||
"hot": {"hot", true},
|
"appId": "my application (client) ID",
|
||||||
"HOT": {"HOT", true},
|
"password": "my secret",
|
||||||
"Hot": {"Hot", true},
|
"tenant": "my active directory tenant ID"
|
||||||
"cool": {"cool", true},
|
}
|
||||||
"cold": {"cold", true},
|
`
|
||||||
"archive": {"archive", true},
|
tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
|
||||||
"empty": {"", false},
|
if assert.NoError(t, err) {
|
||||||
"unknown": {"unknown", false},
|
assert.NotNil(t, tokenRefresher)
|
||||||
}
|
|
||||||
|
|
||||||
for name, test := range tests {
|
|
||||||
t.Run(name, func(t *testing.T) {
|
|
||||||
got := validateAccessTier(test.accessTier)
|
|
||||||
assert.Equal(t, test.want, got)
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestServicePrincipalFileFailure checks that, given a JSON file with a missing secret, it returns an error.
|
||||||
|
func TestServicePrincipalFileFailure(t *testing.T) {
|
||||||
|
ctx := context.TODO()
|
||||||
|
credentials := `
|
||||||
|
{
|
||||||
|
"appId": "my application (client) ID",
|
||||||
|
"tenant": "my active directory tenant ID"
|
||||||
|
}
|
||||||
|
`
|
||||||
|
_, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
|
||||||
|
assert.Error(t, err)
|
||||||
|
assert.EqualError(t, err, "error creating service principal token: parameter 'secret' cannot be empty")
|
||||||
|
}
|
||||||
@@ -1,7 +1,6 @@
 // Build for azureblob for unsupported platforms to stop go complaining
 // about "no buildable Go source files "
 
-//go:build plan9 || solaris || js
-// +build plan9 solaris js
+// +build plan9 solaris js !go1.14
 
 package azureblob
|
|||||||
backend/azureblob/imds.go (new file, 137 lines)
@@ -0,0 +1,137 @@
|
|||||||
|
// +build !plan9,!solaris,!js,go1.14
|
||||||
|
|
||||||
|
package azureblob
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
|
||||||
|
"github.com/Azure/go-autorest/autorest/adal"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/rclone/rclone/fs"
|
||||||
|
"github.com/rclone/rclone/fs/fshttp"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
azureResource = "https://storage.azure.com"
|
||||||
|
imdsAPIVersion = "2018-02-01"
|
||||||
|
msiEndpointDefault = "http://169.254.169.254/metadata/identity/oauth2/token"
|
||||||
|
)
|
||||||
|
|
||||||
|
// This custom type is used to add the port the test server has bound to
|
||||||
|
// to the request context.
|
||||||
|
type testPortKey string
|
||||||
|
|
||||||
|
type msiIdentifierType int
|
||||||
|
|
||||||
|
const (
|
||||||
|
msiClientID msiIdentifierType = iota
|
||||||
|
msiObjectID
|
||||||
|
msiResourceID
|
||||||
|
)
|
||||||
|
|
||||||
|
type userMSI struct {
|
||||||
|
Type msiIdentifierType
|
||||||
|
Value string
|
||||||
|
}
|
||||||
|
|
||||||
|
type httpError struct {
|
||||||
|
Response *http.Response
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e httpError) Error() string {
|
||||||
|
return fmt.Sprintf("HTTP error %v (%v)", e.Response.StatusCode, e.Response.Status)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetMSIToken attempts to obtain an MSI token from the Azure Instance
|
||||||
|
// Metadata Service.
|
||||||
|
func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
|
||||||
|
// Attempt to get an MSI token; silently continue if unsuccessful.
|
||||||
|
// This code has been lovingly stolen from azcopy's OAuthTokenManager.
|
||||||
|
result := adal.Token{}
|
||||||
|
req, err := http.NewRequestWithContext(ctx, "GET", msiEndpointDefault, nil)
|
||||||
|
if err != nil {
|
||||||
|
fs.Debugf(nil, "Failed to create request: %v", err)
|
||||||
|
return result, err
|
||||||
|
}
|
||||||
|
params := req.URL.Query()
|
||||||
|
params.Set("resource", azureResource)
|
||||||
|
params.Set("api-version", imdsAPIVersion)
|
||||||
|
|
||||||
|
// Specify user-assigned identity if requested.
|
||||||
|
if identity != nil {
|
||||||
|
switch identity.Type {
|
||||||
|
case msiClientID:
|
||||||
|
params.Set("client_id", identity.Value)
|
||||||
|
case msiObjectID:
|
||||||
|
params.Set("object_id", identity.Value)
|
||||||
|
case msiResourceID:
|
||||||
|
params.Set("mi_res_id", identity.Value)
|
||||||
|
default:
|
||||||
|
// If this happens, the calling function and this one don't agree on
|
||||||
|
// what valid ID types exist.
|
||||||
|
return result, fmt.Errorf("unknown MSI identity type specified")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
req.URL.RawQuery = params.Encode()
|
||||||
|
|
||||||
|
// The Metadata header is required by all calls to IMDS.
|
||||||
|
req.Header.Set("Metadata", "true")
|
||||||
|
|
||||||
|
// If this function is run in a test, query the test server instead of IMDS.
|
||||||
|
testPort, isTest := ctx.Value(testPortKey("testPort")).(int)
|
||||||
|
if isTest {
|
||||||
|
req.URL.Host = fmt.Sprintf("localhost:%d", testPort)
|
||||||
|
req.Host = req.URL.Host
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send request
|
||||||
|
httpClient := fshttp.NewClient(ctx)
|
||||||
|
resp, err := httpClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return result, errors.Wrap(err, "MSI is not enabled on this VM")
|
||||||
|
}
|
||||||
|
defer func() { // resp and Body should not be nil
|
||||||
|
_, err = io.Copy(ioutil.Discard, resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
fs.Debugf(nil, "Unable to drain IMDS response: %v", err)
|
||||||
|
}
|
||||||
|
err = resp.Body.Close()
|
||||||
|
if err != nil {
|
||||||
|
fs.Debugf(nil, "Unable to close IMDS response: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
// Check if the status code indicates success
|
||||||
|
// The request returns 200 currently, add 201 and 202 as well for possible extension.
|
||||||
|
switch resp.StatusCode {
|
||||||
|
case 200, 201, 202:
|
||||||
|
break
|
||||||
|
default:
|
||||||
|
body, _ := ioutil.ReadAll(resp.Body)
|
||||||
|
fs.Errorf(nil, "Couldn't obtain OAuth token from IMDS; server returned status code %d and body: %v", resp.StatusCode, string(body))
|
||||||
|
return result, httpError{Response: resp}
|
||||||
|
}
|
||||||
|
|
||||||
|
b, err := ioutil.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return result, errors.Wrap(err, "Couldn't read IMDS response")
|
||||||
|
}
|
||||||
|
// Remove BOM, if any. azcopy does this so I'm following along.
|
||||||
|
b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
|
||||||
|
|
||||||
|
// This would be a good place to persist the token if a large number of rclone
|
||||||
|
// invocations are being made in a short amount of time. If the token is
|
||||||
|
// persisted, the azureblob code will need to check for expiry before every
|
||||||
|
// storage API call.
|
||||||
|
err = json.Unmarshal(b, &result)
|
||||||
|
if err != nil {
|
||||||
|
return result, errors.Wrap(err, "Couldn't unmarshal IMDS response")
|
||||||
|
}
|
||||||
|
|
||||||
|
return result, nil
|
||||||
|
}
|
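The code above queries the Azure Instance Metadata Service (IMDS) for a managed-identity token. Stripped of rclone's plumbing, the request it builds looks roughly like this sketch, which only works on an Azure VM with MSI enabled; the JSON field name is an assumption about the token response rather than something shown in the diff:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// imdsToken holds just the field we need from the assumed IMDS response shape.
type imdsToken struct {
	AccessToken string `json:"access_token"`
}

func fetchIMDSToken() (string, error) {
	req, err := http.NewRequest("GET", "http://169.254.169.254/metadata/identity/oauth2/token", nil)
	if err != nil {
		return "", err
	}
	q := req.URL.Query()
	q.Set("resource", "https://storage.azure.com")
	q.Set("api-version", "2018-02-01")
	req.URL.RawQuery = q.Encode()
	req.Header.Set("Metadata", "true") // required by all IMDS calls

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("MSI is not enabled on this VM: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("IMDS returned status %d", resp.StatusCode)
	}

	var token imdsToken
	if err := json.NewDecoder(resp.Body).Decode(&token); err != nil {
		return "", err
	}
	return token.AccessToken, nil
}

func main() {
	token, err := fetchIMDSToken()
	fmt.Println(len(token), err)
}
```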
||||||
backend/azureblob/imds_test.go (new file, 117 lines)
@@ -0,0 +1,117 @@
|
|||||||
|
// +build !plan9,!solaris,!js,go1.14
|
||||||
|
|
||||||
|
package azureblob
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/Azure/go-autorest/autorest/adal"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func handler(t *testing.T, actual *map[string]string) http.HandlerFunc {
|
||||||
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
err := r.ParseForm()
|
||||||
|
require.NoError(t, err)
|
||||||
|
parameters := r.URL.Query()
|
||||||
|
(*actual)["path"] = r.URL.Path
|
||||||
|
(*actual)["Metadata"] = r.Header.Get("Metadata")
|
||||||
|
(*actual)["method"] = r.Method
|
||||||
|
for paramName := range parameters {
|
||||||
|
(*actual)[paramName] = parameters.Get(paramName)
|
||||||
|
}
|
||||||
|
// Make response.
|
||||||
|
response := adal.Token{}
|
||||||
|
responseBytes, err := json.Marshal(response)
|
||||||
|
require.NoError(t, err)
|
||||||
|
_, err = w.Write(responseBytes)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestManagedIdentity(t *testing.T) {
|
||||||
|
// test user-assigned identity specifiers to use
|
||||||
|
testMSIClientID := "d859b29f-5c9c-42f8-a327-ec1bc6408d79"
|
||||||
|
testMSIObjectID := "9ffeb650-3ca0-4278-962b-5a38d520591a"
|
||||||
|
testMSIResourceID := "/subscriptions/fe714c49-b8a4-4d49-9388-96a20daa318f/resourceGroups/somerg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/someidentity"
|
||||||
|
tests := []struct {
|
||||||
|
identity *userMSI
|
||||||
|
identityParameterName string
|
||||||
|
expectedAbsent []string
|
||||||
|
}{
|
||||||
|
{&userMSI{msiClientID, testMSIClientID}, "client_id", []string{"object_id", "mi_res_id"}},
|
||||||
|
{&userMSI{msiObjectID, testMSIObjectID}, "object_id", []string{"client_id", "mi_res_id"}},
|
||||||
|
{&userMSI{msiResourceID, testMSIResourceID}, "mi_res_id", []string{"object_id", "client_id"}},
|
||||||
|
{nil, "(default)", []string{"object_id", "client_id", "mi_res_id"}},
|
||||||
|
}
|
||||||
|
alwaysExpected := map[string]string{
|
||||||
|
"path": "/metadata/identity/oauth2/token",
|
||||||
|
"resource": "https://storage.azure.com",
|
||||||
|
"Metadata": "true",
|
||||||
|
"api-version": "2018-02-01",
|
||||||
|
"method": "GET",
|
||||||
|
}
|
||||||
|
for _, test := range tests {
|
||||||
|
actual := make(map[string]string, 10)
|
||||||
|
testServer := httptest.NewServer(handler(t, &actual))
|
||||||
|
defer testServer.Close()
|
||||||
|
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
|
||||||
|
require.NoError(t, err)
|
||||||
|
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
|
||||||
|
_, err = GetMSIToken(ctx, test.identity)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Validate expected query parameters present
|
||||||
|
expected := make(map[string]string)
|
||||||
|
for k, v := range alwaysExpected {
|
||||||
|
expected[k] = v
|
||||||
|
}
|
||||||
|
if test.identity != nil {
|
||||||
|
expected[test.identityParameterName] = test.identity.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
for key := range expected {
|
||||||
|
value, exists := actual[key]
|
||||||
|
if assert.Truef(t, exists, "test of %s: query parameter %s was not passed",
|
||||||
|
test.identityParameterName, key) {
|
||||||
|
assert.Equalf(t, expected[key], value,
|
||||||
|
"test of %s: parameter %s has incorrect value", test.identityParameterName, key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate unexpected query parameters absent
|
||||||
|
for _, key := range test.expectedAbsent {
|
||||||
|
_, exists := actual[key]
|
||||||
|
assert.Falsef(t, exists, "query parameter %s was unexpectedly passed")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func errorHandler(resultCode int) http.HandlerFunc {
|
||||||
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
http.Error(w, "Test error generated", resultCode)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIMDSErrors(t *testing.T) {
|
||||||
|
errorCodes := []int{404, 429, 500}
|
||||||
|
for _, code := range errorCodes {
|
||||||
|
testServer := httptest.NewServer(errorHandler(code))
|
||||||
|
defer testServer.Close()
|
||||||
|
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
|
||||||
|
require.NoError(t, err)
|
||||||
|
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
|
||||||
|
_, err = GetMSIToken(ctx, nil)
|
||||||
|
require.Error(t, err)
|
||||||
|
httpErr, ok := err.(httpError)
|
||||||
|
require.Truef(t, ok, "HTTP error %d did not result in an httpError object", code)
|
||||||
|
assert.Equalf(t, httpErr.Response.StatusCode, code, "desired error %d but didn't get it", code)
|
||||||
|
}
|
||||||
|
}
|
||||||
(File diff suppressed because it is too large)
@@ -1,70 +0,0 @@
|
|||||||
//go:build !plan9 && !js
|
|
||||||
// +build !plan9,!js
|
|
||||||
|
|
||||||
package azurefiles
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"math/rand"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/fstest/fstests"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (f *Fs) InternalTest(t *testing.T) {
|
|
||||||
t.Run("Authentication", f.InternalTestAuth)
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ fstests.InternalTester = (*Fs)(nil)
|
|
||||||
|
|
||||||
func (f *Fs) InternalTestAuth(t *testing.T) {
|
|
||||||
t.Skip("skipping since this requires authentication credentials which are not part of repo")
|
|
||||||
shareName := "test-rclone-oct-2023"
|
|
||||||
testCases := []struct {
|
|
||||||
name string
|
|
||||||
options *Options
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "ConnectionString",
|
|
||||||
options: &Options{
|
|
||||||
ShareName: shareName,
|
|
||||||
ConnectionString: "",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "AccountAndKey",
|
|
||||||
options: &Options{
|
|
||||||
ShareName: shareName,
|
|
||||||
Account: "",
|
|
||||||
Key: "",
|
|
||||||
}},
|
|
||||||
{
|
|
||||||
name: "SASUrl",
|
|
||||||
options: &Options{
|
|
||||||
ShareName: shareName,
|
|
||||||
SASURL: "",
|
|
||||||
}},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range testCases {
|
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
|
||||||
fs, err := newFsFromOptions(context.TODO(), "TestAzureFiles", "", tc.options)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
dirName := randomString(10)
|
|
||||||
assert.NoError(t, fs.Mkdir(context.TODO(), dirName))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const chars = "abcdefghijklmnopqrstuvwzyxABCDEFGHIJKLMNOPQRSTUVWZYX"
|
|
||||||
|
|
||||||
func randomString(charCount int) string {
|
|
||||||
strBldr := strings.Builder{}
|
|
||||||
for i := 0; i < charCount; i++ {
|
|
||||||
randPos := rand.Int63n(52)
|
|
||||||
strBldr.WriteByte(chars[randPos])
|
|
||||||
}
|
|
||||||
return strBldr.String()
|
|
||||||
}
|
|
||||||
@@ -1,18 +0,0 @@
-//go:build !plan9 && !js
-// +build !plan9,!js
-
-package azurefiles
-
-import (
-	"testing"
-
-	"github.com/rclone/rclone/fstest/fstests"
-)
-
-func TestIntegration(t *testing.T) {
-	var objPtr *Object
-	fstests.Run(t, &fstests.Opt{
-		RemoteName: "TestAzureFiles:",
-		NilObject:  objPtr,
-	})
-}
@@ -1,7 +0,0 @@
-// Build for azurefiles for unsupported platforms to stop go complaining
-// about "no buildable Go source files "
-
-//go:build plan9 || js
-// +build plan9 js
-
-package azurefiles
@@ -1,13 +1,13 @@
-// Package api provides types used by the Backblaze B2 API.
 package api
 
 import (
 	"fmt"
+	"path"
 	"strconv"
+	"strings"
 	"time"
 
 	"github.com/rclone/rclone/fs/fserrors"
-	"github.com/rclone/rclone/lib/version"
 )
 
 // Error describes a B2 error response
@@ -33,18 +33,10 @@ var _ fserrors.Fataler = (*Error)(nil)
 
 // Bucket describes a B2 bucket
 type Bucket struct {
 	ID             string          `json:"bucketId"`
 	AccountID      string          `json:"accountId"`
 	Name           string          `json:"bucketName"`
 	Type           string          `json:"bucketType"`
-	LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
-}
-
-// LifecycleRule is a single lifecycle rule
-type LifecycleRule struct {
-	DaysFromHidingToDeleting  *int   `json:"daysFromHidingToDeleting"`
-	DaysFromUploadingToHiding *int   `json:"daysFromUploadingToHiding"`
-	FileNamePrefix            string `json:"fileNamePrefix"`
 }
 
 // Timestamp is a UTC time when this file was uploaded. It is a base
@@ -71,17 +63,16 @@ func (t *Timestamp) UnmarshalJSON(data []byte) error {
 	return nil
 }
 
-// HasVersion returns true if it looks like the passed filename has a timestamp on it.
-//
-// Note that the passed filename's timestamp may still be invalid even if this
-// function returns true.
-func HasVersion(remote string) bool {
-	return version.Match(remote)
-}
+const versionFormat = "-v2006-01-02-150405.000"
 
 // AddVersion adds the timestamp as a version string into the filename passed in.
 func (t Timestamp) AddVersion(remote string) string {
-	return version.Add(remote, time.Time(t))
+	ext := path.Ext(remote)
+	base := remote[:len(remote)-len(ext)]
+	s := time.Time(t).Format(versionFormat)
+	// Replace the '.' with a '-'
+	s = strings.Replace(s, ".", "-", -1)
+	return base + s + ext
 }
 
 // RemoveVersion removes the timestamp from a filename as a version string.
@@ -89,9 +80,24 @@ func (t Timestamp) AddVersion(remote string) string {
 // It returns the new file name and a timestamp, or the old filename
 // and a zero timestamp.
 func RemoveVersion(remote string) (t Timestamp, newRemote string) {
-	time, newRemote := version.Remove(remote)
-	t = Timestamp(time)
-	return
+	newRemote = remote
+	ext := path.Ext(remote)
+	base := remote[:len(remote)-len(ext)]
+	if len(base) < len(versionFormat) {
+		return
+	}
+	versionStart := len(base) - len(versionFormat)
+	// Check it ends in -xxx
+	if base[len(base)-4] != '-' {
+		return
+	}
+	// Replace with .xxx for parsing
+	base = base[:len(base)-4] + "." + base[len(base)-3:]
+	newT, err := time.Parse(versionFormat, base[versionStart:])
+	if err != nil {
+		return
+	}
+	return Timestamp(newT), base[:versionStart] + ext
 }
 
 // IsZero returns true if the timestamp is uninitialized
@@ -214,10 +220,9 @@ type FileInfo struct {
 
 // CreateBucketRequest is used to create a bucket
 type CreateBucketRequest struct {
 	AccountID      string          `json:"accountId"`
 	Name           string          `json:"bucketName"`
 	Type           string          `json:"bucketType"`
-	LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
 }
 
 // DeleteBucketRequest is used to create a bucket
@@ -248,7 +253,7 @@ type GetFileInfoRequest struct {
 // If the original source of the file being uploaded has a last
 // modified time concept, Backblaze recommends using
 // src_last_modified_millis as the name, and a string holding the base
-// 10 number of milliseconds since midnight, January 1, 1970
+// 10 number number of milliseconds since midnight, January 1, 1970
 // UTC. This fits in a 64 bit integer such as the type "long" in the
 // programming language Java. It is intended to be compatible with
 // Java's time long. For example, it can be passed directly into the
@@ -340,11 +345,3 @@ type CopyPartRequest struct {
 	PartNumber int64  `json:"partNumber"`      // Which part this is (starting from 1)
 	Range      string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
 }
-
-// UpdateBucketRequest describes a request to modify a B2 bucket
-type UpdateBucketRequest struct {
-	ID             string          `json:"bucketId"`
-	AccountID      string          `json:"accountId"`
-	Type           string          `json:"bucketType,omitempty"`
-	LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
-}
@@ -13,6 +13,7 @@ import (
 var (
 	emptyT api.Timestamp
 	t0     = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123456789Z"))
+	t0r    = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123000000Z"))
 	t1     = api.Timestamp(fstest.Time("2001-02-03T04:05:06.123000000Z"))
 )
 
@@ -35,6 +36,40 @@ func TestTimestampUnmarshalJSON(t *testing.T) {
 	assert.Equal(t, (time.Time)(t1), (time.Time)(tActual))
 }
 
+func TestTimestampAddVersion(t *testing.T) {
+	for _, test := range []struct {
+		t        api.Timestamp
+		in       string
+		expected string
+	}{
+		{t0, "potato.txt", "potato-v1970-01-01-010101-123.txt"},
+		{t1, "potato", "potato-v2001-02-03-040506-123"},
+		{t1, "", "-v2001-02-03-040506-123"},
+	} {
+		actual := test.t.AddVersion(test.in)
+		assert.Equal(t, test.expected, actual, test.in)
+	}
+}
+
+func TestTimestampRemoveVersion(t *testing.T) {
+	for _, test := range []struct {
+		in             string
+		expectedT      api.Timestamp
+		expectedRemote string
+	}{
+		{"potato.txt", emptyT, "potato.txt"},
+		{"potato-v1970-01-01-010101-123.txt", t0r, "potato.txt"},
+		{"potato-v2001-02-03-040506-123", t1, "potato"},
+		{"-v2001-02-03-040506-123", t1, ""},
+		{"potato-v2A01-02-03-040506-123", emptyT, "potato-v2A01-02-03-040506-123"},
+		{"potato-v2001-02-03-040506=123", emptyT, "potato-v2001-02-03-040506=123"},
+	} {
+		actualT, actualRemote := api.RemoveVersion(test.in)
+		assert.Equal(t, test.expectedT, actualT, test.in)
+		assert.Equal(t, test.expectedRemote, actualRemote, test.in)
+	}
+}
+
 func TestTimestampIsZero(t *testing.T) {
 	assert.True(t, emptyT.IsZero())
 	assert.False(t, t0.IsZero())
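The old-branch AddVersion/RemoveVersion code and tests above embed the upload timestamp in the file name using the reference layout `-v2006-01-02-150405.000`, swapping the `.` before the milliseconds for a `-`. A small self-contained sketch of that naming scheme (illustrative only; the real code hangs off api.Timestamp):

```go
package main

import (
	"fmt"
	"path"
	"strings"
	"time"
)

const versionFormat = "-v2006-01-02-150405.000"

// addVersion inserts the timestamp between the base name and the extension,
// with the '.' before the milliseconds replaced by '-'.
func addVersion(t time.Time, remote string) string {
	ext := path.Ext(remote)
	base := remote[:len(remote)-len(ext)]
	s := strings.Replace(t.Format(versionFormat), ".", "-", -1)
	return base + s + ext
}

func main() {
	t := time.Date(1970, 1, 1, 1, 1, 1, 123000000, time.UTC)
	fmt.Println(addVersion(t, "potato.txt")) // potato-v1970-01-01-010101-123.txt
}
```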
|
|||||||
backend/b2/b2.go (561 lines changed; file diff suppressed because it is too large)
@@ -5,7 +5,6 @@ import (
 	"time"
 
 	"github.com/rclone/rclone/fstest"
-	"github.com/rclone/rclone/fstest/fstests"
 )
 
 // Test b2 string encoding
@@ -169,10 +168,3 @@ func TestParseTimeString(t *testing.T) {
 	}
 
 }
-
-// -run TestIntegration/FsMkdir/FsPutFiles/Internal
-func (f *Fs) InternalTest(t *testing.T) {
-	// Internal tests go here
-}
-
-var _ fstests.InternalTester = (*Fs)(nil)
@@ -28,12 +28,7 @@ func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
 	return f.setUploadCutoff(cs)
 }
 
-func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
-	return f.setCopyCutoff(cs)
-}
-
 var (
 	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
 	_ fstests.SetUploadCutoffer   = (*Fs)(nil)
-	_ fstests.SetCopyCutoffer     = (*Fs)(nil)
 )
@@ -5,6 +5,7 @@
 package b2
 
 import (
+	"bytes"
 	"context"
 	"crypto/sha1"
 	"encoding/hex"
@@ -14,13 +15,12 @@ import (
 	"strings"
 	"sync"
 
+	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/b2/api"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
-	"github.com/rclone/rclone/fs/chunksize"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/lib/atexit"
-	"github.com/rclone/rclone/lib/pool"
 	"github.com/rclone/rclone/lib/rest"
 	"golang.org/x/sync/errgroup"
 )
@@ -78,31 +78,33 @@ type largeUpload struct {
 	wrap      accounting.WrapFn               // account parts being transferred
 	id        string                          // ID of the file being uploaded
 	size      int64                           // total size
-	parts     int                             // calculated number of parts, if known
-	sha1smu   sync.Mutex                      // mutex to protect sha1s
+	parts     int64                           // calculated number of parts, if known
 	sha1s     []string                        // slice of SHA1s for each part
 	uploadMu  sync.Mutex                      // lock for upload variable
 	uploads   []*api.GetUploadPartURLResponse // result of get upload URL calls
 	chunkSize int64                           // chunk size to use
 	src       *Object                         // if copying, object we are reading from
-	info      *api.FileInfo                   // final response with info about the object
 }
 
 // newLargeUpload starts an upload of object o from in with metadata in src
 //
 // If newInfo is set then metadata from that will be used instead of reading it from src
-func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
+func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, chunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
+	remote := o.remote
 	size := src.Size()
-	parts := 0
-	chunkSize := defaultChunkSize
+	parts := int64(0)
+	sha1SliceSize := int64(maxParts)
 	if size == -1 {
 		fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
 	} else {
-		chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize)
-		parts = int(size / int64(chunkSize))
+		parts = size / int64(chunkSize)
 		if size%int64(chunkSize) != 0 {
 			parts++
 		}
+		if parts > maxParts {
+			return nil, errors.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
+		}
+		sha1SliceSize = parts
 	}
 
 	opts := rest.Opts{
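The old-branch code above computes the number of parts up front and rejects uploads that would need more than maxParts parts (B2 allows at most 10,000 parts per large file). A standalone sketch of that calculation with illustrative names:

```go
package main

import "fmt"

// countParts integer-divides and rounds up, then refuses uploads that would
// need more than maxParts parts, mirroring the logic shown above.
func countParts(size, chunkSize, maxParts int64) (int64, error) {
	parts := size / chunkSize
	if size%chunkSize != 0 {
		parts++
	}
	if parts > maxParts {
		return 0, fmt.Errorf("%d bytes makes too many parts %d > %d - increase the chunk size", size, parts, maxParts)
	}
	return parts, nil
}

func main() {
	// 1 GiB in 96 MiB chunks -> 11 parts (10 full chunks plus a short final one).
	parts, err := countParts(1<<30, 96*1024*1024, 10000)
	fmt.Println(parts, err) // 11 <nil>
}
```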
@@ -150,7 +152,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
|
|||||||
id: response.ID,
|
id: response.ID,
|
||||||
size: size,
|
size: size,
|
||||||
parts: parts,
|
parts: parts,
|
||||||
sha1s: make([]string, 0, 16),
|
sha1s: make([]string, sha1SliceSize),
|
||||||
chunkSize: int64(chunkSize),
|
chunkSize: int64(chunkSize),
|
||||||
}
|
}
|
||||||
// unwrap the accounting from the input, we use wrap to put it
|
// unwrap the accounting from the input, we use wrap to put it
|
||||||
@@ -169,26 +171,24 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
|
|||||||
// This should be returned with returnUploadURL when finished
|
// This should be returned with returnUploadURL when finished
|
||||||
func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
|
func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
|
||||||
up.uploadMu.Lock()
|
up.uploadMu.Lock()
|
||||||
if len(up.uploads) > 0 {
|
defer up.uploadMu.Unlock()
|
||||||
|
if len(up.uploads) == 0 {
|
||||||
|
opts := rest.Opts{
|
||||||
|
Method: "POST",
|
||||||
|
Path: "/b2_get_upload_part_url",
|
||||||
|
}
|
||||||
|
var request = api.GetUploadPartURLRequest{
|
||||||
|
ID: up.id,
|
||||||
|
}
|
||||||
|
err := up.f.pacer.Call(func() (bool, error) {
|
||||||
|
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
|
||||||
|
return up.f.shouldRetry(ctx, resp, err)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "failed to get upload URL")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
upload, up.uploads = up.uploads[0], up.uploads[1:]
|
upload, up.uploads = up.uploads[0], up.uploads[1:]
|
||||||
up.uploadMu.Unlock()
|
|
||||||
return upload, nil
|
|
||||||
}
|
|
||||||
up.uploadMu.Unlock()
|
|
||||||
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "POST",
|
|
||||||
Path: "/b2_get_upload_part_url",
|
|
||||||
}
|
|
||||||
var request = api.GetUploadPartURLRequest{
|
|
||||||
ID: up.id,
|
|
||||||
}
|
|
||||||
err = up.f.pacer.Call(func() (bool, error) {
|
|
||||||
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
|
|
||||||
return up.f.shouldRetry(ctx, resp, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to get upload URL: %w", err)
|
|
||||||
}
|
}
|
||||||
return upload, nil
|
return upload, nil
|
||||||
}
|
}
|
||||||
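getUploadURL above keeps a small pool of b2_get_upload_part_url results and only asks the API for a new one when the pool is empty, while returnUploadURL puts a URL back after use. The pattern reduced to its essentials, as a sketch with a stand-in fetch function and a made-up URL in place of the real API call:

```go
package main

import (
	"fmt"
	"sync"
)

// uploadURLPool hands out upload URLs, reusing returned ones before
// fetching new ones from the server.
type uploadURLPool struct {
	mu    sync.Mutex
	urls  []string
	fetch func() (string, error) // stand-in for the b2_get_upload_part_url call
}

func (p *uploadURLPool) get() (string, error) {
	p.mu.Lock()
	if n := len(p.urls); n > 0 {
		url := p.urls[n-1]
		p.urls = p.urls[:n-1]
		p.mu.Unlock()
		return url, nil
	}
	p.mu.Unlock()
	// Fetch outside the lock so callers don't serialise on the API call.
	return p.fetch()
}

func (p *uploadURLPool) put(url string) {
	p.mu.Lock()
	p.urls = append(p.urls, url)
	p.mu.Unlock()
}

func main() {
	fetches := 0
	pool := &uploadURLPool{fetch: func() (string, error) {
		fetches++
		return fmt.Sprintf("https://pod.example/upload/%d", fetches), nil
	}}
	u, _ := pool.get()
	pool.put(u)
	v, _ := pool.get()         // reuses the returned URL, no new fetch
	fmt.Println(u == v, fetches) // true 1
}
```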
@@ -203,39 +203,10 @@ func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
|
|||||||
up.uploadMu.Unlock()
|
up.uploadMu.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add an sha1 to the being built up sha1s
|
// Transfer a chunk
|
||||||
func (up *largeUpload) addSha1(chunkNumber int, sha1 string) {
|
func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
|
||||||
up.sha1smu.Lock()
|
err := up.f.pacer.Call(func() (bool, error) {
|
||||||
defer up.sha1smu.Unlock()
|
fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
|
||||||
if len(up.sha1s) < chunkNumber+1 {
|
|
||||||
up.sha1s = append(up.sha1s, make([]string, chunkNumber+1-len(up.sha1s))...)
|
|
||||||
}
|
|
||||||
up.sha1s[chunkNumber] = sha1
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
|
|
||||||
func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (size int64, err error) {
|
|
||||||
// Only account after the checksum reads have been done
|
|
||||||
if do, ok := reader.(pool.DelayAccountinger); ok {
|
|
||||||
// To figure out this number, do a transfer and if the accounted size is 0 or a
|
|
||||||
// multiple of what it should be, increase or decrease this number.
|
|
||||||
do.DelayAccounting(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = up.f.pacer.Call(func() (bool, error) {
|
|
||||||
// Discover the size by seeking to the end
|
|
||||||
size, err = reader.Seek(0, io.SeekEnd)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// rewind the reader on retry and after reading size
|
|
||||||
_, err = reader.Seek(0, io.SeekStart)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
fs.Debugf(up.o, "Sending chunk %d length %d", chunkNumber, size)
|
|
||||||
|
|
||||||
// Get upload URL
|
// Get upload URL
|
||||||
upload, err := up.getUploadURL(ctx)
|
upload, err := up.getUploadURL(ctx)
|
||||||
@@ -243,8 +214,8 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
|
|||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
in := newHashAppendingReader(reader, sha1.New())
|
in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
|
||||||
sizeWithHash := size + int64(in.AdditionalLength())
|
size := int64(len(body)) + int64(in.AdditionalLength())
|
||||||
|
|
||||||
// Authorization
|
// Authorization
|
||||||
//
|
//
|
||||||
@@ -259,14 +230,14 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
|
|||||||
//
|
//
|
||||||
// The number of bytes in the file being uploaded. Note that
|
// The number of bytes in the file being uploaded. Note that
|
||||||
// this header is required; you cannot leave it out and just
|
 // this header is required; you cannot leave it out and just
 // use chunked encoding. The minimum size of every part but
-// the last one is 100 MB (100,000,000 bytes)
+// the last one is 100MB.
 //
 // X-Bz-Content-Sha1
 //
 // The SHA1 checksum of the this part of the file. B2 will
 // check this when the part is uploaded, to make sure that the
 // data arrived correctly. The same SHA1 checksum must be
 // passed to b2_finish_large_file.
 opts := rest.Opts{
 Method: "POST",
@@ -274,10 +245,10 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
 Body: up.wrap(in),
 ExtraHeaders: map[string]string{
 "Authorization": upload.AuthorizationToken,
-"X-Bz-Part-Number": fmt.Sprintf("%d", chunkNumber+1),
+"X-Bz-Part-Number": fmt.Sprintf("%d", part),
 sha1Header: "hex_digits_at_end",
 },
-ContentLength: &sizeWithHash,
+ContentLength: &size,
 }

 var response api.UploadPartResponse
@@ -285,7 +256,7 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
 resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
 retry, err := up.f.shouldRetry(ctx, resp, err)
 if err != nil {
-fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", chunkNumber, retry, err, err)
+fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
 }
 // On retryable error clear PartUploadURL
 if retry {
@@ -293,30 +264,30 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
 upload = nil
 }
 up.returnUploadURL(upload)
-up.addSha1(chunkNumber, in.HexSum())
+up.sha1s[part-1] = in.HexSum()
 return retry, err
 })
 if err != nil {
-fs.Debugf(up.o, "Error sending chunk %d: %v", chunkNumber, err)
+fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
 } else {
-fs.Debugf(up.o, "Done sending chunk %d", chunkNumber)
+fs.Debugf(up.o, "Done sending chunk %d", part)
 }
-return size, err
+return err
 }

 // Copy a chunk
-func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64) error {
+func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64) error {
 err := up.f.pacer.Call(func() (bool, error) {
 fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize)
 opts := rest.Opts{
 Method: "POST",
 Path: "/b2_copy_part",
 }
-offset := int64(part) * up.chunkSize // where we are in the source file
+offset := (part - 1) * up.chunkSize // where we are in the source file
 var request = api.CopyPartRequest{
 SourceID: up.src.id,
 LargeFileID: up.id,
-PartNumber: int64(part + 1),
+PartNumber: part,
 Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
 }
 var response api.UploadPartResponse
@@ -325,7 +296,7 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64)
 if err != nil {
 fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err)
 }
-up.addSha1(part, response.SHA1)
+up.sha1s[part-1] = response.SHA1
 return retry, err
 })
 if err != nil {
@@ -336,8 +307,8 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64)
 return err
 }

-// Close closes off the large upload
-func (up *largeUpload) Close(ctx context.Context) error {
+// finish closes off the large upload
+func (up *largeUpload) finish(ctx context.Context) error {
 fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts)
 opts := rest.Opts{
 Method: "POST",
@@ -355,12 +326,11 @@ func (up *largeUpload) Close(ctx context.Context) error {
 if err != nil {
 return err
 }
-up.info = &response
-return nil
+return up.o.decodeMetaDataFileInfo(&response)
 }

-// Abort aborts the large upload
-func (up *largeUpload) Abort(ctx context.Context) error {
+// cancel aborts the large upload
+func (up *largeUpload) cancel(ctx context.Context) error {
 fs.Debugf(up.o, "Cancelling large file %s", up.what)
 opts := rest.Opts{
 Method: "POST",
@@ -385,105 +355,128 @@ func (up *largeUpload) Abort(ctx context.Context) error {
 // reaches EOF.
 //
 // Note that initialUploadBlock must be returned to f.putBuf()
-func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock *pool.RW) (err error) {
+func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
-defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })()
+defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
 fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
 var (
 g, gCtx = errgroup.WithContext(ctx)
 hasMoreParts = true
 )
-up.size = initialUploadBlock.Size()
+up.size = int64(len(initialUploadBlock))
-up.parts = 0
+g.Go(func() error {
-for part := 0; hasMoreParts; part++ {
+for part := int64(1); hasMoreParts; part++ {
 // Get a block of memory from the pool and token which limits concurrency.
-var rw *pool.RW
+var buf []byte
-if part == 0 {
+if part == 1 {
-rw = initialUploadBlock
+buf = initialUploadBlock
 } else {
-rw = up.f.getRW(false)
+buf = up.f.getBuf(false)
 }
-
-// Fail fast, in case an errgroup managed function returns an error
-// gCtx is cancelled. There is no point in uploading all the other parts.
-if gCtx.Err() != nil {
-up.f.putRW(rw)
-break
-}
-
-// Read the chunk
-var n int64
-if part == 0 {
-n = rw.Size()
-} else {
-n, err = io.CopyN(rw, up.in, up.chunkSize)
-if err == io.EOF {
-if n == 0 {
-fs.Debugf(up.o, "Not sending empty chunk after EOF - ending.")
-up.f.putRW(rw)
-break
-} else {
-fs.Debugf(up.o, "Read less than a full chunk %d, making this the last one.", n)
-}
-hasMoreParts = false
-} else if err != nil {
-// other kinds of errors indicate failure
-up.f.putRW(rw)
-return err
-}
-}
-
-// Keep stats up to date
-up.parts += 1
-up.size += n
-if part > maxParts {
-up.f.putRW(rw)
-return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
-}
-
-part := part // for the closure
-g.Go(func() (err error) {
-defer up.f.putRW(rw)
-_, err = up.WriteChunk(gCtx, part, rw)
-return err
-})
-}
+
+// Fail fast, in case an errgroup managed function returns an error
+// gCtx is cancelled. There is no point in uploading all the other parts.
+if gCtx.Err() != nil {
+up.f.putBuf(buf, false)
+return nil
+}
+
+// Read the chunk
+var n int
+if part == 1 {
+n = len(buf)
+} else {
+n, err = io.ReadFull(up.in, buf)
+if err == io.ErrUnexpectedEOF {
+fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
+buf = buf[:n]
+hasMoreParts = false
+} else if err == io.EOF {
+fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
+up.f.putBuf(buf, false)
+return nil
+} else if err != nil {
+// other kinds of errors indicate failure
+up.f.putBuf(buf, false)
+return err
+}
+}
+
+// Keep stats up to date
+up.parts = part
+up.size += int64(n)
+if part > maxParts {
+up.f.putBuf(buf, false)
+return errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
+}
+
+part := part // for the closure
+g.Go(func() (err error) {
+defer up.f.putBuf(buf, false)
+return up.transferChunk(gCtx, part, buf)
+})
+}
+return nil
+})
 err = g.Wait()
 if err != nil {
 return err
 }
-return up.Close(ctx)
+up.sha1s = up.sha1s[:up.parts]
+return up.finish(ctx)
 }

-// Copy the chunks from the source to the destination
-func (up *largeUpload) Copy(ctx context.Context) (err error) {
-defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })()
+// Upload uploads the chunks from the input
+func (up *largeUpload) Upload(ctx context.Context) (err error) {
+defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
 fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
 var (
 g, gCtx = errgroup.WithContext(ctx)
 remaining = up.size
 )
-g.SetLimit(up.f.opt.UploadConcurrency)
-for part := 0; part < up.parts; part++ {
-// Fail fast, in case an errgroup managed function returns an error
-// gCtx is cancelled. There is no point in copying all the other parts.
-if gCtx.Err() != nil {
-break
-}
-
-reqSize := remaining
-if reqSize >= up.chunkSize {
-reqSize = up.chunkSize
-}
-
-part := part // for the closure
-g.Go(func() (err error) {
-return up.copyChunk(gCtx, part, reqSize)
-})
-remaining -= reqSize
-}
+g.Go(func() error {
+for part := int64(1); part <= up.parts; part++ {
+// Get a block of memory from the pool and token which limits concurrency.
+buf := up.f.getBuf(up.doCopy)
+
+// Fail fast, in case an errgroup managed function returns an error
+// gCtx is cancelled. There is no point in uploading all the other parts.
+if gCtx.Err() != nil {
+up.f.putBuf(buf, up.doCopy)
+return nil
+}
+
+reqSize := remaining
+if reqSize >= up.chunkSize {
+reqSize = up.chunkSize
+}
+
+if !up.doCopy {
+// Read the chunk
+buf = buf[:reqSize]
+_, err = io.ReadFull(up.in, buf)
+if err != nil {
+up.f.putBuf(buf, up.doCopy)
+return err
+}
+}
+
+part := part // for the closure
+g.Go(func() (err error) {
+defer up.f.putBuf(buf, up.doCopy)
+if !up.doCopy {
+err = up.transferChunk(gCtx, part, buf)
+} else {
+err = up.copyChunk(gCtx, part, reqSize)
+}
+return err
+})
+remaining -= reqSize
+}
+return nil
+})
 err = g.Wait()
 if err != nil {
 return err
 }
-return up.Close(ctx)
+return up.finish(ctx)
 }
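Note: the main behavioural difference in the hunks above is the part-numbering convention. The v1.65.1 side passes a 0-based chunkNumber and converts to B2's 1-based part number at the request sites (chunkNumber+1, part+1), while the branch carries a 1-based part through the loop and indexes sha1s[part-1]. The following standalone sketch is illustrative only and is not rclone code; the sha1s slice and loop bounds are invented for the example. It just shows the 0-based to 1-based shift B2 expects for X-Bz-Part-Number.

// part_numbering_sketch.go - hypothetical illustration, not rclone source
package main

import "fmt"

func main() {
	// B2's b2_upload_part / b2_copy_part use 1-based part numbers,
	// so a 0-based loop index has to be shifted when building the
	// request and when recording the part's SHA1.
	sha1s := make([]string, 3)
	for chunkNumber := 0; chunkNumber < 3; chunkNumber++ {
		partNumber := chunkNumber + 1 // value sent as X-Bz-Part-Number
		sha1s[chunkNumber] = fmt.Sprintf("sha1-of-part-%d", partNumber)
		fmt.Println("uploading part", partNumber)
	}
	fmt.Println(sha1s)
}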
@@ -14,7 +14,7 @@ const (
 timeFormat = `"` + time.RFC3339 + `"`
 )

-// Time represents date and time information for the
+// Time represents represents date and time information for the
 // box API, by using RFC3339
 type Time time.Time

@@ -36,13 +36,13 @@ func (t *Time) UnmarshalJSON(data []byte) error {

 // Error is returned from box when things go wrong
 type Error struct {
 Type string `json:"type"`
 Status int `json:"status"`
 Code string `json:"code"`
-ContextInfo json.RawMessage `json:"context_info"`
+ContextInfo json.RawMessage
 HelpURL string `json:"help_url"`
 Message string `json:"message"`
 RequestID string `json:"request_id"`
 }

 // Error returns a string for the error and satisfies the error interface
@@ -52,7 +52,7 @@ func (e *Error) Error() string {
 out += ": " + e.Message
 }
 if e.ContextInfo != nil {
-out += fmt.Sprintf(" (%s)", string(e.ContextInfo))
+out += fmt.Sprintf(" (%+v)", e.ContextInfo)
 }
 return out
 }
@@ -61,9 +61,9 @@ func (e *Error) Error() string {
 var _ error = (*Error)(nil)

 // ItemFields are the fields needed for FileInfo
-var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by"
+var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link"

-// Types of things in Item/ItemMini
+// Types of things in Item
 const (
 ItemTypeFolder = "folder"
 ItemTypeFile = "file"
@@ -72,41 +72,24 @@ const (
 ItemStatusDeleted = "deleted"
 )

-// ItemMini is a subset of the elements in a full Item returned by some API calls
-type ItemMini struct {
-Type string `json:"type"`
-ID string `json:"id"`
-SequenceID int64 `json:"sequence_id,string"`
-Etag string `json:"etag"`
-SHA1 string `json:"sha1"`
-Name string `json:"name"`
-}
-
 // Item describes a folder or a file as returned by Get Folder Items and others
 type Item struct {
 Type string `json:"type"`
 ID string `json:"id"`
-SequenceID int64 `json:"sequence_id,string"`
+SequenceID string `json:"sequence_id"`
 Etag string `json:"etag"`
 SHA1 string `json:"sha1"`
 Name string `json:"name"`
 Size float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
 CreatedAt Time `json:"created_at"`
 ModifiedAt Time `json:"modified_at"`
 ContentCreatedAt Time `json:"content_created_at"`
 ContentModifiedAt Time `json:"content_modified_at"`
 ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
-Parent ItemMini `json:"parent"`
 SharedLink struct {
 URL string `json:"url,omitempty"`
 Access string `json:"access,omitempty"`
 } `json:"shared_link"`
-OwnedBy struct {
-Type string `json:"type"`
-ID string `json:"id"`
-Name string `json:"name"`
-Login string `json:"login"`
-} `json:"owned_by"`
 }

 // ModTime returns the modification time of the item
@@ -120,11 +103,10 @@ func (i *Item) ModTime() (t time.Time) {

 // FolderItems is returned from the GetFolderItems call
 type FolderItems struct {
 TotalCount int `json:"total_count"`
 Entries []Item `json:"entries"`
 Offset int `json:"offset"`
 Limit int `json:"limit"`
-NextMarker *string `json:"next_marker,omitempty"`
 Order []struct {
 By string `json:"by"`
 Direction string `json:"direction"`
@@ -150,26 +132,6 @@ type UploadFile struct {
 ContentModifiedAt Time `json:"content_modified_at"`
 }

-// PreUploadCheck is the request for upload preflight check
-type PreUploadCheck struct {
-Name string `json:"name"`
-Parent Parent `json:"parent"`
-Size *int64 `json:"size,omitempty"`
-}
-
-// PreUploadCheckResponse is the response from upload preflight check
-// if successful
-type PreUploadCheckResponse struct {
-UploadToken string `json:"upload_token"`
-UploadURL string `json:"upload_url"`
-}
-
-// PreUploadCheckConflict is returned in the ContextInfo error field
-// from PreUploadCheck when the error code is "item_name_in_use"
-type PreUploadCheckConflict struct {
-Conflicts ItemMini `json:"conflicts"`
-}
-
 // UpdateFileModTime is used in Update File Info
 type UpdateFileModTime struct {
 ContentModifiedAt Time `json:"content_modified_at"`
@@ -280,30 +242,3 @@ type User struct {
 Address string `json:"address"`
 AvatarURL string `json:"avatar_url"`
 }

-// FileTreeChangeEventTypes are the events that can require cache invalidation
-var FileTreeChangeEventTypes = map[string]struct{}{
-"ITEM_COPY": {},
-"ITEM_CREATE": {},
-"ITEM_MAKE_CURRENT_VERSION": {},
-"ITEM_MODIFY": {},
-"ITEM_MOVE": {},
-"ITEM_RENAME": {},
-"ITEM_TRASH": {},
-"ITEM_UNDELETE_VIA_TRASH": {},
-"ITEM_UPLOAD": {},
-}
-
-// Event is an array element in the response returned from /events
-type Event struct {
-EventType string `json:"event_type"`
-EventID string `json:"event_id"`
-Source Item `json:"source"`
-}
-
-// Events is returned from /events
-type Events struct {
-ChunkSize int64 `json:"chunk_size"`
-Entries []Event `json:"entries"`
-NextStreamPosition int64 `json:"next_stream_position"`
-}
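Note: the SequenceID hunk above changes the field from SequenceID string `json:"sequence_id"` to SequenceID int64 `json:"sequence_id,string"`. The sketch below is a standalone illustration of the standard library's ",string" tag option, not rclone or box code; the item type and sample JSON are made up for the example.

// sequence_id_sketch.go - hypothetical illustration, not rclone source
package main

import (
	"encoding/json"
	"fmt"
)

type item struct {
	// The ",string" option tells encoding/json that the JSON value is a
	// quoted number and should be decoded straight into the int64.
	SequenceID int64 `json:"sequence_id,string"`
}

func main() {
	var it item
	if err := json.Unmarshal([]byte(`{"sequence_id": "3"}`), &it); err != nil {
		panic(err)
	}
	fmt.Println(it.SequenceID + 1) // prints 4 - usable as a number without manual parsing
}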
(File diff suppressed because it is too large)
@@ -8,7 +8,6 @@ import (
 "crypto/sha1"
 "encoding/base64"
 "encoding/json"
-"errors"
 "fmt"
 "io"
 "net/http"
@@ -16,6 +15,7 @@ import (
 "sync"
 "time"

+"github.com/pkg/errors"
 "github.com/rclone/rclone/backend/box/api"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/accounting"
@@ -140,7 +140,7 @@ outer:
 }
 }
 default:
-return nil, fmt.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
+return nil, errors.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
 }
 }
 fs.Debugf(o, "commit multipart upload failed %d/%d - trying again in %d seconds (%s)", tries+1, maxTries, delay, why)
@@ -151,7 +151,7 @@ outer:
 }
 err = json.Unmarshal(body, &result)
 if err != nil {
-return nil, fmt.Errorf("couldn't decode commit response: %q: %w", body, err)
+return nil, errors.Wrapf(err, "couldn't decode commit response: %q", body)
 }
 return result, nil
 }
@@ -177,7 +177,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
 // Create upload session
 session, err := o.createUploadSession(ctx, leaf, directoryID, size)
 if err != nil {
-return fmt.Errorf("multipart upload create session failed: %w", err)
+return errors.Wrap(err, "multipart upload create session failed")
 }
 chunkSize := session.PartSize
 fs.Debugf(o, "Multipart upload session started for %d parts of size %v", session.TotalParts, fs.SizeSuffix(chunkSize))
@@ -222,7 +222,7 @@ outer:
 // Read the chunk
 _, err = io.ReadFull(in, buf)
 if err != nil {
-err = fmt.Errorf("multipart upload failed to read source: %w", err)
+err = errors.Wrap(err, "multipart upload failed to read source")
 break outer
 }

@@ -238,7 +238,7 @@ outer:
 fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
 partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap, options...)
 if err != nil {
-err = fmt.Errorf("multipart upload failed to upload part: %w", err)
+err = errors.Wrap(err, "multipart upload failed to upload part")
 select {
 case errs <- err:
 default:
@@ -266,11 +266,11 @@ outer:
 // Finalise the upload session
 result, err := o.commitUpload(ctx, session.ID, parts, modTime, hash.Sum(nil))
 if err != nil {
-return fmt.Errorf("multipart upload failed to finalize: %w", err)
+return errors.Wrap(err, "multipart upload failed to finalize")
 }

 if result.TotalCount != 1 || len(result.Entries) != 1 {
-return fmt.Errorf("multipart upload failed %v - not sure why", o)
+return errors.Errorf("multipart upload failed %v - not sure why", o)
 }
 return o.setMetaData(&result.Entries[0])
 }
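Note: most of the remaining hunks in this compare flip error handling between the standard library's fmt.Errorf with %w (one side) and github.com/pkg/errors (the other). The snippet below is a standalone, hypothetical illustration of the stdlib style only, not code from this repository; it simply shows that %w keeps the cause recoverable with errors.Is, which is what the wrapping is for in both styles.

// error_wrapping_sketch.go - hypothetical illustration, not rclone source
package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	// Wrap an underlying error with context while preserving the cause.
	wrapped := fmt.Errorf("multipart upload failed to read source: %w", io.EOF)
	fmt.Println(wrapped)                    // annotated message
	fmt.Println(errors.Is(wrapped, io.EOF)) // true - the original error is still detectable
}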
backend/cache/cache.go (vendored): 123 changes
@@ -1,12 +1,9 @@
-//go:build !plan9 && !js
 // +build !plan9,!js

-// Package cache implements a virtual provider to cache existing remotes.
 package cache

 import (
 "context"
-"errors"
 "fmt"
 "io"
 "math"
@@ -21,6 +18,7 @@ import (
 "syscall"
 "time"

+"github.com/pkg/errors"
 "github.com/rclone/rclone/backend/crypt"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/cache"
@@ -70,28 +68,26 @@ func init() {
 CommandHelp: commandHelp,
 Options: []fs.Option{{
 Name: "remote",
-Help: "Remote to cache.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
+Help: "Remote to cache.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
 Required: true,
 }, {
 Name: "plex_url",
-Help: "The URL of the Plex server.",
+Help: "The URL of the Plex server",
 }, {
 Name: "plex_username",
-Help: "The username of the Plex user.",
-Sensitive: true,
+Help: "The username of the Plex user",
 }, {
 Name: "plex_password",
-Help: "The password of the Plex user.",
+Help: "The password of the Plex user",
 IsPassword: true,
 }, {
 Name: "plex_token",
-Help: "The plex token for authentication - auto set normally.",
+Help: "The plex token for authentication - auto set normally",
 Hide: fs.OptionHideBoth,
 Advanced: true,
-Sensitive: true,
 }, {
 Name: "plex_insecure",
-Help: "Skip all certificate verification when connecting to the Plex server.",
+Help: "Skip all certificate verification when connecting to the Plex server",
 Advanced: true,
 }, {
 Name: "chunk_size",
@@ -102,14 +98,14 @@ changed, any downloaded chunks will be invalid and cache-chunk-path
 will need to be cleared or unexpected EOF errors will occur.`,
 Default: DefCacheChunkSize,
 Examples: []fs.OptionExample{{
-Value: "1M",
+Value: "1m",
-Help: "1 MiB",
+Help: "1MB",
 }, {
 Value: "5M",
-Help: "5 MiB",
+Help: "5 MB",
 }, {
 Value: "10M",
-Help: "10 MiB",
+Help: "10 MB",
 }},
 }, {
 Name: "info_age",
@@ -136,22 +132,22 @@ oldest chunks until it goes under this value.`,
 Default: DefCacheTotalChunkSize,
 Examples: []fs.OptionExample{{
 Value: "500M",
-Help: "500 MiB",
+Help: "500 MB",
 }, {
 Value: "1G",
-Help: "1 GiB",
+Help: "1 GB",
 }, {
 Value: "10G",
-Help: "10 GiB",
+Help: "10 GB",
 }},
 }, {
 Name: "db_path",
-Default: filepath.Join(config.GetCacheDir(), "cache-backend"),
+Default: filepath.Join(config.CacheDir, "cache-backend"),
-Help: "Directory to store file structure metadata DB.\n\nThe remote name is used as the DB file name.",
+Help: "Directory to store file structure metadata DB.\nThe remote name is used as the DB file name.",
 Advanced: true,
 }, {
 Name: "chunk_path",
-Default: filepath.Join(config.GetCacheDir(), "cache-backend"),
+Default: filepath.Join(config.CacheDir, "cache-backend"),
 Help: `Directory to cache chunk files.

 Path to where partial file data (chunks) are stored locally. The remote
@@ -171,7 +167,6 @@ then "--cache-chunk-path" will use the same path as "--cache-db-path".`,
 Name: "chunk_clean_interval",
 Default: DefCacheChunkCleanInterval,
 Help: `How often should the cache perform cleanups of the chunk storage.

 The default value should be ok for most people. If you find that the
 cache goes over "cache-chunk-total-size" too often then try to lower
 this value to force it to perform cleanups more often.`,
@@ -225,7 +220,7 @@ available on the local machine.`,
 }, {
 Name: "rps",
 Default: int(DefCacheRps),
-Help: `Limits the number of requests per second to the source FS (-1 to disable).
+Help: `Limits the number of requests per second to the source FS (-1 to disable)

 This setting places a hard limit on the number of requests per second
 that cache will be doing to the cloud provider remote and try to
@@ -246,7 +241,7 @@ still pass.`,
 }, {
 Name: "writes",
 Default: DefCacheWrites,
-Help: `Cache file data on writes through the FS.
+Help: `Cache file data on writes through the FS

 If you need to read files immediately after you upload them through
 cache you can enable this flag to have their data stored in the
@@ -267,7 +262,7 @@ provider`,
 }, {
 Name: "tmp_wait_time",
 Default: DefCacheTmpWaitTime,
-Help: `How long should files be stored in local cache before being uploaded.
+Help: `How long should files be stored in local cache before being uploaded

 This is the duration that a file must wait in the temporary location
 _cache-tmp-upload-path_ before it is selected for upload.
@@ -278,7 +273,7 @@ to start the upload if a queue formed for this purpose.`,
 }, {
 Name: "db_wait_time",
 Default: DefCacheDbWaitTime,
-Help: `How long to wait for the DB to be available - 0 is unlimited.
+Help: `How long to wait for the DB to be available - 0 is unlimited

 Only one process can have the DB open at any one time, so rclone waits
 for this duration for the DB to become available before it gives an
@@ -344,14 +339,8 @@ func parseRootPath(path string) (string, error) {
 return strings.Trim(path, "/"), nil
 }

-var warnDeprecated sync.Once
-
 // NewFs constructs an Fs from the path, container:path
 func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
-warnDeprecated.Do(func() {
-fs.Logf(nil, "WARNING: Cache backend is deprecated and may be removed in future. Please use VFS instead.")
-})
-
 // Parse config into Options struct
 opt := new(Options)
 err := configstruct.Set(m, opt)
@@ -359,7 +348,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 return nil, err
 }
 if opt.ChunkTotalSize < opt.ChunkSize*fs.SizeSuffix(opt.TotalWorkers) {
-return nil, fmt.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
+return nil, errors.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
 opt.ChunkTotalSize, opt.ChunkSize, opt.TotalWorkers)
 }

@@ -369,13 +358,13 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F

 rpath, err := parseRootPath(rootPath)
 if err != nil {
-return nil, fmt.Errorf("failed to clean root path %q: %w", rootPath, err)
+return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
 }

 remotePath := fspath.JoinRootPath(opt.Remote, rootPath)
 wrappedFs, wrapErr := cache.Get(ctx, remotePath)
 if wrapErr != nil && wrapErr != fs.ErrorIsFile {
-return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remotePath, wrapErr)
+return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
 }
 var fsErr error
 fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
@@ -397,18 +386,14 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 notifiedRemotes: make(map[string]bool),
 }
 cache.PinUntilFinalized(f.Fs, f)
-rps := rate.Inf
-if opt.Rps > 0 {
-rps = rate.Limit(float64(opt.Rps))
-}
-f.rateLimiter = rate.NewLimiter(rps, opt.TotalWorkers)
+f.rateLimiter = rate.NewLimiter(rate.Limit(float64(opt.Rps)), opt.TotalWorkers)

 f.plexConnector = &plexConnector{}
 if opt.PlexURL != "" {
 if opt.PlexToken != "" {
 f.plexConnector, err = newPlexConnectorWithToken(f, opt.PlexURL, opt.PlexToken, opt.PlexInsecure)
 if err != nil {
-return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
+return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
 }
 } else {
 if opt.PlexPassword != "" && opt.PlexUsername != "" {
@@ -420,7 +405,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 m.Set("plex_token", token)
 })
 if err != nil {
-return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
+return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
 }
 }
 }
@@ -429,8 +414,8 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 dbPath := f.opt.DbPath
 chunkPath := f.opt.ChunkPath
 // if the dbPath is non default but the chunk path is default, we overwrite the last to follow the same one as dbPath
-if dbPath != filepath.Join(config.GetCacheDir(), "cache-backend") &&
+if dbPath != filepath.Join(config.CacheDir, "cache-backend") &&
-chunkPath == filepath.Join(config.GetCacheDir(), "cache-backend") {
+chunkPath == filepath.Join(config.CacheDir, "cache-backend") {
 chunkPath = dbPath
 }
 if filepath.Ext(dbPath) != "" {
@@ -441,11 +426,11 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 }
 err = os.MkdirAll(dbPath, os.ModePerm)
 if err != nil {
-return nil, fmt.Errorf("failed to create cache directory %v: %w", dbPath, err)
+return nil, errors.Wrapf(err, "failed to create cache directory %v", dbPath)
 }
 err = os.MkdirAll(chunkPath, os.ModePerm)
 if err != nil {
-return nil, fmt.Errorf("failed to create cache directory %v: %w", chunkPath, err)
+return nil, errors.Wrapf(err, "failed to create cache directory %v", chunkPath)
 }

 dbPath = filepath.Join(dbPath, name+".db")
@@ -457,7 +442,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 DbWaitTime: time.Duration(opt.DbWaitTime),
 })
 if err != nil {
-return nil, fmt.Errorf("failed to start cache db: %w", err)
+return nil, errors.Wrapf(err, "failed to start cache db")
 }
 // Trap SIGINT and SIGTERM to close the DB handle gracefully
 c := make(chan os.Signal, 1)
@@ -491,12 +476,12 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 if f.opt.TempWritePath != "" {
 err = os.MkdirAll(f.opt.TempWritePath, os.ModePerm)
 if err != nil {
-return nil, fmt.Errorf("failed to create cache directory %v: %w", f.opt.TempWritePath, err)
+return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
 }
 f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
 f.tempFs, err = cache.Get(ctx, f.opt.TempWritePath)
 if err != nil {
-return nil, fmt.Errorf("failed to create temp fs: %w", err)
+return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
 }
 fs.Infof(name, "Upload Temp Rest Time: %v", f.opt.TempWaitTime)
 fs.Infof(name, "Upload Temp FS: %v", f.opt.TempWritePath)
@@ -613,7 +598,7 @@ func (f *Fs) httpStats(ctx context.Context, in rc.Params) (out rc.Params, err er
 out = make(rc.Params)
 m, err := f.Stats()
 if err != nil {
-return out, fmt.Errorf("error while getting cache stats")
+return out, errors.Errorf("error while getting cache stats")
 }
 out["status"] = "ok"
 out["stats"] = m
@@ -640,7 +625,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
 out = make(rc.Params)
 remoteInt, ok := in["remote"]
 if !ok {
-return out, fmt.Errorf("remote is needed")
+return out, errors.Errorf("remote is needed")
 }
 remote := remoteInt.(string)
 withData := false
@@ -651,7 +636,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,

 remote = f.unwrapRemote(remote)
 if !f.cache.HasEntry(path.Join(f.Root(), remote)) {
-return out, fmt.Errorf("%s doesn't exist in cache", remote)
+return out, errors.Errorf("%s doesn't exist in cache", remote)
 }

 co := NewObject(f, remote)
@@ -660,7 +645,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
 cd := NewDirectory(f, remote)
 err := f.cache.ExpireDir(cd)
 if err != nil {
-return out, fmt.Errorf("error expiring directory: %w", err)
+return out, errors.WithMessage(err, "error expiring directory")
 }
 // notify vfs too
 f.notifyChangeUpstream(cd.Remote(), fs.EntryDirectory)
@@ -671,7 +656,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
 // expire the entry
 err = f.cache.ExpireObject(co, withData)
 if err != nil {
-return out, fmt.Errorf("error expiring file: %w", err)
+return out, errors.WithMessage(err, "error expiring file")
 }
 // notify vfs too
 f.notifyChangeUpstream(co.Remote(), fs.EntryObject)
@@ -692,24 +677,24 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
 case 1:
 start, err = strconv.ParseInt(ints[0], 10, 64)
 if err != nil {
-return nil, fmt.Errorf("invalid range: %q", part)
+return nil, errors.Errorf("invalid range: %q", part)
 }
 end = start + 1
 case 2:
 if ints[0] != "" {
 start, err = strconv.ParseInt(ints[0], 10, 64)
 if err != nil {
-return nil, fmt.Errorf("invalid range: %q", part)
+return nil, errors.Errorf("invalid range: %q", part)
 }
 }
 if ints[1] != "" {
 end, err = strconv.ParseInt(ints[1], 10, 64)
 if err != nil {
-return nil, fmt.Errorf("invalid range: %q", part)
+return nil, errors.Errorf("invalid range: %q", part)
 }
 }
 default:
-return nil, fmt.Errorf("invalid range: %q", part)
+return nil, errors.Errorf("invalid range: %q", part)
 }
 crs = append(crs, chunkRange{start: start, end: end})
 }
@@ -764,18 +749,18 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
 delete(in, "chunks")
 crs, err := parseChunks(s)
 if err != nil {
-return nil, fmt.Errorf("invalid chunks parameter: %w", err)
+return nil, errors.Wrap(err, "invalid chunks parameter")
 }
 var files [][2]string
 for k, v := range in {
 if !strings.HasPrefix(k, "file") {
-return nil, fmt.Errorf("invalid parameter %s=%s", k, v)
+return nil, errors.Errorf("invalid parameter %s=%s", k, v)
 }
 switch v := v.(type) {
 case string:
 files = append(files, [2]string{v, f.unwrapRemote(v)})
 default:
-return nil, fmt.Errorf("invalid parameter %s=%s", k, v)
+return nil, errors.Errorf("invalid parameter %s=%s", k, v)
 }
 }
 type fileStatus struct {
@@ -1040,7 +1025,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 }
 fs.Debugf(dir, "list: remove entry: %v", entryRemote)
 }
-entries = nil //nolint:ineffassign
+entries = nil

 // and then iterate over the ones from source (temp Objects will override source ones)
 var batchDirectories []*Directory
@@ -1131,7 +1116,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 case fs.Directory:
 _ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o))
 default:
-return fmt.Errorf("unknown object type %T", entry)
+return errors.Errorf("Unknown object type %T", entry)
 }
 }

@@ -1750,7 +1735,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 do := f.Fs.Features().About
 if do == nil {
-return nil, errors.New("not supported by underlying remote")
+return nil, errors.New("About not supported")
 }
 return do(ctx)
 }
@@ -1789,7 +1774,7 @@ func (f *Fs) CleanUpCache(ignoreLastTs bool) {
 }
 }

-// StopBackgroundRunners will signal all the runners to stop their work
+// StopBackgroundRunners will signall all the runners to stop their work
 // can be triggered from a terminate signal or from testing between runs
 func (f *Fs) StopBackgroundRunners() {
 f.cleanupChan <- false
backend/cache/cache_internal_test.go (vendored): 144 changes
@@ -1,5 +1,5 @@
-//go:build !plan9 && !js && !race
-// +build !plan9,!js,!race
+// +build !plan9,!js
+// +build !race

 package cache_test

@@ -7,21 +7,21 @@ import (
 "bytes"
 "context"
 "encoding/base64"
-"errors"
 goflag "flag"
 "fmt"
 "io"
+"io/ioutil"
 "log"
 "math/rand"
 "os"
 "path"
 "path/filepath"
-"runtime"
 "runtime/debug"
 "strings"
 "testing"
 "time"

+"github.com/pkg/errors"
 "github.com/rclone/rclone/backend/cache"
 "github.com/rclone/rclone/backend/crypt"
 _ "github.com/rclone/rclone/backend/drive"
@@ -101,12 +101,14 @@ func TestMain(m *testing.M) {

 func TestInternalListRootAndInnerRemotes(t *testing.T) {
 id := fmt.Sprintf("tilrair%v", time.Now().Unix())
-rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 // Instantiate inner fs
 innerFolder := "inner"
 runInstance.mkdir(t, rootFs, innerFolder)
-rootFs2, _ := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil)
+rootFs2, boltDb2 := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil, nil)
+defer runInstance.cleanupFs(t, rootFs2, boltDb2)

 runInstance.writeObjectString(t, rootFs2, "one", "content")
 listRoot, err := runInstance.list(t, rootFs, "")
@@ -164,7 +166,7 @@ func TestInternalVfsCache(t *testing.T) {
 li2 := [2]string{path.Join("test", "one"), path.Join("test", "second")}
 for _, r := range li2 {
 var err error
-ci, err := os.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
+ci, err := ioutil.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
 if err != nil || len(ci) == 0 {
 log.Printf("========== '%v' not in cache", r)
 } else {
@@ -223,7 +225,8 @@ func TestInternalVfsCache(t *testing.T) {

 func TestInternalObjWrapFsFound(t *testing.T) {
 id := fmt.Sprintf("tiowff%v", time.Now().Unix())
-rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 cfs, err := runInstance.getCacheFs(rootFs)
 require.NoError(t, err)
@@ -255,7 +258,8 @@ func TestInternalObjWrapFsFound(t *testing.T) {

 func TestInternalObjNotFound(t *testing.T) {
 id := fmt.Sprintf("tionf%v", time.Now().Unix())
-rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 obj, err := rootFs.NewObject(context.Background(), "404")
 require.Error(t, err)
@@ -265,7 +269,8 @@ func TestInternalObjNotFound(t *testing.T) {
 func TestInternalCachedWrittenContentMatches(t *testing.T) {
 testy.SkipUnreliable(t)
 id := fmt.Sprintf("ticwcm%v", time.Now().Unix())
-rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 cfs, err := runInstance.getCacheFs(rootFs)
 require.NoError(t, err)
@@ -288,11 +293,9 @@ func TestInternalCachedWrittenContentMatches(t *testing.T) {
 }

 func TestInternalDoubleWrittenContentMatches(t *testing.T) {
-if runtime.GOOS == "windows" && runtime.GOARCH == "386" {
-t.Skip("Skip test on windows/386")
-}
 id := fmt.Sprintf("tidwcm%v", time.Now().Unix())
-rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 // write the object
 runInstance.writeRemoteString(t, rootFs, "one", "one content")
@@ -310,7 +313,8 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
 func TestInternalCachedUpdatedContentMatches(t *testing.T) {
 testy.SkipUnreliable(t)
 id := fmt.Sprintf("ticucm%v", time.Now().Unix())
-rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+defer runInstance.cleanupFs(t, rootFs, boltDb)
 var err error

 // create some rand test data
@@ -339,7 +343,8 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
 func TestInternalWrappedWrittenContentMatches(t *testing.T) {
 id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
 vfsflags.Opt.DirCacheTime = time.Second
-rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
+defer runInstance.cleanupFs(t, rootFs, boltDb)
 if runInstance.rootIsCrypt {
 t.Skip("test skipped with crypt remote")
 }
@@ -369,7 +374,8 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
 func TestInternalLargeWrittenContentMatches(t *testing.T) {
 id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
 vfsflags.Opt.DirCacheTime = time.Second
-rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
+defer runInstance.cleanupFs(t, rootFs, boltDb)
 if runInstance.rootIsCrypt {
 t.Skip("test skipped with crypt remote")
 }
@@ -395,7 +401,8 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {

 func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 id := fmt.Sprintf("tiwfcns%v", time.Now().Unix())
-rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 cfs, err := runInstance.getCacheFs(rootFs)
 require.NoError(t, err)
@@ -435,7 +442,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 return err
 }
 if coSize != expectedSize {
-return fmt.Errorf("%v <> %v", coSize, expectedSize)
+return errors.Errorf("%v <> %v", coSize, expectedSize)
 }
 return nil
 }, 12, time.Second*10)
@@ -449,7 +456,8 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {

 func TestInternalMoveWithNotify(t *testing.T) {
 id := fmt.Sprintf("timwn%v", time.Now().Unix())
-rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+defer runInstance.cleanupFs(t, rootFs, boltDb)
 if !runInstance.wrappedIsExternal {
 t.Skipf("Not external")
 }
@@ -490,7 +498,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
 }
 if len(li) != 2 {
 log.Printf("not expected listing /test: %v", li)
-return fmt.Errorf("not expected listing /test: %v", li)
+return errors.Errorf("not expected listing /test: %v", li)
 }

 li, err = runInstance.list(t, rootFs, "test/one")
@@ -500,7 +508,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
 }
 if len(li) != 0 {
 log.Printf("not expected listing /test/one: %v", li)
-return fmt.Errorf("not expected listing /test/one: %v", li)
+return errors.Errorf("not expected listing /test/one: %v", li)
 }

 li, err = runInstance.list(t, rootFs, "test/second")
@@ -510,21 +518,21 @@ func TestInternalMoveWithNotify(t *testing.T) {
 }
 if len(li) != 1 {
 log.Printf("not expected listing /test/second: %v", li)
-return fmt.Errorf("not expected listing /test/second: %v", li)
+return errors.Errorf("not expected listing /test/second: %v", li)
 }
 if fi, ok := li[0].(os.FileInfo); ok {
 if fi.Name() != "data.bin" {
 log.Printf("not expected name: %v", fi.Name())
-return fmt.Errorf("not expected name: %v", fi.Name())
+return errors.Errorf("not expected name: %v", fi.Name())
 }
 } else if di, ok := li[0].(fs.DirEntry); ok {
 if di.Remote() != "test/second/data.bin" {
 log.Printf("not expected remote: %v", di.Remote())
-return fmt.Errorf("not expected remote: %v", di.Remote())
|
return errors.Errorf("not expected remote: %v", di.Remote())
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
log.Printf("unexpected listing: %v", li)
|
log.Printf("unexpected listing: %v", li)
|
||||||
return fmt.Errorf("unexpected listing: %v", li)
|
return errors.Errorf("unexpected listing: %v", li)
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Printf("complete listing: %v", li)
|
log.Printf("complete listing: %v", li)
|
||||||
@@ -535,7 +543,8 @@ func TestInternalMoveWithNotify(t *testing.T) {
|
|||||||
|
|
||||||
func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
|
func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
|
||||||
id := fmt.Sprintf("tincep%v", time.Now().Unix())
|
id := fmt.Sprintf("tincep%v", time.Now().Unix())
|
||||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
|
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
|
||||||
|
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||||
if !runInstance.wrappedIsExternal {
|
if !runInstance.wrappedIsExternal {
|
||||||
t.Skipf("Not external")
|
t.Skipf("Not external")
|
||||||
}
|
}
|
||||||
@@ -578,17 +587,17 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
|
|||||||
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
|
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
|
||||||
if !found {
|
if !found {
|
||||||
log.Printf("not found /test")
|
log.Printf("not found /test")
|
||||||
return fmt.Errorf("not found /test")
|
return errors.Errorf("not found /test")
|
||||||
}
|
}
|
||||||
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
|
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
|
||||||
if !found {
|
if !found {
|
||||||
log.Printf("not found /test/one")
|
log.Printf("not found /test/one")
|
||||||
return fmt.Errorf("not found /test/one")
|
return errors.Errorf("not found /test/one")
|
||||||
}
|
}
|
||||||
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
|
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
|
||||||
if !found {
|
if !found {
|
||||||
log.Printf("not found /test/one/test2")
|
log.Printf("not found /test/one/test2")
|
||||||
return fmt.Errorf("not found /test/one/test2")
|
return errors.Errorf("not found /test/one/test2")
|
||||||
}
|
}
|
||||||
li, err := runInstance.list(t, rootFs, "test/one")
|
li, err := runInstance.list(t, rootFs, "test/one")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -597,21 +606,21 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
|
|||||||
}
|
}
|
||||||
if len(li) != 1 {
|
if len(li) != 1 {
|
||||||
log.Printf("not expected listing /test/one: %v", li)
|
log.Printf("not expected listing /test/one: %v", li)
|
||||||
return fmt.Errorf("not expected listing /test/one: %v", li)
|
return errors.Errorf("not expected listing /test/one: %v", li)
|
||||||
}
|
}
|
||||||
if fi, ok := li[0].(os.FileInfo); ok {
|
if fi, ok := li[0].(os.FileInfo); ok {
|
||||||
if fi.Name() != "test2" {
|
if fi.Name() != "test2" {
|
||||||
log.Printf("not expected name: %v", fi.Name())
|
log.Printf("not expected name: %v", fi.Name())
|
||||||
return fmt.Errorf("not expected name: %v", fi.Name())
|
return errors.Errorf("not expected name: %v", fi.Name())
|
||||||
}
|
}
|
||||||
} else if di, ok := li[0].(fs.DirEntry); ok {
|
} else if di, ok := li[0].(fs.DirEntry); ok {
|
||||||
if di.Remote() != "test/one/test2" {
|
if di.Remote() != "test/one/test2" {
|
||||||
log.Printf("not expected remote: %v", di.Remote())
|
log.Printf("not expected remote: %v", di.Remote())
|
||||||
return fmt.Errorf("not expected remote: %v", di.Remote())
|
return errors.Errorf("not expected remote: %v", di.Remote())
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
log.Printf("unexpected listing: %v", li)
|
log.Printf("unexpected listing: %v", li)
|
||||||
return fmt.Errorf("unexpected listing: %v", li)
|
return errors.Errorf("unexpected listing: %v", li)
|
||||||
}
|
}
|
||||||
log.Printf("complete listing /test/one/test2")
|
log.Printf("complete listing /test/one/test2")
|
||||||
return nil
|
return nil
|
||||||
@@ -621,7 +630,8 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
|
|||||||
|
|
||||||
func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
|
func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
|
||||||
id := fmt.Sprintf("ticsadcf%v", time.Now().Unix())
|
id := fmt.Sprintf("ticsadcf%v", time.Now().Unix())
|
||||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
|
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
|
||||||
|
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||||
|
|
||||||
cfs, err := runInstance.getCacheFs(rootFs)
|
cfs, err := runInstance.getCacheFs(rootFs)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -653,7 +663,8 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
|
|||||||
|
|
||||||
func TestInternalCacheWrites(t *testing.T) {
|
func TestInternalCacheWrites(t *testing.T) {
|
||||||
id := "ticw"
|
id := "ticw"
|
||||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"writes": "true"})
|
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
|
||||||
|
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||||
|
|
||||||
cfs, err := runInstance.getCacheFs(rootFs)
|
cfs, err := runInstance.getCacheFs(rootFs)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -670,11 +681,9 @@ func TestInternalCacheWrites(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestInternalMaxChunkSizeRespected(t *testing.T) {
|
func TestInternalMaxChunkSizeRespected(t *testing.T) {
|
||||||
if runtime.GOOS == "windows" && runtime.GOARCH == "386" {
|
|
||||||
t.Skip("Skip test on windows/386")
|
|
||||||
}
|
|
||||||
id := fmt.Sprintf("timcsr%v", time.Now().Unix())
|
id := fmt.Sprintf("timcsr%v", time.Now().Unix())
|
||||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"workers": "1"})
|
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
|
||||||
|
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||||
|
|
||||||
cfs, err := runInstance.getCacheFs(rootFs)
|
cfs, err := runInstance.getCacheFs(rootFs)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -709,7 +718,8 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
|
|||||||
func TestInternalExpiredEntriesRemoved(t *testing.T) {
|
func TestInternalExpiredEntriesRemoved(t *testing.T) {
|
||||||
id := fmt.Sprintf("tieer%v", time.Now().Unix())
|
id := fmt.Sprintf("tieer%v", time.Now().Unix())
|
||||||
vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
|
vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
|
||||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
|
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, map[string]string{"info_age": "5s"}, nil)
|
||||||
|
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||||
cfs, err := runInstance.getCacheFs(rootFs)
|
cfs, err := runInstance.getCacheFs(rootFs)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
@@ -746,7 +756,9 @@ func TestInternalBug2117(t *testing.T) {
|
|||||||
vfsflags.Opt.DirCacheTime = time.Second * 10
|
vfsflags.Opt.DirCacheTime = time.Second * 10
|
||||||
|
|
||||||
id := fmt.Sprintf("tib2117%v", time.Now().Unix())
|
id := fmt.Sprintf("tib2117%v", time.Now().Unix())
|
||||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
|
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil,
|
||||||
|
map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
|
||||||
|
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||||
|
|
||||||
if runInstance.rootIsCrypt {
|
if runInstance.rootIsCrypt {
|
||||||
t.Skipf("skipping crypt")
|
t.Skipf("skipping crypt")
|
||||||
@@ -822,9 +834,9 @@ func newRun() *run {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if uploadDir == "" {
|
if uploadDir == "" {
|
||||||
r.tmpUploadDir, err = os.MkdirTemp("", "rclonecache-tmp")
|
r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(fmt.Sprintf("Failed to create temp dir: %v", err))
|
log.Fatalf("Failed to create temp dir: %v", err)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
r.tmpUploadDir = uploadDir
|
r.tmpUploadDir = uploadDir
|
||||||
@@ -847,7 +859,7 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
|
|||||||
return enc
|
return enc
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) {
|
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, cfg map[string]string, flags map[string]string) (fs.Fs, *cache.Persistent) {
|
||||||
fstest.Initialise()
|
fstest.Initialise()
|
||||||
remoteExists := false
|
remoteExists := false
|
||||||
for _, s := range config.FileSections() {
|
for _, s := range config.FileSections() {
|
||||||
@@ -907,9 +919,9 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
runInstance.rootIsCrypt = rootIsCrypt
|
runInstance.rootIsCrypt = rootIsCrypt
|
||||||
runInstance.dbPath = filepath.Join(config.GetCacheDir(), "cache-backend", cacheRemote+".db")
|
runInstance.dbPath = filepath.Join(config.CacheDir, "cache-backend", cacheRemote+".db")
|
||||||
runInstance.chunkPath = filepath.Join(config.GetCacheDir(), "cache-backend", cacheRemote)
|
runInstance.chunkPath = filepath.Join(config.CacheDir, "cache-backend", cacheRemote)
|
||||||
runInstance.vfsCachePath = filepath.Join(config.GetCacheDir(), "vfs", remote)
|
runInstance.vfsCachePath = filepath.Join(config.CacheDir, "vfs", remote)
|
||||||
boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true})
|
boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true})
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
@@ -940,15 +952,10 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
|
|||||||
}
|
}
|
||||||
err = f.Mkdir(context.Background(), "")
|
err = f.Mkdir(context.Background(), "")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
t.Cleanup(func() {
|
|
||||||
runInstance.cleanupFs(t, f)
|
|
||||||
})
|
|
||||||
|
|
||||||
return f, boltDb
|
return f, boltDb
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *run) cleanupFs(t *testing.T, f fs.Fs) {
|
func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
|
||||||
err := f.Features().Purge(context.Background(), "")
|
err := f.Features().Purge(context.Background(), "")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
cfs, err := r.getCacheFs(f)
|
cfs, err := r.getCacheFs(f)
|
||||||
@@ -970,7 +977,7 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
|
|||||||
chunk := int64(1024)
|
chunk := int64(1024)
|
||||||
cnt := size / chunk
|
cnt := size / chunk
|
||||||
left := size % chunk
|
left := size % chunk
|
||||||
f, err := os.CreateTemp("", "rclonecache-tempfile")
|
f, err := ioutil.TempFile("", "rclonecache-tempfile")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
for i := 0; i < int(cnt); i++ {
|
for i := 0; i < int(cnt); i++ {
|
||||||
@@ -1048,7 +1055,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
|
|||||||
checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
|
checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
|
||||||
|
|
||||||
if !noLengthCheck && size != int64(len(checkSample)) {
|
if !noLengthCheck && size != int64(len(checkSample)) {
|
||||||
return checkSample, fmt.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
|
return checkSample, errors.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
|
||||||
}
|
}
|
||||||
return checkSample, nil
|
return checkSample, nil
|
||||||
}
|
}
|
||||||
@@ -1098,6 +1105,27 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error)
|
|||||||
return l, err
|
return l, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
|
||||||
|
in, err := os.Open(src)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
_ = in.Close()
|
||||||
|
}()
|
||||||
|
|
||||||
|
out, err := os.Create(dst)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
_ = out.Close()
|
||||||
|
}()
|
||||||
|
|
||||||
|
_, err = io.Copy(out, in)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
|
func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
@@ -1222,7 +1250,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
|
|||||||
case state = <-buCh:
|
case state = <-buCh:
|
||||||
// continue
|
// continue
|
||||||
case <-time.After(maxDuration):
|
case <-time.After(maxDuration):
|
||||||
waitCh <- fmt.Errorf("Timed out waiting for background upload: %v", remote)
|
waitCh <- errors.Errorf("Timed out waiting for background upload: %v", remote)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
checkRemote := state.Remote
|
checkRemote := state.Remote
|
||||||
@@ -1239,7 +1267,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
waitCh <- fmt.Errorf("Too many attempts to wait for the background upload: %v", remote)
|
waitCh <- errors.Errorf("Too many attempts to wait for the background upload: %v", remote)
|
||||||
}()
|
}()
|
||||||
return waitCh
|
return waitCh
|
||||||
}
|
}
|
||||||
|
|||||||
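A note on the recurring pattern in the hunks above: on the v1.65.1 side the test helper is newCacheFs(t, remote, id, needRemote, purge, flags) and teardown is registered inside the helper via t.Cleanup, while the branch side threads an extra cfg map[string]string through newCacheFs and has every test keep the returned *cache.Persistent so it can defer runInstance.cleanupFs(t, rootFs, boltDb). A minimal sketch of the two cleanup conventions, using placeholder types rather than rclone's real fs.Fs and cache.Persistent:

    package cache_test

    import "testing"

    // Placeholders standing in for the fs.Fs and *cache.Persistent values in the diff.
    type fakeFs struct{}
    type fakeBolt struct{}

    // newCacheFs mirrors the branch-side signature, where cfg and flags are separate maps.
    func newCacheFs(t *testing.T, cfg, flags map[string]string) (*fakeFs, *fakeBolt) {
        t.Helper()
        return &fakeFs{}, &fakeBolt{} // the real helper builds and purges a cache remote
    }

    func cleanupFs(t *testing.T, f *fakeFs, b *fakeBolt) { t.Helper() }

    func TestCleanupConventions(t *testing.T) {
        rootFs, boltDb := newCacheFs(t, nil, map[string]string{"writes": "true"})

        // Branch-side convention: the caller owns teardown explicitly.
        defer cleanupFs(t, rootFs, boltDb)

        // The v1.65.1-side convention would instead be registered inside the helper:
        //   t.Cleanup(func() { cleanupFs(t, rootFs, boltDb) })
    }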
8 backend/cache/cache_test.go vendored
@@ -1,7 +1,7 @@
 // Test Cache filesystem interface

-//go:build !plan9 && !js && !race
+// +build !plan9,!js
-// +build !plan9,!js,!race
+// +build !race

 package cache_test

@@ -18,8 +18,8 @@ func TestIntegration(t *testing.T) {
 fstests.Run(t, &fstests.Opt{
 RemoteName: "TestCache:",
 NilObject: (*cache.Object)(nil),
-UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter"},
+UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt"},
-UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
+UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
 SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
 })
 }
1 backend/cache/cache_unsupported.go vendored
@@ -1,7 +1,6 @@
 // Build for cache for unsupported platforms to stop go complaining
 // about "no buildable Go source files "

-//go:build plan9 || js
 // +build plan9 js

 package cache
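The build-constraint churn in this header (and in the Go files below) is the difference between the two tag syntaxes: the //go:build form introduced in Go 1.17 and the older // +build comment form. The v1.65.1 side carries both lines, kept in sync by gofmt, while the branch side predates the new syntax and only has // +build. For reference, a complete file excluded on plan9 and js looks like this (nothing here is rclone-specific):

    //go:build !plan9 && !js
    // +build !plan9,!js

    // Package cache is only compiled on platforms where the backend is supported.
    package cache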
32 backend/cache/cache_upload_test.go vendored
@@ -1,5 +1,5 @@
-//go:build !plan9 && !js && !race
+// +build !plan9,!js
-// +build !plan9,!js,!race
+// +build !race

 package cache_test

@@ -21,8 +21,10 @@ import (

 func TestInternalUploadTempDirCreated(t *testing.T) {
 id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
-runInstance.newCacheFs(t, remoteName, id, false, true,
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
+nil,
 map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)})
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 _, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
 require.NoError(t, err)
@@ -61,7 +63,9 @@ func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltD
 func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
 id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
 rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+nil,
 map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"})
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
 }
@@ -69,15 +73,19 @@ func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
 func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
 id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
 rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+nil,
 map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"})
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
 }

 func TestInternalUploadMoveExistingFile(t *testing.T) {
 id := fmt.Sprintf("tiumef%v", time.Now().Unix())
-rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+nil,
 map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 err := rootFs.Mkdir(context.Background(), "one")
 require.NoError(t, err)
@@ -111,8 +119,10 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {

 func TestInternalUploadTempPathCleaned(t *testing.T) {
 id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
-rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+nil,
 map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 err := rootFs.Mkdir(context.Background(), "one")
 require.NoError(t, err)
@@ -152,19 +162,21 @@ func TestInternalUploadTempPathCleaned(t *testing.T) {

 func TestInternalUploadQueueMoreFiles(t *testing.T) {
 id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
-rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
+rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+nil,
 map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 err := rootFs.Mkdir(context.Background(), "test")
 require.NoError(t, err)
 minSize := 5242880
 maxSize := 10485760
 totalFiles := 10
-randInstance := rand.New(rand.NewSource(time.Now().Unix()))
+rand.Seed(time.Now().Unix())

 lastFile := ""
 for i := 0; i < totalFiles; i++ {
-size := int64(randInstance.Intn(maxSize-minSize) + minSize)
+size := int64(rand.Intn(maxSize-minSize) + minSize)
 testReader := runInstance.randomReader(t, size)
 remote := "test/" + strconv.Itoa(i) + ".bin"
 runInstance.writeRemoteReader(t, rootFs, remote, testReader)
@@ -201,7 +213,9 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
 func TestInternalUploadTempFileOperations(t *testing.T) {
 id := "tiutfo"
 rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+nil,
 map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 boltDb.PurgeTempUploads()

@@ -329,7 +343,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 func TestInternalUploadUploadingFileOperations(t *testing.T) {
 id := "tiuufo"
 rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+nil,
 map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
+defer runInstance.cleanupFs(t, rootFs, boltDb)

 boltDb.PurgeTempUploads()

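One hunk in TestInternalUploadQueueMoreFiles above is not about the cache backend at all: v1.65.1 draws sizes from a private generator built with rand.New(rand.NewSource(...)), whereas the branch still seeds the package-level source with rand.Seed, which has been deprecated since Go 1.20. A small, self-contained comparison (the bounds mirror the test; everything else is illustrative):

    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    func main() {
        minSize, maxSize := 5242880, 10485760

        // Branch style: seeds the shared global source (deprecated since Go 1.20).
        rand.Seed(time.Now().Unix())
        fmt.Println(int64(rand.Intn(maxSize-minSize) + minSize))

        // v1.65.1 style: a private generator, no global state shared between tests.
        randInstance := rand.New(rand.NewSource(time.Now().Unix()))
        fmt.Println(int64(randInstance.Intn(maxSize-minSize) + minSize))
    }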
1 backend/cache/directory.go vendored
@@ -1,4 +1,3 @@
-//go:build !plan9 && !js
 // +build !plan9,!js

 package cache
7 backend/cache/handle.go vendored
@@ -1,11 +1,9 @@
-//go:build !plan9 && !js
 // +build !plan9,!js

 package cache

 import (
 "context"
-"errors"
 "fmt"
 "io"
 "path"
@@ -14,6 +12,7 @@ import (
 "sync"
 "time"

+"github.com/pkg/errors"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/operations"
 )
@@ -243,7 +242,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
 return nil, io.ErrUnexpectedEOF
 }

-return nil, fmt.Errorf("chunk not found %v", chunkStart)
+return nil, errors.Errorf("chunk not found %v", chunkStart)
 }

 // first chunk will be aligned with the start
@@ -323,7 +322,7 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
 fs.Debugf(r, "moving offset end (%v) from %v to %v", r.cachedObject.Size(), r.offset, r.cachedObject.Size()+offset)
 r.offset = r.cachedObject.Size() + offset
 default:
-err = fmt.Errorf("cache: unimplemented seek whence %v", whence)
+err = errors.Errorf("cache: unimplemented seek whence %v", whence)
 }

 chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
15 backend/cache/object.go vendored
@@ -1,16 +1,15 @@
-//go:build !plan9 && !js
 // +build !plan9,!js

 package cache

 import (
 "context"
-"fmt"
 "io"
 "path"
 "sync"
 "time"

+"github.com/pkg/errors"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/hash"
 "github.com/rclone/rclone/lib/readers"
@@ -178,14 +177,10 @@ func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
 }
 if o.isTempFile() {
 liveObject, err = o.ParentFs.NewObject(ctx, o.Remote())
-if err != nil {
-err = fmt.Errorf("in parent fs %v: %w", o.ParentFs, err)
-}
+err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
 } else {
 liveObject, err = o.CacheFs.Fs.NewObject(ctx, o.Remote())
-if err != nil {
-err = fmt.Errorf("in cache fs %v: %w", o.CacheFs.Fs, err)
-}
+err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
 }
 if err != nil {
 fs.Errorf(o, "error refreshing object in : %v", err)
@@ -257,7 +252,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 defer o.CacheFs.backgroundRunner.play()
 // don't allow started uploads
 if o.isTempFile() && o.tempFileStartedUpload() {
-return fmt.Errorf("%v is currently uploading, can't update", o)
+return errors.Errorf("%v is currently uploading, can't update", o)
 }
 }
 fs.Debugf(o, "updating object contents with size %v", src.Size())
@@ -296,7 +291,7 @@ func (o *Object) Remove(ctx context.Context) error {
 defer o.CacheFs.backgroundRunner.play()
 // don't allow started uploads
 if o.isTempFile() && o.tempFileStartedUpload() {
-return fmt.Errorf("%v is currently uploading, can't delete", o)
+return errors.Errorf("%v is currently uploading, can't delete", o)
 }
 }
 err := o.Object.Remove(ctx)
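Most of the error-message hunks in handle.go and object.go swap github.com/pkg/errors helpers (errors.Errorf, errors.Wrapf) on the branch side for the standard library on the v1.65.1 side. The practical difference is wrapping: fmt.Errorf with the %w verb keeps the original error in the chain so errors.Is/errors.As still match, while %v (and a plain Errorf) only formats the text. A short standard-library-only sketch of that distinction:

    package main

    import (
        "errors"
        "fmt"
        "os"
    )

    func main() {
        _, err := os.Open("/does/not/exist")

        // %w wraps: the cause stays reachable, as on the v1.65.1 side of the diff.
        wrapped := fmt.Errorf("in parent fs %v: %w", "remote:", err)
        fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true

        // %v only embeds the text, so the error chain is lost.
        flattened := fmt.Errorf("in parent fs %v: %v", "remote:", err)
        fmt.Println(errors.Is(flattened, os.ErrNotExist)) // false
    }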
7 backend/cache/plex.go vendored
@@ -1,4 +1,3 @@
-//go:build !plan9 && !js
 // +build !plan9,!js

 package cache
@@ -8,7 +7,7 @@ import (
 "crypto/tls"
 "encoding/json"
 "fmt"
-"io"
+"io/ioutil"
 "net/http"
 "net/url"
 "strings"
@@ -167,7 +166,7 @@ func (p *plexConnector) listenWebsocket() {
 continue
 }
 var data []byte
-data, err = io.ReadAll(resp.Body)
+data, err = ioutil.ReadAll(resp.Body)
 if err != nil {
 continue
 }
@@ -213,7 +212,7 @@ func (p *plexConnector) authenticate() error {
 var data map[string]interface{}
 err = json.NewDecoder(resp.Body).Decode(&data)
 if err != nil {
-return fmt.Errorf("failed to obtain token: %w", err)
+return fmt.Errorf("failed to obtain token: %v", err)
 }
 tokenGen, ok := get(data, "user", "authToken")
 if !ok {
10 backend/cache/storage_memory.go vendored
@@ -1,15 +1,14 @@
-//go:build !plan9 && !js
 // +build !plan9,!js

 package cache

 import (
-"fmt"
 "strconv"
 "strings"
 "time"

 cache "github.com/patrickmn/go-cache"
+"github.com/pkg/errors"
 "github.com/rclone/rclone/fs"
 )

@@ -53,7 +52,7 @@ func (m *Memory) GetChunk(cachedObject *Object, offset int64) ([]byte, error) {
 return data, nil
 }

-return nil, fmt.Errorf("couldn't get cached object data at offset %v", offset)
+return nil, errors.Errorf("couldn't get cached object data at offset %v", offset)
 }

 // AddChunk adds a new chunk of a cached object
@@ -76,7 +75,10 @@ func (m *Memory) CleanChunksByAge(chunkAge time.Duration) {

 // CleanChunksByNeed will cleanup chunks after the FS passes a specific chunk
 func (m *Memory) CleanChunksByNeed(offset int64) {
-for key := range m.db.Items() {
+var items map[string]cache.Item
+
+items = m.db.Items()
+for key := range items {
 sepIdx := strings.LastIndex(key, "-")
 keyOffset, err := strconv.ParseInt(key[sepIdx+1:], 10, 64)
 if err != nil {
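The CleanChunksByNeed hunk above is purely stylistic: both sides iterate the map returned by go-cache's Items(), the branch just names the intermediate variable first. Items() returns a copy of the unexpired entries, so ranging over it directly is safe. A hedged illustration of the same key-parsing loop against github.com/patrickmn/go-cache (the path-offset key layout mirrors the code above):

    package main

    import (
        "fmt"
        "strconv"
        "strings"
        "time"

        gocache "github.com/patrickmn/go-cache"
    )

    func main() {
        db := gocache.New(5*time.Minute, 10*time.Minute)
        db.Set("remote/data.bin-0", []byte("chunk0"), gocache.DefaultExpiration)
        db.Set("remote/data.bin-1024", []byte("chunk1"), gocache.DefaultExpiration)

        // Items() hands back a snapshot map, so it can be ranged directly
        // (v1.65.1 style) or assigned to a variable first (branch style).
        for key := range db.Items() {
            sepIdx := strings.LastIndex(key, "-")
            offset, err := strconv.ParseInt(key[sepIdx+1:], 10, 64)
            if err != nil {
                continue
            }
            fmt.Println(key, offset)
        }
    }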
94 backend/cache/storage_persistent.go vendored
@@ -1,4 +1,3 @@
-//go:build !plan9 && !js
 // +build !plan9,!js

 package cache
@@ -9,6 +8,7 @@ import (
 "encoding/binary"
 "encoding/json"
 "fmt"
+"io/ioutil"
 "os"
 "path"
 "strconv"
@@ -16,6 +16,7 @@ import (
 "sync"
 "time"

+"github.com/pkg/errors"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/walk"
 bolt "go.etcd.io/bbolt"
@@ -118,11 +119,11 @@ func (b *Persistent) connect() error {

 err = os.MkdirAll(b.dataPath, os.ModePerm)
 if err != nil {
-return fmt.Errorf("failed to create a data directory %q: %w", b.dataPath, err)
+return errors.Wrapf(err, "failed to create a data directory %q", b.dataPath)
 }
 b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: b.features.DbWaitTime})
 if err != nil {
-return fmt.Errorf("failed to open a cache connection to %q: %w", b.dbPath, err)
+return errors.Wrapf(err, "failed to open a cache connection to %q", b.dbPath)
 }
 if b.features.PurgeDb {
 b.Purge()
@@ -174,7 +175,7 @@ func (b *Persistent) GetDir(remote string) (*Directory, error) {
 err := b.db.View(func(tx *bolt.Tx) error {
 bucket := b.getBucket(remote, false, tx)
 if bucket == nil {
-return fmt.Errorf("couldn't open bucket (%v)", remote)
+return errors.Errorf("couldn't open bucket (%v)", remote)
 }

 data := bucket.Get([]byte("."))
@@ -182,7 +183,7 @@ func (b *Persistent) GetDir(remote string) (*Directory, error) {
 return json.Unmarshal(data, cd)
 }

-return fmt.Errorf("%v not found", remote)
+return errors.Errorf("%v not found", remote)
 })

 return cd, err
@@ -207,7 +208,7 @@ func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error {
 bucket = b.getBucket(cachedDirs[0].Dir, true, tx)
 }
 if bucket == nil {
-return fmt.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir)
+return errors.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir)
 }

 for _, cachedDir := range cachedDirs {
@@ -224,7 +225,7 @@ func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error {

 encoded, err := json.Marshal(cachedDir)
 if err != nil {
-return fmt.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
+return errors.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
 }
 err = b.Put([]byte("."), encoded)
 if err != nil {
@@ -242,17 +243,17 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
 err := b.db.View(func(tx *bolt.Tx) error {
 bucket := b.getBucket(cachedDir.abs(), false, tx)
 if bucket == nil {
-return fmt.Errorf("couldn't open bucket (%v)", cachedDir.abs())
+return errors.Errorf("couldn't open bucket (%v)", cachedDir.abs())
 }

 val := bucket.Get([]byte("."))
 if val != nil {
 err := json.Unmarshal(val, cachedDir)
 if err != nil {
-return fmt.Errorf("error during unmarshalling obj: %w", err)
+return errors.Errorf("error during unmarshalling obj: %v", err)
 }
 } else {
-return fmt.Errorf("missing cached dir: %v", cachedDir)
+return errors.Errorf("missing cached dir: %v", cachedDir)
 }

 c := bucket.Cursor()
@@ -267,7 +268,7 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
 // we try to find a cached meta for the dir
 currentBucket := c.Bucket().Bucket(k)
 if currentBucket == nil {
-return fmt.Errorf("couldn't open bucket (%v)", string(k))
+return errors.Errorf("couldn't open bucket (%v)", string(k))
 }

 metaKey := currentBucket.Get([]byte("."))
@@ -316,7 +317,7 @@ func (b *Persistent) RemoveDir(fp string) error {
 err = b.db.Update(func(tx *bolt.Tx) error {
 bucket := b.getBucket(cleanPath(parentDir), false, tx)
 if bucket == nil {
-return fmt.Errorf("couldn't open bucket (%v)", fp)
+return errors.Errorf("couldn't open bucket (%v)", fp)
 }
 // delete the cached dir
 err := bucket.DeleteBucket([]byte(cleanPath(dirName)))
@@ -376,13 +377,13 @@ func (b *Persistent) GetObject(cachedObject *Object) (err error) {
 return b.db.View(func(tx *bolt.Tx) error {
 bucket := b.getBucket(cachedObject.Dir, false, tx)
 if bucket == nil {
-return fmt.Errorf("couldn't open parent bucket for %v", cachedObject.Dir)
+return errors.Errorf("couldn't open parent bucket for %v", cachedObject.Dir)
 }
 val := bucket.Get([]byte(cachedObject.Name))
 if val != nil {
 return json.Unmarshal(val, cachedObject)
 }
-return fmt.Errorf("couldn't find object (%v)", cachedObject.Name)
+return errors.Errorf("couldn't find object (%v)", cachedObject.Name)
 })
 }

@@ -391,16 +392,16 @@ func (b *Persistent) AddObject(cachedObject *Object) error {
 return b.db.Update(func(tx *bolt.Tx) error {
 bucket := b.getBucket(cachedObject.Dir, true, tx)
 if bucket == nil {
-return fmt.Errorf("couldn't open parent bucket for %v", cachedObject)
+return errors.Errorf("couldn't open parent bucket for %v", cachedObject)
 }
 // cache Object Info
 encoded, err := json.Marshal(cachedObject)
 if err != nil {
-return fmt.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
+return errors.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
 }
 err = bucket.Put([]byte(cachedObject.Name), encoded)
 if err != nil {
-return fmt.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
+return errors.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
 }
 return nil
 })
@@ -412,7 +413,7 @@ func (b *Persistent) RemoveObject(fp string) error {
 return b.db.Update(func(tx *bolt.Tx) error {
 bucket := b.getBucket(cleanPath(parentDir), false, tx)
 if bucket == nil {
-return fmt.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir))
+return errors.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir))
 }
 err := bucket.Delete([]byte(cleanPath(objName)))
 if err != nil {
@@ -444,7 +445,7 @@ func (b *Persistent) HasEntry(remote string) bool {
 err := b.db.View(func(tx *bolt.Tx) error {
 bucket := b.getBucket(dir, false, tx)
 if bucket == nil {
-return fmt.Errorf("couldn't open parent bucket for %v", remote)
+return errors.Errorf("couldn't open parent bucket for %v", remote)
 }
 if f := bucket.Bucket([]byte(name)); f != nil {
 return nil
@@ -453,9 +454,12 @@ func (b *Persistent) HasEntry(remote string) bool {
 return nil
 }

-return fmt.Errorf("couldn't find object (%v)", remote)
+return errors.Errorf("couldn't find object (%v)", remote)
 })
-return err == nil
+if err == nil {
+return true
+}
+return false
 }

 // HasChunk confirms the existence of a single chunk of an object
@@ -472,7 +476,7 @@ func (b *Persistent) GetChunk(cachedObject *Object, offset int64) ([]byte, error
 var data []byte

 fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10))
-data, err := os.ReadFile(fp)
+data, err := ioutil.ReadFile(fp)
 if err != nil {
 return nil, err
 }
@@ -485,7 +489,7 @@ func (b *Persistent) AddChunk(fp string, data []byte, offset int64) error {
 _ = os.MkdirAll(path.Join(b.dataPath, fp), os.ModePerm)

 filePath := path.Join(b.dataPath, fp, strconv.FormatInt(offset, 10))
-err := os.WriteFile(filePath, data, os.ModePerm)
+err := ioutil.WriteFile(filePath, data, os.ModePerm)
 if err != nil {
 return err
 }
@@ -550,7 +554,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
 err := b.db.Update(func(tx *bolt.Tx) error {
 dataTsBucket := tx.Bucket([]byte(DataTsBucket))
 if dataTsBucket == nil {
-return fmt.Errorf("couldn't open (%v) bucket", DataTsBucket)
+return errors.Errorf("Couldn't open (%v) bucket", DataTsBucket)
 }
 // iterate through ts
 c := dataTsBucket.Cursor()
@@ -728,7 +732,7 @@ func (b *Persistent) GetChunkTs(path string, offset int64) (time.Time, error) {
 return nil
 }
 }
-return fmt.Errorf("not found %v-%v", path, offset)
+return errors.Errorf("not found %v-%v", path, offset)
 })

 return t, err
@@ -768,7 +772,7 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
 return b.db.Update(func(tx *bolt.Tx) error {
 bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
 if err != nil {
-return fmt.Errorf("couldn't bucket for %v", tempBucket)
+return errors.Errorf("couldn't bucket for %v", tempBucket)
 }
 tempObj := &tempUploadInfo{
 DestPath: destPath,
@@ -779,11 +783,11 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
 // cache Object Info
 encoded, err := json.Marshal(tempObj)
 if err != nil {
-return fmt.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
+return errors.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
 }
 err = bucket.Put([]byte(destPath), encoded)
 if err != nil {
-return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err)
+return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
 }

 return nil
@@ -798,7 +802,7 @@ func (b *Persistent) getPendingUpload(inRoot string, waitTime time.Duration) (de
 err = b.db.Update(func(tx *bolt.Tx) error {
 bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
 if err != nil {
-return fmt.Errorf("couldn't bucket for %v", tempBucket)
+return errors.Errorf("couldn't bucket for %v", tempBucket)
 }

 c := bucket.Cursor()
@@ -831,7 +835,7 @@ func (b *Persistent) getPendingUpload(inRoot string, waitTime time.Duration) (de
 return nil
 }

-return fmt.Errorf("no pending upload found")
+return errors.Errorf("no pending upload found")
 })

 return destPath, err
@@ -842,14 +846,14 @@ func (b *Persistent) SearchPendingUpload(remote string) (started bool, err error
 err = b.db.View(func(tx *bolt.Tx) error {
 bucket := tx.Bucket([]byte(tempBucket))
 if bucket == nil {
-return fmt.Errorf("couldn't bucket for %v", tempBucket)
+return errors.Errorf("couldn't bucket for %v", tempBucket)
 }

 var tempObj = &tempUploadInfo{}
 v := bucket.Get([]byte(remote))
 err = json.Unmarshal(v, tempObj)
 if err != nil {
-return fmt.Errorf("pending upload (%v) not found %v", remote, err)
+return errors.Errorf("pending upload (%v) not found %v", remote, err)
 }

 started = tempObj.Started
@@ -864,7 +868,7 @@ func (b *Persistent) searchPendingUploadFromDir(dir string) (remotes []string, e
 err = b.db.View(func(tx *bolt.Tx) error {
 bucket := tx.Bucket([]byte(tempBucket))
 if bucket == nil {
-return fmt.Errorf("couldn't bucket for %v", tempBucket)
+return errors.Errorf("couldn't bucket for %v", tempBucket)
 }

 c := bucket.Cursor()
@@ -894,22 +898,22 @@ func (b *Persistent) rollbackPendingUpload(remote string) error {
 return b.db.Update(func(tx *bolt.Tx) error {
 bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
 if err != nil {
-return fmt.Errorf("couldn't bucket for %v", tempBucket)
+return errors.Errorf("couldn't bucket for %v", tempBucket)
 }
 var tempObj = &tempUploadInfo{}
 v := bucket.Get([]byte(remote))
 err = json.Unmarshal(v, tempObj)
 if err != nil {
-return fmt.Errorf("pending upload (%v) not found: %w", remote, err)
+return errors.Errorf("pending upload (%v) not found %v", remote, err)
 }
 tempObj.Started = false
 v2, err := json.Marshal(tempObj)
 if err != nil {
-return fmt.Errorf("pending upload not updated: %w", err)
+return errors.Errorf("pending upload not updated %v", err)
 }
 err = bucket.Put([]byte(tempObj.DestPath), v2)
 if err != nil {
-return fmt.Errorf("pending upload not updated: %w", err)
+return errors.Errorf("pending upload not updated %v", err)
 }
 return nil
 })
@@ -922,7 +926,7 @@ func (b *Persistent) removePendingUpload(remote string) error {
 return b.db.Update(func(tx *bolt.Tx) error {
 bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
 if err != nil {
-return fmt.Errorf("couldn't bucket for %v", tempBucket)
+return errors.Errorf("couldn't bucket for %v", tempBucket)
 }
 return bucket.Delete([]byte(remote))
 })
@@ -937,17 +941,17 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
 return b.db.Update(func(tx *bolt.Tx) error {
 bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
 if err != nil {
-return fmt.Errorf("couldn't bucket for %v", tempBucket)
+return errors.Errorf("couldn't bucket for %v", tempBucket)
 }

 var tempObj = &tempUploadInfo{}
 v := bucket.Get([]byte(remote))
 err = json.Unmarshal(v, tempObj)
 if err != nil {
-return fmt.Errorf("pending upload (%v) not found %v", remote, err)
+return errors.Errorf("pending upload (%v) not found %v", remote, err)
 }
 if tempObj.Started {
-return fmt.Errorf("pending upload already started %v", remote)
+return errors.Errorf("pending upload already started %v", remote)
 }
 err = fn(tempObj)
 if err != nil {
@@ -965,11 +969,11 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
 }
 v2, err := json.Marshal(tempObj)
 if err != nil {
-return fmt.Errorf("pending upload not updated: %w", err)
+return errors.Errorf("pending upload not updated %v", err)
 }
 err = bucket.Put([]byte(tempObj.DestPath), v2)
 if err != nil {
-return fmt.Errorf("pending upload not updated: %w", err)
+return errors.Errorf("pending upload not updated %v", err)
 }

 return nil
@@ -1010,11 +1014,11 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) erro
 // cache Object Info
 encoded, err := json.Marshal(tempObj)
 if err != nil {
-return fmt.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
+return errors.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
 }
 err = bucket.Put([]byte(destPath), encoded)
 if err != nil {
-return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err)
+return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
 }
 fs.Debugf(cacheFs, "reconciled temporary upload: %v", destPath)
 }
@@ -8,10 +8,10 @@ import (
|
|||||||
"crypto/sha1"
|
"crypto/sha1"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
gohash "hash"
|
gohash "hash"
|
||||||
"io"
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"path"
|
"path"
|
||||||
"regexp"
|
"regexp"
|
||||||
@@ -21,6 +21,7 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fs/accounting"
|
"github.com/rclone/rclone/fs/accounting"
|
||||||
"github.com/rclone/rclone/fs/cache"
|
"github.com/rclone/rclone/fs/cache"
|
||||||
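
This import swap is the thread running through most hunks in this file: the left column wraps errors with the standard library (fmt.Errorf and the %w verb, Go 1.13+), the right column with github.com/pkg/errors. A small sketch of the equivalence; the remote value and the sentinel error below are invented for illustration:

```go
package main

import (
	"errors"
	"fmt"
)

var errCause = errors.New("connection reset") // hypothetical underlying failure

func main() {
	remote := "myremote:path" // hypothetical value

	// Left column of the diff: stdlib wrapping with the %w verb.
	wrapped := fmt.Errorf("failed to parse remote %q to wrap: %w", remote, errCause)

	// The cause remains reachable for callers.
	fmt.Println(errors.Is(wrapped, errCause)) // true
	fmt.Println(wrapped)

	// The right column expresses the same thing with github.com/pkg/errors:
	//   errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
	// which also keeps the cause retrievable through its Unwrap method.
}
```
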
@@ -31,6 +32,7 @@ import (
|
|||||||
"github.com/rclone/rclone/fs/operations"
|
"github.com/rclone/rclone/fs/operations"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
//
|
||||||
// Chunker's composite files have one or more chunks
|
// Chunker's composite files have one or more chunks
|
||||||
// and optional metadata object. If it's present,
|
// and optional metadata object. If it's present,
|
||||||
// meta object is named after the original file.
|
// meta object is named after the original file.
|
||||||
@@ -63,7 +65,7 @@ import (
|
|||||||
// length of 13 decimals it makes a 7-digit base-36 number.
|
// length of 13 decimals it makes a 7-digit base-36 number.
|
||||||
//
|
//
|
||||||
// When transactions is set to the norename style, data chunks will
|
// When transactions is set to the norename style, data chunks will
|
||||||
// keep their temporary chunk names (with the transaction identifier
|
// keep their temporary chunk names (with the transacion identifier
|
||||||
// suffix). To distinguish them from temporary chunks, the txn field
|
// suffix). To distinguish them from temporary chunks, the txn field
|
||||||
// of the metadata file is set to match the transaction identifier of
|
// of the metadata file is set to match the transaction identifier of
|
||||||
// the data chunks.
|
// the data chunks.
|
||||||
@@ -77,6 +79,7 @@ import (
|
|||||||
// Metadata format v1 does not define any control chunk types,
|
// Metadata format v1 does not define any control chunk types,
|
||||||
// they are currently ignored aka reserved.
|
// they are currently ignored aka reserved.
|
||||||
// In future they can be used to implement resumable uploads etc.
|
// In future they can be used to implement resumable uploads etc.
|
||||||
|
//
|
||||||
const (
|
const (
|
||||||
ctrlTypeRegStr = `[a-z][a-z0-9]{2,6}`
|
ctrlTypeRegStr = `[a-z][a-z0-9]{2,6}`
|
||||||
tempSuffixFormat = `_%04s`
|
tempSuffixFormat = `_%04s`
|
||||||
@@ -147,13 +150,12 @@ func init() {
|
|||||||
Name: "remote",
|
Name: "remote",
|
||||||
Required: true,
|
Required: true,
|
||||||
Help: `Remote to chunk/unchunk.
|
Help: `Remote to chunk/unchunk.
|
||||||
|
|
||||||
Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
|
Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
|
||||||
"myremote:bucket" or maybe "myremote:" (not recommended).`,
|
"myremote:bucket" or maybe "myremote:" (not recommended).`,
|
||||||
}, {
|
}, {
|
||||||
Name: "chunk_size",
|
Name: "chunk_size",
|
||||||
Advanced: false,
|
Advanced: false,
|
||||||
Default: fs.SizeSuffix(2147483648), // 2 GiB
|
Default: fs.SizeSuffix(2147483648), // 2GB
|
||||||
Help: `Files larger than chunk size will be split in chunks.`,
|
Help: `Files larger than chunk size will be split in chunks.`,
|
||||||
}, {
|
}, {
|
||||||
Name: "name_format",
|
Name: "name_format",
|
||||||
@@ -161,7 +163,6 @@ Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
|
|||||||
Hide: fs.OptionHideCommandLine,
|
Hide: fs.OptionHideCommandLine,
|
||||||
Default: `*.rclone_chunk.###`,
|
Default: `*.rclone_chunk.###`,
|
||||||
Help: `String format of chunk file names.
|
Help: `String format of chunk file names.
|
||||||
|
|
||||||
The two placeholders are: base file name (*) and chunk number (#...).
|
The two placeholders are: base file name (*) and chunk number (#...).
|
||||||
There must be one and only one asterisk and one or more consecutive hash characters.
|
There must be one and only one asterisk and one or more consecutive hash characters.
|
||||||
If chunk number has less digits than the number of hashes, it is left-padded by zeros.
|
If chunk number has less digits than the number of hashes, it is left-padded by zeros.
|
||||||
@@ -173,57 +174,48 @@ Possible chunk files are ignored if their name does not match given format.`,
|
|||||||
Hide: fs.OptionHideCommandLine,
|
Hide: fs.OptionHideCommandLine,
|
||||||
Default: 1,
|
Default: 1,
|
||||||
Help: `Minimum valid chunk number. Usually 0 or 1.
|
Help: `Minimum valid chunk number. Usually 0 or 1.
|
||||||
|
|
||||||
By default chunk numbers start from 1.`,
|
By default chunk numbers start from 1.`,
|
||||||
}, {
|
}, {
|
||||||
Name: "meta_format",
|
Name: "meta_format",
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
Hide: fs.OptionHideCommandLine,
|
Hide: fs.OptionHideCommandLine,
|
||||||
Default: "simplejson",
|
Default: "simplejson",
|
||||||
Help: `Format of the metadata object or "none".
|
Help: `Format of the metadata object or "none". By default "simplejson".
|
||||||
|
|
||||||
By default "simplejson".
|
|
||||||
Metadata is a small JSON file named after the composite file.`,
|
Metadata is a small JSON file named after the composite file.`,
|
||||||
Examples: []fs.OptionExample{{
|
Examples: []fs.OptionExample{{
|
||||||
Value: "none",
|
Value: "none",
|
||||||
Help: `Do not use metadata files at all.
|
Help: `Do not use metadata files at all. Requires hash type "none".`,
|
||||||
Requires hash type "none".`,
|
|
||||||
}, {
|
}, {
|
||||||
Value: "simplejson",
|
Value: "simplejson",
|
||||||
Help: `Simple JSON supports hash sums and chunk validation.
|
Help: `Simple JSON supports hash sums and chunk validation.
|
||||||
|
|
||||||
It has the following fields: ver, size, nchunks, md5, sha1.`,
|
It has the following fields: ver, size, nchunks, md5, sha1.`,
|
||||||
}},
|
}},
|
||||||
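
The help text above names the simplejson metadata fields (ver, size, nchunks, md5, sha1). A hypothetical example of producing such an object; the struct tags, types and values are assumptions for illustration rather than the exact on-disk format:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// metaSimpleJSON mirrors the field list from the help text; tags and
// types are assumed for this sketch.
type metaSimpleJSON struct {
	Version  int    `json:"ver"`
	Size     int64  `json:"size"`
	ChunkNum int    `json:"nchunks"`
	MD5      string `json:"md5,omitempty"`
	SHA1     string `json:"sha1,omitempty"`
}

func main() {
	meta := metaSimpleJSON{
		Version:  1,
		Size:     2684354560, // hypothetical composite file size in bytes
		ChunkNum: 2,          // number of data chunks
		MD5:      "9e107d9d372bb6826bd81d3542a419d6", // hypothetical hash
	}
	out, err := json.Marshal(meta)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// {"ver":1,"size":2684354560,"nchunks":2,"md5":"9e107d9d372bb6826bd81d3542a419d6"}
}
```
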
}, {
|
}, {
|
||||||
Name: "hash_type",
|
Name: "hash_type",
|
||||||
Advanced: false,
|
Advanced: false,
|
||||||
Default: "md5",
|
Default: "md5",
|
||||||
Help: `Choose how chunker handles hash sums.
|
Help: `Choose how chunker handles hash sums. All modes but "none" require metadata.`,
|
||||||
|
|
||||||
All modes but "none" require metadata.`,
|
|
||||||
Examples: []fs.OptionExample{{
|
Examples: []fs.OptionExample{{
|
||||||
Value: "none",
|
Value: "none",
|
||||||
Help: `Pass any hash supported by wrapped remote for non-chunked files.
|
Help: `Pass any hash supported by wrapped remote for non-chunked files, return nothing otherwise`,
|
||||||
Return nothing otherwise.`,
|
|
||||||
}, {
|
}, {
|
||||||
Value: "md5",
|
Value: "md5",
|
||||||
Help: `MD5 for composite files.`,
|
Help: `MD5 for composite files`,
|
||||||
}, {
|
}, {
|
||||||
Value: "sha1",
|
Value: "sha1",
|
||||||
Help: `SHA1 for composite files.`,
|
Help: `SHA1 for composite files`,
|
||||||
}, {
|
}, {
|
||||||
Value: "md5all",
|
Value: "md5all",
|
||||||
Help: `MD5 for all files.`,
|
Help: `MD5 for all files`,
|
||||||
}, {
|
}, {
|
||||||
Value: "sha1all",
|
Value: "sha1all",
|
||||||
Help: `SHA1 for all files.`,
|
Help: `SHA1 for all files`,
|
||||||
}, {
|
}, {
|
||||||
Value: "md5quick",
|
Value: "md5quick",
|
||||||
Help: `Copying a file to chunker will request MD5 from the source.
|
Help: `Copying a file to chunker will request MD5 from the source falling back to SHA1 if unsupported`,
|
||||||
Falling back to SHA1 if unsupported.`,
|
|
||||||
}, {
|
}, {
|
||||||
Value: "sha1quick",
|
Value: "sha1quick",
|
||||||
Help: `Similar to "md5quick" but prefers SHA1 over MD5.`,
|
Help: `Similar to "md5quick" but prefers SHA1 over MD5`,
|
||||||
}},
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: "fail_hard",
|
Name: "fail_hard",
|
||||||
@@ -287,13 +279,13 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
|||||||
|
|
||||||
baseName, basePath, err := fspath.SplitFs(remote)
|
baseName, basePath, err := fspath.SplitFs(remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to parse remote %q to wrap: %w", remote, err)
|
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
|
||||||
}
|
}
|
||||||
// Look for a file first
|
// Look for a file first
|
||||||
remotePath := fspath.JoinRootPath(basePath, rpath)
|
remotePath := fspath.JoinRootPath(basePath, rpath)
|
||||||
baseFs, err := cache.Get(ctx, baseName+remotePath)
|
baseFs, err := cache.Get(ctx, baseName+remotePath)
|
||||||
if err != fs.ErrorIsFile && err != nil {
|
if err != fs.ErrorIsFile && err != nil {
|
||||||
return nil, fmt.Errorf("failed to make remote %q to wrap: %w", baseName+remotePath, err)
|
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", baseName+remotePath)
|
||||||
}
|
}
|
||||||
if !operations.CanServerSideMove(baseFs) {
|
if !operations.CanServerSideMove(baseFs) {
|
||||||
return nil, errors.New("can't use chunker on a backend which doesn't support server-side move or copy")
|
return nil, errors.New("can't use chunker on a backend which doesn't support server-side move or copy")
|
||||||
@@ -325,14 +317,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Correct root if definitely pointing to a file
|
|
||||||
if err == fs.ErrorIsFile {
|
|
||||||
f.root = path.Dir(f.root)
|
|
||||||
if f.root == "." || f.root == "/" {
|
|
||||||
f.root = ""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
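
The root correction added on the left relies on path.Dir collapsing a file path to its parent directory and returning "." for a top-level name; a quick sketch of both cases with made-up paths:

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	// A root that points at a file inside a directory...
	fmt.Println(path.Dir("bucket/dir/file.txt")) // bucket/dir

	// ...versus a root that points at a top-level file, where path.Dir
	// returns "." and the code above resets the root to "".
	fmt.Println(path.Dir("file.txt")) // .
}
```
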
// Note 1: the features here are ones we could support, and they are
|
// Note 1: the features here are ones we could support, and they are
|
||||||
// ANDed with the ones from wrappedFs.
|
// ANDed with the ones from wrappedFs.
|
||||||
// Note 2: features.Fill() points features.PutStream to our PutStream,
|
// Note 2: features.Fill() points features.PutStream to our PutStream,
|
||||||
@@ -391,7 +375,7 @@ type Fs struct {
|
|||||||
// configure must be called only from NewFs or by unit tests.
|
// configure must be called only from NewFs or by unit tests.
|
||||||
func (f *Fs) configure(nameFormat, metaFormat, hashType, transactionMode string) error {
|
func (f *Fs) configure(nameFormat, metaFormat, hashType, transactionMode string) error {
|
||||||
if err := f.setChunkNameFormat(nameFormat); err != nil {
|
if err := f.setChunkNameFormat(nameFormat); err != nil {
|
||||||
return fmt.Errorf("invalid name format '%s': %w", nameFormat, err)
|
return errors.Wrapf(err, "invalid name format '%s'", nameFormat)
|
||||||
}
|
}
|
||||||
if err := f.setMetaFormat(metaFormat); err != nil {
|
if err := f.setMetaFormat(metaFormat); err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -448,10 +432,10 @@ func (f *Fs) setHashType(hashType string) error {
|
|||||||
f.hashFallback = true
|
f.hashFallback = true
|
||||||
case "md5all":
|
case "md5all":
|
||||||
f.useMD5 = true
|
f.useMD5 = true
|
||||||
f.hashAll = !f.base.Hashes().Contains(hash.MD5) || f.base.Features().SlowHash
|
f.hashAll = !f.base.Hashes().Contains(hash.MD5)
|
||||||
case "sha1all":
|
case "sha1all":
|
||||||
f.useSHA1 = true
|
f.useSHA1 = true
|
||||||
f.hashAll = !f.base.Hashes().Contains(hash.SHA1) || f.base.Features().SlowHash
|
f.hashAll = !f.base.Hashes().Contains(hash.SHA1)
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf("unsupported hash type '%s'", hashType)
|
return fmt.Errorf("unsupported hash type '%s'", hashType)
|
||||||
}
|
}
|
||||||
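
On the left, md5all (and sha1all) also hash everything when the wrapped remote only offers slow hashes. A condensed, hypothetical model of that decision; the real code consults f.base.Hashes() and f.base.Features().SlowHash:

```go
package main

import "fmt"

// baseInfo is a stand-in for the wrapped remote's capabilities.
type baseInfo struct {
	supportsMD5 bool
	slowHash    bool
}

// hashAllMD5 mirrors the md5all branch on the left column of the hunk.
func hashAllMD5(b baseInfo) bool {
	return !b.supportsMD5 || b.slowHash
}

func main() {
	fmt.Println(hashAllMD5(baseInfo{supportsMD5: true, slowHash: false})) // false: trust the base remote
	fmt.Println(hashAllMD5(baseInfo{supportsMD5: true, slowHash: true}))  // true: slow hashing, e.g. local
	fmt.Println(hashAllMD5(baseInfo{supportsMD5: false}))                 // true: base cannot do MD5
}
```
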
@@ -520,7 +504,7 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
|
|||||||
|
|
||||||
strRegex := regexp.QuoteMeta(pattern)
|
strRegex := regexp.QuoteMeta(pattern)
|
||||||
strRegex = reHashes.ReplaceAllLiteralString(strRegex, reDataOrCtrl)
|
strRegex = reHashes.ReplaceAllLiteralString(strRegex, reDataOrCtrl)
|
||||||
strRegex = strings.ReplaceAll(strRegex, "\\*", mainNameRegStr)
|
strRegex = strings.Replace(strRegex, "\\*", mainNameRegStr, -1)
|
||||||
strRegex = fmt.Sprintf("^%s(?:%s|%s)?$", strRegex, tempSuffixRegStr, tempSuffixRegOld)
|
strRegex = fmt.Sprintf("^%s(?:%s|%s)?$", strRegex, tempSuffixRegStr, tempSuffixRegOld)
|
||||||
f.nameRegexp = regexp.MustCompile(strRegex)
|
f.nameRegexp = regexp.MustCompile(strRegex)
|
||||||
|
|
||||||
@@ -529,7 +513,7 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
|
|||||||
if numDigits > 1 {
|
if numDigits > 1 {
|
||||||
fmtDigits = fmt.Sprintf("%%0%dd", numDigits)
|
fmtDigits = fmt.Sprintf("%%0%dd", numDigits)
|
||||||
}
|
}
|
||||||
strFmt := strings.ReplaceAll(pattern, "%", "%%")
|
strFmt := strings.Replace(pattern, "%", "%%", -1)
|
||||||
strFmt = strings.Replace(strFmt, "*", "%s", 1)
|
strFmt = strings.Replace(strFmt, "*", "%s", 1)
|
||||||
f.dataNameFmt = reHashes.ReplaceAllLiteralString(strFmt, fmtDigits)
|
f.dataNameFmt = reHashes.ReplaceAllLiteralString(strFmt, fmtDigits)
|
||||||
f.ctrlNameFmt = reHashes.ReplaceAllLiteralString(strFmt, "_%s")
|
f.ctrlNameFmt = reHashes.ReplaceAllLiteralString(strFmt, "_%s")
|
||||||
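
These two hunks convert the name_format pattern into a printf-style template, with strings.ReplaceAll on the left being equivalent to strings.Replace(..., -1) on the right. A hypothetical walk-through using the default pattern; all names below are local to this sketch:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	pattern := "*.rclone_chunk.###"

	// Count the run of '#' placeholders to build a zero-padded number verb.
	numDigits := strings.Count(pattern, "#")
	fmtDigits := "%d"
	if numDigits > 1 {
		fmtDigits = fmt.Sprintf("%%0%dd", numDigits)
	}

	// Escape literal '%', then substitute the two placeholders.
	reHashes := regexp.MustCompile("#+")
	strFmt := strings.ReplaceAll(pattern, "%", "%%")
	strFmt = strings.Replace(strFmt, "*", "%s", 1)
	dataNameFmt := reHashes.ReplaceAllLiteralString(strFmt, fmtDigits)

	// Formatting base name "file.txt" with chunk number 2:
	fmt.Printf(dataNameFmt+"\n", "file.txt", 2) // file.txt.rclone_chunk.002
}
```
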
@@ -547,6 +531,7 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
|
|||||||
//
|
//
|
||||||
// xactID is a transaction identifier. Empty xactID denotes active chunk,
|
// xactID is a transaction identifier. Empty xactID denotes active chunk,
|
||||||
// otherwise temporary chunk name is produced.
|
// otherwise temporary chunk name is produced.
|
||||||
|
//
|
||||||
func (f *Fs) makeChunkName(filePath string, chunkNo int, ctrlType, xactID string) string {
|
func (f *Fs) makeChunkName(filePath string, chunkNo int, ctrlType, xactID string) string {
|
||||||
dir, parentName := path.Split(filePath)
|
dir, parentName := path.Split(filePath)
|
||||||
var name, tempSuffix string
|
var name, tempSuffix string
|
||||||
@@ -712,6 +697,7 @@ func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err
|
|||||||
// directory together with dead chunks.
|
// directory together with dead chunks.
|
||||||
// In future a flag named like `--chunker-list-hidden` may be added to
|
// In future a flag named like `--chunker-list-hidden` may be added to
|
||||||
// rclone that will tell List to reveal hidden chunks.
|
// rclone that will tell List to reveal hidden chunks.
|
||||||
|
//
|
||||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
entries, err = f.base.List(ctx, dir)
|
entries, err = f.base.List(ctx, dir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -826,7 +812,7 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
|
|||||||
tempEntries = append(tempEntries, wrapDir)
|
tempEntries = append(tempEntries, wrapDir)
|
||||||
default:
|
default:
|
||||||
if f.opt.FailHard {
|
if f.opt.FailHard {
|
||||||
return nil, fmt.Errorf("unknown object type %T", entry)
|
return nil, fmt.Errorf("Unknown object type %T", entry)
|
||||||
}
|
}
|
||||||
fs.Debugf(f, "unknown object type %T", entry)
|
fs.Debugf(f, "unknown object type %T", entry)
|
||||||
}
|
}
|
||||||
@@ -871,6 +857,7 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
|
|||||||
// Note that chunker prefers analyzing file names rather than reading
|
// Note that chunker prefers analyzing file names rather than reading
|
||||||
// the content of meta object assuming that directory scans are fast
|
// the content of meta object assuming that directory scans are fast
|
||||||
// but opening even a small file can be slow on some backends.
|
// but opening even a small file can be slow on some backends.
|
||||||
|
//
|
||||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||||
return f.scanObject(ctx, remote, false)
|
return f.scanObject(ctx, remote, false)
|
||||||
}
|
}
|
||||||
@@ -880,7 +867,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
|||||||
// ignores non-chunked objects and skips chunk size checks.
|
// ignores non-chunked objects and skips chunk size checks.
|
||||||
func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.Object, error) {
|
func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.Object, error) {
|
||||||
if err := f.forbidChunk(false, remote); err != nil {
|
if err := f.forbidChunk(false, remote); err != nil {
|
||||||
return nil, fmt.Errorf("can't access: %w", err)
|
return nil, errors.Wrap(err, "can't access")
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -929,7 +916,7 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
|
|||||||
case fs.ErrorDirNotFound:
|
case fs.ErrorDirNotFound:
|
||||||
entries = nil
|
entries = nil
|
||||||
default:
|
default:
|
||||||
return nil, fmt.Errorf("can't detect composite file: %w", err)
|
return nil, errors.Wrap(err, "can't detect composite file")
|
||||||
}
|
}
|
||||||
|
|
||||||
if f.useNoRename {
|
if f.useNoRename {
|
||||||
@@ -1045,7 +1032,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
metadata, err := io.ReadAll(reader)
|
metadata, err := ioutil.ReadAll(reader)
|
||||||
_ = reader.Close() // ensure file handle is freed on windows
|
_ = reader.Close() // ensure file handle is freed on windows
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
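
This hunk, and several later ones, trade io.ReadAll for the deprecated ioutil.ReadAll (and io.NopCloser for ioutil.NopCloser); since Go 1.16 the ioutil functions simply call their io counterparts. A self-contained sketch with a stand-in reader:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// strings.NewReader stands in for the metadata object's reader.
	reader := io.NopCloser(strings.NewReader(`{"ver":1}`))

	data, err := io.ReadAll(reader) // same behaviour as ioutil.ReadAll
	_ = reader.Close()              // ensure the handle is freed promptly
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Printf("read %d bytes: %s\n", len(data), data)
}
```
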
@@ -1069,7 +1056,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
|
|||||||
case ErrMetaTooBig, ErrMetaUnknown:
|
case ErrMetaTooBig, ErrMetaUnknown:
|
||||||
return err // return these errors unwrapped for unit tests
|
return err // return these errors unwrapped for unit tests
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf("invalid metadata: %w", err)
|
return errors.Wrap(err, "invalid metadata")
|
||||||
}
|
}
|
||||||
if o.size != metaInfo.Size() || len(o.chunks) != metaInfo.nChunks {
|
if o.size != metaInfo.Size() || len(o.chunks) != metaInfo.nChunks {
|
||||||
return errors.New("metadata doesn't match file size")
|
return errors.New("metadata doesn't match file size")
|
||||||
@@ -1086,7 +1073,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
|
|||||||
|
|
||||||
// readXactID returns the transaction ID stored in the passed metadata object
|
// readXactID returns the transaction ID stored in the passed metadata object
|
||||||
func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
|
func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
|
||||||
// if xactID has already been read and cached return it now
|
// if xactID has already been read and cahced return it now
|
||||||
if o.xIDCached {
|
if o.xIDCached {
|
||||||
return o.xactID, nil
|
return o.xactID, nil
|
||||||
}
|
}
|
||||||
@@ -1104,7 +1091,7 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
data, err := io.ReadAll(reader)
|
data, err := ioutil.ReadAll(reader)
|
||||||
_ = reader.Close() // ensure file handle is freed on windows
|
_ = reader.Close() // ensure file handle is freed on windows
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
@@ -1112,7 +1099,7 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
|
|||||||
|
|
||||||
switch o.f.opt.MetaFormat {
|
switch o.f.opt.MetaFormat {
|
||||||
case "simplejson":
|
case "simplejson":
|
||||||
if len(data) > maxMetadataSizeWritten {
|
if data != nil && len(data) > maxMetadataSizeWritten {
|
||||||
return "", nil // this was likely not a metadata object, return empty xactID but don't throw error
|
return "", nil // this was likely not a metadata object, return empty xactID but don't throw error
|
||||||
}
|
}
|
||||||
var metadata metaSimpleJSON
|
var metadata metaSimpleJSON
|
||||||
@@ -1134,7 +1121,7 @@ func (f *Fs) put(
|
|||||||
|
|
||||||
// Perform consistency checks
|
// Perform consistency checks
|
||||||
if err := f.forbidChunk(src, remote); err != nil {
|
if err := f.forbidChunk(src, remote); err != nil {
|
||||||
return nil, fmt.Errorf("%s refused: %w", action, err)
|
return nil, errors.Wrap(err, action+" refused")
|
||||||
}
|
}
|
||||||
if target == nil {
|
if target == nil {
|
||||||
// Get target object with a quick directory scan
|
// Get target object with a quick directory scan
|
||||||
@@ -1148,7 +1135,7 @@ func (f *Fs) put(
|
|||||||
obj := target.(*Object)
|
obj := target.(*Object)
|
||||||
if err := obj.readMetadata(ctx); err == ErrMetaUnknown {
|
if err := obj.readMetadata(ctx); err == ErrMetaUnknown {
|
||||||
// refuse to update a file of unsupported format
|
// refuse to update a file of unsupported format
|
||||||
return nil, fmt.Errorf("refusing to %s: %w", action, err)
|
return nil, errors.Wrap(err, "refusing to "+action)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1227,7 +1214,7 @@ func (f *Fs) put(
|
|||||||
// and skips the "EOF" read. Hence, switch to next limit here.
|
// and skips the "EOF" read. Hence, switch to next limit here.
|
||||||
if !(c.chunkLimit == 0 || c.chunkLimit == c.chunkSize || c.sizeTotal == -1 || c.done) {
|
if !(c.chunkLimit == 0 || c.chunkLimit == c.chunkSize || c.sizeTotal == -1 || c.done) {
|
||||||
silentlyRemove(ctx, chunk)
|
silentlyRemove(ctx, chunk)
|
||||||
return nil, fmt.Errorf("destination ignored %d data bytes", c.chunkLimit)
|
return nil, fmt.Errorf("Destination ignored %d data bytes", c.chunkLimit)
|
||||||
}
|
}
|
||||||
c.chunkLimit = c.chunkSize
|
c.chunkLimit = c.chunkSize
|
||||||
|
|
||||||
@@ -1236,7 +1223,7 @@ func (f *Fs) put(
|
|||||||
|
|
||||||
// Validate uploaded size
|
// Validate uploaded size
|
||||||
if c.sizeTotal != -1 && c.readCount != c.sizeTotal {
|
if c.sizeTotal != -1 && c.readCount != c.sizeTotal {
|
||||||
return nil, fmt.Errorf("incorrect upload size %d != %d", c.readCount, c.sizeTotal)
|
return nil, fmt.Errorf("Incorrect upload size %d != %d", c.readCount, c.sizeTotal)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check for input that looks like valid metadata
|
// Check for input that looks like valid metadata
|
||||||
@@ -1273,7 +1260,7 @@ func (f *Fs) put(
|
|||||||
sizeTotal += chunk.Size()
|
sizeTotal += chunk.Size()
|
||||||
}
|
}
|
||||||
if sizeTotal != c.readCount {
|
if sizeTotal != c.readCount {
|
||||||
return nil, fmt.Errorf("incorrect chunks size %d != %d", sizeTotal, c.readCount)
|
return nil, fmt.Errorf("Incorrect chunks size %d != %d", sizeTotal, c.readCount)
|
||||||
}
|
}
|
||||||
|
|
||||||
// If previous object was chunked, remove its chunks
|
// If previous object was chunked, remove its chunks
|
||||||
@@ -1461,7 +1448,7 @@ func (c *chunkingReader) dummyRead(in io.Reader, size int64) error {
|
|||||||
c.accountBytes(size)
|
c.accountBytes(size)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
const bufLen = 1048576 // 1 MiB
|
const bufLen = 1048576 // 1MB
|
||||||
buf := make([]byte, bufLen)
|
buf := make([]byte, bufLen)
|
||||||
for size > 0 {
|
for size > 0 {
|
||||||
n := size
|
n := size
|
||||||
@@ -1566,7 +1553,7 @@ func (f *Fs) Hashes() hash.Set {
|
|||||||
// Shouldn't return an error if it already exists
|
// Shouldn't return an error if it already exists
|
||||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||||
if err := f.forbidChunk(dir, dir); err != nil {
|
if err := f.forbidChunk(dir, dir); err != nil {
|
||||||
return fmt.Errorf("can't mkdir: %w", err)
|
return errors.Wrap(err, "can't mkdir")
|
||||||
}
|
}
|
||||||
return f.base.Mkdir(ctx, dir)
|
return f.base.Mkdir(ctx, dir)
|
||||||
}
|
}
|
||||||
@@ -1588,6 +1575,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
|||||||
// This command will chain to `purge` from wrapped remote.
|
// This command will chain to `purge` from wrapped remote.
|
||||||
// As a result it removes not only composite chunker files with their
|
// As a result it removes not only composite chunker files with their
|
||||||
// active chunks but also all hidden temporary chunks in the directory.
|
// active chunks but also all hidden temporary chunks in the directory.
|
||||||
|
//
|
||||||
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||||
do := f.base.Features().Purge
|
do := f.base.Features().Purge
|
||||||
if do == nil {
|
if do == nil {
|
||||||
@@ -1629,11 +1617,12 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
|
|||||||
// Unsupported control chunks will get re-picked by a more recent
|
// Unsupported control chunks will get re-picked by a more recent
|
||||||
// rclone version with unexpected results. This can be helped by
|
// rclone version with unexpected results. This can be helped by
|
||||||
// the `delete hidden` flag above or at least the user has been warned.
|
// the `delete hidden` flag above or at least the user has been warned.
|
||||||
|
//
|
||||||
func (o *Object) Remove(ctx context.Context) (err error) {
|
func (o *Object) Remove(ctx context.Context) (err error) {
|
||||||
if err := o.f.forbidChunk(o, o.Remote()); err != nil {
|
if err := o.f.forbidChunk(o, o.Remote()); err != nil {
|
||||||
// operations.Move can still call Remove if chunker's Move refuses
|
// operations.Move can still call Remove if chunker's Move refuses
|
||||||
// to corrupt file in hard mode. Hence, refuse to Remove, too.
|
// to corrupt file in hard mode. Hence, refuse to Remove, too.
|
||||||
return fmt.Errorf("refuse to corrupt: %w", err)
|
return errors.Wrap(err, "refuse to corrupt")
|
||||||
}
|
}
|
||||||
if err := o.readMetadata(ctx); err == ErrMetaUnknown {
|
if err := o.readMetadata(ctx); err == ErrMetaUnknown {
|
||||||
// Proceed but warn user that unexpected things can happen.
|
// Proceed but warn user that unexpected things can happen.
|
||||||
@@ -1661,12 +1650,12 @@ func (o *Object) Remove(ctx context.Context) (err error) {
|
|||||||
// copyOrMove implements copy or move
|
// copyOrMove implements copy or move
|
||||||
func (f *Fs) copyOrMove(ctx context.Context, o *Object, remote string, do copyMoveFn, md5, sha1, opName string) (fs.Object, error) {
|
func (f *Fs) copyOrMove(ctx context.Context, o *Object, remote string, do copyMoveFn, md5, sha1, opName string) (fs.Object, error) {
|
||||||
if err := f.forbidChunk(o, remote); err != nil {
|
if err := f.forbidChunk(o, remote); err != nil {
|
||||||
return nil, fmt.Errorf("can't %s: %w", opName, err)
|
return nil, errors.Wrapf(err, "can't %s", opName)
|
||||||
}
|
}
|
||||||
if err := o.readMetadata(ctx); err != nil {
|
if err := o.readMetadata(ctx); err != nil {
|
||||||
// Refuse to copy/move composite files with invalid or future
|
// Refuse to copy/move composite files with invalid or future
|
||||||
// metadata format which might involve unsupported chunk types.
|
// metadata format which might involve unsupported chunk types.
|
||||||
return nil, fmt.Errorf("can't %s this file: %w", opName, err)
|
return nil, errors.Wrapf(err, "can't %s this file", opName)
|
||||||
}
|
}
|
||||||
if !o.isComposite() {
|
if !o.isComposite() {
|
||||||
fs.Debugf(o, "%s non-chunked object...", opName)
|
fs.Debugf(o, "%s non-chunked object...", opName)
|
||||||
@@ -1804,9 +1793,9 @@ func (f *Fs) okForServerSide(ctx context.Context, src fs.Object, opName string)
|
|||||||
|
|
||||||
// Copy src to this remote using server-side copy operations.
|
// Copy src to this remote using server-side copy operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given.
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
// It returns the destination Object and a possible error.
|
// It returns the destination Object and a possible error
|
||||||
//
|
//
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
@@ -1825,9 +1814,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
|
|
||||||
// Move src to this remote using server-side move operations.
|
// Move src to this remote using server-side move operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given.
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
// It returns the destination Object and a possible error.
|
// It returns the destination Object and a possible error
|
||||||
//
|
//
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
@@ -1895,7 +1884,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
|||||||
func (f *Fs) CleanUp(ctx context.Context) error {
|
func (f *Fs) CleanUp(ctx context.Context) error {
|
||||||
do := f.base.Features().CleanUp
|
do := f.base.Features().CleanUp
|
||||||
if do == nil {
|
if do == nil {
|
||||||
return errors.New("not supported by underlying remote")
|
return errors.New("can't CleanUp")
|
||||||
}
|
}
|
||||||
return do(ctx)
|
return do(ctx)
|
||||||
}
|
}
|
||||||
@@ -1904,7 +1893,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
|||||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||||
do := f.base.Features().About
|
do := f.base.Features().About
|
||||||
if do == nil {
|
if do == nil {
|
||||||
return nil, errors.New("not supported by underlying remote")
|
return nil, errors.New("About not supported")
|
||||||
}
|
}
|
||||||
return do(ctx)
|
return do(ctx)
|
||||||
}
|
}
|
||||||
@@ -2125,6 +2114,7 @@ func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
|
|||||||
// file, then tries to read it from metadata. This in theory
|
// file, then tries to read it from metadata. This in theory
|
||||||
// handles the unusual case when a small file has been tampered
|
// handles the unusual case when a small file has been tampered
|
||||||
// on the level of wrapped remote but chunker is unaware of that.
|
// on the level of wrapped remote but chunker is unaware of that.
|
||||||
|
//
|
||||||
func (o *Object) Hash(ctx context.Context, hashType hash.Type) (string, error) {
|
func (o *Object) Hash(ctx context.Context, hashType hash.Type) (string, error) {
|
||||||
if err := o.readMetadata(ctx); err != nil {
|
if err := o.readMetadata(ctx); err != nil {
|
||||||
return "", err // valid metadata is required to get hash, abort
|
return "", err // valid metadata is required to get hash, abort
|
||||||
@@ -2162,7 +2152,7 @@ func (o *Object) UnWrap() fs.Object {
|
|||||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
||||||
if err := o.readMetadata(ctx); err != nil {
|
if err := o.readMetadata(ctx); err != nil {
|
||||||
// refuse to open unsupported format
|
// refuse to open unsupported format
|
||||||
return nil, fmt.Errorf("can't open: %w", err)
|
return nil, errors.Wrap(err, "can't open")
|
||||||
}
|
}
|
||||||
if !o.isComposite() {
|
if !o.isComposite() {
|
||||||
return o.mainChunk().Open(ctx, options...) // chain to wrapped non-chunked file
|
return o.mainChunk().Open(ctx, options...) // chain to wrapped non-chunked file
|
||||||
@@ -2413,6 +2403,7 @@ type metaSimpleJSON struct {
|
|||||||
// - for files larger than chunk size
|
// - for files larger than chunk size
|
||||||
// - if file contents can be mistaken as meta object
|
// - if file contents can be mistaken as meta object
|
||||||
// - if consistent hashing is On but wrapped remote can't provide given hash
|
// - if consistent hashing is On but wrapped remote can't provide given hash
|
||||||
|
//
|
||||||
func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1, xactID string) ([]byte, error) {
|
func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1, xactID string) ([]byte, error) {
|
||||||
version := metadataVersion
|
version := metadataVersion
|
||||||
if xactID == "" && version == 2 {
|
if xactID == "" && version == 2 {
|
||||||
@@ -2445,10 +2436,11 @@ func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1,
|
|||||||
// New format will have a higher version number and cannot be correctly
|
// New format will have a higher version number and cannot be correctly
|
||||||
// handled by current implementation.
|
// handled by current implementation.
|
||||||
// The version check below will then explicitly ask user to upgrade rclone.
|
// The version check below will then explicitly ask user to upgrade rclone.
|
||||||
|
//
|
||||||
func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte) (info *ObjectInfo, madeByChunker bool, err error) {
|
func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte) (info *ObjectInfo, madeByChunker bool, err error) {
|
||||||
// Be strict about JSON format
|
// Be strict about JSON format
|
||||||
// to reduce possibility that a random small file resembles metadata.
|
// to reduce possibility that a random small file resembles metadata.
|
||||||
if len(data) > maxMetadataSizeWritten {
|
if data != nil && len(data) > maxMetadataSizeWritten {
|
||||||
return nil, false, ErrMetaTooBig
|
return nil, false, ErrMetaTooBig
|
||||||
}
|
}
|
||||||
if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
|
if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
|
||||||
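
Dropping the data != nil guard on the left is safe because the length of a nil byte slice is zero in Go, so the size check alone already covers that case:

```go
package main

import "fmt"

func main() {
	var data []byte          // nil slice
	fmt.Println(data == nil) // true
	fmt.Println(len(data))   // 0, so `len(data) > max` alone handles nil input
}
```
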
|
|||||||
@@ -5,15 +5,13 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io/ioutil"
|
||||||
"path"
|
"path"
|
||||||
"regexp"
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
|
||||||
"github.com/rclone/rclone/fs/fspath"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
"github.com/rclone/rclone/fs/hash"
|
||||||
"github.com/rclone/rclone/fs/object"
|
"github.com/rclone/rclone/fs/object"
|
||||||
"github.com/rclone/rclone/fs/operations"
|
"github.com/rclone/rclone/fs/operations"
|
||||||
@@ -35,35 +33,11 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
|
|||||||
fstests.TestPutLarge(context.Background(), t, f, &fstest.Item{
|
fstests.TestPutLarge(context.Background(), t, f, &fstest.Item{
|
||||||
ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
|
ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
|
||||||
Path: fmt.Sprintf("chunker-upload-%dk", kilobytes),
|
Path: fmt.Sprintf("chunker-upload-%dk", kilobytes),
|
||||||
Size: int64(kilobytes) * int64(fs.Kibi),
|
Size: int64(kilobytes) * int64(fs.KibiByte),
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
type settings map[string]interface{}
|
|
||||||
|
|
||||||
func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, path string, opts settings) fs.Fs {
|
|
||||||
fsName := strings.Split(f.Name(), "{")[0] // strip off hash
|
|
||||||
configMap := configmap.Simple{}
|
|
||||||
for key, val := range opts {
|
|
||||||
configMap[key] = fmt.Sprintf("%v", val)
|
|
||||||
}
|
|
||||||
rpath := fspath.JoinRootPath(f.Root(), path)
|
|
||||||
remote := fmt.Sprintf("%s,%s:%s", fsName, configMap.String(), rpath)
|
|
||||||
fixFs, err := fs.NewFs(ctx, remote)
|
|
||||||
require.NoError(t, err)
|
|
||||||
return fixFs
|
|
||||||
}
|
|
||||||
|
|
||||||
var mtime1 = fstest.Time("2001-02-03T04:05:06.499999999Z")
|
|
||||||
|
|
||||||
func testPutFile(ctx context.Context, t *testing.T, f fs.Fs, name, contents, message string, check bool) fs.Object {
|
|
||||||
item := fstest.Item{Path: name, ModTime: mtime1}
|
|
||||||
obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
|
|
||||||
assert.NotNil(t, obj, message)
|
|
||||||
return obj
|
|
||||||
}
|
|
||||||
|
|
||||||
// test chunk name parser
|
// test chunk name parser
|
||||||
func testChunkNameFormat(t *testing.T, f *Fs) {
|
func testChunkNameFormat(t *testing.T, f *Fs) {
|
||||||
saveOpt := f.opt
|
saveOpt := f.opt
|
||||||
@@ -413,7 +387,7 @@ func testSmallFileInternals(t *testing.T, f *Fs) {
|
|||||||
if r == nil {
|
if r == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
data, err := io.ReadAll(r)
|
data, err := ioutil.ReadAll(r)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Equal(t, contents, string(data))
|
assert.Equal(t, contents, string(data))
|
||||||
_ = r.Close()
|
_ = r.Close()
|
||||||
@@ -440,7 +414,7 @@ func testSmallFileInternals(t *testing.T, f *Fs) {
|
|||||||
checkSmallFile := func(name, contents string) {
|
checkSmallFile := func(name, contents string) {
|
||||||
filename := path.Join(dir, name)
|
filename := path.Join(dir, name)
|
||||||
item := fstest.Item{Path: filename, ModTime: modTime}
|
item := fstest.Item{Path: filename, ModTime: modTime}
|
||||||
put := fstests.PutTestContents(ctx, t, f, &item, contents, false)
|
_, put := fstests.PutTestContents(ctx, t, f, &item, contents, false)
|
||||||
assert.NotNil(t, put)
|
assert.NotNil(t, put)
|
||||||
checkSmallFileInternals(put)
|
checkSmallFileInternals(put)
|
||||||
checkContents(put, contents)
|
checkContents(put, contents)
|
||||||
@@ -489,7 +463,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
|
|||||||
|
|
||||||
newFile := func(name string) fs.Object {
|
newFile := func(name string) fs.Object {
|
||||||
item := fstest.Item{Path: path.Join(dir, name), ModTime: modTime}
|
item := fstest.Item{Path: path.Join(dir, name), ModTime: modTime}
|
||||||
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
||||||
require.NotNil(t, obj)
|
require.NotNil(t, obj)
|
||||||
return obj
|
return obj
|
||||||
}
|
}
|
||||||
@@ -538,7 +512,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
|
|||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
var chunkContents []byte
|
var chunkContents []byte
|
||||||
assert.NotPanics(t, func() {
|
assert.NotPanics(t, func() {
|
||||||
chunkContents, err = io.ReadAll(r)
|
chunkContents, err = ioutil.ReadAll(r)
|
||||||
_ = r.Close()
|
_ = r.Close()
|
||||||
})
|
})
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
@@ -573,7 +547,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
|
|||||||
r, err = willyChunk.Open(ctx)
|
r, err = willyChunk.Open(ctx)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.NotPanics(t, func() {
|
assert.NotPanics(t, func() {
|
||||||
_, err = io.ReadAll(r)
|
_, err = ioutil.ReadAll(r)
|
||||||
_ = r.Close()
|
_ = r.Close()
|
||||||
})
|
})
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
@@ -599,7 +573,7 @@ func testChunkNumberOverflow(t *testing.T, f *Fs) {
|
|||||||
newFile := func(f fs.Fs, name string) (obj fs.Object, filename string, txnID string) {
|
newFile := func(f fs.Fs, name string) (obj fs.Object, filename string, txnID string) {
|
||||||
filename = path.Join(dir, name)
|
filename = path.Join(dir, name)
|
||||||
item := fstest.Item{Path: filename, ModTime: modTime}
|
item := fstest.Item{Path: filename, ModTime: modTime}
|
||||||
obj = fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
_, obj = fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
||||||
require.NotNil(t, obj)
|
require.NotNil(t, obj)
|
||||||
if chunkObj, isChunkObj := obj.(*Object); isChunkObj {
|
if chunkObj, isChunkObj := obj.(*Object); isChunkObj {
|
||||||
txnID = chunkObj.xactID
|
txnID = chunkObj.xactID
|
||||||
@@ -643,13 +617,22 @@ func testMetadataInput(t *testing.T, f *Fs) {
|
|||||||
}()
|
}()
|
||||||
f.opt.FailHard = false
|
f.opt.FailHard = false
|
||||||
|
|
||||||
|
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
|
||||||
|
|
||||||
|
putFile := func(f fs.Fs, name, contents, message string, check bool) fs.Object {
|
||||||
|
item := fstest.Item{Path: name, ModTime: modTime}
|
||||||
|
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
|
||||||
|
assert.NotNil(t, obj, message)
|
||||||
|
return obj
|
||||||
|
}
|
||||||
|
|
||||||
runSubtest := func(contents, name string) {
|
runSubtest := func(contents, name string) {
|
||||||
description := fmt.Sprintf("file with %s metadata", name)
|
description := fmt.Sprintf("file with %s metadata", name)
|
||||||
filename := path.Join(dir, name)
|
filename := path.Join(dir, name)
|
||||||
require.True(t, len(contents) > 2 && len(contents) < minChunkForTest, description+" test data is correct")
|
require.True(t, len(contents) > 2 && len(contents) < minChunkForTest, description+" test data is correct")
|
||||||
|
|
||||||
part := testPutFile(ctx, t, f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
|
part := putFile(f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
|
||||||
_ = testPutFile(ctx, t, f, filename, contents, "upload "+description, false)
|
_ = putFile(f, filename, contents, "upload "+description, false)
|
||||||
|
|
||||||
obj, err := f.NewObject(ctx, filename)
|
obj, err := f.NewObject(ctx, filename)
|
||||||
assert.NoError(t, err, "access "+description)
|
assert.NoError(t, err, "access "+description)
|
||||||
@@ -672,7 +655,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
|
|||||||
assert.NoError(t, err, "open "+description)
|
assert.NoError(t, err, "open "+description)
|
||||||
assert.NotNil(t, r, "open stream of "+description)
|
assert.NotNil(t, r, "open stream of "+description)
|
||||||
if err == nil && r != nil {
|
if err == nil && r != nil {
|
||||||
data, err := io.ReadAll(r)
|
data, err := ioutil.ReadAll(r)
|
||||||
assert.NoError(t, err, "read all of "+description)
|
assert.NoError(t, err, "read all of "+description)
|
||||||
assert.Equal(t, contents, string(data), description+" contents is ok")
|
assert.Equal(t, contents, string(data), description+" contents is ok")
|
||||||
_ = r.Close()
|
_ = r.Close()
|
||||||
@@ -695,7 +678,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
|
|||||||
|
|
||||||
// Test that chunker refuses to change on objects with future/unknown metadata
|
// Test that chunker refuses to change on objects with future/unknown metadata
|
||||||
func testFutureProof(t *testing.T, f *Fs) {
|
func testFutureProof(t *testing.T, f *Fs) {
|
||||||
if !f.useMeta {
|
if f.opt.MetaFormat == "none" {
|
||||||
t.Skip("this test requires metadata support")
|
t.Skip("this test requires metadata support")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -716,7 +699,7 @@ func testFutureProof(t *testing.T, f *Fs) {
|
|||||||
name = f.makeChunkName(name, part-1, "", "")
|
name = f.makeChunkName(name, part-1, "", "")
|
||||||
}
|
}
|
||||||
item := fstest.Item{Path: name, ModTime: modTime}
|
item := fstest.Item{Path: name, ModTime: modTime}
|
||||||
obj := fstests.PutTestContents(ctx, t, f.base, &item, data, true)
|
_, obj := fstests.PutTestContents(ctx, t, f.base, &item, data, true)
|
||||||
assert.NotNil(t, obj, msg)
|
assert.NotNil(t, obj, msg)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -758,8 +741,8 @@ func testFutureProof(t *testing.T, f *Fs) {
|
|||||||
assert.Error(t, err)
|
assert.Error(t, err)
|
||||||
|
|
||||||
// Rcat must fail
|
// Rcat must fail
|
||||||
in := io.NopCloser(bytes.NewBufferString("abc"))
|
in := ioutil.NopCloser(bytes.NewBufferString("abc"))
|
||||||
robj, err := operations.Rcat(ctx, f, file, in, modTime, nil)
|
robj, err := operations.Rcat(ctx, f, file, in, modTime)
|
||||||
assert.Nil(t, robj)
|
assert.Nil(t, robj)
|
||||||
assert.NotNil(t, err)
|
assert.NotNil(t, err)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -790,7 +773,7 @@ func testBackwardsCompatibility(t *testing.T, f *Fs) {
|
|||||||
newFile := func(f fs.Fs, name string) (fs.Object, string) {
|
newFile := func(f fs.Fs, name string) (fs.Object, string) {
|
||||||
filename := path.Join(dir, name)
|
filename := path.Join(dir, name)
|
||||||
item := fstest.Item{Path: filename, ModTime: modTime}
|
item := fstest.Item{Path: filename, ModTime: modTime}
|
||||||
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
|
||||||
require.NotNil(t, obj)
|
require.NotNil(t, obj)
|
||||||
return obj, filename
|
return obj, filename
|
||||||
}
|
}
|
||||||
@@ -844,7 +827,7 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
|
|||||||
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
|
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
|
||||||
item := fstest.Item{Path: "movefile", ModTime: modTime}
|
item := fstest.Item{Path: "movefile", ModTime: modTime}
|
||||||
contents := "abcdef"
|
contents := "abcdef"
|
||||||
file := fstests.PutTestContents(ctx, t, fs1, &item, contents, true)
|
_, file := fstests.PutTestContents(ctx, t, fs1, &item, contents, true)
|
||||||
|
|
||||||
dstOverwritten, _ := fs2.NewObject(ctx, "movefile")
|
dstOverwritten, _ := fs2.NewObject(ctx, "movefile")
|
||||||
dstFile, err := operations.Move(ctx, fs2, dstOverwritten, "movefile", file)
|
dstFile, err := operations.Move(ctx, fs2, dstOverwritten, "movefile", file)
|
||||||
@@ -854,51 +837,13 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
|
|||||||
r, err := dstFile.Open(ctx)
|
r, err := dstFile.Open(ctx)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.NotNil(t, r)
|
assert.NotNil(t, r)
|
||||||
data, err := io.ReadAll(r)
|
data, err := ioutil.ReadAll(r)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Equal(t, contents, string(data))
|
assert.Equal(t, contents, string(data))
|
||||||
_ = r.Close()
|
_ = r.Close()
|
||||||
_ = operations.Purge(ctx, f.base, dir)
|
_ = operations.Purge(ctx, f.base, dir)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test that md5all creates metadata even for small files
|
|
||||||
func testMD5AllSlow(t *testing.T, f *Fs) {
|
|
||||||
ctx := context.Background()
|
|
||||||
fsResult := deriveFs(ctx, t, f, "md5all", settings{
|
|
||||||
"chunk_size": "1P",
|
|
||||||
"name_format": "*.#",
|
|
||||||
"hash_type": "md5all",
|
|
||||||
"transactions": "rename",
|
|
||||||
"meta_format": "simplejson",
|
|
||||||
})
|
|
||||||
chunkFs, ok := fsResult.(*Fs)
|
|
||||||
require.True(t, ok, "fs must be a chunker remote")
|
|
||||||
baseFs := chunkFs.base
|
|
||||||
if !baseFs.Features().SlowHash {
|
|
||||||
t.Skipf("this test needs a base fs with slow hash, e.g. local")
|
|
||||||
}
|
|
||||||
|
|
||||||
assert.True(t, chunkFs.useMD5, "must use md5")
|
|
||||||
assert.True(t, chunkFs.hashAll, "must hash all files")
|
|
||||||
|
|
||||||
_ = testPutFile(ctx, t, chunkFs, "file", "-", "error", true)
|
|
||||||
obj, err := chunkFs.NewObject(ctx, "file")
|
|
||||||
require.NoError(t, err)
|
|
||||||
sum, err := obj.Hash(ctx, hash.MD5)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, "336d5ebc5436534e61d16e63ddfca327", sum)
|
|
||||||
|
|
||||||
list, err := baseFs.List(ctx, "")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, 2, len(list))
|
|
||||||
_, err = baseFs.NewObject(ctx, "file")
|
|
||||||
assert.NoError(t, err, "metadata must be created")
|
|
||||||
_, err = baseFs.NewObject(ctx, "file.1")
|
|
||||||
assert.NoError(t, err, "first chunk must be created")
|
|
||||||
|
|
||||||
require.NoError(t, operations.Purge(ctx, baseFs, ""))
|
|
||||||
}
|
|
||||||
|
|
||||||
// InternalTest dispatches all internal tests
|
// InternalTest dispatches all internal tests
|
||||||
func (f *Fs) InternalTest(t *testing.T) {
|
func (f *Fs) InternalTest(t *testing.T) {
|
||||||
t.Run("PutLarge", func(t *testing.T) {
|
t.Run("PutLarge", func(t *testing.T) {
|
||||||
@@ -931,9 +876,6 @@ func (f *Fs) InternalTest(t *testing.T) {
|
|||||||
t.Run("ChunkerServerSideMove", func(t *testing.T) {
|
t.Run("ChunkerServerSideMove", func(t *testing.T) {
|
||||||
testChunkerServerSideMove(t, f)
|
testChunkerServerSideMove(t, f)
|
||||||
})
|
})
|
||||||
t.Run("MD5AllSlow", func(t *testing.T) {
|
|
||||||
testMD5AllSlow(t, f)
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ fstests.InternalTester = (*Fs)(nil)
|
var _ fstests.InternalTester = (*Fs)(nil)
|
||||||
|
|||||||
@@ -35,12 +35,10 @@ func TestIntegration(t *testing.T) {
|
|||||||
"MimeType",
|
"MimeType",
|
||||||
"GetTier",
|
"GetTier",
|
||||||
"SetTier",
|
"SetTier",
|
||||||
"Metadata",
|
|
||||||
},
|
},
|
||||||
UnimplementableFsMethods: []string{
|
UnimplementableFsMethods: []string{
|
||||||
"PublicLink",
|
"PublicLink",
|
||||||
"OpenWriterAt",
|
"OpenWriterAt",
|
||||||
"OpenChunkWriter",
|
|
||||||
"MergeDirs",
|
"MergeDirs",
|
||||||
"DirCacheFlush",
|
"DirCacheFlush",
|
||||||
"UserInfo",
|
"UserInfo",
|
||||||
@@ -55,7 +53,6 @@ func TestIntegration(t *testing.T) {
|
|||||||
{Name: name, Key: "type", Value: "chunker"},
|
{Name: name, Key: "type", Value: "chunker"},
|
||||||
{Name: name, Key: "remote", Value: tempDir},
|
{Name: name, Key: "remote", Value: tempDir},
|
||||||
}
|
}
|
||||||
opt.QuickTestOK = true
|
|
||||||
}
|
}
|
||||||
fstests.Run(t, &opt)
|
fstests.Run(t, &opt)
|
||||||
}
|
}
|
||||||
|
|||||||
File diff suppressed because it is too large
@@ -1,94 +0,0 @@
|
|||||||
package combine
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestAdjustmentDo(t *testing.T) {
|
|
||||||
for _, test := range []struct {
|
|
||||||
root string
|
|
||||||
mountpoint string
|
|
||||||
in string
|
|
||||||
want string
|
|
||||||
wantErr error
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
root: "",
|
|
||||||
mountpoint: "mountpoint",
|
|
||||||
in: "path/to/file.txt",
|
|
||||||
want: "mountpoint/path/to/file.txt",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
root: "mountpoint",
|
|
||||||
mountpoint: "mountpoint",
|
|
||||||
in: "path/to/file.txt",
|
|
||||||
want: "path/to/file.txt",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
root: "mountpoint/path",
|
|
||||||
mountpoint: "mountpoint",
|
|
||||||
in: "path/to/file.txt",
|
|
||||||
want: "to/file.txt",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
root: "mountpoint/path",
|
|
||||||
mountpoint: "mountpoint",
|
|
||||||
in: "wrongpath/to/file.txt",
|
|
||||||
want: "",
|
|
||||||
wantErr: errNotUnderRoot,
|
|
||||||
},
|
|
||||||
} {
|
|
||||||
what := fmt.Sprintf("%+v", test)
|
|
||||||
a := newAdjustment(test.root, test.mountpoint)
|
|
||||||
got, gotErr := a.do(test.in)
|
|
||||||
assert.Equal(t, test.wantErr, gotErr)
|
|
||||||
assert.Equal(t, test.want, got, what)
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAdjustmentUndo(t *testing.T) {
|
|
||||||
for _, test := range []struct {
|
|
||||||
root string
|
|
||||||
mountpoint string
|
|
||||||
in string
|
|
||||||
want string
|
|
||||||
wantErr error
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
root: "",
|
|
||||||
mountpoint: "mountpoint",
|
|
||||||
in: "mountpoint/path/to/file.txt",
|
|
||||||
want: "path/to/file.txt",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
root: "mountpoint",
|
|
||||||
mountpoint: "mountpoint",
|
|
||||||
in: "path/to/file.txt",
|
|
||||||
want: "path/to/file.txt",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
root: "mountpoint/path",
|
|
||||||
mountpoint: "mountpoint",
|
|
||||||
in: "to/file.txt",
|
|
||||||
want: "path/to/file.txt",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
root: "wrongmountpoint/path",
|
|
||||||
mountpoint: "mountpoint",
|
|
||||||
in: "to/file.txt",
|
|
||||||
want: "",
|
|
||||||
wantErr: errNotUnderRoot,
|
|
||||||
},
|
|
||||||
} {
|
|
||||||
what := fmt.Sprintf("%+v", test)
|
|
||||||
a := newAdjustment(test.root, test.mountpoint)
|
|
||||||
got, gotErr := a.undo(test.in)
|
|
||||||
assert.Equal(t, test.wantErr, gotErr)
|
|
||||||
assert.Equal(t, test.want, got, what)
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
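
The table-driven tests above pin down newAdjustment's behaviour: do maps an upstream-relative path under the mount point and then strips the combined root. A hypothetical re-implementation of do that satisfies the four TestAdjustmentDo cases; it is inferred from the table, not taken from the backend:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

var errNotUnderRoot = errors.New("file not under root")

// adjustment is inferred from the test table: root is the combined root,
// mountpoint the upstream's mount point.
type adjustment struct{ root, mountpoint string }

// do maps an upstream-relative path to a root-relative path.
func (a adjustment) do(s string) (string, error) {
	joined := a.mountpoint + "/" + s
	if a.root == "" {
		return joined, nil
	}
	prefix := a.root + "/"
	if !strings.HasPrefix(joined, prefix) {
		return "", errNotUnderRoot
	}
	return strings.TrimPrefix(joined, prefix), nil
}

func main() {
	a := adjustment{root: "mountpoint/path", mountpoint: "mountpoint"}
	fmt.Println(a.do("path/to/file.txt"))      // to/file.txt <nil>
	fmt.Println(a.do("wrongpath/to/file.txt")) // "" file not under root
}
```
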
@@ -1,92 +0,0 @@
-// Test Combine filesystem interface
-package combine_test
-
-import (
-	"testing"
-
-	_ "github.com/rclone/rclone/backend/local"
-	_ "github.com/rclone/rclone/backend/memory"
-	"github.com/rclone/rclone/fstest"
-	"github.com/rclone/rclone/fstest/fstests"
-)
-
-var (
-	unimplementableFsMethods     = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "OpenChunkWriter"}
-	unimplementableObjectMethods = []string{}
-)
-
-// TestIntegration runs integration tests against the remote
-func TestIntegration(t *testing.T) {
-	if *fstest.RemoteName == "" {
-		t.Skip("Skipping as -remote not set")
-	}
-	fstests.Run(t, &fstests.Opt{
-		RemoteName:                   *fstest.RemoteName,
-		UnimplementableFsMethods:     unimplementableFsMethods,
-		UnimplementableObjectMethods: unimplementableObjectMethods,
-	})
-}
-
-func TestLocal(t *testing.T) {
-	if *fstest.RemoteName != "" {
-		t.Skip("Skipping as -remote set")
-	}
-	dirs := MakeTestDirs(t, 3)
-	upstreams := "dir1=" + dirs[0] + " dir2=" + dirs[1] + " dir3=" + dirs[2]
-	name := "TestCombineLocal"
-	fstests.Run(t, &fstests.Opt{
-		RemoteName: name + ":dir1",
-		ExtraConfig: []fstests.ExtraConfigItem{
-			{Name: name, Key: "type", Value: "combine"},
-			{Name: name, Key: "upstreams", Value: upstreams},
-		},
-		QuickTestOK:                  true,
-		UnimplementableFsMethods:     unimplementableFsMethods,
-		UnimplementableObjectMethods: unimplementableObjectMethods,
-	})
-}
-
-func TestMemory(t *testing.T) {
-	if *fstest.RemoteName != "" {
-		t.Skip("Skipping as -remote set")
-	}
-	upstreams := "dir1=:memory:dir1 dir2=:memory:dir2 dir3=:memory:dir3"
-	name := "TestCombineMemory"
-	fstests.Run(t, &fstests.Opt{
-		RemoteName: name + ":dir1",
-		ExtraConfig: []fstests.ExtraConfigItem{
-			{Name: name, Key: "type", Value: "combine"},
-			{Name: name, Key: "upstreams", Value: upstreams},
-		},
-		QuickTestOK:                  true,
-		UnimplementableFsMethods:     unimplementableFsMethods,
-		UnimplementableObjectMethods: unimplementableObjectMethods,
-	})
-}
-
-func TestMixed(t *testing.T) {
-	if *fstest.RemoteName != "" {
-		t.Skip("Skipping as -remote set")
-	}
-	dirs := MakeTestDirs(t, 2)
-	upstreams := "dir1=" + dirs[0] + " dir2=" + dirs[1] + " dir3=:memory:dir3"
-	name := "TestCombineMixed"
-	fstests.Run(t, &fstests.Opt{
-		RemoteName: name + ":dir1",
-		ExtraConfig: []fstests.ExtraConfigItem{
-			{Name: name, Key: "type", Value: "combine"},
-			{Name: name, Key: "upstreams", Value: upstreams},
-		},
-		UnimplementableFsMethods:     unimplementableFsMethods,
-		UnimplementableObjectMethods: unimplementableObjectMethods,
-	})
-}
-
-// MakeTestDirs makes directories in /tmp for testing
-func MakeTestDirs(t *testing.T, n int) (dirs []string) {
-	for i := 1; i <= n; i++ {
-		dir := t.TempDir()
-		dirs = append(dirs, dir)
-	}
-	return dirs
-}
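Aside (not part of the patch): the deleted test above drives the combine backend through its `upstreams` option, a space-separated list of `dir=remote` pairs, exactly as the ExtraConfig entries show. A minimal rclone.conf sketch of the same setup; the section name and paths are illustrative assumptions, not taken from this compare:

[combined]
type = combine
upstreams = dir1=/tmp/dir1 dir2=/tmp/dir2 dir3=:memory:dir3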
@@ -10,11 +10,10 @@ import (
 	"encoding/binary"
 	"encoding/hex"
 	"encoding/json"
-	"errors"
 	"fmt"
 	"io"
+	"io/ioutil"
 	"os"
-	"path"
 	"regexp"
 	"strings"
 	"time"
@@ -22,6 +21,7 @@ import (
 	"github.com/buengese/sgzip"
 	"github.com/gabriel-vasile/mimetype"
 
+	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/chunkedreader"
@@ -29,7 +29,6 @@ import (
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/fspath"
 	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/fs/log"
 	"github.com/rclone/rclone/fs/object"
 	"github.com/rclone/rclone/fs/operations"
 )
@@ -37,7 +36,7 @@ import (
 // Globals
 const (
 	initialChunkSize = 262144 // Initial and max sizes of chunks when reading parts of the file. Currently
-	maxChunkSize = 8388608 // at 256 KiB and 8 MiB.
+	maxChunkSize = 8388608 // at 256KB and 8 MB.
 
 	bufferSize = 8388608
 	heuristicBytes = 1048576
@@ -54,7 +53,7 @@ const (
 	Gzip = 2
 )
 
-var nameRegexp = regexp.MustCompile(`^(.+?)\.([A-Za-z0-9-_]{11})$`)
+var nameRegexp = regexp.MustCompile("^(.+?)\\.([A-Za-z0-9+_]{11})$")
 
 // Register with Fs
 func init() {
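Aside (not part of the patch): `nameRegexp` expects an 11-character block after the last dot because an int64 size is 8 bytes, and 8 bytes encode to 11 characters of unpadded base64 (12 with the single `=` pad). A minimal sketch of that arithmetic, assuming URL-safe raw base64, which is what the `-`/`_` character class on the newer side of the hunk suggests; the helper name is hypothetical:

package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

// int64ToBase64 is a hypothetical illustration of how a 64-bit size can be
// packed into the 11-character suffix matched by nameRegexp.
func int64ToBase64(size int64) string {
	var buf [8]byte
	binary.BigEndian.PutUint64(buf[:], uint64(size))
	return base64.RawURLEncoding.EncodeToString(buf[:]) // always 11 characters
}

func main() {
	s := int64ToBase64(1048576)
	fmt.Println(s, len(s)) // prints the encoded size and 11
}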
@@ -71,9 +70,6 @@ func init() {
 		Name:        "compress",
 		Description: "Compress a remote",
 		NewFs:       NewFs,
-		MetadataInfo: &fs.MetadataInfo{
-			Help: `Any metadata supported by the underlying remote is read and written.`,
-		},
 		Options: []fs.Option{{
 			Name: "remote",
 			Help: "Remote to compress.",
@@ -87,23 +83,23 @@ func init() {
 			Name: "level",
 			Help: `GZIP compression level (-2 to 9).
 
 Generally -1 (default, equivalent to 5) is recommended.
-Levels 1 to 9 increase compression at the cost of speed. Going past 6
+Levels 1 to 9 increase compressiong at the cost of speed.. Going past 6
 generally offers very little return.
 
-Level -2 uses Huffman encoding only. Only use if you know what you
-are doing.
+Level -2 uses Huffmann encoding only. Only use if you now what you
+are doing
 Level 0 turns off compression.`,
 			Default:  sgzip.DefaultCompression,
 			Advanced: true,
 		}, {
 			Name: "ram_cache_limit",
 			Help: `Some remotes don't allow the upload of files with unknown size.
 In this case the compressed file will need to be cached to determine
 it's size.
 
-Files smaller than this limit will be cached in RAM, files larger than
-this limit will be cached on disk.`,
+Files smaller than this limit will be cached in RAM, file larger than
+this limit will be cached on disk`,
 			Default:  fs.SizeSuffix(20 * 1024 * 1024),
 			Advanced: true,
 		}},
@@ -131,7 +127,7 @@ type Fs struct {
 	features *fs.Features // optional features
 }
 
-// NewFs constructs an Fs from the path, container:path
+// NewFs contstructs an Fs from the path, container:path
 func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
@@ -147,7 +143,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 
 	wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
 	if err != nil {
-		return nil, fmt.Errorf("failed to parse remote %q to wrap: %w", remote, err)
+		return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
 	}
 
 	// Strip trailing slashes if they exist in rpath
@@ -162,7 +158,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 		wrappedFs, err = wInfo.NewFs(ctx, wName, remotePath, wConfig)
 	}
 	if err != nil && err != fs.ErrorIsFile {
-		return nil, fmt.Errorf("failed to make remote %s:%q to wrap: %w", wName, remotePath, err)
+		return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", wName, remotePath)
 	}
 
 	// Create the wrapping fs
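Aside (not part of the patch): the two hunks above, and many below, are the usual swap between github.com/pkg/errors and Go 1.13 error wrapping. Both attach context to a cause; the `%w` form keeps the cause inspectable with errors.Is/errors.As from the standard library. A minimal, self-contained sketch of the two styles, with a made-up sentinel error:

package main

import (
	"errors"
	"fmt"

	pkgerrors "github.com/pkg/errors"
)

var errNotFound = errors.New("not found") // hypothetical sentinel for illustration

func main() {
	// Go 1.13+ style: wrap with %w so callers can unwrap the cause.
	err1 := fmt.Errorf("failed to parse remote %q to wrap: %w", "remote:path", errNotFound)
	fmt.Println(errors.Is(err1, errNotFound)) // true

	// github.com/pkg/errors style, as used on the older side of the diff.
	err2 := pkgerrors.Wrapf(errNotFound, "failed to parse remote %q to wrap", "remote:path")
	fmt.Println(errors.Is(err2, errNotFound)) // also true with pkg/errors >= v0.9, whose wrappers implement Unwrap
}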
@@ -173,13 +169,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 		opt:  *opt,
 		mode: compressionModeFromName(opt.CompressionMode),
 	}
-	// Correct root if definitely pointing to a file
-	if err == fs.ErrorIsFile {
-		f.root = path.Dir(f.root)
-		if f.root == "." || f.root == "/" {
-			f.root = ""
-		}
-	}
 	// the features here are ones we could support, and they are
 	// ANDed with the ones from wrappedFs
 	f.features = (&fs.Features{
@@ -191,10 +180,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 		SetTier:                 true,
 		BucketBased:             true,
 		CanHaveEmptyDirectories: true,
-		ReadMetadata:            true,
-		WriteMetadata:           true,
-		UserMetadata:            true,
-		PartialUploads:          true,
 	}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
 	// We support reading MIME types no matter the wrapped fs
 	f.features.ReadMimeType = true
@@ -237,7 +222,7 @@ func processFileName(compressedFileName string) (origFileName string, extension
 	// Separate the filename and size from the extension
 	extensionPos := strings.LastIndex(compressedFileName, ".")
 	if extensionPos == -1 {
-		return "", "", 0, errors.New("file name has no extension")
+		return "", "", 0, errors.New("File name has no extension")
 	}
 	extension = compressedFileName[extensionPos:]
 	nameWithSize := compressedFileName[:extensionPos]
@@ -246,11 +231,11 @@ func processFileName(compressedFileName string) (origFileName string, extension
 	}
 	match := nameRegexp.FindStringSubmatch(nameWithSize)
 	if match == nil || len(match) != 3 {
-		return "", "", 0, errors.New("invalid filename")
+		return "", "", 0, errors.New("Invalid filename")
 	}
 	size, err := base64ToInt64(match[2])
 	if err != nil {
-		return "", "", 0, errors.New("could not decode size")
+		return "", "", 0, errors.New("Could not decode size")
 	}
 	return match[1], gzFileExt, size, nil
 }
@@ -265,16 +250,6 @@ func isMetadataFile(filename string) bool {
 	return strings.HasSuffix(filename, metaFileExt)
 }
 
-// Checks whether a file is a metadata file and returns the original
-// file name and a flag indicating whether it was a metadata file or
-// not.
-func unwrapMetadataFile(filename string) (string, bool) {
-	if !isMetadataFile(filename) {
-		return "", false
-	}
-	return filename[:len(filename)-len(metaFileExt)], true
-}
-
 // makeDataName generates the file name for a data file with specified compression mode
 func makeDataName(remote string, size int64, mode int) (newRemote string) {
 	if mode != Uncompressed {
@@ -329,7 +304,7 @@ func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
 		case fs.Directory:
 			f.addDir(&newEntries, x)
 		default:
-			return nil, fmt.Errorf("unknown object type %T", entry)
+			return nil, errors.Errorf("Unknown object type %T", entry)
 		}
 	}
 	return newEntries, nil
@@ -386,16 +361,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 	if err != nil {
 		return nil, err
 	}
-	meta, err := readMetadata(ctx, mo)
-	if err != nil {
-		return nil, fmt.Errorf("error decoding metadata: %w", err)
+	meta := readMetadata(ctx, mo)
+	if meta == nil {
+		return nil, errors.New("error decoding metadata")
 	}
 	// Create our Object
 	o, err := f.Fs.NewObject(ctx, makeDataName(remote, meta.CompressionMetadata.Size, meta.Mode))
-	if err != nil {
-		return nil, err
-	}
-	return f.newObject(o, mo, meta), nil
+	return f.newObject(o, mo, meta), err
 }
 
 // checkCompressAndType checks if an object is compressible and determines it's mime type
@@ -429,10 +401,6 @@ func isCompressible(r io.Reader) (bool, error) {
 	if err != nil {
 		return false, err
 	}
-	err = w.Close()
-	if err != nil {
-		return false, err
-	}
 	ratio := float64(n) / float64(b.Len())
 	return ratio > minCompressionRatio, nil
 }
@@ -442,7 +410,7 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
 	srcHash := hasher.Sums()[ht]
 	dstHash, err := o.Hash(ctx, ht)
 	if err != nil {
-		return fmt.Errorf("failed to read destination hash: %w", err)
+		return errors.Wrap(err, "failed to read destination hash")
 	}
 	if srcHash != "" && dstHash != "" && srcHash != dstHash {
 		// remove object
@@ -450,7 +418,7 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
 		if err != nil {
 			fs.Errorf(o, "Failed to remove corrupted object: %v", err)
 		}
-		return fmt.Errorf("corrupted on transfer: %v compressed hashes differ %q vs %q", ht, srcHash, dstHash)
+		return errors.Errorf("corrupted on transfer: %v compressed hashes differ %q vs %q", ht, srcHash, dstHash)
 	}
 	return nil
 }
@@ -473,7 +441,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
 		return f.Fs.Put(ctx, bytes.NewBuffer(buf[:n]), src, options...)
 	}
 
-	// Need to include what we already read
+	// Need to include what we allready read
 	in = &ReadCloserWrapper{
 		Reader: io.MultiReader(bytes.NewReader(buf), in),
 		Closer: in,
@@ -486,7 +454,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
 	}
 
 	fs.Debugf(f, "Target remote doesn't support streaming uploads, creating temporary local file")
-	tempFile, err := os.CreateTemp("", "rclone-press-")
+	tempFile, err := ioutil.TempFile("", "rclone-press-")
 	defer func() {
 		// these errors should be relatively uncritical and the upload should've succeeded so it's okay-ish
 		// to ignore them
@@ -494,10 +462,10 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
 		_ = os.Remove(tempFile.Name())
 	}()
 	if err != nil {
-		return nil, fmt.Errorf("failed to create temporary local FS to spool file: %w", err)
+		return nil, errors.Wrap(err, "Failed to create temporary local FS to spool file")
 	}
 	if _, err = io.Copy(tempFile, in); err != nil {
-		return nil, fmt.Errorf("failed to write temporary local file: %w", err)
+		return nil, errors.Wrap(err, "Failed to write temporary local file")
 	}
 	if _, err = tempFile.Seek(0, 0); err != nil {
 		return nil, err
@@ -564,8 +532,8 @@ func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, o
 	}
 
 	// Transfer the data
-	o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
-	//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx))
+	o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx), options)
+	//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx))
 	if err != nil {
 		if o != nil {
 			removeErr := o.Remove(ctx)
@@ -658,11 +626,9 @@ func (f *Fs) putMetadata(ctx context.Context, meta *ObjectMetadata, src fs.Objec
 	// Put the data
 	mo, err = put(ctx, metaReader, f.wrapInfo(src, makeMetadataName(src.Remote()), int64(len(data))), options...)
 	if err != nil {
-		if mo != nil {
-			removeErr := mo.Remove(ctx)
-			if removeErr != nil {
-				fs.Errorf(mo, "Failed to remove partially transferred object: %v", err)
-			}
+		removeErr := mo.Remove(ctx)
+		if removeErr != nil {
+			fs.Errorf(mo, "Failed to remove partially transferred object: %v", err)
 		}
 		return nil, err
 	}
@@ -699,7 +665,7 @@ func (f *Fs) putWithCustomFunctions(ctx context.Context, in io.Reader, src fs.Ob
 		}
 		return nil, err
 	}
-	return f.newObject(dataObject, mo, meta), nil
+	return f.newObject(dataObject, mo, meta), err
 }
 
 // Put in to the remote path with the modTime given of the given size
@@ -748,23 +714,23 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
 	if found && (oldObj.(*Object).meta.Mode != Uncompressed || compressible) {
 		err = oldObj.(*Object).Object.Remove(ctx)
 		if err != nil {
-			return nil, fmt.Errorf("couldn't remove original object: %w", err)
+			return nil, errors.Wrap(err, "Could remove original object")
 		}
 	}
 
 	// If our new object is compressed we have to rename it with the correct size.
-	// Uncompressed objects don't store the size in the name so we they'll already have the correct name.
+	// Uncompressed objects don't store the size in the name so we they'll allready have the correct name.
 	if compressible {
 		wrapObj, err := operations.Move(ctx, f.Fs, nil, f.dataName(src.Remote(), newObj.size, compressible), newObj.Object)
 		if err != nil {
-			return nil, fmt.Errorf("couldn't rename streamed object: %w", err)
+			return nil, errors.Wrap(err, "Couldn't rename streamed Object.")
 		}
 		newObj.Object = wrapObj
 	}
 	return newObj, nil
 }
 
-// Temporarily disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects
+// Temporarely disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects
 // will break stuff. Right no I can't think of a way to make this work.
 
 // PutUnchecked uploads the object
@@ -807,9 +773,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 
 // Copy src to this remote using server side copy operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -857,9 +823,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 
 // Move src to this remote using server side move operations.
 //
-// This is stored with the remote path given.
+// This is stored with the remote path given
 //
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
 //
 // Will only be called if src.Fs().Name() == f.Name()
 //
@@ -934,7 +900,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 func (f *Fs) CleanUp(ctx context.Context) error {
 	do := f.Fs.Features().CleanUp
 	if do == nil {
-		return errors.New("not supported by underlying remote")
+		return errors.New("can't CleanUp: not supported by underlying remote")
 	}
 	return do(ctx)
 }
@@ -943,7 +909,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	do := f.Fs.Features().About
 	if do == nil {
-		return nil, errors.New("not supported by underlying remote")
+		return nil, errors.New("can't About: not supported by underlying remote")
 	}
 	return do(ctx)
 }
@@ -997,8 +963,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
 	wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
 		fs.Logf(f, "path %q entryType %d", path, entryType)
 		var (
 			wrappedPath string
-			isMetadataFile bool
 		)
 		switch entryType {
 		case fs.EntryDirectory:
@@ -1006,10 +971,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
 		case fs.EntryObject:
 			// Note: All we really need to do to monitor the object is to check whether the metadata changed,
 			// as the metadata contains the hash. This will work unless there's a hash collision and the sizes stay the same.
-			wrappedPath, isMetadataFile = unwrapMetadataFile(path)
-			if !isMetadataFile {
-				return
-			}
+			wrappedPath = makeMetadataName(path)
 		default:
 			fs.Errorf(path, "press ChangeNotify: ignoring unknown EntryType %d", entryType)
 			return
@@ -1066,19 +1028,24 @@ func newMetadata(size int64, mode int, cmeta sgzip.GzipMetadata, md5 string, mim
 }
 
 // This function will read the metadata from a metadata object.
-func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata, err error) {
+func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata) {
 	// Open our meradata object
 	rc, err := mo.Open(ctx)
 	if err != nil {
-		return nil, err
+		return nil
 	}
-	defer fs.CheckClose(rc, &err)
+	defer func() {
+		err := rc.Close()
+		if err != nil {
+			fs.Errorf(mo, "Error closing object: %v", err)
+		}
+	}()
 	jr := json.NewDecoder(rc)
 	meta = new(ObjectMetadata)
 	if err = jr.Decode(meta); err != nil {
-		return nil, err
+		return nil
	}
-	return meta, nil
+	return meta
 }
 
 // Remove removes this object
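Aside (not part of the patch): the newer side of the hunk above returns an explicit error and closes the metadata reader with `fs.CheckClose(rc, &err)`, which folds a Close failure into the function's named return rather than only logging it. A generic sketch of that pattern, with a local checkClose stand-in (assumed to mirror fs.CheckClose's behaviour) so the example stays self-contained:

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

// checkClose mirrors the behaviour assumed of fs.CheckClose: close c and
// record the close error in *err only if no earlier error is set.
func checkClose(c io.Closer, err *error) {
	cerr := c.Close()
	if *err == nil {
		*err = cerr
	}
}

type objectMetadata struct {
	Mode int `json:"mode"`
}

func readMetadata(rc io.ReadCloser) (meta *objectMetadata, err error) {
	defer checkClose(rc, &err)
	meta = new(objectMetadata)
	if err = json.NewDecoder(rc).Decode(meta); err != nil {
		return nil, err
	}
	return meta, nil
}

func main() {
	meta, err := readMetadata(io.NopCloser(strings.NewReader(`{"mode":2}`)))
	fmt.Println(meta.Mode, err) // 2 <nil>
}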
@@ -1123,9 +1090,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	origName := o.Remote()
 	if o.meta.Mode != Uncompressed || compressible {
 		newObject, err = o.f.putWithCustomFunctions(ctx, in, o.f.wrapInfo(src, origName, src.Size()), options, o.f.Fs.Put, updateMeta, compressible, mimeType)
-		if err != nil {
-			return err
-		}
 		if newObject.Object.Remote() != o.Object.Remote() {
 			if removeErr := o.Object.Remove(ctx); removeErr != nil {
 				return removeErr
@@ -1139,9 +1103,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	}
 	// If we are, just update the object and metadata
 	newObject, err = o.f.putWithCustomFunctions(ctx, in, src, options, update, updateMeta, compressible, mimeType)
-	if err != nil {
-		return err
-	}
+	}
+	if err != nil {
+		return err
 	}
 	// Update object metadata and return
 	o.Object = newObject.Object
@@ -1152,9 +1116,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 
 // This will initialize the variables of a new press Object. The metadata object, mo, and metadata struct, meta, must be specified.
 func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object {
-	if o == nil {
-		log.Trace(nil, "newObject(%#v, %#v, %#v) called with nil o", o, mo, meta)
-	}
 	return &Object{
 		Object: o,
 		f:      f,
@@ -1167,9 +1128,6 @@ func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object
 
 // This initializes the variables of a press Object with only the size. The metadata will be loaded later on demand.
 func (f *Fs) newObjectSizeAndNameOnly(o fs.Object, moName string, size int64) *Object {
-	if o == nil {
-		log.Trace(nil, "newObjectSizeAndNameOnly(%#v, %#v, %#v) called with nil o", o, moName, size)
-	}
 	return &Object{
 		Object: o,
 		f:      f,
@@ -1197,7 +1155,7 @@ func (o *Object) loadMetadataIfNotLoaded(ctx context.Context) (err error) {
 		return err
 	}
 	if o.meta == nil {
-		o.meta, err = readMetadata(ctx, o.mo)
+		o.meta = readMetadata(ctx, o.mo)
 	}
 	return err
 }
@@ -1250,21 +1208,6 @@ func (o *Object) MimeType(ctx context.Context) string {
 	return o.meta.MimeType
 }
 
-// Metadata returns metadata for an object
-//
-// It should return nil if there is no Metadata
-func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
-	err := o.loadMetadataIfNotLoaded(ctx)
-	if err != nil {
-		return nil, err
-	}
-	do, ok := o.mo.(fs.Metadataer)
-	if !ok {
-		return nil, nil
-	}
-	return do.Metadata(ctx)
-}
-
 // Hash returns the selected checksum of the file
 // If no checksum is available it returns ""
 func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
@@ -1317,7 +1260,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
 		return o.Object.Open(ctx, options...)
 	}
 	// Get offset and limit from OpenOptions, pass the rest to the underlying remote
-	var openOptions = []fs.OpenOption{&fs.SeekOption{Offset: 0}}
+	var openOptions []fs.OpenOption = []fs.OpenOption{&fs.SeekOption{Offset: 0}}
 	var offset, limit int64 = 0, -1
 	for _, option := range options {
 		switch x := option.(type) {
@@ -1411,51 +1354,6 @@ func (o *ObjectInfo) Hash(ctx context.Context, ht hash.Type) (string, error) {
 	return "", nil // cannot know the checksum
 }
 
-// ID returns the ID of the Object if known, or "" if not
-func (o *ObjectInfo) ID() string {
-	do, ok := o.src.(fs.IDer)
-	if !ok {
-		return ""
-	}
-	return do.ID()
-}
-
-// MimeType returns the content type of the Object if
-// known, or "" if not
-func (o *ObjectInfo) MimeType(ctx context.Context) string {
-	do, ok := o.src.(fs.MimeTyper)
-	if !ok {
-		return ""
-	}
-	return do.MimeType(ctx)
-}
-
-// UnWrap returns the Object that this Object is wrapping or
-// nil if it isn't wrapping anything
-func (o *ObjectInfo) UnWrap() fs.Object {
-	return fs.UnWrapObjectInfo(o.src)
-}
-
-// Metadata returns metadata for an object
-//
-// It should return nil if there is no Metadata
-func (o *ObjectInfo) Metadata(ctx context.Context) (fs.Metadata, error) {
-	do, ok := o.src.(fs.Metadataer)
-	if !ok {
-		return nil, nil
-	}
-	return do.Metadata(ctx)
-}
-
-// GetTier returns storage tier or class of the Object
-func (o *ObjectInfo) GetTier() string {
-	do, ok := o.src.(fs.GetTierer)
-	if !ok {
-		return ""
-	}
-	return do.GetTier()
-}
-
 // ID returns the ID of the Object if known, or "" if not
 func (o *Object) ID() string {
 	do, ok := o.Object.(fs.IDer)
@@ -1508,6 +1406,11 @@ var (
 	_ fs.ChangeNotifier = (*Fs)(nil)
 	_ fs.PublicLinker = (*Fs)(nil)
 	_ fs.Shutdowner = (*Fs)(nil)
-	_ fs.FullObjectInfo = (*ObjectInfo)(nil)
-	_ fs.FullObject = (*Object)(nil)
+	_ fs.ObjectInfo = (*ObjectInfo)(nil)
+	_ fs.GetTierer = (*Object)(nil)
+	_ fs.SetTierer = (*Object)(nil)
+	_ fs.Object = (*Object)(nil)
+	_ fs.ObjectUnWrapper = (*Object)(nil)
+	_ fs.IDer = (*Object)(nil)
+	_ fs.MimeTyper = (*Object)(nil)
 )
@@ -14,26 +14,23 @@ import (
 	"github.com/rclone/rclone/fstest/fstests"
 )
 
-var defaultOpt = fstests.Opt{
-	RemoteName: "TestCompress:",
-	NilObject:  (*Object)(nil),
-	UnimplementableFsMethods: []string{
-		"OpenWriterAt",
-		"OpenChunkWriter",
-		"MergeDirs",
-		"DirCacheFlush",
-		"PutUnchecked",
-		"PutStream",
-		"UserInfo",
-		"Disconnect",
-	},
-	TiersToTest:                  []string{"STANDARD", "STANDARD_IA"},
-	UnimplementableObjectMethods: []string{},
-}
-
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
-	fstests.Run(t, &defaultOpt)
+	opt := fstests.Opt{
+		RemoteName: *fstest.RemoteName,
+		NilObject:  (*Object)(nil),
+		UnimplementableFsMethods: []string{
+			"OpenWriterAt",
+			"MergeDirs",
+			"DirCacheFlush",
+			"PutUnchecked",
+			"PutStream",
+			"UserInfo",
+			"Disconnect",
+		},
+		TiersToTest:                  []string{"STANDARD", "STANDARD_IA"},
+		UnimplementableObjectMethods: []string{}}
+	fstests.Run(t, &opt)
 }
 
 // TestRemoteGzip tests GZIP compression
@@ -43,13 +40,26 @@ func TestRemoteGzip(t *testing.T) {
 	}
 	tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip")
 	name := "TestCompressGzip"
-	opt := defaultOpt
-	opt.RemoteName = name + ":"
-	opt.ExtraConfig = []fstests.ExtraConfigItem{
-		{Name: name, Key: "type", Value: "compress"},
-		{Name: name, Key: "remote", Value: tempdir},
-		{Name: name, Key: "compression_mode", Value: "gzip"},
-	}
-	opt.QuickTestOK = true
-	fstests.Run(t, &opt)
+	fstests.Run(t, &fstests.Opt{
+		RemoteName: name + ":",
+		NilObject:  (*Object)(nil),
+		UnimplementableFsMethods: []string{
+			"OpenWriterAt",
+			"MergeDirs",
+			"DirCacheFlush",
+			"PutUnchecked",
+			"PutStream",
+			"UserInfo",
+			"Disconnect",
+		},
+		UnimplementableObjectMethods: []string{
+			"GetTier",
+			"SetTier",
+		},
+		ExtraConfig: []fstests.ExtraConfigItem{
+			{Name: name, Key: "type", Value: "compress"},
+			{Name: name, Key: "remote", Value: tempdir},
+			{Name: name, Key: "compression_mode", Value: "gzip"},
+		},
+	})
 }
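Aside (not part of the patch): the ExtraConfig items in TestRemoteGzip correspond to a config section like the sketch below; the path is what filepath.Join(os.TempDir(), "rclone-compress-test-gzip") resolves to on a typical Linux box, and both it and the section name are illustrative:

[TestCompressGzip]
type = compress
remote = /tmp/rclone-compress-test-gzip
compression_mode = gzip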
@@ -7,22 +7,17 @@ import (
 	gocipher "crypto/cipher"
 	"crypto/rand"
 	"encoding/base32"
-	"encoding/base64"
-	"errors"
 	"fmt"
 	"io"
 	"strconv"
 	"strings"
 	"sync"
-	"time"
 	"unicode/utf8"
 
-	"github.com/Max-Sum/base32768"
+	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/crypt/pkcs7"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
-	"github.com/rclone/rclone/lib/readers"
-	"github.com/rclone/rclone/lib/version"
 	"github.com/rfjakob/eme"
 	"golang.org/x/crypto/nacl/secretbox"
 	"golang.org/x/crypto/scrypt"
@@ -38,6 +33,7 @@ const (
 	blockHeaderSize = secretbox.Overhead
 	blockDataSize = 64 * 1024
 	blockSize = blockHeaderSize + blockDataSize
+	encryptedSuffix = ".bin" // when file name encryption is off we add this suffix to make sure the cloud provider doesn't process the file
 )
 
 // Errors returned by cipher
@@ -53,9 +49,8 @@ var (
 	ErrorEncryptedBadBlock = errors.New("failed to authenticate decrypted block - bad password?")
 	ErrorBadBase32Encoding = errors.New("bad base32 filename encoding")
 	ErrorFileClosed = errors.New("file already closed")
-	ErrorNotAnEncryptedFile = errors.New("not an encrypted file - does not match suffix")
+	ErrorNotAnEncryptedFile = errors.New("not an encrypted file - no \"" + encryptedSuffix + "\" suffix")
 	ErrorBadSeek = errors.New("Seek beyond end of file")
-	ErrorSuffixMissingDot = errors.New("suffix config setting should include a '.'")
 	defaultSalt = []byte{0xA8, 0x0D, 0xF4, 0x3A, 0x8F, 0xBD, 0x03, 0x08, 0xA7, 0xCA, 0xB8, 0x3E, 0x58, 0x1F, 0x86, 0xB1}
 	obfuscQuoteRune = '!'
 )
@@ -97,12 +92,12 @@ func NewNameEncryptionMode(s string) (mode NameEncryptionMode, err error) {
 	case "obfuscate":
 		mode = NameEncryptionObfuscated
 	default:
-		err = fmt.Errorf("unknown file name encryption mode %q", s)
+		err = errors.Errorf("Unknown file name encryption mode %q", s)
 	}
 	return mode, err
 }
 
-// String turns mode into a human-readable string
+// String turns mode into a human readable string
 func (mode NameEncryptionMode) String() (out string) {
 	switch mode {
 	case NameEncryptionOff:
@@ -117,83 +112,27 @@ func (mode NameEncryptionMode) String() (out string) {
 	return out
 }
 
-// fileNameEncoding are the encoding methods dealing with encrypted file names
-type fileNameEncoding interface {
-	EncodeToString(src []byte) string
-	DecodeString(s string) ([]byte, error)
-}
-
-// caseInsensitiveBase32Encoding defines a file name encoding
-// using a modified version of standard base32 as described in
-// RFC4648
-//
-// The standard encoding is modified in two ways
-//   - it becomes lower case (no-one likes upper case filenames!)
-//   - we strip the padding character `=`
-type caseInsensitiveBase32Encoding struct{}
-
-// EncodeToString encodes a string using the modified version of
-// base32 encoding.
-func (caseInsensitiveBase32Encoding) EncodeToString(src []byte) string {
-	encoded := base32.HexEncoding.EncodeToString(src)
-	encoded = strings.TrimRight(encoded, "=")
-	return strings.ToLower(encoded)
-}
-
-// DecodeString decodes a string as encoded by EncodeToString
-func (caseInsensitiveBase32Encoding) DecodeString(s string) ([]byte, error) {
-	if strings.HasSuffix(s, "=") {
-		return nil, ErrorBadBase32Encoding
-	}
-	// First figure out how many padding characters to add
-	roundUpToMultipleOf8 := (len(s) + 7) &^ 7
-	equals := roundUpToMultipleOf8 - len(s)
-	s = strings.ToUpper(s) + "========"[:equals]
-	return base32.HexEncoding.DecodeString(s)
-}
-
-// NewNameEncoding creates a NameEncoding from a string
-func NewNameEncoding(s string) (enc fileNameEncoding, err error) {
-	s = strings.ToLower(s)
-	switch s {
-	case "base32":
-		enc = caseInsensitiveBase32Encoding{}
-	case "base64":
-		enc = base64.RawURLEncoding
-	case "base32768":
-		enc = base32768.SafeEncoding
-	default:
-		err = fmt.Errorf("unknown file name encoding mode %q", s)
-	}
-	return enc, err
-}
-
 // Cipher defines an encoding and decoding cipher for the crypt backend
 type Cipher struct {
 	dataKey [32]byte // Key for secretbox
 	nameKey [32]byte // 16,24 or 32 bytes
 	nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
 	block gocipher.Block
 	mode NameEncryptionMode
-	fileNameEnc fileNameEncoding
-	buffers sync.Pool // encrypt/decrypt buffers
-	cryptoRand io.Reader // read crypto random numbers from here
-	dirNameEncrypt bool
-	passBadBlocks bool // if set passed bad blocks as zeroed blocks
-	encryptedSuffix string
+	buffers sync.Pool // encrypt/decrypt buffers
+	cryptoRand io.Reader // read crypto random numbers from here
+	dirNameEncrypt bool
 }
 
 // newCipher initialises the cipher. If salt is "" then it uses a built in salt val
-func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool, enc fileNameEncoding) (*Cipher, error) {
+func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*Cipher, error) {
 	c := &Cipher{
 		mode: mode,
-		fileNameEnc: enc,
 		cryptoRand: rand.Reader,
 		dirNameEncrypt: dirNameEncrypt,
-		encryptedSuffix: ".bin",
 	}
 	c.buffers.New = func() interface{} {
-		return new([blockSize]byte)
+		return make([]byte, blockSize)
 	}
 	err := c.Key(password, salt)
 	if err != nil {
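Aside (not part of the patch): one side of the hunk above pools `*[blockSize]byte` while the other pools `[]byte`. Putting a pointer (or another single-word value) into a sync.Pool avoids the allocation caused by boxing a slice header into an interface on every Put, which is the point behind staticcheck's SA6002. A minimal sketch of the pointer-to-array variant; the constant value is illustrative:

package main

import (
	"fmt"
	"sync"
)

const blockSize = 64*1024 + 16 // illustrative, not the crypt backend's exact figure

var buffers = sync.Pool{
	New: func() interface{} {
		return new([blockSize]byte) // a pointer, so Get/Put don't re-box a slice header
	},
}

func main() {
	buf := buffers.Get().(*[blockSize]byte)
	copy(buf[:], []byte("example data"))
	fmt.Println(len(buf)) // blockSize
	buffers.Put(buf)
}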
@@ -202,29 +141,11 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
 	return c, nil
 }
 
-// setEncryptedSuffix set suffix, or an empty string
-func (c *Cipher) setEncryptedSuffix(suffix string) {
-	if strings.EqualFold(suffix, "none") {
-		c.encryptedSuffix = ""
-		return
-	}
-	if !strings.HasPrefix(suffix, ".") {
-		fs.Errorf(nil, "crypt: bad suffix: %v", ErrorSuffixMissingDot)
-		suffix = "." + suffix
-	}
-	c.encryptedSuffix = suffix
-}
-
-// Call to set bad block pass through
-func (c *Cipher) setPassBadBlocks(passBadBlocks bool) {
-	c.passBadBlocks = passBadBlocks
-}
-
 // Key creates all the internal keys from the password passed in using
 // scrypt.
 //
 // If salt is "" we use a fixed salt just to make attackers lives
-// slightly harder than using no salt.
+// slighty harder than using no salt.
 //
 // Note that empty password makes all 0x00 keys which is used in the
 // tests.
@@ -252,18 +173,45 @@ func (c *Cipher) Key(password, salt string) (err error) {
 }
 
 // getBlock gets a block from the pool of size blockSize
-func (c *Cipher) getBlock() *[blockSize]byte {
-	return c.buffers.Get().(*[blockSize]byte)
+func (c *Cipher) getBlock() []byte {
+	return c.buffers.Get().([]byte)
 }
 
 // putBlock returns a block to the pool of size blockSize
-func (c *Cipher) putBlock(buf *[blockSize]byte) {
+func (c *Cipher) putBlock(buf []byte) {
+	if len(buf) != blockSize {
+		panic("bad blocksize returned to pool")
+	}
 	c.buffers.Put(buf)
 }
 
+// encodeFileName encodes a filename using a modified version of
+// standard base32 as described in RFC4648
+//
+// The standard encoding is modified in two ways
+//  * it becomes lower case (no-one likes upper case filenames!)
+//  * we strip the padding character `=`
+func encodeFileName(in []byte) string {
+	encoded := base32.HexEncoding.EncodeToString(in)
+	encoded = strings.TrimRight(encoded, "=")
+	return strings.ToLower(encoded)
+}
+
+// decodeFileName decodes a filename as encoded by encodeFileName
+func decodeFileName(in string) ([]byte, error) {
+	if strings.HasSuffix(in, "=") {
+		return nil, ErrorBadBase32Encoding
+	}
+	// First figure out how many padding characters to add
+	roundUpToMultipleOf8 := (len(in) + 7) &^ 7
+	equals := roundUpToMultipleOf8 - len(in)
+	in = strings.ToUpper(in) + "========"[:equals]
+	return base32.HexEncoding.DecodeString(in)
+}
+
 // encryptSegment encrypts a path segment
 //
-// This uses EME with AES.
+// This uses EME with AES
 //
 // EME (ECB-Mix-ECB) is a wide-block encryption mode presented in the
 // 2003 paper "A Parallelizable Enciphering Mode" by Halevi and
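Aside (not part of the patch): both versions of the decode path restore base32 padding with `(len(s) + 7) &^ 7`, which rounds the length up to the next multiple of 8, since base32 works in 8-character groups. For a 13-character name that gives 16, so 3 '=' are appended before decoding. A tiny self-contained round-trip check of the same arithmetic using the standard library:

package main

import (
	"encoding/base32"
	"fmt"
	"strings"
)

func main() {
	src := []byte("hello674") // 8 bytes of input
	enc := strings.ToLower(strings.TrimRight(base32.HexEncoding.EncodeToString(src), "="))
	fmt.Println(enc, len(enc)) // 13 characters once the padding is stripped

	// Restore the stripped padding: round the length up to a multiple of 8.
	roundUp := (len(enc) + 7) &^ 7 // 16
	padded := strings.ToUpper(enc) + strings.Repeat("=", roundUp-len(enc))
	dec, err := base32.HexEncoding.DecodeString(padded)
	fmt.Println(string(dec), err) // hello674 <nil>
}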
@@ -273,15 +221,15 @@ func (c *Cipher) putBlock(buf *[blockSize]byte) {
|
|||||||
// same filename must encrypt to the same thing.
|
// same filename must encrypt to the same thing.
|
||||||
//
|
//
|
||||||
// This means that
|
// This means that
|
||||||
// - filenames with the same name will encrypt the same
|
// * filenames with the same name will encrypt the same
|
||||||
// - filenames which start the same won't have a common prefix
|
// * filenames which start the same won't have a common prefix
|
||||||
func (c *Cipher) encryptSegment(plaintext string) string {
|
func (c *Cipher) encryptSegment(plaintext string) string {
|
||||||
if plaintext == "" {
|
if plaintext == "" {
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
paddedPlaintext := pkcs7.Pad(nameCipherBlockSize, []byte(plaintext))
|
paddedPlaintext := pkcs7.Pad(nameCipherBlockSize, []byte(plaintext))
|
||||||
ciphertext := eme.Transform(c.block, c.nameTweak[:], paddedPlaintext, eme.DirectionEncrypt)
|
ciphertext := eme.Transform(c.block, c.nameTweak[:], paddedPlaintext, eme.DirectionEncrypt)
|
||||||
return c.fileNameEnc.EncodeToString(ciphertext)
|
return encodeFileName(ciphertext)
|
||||||
}
|
}
|
||||||
|
|
||||||
// decryptSegment decrypts a path segment
|
// decryptSegment decrypts a path segment
|
||||||
@@ -289,7 +237,7 @@ func (c *Cipher) decryptSegment(ciphertext string) (string, error) {
|
|||||||
if ciphertext == "" {
|
if ciphertext == "" {
|
||||||
return "", nil
|
return "", nil
|
||||||
}
|
}
|
||||||
rawCiphertext, err := c.fileNameEnc.DecodeString(ciphertext)
|
rawCiphertext, err := decodeFileName(ciphertext)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
@@ -494,32 +442,11 @@ func (c *Cipher) encryptFileName(in string) string {
|
|||||||
if !c.dirNameEncrypt && i != (len(segments)-1) {
|
if !c.dirNameEncrypt && i != (len(segments)-1) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Strip version string so that only the non-versioned part
|
|
||||||
// of the file name gets encrypted/obfuscated
|
|
||||||
hasVersion := false
|
|
||||||
var t time.Time
|
|
||||||
if i == (len(segments)-1) && version.Match(segments[i]) {
|
|
||||||
var s string
|
|
||||||
t, s = version.Remove(segments[i])
|
|
||||||
// version.Remove can fail, in which case it returns segments[i]
|
|
||||||
if s != segments[i] {
|
|
||||||
segments[i] = s
|
|
||||||
hasVersion = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.mode == NameEncryptionStandard {
|
if c.mode == NameEncryptionStandard {
|
||||||
segments[i] = c.encryptSegment(segments[i])
|
segments[i] = c.encryptSegment(segments[i])
|
||||||
} else {
|
} else {
|
||||||
segments[i] = c.obfuscateSegment(segments[i])
|
segments[i] = c.obfuscateSegment(segments[i])
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add back a version to the encrypted/obfuscated
|
|
||||||
// file name, if we stripped it off earlier
|
|
||||||
if hasVersion {
|
|
||||||
segments[i] = version.Add(segments[i], t)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return strings.Join(segments, "/")
|
return strings.Join(segments, "/")
|
||||||
}
|
}
|
||||||
@@ -527,7 +454,7 @@ func (c *Cipher) encryptFileName(in string) string {
|
|||||||
// EncryptFileName encrypts a file path
|
// EncryptFileName encrypts a file path
|
||||||
func (c *Cipher) EncryptFileName(in string) string {
|
func (c *Cipher) EncryptFileName(in string) string {
|
||||||
if c.mode == NameEncryptionOff {
|
if c.mode == NameEncryptionOff {
|
||||||
return in + c.encryptedSuffix
|
return in + encryptedSuffix
|
||||||
}
|
}
|
||||||
return c.encryptFileName(in)
|
return c.encryptFileName(in)
|
||||||
}
|
}
|
||||||
@@ -550,21 +477,6 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
|
|||||||
if !c.dirNameEncrypt && i != (len(segments)-1) {
|
if !c.dirNameEncrypt && i != (len(segments)-1) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Strip version string so that only the non-versioned part
|
|
||||||
// of the file name gets decrypted/deobfuscated
|
|
||||||
hasVersion := false
|
|
||||||
var t time.Time
|
|
||||||
if i == (len(segments)-1) && version.Match(segments[i]) {
|
|
||||||
var s string
|
|
||||||
t, s = version.Remove(segments[i])
|
|
||||||
// version.Remove can fail, in which case it returns segments[i]
|
|
||||||
if s != segments[i] {
|
|
||||||
segments[i] = s
|
|
||||||
hasVersion = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.mode == NameEncryptionStandard {
|
if c.mode == NameEncryptionStandard {
|
||||||
segments[i], err = c.decryptSegment(segments[i])
|
segments[i], err = c.decryptSegment(segments[i])
|
||||||
} else {
|
} else {
|
||||||
@@ -574,12 +486,6 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add back a version to the decrypted/deobfuscated
|
|
||||||
// file name, if we stripped it off earlier
|
|
||||||
if hasVersion {
|
|
||||||
segments[i] = version.Add(segments[i], t)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return strings.Join(segments, "/"), nil
|
return strings.Join(segments, "/"), nil
|
||||||
}
|
}
|
||||||
@@ -587,19 +493,11 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
|
|||||||
// DecryptFileName decrypts a file path
|
// DecryptFileName decrypts a file path
|
||||||
func (c *Cipher) DecryptFileName(in string) (string, error) {
|
func (c *Cipher) DecryptFileName(in string) (string, error) {
|
||||||
if c.mode == NameEncryptionOff {
|
if c.mode == NameEncryptionOff {
|
||||||
remainingLength := len(in) - len(c.encryptedSuffix)
|
remainingLength := len(in) - len(encryptedSuffix)
|
||||||
if remainingLength == 0 || !strings.HasSuffix(in, c.encryptedSuffix) {
|
if remainingLength > 0 && strings.HasSuffix(in, encryptedSuffix) {
|
||||||
return "", ErrorNotAnEncryptedFile
|
return in[:remainingLength], nil
|
||||||
}
|
}
|
||||||
decrypted := in[:remainingLength]
|
return "", ErrorNotAnEncryptedFile
|
||||||
if version.Match(decrypted) {
|
|
||||||
_, unversioned := version.Remove(decrypted)
|
|
||||||
if unversioned == "" {
|
|
||||||
return "", ErrorNotAnEncryptedFile
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Leave the version string on, if it was there
|
|
||||||
return decrypted, nil
|
|
||||||
}
|
}
|
||||||
return c.decryptFileName(in)
|
return c.decryptFileName(in)
|
||||||
}
|
}
|
||||||
@@ -628,9 +526,9 @@ func (n *nonce) pointer() *[fileNonceSize]byte {
|
|||||||
// fromReader fills the nonce from an io.Reader - normally the OSes
|
// fromReader fills the nonce from an io.Reader - normally the OSes
|
||||||
// crypto random number generator
|
// crypto random number generator
|
||||||
func (n *nonce) fromReader(in io.Reader) error {
|
func (n *nonce) fromReader(in io.Reader) error {
|
||||||
read, err := readers.ReadFill(in, (*n)[:])
|
read, err := io.ReadFull(in, (*n)[:])
|
||||||
if read != fileNonceSize {
|
if read != fileNonceSize {
|
||||||
return fmt.Errorf("short read of nonce: %w", err)
|
return errors.Wrap(err, "short read of nonce")
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -683,8 +581,8 @@ type encrypter struct {
|
|||||||
in io.Reader
|
in io.Reader
|
||||||
c *Cipher
|
c *Cipher
|
||||||
nonce nonce
|
nonce nonce
|
||||||
buf *[blockSize]byte
|
buf []byte
|
||||||
readBuf *[blockSize]byte
|
readBuf []byte
|
||||||
bufIndex int
|
bufIndex int
|
||||||
bufSize int
|
bufSize int
|
||||||
err error
|
err error
|
||||||
@@ -709,9 +607,9 @@ func (c *Cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
         }
     }
     // Copy magic into buffer
-    copy((*fh.buf)[:], fileMagicBytes)
+    copy(fh.buf, fileMagicBytes)
     // Copy nonce into buffer
-    copy((*fh.buf)[fileMagicSize:], fh.nonce[:])
+    copy(fh.buf[fileMagicSize:], fh.nonce[:])
     return fh, nil
 }
 
@@ -726,20 +624,22 @@ func (fh *encrypter) Read(p []byte) (n int, err error) {
     if fh.bufIndex >= fh.bufSize {
         // Read data
         // FIXME should overlap the reads with a go-routine and 2 buffers?
-        readBuf := (*fh.readBuf)[:blockDataSize]
-        n, err = readers.ReadFill(fh.in, readBuf)
+        readBuf := fh.readBuf[:blockDataSize]
+        n, err = io.ReadFull(fh.in, readBuf)
         if n == 0 {
+            // err can't be nil since:
+            // n == len(buf) if and only if err == nil.
             return fh.finish(err)
         }
         // possibly err != nil here, but we will process the
-        // data and the next call to ReadFill will return 0, err
+        // data and the next call to ReadFull will return 0, err
         // Encrypt the block using the nonce
-        secretbox.Seal((*fh.buf)[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
+        secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
         fh.bufIndex = 0
         fh.bufSize = blockHeaderSize + n
         fh.nonce.increment()
     }
-    n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufSize])
+    n = copy(p, fh.buf[fh.bufIndex:fh.bufSize])
     fh.bufIndex += n
     return n, nil
 }
@@ -780,8 +680,8 @@ type decrypter struct {
     nonce        nonce
     initialNonce nonce
     c            *Cipher
-    buf          *[blockSize]byte
-    readBuf      *[blockSize]byte
+    buf          []byte
+    readBuf      []byte
     bufIndex     int
     bufSize      int
     err          error
@@ -799,12 +699,12 @@ func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
         limit: -1,
     }
     // Read file header (magic + nonce)
-    readBuf := (*fh.readBuf)[:fileHeaderSize]
-    n, err := readers.ReadFill(fh.rc, readBuf)
-    if n < fileHeaderSize && err == io.EOF {
+    readBuf := fh.readBuf[:fileHeaderSize]
+    _, err := io.ReadFull(fh.rc, readBuf)
+    if err == io.EOF || err == io.ErrUnexpectedEOF {
         // This read from 0..fileHeaderSize-1 bytes
         return nil, fh.finishAndClose(ErrorEncryptedFileTooShort)
-    } else if err != io.EOF && err != nil {
+    } else if err != nil {
         return nil, fh.finishAndClose(err)
     }
     // check the magic
@@ -862,8 +762,10 @@ func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offse
 func (fh *decrypter) fillBuffer() (err error) {
     // FIXME should overlap the reads with a go-routine and 2 buffers?
     readBuf := fh.readBuf
-    n, err := readers.ReadFill(fh.rc, (*readBuf)[:])
+    n, err := io.ReadFull(fh.rc, readBuf)
     if n == 0 {
+        // err can't be nil since:
+        // n == len(buf) if and only if err == nil.
         return err
     }
     // possibly err != nil here, but we will process the data and
@@ -871,25 +773,18 @@ func (fh *decrypter) fillBuffer() (err error) {
 
     // Check header + 1 byte exists
     if n <= blockHeaderSize {
-        if err != nil && err != io.EOF {
+        if err != nil {
             return err // return pending error as it is likely more accurate
         }
         return ErrorEncryptedFileBadHeader
     }
     // Decrypt the block using the nonce
-    _, ok := secretbox.Open((*fh.buf)[:0], (*readBuf)[:n], fh.nonce.pointer(), &fh.c.dataKey)
+    _, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
     if !ok {
-        if err != nil && err != io.EOF {
+        if err != nil {
             return err // return pending error as it is likely more accurate
         }
-        if !fh.c.passBadBlocks {
-            return ErrorEncryptedBadBlock
-        }
-        fs.Errorf(nil, "crypt: ignoring: %v", ErrorEncryptedBadBlock)
-        // Zero out the bad block and continue
-        for i := range (*fh.buf)[:n] {
-            (*fh.buf)[i] = 0
-        }
+        return ErrorEncryptedBadBlock
     }
     fh.bufIndex = 0
     fh.bufSize = n - blockHeaderSize
@@ -915,7 +810,7 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
     if fh.limit >= 0 && fh.limit < int64(toCopy) {
         toCopy = int(fh.limit)
     }
-    n = copy(p, (*fh.buf)[fh.bufIndex:fh.bufIndex+toCopy])
+    n = copy(p, fh.buf[fh.bufIndex:fh.bufIndex+toCopy])
     fh.bufIndex += n
     if fh.limit >= 0 {
         fh.limit -= int64(n)
@@ -926,8 +821,9 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
     return n, nil
 }
 
-// calculateUnderlying converts an (offset, limit) in an encrypted file
-// into an (underlyingOffset, underlyingLimit) for the underlying file.
+// calculateUnderlying converts an (offset, limit) in a crypted file
+// into an (underlyingOffset, underlyingLimit) for the underlying
+// file.
 //
 // It also returns number of bytes to discard after reading the first
 // block and number of blocks this is from the start so the nonce can
@@ -1008,7 +904,7 @@ func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, li
     // Re-open the underlying object with the offset given
     rc, err := fh.open(ctx, underlyingOffset, underlyingLimit)
     if err != nil {
-        return 0, fh.finish(fmt.Errorf("couldn't reopen file with offset and limit: %w", err))
+        return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit"))
     }
 
     // Set the file handle
@@ -1106,7 +1002,7 @@ func (c *Cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
 
 // DecryptDataSeek decrypts the data stream from offset
 //
-// The open function must return a ReadCloser opened to the offset supplied.
+// The open function must return a ReadCloser opened to the offset supplied
 //
 // You must use this form of DecryptData if you might want to Seek the file handle
 func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
[File diff suppressed because it is too large]
@@ -3,13 +3,13 @@ package crypt
 
 import (
     "context"
-    "errors"
     "fmt"
     "io"
     "path"
     "strings"
     "time"
 
+    "github.com/pkg/errors"
     "github.com/rclone/rclone/fs"
     "github.com/rclone/rclone/fs/accounting"
     "github.com/rclone/rclone/fs/cache"
@@ -28,12 +28,9 @@ func init() {
         Description: "Encrypt/Decrypt a remote",
         NewFs:       NewFs,
         CommandHelp: commandHelp,
-        MetadataInfo: &fs.MetadataInfo{
-            Help: `Any metadata supported by the underlying remote is read and written.`,
-        },
         Options: []fs.Option{{
             Name:     "remote",
-            Help:     "Remote to encrypt/decrypt.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
+            Help:     "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
             Required: true,
         }, {
             Name:    "filename_encryption",
@@ -42,13 +39,13 @@ func init() {
             Examples: []fs.OptionExample{
                 {
                     Value: "standard",
-                    Help:  "Encrypt the filenames.\nSee the docs for the details.",
+                    Help:  "Encrypt the filenames see the docs for the details.",
                 }, {
                     Value: "obfuscate",
                     Help:  "Very simple filename obfuscation.",
                 }, {
                     Value: "off",
-                    Help:  "Don't encrypt the file names.\nAdds a \".bin\", or \"suffix\" extension only.",
+                    Help:  "Don't encrypt the file names. Adds a \".bin\" extension only.",
                 },
             },
         }, {
@@ -74,14 +71,12 @@ NB If filename_encryption is "off" then this option will do nothing.`,
|
|||||||
Required: true,
|
Required: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "password2",
|
Name: "password2",
|
||||||
Help: "Password or pass phrase for salt.\n\nOptional but recommended.\nShould be different to the previous password.",
|
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
|
||||||
IsPassword: true,
|
IsPassword: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "server_side_across_configs",
|
Name: "server_side_across_configs",
|
||||||
Default: false,
|
Default: false,
|
||||||
Help: `Deprecated: use --server-side-across-configs instead.
|
Help: `Allow server-side operations (e.g. copy) to work across different crypt configs.
|
||||||
|
|
||||||
Allow server-side operations (e.g. copy) to work across different crypt configs.
|
|
||||||
|
|
||||||
Normally this option is not what you want, but if you have two crypts
|
Normally this option is not what you want, but if you have two crypts
|
||||||
pointing to the same backend you can use it.
|
pointing to the same backend you can use it.
|
||||||
@@ -121,46 +116,6 @@ names, or for debugging purposes.`,
|
|||||||
Help: "Encrypt file data.",
|
Help: "Encrypt file data.",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}, {
|
|
||||||
Name: "pass_bad_blocks",
|
|
||||||
Help: `If set this will pass bad blocks through as all 0.
|
|
||||||
|
|
||||||
This should not be set in normal operation, it should only be set if
|
|
||||||
trying to recover an encrypted file with errors and it is desired to
|
|
||||||
recover as much of the file as possible.`,
|
|
||||||
Default: false,
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
|
||||||
Name: "filename_encoding",
|
|
||||||
Help: `How to encode the encrypted filename to text string.
|
|
||||||
|
|
||||||
This option could help with shortening the encrypted filename. The
|
|
||||||
suitable option would depend on the way your remote count the filename
|
|
||||||
length and if it's case sensitive.`,
|
|
||||||
Default: "base32",
|
|
||||||
Examples: []fs.OptionExample{
|
|
||||||
{
|
|
||||||
Value: "base32",
|
|
||||||
Help: "Encode using base32. Suitable for all remote.",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Value: "base64",
|
|
||||||
Help: "Encode using base64. Suitable for case sensitive remote.",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Value: "base32768",
|
|
||||||
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoint instead of UTF-8 byte length. (Eg. Onedrive, Dropbox)",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
|
||||||
Name: "suffix",
|
|
||||||
Help: `If this is set it will override the default suffix of ".bin".
|
|
||||||
|
|
||||||
Setting suffix to "none" will result in an empty suffix. This may be useful
|
|
||||||
when the path length is critical.`,
|
|
||||||
Default: ".bin",
|
|
||||||
Advanced: true,
|
|
||||||
}},
|
}},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -176,25 +131,19 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
|
|||||||
}
|
}
|
||||||
password, err := obscure.Reveal(opt.Password)
|
password, err := obscure.Reveal(opt.Password)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to decrypt password: %w", err)
|
return nil, errors.Wrap(err, "failed to decrypt password")
|
||||||
}
|
}
|
||||||
var salt string
|
var salt string
|
||||||
if opt.Password2 != "" {
|
if opt.Password2 != "" {
|
||||||
salt, err = obscure.Reveal(opt.Password2)
|
salt, err = obscure.Reveal(opt.Password2)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to decrypt password2: %w", err)
|
return nil, errors.Wrap(err, "failed to decrypt password2")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
enc, err := NewNameEncoding(opt.FilenameEncoding)
|
cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, errors.Wrap(err, "failed to make cipher")
|
||||||
}
|
}
|
||||||
cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption, enc)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to make cipher: %w", err)
|
|
||||||
}
|
|
||||||
cipher.setEncryptedSuffix(opt.Suffix)
|
|
||||||
cipher.setPassBadBlocks(opt.PassBadBlocks)
|
|
||||||
return cipher, nil
|
return cipher, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -243,7 +192,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err != fs.ErrorIsFile && err != nil {
|
if err != fs.ErrorIsFile && err != nil {
|
||||||
return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remote, err)
|
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remote)
|
||||||
}
|
}
|
||||||
f := &Fs{
|
f := &Fs{
|
||||||
Fs: wrappedFs,
|
Fs: wrappedFs,
|
||||||
@@ -253,17 +202,10 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
|||||||
cipher: cipher,
|
cipher: cipher,
|
||||||
}
|
}
|
||||||
cache.PinUntilFinalized(f.Fs, f)
|
cache.PinUntilFinalized(f.Fs, f)
|
||||||
// Correct root if definitely pointing to a file
|
|
||||||
if err == fs.ErrorIsFile {
|
|
||||||
f.root = path.Dir(f.root)
|
|
||||||
if f.root == "." || f.root == "/" {
|
|
||||||
f.root = ""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// the features here are ones we could support, and they are
|
// the features here are ones we could support, and they are
|
||||||
// ANDed with the ones from wrappedFs
|
// ANDed with the ones from wrappedFs
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
CaseInsensitive: !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff,
|
CaseInsensitive: cipher.NameEncryptionMode() == NameEncryptionOff,
|
||||||
DuplicateFiles: true,
|
DuplicateFiles: true,
|
||||||
ReadMimeType: false, // MimeTypes not supported with crypt
|
ReadMimeType: false, // MimeTypes not supported with crypt
|
||||||
WriteMimeType: false,
|
WriteMimeType: false,
|
||||||
@@ -272,10 +214,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
|||||||
SetTier: true,
|
SetTier: true,
|
||||||
GetTier: true,
|
GetTier: true,
|
||||||
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
|
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
|
||||||
ReadMetadata: true,
|
|
||||||
WriteMetadata: true,
|
|
||||||
UserMetadata: true,
|
|
||||||
PartialUploads: true,
|
|
||||||
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
||||||
|
|
||||||
return f, err
|
return f, err
|
||||||
@@ -291,9 +229,6 @@ type Options struct {
|
|||||||
Password2 string `config:"password2"`
|
Password2 string `config:"password2"`
|
||||||
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
|
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
|
||||||
ShowMapping bool `config:"show_mapping"`
|
ShowMapping bool `config:"show_mapping"`
|
||||||
PassBadBlocks bool `config:"pass_bad_blocks"`
|
|
||||||
FilenameEncoding string `config:"filename_encoding"`
|
|
||||||
Suffix string `config:"suffix"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fs represents a wrapped fs.Fs
|
// Fs represents a wrapped fs.Fs
|
||||||
@@ -365,7 +300,7 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
|
|||||||
case fs.Directory:
|
case fs.Directory:
|
||||||
f.addDir(ctx, &newEntries, x)
|
f.addDir(ctx, &newEntries, x)
|
||||||
default:
|
default:
|
||||||
return nil, fmt.Errorf("unknown object type %T", entry)
|
return nil, errors.Errorf("Unknown object type %T", entry)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return newEntries, nil
|
return newEntries, nil
|
||||||
@@ -427,14 +362,8 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
|
|||||||
|
|
||||||
// put implements Put or PutStream
|
// put implements Put or PutStream
|
||||||
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
|
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
|
||||||
ci := fs.GetConfig(ctx)
|
|
||||||
|
|
||||||
if f.opt.NoDataEncryption {
|
if f.opt.NoDataEncryption {
|
||||||
o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
|
return put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
|
||||||
if err == nil && o != nil {
|
|
||||||
o = f.newObject(o)
|
|
||||||
}
|
|
||||||
return o, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Encrypt the data into wrappedIn
|
// Encrypt the data into wrappedIn
|
||||||
@@ -446,9 +375,6 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
|
|||||||
// Find a hash the destination supports to compute a hash of
|
// Find a hash the destination supports to compute a hash of
|
||||||
// the encrypted data
|
// the encrypted data
|
||||||
ht := f.Fs.Hashes().GetOne()
|
ht := f.Fs.Hashes().GetOne()
|
||||||
if ci.IgnoreChecksum {
|
|
||||||
ht = hash.None
|
|
||||||
}
|
|
||||||
var hasher *hash.MultiHasher
|
var hasher *hash.MultiHasher
|
||||||
if ht != hash.None {
|
if ht != hash.None {
|
||||||
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
|
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
|
||||||
@@ -476,7 +402,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
|
|||||||
var dstHash string
|
var dstHash string
|
||||||
dstHash, err = o.Hash(ctx, ht)
|
dstHash, err = o.Hash(ctx, ht)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to read destination hash: %w", err)
|
return nil, errors.Wrap(err, "failed to read destination hash")
|
||||||
}
|
}
|
||||||
if srcHash != "" && dstHash != "" {
|
if srcHash != "" && dstHash != "" {
|
||||||
if srcHash != dstHash {
|
if srcHash != dstHash {
|
||||||
@@ -485,7 +411,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
|
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
|
||||||
}
|
}
|
||||||
return nil, fmt.Errorf("corrupted on transfer: %v encrypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
|
return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
|
||||||
}
|
}
|
||||||
fs.Debugf(src, "%v = %s OK", ht, srcHash)
|
fs.Debugf(src, "%v = %s OK", ht, srcHash)
|
||||||
}
|
}
|
||||||
@@ -543,9 +469,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
|
|||||||
|
|
||||||
// Copy src to this remote using server-side copy operations.
|
// Copy src to this remote using server-side copy operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given.
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
// It returns the destination Object and a possible error.
|
// It returns the destination Object and a possible error
|
||||||
//
|
//
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
@@ -568,9 +494,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
|
|
||||||
// Move src to this remote using server-side move operations.
|
// Move src to this remote using server-side move operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given.
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
// It returns the destination Object and a possible error.
|
// It returns the destination Object and a possible error
|
||||||
//
|
//
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
@@ -639,7 +565,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
|
|||||||
func (f *Fs) CleanUp(ctx context.Context) error {
|
func (f *Fs) CleanUp(ctx context.Context) error {
|
||||||
do := f.Fs.Features().CleanUp
|
do := f.Fs.Features().CleanUp
|
||||||
if do == nil {
|
if do == nil {
|
||||||
return errors.New("not supported by underlying remote")
|
return errors.New("can't CleanUp")
|
||||||
}
|
}
|
||||||
return do(ctx)
|
return do(ctx)
|
||||||
}
|
}
|
||||||
@@ -648,7 +574,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
|||||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||||
do := f.Fs.Features().About
|
do := f.Fs.Features().About
|
||||||
if do == nil {
|
if do == nil {
|
||||||
return nil, errors.New("not supported by underlying remote")
|
return nil, errors.New("About not supported")
|
||||||
}
|
}
|
||||||
return do(ctx)
|
return do(ctx)
|
||||||
}
|
}
|
||||||
@@ -686,24 +612,24 @@ func (f *Fs) computeHashWithNonce(ctx context.Context, nonce nonce, src fs.Objec
|
|||||||
// Open the src for input
|
// Open the src for input
|
||||||
in, err := src.Open(ctx)
|
in, err := src.Open(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", fmt.Errorf("failed to open src: %w", err)
|
return "", errors.Wrap(err, "failed to open src")
|
||||||
}
|
}
|
||||||
defer fs.CheckClose(in, &err)
|
defer fs.CheckClose(in, &err)
|
||||||
|
|
||||||
// Now encrypt the src with the nonce
|
// Now encrypt the src with the nonce
|
||||||
out, err := f.cipher.newEncrypter(in, &nonce)
|
out, err := f.cipher.newEncrypter(in, &nonce)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", fmt.Errorf("failed to make encrypter: %w", err)
|
return "", errors.Wrap(err, "failed to make encrypter")
|
||||||
}
|
}
|
||||||
|
|
||||||
// pipe into hash
|
// pipe into hash
|
||||||
m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
|
m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", fmt.Errorf("failed to make hasher: %w", err)
|
return "", errors.Wrap(err, "failed to make hasher")
|
||||||
}
|
}
|
||||||
_, err = io.Copy(m, out)
|
_, err = io.Copy(m, out)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", fmt.Errorf("failed to hash data: %w", err)
|
return "", errors.Wrap(err, "failed to hash data")
|
||||||
}
|
}
|
||||||
|
|
||||||
return m.Sums()[hashType], nil
|
return m.Sums()[hashType], nil
|
||||||
@@ -722,12 +648,12 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
|
|||||||
// use a limited read so we only read the header
|
// use a limited read so we only read the header
|
||||||
in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
|
in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", fmt.Errorf("failed to open object to read nonce: %w", err)
|
return "", errors.Wrap(err, "failed to open object to read nonce")
|
||||||
}
|
}
|
||||||
d, err := f.cipher.newDecrypter(in)
|
d, err := f.cipher.newDecrypter(in)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
_ = in.Close()
|
_ = in.Close()
|
||||||
return "", fmt.Errorf("failed to open object to read nonce: %w", err)
|
return "", errors.Wrap(err, "failed to open object to read nonce")
|
||||||
}
|
}
|
||||||
nonce := d.nonce
|
nonce := d.nonce
|
||||||
// fs.Debugf(o, "Read nonce % 2x", nonce)
|
// fs.Debugf(o, "Read nonce % 2x", nonce)
|
||||||
@@ -746,7 +672,7 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
|
|||||||
// Close d (and hence in) once we have read the nonce
|
// Close d (and hence in) once we have read the nonce
|
||||||
err = d.Close()
|
err = d.Close()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", fmt.Errorf("failed to close nonce read: %w", err)
|
return "", errors.Wrap(err, "failed to close nonce read")
|
||||||
}
|
}
|
||||||
|
|
||||||
return f.computeHashWithNonce(ctx, nonce, src, hashType)
|
return f.computeHashWithNonce(ctx, nonce, src, hashType)
|
||||||
@@ -865,7 +791,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
|
|||||||
for _, encryptedFileName := range arg {
|
for _, encryptedFileName := range arg {
|
||||||
fileName, err := f.DecryptFileName(encryptedFileName)
|
fileName, err := f.DecryptFileName(encryptedFileName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return out, fmt.Errorf("failed to decrypt: %s: %w", encryptedFileName, err)
|
return out, errors.Wrap(err, fmt.Sprintf("Failed to decrypt : %s", encryptedFileName))
|
||||||
}
|
}
|
||||||
out = append(out, fileName)
|
out = append(out, fileName)
|
||||||
}
|
}
|
||||||
@@ -1069,9 +995,6 @@ func (o *ObjectInfo) Size() int64 {
|
|||||||
if size < 0 {
|
if size < 0 {
|
||||||
return size
|
return size
|
||||||
}
|
}
|
||||||
if o.f.opt.NoDataEncryption {
|
|
||||||
return size
|
|
||||||
}
|
|
||||||
return o.f.cipher.EncryptedSize(size)
|
return o.f.cipher.EncryptedSize(size)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1083,11 +1006,10 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
|
|||||||
// Get the underlying object if there is one
|
// Get the underlying object if there is one
|
||||||
if srcObj, ok = o.ObjectInfo.(fs.Object); ok {
|
if srcObj, ok = o.ObjectInfo.(fs.Object); ok {
|
||||||
// Prefer direct interface assertion
|
// Prefer direct interface assertion
|
||||||
} else if do, ok := o.ObjectInfo.(*fs.OverrideRemote); ok {
|
} else if do, ok := o.ObjectInfo.(fs.ObjectUnWrapper); ok {
|
||||||
// Unwrap if it is an operations.OverrideRemote
|
// Otherwise likely is an operations.OverrideRemote
|
||||||
srcObj = do.UnWrap()
|
srcObj = do.UnWrap()
|
||||||
} else {
|
} else {
|
||||||
// Otherwise don't unwrap any further
|
|
||||||
return "", nil
|
return "", nil
|
||||||
}
|
}
|
||||||
// if this is wrapping a local object then we work out the hash
|
// if this is wrapping a local object then we work out the hash
|
||||||
@@ -1099,50 +1021,6 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
|
|||||||
return "", nil
|
return "", nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetTier returns storage tier or class of the Object
|
|
||||||
func (o *ObjectInfo) GetTier() string {
|
|
||||||
do, ok := o.ObjectInfo.(fs.GetTierer)
|
|
||||||
if !ok {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return do.GetTier()
|
|
||||||
}
|
|
||||||
|
|
||||||
// ID returns the ID of the Object if known, or "" if not
|
|
||||||
func (o *ObjectInfo) ID() string {
|
|
||||||
do, ok := o.ObjectInfo.(fs.IDer)
|
|
||||||
if !ok {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return do.ID()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Metadata returns metadata for an object
|
|
||||||
//
|
|
||||||
// It should return nil if there is no Metadata
|
|
||||||
func (o *ObjectInfo) Metadata(ctx context.Context) (fs.Metadata, error) {
|
|
||||||
do, ok := o.ObjectInfo.(fs.Metadataer)
|
|
||||||
if !ok {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
return do.Metadata(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MimeType returns the content type of the Object if
|
|
||||||
// known, or "" if not
|
|
||||||
//
|
|
||||||
// This is deliberately unsupported so we don't leak mime type info by
|
|
||||||
// default.
|
|
||||||
func (o *ObjectInfo) MimeType(ctx context.Context) string {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnWrap returns the Object that this Object is wrapping or
|
|
||||||
// nil if it isn't wrapping anything
|
|
||||||
func (o *ObjectInfo) UnWrap() fs.Object {
|
|
||||||
return fs.UnWrapObjectInfo(o.ObjectInfo)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ID returns the ID of the Object if known, or "" if not
|
// ID returns the ID of the Object if known, or "" if not
|
||||||
func (o *Object) ID() string {
|
func (o *Object) ID() string {
|
||||||
do, ok := o.Object.(fs.IDer)
|
do, ok := o.Object.(fs.IDer)
|
||||||
@@ -1171,26 +1049,6 @@ func (o *Object) GetTier() string {
|
|||||||
return do.GetTier()
|
return do.GetTier()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Metadata returns metadata for an object
|
|
||||||
//
|
|
||||||
// It should return nil if there is no Metadata
|
|
||||||
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
|
|
||||||
do, ok := o.Object.(fs.Metadataer)
|
|
||||||
if !ok {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
return do.Metadata(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MimeType returns the content type of the Object if
|
|
||||||
// known, or "" if not
|
|
||||||
//
|
|
||||||
// This is deliberately unsupported so we don't leak mime type info by
|
|
||||||
// default.
|
|
||||||
func (o *Object) MimeType(ctx context.Context) string {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check the interfaces are satisfied
|
// Check the interfaces are satisfied
|
||||||
var (
|
var (
|
||||||
_ fs.Fs = (*Fs)(nil)
|
_ fs.Fs = (*Fs)(nil)
|
||||||
@@ -1213,6 +1071,10 @@ var (
|
|||||||
_ fs.UserInfoer = (*Fs)(nil)
|
_ fs.UserInfoer = (*Fs)(nil)
|
||||||
_ fs.Disconnecter = (*Fs)(nil)
|
_ fs.Disconnecter = (*Fs)(nil)
|
||||||
_ fs.Shutdowner = (*Fs)(nil)
|
_ fs.Shutdowner = (*Fs)(nil)
|
||||||
_ fs.FullObjectInfo = (*ObjectInfo)(nil)
|
_ fs.ObjectInfo = (*ObjectInfo)(nil)
|
||||||
_ fs.FullObject = (*Object)(nil)
|
_ fs.Object = (*Object)(nil)
|
||||||
|
_ fs.ObjectUnWrapper = (*Object)(nil)
|
||||||
|
_ fs.IDer = (*Object)(nil)
|
||||||
|
_ fs.SetTierer = (*Object)(nil)
|
||||||
|
_ fs.GetTierer = (*Object)(nil)
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -17,28 +17,41 @@ import (
|
|||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type testWrapper struct {
|
||||||
|
fs.ObjectInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnWrap returns the Object that this Object is wrapping or nil if it
|
||||||
|
// isn't wrapping anything
|
||||||
|
func (o testWrapper) UnWrap() fs.Object {
|
||||||
|
if o, ok := o.ObjectInfo.(fs.Object); ok {
|
||||||
|
return o
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// Create a temporary local fs to upload things from
|
// Create a temporary local fs to upload things from
|
||||||
|
|
||||||
func makeTempLocalFs(t *testing.T) (localFs fs.Fs) {
|
func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
|
||||||
localFs, err := fs.TemporaryLocalFs(context.Background())
|
localFs, err := fs.TemporaryLocalFs(context.Background())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
t.Cleanup(func() {
|
cleanup = func() {
|
||||||
require.NoError(t, localFs.Rmdir(context.Background(), ""))
|
require.NoError(t, localFs.Rmdir(context.Background(), ""))
|
||||||
})
|
}
|
||||||
return localFs
|
return localFs, cleanup
|
||||||
}
|
}
|
||||||
|
|
||||||
// Upload a file to a remote
|
// Upload a file to a remote
|
||||||
func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object) {
|
func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object, cleanup func()) {
|
||||||
inBuf := bytes.NewBufferString(contents)
|
inBuf := bytes.NewBufferString(contents)
|
||||||
t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC)
|
t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC)
|
||||||
upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil)
|
upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil)
|
||||||
obj, err := f.Put(context.Background(), inBuf, upSrc)
|
obj, err := f.Put(context.Background(), inBuf, upSrc)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
t.Cleanup(func() {
|
cleanup = func() {
|
||||||
require.NoError(t, obj.Remove(context.Background()))
|
require.NoError(t, obj.Remove(context.Background()))
|
||||||
})
|
}
|
||||||
return obj
|
return obj, cleanup
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test the ObjectInfo
|
// Test the ObjectInfo
|
||||||
@@ -52,9 +65,11 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
|
|||||||
path = "_wrap"
|
path = "_wrap"
|
||||||
}
|
}
|
||||||
|
|
||||||
localFs := makeTempLocalFs(t)
|
localFs, cleanupLocalFs := makeTempLocalFs(t)
|
||||||
|
defer cleanupLocalFs()
|
||||||
|
|
||||||
obj := uploadFile(t, localFs, path, contents)
|
obj, cleanupObj := uploadFile(t, localFs, path, contents)
|
||||||
|
defer cleanupObj()
|
||||||
|
|
||||||
// encrypt the data
|
// encrypt the data
|
||||||
inBuf := bytes.NewBufferString(contents)
|
inBuf := bytes.NewBufferString(contents)
|
||||||
@@ -68,7 +83,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
|
|||||||
var oi fs.ObjectInfo = obj
|
var oi fs.ObjectInfo = obj
|
||||||
if wrap {
|
if wrap {
|
||||||
// wrap the object in an fs.ObjectUnwrapper if required
|
// wrap the object in an fs.ObjectUnwrapper if required
|
||||||
oi = fs.NewOverrideRemote(oi, "new_remote")
|
oi = testWrapper{oi}
|
||||||
}
|
}
|
||||||
|
|
||||||
// wrap the object in a crypt for upload using the nonce we
|
// wrap the object in a crypt for upload using the nonce we
|
||||||
@@ -76,9 +91,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
|
|||||||
src := f.newObjectInfo(oi, nonce)
|
src := f.newObjectInfo(oi, nonce)
|
||||||
|
|
||||||
// Test ObjectInfo methods
|
// Test ObjectInfo methods
|
||||||
if !f.opt.NoDataEncryption {
|
assert.Equal(t, int64(outBuf.Len()), src.Size())
|
||||||
assert.Equal(t, int64(outBuf.Len()), src.Size())
|
|
||||||
}
|
|
||||||
assert.Equal(t, f, src.Fs())
|
assert.Equal(t, f, src.Fs())
|
||||||
assert.NotEqual(t, path, src.Remote())
|
assert.NotEqual(t, path, src.Remote())
|
||||||
|
|
||||||
@@ -101,13 +114,16 @@ func testComputeHash(t *testing.T, f *Fs) {
|
|||||||
t.Skipf("%v: does not support hashes", f.Fs)
|
t.Skipf("%v: does not support hashes", f.Fs)
|
||||||
}
|
}
|
||||||
|
|
||||||
localFs := makeTempLocalFs(t)
|
localFs, cleanupLocalFs := makeTempLocalFs(t)
|
||||||
|
defer cleanupLocalFs()
|
||||||
|
|
||||||
// Upload a file to localFs as a test object
|
// Upload a file to localFs as a test object
|
||||||
localObj := uploadFile(t, localFs, path, contents)
|
localObj, cleanupLocalObj := uploadFile(t, localFs, path, contents)
|
||||||
|
defer cleanupLocalObj()
|
||||||
|
|
||||||
// Upload the same data to the remote Fs also
|
// Upload the same data to the remote Fs also
|
||||||
remoteObj := uploadFile(t, f, path, contents)
|
remoteObj, cleanupRemoteObj := uploadFile(t, f, path, contents)
|
||||||
|
defer cleanupRemoteObj()
|
||||||
|
|
||||||
// Calculate the expected Hash of the remote object
|
// Calculate the expected Hash of the remote object
|
||||||
computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType)
|
computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType)
|
||||||
|
|||||||
@@ -4,7 +4,6 @@ package crypt_test
|
|||||||
import (
|
import (
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/rclone/rclone/backend/crypt"
|
"github.com/rclone/rclone/backend/crypt"
|
||||||
@@ -24,13 +23,13 @@ func TestIntegration(t *testing.T) {
|
|||||||
fstests.Run(t, &fstests.Opt{
|
fstests.Run(t, &fstests.Opt{
|
||||||
RemoteName: *fstest.RemoteName,
|
RemoteName: *fstest.RemoteName,
|
||||||
NilObject: (*crypt.Object)(nil),
|
NilObject: (*crypt.Object)(nil),
|
||||||
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
|
UnimplementableFsMethods: []string{"OpenWriterAt"},
|
||||||
UnimplementableObjectMethods: []string{"MimeType"},
|
UnimplementableObjectMethods: []string{"MimeType"},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestStandard runs integration tests against the remote
|
// TestStandard runs integration tests against the remote
|
||||||
func TestStandardBase32(t *testing.T) {
|
func TestStandard(t *testing.T) {
|
||||||
if *fstest.RemoteName != "" {
|
if *fstest.RemoteName != "" {
|
||||||
t.Skip("Skipping as -remote set")
|
t.Skip("Skipping as -remote set")
|
||||||
}
|
}
|
||||||
@@ -45,53 +44,8 @@ func TestStandardBase32(t *testing.T) {
|
|||||||
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
|
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
|
||||||
{Name: name, Key: "filename_encryption", Value: "standard"},
|
{Name: name, Key: "filename_encryption", Value: "standard"},
|
||||||
},
|
},
|
||||||
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
|
UnimplementableFsMethods: []string{"OpenWriterAt"},
|
||||||
UnimplementableObjectMethods: []string{"MimeType"},
|
UnimplementableObjectMethods: []string{"MimeType"},
|
||||||
QuickTestOK: true,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestStandardBase64(t *testing.T) {
|
|
||||||
if *fstest.RemoteName != "" {
|
|
||||||
t.Skip("Skipping as -remote set")
|
|
||||||
}
|
|
||||||
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
|
|
||||||
name := "TestCrypt"
|
|
||||||
fstests.Run(t, &fstests.Opt{
|
|
||||||
RemoteName: name + ":",
|
|
||||||
NilObject: (*crypt.Object)(nil),
|
|
||||||
ExtraConfig: []fstests.ExtraConfigItem{
|
|
||||||
{Name: name, Key: "type", Value: "crypt"},
|
|
||||||
{Name: name, Key: "remote", Value: tempdir},
|
|
||||||
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
|
|
||||||
{Name: name, Key: "filename_encryption", Value: "standard"},
|
|
||||||
{Name: name, Key: "filename_encoding", Value: "base64"},
|
|
||||||
},
|
|
||||||
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
|
|
||||||
UnimplementableObjectMethods: []string{"MimeType"},
|
|
||||||
QuickTestOK: true,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestStandardBase32768(t *testing.T) {
|
|
||||||
if *fstest.RemoteName != "" {
|
|
||||||
t.Skip("Skipping as -remote set")
|
|
||||||
}
|
|
||||||
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
|
|
||||||
name := "TestCrypt"
|
|
||||||
fstests.Run(t, &fstests.Opt{
|
|
||||||
RemoteName: name + ":",
|
|
||||||
NilObject: (*crypt.Object)(nil),
|
|
||||||
ExtraConfig: []fstests.ExtraConfigItem{
|
|
||||||
{Name: name, Key: "type", Value: "crypt"},
|
|
||||||
{Name: name, Key: "remote", Value: tempdir},
|
|
||||||
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
|
|
||||||
{Name: name, Key: "filename_encryption", Value: "standard"},
|
|
||||||
{Name: name, Key: "filename_encoding", Value: "base32768"},
|
|
||||||
},
|
|
||||||
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
|
|
||||||
UnimplementableObjectMethods: []string{"MimeType"},
|
|
||||||
QuickTestOK: true,
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -111,9 +65,8 @@ func TestOff(t *testing.T) {
|
|||||||
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
|
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
|
||||||
{Name: name, Key: "filename_encryption", Value: "off"},
|
{Name: name, Key: "filename_encryption", Value: "off"},
|
||||||
},
|
},
|
||||||
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
|
UnimplementableFsMethods: []string{"OpenWriterAt"},
|
||||||
UnimplementableObjectMethods: []string{"MimeType"},
|
UnimplementableObjectMethods: []string{"MimeType"},
|
||||||
QuickTestOK: true,
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -122,9 +75,6 @@ func TestObfuscate(t *testing.T) {
|
|||||||
if *fstest.RemoteName != "" {
|
if *fstest.RemoteName != "" {
|
||||||
t.Skip("Skipping as -remote set")
|
t.Skip("Skipping as -remote set")
|
||||||
}
|
}
|
||||||
if runtime.GOOS == "darwin" {
|
|
||||||
t.Skip("Skipping on macOS as obfuscating control characters makes filenames macOS can't cope with")
|
|
||||||
}
|
|
||||||
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
|
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
|
||||||
name := "TestCrypt3"
|
name := "TestCrypt3"
|
||||||
fstests.Run(t, &fstests.Opt{
|
fstests.Run(t, &fstests.Opt{
|
||||||
@@ -137,9 +87,8 @@ func TestObfuscate(t *testing.T) {
|
|||||||
{Name: name, Key: "filename_encryption", Value: "obfuscate"},
|
{Name: name, Key: "filename_encryption", Value: "obfuscate"},
|
||||||
},
|
},
|
||||||
SkipBadWindowsCharacters: true,
|
SkipBadWindowsCharacters: true,
|
||||||
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
|
UnimplementableFsMethods: []string{"OpenWriterAt"},
|
||||||
UnimplementableObjectMethods: []string{"MimeType"},
|
UnimplementableObjectMethods: []string{"MimeType"},
|
||||||
QuickTestOK: true,
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -148,9 +97,6 @@ func TestNoDataObfuscate(t *testing.T) {
|
|||||||
if *fstest.RemoteName != "" {
|
if *fstest.RemoteName != "" {
|
||||||
t.Skip("Skipping as -remote set")
|
t.Skip("Skipping as -remote set")
|
||||||
}
|
}
|
||||||
if runtime.GOOS == "darwin" {
|
|
||||||
t.Skip("Skipping on macOS as obfuscating control characters makes filenames macOS can't cope with")
|
|
||||||
}
|
|
||||||
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
|
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
|
||||||
name := "TestCrypt4"
|
name := "TestCrypt4"
|
||||||
fstests.Run(t, &fstests.Opt{
|
fstests.Run(t, &fstests.Opt{
|
||||||
@@ -164,8 +110,7 @@ func TestNoDataObfuscate(t *testing.T) {
|
|||||||
{Name: name, Key: "no_data_encryption", Value: "true"},
|
{Name: name, Key: "no_data_encryption", Value: "true"},
|
||||||
},
|
},
|
||||||
SkipBadWindowsCharacters: true,
|
SkipBadWindowsCharacters: true,
|
||||||
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
|
UnimplementableFsMethods: []string{"OpenWriterAt"},
|
||||||
UnimplementableObjectMethods: []string{"MimeType"},
|
UnimplementableObjectMethods: []string{"MimeType"},
|
||||||
QuickTestOK: true,
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,15 +4,15 @@
|
|||||||
// buffers which are a multiple of an underlying crypto block size.
|
// buffers which are a multiple of an underlying crypto block size.
|
||||||
package pkcs7
|
package pkcs7
|
||||||
|
|
||||||
import "errors"
|
import "github.com/pkg/errors"
|
||||||
|
|
||||||
// Errors Unpad can return
|
// Errors Unpad can return
|
||||||
var (
|
var (
|
||||||
ErrorPaddingNotFound = errors.New("bad PKCS#7 padding - not padded")
|
ErrorPaddingNotFound = errors.New("Bad PKCS#7 padding - not padded")
|
||||||
ErrorPaddingNotAMultiple = errors.New("bad PKCS#7 padding - not a multiple of blocksize")
|
ErrorPaddingNotAMultiple = errors.New("Bad PKCS#7 padding - not a multiple of blocksize")
|
||||||
ErrorPaddingTooLong = errors.New("bad PKCS#7 padding - too long")
|
ErrorPaddingTooLong = errors.New("Bad PKCS#7 padding - too long")
|
||||||
ErrorPaddingTooShort = errors.New("bad PKCS#7 padding - too short")
|
ErrorPaddingTooShort = errors.New("Bad PKCS#7 padding - too short")
|
||||||
ErrorPaddingNotAllTheSame = errors.New("bad PKCS#7 padding - not all the same")
|
ErrorPaddingNotAllTheSame = errors.New("Bad PKCS#7 padding - not all the same")
|
||||||
)
|
)
|
||||||
|
|
||||||
// Pad buf using PKCS#7 to a multiple of n.
|
// Pad buf using PKCS#7 to a multiple of n.
|
||||||
|
|||||||
936 backend/drive/drive.go (Normal file → Executable file) [File diff suppressed because it is too large]
@@ -4,9 +4,8 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
"mime"
|
"mime"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
@@ -15,20 +14,17 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
_ "github.com/rclone/rclone/backend/local"
|
_ "github.com/rclone/rclone/backend/local"
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fs/filter"
|
|
||||||
"github.com/rclone/rclone/fs/fserrors"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
"github.com/rclone/rclone/fs/hash"
|
||||||
"github.com/rclone/rclone/fs/operations"
|
"github.com/rclone/rclone/fs/operations"
|
||||||
"github.com/rclone/rclone/fs/sync"
|
|
||||||
"github.com/rclone/rclone/fstest"
|
"github.com/rclone/rclone/fstest"
|
||||||
"github.com/rclone/rclone/fstest/fstests"
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
"github.com/rclone/rclone/lib/random"
|
"github.com/rclone/rclone/lib/random"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"google.golang.org/api/drive/v3"
|
"google.golang.org/api/drive/v3"
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestDriveScopes(t *testing.T) {
|
func TestDriveScopes(t *testing.T) {
|
||||||
@@ -77,7 +73,7 @@ var additionalMimeTypes = map[string]string{
|
|||||||
// Load the example export formats into exportFormats for testing
|
// Load the example export formats into exportFormats for testing
|
||||||
func TestInternalLoadExampleFormats(t *testing.T) {
|
func TestInternalLoadExampleFormats(t *testing.T) {
|
||||||
fetchFormatsOnce.Do(func() {})
|
fetchFormatsOnce.Do(func() {})
|
||||||
buf, err := os.ReadFile(filepath.FromSlash("test/about.json"))
|
buf, err := ioutil.ReadFile(filepath.FromSlash("test/about.json"))
|
||||||
var about struct {
|
var about struct {
|
||||||
ExportFormats map[string][]string `json:"exportFormats,omitempty"`
|
ExportFormats map[string][]string `json:"exportFormats,omitempty"`
|
||||||
ImportFormats map[string][]string `json:"importFormats,omitempty"`
|
ImportFormats map[string][]string `json:"importFormats,omitempty"`
|
||||||
@@ -191,69 +187,6 @@ func TestExtensionsForImportFormats(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Fs) InternalTestShouldRetry(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
gatewayTimeout := googleapi.Error{
|
|
||||||
Code: 503,
|
|
||||||
}
|
|
||||||
timeoutRetry, timeoutError := f.shouldRetry(ctx, &gatewayTimeout)
|
|
||||||
assert.True(t, timeoutRetry)
|
|
||||||
assert.Equal(t, &gatewayTimeout, timeoutError)
|
|
||||||
generic403 := googleapi.Error{
|
|
||||||
Code: 403,
|
|
||||||
}
|
|
||||||
rLEItem := googleapi.ErrorItem{
|
|
||||||
Reason: "rateLimitExceeded",
|
|
||||||
Message: "User rate limit exceeded.",
|
|
||||||
}
|
|
||||||
generic403.Errors = append(generic403.Errors, rLEItem)
|
|
||||||
oldStopUpload := f.opt.StopOnUploadLimit
|
|
||||||
oldStopDownload := f.opt.StopOnDownloadLimit
|
|
||||||
f.opt.StopOnUploadLimit = true
|
|
||||||
f.opt.StopOnDownloadLimit = true
|
|
||||||
defer func() {
|
|
||||||
f.opt.StopOnUploadLimit = oldStopUpload
|
|
||||||
f.opt.StopOnDownloadLimit = oldStopDownload
|
|
||||||
}()
|
|
||||||
expectedRLError := fserrors.FatalError(&generic403)
|
|
||||||
rateLimitRetry, rateLimitErr := f.shouldRetry(ctx, &generic403)
|
|
||||||
assert.False(t, rateLimitRetry)
|
|
||||||
assert.Equal(t, rateLimitErr, expectedRLError)
|
|
||||||
dQEItem := googleapi.ErrorItem{
|
|
||||||
Reason: "downloadQuotaExceeded",
|
|
||||||
}
|
|
||||||
generic403.Errors[0] = dQEItem
|
|
||||||
expectedDQError := fserrors.FatalError(&generic403)
|
|
||||||
downloadQuotaRetry, downloadQuotaError := f.shouldRetry(ctx, &generic403)
|
|
||||||
assert.False(t, downloadQuotaRetry)
|
|
||||||
assert.Equal(t, downloadQuotaError, expectedDQError)
|
|
||||||
tDFLEItem := googleapi.ErrorItem{
|
|
||||||
Reason: "teamDriveFileLimitExceeded",
|
|
||||||
}
|
|
||||||
generic403.Errors[0] = tDFLEItem
|
|
||||||
expectedTDFLError := fserrors.FatalError(&generic403)
|
|
||||||
teamDriveFileLimitRetry, teamDriveFileLimitError := f.shouldRetry(ctx, &generic403)
|
|
||||||
assert.False(t, teamDriveFileLimitRetry)
|
|
||||||
assert.Equal(t, teamDriveFileLimitError, expectedTDFLError)
|
|
||||||
qEItem := googleapi.ErrorItem{
|
|
||||||
Reason: "quotaExceeded",
|
|
||||||
}
|
|
||||||
generic403.Errors[0] = qEItem
|
|
||||||
expectedQuotaError := fserrors.FatalError(&generic403)
|
|
||||||
quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403)
|
|
||||||
assert.False(t, quotaExceededRetry)
|
|
||||||
assert.Equal(t, quotaExceededError, expectedQuotaError)
|
|
||||||
|
|
||||||
sqEItem := googleapi.ErrorItem{
|
|
||||||
Reason: "storageQuotaExceeded",
|
|
||||||
}
|
|
||||||
generic403.Errors[0] = sqEItem
|
|
||||||
expectedStorageQuotaError := fserrors.FatalError(&generic403)
|
|
||||||
storageQuotaExceededRetry, storageQuotaExceededError := f.shouldRetry(ctx, &generic403)
|
|
||||||
assert.False(t, storageQuotaExceededRetry)
|
|
||||||
assert.Equal(t, storageQuotaExceededError, expectedStorageQuotaError)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Fs) InternalTestDocumentImport(t *testing.T) {
|
func (f *Fs) InternalTestDocumentImport(t *testing.T) {
|
||||||
oldAllow := f.opt.AllowImportNameChange
|
oldAllow := f.opt.AllowImportNameChange
|
||||||
f.opt.AllowImportNameChange = true
|
f.opt.AllowImportNameChange = true
|
||||||
@@ -442,9 +375,9 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
|
|||||||
// Make some objects, one in a subdir
|
// Make some objects, one in a subdir
|
||||||
contents := random.String(100)
|
contents := random.String(100)
|
||||||
file1 := fstest.NewItem("trashDir/toBeTrashed", contents, time.Now())
|
file1 := fstest.NewItem("trashDir/toBeTrashed", contents, time.Now())
|
||||||
obj1 := fstests.PutTestContents(ctx, t, f, &file1, contents, false)
|
_, obj1 := fstests.PutTestContents(ctx, t, f, &file1, contents, false)
|
||||||
file2 := fstest.NewItem("trashDir/subdir/toBeTrashed", contents, time.Now())
|
file2 := fstest.NewItem("trashDir/subdir/toBeTrashed", contents, time.Now())
|
||||||
_ = fstests.PutTestContents(ctx, t, f, &file2, contents, false)
|
_, _ = fstests.PutTestContents(ctx, t, f, &file2, contents, false)
|
||||||
|
|
||||||
// Check objects
|
// Check objects
|
||||||
checkObjects := func() {
|
checkObjects := func() {
|
||||||
@@ -486,7 +419,11 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
 	require.NoError(t, err)
 	o := obj.(*Object)
 
-	dir := t.TempDir()
+	dir, err := ioutil.TempDir("", "rclone-drive-copyid-test")
+	require.NoError(t, err)
+	defer func() {
+		_ = os.RemoveAll(dir)
+	}()
 
 	checkFile := func(name string) {
 		filePath := filepath.Join(dir, name)
@@ -524,76 +461,6 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
 	})
 }
 
-// TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
-func (f *Fs) InternalTestAgeQuery(t *testing.T) {
-	// Check set up for filtering
-	assert.True(t, f.Features().FilterAware)
-
-	opt := &filter.Opt{}
-	err := opt.MaxAge.Set("1h")
-	assert.NoError(t, err)
-	flt, err := filter.NewFilter(opt)
-	assert.NoError(t, err)
-
-	defCtx := context.Background()
-	fltCtx := filter.ReplaceConfig(defCtx, flt)
-
-	testCtx1 := fltCtx
-	testCtx2 := filter.SetUseFilter(testCtx1, true)
-	testCtx3, testCancel := context.WithCancel(testCtx2)
-	testCtx4 := filter.SetUseFilter(testCtx3, false)
-	testCancel()
-	assert.False(t, filter.GetUseFilter(testCtx1))
-	assert.True(t, filter.GetUseFilter(testCtx2))
-	assert.True(t, filter.GetUseFilter(testCtx3))
-	assert.False(t, filter.GetUseFilter(testCtx4))
-
-	subRemote := fmt.Sprintf("%s:%s/%s", f.Name(), f.Root(), "agequery-testdir")
-	subFsResult, err := fs.NewFs(defCtx, subRemote)
-	require.NoError(t, err)
-	subFs, isDriveFs := subFsResult.(*Fs)
-	require.True(t, isDriveFs)
-
-	tempDir1 := t.TempDir()
-	tempFs1, err := fs.NewFs(defCtx, tempDir1)
-	require.NoError(t, err)
-
-	tempDir2 := t.TempDir()
-	tempFs2, err := fs.NewFs(defCtx, tempDir2)
-	require.NoError(t, err)
-
-	file1 := fstest.Item{ModTime: time.Now(), Path: "agequery.txt"}
-	_ = fstests.PutTestContents(defCtx, t, tempFs1, &file1, "abcxyz", true)
-
-	// validate sync/copy
-	const timeQuery = "(modifiedTime >= '"
-
-	assert.NoError(t, sync.CopyDir(defCtx, subFs, tempFs1, false))
-	assert.NotContains(t, subFs.lastQuery, timeQuery)
-
-	assert.NoError(t, sync.CopyDir(fltCtx, subFs, tempFs1, false))
-	assert.Contains(t, subFs.lastQuery, timeQuery)
-
-	assert.NoError(t, sync.CopyDir(fltCtx, tempFs2, subFs, false))
-	assert.Contains(t, subFs.lastQuery, timeQuery)
-
-	assert.NoError(t, sync.CopyDir(defCtx, tempFs2, subFs, false))
-	assert.NotContains(t, subFs.lastQuery, timeQuery)
-
-	// validate list/walk
-	devNull, errOpen := os.OpenFile(os.DevNull, os.O_WRONLY, 0)
-	require.NoError(t, errOpen)
-	defer func() {
-		_ = devNull.Close()
-	}()
-
-	assert.NoError(t, operations.List(defCtx, subFs, devNull))
-	assert.NotContains(t, subFs.lastQuery, timeQuery)
-
-	assert.NoError(t, operations.List(fltCtx, subFs, devNull))
-	assert.Contains(t, subFs.lastQuery, timeQuery)
-}
-
 func (f *Fs) InternalTest(t *testing.T) {
 	// These tests all depend on each other so run them as nested tests
 	t.Run("DocumentImport", func(t *testing.T) {
@@ -611,8 +478,6 @@ func (f *Fs) InternalTest(t *testing.T) {
 	t.Run("Shortcuts", f.InternalTestShortcuts)
 	t.Run("UnTrash", f.InternalTestUnTrash)
 	t.Run("CopyID", f.InternalTestCopyID)
-	t.Run("AgeQuery", f.InternalTestAgeQuery)
-	t.Run("ShouldRetry", f.InternalTestShouldRetry)
 }
 
 var _ fstests.InternalTester = (*Fs)(nil)
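The AgeQuery test removed above exercises rclone's filter-aware listing: when a MaxAge filter is attached to the context and the backend declares FilterAware, drive is expected to push a `modifiedTime >= ...` clause into its listing query. A minimal sketch of how such a filtered context is built, using only the fs/filter helpers the test itself calls (the remote and behaviour of the zero-value Opt are taken from the test, not verified independently):

```go
package main

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs/filter"
)

func main() {
	// Build a filter that only accepts objects modified in the last hour.
	opt := filter.Opt{}
	if err := opt.MaxAge.Set("1h"); err != nil {
		panic(err)
	}
	flt, err := filter.NewFilter(&opt)
	if err != nil {
		panic(err)
	}

	// Attach the filter to a context; filter-aware backends read it back
	// and can translate it into a server-side query.
	ctx := filter.ReplaceConfig(context.Background(), flt)
	ctx = filter.SetUseFilter(ctx, true)

	fmt.Println("use filter:", filter.GetUseFilter(ctx)) // true
}
```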
@@ -1,608 +0,0 @@
|
|||||||
package drive
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"golang.org/x/sync/errgroup"
|
|
||||||
drive "google.golang.org/api/drive/v3"
|
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
|
||||||
|
|
||||||
// system metadata keys which this backend owns
|
|
||||||
var systemMetadataInfo = map[string]fs.MetadataHelp{
|
|
||||||
"content-type": {
|
|
||||||
Help: "The MIME type of the file.",
|
|
||||||
Type: "string",
|
|
||||||
Example: "text/plain",
|
|
||||||
},
|
|
||||||
"mtime": {
|
|
||||||
Help: "Time of last modification with mS accuracy.",
|
|
||||||
Type: "RFC 3339",
|
|
||||||
Example: "2006-01-02T15:04:05.999Z07:00",
|
|
||||||
},
|
|
||||||
"btime": {
|
|
||||||
Help: "Time of file birth (creation) with mS accuracy. Note that this is only writable on fresh uploads - it can't be written for updates.",
|
|
||||||
Type: "RFC 3339",
|
|
||||||
Example: "2006-01-02T15:04:05.999Z07:00",
|
|
||||||
},
|
|
||||||
"copy-requires-writer-permission": {
|
|
||||||
Help: "Whether the options to copy, print, or download this file, should be disabled for readers and commenters.",
|
|
||||||
Type: "boolean",
|
|
||||||
Example: "true",
|
|
||||||
},
|
|
||||||
"writers-can-share": {
|
|
||||||
Help: "Whether users with only writer permission can modify the file's permissions. Not populated for items in shared drives.",
|
|
||||||
Type: "boolean",
|
|
||||||
Example: "false",
|
|
||||||
},
|
|
||||||
"viewed-by-me": {
|
|
||||||
Help: "Whether the file has been viewed by this user.",
|
|
||||||
Type: "boolean",
|
|
||||||
Example: "true",
|
|
||||||
ReadOnly: true,
|
|
||||||
},
|
|
||||||
"owner": {
|
|
||||||
Help: "The owner of the file. Usually an email address. Enable with --drive-metadata-owner.",
|
|
||||||
Type: "string",
|
|
||||||
Example: "user@example.com",
|
|
||||||
},
|
|
||||||
"permissions": {
|
|
||||||
Help: "Permissions in a JSON dump of Google drive format. On shared drives these will only be present if they aren't inherited. Enable with --drive-metadata-permissions.",
|
|
||||||
Type: "JSON",
|
|
||||||
Example: "{}",
|
|
||||||
},
|
|
||||||
"folder-color-rgb": {
|
|
||||||
Help: "The color for a folder or a shortcut to a folder as an RGB hex string.",
|
|
||||||
Type: "string",
|
|
||||||
Example: "881133",
|
|
||||||
},
|
|
||||||
"description": {
|
|
||||||
Help: "A short description of the file.",
|
|
||||||
Type: "string",
|
|
||||||
Example: "Contract for signing",
|
|
||||||
},
|
|
||||||
"starred": {
|
|
||||||
Help: "Whether the user has starred the file.",
|
|
||||||
Type: "boolean",
|
|
||||||
Example: "false",
|
|
||||||
},
|
|
||||||
"labels": {
|
|
||||||
Help: "Labels attached to this file in a JSON dump of Googled drive format. Enable with --drive-metadata-labels.",
|
|
||||||
Type: "JSON",
|
|
||||||
Example: "[]",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
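The table above is just a map from metadata key to fs.MetadataHelp, so it can be dumped programmatically. A small illustrative helper (the function name is made up, and it would have to live inside package drive because systemMetadataInfo is unexported):

```go
// printMetadataHelp lists the system metadata keys the drive backend owns.
// Illustrative sketch only; uses just the Help, Type, Example and ReadOnly
// fields shown in the map above.
func printMetadataHelp() {
	for key, help := range systemMetadataInfo {
		ro := ""
		if help.ReadOnly {
			ro = " (read-only)"
		}
		fmt.Printf("%s [%s]%s: %s e.g. %q\n", key, help.Type, ro, help.Help, help.Example)
	}
}
```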
|
|
||||||
// Extra fields we need to fetch to implement the system metadata above
|
|
||||||
var metadataFields = googleapi.Field(strings.Join([]string{
|
|
||||||
"copyRequiresWriterPermission",
|
|
||||||
"description",
|
|
||||||
"folderColorRgb",
|
|
||||||
"hasAugmentedPermissions",
|
|
||||||
"owners",
|
|
||||||
"permissionIds",
|
|
||||||
"permissions",
|
|
||||||
"properties",
|
|
||||||
"starred",
|
|
||||||
"viewedByMe",
|
|
||||||
"viewedByMeTime",
|
|
||||||
"writersCanShare",
|
|
||||||
}, ","))
|
|
||||||
|
|
||||||
// Fields we need to read from permissions
|
|
||||||
var permissionsFields = googleapi.Field(strings.Join([]string{
|
|
||||||
"*",
|
|
||||||
"permissionDetails/*",
|
|
||||||
}, ","))
|
|
||||||
|
|
||||||
// getPermission returns permissions for the fileID and permissionID passed in
|
|
||||||
func (f *Fs) getPermission(ctx context.Context, fileID, permissionID string, useCache bool) (perm *drive.Permission, inherited bool, err error) {
|
|
||||||
f.permissionsMu.Lock()
|
|
||||||
defer f.permissionsMu.Unlock()
|
|
||||||
if useCache {
|
|
||||||
perm = f.permissions[permissionID]
|
|
||||||
if perm != nil {
|
|
||||||
return perm, false, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fs.Debugf(f, "Fetching permission %q", permissionID)
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
perm, err = f.svc.Permissions.Get(fileID, permissionID).
|
|
||||||
Fields(permissionsFields).
|
|
||||||
SupportsAllDrives(true).
|
|
||||||
Context(ctx).Do()
|
|
||||||
return f.shouldRetry(ctx, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
inherited = len(perm.PermissionDetails) > 0 && perm.PermissionDetails[0].Inherited
|
|
||||||
|
|
||||||
cleanPermission(perm)
|
|
||||||
|
|
||||||
// cache the permission
|
|
||||||
f.permissions[permissionID] = perm
|
|
||||||
|
|
||||||
return perm, inherited, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set the permissions on the info
|
|
||||||
func (f *Fs) setPermissions(ctx context.Context, info *drive.File, permissions []*drive.Permission) (err error) {
|
|
||||||
for _, perm := range permissions {
|
|
||||||
if perm.Role == "owner" {
|
|
||||||
// ignore owner permissions - these are set with owner
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
cleanPermissionForWrite(perm)
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
_, err = f.svc.Permissions.Create(info.Id, perm).
|
|
||||||
SupportsAllDrives(true).
|
|
||||||
Context(ctx).Do()
|
|
||||||
return f.shouldRetry(ctx, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to set permission: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clean attributes from permissions which we can't write
|
|
||||||
func cleanPermissionForWrite(perm *drive.Permission) {
|
|
||||||
perm.Deleted = false
|
|
||||||
perm.DisplayName = ""
|
|
||||||
perm.Id = ""
|
|
||||||
perm.Kind = ""
|
|
||||||
perm.PermissionDetails = nil
|
|
||||||
perm.TeamDrivePermissionDetails = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clean and cache the permission if not already cached
|
|
||||||
func (f *Fs) cleanAndCachePermission(perm *drive.Permission) {
|
|
||||||
f.permissionsMu.Lock()
|
|
||||||
defer f.permissionsMu.Unlock()
|
|
||||||
cleanPermission(perm)
|
|
||||||
if _, found := f.permissions[perm.Id]; !found {
|
|
||||||
f.permissions[perm.Id] = perm
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clean fields we don't need to keep from the permission
|
|
||||||
func cleanPermission(perm *drive.Permission) {
|
|
||||||
// DisplayName: Output only. The "pretty" name of the value of the
|
|
||||||
// permission. The following is a list of examples for each type of
|
|
||||||
// permission: * `user` - User's full name, as defined for their Google
|
|
||||||
// account, such as "Joe Smith." * `group` - Name of the Google Group,
|
|
||||||
// such as "The Company Administrators." * `domain` - String domain
|
|
||||||
// name, such as "thecompany.com." * `anyone` - No `displayName` is
|
|
||||||
// present.
|
|
||||||
perm.DisplayName = ""
|
|
||||||
|
|
||||||
// Kind: Output only. Identifies what kind of resource this is. Value:
|
|
||||||
// the fixed string "drive#permission".
|
|
||||||
perm.Kind = ""
|
|
||||||
|
|
||||||
// PermissionDetails: Output only. Details of whether the permissions on
|
|
||||||
// this shared drive item are inherited or directly on this item. This
|
|
||||||
// is an output-only field which is present only for shared drive items.
|
|
||||||
perm.PermissionDetails = nil
|
|
||||||
|
|
||||||
// PhotoLink: Output only. A link to the user's profile photo, if
|
|
||||||
// available.
|
|
||||||
perm.PhotoLink = ""
|
|
||||||
|
|
||||||
// TeamDrivePermissionDetails: Output only. Deprecated: Output only. Use
|
|
||||||
// `permissionDetails` instead.
|
|
||||||
perm.TeamDrivePermissionDetails = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fields we need to read from labels
|
|
||||||
var labelsFields = googleapi.Field(strings.Join([]string{
|
|
||||||
"*",
|
|
||||||
}, ","))
|
|
||||||
|
|
||||||
// getLabels returns labels for the fileID passed in
|
|
||||||
func (f *Fs) getLabels(ctx context.Context, fileID string) (labels []*drive.Label, err error) {
|
|
||||||
fs.Debugf(f, "Fetching labels for %q", fileID)
|
|
||||||
listLabels := f.svc.Files.ListLabels(fileID).
|
|
||||||
Fields(labelsFields).
|
|
||||||
Context(ctx)
|
|
||||||
for {
|
|
||||||
var info *drive.LabelList
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
info, err = listLabels.Do()
|
|
||||||
return f.shouldRetry(ctx, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
labels = append(labels, info.Labels...)
|
|
||||||
if info.NextPageToken == "" {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
listLabels.PageToken(info.NextPageToken)
|
|
||||||
}
|
|
||||||
for _, label := range labels {
|
|
||||||
cleanLabel(label)
|
|
||||||
}
|
|
||||||
return labels, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set the labels on the info
|
|
||||||
func (f *Fs) setLabels(ctx context.Context, info *drive.File, labels []*drive.Label) (err error) {
|
|
||||||
if len(labels) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
req := drive.ModifyLabelsRequest{}
|
|
||||||
for _, label := range labels {
|
|
||||||
req.LabelModifications = append(req.LabelModifications, &drive.LabelModification{
|
|
||||||
FieldModifications: labelFieldsToFieldModifications(label.Fields),
|
|
||||||
LabelId: label.Id,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
_, err = f.svc.Files.ModifyLabels(info.Id, &req).
|
|
||||||
Context(ctx).Do()
|
|
||||||
return f.shouldRetry(ctx, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to set owner: %w", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Convert label fields into something which can set the fields
|
|
||||||
func labelFieldsToFieldModifications(fields map[string]drive.LabelField) (out []*drive.LabelFieldModification) {
|
|
||||||
for id, field := range fields {
|
|
||||||
var emails []string
|
|
||||||
for _, user := range field.User {
|
|
||||||
emails = append(emails, user.EmailAddress)
|
|
||||||
}
|
|
||||||
out = append(out, &drive.LabelFieldModification{
|
|
||||||
// FieldId: The ID of the field to be modified.
|
|
||||||
FieldId: id,
|
|
||||||
|
|
||||||
// SetDateValues: Replaces the value of a dateString Field with these
|
|
||||||
// new values. The string must be in the RFC 3339 full-date format:
|
|
||||||
// YYYY-MM-DD.
|
|
||||||
SetDateValues: field.DateString,
|
|
||||||
|
|
||||||
// SetIntegerValues: Replaces the value of an `integer` field with these
|
|
||||||
// new values.
|
|
||||||
SetIntegerValues: field.Integer,
|
|
||||||
|
|
||||||
// SetSelectionValues: Replaces a `selection` field with these new
|
|
||||||
// values.
|
|
||||||
SetSelectionValues: field.Selection,
|
|
||||||
|
|
||||||
// SetTextValues: Sets the value of a `text` field.
|
|
||||||
SetTextValues: field.Text,
|
|
||||||
|
|
||||||
// SetUserValues: Replaces a `user` field with these new values. The
|
|
||||||
// values must be valid email addresses.
|
|
||||||
SetUserValues: emails,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clean fields we don't need to keep from the label
|
|
||||||
func cleanLabel(label *drive.Label) {
|
|
||||||
// Kind: This is always drive#label
|
|
||||||
label.Kind = ""
|
|
||||||
|
|
||||||
for name, field := range label.Fields {
|
|
||||||
// Kind: This is always drive#labelField.
|
|
||||||
field.Kind = ""
|
|
||||||
|
|
||||||
// Note the fields are copies so we need to write them
|
|
||||||
// back to the map
|
|
||||||
label.Fields[name] = field
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse the metadata from drive item
|
|
||||||
//
|
|
||||||
// It should return nil if there is no Metadata
|
|
||||||
func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err error) {
|
|
||||||
metadata := make(fs.Metadata, 16)
|
|
||||||
|
|
||||||
// Dump user metadata first as it overrides system metadata
|
|
||||||
for k, v := range info.Properties {
|
|
||||||
metadata[k] = v
|
|
||||||
}
|
|
||||||
|
|
||||||
// System metadata
|
|
||||||
metadata["copy-requires-writer-permission"] = fmt.Sprint(info.CopyRequiresWriterPermission)
|
|
||||||
metadata["writers-can-share"] = fmt.Sprint(info.WritersCanShare)
|
|
||||||
metadata["viewed-by-me"] = fmt.Sprint(info.ViewedByMe)
|
|
||||||
metadata["content-type"] = info.MimeType
|
|
||||||
|
|
||||||
// Owners: Output only. The owner of this file. Only certain legacy
|
|
||||||
// files may have more than one owner. This field isn't populated for
|
|
||||||
// items in shared drives.
|
|
||||||
if o.fs.opt.MetadataOwner.IsSet(rwRead) && len(info.Owners) > 0 {
|
|
||||||
user := info.Owners[0]
|
|
||||||
if len(info.Owners) > 1 {
|
|
||||||
fs.Logf(o, "Ignoring more than 1 owner")
|
|
||||||
}
|
|
||||||
if user != nil {
|
|
||||||
id := user.EmailAddress
|
|
||||||
if id == "" {
|
|
||||||
id = user.DisplayName
|
|
||||||
}
|
|
||||||
metadata["owner"] = id
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if o.fs.opt.MetadataPermissions.IsSet(rwRead) {
|
|
||||||
// We only write permissions out if they are not inherited.
|
|
||||||
//
|
|
||||||
// On My Drives permissions seem to be attached to every item
|
|
||||||
// so they will always be written out.
|
|
||||||
//
|
|
||||||
// On Shared Drives only non-inherited permissions will be
|
|
||||||
// written out.
|
|
||||||
|
|
||||||
// To read the inherited permissions flag will mean we need to
|
|
||||||
// read the permissions for each object and the cache will be
|
|
||||||
// useless. However shared drives don't return permissions
|
|
||||||
// only permissionIds so will need to fetch them for each
|
|
||||||
// object. We use HasAugmentedPermissions to see if there are
|
|
||||||
// special permissions before fetching them to save transactions.
|
|
||||||
|
|
||||||
// HasAugmentedPermissions: Output only. Whether there are permissions
|
|
||||||
// directly on this file. This field is only populated for items in
|
|
||||||
// shared drives.
|
|
||||||
if o.fs.isTeamDrive && !info.HasAugmentedPermissions {
|
|
||||||
// Don't process permissions if there aren't any specifically set
|
|
||||||
info.Permissions = nil
|
|
||||||
info.PermissionIds = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// PermissionIds: Output only. List of permission IDs for users with
|
|
||||||
// access to this file.
|
|
||||||
//
|
|
||||||
// Only process these if we have no Permissions
|
|
||||||
if len(info.PermissionIds) > 0 && len(info.Permissions) == 0 {
|
|
||||||
info.Permissions = make([]*drive.Permission, 0, len(info.PermissionIds))
|
|
||||||
g, gCtx := errgroup.WithContext(ctx)
|
|
||||||
g.SetLimit(o.fs.ci.Checkers)
|
|
||||||
var mu sync.Mutex // protect the info.Permissions from concurrent writes
|
|
||||||
for _, permissionID := range info.PermissionIds {
|
|
||||||
permissionID := permissionID
|
|
||||||
g.Go(func() error {
|
|
||||||
// must fetch the team drive ones individually to check the inherited flag
|
|
||||||
perm, inherited, err := o.fs.getPermission(gCtx, actualID(info.Id), permissionID, !o.fs.isTeamDrive)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to read permission: %w", err)
|
|
||||||
}
|
|
||||||
// Don't write inherited permissions out
|
|
||||||
if inherited {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// Don't write owner role out - these are covered by the owner metadata
|
|
||||||
if perm.Role == "owner" {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
mu.Lock()
|
|
||||||
info.Permissions = append(info.Permissions, perm)
|
|
||||||
mu.Unlock()
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
|
||||||
err = g.Wait()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Clean the fetched permissions
|
|
||||||
for _, perm := range info.Permissions {
|
|
||||||
o.fs.cleanAndCachePermission(perm)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Permissions: Output only. The full list of permissions for the file.
|
|
||||||
// This is only available if the requesting user can share the file. Not
|
|
||||||
// populated for items in shared drives.
|
|
||||||
if len(info.Permissions) > 0 {
|
|
||||||
buf, err := json.Marshal(info.Permissions)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to marshal permissions: %w", err)
|
|
||||||
}
|
|
||||||
metadata["permissions"] = string(buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Permission propagation
|
|
||||||
// https://developers.google.com/drive/api/guides/manage-sharing#permission-propagation
|
|
||||||
// Leads me to believe that in non shared drives, permissions
|
|
||||||
// are added to each item when you set permissions for a
|
|
||||||
// folder whereas in shared drives they are inherited and
|
|
||||||
// placed on the item directly.
|
|
||||||
}
|
|
||||||
|
|
||||||
if info.FolderColorRgb != "" {
|
|
||||||
metadata["folder-color-rgb"] = info.FolderColorRgb
|
|
||||||
}
|
|
||||||
if info.Description != "" {
|
|
||||||
metadata["description"] = info.Description
|
|
||||||
}
|
|
||||||
metadata["starred"] = fmt.Sprint(info.Starred)
|
|
||||||
metadata["btime"] = info.CreatedTime
|
|
||||||
metadata["mtime"] = info.ModifiedTime
|
|
||||||
|
|
||||||
if o.fs.opt.MetadataLabels.IsSet(rwRead) {
|
|
||||||
// FIXME would be really nice if we knew if files had labels
|
|
||||||
// before listing but we need to know all possible label IDs
|
|
||||||
// to get it in the listing.
|
|
||||||
|
|
||||||
labels, err := o.fs.getLabels(ctx, actualID(info.Id))
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to fetch labels: %w", err)
|
|
||||||
}
|
|
||||||
buf, err := json.Marshal(labels)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to marshal labels: %w", err)
|
|
||||||
}
|
|
||||||
metadata["labels"] = string(buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
o.metadata = &metadata
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
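To make the mapping above concrete, this is roughly what parseMetadata assembles for an ordinary file. The values are illustrative only (not captured output); the keys are the ones defined in systemMetadataInfo plus one arbitrary user property:

```go
// Illustrative example of the fs.Metadata map built by parseMetadata
// (fs.Metadata is a map[string]string from the rclone fs package).
meta := fs.Metadata{
	"content-type": "text/plain",
	"mtime":        "2017-11-22T13:00:00.000Z",
	"btime":        "2017-11-22T12:59:30.000Z",
	"starred":      "false",
	"viewed-by-me": "true",
	"owner":        "user@example.com", // only with --drive-metadata-owner
	"description":  "Contract for signing",
	"my-user-key":  "my-user-value", // arbitrary drive property
}
_ = meta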
|
|
||||||
// Set the owner on the info
|
|
||||||
func (f *Fs) setOwner(ctx context.Context, info *drive.File, owner string) (err error) {
|
|
||||||
perm := drive.Permission{
|
|
||||||
Role: "owner",
|
|
||||||
EmailAddress: owner,
|
|
||||||
// Type: The type of the grantee. Valid values are: * `user` * `group` *
|
|
||||||
// `domain` * `anyone` When creating a permission, if `type` is `user`
|
|
||||||
// or `group`, you must provide an `emailAddress` for the user or group.
|
|
||||||
// When `type` is `domain`, you must provide a `domain`. There isn't
|
|
||||||
// extra information required for an `anyone` type.
|
|
||||||
Type: "user",
|
|
||||||
}
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
_, err = f.svc.Permissions.Create(info.Id, &perm).
|
|
||||||
SupportsAllDrives(true).
|
|
||||||
TransferOwnership(true).
|
|
||||||
// SendNotificationEmail(false). - required apparently!
|
|
||||||
Context(ctx).Do()
|
|
||||||
return f.shouldRetry(ctx, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to set owner: %w", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Call back to set metadata that can't be set on the upload/update
|
|
||||||
//
|
|
||||||
// The *drive.File passed in holds the current state of the drive.File
|
|
||||||
// and this should update it with any modifications.
|
|
||||||
type updateMetadataFn func(context.Context, *drive.File) error
|
|
||||||
|
|
||||||
// read the metadata from meta and write it into updateInfo
|
|
||||||
//
|
|
||||||
// update should be true if this is being used to create metadata for
|
|
||||||
// an update/PATCH call as the rules on what can be updated are
|
|
||||||
// slightly different there.
|
|
||||||
//
|
|
||||||
// It returns a callback which should be called to finish the updates
|
|
||||||
// after the data is uploaded.
|
|
||||||
func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update bool) (callback updateMetadataFn, err error) {
|
|
||||||
callbackFns := []updateMetadataFn{}
|
|
||||||
callback = func(ctx context.Context, info *drive.File) error {
|
|
||||||
for _, fn := range callbackFns {
|
|
||||||
err := fn(ctx, info)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// merge metadata into request and user metadata
|
|
||||||
for k, v := range meta {
|
|
||||||
k, v := k, v
|
|
||||||
// parse a boolean from v and write into out
|
|
||||||
parseBool := func(out *bool) error {
|
|
||||||
b, err := strconv.ParseBool(v)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("can't parse metadata %q = %q: %w", k, v, err)
|
|
||||||
}
|
|
||||||
*out = b
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
switch k {
|
|
||||||
case "copy-requires-writer-permission":
|
|
||||||
if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
case "writers-can-share":
|
|
||||||
if err := parseBool(&updateInfo.WritersCanShare); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
case "viewed-by-me":
|
|
||||||
// Can't write this
|
|
||||||
case "content-type":
|
|
||||||
updateInfo.MimeType = v
|
|
||||||
case "owner":
|
|
||||||
if !f.opt.MetadataOwner.IsSet(rwWrite) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Can't set Owner on upload so need to set afterwards
|
|
||||||
callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
|
|
||||||
return f.setOwner(ctx, info, v)
|
|
||||||
})
|
|
||||||
case "permissions":
|
|
||||||
if !f.opt.MetadataPermissions.IsSet(rwWrite) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
var perms []*drive.Permission
|
|
||||||
err := json.Unmarshal([]byte(v), &perms)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to unmarshal permissions: %w", err)
|
|
||||||
}
|
|
||||||
// Can't set Permissions on upload so need to set afterwards
|
|
||||||
callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
|
|
||||||
return f.setPermissions(ctx, info, perms)
|
|
||||||
})
|
|
||||||
case "labels":
|
|
||||||
if !f.opt.MetadataLabels.IsSet(rwWrite) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
var labels []*drive.Label
|
|
||||||
err := json.Unmarshal([]byte(v), &labels)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to unmarshal labels: %w", err)
|
|
||||||
}
|
|
||||||
// Can't set Labels on upload so need to set afterwards
|
|
||||||
callbackFns = append(callbackFns, func(ctx context.Context, info *drive.File) error {
|
|
||||||
return f.setLabels(ctx, info, labels)
|
|
||||||
})
|
|
||||||
case "folder-color-rgb":
|
|
||||||
updateInfo.FolderColorRgb = v
|
|
||||||
case "description":
|
|
||||||
updateInfo.Description = v
|
|
||||||
case "starred":
|
|
||||||
if err := parseBool(&updateInfo.Starred); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
case "btime":
|
|
||||||
if update {
|
|
||||||
fs.Debugf(f, "Skipping btime metadata as can't update it on an existing file: %v", v)
|
|
||||||
} else {
|
|
||||||
updateInfo.CreatedTime = v
|
|
||||||
}
|
|
||||||
case "mtime":
|
|
||||||
updateInfo.ModifiedTime = v
|
|
||||||
default:
|
|
||||||
if updateInfo.Properties == nil {
|
|
||||||
updateInfo.Properties = make(map[string]string, 1)
|
|
||||||
}
|
|
||||||
updateInfo.Properties[k] = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return callback, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fetch metadata and update updateInfo if --metadata is in use
|
|
||||||
func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption, updateInfo *drive.File, update bool) (callback updateMetadataFn, err error) {
|
|
||||||
meta, err := fs.GetMetadataOptions(ctx, f, src, options)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to read metadata from source object: %w", err)
|
|
||||||
}
|
|
||||||
callback, err = f.updateMetadata(ctx, updateInfo, meta, update)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to update metadata from source object: %w", err)
|
|
||||||
}
|
|
||||||
return callback, nil
|
|
||||||
}
|
|
||||||
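The two-phase shape above (copy what can be set directly onto the create/update request now, and defer owner, permissions and labels to a callback that runs once the file exists) is easiest to see from the caller's side. A hedged sketch of how an upload path might use fetchAndUpdateMetadata; `doUpload` stands in for the real drive upload plumbing and is not part of the code above:

```go
// Sketch only: how an upload path would use fetchAndUpdateMetadata.
func uploadWithMetadata(ctx context.Context, f *Fs, src fs.ObjectInfo, options []fs.OpenOption,
	doUpload func(context.Context, *drive.File) (*drive.File, error)) error {
	createInfo := &drive.File{Name: src.Remote()}

	// Phase 1: fill in everything that can be set on the create request.
	callback, err := f.fetchAndUpdateMetadata(ctx, src, options, createInfo, false)
	if err != nil {
		return err
	}

	// Upload the data (elided) and get back the resulting drive.File.
	info, err := doUpload(ctx, createInfo)
	if err != nil {
		return err
	}

	// Phase 2: apply metadata that can only be set on an existing file
	// (owner, permissions, labels) via the returned callback.
	return callback(ctx, info)
}
```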
@@ -1,78 +0,0 @@
-// This file contains the implementation of the sync batcher for uploads
-//
-// Dropbox rules say you can start as many batches as you want, but
-// you may only have one batch being committed and must wait for the
-// batch to be finished before committing another.
-
-package dropbox
-
-import (
-	"context"
-	"fmt"
-
-	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
-	"github.com/rclone/rclone/fs/fserrors"
-)
-
-// finishBatch commits the batch, returning a batch status to poll or maybe complete
-func (f *Fs) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) {
-	var arg = &files.UploadSessionFinishBatchArg{
-		Entries: items,
-	}
-	err = f.pacer.Call(func() (bool, error) {
-		complete, err = f.srv.UploadSessionFinishBatchV2(arg)
-		// If error is insufficient space then don't retry
-		if e, ok := err.(files.UploadSessionFinishAPIError); ok {
-			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
-				err = fserrors.NoRetryError(err)
-				return false, err
-			}
-		}
-		// after the first chunk is uploaded, we retry everything
-		return err != nil, err
-	})
-	if err != nil {
-		return nil, fmt.Errorf("batch commit failed: %w", err)
-	}
-	return complete, nil
-}
-
-// Called by the batcher to commit a batch
-func (f *Fs) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []*files.FileMetadata, errors []error) (err error) {
-	// finalise the batch getting either a result or a job id to poll
-	complete, err := f.finishBatch(ctx, items)
-	if err != nil {
-		return err
-	}
-
-	// Check we got the right number of entries
-	entries := complete.Entries
-	if len(entries) != len(results) {
-		return fmt.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
-	}
-
-	// Format results for return
-	for i := range results {
-		item := entries[i]
-		if item.Tag == "success" {
-			results[i] = item.Success
-		} else {
-			errorTag := item.Tag
-			if item.Failure != nil {
-				errorTag = item.Failure.Tag
-				if item.Failure.LookupFailed != nil {
-					errorTag += "/" + item.Failure.LookupFailed.Tag
-				}
-				if item.Failure.Path != nil {
-					errorTag += "/" + item.Failure.Path.Tag
-				}
-				if item.Failure.PropertiesError != nil {
-					errorTag += "/" + item.Failure.PropertiesError.Tag
-				}
-			}
-			errors[i] = fmt.Errorf("upload failed: %s", errorTag)
-		}
-	}
-
-	return nil
-}
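The header comment of the deleted file states the rule the code has to respect: any number of batches may be being assembled, but only one may be committing at a time, and the commit must be seen through before the next one starts. A minimal way to express that rule (a sketch, not the rclone batcher implementation, which lives in lib/batcher in newer versions) is to serialise the commit step behind a mutex:

```go
// Sketch: serialise batch commits so only one UploadSessionFinishBatch
// call is in flight at a time, per the Dropbox rule quoted above.
type batchCommitter struct {
	mu sync.Mutex // held for the duration of a commit
}

func (b *batchCommitter) commit(ctx context.Context, commitFn func(context.Context) error) error {
	b.mu.Lock()
	defer b.mu.Unlock()
	return commitFn(ctx)
}
```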
396 backend/dropbox/dropbox.go (Normal file → Executable file)
@@ -23,22 +23,23 @@ of path_display and all will be well.
 
 import (
 	"context"
-	"errors"
 	"fmt"
 	"io"
+	"log"
 	"path"
 	"regexp"
 	"strings"
 	"time"
 	"unicode/utf8"
 
-	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox"
-	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/auth"
-	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/common"
-	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
-	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/sharing"
-	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/team"
-	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/users"
+	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
+	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
+	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common"
+	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
+	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
+	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
+	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
+	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/dropbox/dbhash"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
@@ -47,7 +48,6 @@ import (
 	"github.com/rclone/rclone/fs/config/obscure"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/lib/batcher"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
@@ -59,15 +59,15 @@ import (
 const (
 	rcloneClientID              = "5jcck7diasz0rqy"
 	rcloneEncryptedClientSecret = "fRS5vVLr2v6FbyXYnIgjwBuUAt0osq_QZTXAEcmZ7g"
-	defaultMinSleep             = fs.Duration(10 * time.Millisecond)
+	minSleep                    = 10 * time.Millisecond
 	maxSleep                    = 2 * time.Second
 	decayConstant               = 2 // bigger for slower decay, exponential
 	// Upload chunk size - setting too small makes uploads slow.
 	// Chunks are buffered into memory for retries.
 	//
-	// Speed vs chunk size uploading a 1 GiB file on 2017-11-22
+	// Speed vs chunk size uploading a 1 GB file on 2017-11-22
 	//
-	// Chunk Size MiB, Speed MiB/s, % of max
+	// Chunk Size MB, Speed Mbyte/s, % of max
 	// 1	1.364	11%
 	// 2	2.443	19%
 	// 4	4.288	33%
@@ -82,11 +82,11 @@ const (
 	// 96	12.302	95%
 	// 128	12.945	100%
 	//
-	// Choose 48 MiB which is 91% of Maximum speed. rclone by
-	// default does 4 transfers so this should use 4*48 MiB = 192 MiB
+	// Choose 48MB which is 91% of Maximum speed. rclone by
+	// default does 4 transfers so this should use 4*48MB = 192MB
 	// by default.
-	defaultChunkSize = 48 * fs.Mebi
-	maxChunkSize     = 150 * fs.Mebi
+	defaultChunkSize = 48 * fs.MebiByte
+	maxChunkSize     = 150 * fs.MebiByte
 	// Max length of filename parts: https://help.dropbox.com/installs-integrations/sync-uploads/files-not-syncing
 	maxFileNameLength = 255
 )
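The trade-off described in the comment above is easy to quantify: each in-flight transfer buffers one chunk in memory, so peak buffer use is roughly transfers × chunk size. A tiny standalone illustration of that arithmetic (variable names are made up for the example):

```go
package main

import "fmt"

func main() {
	const mib = 1024 * 1024
	transfers := 4        // rclone's default number of concurrent transfers
	chunkSize := 48 * mib // this backend's default chunk size
	fmt.Printf("peak chunk buffers ≈ %d MiB\n", transfers*chunkSize/mib) // ≈ 192 MiB
}
```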
@@ -99,10 +99,8 @@ var (
|
|||||||
"files.content.write",
|
"files.content.write",
|
||||||
"files.content.read",
|
"files.content.read",
|
||||||
"sharing.write",
|
"sharing.write",
|
||||||
"account_info.read", // needed for About
|
|
||||||
// "file_requests.write",
|
// "file_requests.write",
|
||||||
// "members.read", // needed for impersonate - but causes app to need to be approved by Dropbox Team Admin during the flow
|
// "members.read", // needed for impersonate - but causes app to need to be approved by Dropbox Team Admin during the flow
|
||||||
// "team_data.member"
|
|
||||||
},
|
},
|
||||||
// Endpoint: oauth2.Endpoint{
|
// Endpoint: oauth2.Endpoint{
|
||||||
// AuthURL: "https://www.dropbox.com/1/oauth2/authorize",
|
// AuthURL: "https://www.dropbox.com/1/oauth2/authorize",
|
||||||
@@ -122,14 +120,6 @@ var (
|
|||||||
|
|
||||||
// Errors
|
// Errors
|
||||||
errNotSupportedInSharedMode = fserrors.NoRetryError(errors.New("not supported in shared files mode"))
|
errNotSupportedInSharedMode = fserrors.NoRetryError(errors.New("not supported in shared files mode"))
|
||||||
|
|
||||||
// Configure the batcher
|
|
||||||
defaultBatcherOptions = batcher.Options{
|
|
||||||
MaxBatchSize: 1000,
|
|
||||||
DefaultTimeoutSync: 500 * time.Millisecond,
|
|
||||||
DefaultTimeoutAsync: 10 * time.Second,
|
|
||||||
DefaultBatchSizeAsync: 100,
|
|
||||||
}
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Gets an oauth config with the right scopes
|
// Gets an oauth config with the right scopes
|
||||||
@@ -140,36 +130,39 @@ func getOauthConfig(m configmap.Mapper) *oauth2.Config {
|
|||||||
}
|
}
|
||||||
// Make a copy of the config
|
// Make a copy of the config
|
||||||
config := *dropboxConfig
|
config := *dropboxConfig
|
||||||
// Make a copy of the scopes with extra scopes requires appended
|
// Make a copy of the scopes with "members.read" appended
|
||||||
config.Scopes = append(config.Scopes, "members.read", "team_data.member")
|
config.Scopes = append(config.Scopes, "members.read")
|
||||||
return &config
|
return &config
|
||||||
}
|
}
|
||||||
|
|
||||||
// Register with Fs
|
// Register with Fs
|
||||||
func init() {
|
func init() {
|
||||||
DbHashType = hash.RegisterHash("dropbox", "DropboxHash", 64, dbhash.New)
|
DbHashType = hash.RegisterHash("DropboxHash", 64, dbhash.New)
|
||||||
fs.Register(&fs.RegInfo{
|
fs.Register(&fs.RegInfo{
|
||||||
Name: "dropbox",
|
Name: "dropbox",
|
||||||
Description: "Dropbox",
|
Description: "Dropbox",
|
||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
Config: func(ctx context.Context, name string, m configmap.Mapper) {
|
||||||
return oauthutil.ConfigOut("", &oauthutil.Options{
|
opt := oauthutil.Options{
|
||||||
OAuth2Config: getOauthConfig(m),
|
NoOffline: true,
|
||||||
NoOffline: true,
|
|
||||||
OAuth2Opts: []oauth2.AuthCodeOption{
|
OAuth2Opts: []oauth2.AuthCodeOption{
|
||||||
oauth2.SetAuthURLParam("token_access_type", "offline"),
|
oauth2.SetAuthURLParam("token_access_type", "offline"),
|
||||||
},
|
},
|
||||||
})
|
}
|
||||||
|
err := oauthutil.Config(ctx, "dropbox", name, m, getOauthConfig(m), &opt)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Failed to configure token: %v", err)
|
||||||
|
}
|
||||||
},
|
},
|
||||||
Options: append(append(oauthutil.SharedOptions, []fs.Option{{
|
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||||
Name: "chunk_size",
|
Name: "chunk_size",
|
||||||
Help: fmt.Sprintf(`Upload chunk size (< %v).
|
Help: fmt.Sprintf(`Upload chunk size. (< %v).
|
||||||
|
|
||||||
Any files larger than this will be uploaded in chunks of this size.
|
Any files larger than this will be uploaded in chunks of this size.
|
||||||
|
|
||||||
Note that chunks are buffered in memory (one at a time) so rclone can
|
Note that chunks are buffered in memory (one at a time) so rclone can
|
||||||
deal with retries. Setting this larger will increase the speed
|
deal with retries. Setting this larger will increase the speed
|
||||||
slightly (at most 10%% for 128 MiB in tests) at the cost of using more
|
slightly (at most 10%% for 128MB in tests) at the cost of using more
|
||||||
memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
|
memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
|
||||||
Default: defaultChunkSize,
|
Default: defaultChunkSize,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
@@ -191,9 +184,8 @@ client_secret) to use this option as currently rclone's default set of
|
|||||||
permissions doesn't include "members.read". This can be added once
|
permissions doesn't include "members.read". This can be added once
|
||||||
v1.55 or later is in use everywhere.
|
v1.55 or later is in use everywhere.
|
||||||
`,
|
`,
|
||||||
Default: "",
|
Default: "",
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
Sensitive: true,
|
|
||||||
}, {
|
}, {
|
||||||
Name: "shared_files",
|
Name: "shared_files",
|
||||||
Help: `Instructs rclone to work on individual shared files.
|
Help: `Instructs rclone to work on individual shared files.
|
||||||
@@ -219,11 +211,6 @@ Note that we don't unmount the shared folder afterwards so the
|
|||||||
shared folder.`,
|
shared folder.`,
|
||||||
Default: false,
|
Default: false,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
|
||||||
Name: "pacer_min_sleep",
|
|
||||||
Default: defaultMinSleep,
|
|
||||||
Help: "Minimum time to sleep between API calls.",
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
}, {
|
||||||
Name: config.ConfigEncoding,
|
Name: config.ConfigEncoding,
|
||||||
Help: config.ConfigEncodingHelp,
|
Help: config.ConfigEncodingHelp,
|
||||||
@@ -237,7 +224,7 @@ shared folder.`,
|
|||||||
encoder.EncodeDel |
|
encoder.EncodeDel |
|
||||||
encoder.EncodeRightSpace |
|
encoder.EncodeRightSpace |
|
||||||
encoder.EncodeInvalidUtf8,
|
encoder.EncodeInvalidUtf8,
|
||||||
}}...), defaultBatcherOptions.FsOptions("For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)\n\n")...),
|
}}...),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -247,11 +234,6 @@ type Options struct {
|
|||||||
Impersonate string `config:"impersonate"`
|
Impersonate string `config:"impersonate"`
|
||||||
SharedFiles bool `config:"shared_files"`
|
SharedFiles bool `config:"shared_files"`
|
||||||
SharedFolders bool `config:"shared_folders"`
|
SharedFolders bool `config:"shared_folders"`
|
||||||
BatchMode string `config:"batch_mode"`
|
|
||||||
BatchSize int `config:"batch_size"`
|
|
||||||
BatchTimeout fs.Duration `config:"batch_timeout"`
|
|
||||||
AsyncBatch bool `config:"async_batch"`
|
|
||||||
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
|
|
||||||
Enc encoder.MultiEncoder `config:"encoding"`
|
Enc encoder.MultiEncoder `config:"encoding"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -271,7 +253,6 @@ type Fs struct {
|
|||||||
slashRootSlash string // root with "/" prefix and postfix, lowercase
|
slashRootSlash string // root with "/" prefix and postfix, lowercase
|
||||||
pacer *fs.Pacer // To pace the API calls
|
pacer *fs.Pacer // To pace the API calls
|
||||||
ns string // The namespace we are using or "" for none
|
ns string // The namespace we are using or "" for none
|
||||||
batcher *batcher.Batcher[*files.UploadSessionFinishArg, *files.FileMetadata]
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Object describes a dropbox object
|
// Object describes a dropbox object
|
||||||
@@ -287,6 +268,8 @@ type Object struct {
|
|||||||
hash string // content_hash of the object
|
hash string // content_hash of the object
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ------------------------------------------------------------
|
||||||
|
|
||||||
// Name of the remote (as passed into NewFs)
|
// Name of the remote (as passed into NewFs)
|
||||||
func (f *Fs) Name() string {
|
func (f *Fs) Name() string {
|
||||||
return f.name
|
return f.name
|
||||||
@@ -316,36 +299,36 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
|
|||||||
if err == nil {
|
if err == nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
errString := err.Error()
|
baseErrString := errors.Cause(err).Error()
|
||||||
// First check for specific errors
|
// First check for specific errors
|
||||||
if strings.Contains(errString, "insufficient_space") {
|
if strings.Contains(baseErrString, "insufficient_space") {
|
||||||
return false, fserrors.FatalError(err)
|
return false, fserrors.FatalError(err)
|
||||||
} else if strings.Contains(errString, "malformed_path") {
|
} else if strings.Contains(baseErrString, "malformed_path") {
|
||||||
return false, fserrors.NoRetryError(err)
|
return false, fserrors.NoRetryError(err)
|
||||||
}
|
}
|
||||||
// Then handle any official Retry-After header from Dropbox's SDK
|
// Then handle any official Retry-After header from Dropbox's SDK
|
||||||
switch e := err.(type) {
|
switch e := err.(type) {
|
||||||
case auth.RateLimitAPIError:
|
case auth.RateLimitAPIError:
|
||||||
if e.RateLimitError.RetryAfter > 0 {
|
if e.RateLimitError.RetryAfter > 0 {
|
||||||
fs.Logf(errString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
|
fs.Logf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
|
||||||
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
|
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
|
||||||
}
|
}
|
||||||
return true, err
|
return true, err
|
||||||
}
|
}
|
||||||
// Keep old behavior for backward compatibility
|
// Keep old behavior for backward compatibility
|
||||||
if strings.Contains(errString, "too_many_write_operations") || strings.Contains(errString, "too_many_requests") || errString == "" {
|
if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") || baseErrString == "" {
|
||||||
return true, err
|
return true, err
|
||||||
}
|
}
|
||||||
return fserrors.ShouldRetry(err), err
|
return fserrors.ShouldRetry(err), err
|
||||||
}
|
}
|
||||||
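shouldRetry is only half of the retry story; every SDK call in this file is wrapped in f.pacer.Call, which keeps re-running the closure while the error is reported as retryable and backs off according to any Retry-After converted above. The pattern, extracted as a sketch (the ListFolder call and `arg` stand in for any of the real Dropbox SDK calls used in this file):

```go
// Sketch of the call pattern used throughout this backend: wrap every SDK
// call in f.pacer.Call so that errors classified as retryable by
// shouldRetry are retried automatically with pacing.
var res *files.ListFolderResult
err = f.pacer.Call(func() (bool, error) {
	res, err = f.srv.ListFolder(&arg) // stands in for any Dropbox SDK call
	return shouldRetry(ctx, err)
})
if err != nil {
	return nil, fmt.Errorf("list folder: %w", err)
}
```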
|
|
||||||
func checkUploadChunkSize(cs fs.SizeSuffix) error {
|
func checkUploadChunkSize(cs fs.SizeSuffix) error {
|
||||||
const minChunkSize = fs.SizeSuffixBase
|
const minChunkSize = fs.Byte
|
||||||
if cs < minChunkSize {
|
if cs < minChunkSize {
|
||||||
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
|
return errors.Errorf("%s is less than %s", cs, minChunkSize)
|
||||||
}
|
}
|
||||||
if cs > maxChunkSize {
|
if cs > maxChunkSize {
|
||||||
return fmt.Errorf("%s is greater than %s", cs, maxChunkSize)
|
return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -368,7 +351,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
}
|
}
|
||||||
err = checkUploadChunkSize(opt.ChunkSize)
|
err = checkUploadChunkSize(opt.ChunkSize)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("dropbox: chunk size: %w", err)
|
return nil, errors.Wrap(err, "dropbox: chunk size")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Convert the old token if it exists. The old token was just
|
// Convert the old token if it exists. The old token was just
|
||||||
@@ -380,13 +363,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
|
newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
|
||||||
err := config.SetValueAndSave(name, config.ConfigToken, newToken)
|
err := config.SetValueAndSave(name, config.ConfigToken, newToken)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("NewFS convert token: %w", err)
|
return nil, errors.Wrap(err, "NewFS convert token")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, getOauthConfig(m))
|
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, getOauthConfig(m))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to configure dropbox: %w", err)
|
return nil, errors.Wrap(err, "failed to configure dropbox")
|
||||||
}
|
}
|
||||||
|
|
||||||
ci := fs.GetConfig(ctx)
|
ci := fs.GetConfig(ctx)
|
||||||
@@ -395,15 +378,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
name: name,
|
name: name,
|
||||||
opt: *opt,
|
opt: *opt,
|
||||||
ci: ci,
|
ci: ci,
|
||||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||||
}
|
|
||||||
batcherOptions := defaultBatcherOptions
|
|
||||||
batcherOptions.Mode = f.opt.BatchMode
|
|
||||||
batcherOptions.Size = f.opt.BatchSize
|
|
||||||
batcherOptions.Timeout = time.Duration(f.opt.BatchTimeout)
|
|
||||||
f.batcher, err = batcher.New(ctx, f, f.commitBatch, batcherOptions)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
cfg := dropbox.Config{
|
cfg := dropbox.Config{
|
||||||
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
|
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
|
||||||
@@ -429,11 +404,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
args := team.NewMembersGetInfoArgs(members)
|
args := team.NewMembersGetInfoArgs(members)
|
||||||
|
|
||||||
memberIds, err := f.team.MembersGetInfo(args)
|
memberIds, err := f.team.MembersGetInfo(args)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("invalid dropbox team member: %q: %w", opt.Impersonate, err)
|
return nil, errors.Wrapf(err, "invalid dropbox team member: %q", opt.Impersonate)
|
||||||
}
|
|
||||||
if len(memberIds) == 0 || memberIds[0].MemberInfo == nil || memberIds[0].MemberInfo.Profile == nil {
|
|
||||||
return nil, fmt.Errorf("dropbox team member not found: %q", opt.Impersonate)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
cfg.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
|
cfg.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
|
||||||
@@ -493,7 +466,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
default:
|
default:
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
// if the mount failed we have to abort here
|
// if the moint failed we have to abort here
|
||||||
}
|
}
|
||||||
// if the mount succeeded it's now a normal folder in the users root namespace
|
// if the mount succeeded it's now a normal folder in the users root namespace
|
||||||
// we disable shared folder mode and proceed normally
|
// we disable shared folder mode and proceed normally
|
||||||
@@ -510,7 +483,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
return shouldRetry(ctx, err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("get current account failed: %w", err)
|
return nil, errors.Wrap(err, "get current account failed")
|
||||||
}
|
}
|
||||||
switch x := acc.RootInfo.(type) {
|
switch x := acc.RootInfo.(type) {
|
||||||
case *common.TeamRootInfo:
|
case *common.TeamRootInfo:
|
||||||
@@ -518,30 +491,28 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
case *common.UserRootInfo:
|
case *common.UserRootInfo:
|
||||||
f.ns = x.RootNamespaceId
|
f.ns = x.RootNamespaceId
|
||||||
default:
|
default:
|
||||||
return nil, fmt.Errorf("unknown RootInfo type %v %T", acc.RootInfo, acc.RootInfo)
|
return nil, errors.Errorf("unknown RootInfo type %v %T", acc.RootInfo, acc.RootInfo)
|
||||||
}
|
}
|
||||||
fs.Debugf(f, "Using root namespace %q", f.ns)
|
fs.Debugf(f, "Using root namespace %q", f.ns)
|
||||||
}
|
}
|
||||||
f.setRoot(root)
|
f.setRoot(root)
|
||||||
|
|
||||||
// See if the root is actually an object
|
// See if the root is actually an object
|
||||||
if f.root != "" {
|
_, err = f.getFileMetadata(ctx, f.slashRoot)
|
||||||
_, err = f.getFileMetadata(ctx, f.slashRoot)
|
if err == nil {
|
||||||
if err == nil {
|
newRoot := path.Dir(f.root)
|
||||||
newRoot := path.Dir(f.root)
|
if newRoot == "." {
|
||||||
if newRoot == "." {
|
newRoot = ""
|
||||||
newRoot = ""
|
|
||||||
}
|
|
||||||
f.setRoot(newRoot)
|
|
||||||
// return an error with an fs which points to the parent
|
|
||||||
return f, fs.ErrorIsFile
|
|
||||||
}
|
}
|
||||||
|
f.setRoot(newRoot)
|
||||||
|
// return an error with an fs which points to the parent
|
||||||
|
return f, fs.ErrorIsFile
|
||||||
}
|
}
|
||||||
return f, nil
|
return f, nil
|
||||||
}
|
}
|
||||||
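The tail of NewFs above implements a general rclone convention: if the requested root turns out to be a file rather than a directory, the backend re-points itself at the parent directory and returns fs.ErrorIsFile together with a usable Fs. Callers therefore treat that error as success-with-a-hint rather than a failure; a sketch of the calling side (the remote path is a placeholder):

```go
// Sketch of how callers of fs.NewFs treat fs.ErrorIsFile: the returned
// Fs is valid and rooted at the parent directory of the named file.
f, err := fs.NewFs(ctx, "dropbox:path/to/maybe-a-file")
switch {
case err == fs.ErrorIsFile:
	// the root was a single file; f points at its parent directory
case err != nil:
	return err
}
_ = f
```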
|
|
||||||
// headerGenerator for dropbox sdk
|
// headerGenerator for dropbox sdk
|
||||||
func (f *Fs) headerGenerator(hostType string, namespace string, route string) map[string]string {
|
func (f *Fs) headerGenerator(hostType string, style string, namespace string, route string) map[string]string {
|
||||||
if f.ns == "" {
|
if f.ns == "" {
|
||||||
return map[string]string{}
|
return map[string]string{}
|
||||||
}
|
}
|
||||||
@@ -591,9 +562,6 @@ func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (fileInfo *fi
|
|||||||
}
|
}
|
||||||
fileInfo, ok := entry.(*files.FileMetadata)
|
fileInfo, ok := entry.(*files.FileMetadata)
|
||||||
if !ok {
|
if !ok {
|
||||||
if _, ok = entry.(*files.FolderMetadata); ok {
|
|
||||||
return nil, fs.ErrorIsDir
|
|
||||||
}
|
|
||||||
return nil, fs.ErrorNotAFile
|
return nil, fs.ErrorNotAFile
|
||||||
}
|
}
|
||||||
return fileInfo, nil
|
return fileInfo, nil
|
||||||
@@ -671,12 +639,12 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
|
|||||||
return shouldRetry(ctx, err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("list continue: %w", err)
|
return nil, errors.Wrap(err, "list continue")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for _, entry := range res.Entries {
|
for _, entry := range res.Entries {
|
||||||
leaf := f.opt.Enc.ToStandardName(entry.Name)
|
leaf := f.opt.Enc.ToStandardName(entry.Name)
|
||||||
d := fs.NewDir(leaf, time.Time{}).SetID(entry.SharedFolderId)
|
d := fs.NewDir(leaf, time.Now()).SetID(entry.SharedFolderId)
|
||||||
entries = append(entries, d)
|
entries = append(entries, d)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -745,7 +713,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
|
|||||||
return shouldRetry(ctx, err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("list continue: %w", err)
|
return nil, errors.Wrap(err, "list continue")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for _, entry := range res.Entries {
|
for _, entry := range res.Entries {
|
||||||
@@ -755,7 +723,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
|
|||||||
fs: f,
|
fs: f,
|
||||||
url: entry.PreviewUrl,
|
url: entry.PreviewUrl,
|
||||||
remote: entryPath,
|
remote: entryPath,
|
||||||
modTime: *entry.TimeInvited,
|
modTime: entry.TimeInvited,
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -811,7 +779,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
arg := files.ListFolderArg{
|
arg := files.ListFolderArg{
|
||||||
Path: f.opt.Enc.FromStandardPath(root),
|
Path: f.opt.Enc.FromStandardPath(root),
|
||||||
Recursive: false,
-Limit: 1000,
}
if root == "/" {
arg.Path = "" // Specify root folder as empty string
@@ -839,7 +806,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return shouldRetry(ctx, err)
})
if err != nil {
-return nil, fmt.Errorf("list continue: %w", err)
+return nil, errors.Wrap(err, "list continue")
}
}
for _, entry := range res.Entries {
@@ -863,7 +830,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
leaf := f.opt.Enc.ToStandardName(path.Base(entryPath))
remote := path.Join(dir, leaf)
if folderInfo != nil {
-d := fs.NewDir(remote, time.Time{}).SetID(folderInfo.Id)
+d := fs.NewDir(remote, time.Now()).SetID(folderInfo.Id)
entries = append(entries, d)
} else if fileInfo != nil {
o, err := f.newObjectWithInfo(ctx, remote, fileInfo)
@@ -882,7 +849,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e

// Put the object
//
-// Copy the reader in to the new object which is returned.
+// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
@@ -946,18 +913,18 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
if root == "/" {
return errors.New("can't remove root directory")
}
-encRoot := f.opt.Enc.FromStandardPath(root)

if check {
// check directory exists
_, err = f.getDirMetadata(ctx, root)
if err != nil {
-return fmt.Errorf("Rmdir: %w", err)
+return errors.Wrap(err, "Rmdir")
}

+root = f.opt.Enc.FromStandardPath(root)
// check directory empty
arg := files.ListFolderArg{
-Path: encRoot,
+Path: root,
Recursive: false,
}
if root == "/" {
@@ -969,7 +936,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
return shouldRetry(ctx, err)
})
if err != nil {
-return fmt.Errorf("Rmdir: %w", err)
+return errors.Wrap(err, "Rmdir")
}
if len(res.Entries) != 0 {
return errors.New("directory not empty")
@@ -978,7 +945,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)

// remove it
err = f.pacer.Call(func() (bool, error) {
-_, err = f.srv.DeleteV2(&files.DeleteArg{Path: encRoot})
+_, err = f.srv.DeleteV2(&files.DeleteArg{Path: root})
return shouldRetry(ctx, err)
})
return err
@@ -1001,9 +968,9 @@ func (f *Fs) Precision() time.Duration {

// Copy src to this remote using server-side copy operations.
//
-// This is stored with the remote path given.
+// This is stored with the remote path given
//
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1035,7 +1002,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return shouldRetry(ctx, err)
})
if err != nil {
-return nil, fmt.Errorf("copy failed: %w", err)
+return nil, errors.Wrap(err, "copy failed")
}

// Set the metadata
@@ -1045,7 +1012,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
err = dstObj.setMetadataFromEntry(fileInfo)
if err != nil {
-return nil, fmt.Errorf("copy failed: %w", err)
+return nil, errors.Wrap(err, "copy failed")
}

return dstObj, nil
@@ -1062,9 +1029,9 @@ func (f *Fs) Purge(ctx context.Context, dir string) (err error) {

// Move src to this remote using server-side move operations.
//
-// This is stored with the remote path given.
+// This is stored with the remote path given
//
-// It returns the destination Object and a possible error.
+// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1096,7 +1063,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return shouldRetry(ctx, err)
})
if err != nil {
-return nil, fmt.Errorf("move failed: %w", err)
+return nil, errors.Wrap(err, "move failed")
}

// Set the metadata
@@ -1106,7 +1073,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
err = dstObj.setMetadataFromEntry(fileInfo)
if err != nil {
-return nil, fmt.Errorf("move failed: %w", err)
+return nil, errors.Wrap(err, "move failed")
}
return dstObj, nil
}
@@ -1131,7 +1098,14 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
}
if expire < fs.DurationOff {
expiryTime := time.Now().Add(time.Duration(expire)).UTC().Round(time.Second)
-createArg.Settings.Expires = &expiryTime
+createArg.Settings.Expires = expiryTime
+}
+// FIXME note we can't set Settings for non enterprise dropbox
+// because of https://github.com/dropbox/dropbox-sdk-go-unofficial/issues/75
+// however this only goes wrong when we set Expires, so as a
+// work-around remove Settings unless expire is set.
+if expire == fs.DurationOff {
+createArg.Settings = nil
}

var linkRes sharing.IsSharedLinkMetadata
@@ -1156,7 +1130,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return
}
if len(listRes.Links) == 0 {
-err = errors.New("sharing link already exists, but list came back empty")
+err = errors.New("Dropbox says the sharing link already exists, but list came back empty")
return
}
linkRes = listRes.Links[0]
@@ -1168,7 +1142,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
case *sharing.FolderLinkMetadata:
link = res.Url
default:
-err = fmt.Errorf("don't know how to extract link, response has unknown format: %T", res)
+err = fmt.Errorf("Don't know how to extract link, response has unknown format: %T", res)
}
}
return
@@ -1214,7 +1188,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return shouldRetry(ctx, err)
})
if err != nil {
-return fmt.Errorf("MoveDir failed: %w", err)
+return errors.Wrap(err, "MoveDir failed")
}

return nil
@@ -1228,24 +1202,21 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return shouldRetry(ctx, err)
})
if err != nil {
-return nil, err
+return nil, errors.Wrap(err, "about failed")
}
var total uint64
-var used = q.Used
if q.Allocation != nil {
if q.Allocation.Individual != nil {
total += q.Allocation.Individual.Allocated
}
if q.Allocation.Team != nil {
total += q.Allocation.Team.Allocated
-// Override used with Team.Used as this includes q.Used already
-used = q.Allocation.Team.Used
}
}
usage = &fs.Usage{
Total: fs.NewUsageValue(int64(total)), // quota of bytes that can be used
-Used: fs.NewUsageValue(int64(used)), // bytes in use
+Used: fs.NewUsageValue(int64(q.Used)), // bytes in use
-Free: fs.NewUsageValue(int64(total - used)), // bytes which can be uploaded before reaching the quota
+Free: fs.NewUsageValue(int64(total - q.Used)), // bytes which can be uploaded before reaching the quota
}
return usage, nil
}
@@ -1332,12 +1303,10 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.

if timeout < 30 {
timeout = 30
-fs.Debugf(f, "Increasing poll interval to minimum 30s")
}

if timeout > 480 {
timeout = 480
-fs.Debugf(f, "Decreasing poll interval to maximum 480s")
}

err = f.pacer.Call(func() (bool, error) {
@@ -1373,7 +1342,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
return shouldRetry(ctx, err)
})
if err != nil {
-return "", fmt.Errorf("list continue: %w", err)
+return "", errors.Wrap(err, "list continue")
}
cursor = changeList.Cursor
var entryType fs.EntryType
@@ -1382,20 +1351,20 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
switch info := entry.(type) {
case *files.FolderMetadata:
entryType = fs.EntryDirectory
-entryPath = strings.TrimPrefix(info.PathDisplay, f.slashRootSlash)
+entryPath = strings.TrimLeft(info.PathDisplay, f.slashRootSlash)
case *files.FileMetadata:
entryType = fs.EntryObject
-entryPath = strings.TrimPrefix(info.PathDisplay, f.slashRootSlash)
+entryPath = strings.TrimLeft(info.PathDisplay, f.slashRootSlash)
case *files.DeletedMetadata:
entryType = fs.EntryObject
-entryPath = strings.TrimPrefix(info.PathDisplay, f.slashRootSlash)
+entryPath = strings.TrimLeft(info.PathDisplay, f.slashRootSlash)
default:
fs.Errorf(entry, "dropbox ChangeNotify: ignoring unknown EntryType %T", entry)
continue
}

if entryPath != "" {
-notifyFunc(f.opt.Enc.ToStandardPath(entryPath), entryType)
+notifyFunc(entryPath, entryType)
}
}
if !changeList.HasMore {
@@ -1410,13 +1379,6 @@ func (f *Fs) Hashes() hash.Set {
return hash.Set(DbHashType)
}

-// Shutdown the backend, closing any background tasks and any
-// cached connections.
-func (f *Fs) Shutdown(ctx context.Context) error {
-f.batcher.Shutdown()
-return nil
-}

// ------------------------------------------------------------

// Fs returns the parent Fs
@@ -1452,7 +1414,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
}
err := o.readMetaData(ctx)
if err != nil {
-return "", fmt.Errorf("failed to read hash from metadata: %w", err)
+return "", errors.Wrap(err, "failed to read hash from metadata")
}
return o.hash, nil
}
@@ -1576,110 +1538,97 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

// uploadChunked uploads the object in parts
//
-// Will introduce two additional network requests to start and finish the session.
-// If the size is unknown (i.e. -1) the method incurs one additional
-// request to the Dropbox API that does not carry a payload to close the append session.
+// Will work optimally if size is >= uploadChunkSize. If the size is either
+// unknown (i.e. -1) or smaller than uploadChunkSize, the method incurs an
+// avoidable request to the Dropbox API that does not carry payload.
func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *files.CommitInfo, size int64) (entry *files.FileMetadata, err error) {
-// start upload
+chunkSize := int64(o.fs.opt.ChunkSize)
+chunks := 0
+if size != -1 {
+chunks = int(size/chunkSize) + 1
+}
+in := readers.NewCountingReader(in0)
+buf := make([]byte, int(chunkSize))

+fmtChunk := func(cur int, last bool) {
+if chunks == 0 && last {
+fs.Debugf(o, "Streaming chunk %d/%d", cur, cur)
+} else if chunks == 0 {
+fs.Debugf(o, "Streaming chunk %d/unknown", cur)
+} else {
+fs.Debugf(o, "Uploading chunk %d/%d", cur, chunks)
+}
+}

+// write the first chunk
+fmtChunk(1, false)
var res *files.UploadSessionStartResult
+chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
err = o.fs.pacer.Call(func() (bool, error) {
-res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, nil)
+// seek to the start in case this is a retry
+if _, err = chunk.Seek(0, io.SeekStart); err != nil {
+return false, nil
+}
+res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, chunk)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}

-chunkSize := int64(o.fs.opt.ChunkSize)
-chunks, remainder := size/chunkSize, size%chunkSize
-if remainder > 0 {
-chunks++
-}

-// write chunks
-in := readers.NewCountingReader(in0)
-buf := make([]byte, int(chunkSize))
cursor := files.UploadSessionCursor{
SessionId: res.SessionId,
Offset: 0,
}
-appendArg := files.UploadSessionAppendArg{Cursor: &cursor}
-for currentChunk := 1; ; currentChunk++ {
-cursor.Offset = in.BytesRead()
+appendArg := files.UploadSessionAppendArg{
+Cursor: &cursor,
+Close: false,
+}

-if chunks < 0 {
-fs.Debugf(o, "Streaming chunk %d/unknown", currentChunk)
-} else {
-fs.Debugf(o, "Uploading chunk %d/%d", currentChunk, chunks)
+// write more whole chunks (if any)
+currentChunk := 2
+for {
+if chunks > 0 && currentChunk >= chunks {
+// if the size is known, only upload full chunks. Remaining bytes are uploaded with
+// the UploadSessionFinish request.
+break
+} else if chunks == 0 && in.BytesRead()-cursor.Offset < uint64(chunkSize) {
+// if the size is unknown, upload as long as we can read full chunks from the reader.
+// The UploadSessionFinish request will not contain any payload.
+break
}
+cursor.Offset = in.BytesRead()
-chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
-skip := int64(0)
+fmtChunk(currentChunk, false)
+chunk = readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
err = o.fs.pacer.Call(func() (bool, error) {
// seek to the start in case this is a retry
-if _, err = chunk.Seek(skip, io.SeekStart); err != nil {
-return false, err
+if _, err = chunk.Seek(0, io.SeekStart); err != nil {
+return false, nil
}
err = o.fs.srv.UploadSessionAppendV2(&appendArg, chunk)
-// after session is started, we retry everything
-if err != nil {
-// Check for incorrect offset error and retry with new offset
-if uErr, ok := err.(files.UploadSessionAppendV2APIError); ok {
-if uErr.EndpointError != nil && uErr.EndpointError.IncorrectOffset != nil {
-correctOffset := uErr.EndpointError.IncorrectOffset.CorrectOffset
-delta := int64(correctOffset) - int64(cursor.Offset)
-skip += delta
-what := fmt.Sprintf("incorrect offset error received: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
-if skip < 0 {
-return false, fmt.Errorf("can't seek backwards to correct offset: %s", what)
-} else if skip == chunkSize {
-fs.Debugf(o, "%s: chunk received OK - continuing", what)
-return false, nil
-} else if skip > chunkSize {
-// This error should never happen
-return false, fmt.Errorf("can't seek forwards by more than a chunk to correct offset: %s", what)
-}
-// Skip the sent data on next retry
-cursor.Offset = uint64(int64(cursor.Offset) + delta)
-fs.Debugf(o, "%s: skipping bytes on retry to fix offset", what)
-}
-}
-}
+// after the first chunk is uploaded, we retry everything
return err != nil, err
})
if err != nil {
return nil, err
}
-if appendArg.Close {
-break
-}
+currentChunk++

-if size > 0 {
-// if size is known, check if next chunk is final
-appendArg.Close = uint64(size)-in.BytesRead() <= uint64(chunkSize)
-if in.BytesRead() > uint64(size) {
-return nil, fmt.Errorf("expected %d bytes in input, but have read %d so far", size, in.BytesRead())
-}
-} else {
-// if size is unknown, upload as long as we can read full chunks from the reader
-appendArg.Close = in.BytesRead()-cursor.Offset < uint64(chunkSize)
-}
}

-// finish upload
+// write the remains
cursor.Offset = in.BytesRead()
args := &files.UploadSessionFinishArg{
Cursor: &cursor,
Commit: commitInfo,
}
-// If we are batching then we should have written all the data now
-// store the commit info now for a batch commit
-if o.fs.batcher.Batching() {
-return o.fs.batcher.Commit(ctx, o.remote, args)
-}
+fmtChunk(currentChunk, true)
+chunk = readers.NewRepeatableReaderBuffer(in, buf)

err = o.fs.pacer.Call(func() (bool, error) {
-entry, err = o.fs.srv.UploadSessionFinish(args, nil)
+// seek to the start in case this is a retry
+if _, err = chunk.Seek(0, io.SeekStart); err != nil {
+return false, nil
+}
+entry, err = o.fs.srv.UploadSessionFinish(args, chunk)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
@@ -1723,7 +1672,7 @@ func checkPathLength(name string) (err error) {

// Update the already existing object
//
-// Copy the reader into the object updating modTime and size.
+// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
@@ -1732,13 +1681,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
remote := o.remotePath()
if ignoredFiles.MatchString(remote) {
-return fserrors.NoRetryError(fmt.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
+return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
}
commitInfo := files.NewCommitInfo(o.fs.opt.Enc.FromStandardPath(o.remotePath()))
commitInfo.Mode.Tag = "overwrite"
// The Dropbox API only accepts timestamps in UTC with second precision.
-clientModified := src.ModTime(ctx).UTC().Round(time.Second)
-commitInfo.ClientModified = &clientModified
+commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second)
// Don't attempt to create filenames that are too long
if cErr := checkPathLength(commitInfo.Path); cErr != nil {
return cErr
@@ -1747,25 +1695,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
size := src.Size()
var err error
var entry *files.FileMetadata
-if size > int64(o.fs.opt.ChunkSize) || size < 0 || o.fs.batcher.Batching() {
+if size > int64(o.fs.opt.ChunkSize) || size == -1 {
entry, err = o.uploadChunked(ctx, in, commitInfo, size)
} else {
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-entry, err = o.fs.srv.Upload(&files.UploadArg{CommitInfo: *commitInfo}, in)
+entry, err = o.fs.srv.Upload(commitInfo, in)
return shouldRetry(ctx, err)
})
}
if err != nil {
-return fmt.Errorf("upload failed: %w", err)
-}
-// If we haven't received data back from batch upload then fake it
-//
-// This will only happen if we are uploading async batches
-if entry == nil {
-o.bytes = size
-o.modTime = *commitInfo.ClientModified
-o.hash = "" // we don't have this
-return nil
+return errors.Wrap(err, "upload failed")
}
return o.setMetadataFromEntry(entry)
}
@@ -1794,7 +1733,6 @@ var (
_ fs.PublicLinker = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
-_ fs.Shutdowner = &Fs{}
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)

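Both versions of uploadChunked shown above drive the same three Dropbox calls: UploadSessionStart to open a session, UploadSessionAppendV2 for each further whole chunk, and UploadSessionFinish to ship the remainder together with the commit metadata. A minimal sketch of that flow for a known size follows; it is not taken from the diff, the interface signatures are inferred from the calls above, the SDK import path depends on the pinned version, and the pacer/retry and CountingReader bookkeeping of the real code are omitted.

package sketch

import (
	"io"

	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files" // assumed import path
)

// sessionClient is the subset of the Dropbox files client used by the sketch;
// rclone's f.srv provides these methods.
type sessionClient interface {
	UploadSessionStart(arg *files.UploadSessionStartArg, content io.Reader) (*files.UploadSessionStartResult, error)
	UploadSessionAppendV2(arg *files.UploadSessionAppendArg, content io.Reader) error
	UploadSessionFinish(arg *files.UploadSessionFinishArg, content io.Reader) (*files.FileMetadata, error)
}

// uploadInChunks streams size bytes from r in chunkSize pieces and commits them.
func uploadInChunks(srv sessionClient, r io.Reader, commit *files.CommitInfo, size, chunkSize int64) (*files.FileMetadata, error) {
	// 1. Open the session with the first chunk.
	first := min64(size, chunkSize)
	res, err := srv.UploadSessionStart(&files.UploadSessionStartArg{}, io.LimitReader(r, first))
	if err != nil {
		return nil, err
	}
	cursor := files.UploadSessionCursor{SessionId: res.SessionId, Offset: uint64(first)}

	// 2. Append further whole chunks; the cursor always holds the bytes sent so far.
	for size-int64(cursor.Offset) > chunkSize {
		arg := files.UploadSessionAppendArg{Cursor: &cursor}
		if err := srv.UploadSessionAppendV2(&arg, io.LimitReader(r, chunkSize)); err != nil {
			return nil, err
		}
		cursor.Offset += uint64(chunkSize)
	}

	// 3. Finish with the remaining bytes (possibly none) plus the commit info.
	return srv.UploadSessionFinish(&files.UploadSessionFinishArg{Cursor: &cursor, Commit: commit}, r)
}

func min64(a, b int64) int64 {
	if a < b {
		return a
	}
	return b
}

The two sides of the hunk differ mainly in how they decide when to stop appending: the newer code closes the session through appendArg.Close once fewer than a full chunk remains, while the older code stops one chunk early and lets UploadSessionFinish carry the rest.
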
@@ -2,16 +2,14 @@ package fichier

import (
"context"
-"errors"
-"fmt"
"io"
"net/http"
-"net/url"
"regexp"
"strconv"
"strings"
"time"

+"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/rest"
@@ -28,44 +26,25 @@ var retryErrorCodes = []int{
509, // Bandwidth Limit Exceeded
}

-var errorRegex = regexp.MustCompile(`#(\d{1,3})`)

-func parseFichierError(err error) int {
-matches := errorRegex.FindStringSubmatch(err.Error())
-if len(matches) == 0 {
-return 0
-}
-code, err := strconv.Atoi(matches[1])
-if err != nil {
-fs.Debugf(nil, "failed parsing fichier error: %v", err)
-return 0
-}
-return code
-}

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
-// 1Fichier uses HTTP error code 403 (Forbidden) for all kinds of errors with
-// responses looking like this: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"
+// Detect this error which the integration tests provoke
+// error HTTP error 403 (403 Forbidden) returned body: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"
//
-// We attempt to parse the actual 1Fichier error code from this body and handle it accordingly
-// Most importantly #374 (Flood detected: IP locked) which the integration tests provoke
-// The list below is far from complete and should be expanded if we see any more error codes.
-if err != nil {
-switch parseFichierError(err) {
-case 93:
-return false, err // No such user
-case 186:
-return false, err // IP blocked?
-case 374:
-fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
-time.Sleep(30 * time.Second)
-default:
-}
+// https://1fichier.com/api.html
+//
+// file/ls.cgi is limited :
+//
+// Warning (can be changed in case of abuses) :
+// List all files of the account is limited to 1 request per hour.
+// List folders is limited to 5 000 results and 1 request per folder per 30s.
+if err != nil && strings.Contains(err.Error(), "Flood detected") {
+fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
+time.Sleep(30 * time.Second)
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
@@ -101,25 +80,16 @@ func (f *Fs) readFileInfo(ctx context.Context, url string) (*File, error) {
return shouldRetry(ctx, resp, err)
})
if err != nil {
-return nil, fmt.Errorf("couldn't read file info: %w", err)
+return nil, errors.Wrap(err, "couldn't read file info")
}

return &file, err
}

-// maybe do some actual validation later if necessary
-func validToken(token *GetTokenResponse) bool {
-return token.Status == "OK"
-}

func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
request := DownloadRequest{
URL: url,
Single: 1,
-Pass: f.opt.FilePassword,
-}
-if f.opt.CDN {
-request.CDN = 1
}
opts := rest.Opts{
Method: "POST",
@@ -129,11 +99,10 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
var token GetTokenResponse
err := f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, &token)
-doretry, err := shouldRetry(ctx, resp, err)
-return doretry || !validToken(&token), err
+return shouldRetry(ctx, resp, err)
})
if err != nil {
-return nil, fmt.Errorf("couldn't list files: %w", err)
+return nil, errors.Wrap(err, "couldn't list files")
}

return &token, nil
@@ -149,16 +118,10 @@ func fileFromSharedFile(file *SharedFile) File {

func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntries, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: "https://1fichier.com/dir/",
Path: id,
Parameters: map[string][]string{"json": {"1"}},
-ContentType: "application/x-www-form-urlencoded",
-}
-if f.opt.FolderPassword != "" {
-opts.Method = "POST"
-opts.Parameters = nil
-opts.Body = strings.NewReader("json=1&pass=" + url.QueryEscape(f.opt.FolderPassword))
}

var sharedFiles SharedFolderResponse
@@ -167,7 +130,7 @@ func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntr
return shouldRetry(ctx, resp, err)
})
if err != nil {
-return nil, fmt.Errorf("couldn't list files: %w", err)
+return nil, errors.Wrap(err, "couldn't list files")
}

entries = make([]fs.DirEntry, len(sharedFiles))
@@ -196,7 +159,7 @@ func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesLi
return shouldRetry(ctx, resp, err)
})
if err != nil {
-return nil, fmt.Errorf("couldn't list files: %w", err)
+return nil, errors.Wrap(err, "couldn't list files")
}
for i := range filesList.Items {
item := &filesList.Items[i]
@@ -224,7 +187,7 @@ func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *Fol
return shouldRetry(ctx, resp, err)
})
if err != nil {
-return nil, fmt.Errorf("couldn't list folders: %w", err)
+return nil, errors.Wrap(err, "couldn't list folders")
}
foldersList.Name = f.opt.Enc.ToStandardName(foldersList.Name)
for i := range foldersList.SubFolders {
@@ -318,7 +281,7 @@ func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (respons
return shouldRetry(ctx, resp, err)
})
if err != nil {
-return nil, fmt.Errorf("couldn't create folder: %w", err)
+return nil, errors.Wrap(err, "couldn't create folder")
}

// fs.Debugf(f, "Created Folder `%s` in id `%s`", name, directoryID)
@@ -345,10 +308,10 @@ func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (respo
return shouldRetry(ctx, resp, err)
})
if err != nil {
-return nil, fmt.Errorf("couldn't remove folder: %w", err)
+return nil, errors.Wrap(err, "couldn't remove folder")
}
if response.Status != "OK" {
-return nil, fmt.Errorf("can't remove folder: %s", response.Message)
+return nil, errors.New("Can't remove non-empty dir")
}

// fs.Debugf(f, "Removed Folder with id `%s`", directoryID)
@@ -375,7 +338,7 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes
})

if err != nil {
-return nil, fmt.Errorf("couldn't remove file: %w", err)
+return nil, errors.Wrap(err, "couldn't remove file")
}

// fs.Debugf(f, "Removed file with url `%s`", url)
@@ -402,33 +365,7 @@ func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename stri
})

if err != nil {
-return nil, fmt.Errorf("couldn't copy file: %w", err)
-}

-return response, nil
-}

-func (f *Fs) moveDir(ctx context.Context, folderID int, newLeaf string, destinationFolderID int) (response *MoveDirResponse, err error) {
-request := &MoveDirRequest{
-FolderID: folderID,
-DestinationFolderID: destinationFolderID,
-Rename: newLeaf,
-// DestinationUser: destinationUser,
-}

-opts := rest.Opts{
-Method: "POST",
-Path: "/folder/mv.cgi",
-}

-response = &MoveDirResponse{}
-err = f.pacer.Call(func() (bool, error) {
-resp, err := f.rest.CallJSON(ctx, &opts, request, response)
-return shouldRetry(ctx, resp, err)
-})

-if err != nil {
-return nil, fmt.Errorf("couldn't move dir: %w", err)
+return nil, errors.Wrap(err, "couldn't copy file")
}

return response, nil
@@ -453,35 +390,7 @@ func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename stri
})

if err != nil {
-return nil, fmt.Errorf("couldn't copy file: %w", err)
-}

-return response, nil
-}

-func (f *Fs) renameFile(ctx context.Context, url string, newName string) (response *RenameFileResponse, err error) {
-request := &RenameFileRequest{
-URLs: []RenameFileURL{
-{
-URL: url,
-Filename: newName,
-},
-},
-}

-opts := rest.Opts{
-Method: "POST",
-Path: "/file/rename.cgi",
-}

-response = &RenameFileResponse{}
-err = f.pacer.Call(func() (bool, error) {
-resp, err := f.rest.CallJSON(ctx, &opts, request, response)
-return shouldRetry(ctx, resp, err)
-})

-if err != nil {
-return nil, fmt.Errorf("couldn't rename file: %w", err)
+return nil, errors.Wrap(err, "couldn't copy file")
}

return response, nil
@@ -502,7 +411,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
return shouldRetry(ctx, resp, err)
})
if err != nil {
-return nil, fmt.Errorf("didn't get an upload node: %w", err)
+return nil, errors.Wrap(err, "didnt got an upload node")
}

// fs.Debugf(f, "Got Upload node")
@@ -516,7 +425,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
fileName = f.opt.Enc.FromStandardName(fileName)

if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
-return nil, errors.New("invalid UploadID")
+return nil, errors.New("Invalid UploadID")
}

opts := rest.Opts{
@@ -546,7 +455,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
})

if err != nil {
-return nil, fmt.Errorf("couldn't upload file: %w", err)
+return nil, errors.Wrap(err, "couldn't upload file")
}

// fs.Debugf(f, "Uploaded File `%s`", fileName)
@@ -558,7 +467,7 @@ func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (re
// fs.Debugf(f, "Ending File Upload `%s`", uploadID)

if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
-return nil, errors.New("invalid UploadID")
+return nil, errors.New("Invalid UploadID")
}

opts := rest.Opts{
@@ -580,7 +489,7 @@ func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (re
})

if err != nil {
-return nil, fmt.Errorf("couldn't finish file upload: %w", err)
+return nil, errors.Wrap(err, "couldn't finish file upload")
}

return response, err

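The newer side of the shouldRetry hunk above keys its decisions off the numeric code that 1Fichier embeds in its HTTP 403 bodies (for example #374 for flood detection). A self-contained sketch of that parsing step, mirroring the parseFichierError helper shown in the hunk; the sample message below is illustrative, not captured output.

package sketch

import (
	"errors"
	"fmt"
	"regexp"
	"strconv"
)

var errorRegex = regexp.MustCompile(`#(\d{1,3})`)

// parseFichierError extracts the numeric 1Fichier error code from an error
// message, returning 0 when no code is present or it fails to parse.
func parseFichierError(err error) int {
	matches := errorRegex.FindStringSubmatch(err.Error())
	if len(matches) == 0 {
		return 0
	}
	code, convErr := strconv.Atoi(matches[1])
	if convErr != nil {
		return 0
	}
	return code
}

func demo() {
	err := errors.New(`HTTP error 403 (403 Forbidden): {"message":"Flood detected: IP Locked #374","status":"KO"}`)
	fmt.Println(parseFichierError(err)) // 374
}

In the newer code, codes 93 (no such user) and 186 give up immediately, while 374 sleeps for 30 seconds before handing the error to the generic retry logic.
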
@@ -1,9 +1,7 @@
-// Package fichier provides an interface to the 1Fichier storage system.
package fichier

import (
"context"
-"errors"
"fmt"
"io"
"net/http"
@@ -11,6 +9,7 @@ import (
"strings"
"time"

+"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -36,29 +35,16 @@ func init() {
fs.Register(&fs.RegInfo{
Name: "fichier",
Description: "1Fichier",
-NewFs: NewFs,
+Config: func(ctx context.Context, name string, config configmap.Mapper) {
+},
+NewFs: NewFs,
Options: []fs.Option{{
-Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
+Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
Name: "api_key",
-Sensitive: true,
}, {
-Help: "If you want to download a shared folder, add this parameter.",
+Help: "If you want to download a shared folder, add this parameter",
Name: "shared_folder",
-Advanced: true,
-}, {
-Help: "If you want to download a shared file that is password protected, add this parameter.",
-Name: "file_password",
-Advanced: true,
-IsPassword: true,
-}, {
-Help: "If you want to list the files in a shared folder that is password protected, add this parameter.",
-Name: "folder_password",
-Advanced: true,
-IsPassword: true,
-}, {
-Help: "Set if you wish to use CDN download links.",
-Name: "cdn",
-Default: false,
+Required: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
@@ -91,12 +77,9 @@ func init() {

// Options defines the configuration for this backend
type Options struct {
APIKey string `config:"api_key"`
SharedFolder string `config:"shared_folder"`
-FilePassword string `config:"file_password"`
-FolderPassword string `config:"folder_password"`
-CDN bool `config:"cdn"`
-Enc encoder.MultiEncoder `config:"encoding"`
+Enc encoder.MultiEncoder `config:"encoding"`
}

// Fs is the interface a cloud storage system must provide
@@ -302,7 +285,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
path, ok := f.dirCache.GetInv(directoryID)

if !ok {
-return nil, errors.New("cannot find dir in dircache")
+return nil, errors.New("Cannot find dir in dircache")
}

return f.newObjectFromFile(ctx, path, file), nil
@@ -340,7 +323,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
if size > int64(300e9) {
-return nil, errors.New("File too big, can't upload")
+return nil, errors.New("File too big, cant upload")
} else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
@@ -442,45 +425,25 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, fs.ErrorCantMove
}

-// Find current directory ID
-_, currentDirectoryID, err := f.dirCache.FindPath(ctx, remote, false)
-if err != nil {
-return nil, err
-}

// Create temporary object
dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
if err != nil {
return nil, err
}

-// If it is in the correct directory, just rename it
-var url string
-if currentDirectoryID == directoryID {
-resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
-if err != nil {
-return nil, fmt.Errorf("couldn't rename file: %w", err)
-}
-if resp.Status != "OK" {
-return nil, fmt.Errorf("couldn't rename file: %s", resp.Message)
-}
-url = resp.URLs[0].URL
-} else {
-folderID, err := strconv.Atoi(directoryID)
-if err != nil {
-return nil, err
-}
-resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
-if err != nil {
-return nil, fmt.Errorf("couldn't move file: %w", err)
-}
-if resp.Status != "OK" {
-return nil, fmt.Errorf("couldn't move file: %s", resp.Message)
-}
-url = resp.URLs[0]
+folderID, err := strconv.Atoi(directoryID)
+if err != nil {
+return nil, err
+}
+resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
+if err != nil {
+return nil, errors.Wrap(err, "couldn't move file")
+}
+if resp.Status != "OK" {
+return nil, errors.New("couldn't move file")
}

-file, err := f.readFileInfo(ctx, url)
+file, err := f.readFileInfo(ctx, resp.URLs[0])
if err != nil {
return nil, errors.New("couldn't read file data")
}
@@ -488,51 +451,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil
}

-// DirMove moves src, srcRemote to this remote at dstRemote
-// using server-side move operations.
-//
-// Will only be called if src.Fs().Name() == f.Name()
-//
-// If it isn't possible then return fs.ErrorCantDirMove.
-//
-// If destination exists then return fs.ErrorDirExists.
-//
-// This is complicated by the fact that we can't use moveDir to move
-// to a different directory AND rename at the same time as it can
-// overwrite files in the source directory.
-func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
-srcFs, ok := src.(*Fs)
-if !ok {
-fs.Debugf(srcFs, "Can't move directory - not same remote type")
-return fs.ErrorCantDirMove
-}

-srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
-if err != nil {
-return err
-}
-srcIDnumeric, err := strconv.Atoi(srcID)
-if err != nil {
-return err
-}
-dstDirectoryIDnumeric, err := strconv.Atoi(dstDirectoryID)
-if err != nil {
-return err
-}

-var resp *MoveDirResponse
-resp, err = f.moveDir(ctx, srcIDnumeric, dstLeaf, dstDirectoryIDnumeric)
-if err != nil {
-return fmt.Errorf("couldn't rename leaf: %w", err)
-}
-if resp.Status != "OK" {
-return fmt.Errorf("couldn't rename leaf: %s", resp.Message)
-}

-srcFs.dirCache.FlushDir(srcRemote)
-return nil
-}

// Copy src to this remote using server side move operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
@@ -553,10 +471,10 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
resp, err := f.copyFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
-return nil, fmt.Errorf("couldn't move file: %w", err)
+return nil, errors.Wrap(err, "couldn't move file")
}
if resp.Status != "OK" {
-return nil, fmt.Errorf("couldn't move file: %s", resp.Message)
+return nil, errors.New("couldn't move file")
}

file, err := f.readFileInfo(ctx, resp.URLs[0].ToURL)
@@ -567,32 +485,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil
}

-// About gets quota information
-func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
-opts := rest.Opts{
-Method: "POST",
-Path: "/user/info.cgi",
-ContentType: "application/json",
-}
-var accountInfo AccountInfo
-var resp *http.Response
-err = f.pacer.Call(func() (bool, error) {
-resp, err = f.rest.CallJSON(ctx, &opts, nil, &accountInfo)
-return shouldRetry(ctx, resp, err)
-})
-if err != nil {
-return nil, fmt.Errorf("failed to read user info: %w", err)
-}

-// FIXME max upload size would be useful to use in Update
-usage = &fs.Usage{
-Used: fs.NewUsageValue(accountInfo.ColdStorage), // bytes in use
-Total: fs.NewUsageValue(accountInfo.AvailableColdStorage), // bytes total
-Free: fs.NewUsageValue(accountInfo.AvailableColdStorage - accountInfo.ColdStorage), // bytes free
-}
-return usage, nil
-}

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
o, err := f.NewObject(ctx, remote)
@@ -606,7 +498,6 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
-_ fs.DirMover = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)

@@ -2,12 +2,11 @@ package fichier

import (
"context"
-"errors"
-"fmt"
"io"
"net/http"
"time"

+"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/rest"
@@ -123,7 +122,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Delete duplicate after successful upload
err = o.Remove(ctx)
if err != nil {
-return fmt.Errorf("failed to remove old version: %w", err)
+return errors.Wrap(err, "failed to remove old version")
}

// Replace guts of old object with new one

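The change repeated throughout these files swaps github.com/pkg/errors wrapping for the standard library's %w verb. A small sketch, using only the standard library, showing that the wrapped cause remains inspectable either way:

package sketch

import (
	"errors"
	"fmt"
	iofs "io/fs"
)

func wrapped() error {
	base := iofs.ErrNotExist
	// pkg/errors style (older side of the hunks): errors.Wrap(base, "couldn't read file info")
	// stdlib style (newer side of the hunks):
	return fmt.Errorf("couldn't read file info: %w", base)
}

func stillVisible() bool {
	// errors.Is walks the %w chain, so callers keep matching on the sentinel.
	return errors.Is(wrapped(), iofs.ErrNotExist) // true
}
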
@@ -19,8 +19,6 @@ type ListFilesRequest struct {
type DownloadRequest struct {
URL string `json:"url"`
Single int `json:"single"`
-Pass string `json:"pass,omitempty"`
-CDN int `json:"cdn,omitempty"`
}

// RemoveFolderRequest is the request structure of the corresponding request
@@ -65,25 +63,8 @@ type MoveFileRequest struct {

// MoveFileResponse is the response structure of the corresponding request
type MoveFileResponse struct {
Status string `json:"status"`
-Message string `json:"message"`
+URLs []string `json:"urls"`
-URLs []string `json:"urls"`
-}

-// MoveDirRequest is the request structure of the corresponding request
-type MoveDirRequest struct {
-FolderID int `json:"folder_id"`
-DestinationFolderID int `json:"destination_folder_id,omitempty"`
-DestinationUser string `json:"destination_user"`
-Rename string `json:"rename,omitempty"`
-}

-// MoveDirResponse is the response structure of the corresponding request
-type MoveDirResponse struct {
-Status string `json:"status"`
-Message string `json:"message"`
-OldName string `json:"old_name"`
-NewName string `json:"new_name"`
}

// CopyFileRequest is the request structure of the corresponding request
@@ -95,42 +76,17 @@ type CopyFileRequest struct {

// CopyFileResponse is the response structure of the corresponding request
type CopyFileResponse struct {
Status string `json:"status"`
-Message string `json:"message"`
-Copied int `json:"copied"`
-URLs []FileCopy `json:"urls"`
+Copied int `json:"copied"`
+URLs []FileCopy `json:"urls"`
}

-// FileCopy is used in the CopyFileResponse
+// FileCopy is used in the the CopyFileResponse
type FileCopy struct {
FromURL string `json:"from_url"`
ToURL string `json:"to_url"`
}

-// RenameFileURL is the data structure to rename a single file
-type RenameFileURL struct {
-URL string `json:"url"`
-Filename string `json:"filename"`
-}

-// RenameFileRequest is the request structure of the corresponding request
-type RenameFileRequest struct {
-URLs []RenameFileURL `json:"urls"`
-Pretty int `json:"pretty"`
-}

-// RenameFileResponse is the response structure of the corresponding request
-type RenameFileResponse struct {
-Status string `json:"status"`
-Message string `json:"message"`
-Renamed int `json:"renamed"`
-URLs []struct {
-URL string `json:"url"`
-OldFilename string `json:"old_filename"`
-NewFilename string `json:"new_filename"`
-} `json:"urls"`
-}

// GetUploadNodeResponse is the response structure of the corresponding request
type GetUploadNodeResponse struct {
ID string `json:"id"`
@@ -199,34 +155,3 @@ type FoldersList struct {
Status string `json:"Status"`
SubFolders []Folder `json:"sub_folders"`
}

-// AccountInfo is the structure how 1Fichier returns user info
-type AccountInfo struct {
-StatsDate string `json:"stats_date"`
-MailRM string `json:"mail_rm"`
-DefaultQuota int64 `json:"default_quota"`
-UploadForbidden string `json:"upload_forbidden"`
-PageLimit int `json:"page_limit"`
-ColdStorage int64 `json:"cold_storage"`
-Status string `json:"status"`
-UseCDN string `json:"use_cdn"`
-AvailableColdStorage int64 `json:"available_cold_storage"`
-DefaultPort string `json:"default_port"`
-DefaultDomain int `json:"default_domain"`
-Email string `json:"email"`
-DownloadMenu string `json:"download_menu"`
-FTPDID int `json:"ftp_did"`
-DefaultPortFiles string `json:"default_port_files"`
-FTPReport string `json:"ftp_report"`
-OverQuota int64 `json:"overquota"`
-AvailableStorage int64 `json:"available_storage"`
-CDN string `json:"cdn"`
-Offer string `json:"offer"`
-SubscriptionEnd string `json:"subscription_end"`
-TFA string `json:"2fa"`
-AllowedColdStorage int64 `json:"allowed_cold_storage"`
-HotStorage int64 `json:"hot_storage"`
-DefaultColdStorageQuota int64 `json:"default_cold_storage_quota"`
-FTPMode string `json:"ftp_mode"`
-RUReport string `json:"ru_report"`
-}

@@ -5,7 +5,6 @@ package api

 import (
 	"bytes"
-	"encoding/json"
 	"fmt"
 	"reflect"
 	"strings"
@@ -19,7 +18,7 @@ const (
 	timeFormatJSON = `"` + timeFormatParameters + `"`
 )

-// Time represents date and time information for the
+// Time represents represents date and time information for the
 // filefabric API
 type Time time.Time

@@ -52,50 +51,15 @@ func (t Time) String() string {
 	return time.Time(t).UTC().Format(timeFormatParameters)
 }

-// Int represents an integer which can be represented in JSON as a
-// quoted integer or an integer.
-type Int int
-
-// MarshalJSON turns a Int into JSON
-func (i *Int) MarshalJSON() (out []byte, err error) {
-	return json.Marshal((*int)(i))
-}
-
-// UnmarshalJSON turns JSON into a Int
-func (i *Int) UnmarshalJSON(data []byte) error {
-	if len(data) >= 2 && data[0] == '"' && data[len(data)-1] == '"' {
-		data = data[1 : len(data)-1]
-	}
-	return json.Unmarshal(data, (*int)(i))
-}
-
-// String represents an string which can be represented in JSON as a
-// quoted string or an integer.
-type String string
-
-// MarshalJSON turns a String into JSON
-func (s *String) MarshalJSON() (out []byte, err error) {
-	return json.Marshal((*string)(s))
-}
-
-// UnmarshalJSON turns JSON into a String
-func (s *String) UnmarshalJSON(data []byte) error {
-	err := json.Unmarshal(data, (*string)(s))
-	if err != nil {
-		*s = String(data)
-	}
-	return nil
-}

 // Status return returned in all status responses
 type Status struct {
 	Code    string `json:"status"`
 	Message string `json:"statusmessage"`
-	TaskID  String `json:"taskid"`
+	TaskID  string `json:"taskid"`
 	// Warning string `json:"warning"` // obsolete
 }

-// Status satisfies the error interface
+// Status statisfies the error interface
 func (e *Status) Error() string {
 	return fmt.Sprintf("%s (%s)", e.Message, e.Code)
 }
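The String and Int helpers removed above appear to exist so that fields such as taskid decode whether the server returns them quoted or as bare values. A minimal standalone sketch of that fallback (not part of the change itself), reusing the UnmarshalJSON body shown above and only encoding/json:

// Standalone sketch: a string type whose UnmarshalJSON accepts either a JSON
// string or a bare value, mirroring the String type removed above.
package main

import (
	"encoding/json"
	"fmt"
)

type String string

func (s *String) UnmarshalJSON(data []byte) error {
	// Try the normal case first: a quoted JSON string.
	if err := json.Unmarshal(data, (*string)(s)); err != nil {
		// Fall back to keeping the raw bytes (e.g. a bare number like 42).
		*s = String(data)
	}
	return nil
}

func main() {
	var a, b struct {
		TaskID String `json:"taskid"`
	}
	_ = json.Unmarshal([]byte(`{"taskid":"42"}`), &a) // quoted string
	_ = json.Unmarshal([]byte(`{"taskid":42}`), &b)   // bare number
	fmt.Println(a.TaskID, b.TaskID)                   // both print 42
}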
@@ -151,7 +115,7 @@ type GetFolderContentsResponse struct {
 	Total  int    `json:"total,string"`
 	Items  []Item `json:"filelist"`
 	Folder Item   `json:"folder"`
-	From   Int    `json:"from"`
+	From   int    `json:"from,string"`
 	//Count int `json:"count"`
 	Pid           string `json:"pid"`
 	RefreshResult Status `json:"refreshresult"`
@@ -17,9 +17,9 @@ import (
 	"bytes"
 	"context"
 	"encoding/base64"
-	"errors"
 	"fmt"
 	"io"
+	"io/ioutil"
 	"net/http"
 	"net/url"
 	"path"
@@ -32,6 +32,7 @@ import (
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/random"

+	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/filefabric/api"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
@@ -64,7 +65,7 @@ func init() {
|
|||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Options: []fs.Option{{
|
Options: []fs.Option{{
|
||||||
Name: "url",
|
Name: "url",
|
||||||
Help: "URL of the Enterprise File Fabric to connect to.",
|
Help: "URL of the Enterprise File Fabric to connect to",
|
||||||
Required: true,
|
Required: true,
|
||||||
Examples: []fs.OptionExample{{
|
Examples: []fs.OptionExample{{
|
||||||
Value: "https://storagemadeeasy.com",
|
Value: "https://storagemadeeasy.com",
|
||||||
@@ -78,16 +79,14 @@ func init() {
|
|||||||
}},
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: "root_folder_id",
|
Name: "root_folder_id",
|
||||||
Help: `ID of the root folder.
|
Help: `ID of the root folder
|
||||||
|
|
||||||
Leave blank normally.
|
Leave blank normally.
|
||||||
|
|
||||||
Fill in to make rclone start with directory of a given ID.
|
Fill in to make rclone start with directory of a given ID.
|
||||||
`,
|
`,
|
||||||
Sensitive: true,
|
|
||||||
}, {
|
}, {
|
||||||
Name: "permanent_token",
|
Name: "permanent_token",
|
||||||
Help: `Permanent Authentication Token.
|
Help: `Permanent Authentication Token
|
||||||
|
|
||||||
A Permanent Authentication Token can be created in the Enterprise File
|
A Permanent Authentication Token can be created in the Enterprise File
|
||||||
Fabric, on the users Dashboard under Security, there is an entry
|
Fabric, on the users Dashboard under Security, there is an entry
|
||||||
@@ -98,28 +97,26 @@ These tokens are normally valid for several years.
|
|||||||
|
|
||||||
For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens
|
For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens
|
||||||
`,
|
`,
|
||||||
Sensitive: true,
|
|
||||||
}, {
|
}, {
|
||||||
Name: "token",
|
Name: "token",
|
||||||
Help: `Session Token.
|
Help: `Session Token
|
||||||
|
|
||||||
This is a session token which rclone caches in the config file. It is
|
This is a session token which rclone caches in the config file. It is
|
||||||
usually valid for 1 hour.
|
usually valid for 1 hour.
|
||||||
|
|
||||||
Don't set this value - rclone will set it automatically.
|
Don't set this value - rclone will set it automatically.
|
||||||
`,
|
`,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
Sensitive: true,
|
|
||||||
}, {
|
}, {
|
||||||
Name: "token_expiry",
|
Name: "token_expiry",
|
||||||
Help: `Token expiry time.
|
Help: `Token expiry time
|
||||||
|
|
||||||
Don't set this value - rclone will set it automatically.
|
Don't set this value - rclone will set it automatically.
|
||||||
`,
|
`,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "version",
|
Name: "version",
|
||||||
Help: `Version read from the file fabric.
|
Help: `Version read from the file fabric
|
||||||
|
|
||||||
Don't set this value - rclone will set it automatically.
|
Don't set this value - rclone will set it automatically.
|
||||||
`,
|
`,
|
||||||
@@ -152,15 +149,15 @@ type Fs struct {
 	opt      Options            // parsed options
 	features *fs.Features       // optional features
 	m        configmap.Mapper   // to save config
-	srv      *rest.Client       // the connection to the server
+	srv      *rest.Client       // the connection to the one drive server
 	dirCache *dircache.DirCache // Map of directory path to directory id
 	pacer    *fs.Pacer          // pacer for API calls
 	tokenMu     sync.Mutex       // hold when reading the token
 	token       string           // current access token
 	tokenExpiry time.Time        // time the current token expires
-	tokenExpired atomic.Int32
+	tokenExpired int32           // read and written with atomic
 	canCopyWithName bool          // set if detected that can use fi_name in copy
 	precision       time.Duration // precision reported
 }

 // Object describes a filefabric object
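The tokenExpired field moves between the Go 1.19 atomic.Int32 type (removed line) and a plain int32 driven by the sync/atomic package functions (added line). The two idioms are equivalent; a small standalone sketch of both, using the same calls that appear elsewhere in this diff:

// Standalone sketch: the two equivalent ways of using an atomic int32 flag
// shown on either side of this hunk.
package main

import (
	"fmt"
	"sync/atomic"
)

type withType struct{ tokenExpired atomic.Int32 } // Go 1.19+ style
type withFuncs struct{ tokenExpired int32 }       // older style, atomic.* functions

func main() {
	var a withType
	a.tokenExpired.Add(1)
	expiredA := a.tokenExpired.Load() != 0
	a.tokenExpired.Store(0)

	var b withFuncs
	atomic.AddInt32(&b.tokenExpired, 1)
	expiredB := atomic.LoadInt32(&b.tokenExpired) != 0
	atomic.StoreInt32(&b.tokenExpired, 0)

	fmt.Println(expiredA, expiredB) // true true
}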
@@ -225,14 +222,13 @@ var retryStatusCodes = []struct {
 		// delete in that folder. Please try again later or use
 		// another name. (error_background)
 		code:  "error_background",
-		sleep: 1 * time.Second,
+		sleep: 6 * time.Second,
 	},
 }

 // shouldRetry returns a boolean as to whether this resp and err
 // deserve to be retried. It returns the err as a convenience
-// try should be the number of the tries so far, counting up from 1
-func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, status api.OKError, try int) (bool, error) {
+func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, status api.OKError) (bool, error) {
 	if fserrors.ContextError(ctx, &err) {
 		return false, err
 	}
@@ -243,15 +239,14 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, st
 	err = status // return the error from the RPC
 	code := status.GetCode()
 	if code == "login_token_expired" {
-		f.tokenExpired.Add(1)
+		atomic.AddInt32(&f.tokenExpired, 1)
 	} else {
 		for _, retryCode := range retryStatusCodes {
 			if code == retryCode.code {
 				if retryCode.sleep > 0 {
-					// make this thread only sleep exponentially increasing extra time
-					sleepTime := retryCode.sleep << (try - 1)
-					fs.Debugf(f, "Sleeping for %v to wait for %q error to clear", sleepTime, retryCode.code)
-					time.Sleep(sleepTime)
+					// make this thread only sleep extra time
+					fs.Debugf(f, "Sleeping for %v to wait for %q error to clear", retryCode.sleep, retryCode.code)
+					time.Sleep(retryCode.sleep)
 				}
 				return true, err
 			}
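The removed lines sleep for an exponentially increasing time per retry (sleep << (try - 1), with try counting from 1), while the added lines sleep for a fixed retryCode.sleep. A tiny standalone sketch of how the exponential formula grows for a 1 second base sleep:

// Standalone sketch: growth of the exponential sleep used on the removed
// side of this hunk, for a 1 second base and retries counted from 1.
package main

import (
	"fmt"
	"time"
)

func main() {
	base := 1 * time.Second
	for try := 1; try <= 5; try++ {
		sleepTime := base << (try - 1) // 1s, 2s, 4s, 8s, 16s
		fmt.Printf("try %d: sleep %v\n", try, sleepTime)
	}
}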
@@ -269,7 +264,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, rootID string, path string
|
|||||||
"pid": rootID,
|
"pid": rootID,
|
||||||
}, &resp, nil)
|
}, &resp, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to check path exists: %w", err)
|
return nil, errors.Wrap(err, "failed to check path exists")
|
||||||
}
|
}
|
||||||
if resp.Exists != "y" {
|
if resp.Exists != "y" {
|
||||||
return nil, fs.ErrorObjectNotFound
|
return nil, fs.ErrorObjectNotFound
|
||||||
@@ -310,7 +305,7 @@ func (f *Fs) getApplianceInfo(ctx context.Context) error {
|
|||||||
"token": "*",
|
"token": "*",
|
||||||
}, &applianceInfo, nil)
|
}, &applianceInfo, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to read appliance version: %w", err)
|
return errors.Wrap(err, "failed to read appliance version")
|
||||||
}
|
}
|
||||||
f.opt.Version = applianceInfo.SoftwareVersionLabel
|
f.opt.Version = applianceInfo.SoftwareVersionLabel
|
||||||
f.m.Set("version", f.opt.Version)
|
f.m.Set("version", f.opt.Version)
|
||||||
@@ -323,12 +318,12 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
|
|||||||
var refreshed = false
|
var refreshed = false
|
||||||
defer func() {
|
defer func() {
|
||||||
if refreshed {
|
if refreshed {
|
||||||
f.tokenExpired.Store(0)
|
atomic.StoreInt32(&f.tokenExpired, 0)
|
||||||
}
|
}
|
||||||
f.tokenMu.Unlock()
|
f.tokenMu.Unlock()
|
||||||
}()
|
}()
|
||||||
|
|
||||||
expired := f.tokenExpired.Load() != 0
|
expired := atomic.LoadInt32(&f.tokenExpired) != 0
|
||||||
if expired {
|
if expired {
|
||||||
fs.Debugf(f, "Token invalid - refreshing")
|
fs.Debugf(f, "Token invalid - refreshing")
|
||||||
}
|
}
|
||||||
@@ -351,7 +346,7 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
|
|||||||
"authtoken": f.opt.PermanentToken,
|
"authtoken": f.opt.PermanentToken,
|
||||||
}, &info, nil)
|
}, &info, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", fmt.Errorf("failed to get session token: %w", err)
|
return "", errors.Wrap(err, "failed to get session token")
|
||||||
}
|
}
|
||||||
refreshed = true
|
refreshed = true
|
||||||
now = now.Add(tokenLifeTime)
|
now = now.Add(tokenLifeTime)
|
||||||
@@ -375,7 +370,7 @@ type params map[string]interface{}
|
|||||||
|
|
||||||
// rpc calls the rpc.php method of the SME file fabric
|
// rpc calls the rpc.php method of the SME file fabric
|
||||||
//
|
//
|
||||||
// This is an entry point to all the method calls.
|
// This is an entry point to all the method calls
|
||||||
//
|
//
|
||||||
// If result is nil then resp.Body will need closing
|
// If result is nil then resp.Body will need closing
|
||||||
func (f *Fs) rpc(ctx context.Context, function string, p params, result api.OKError, options []fs.OpenOption) (resp *http.Response, err error) {
|
func (f *Fs) rpc(ctx context.Context, function string, p params, result api.OKError, options []fs.OpenOption) (resp *http.Response, err error) {
|
||||||
@@ -405,13 +400,11 @@ func (f *Fs) rpc(ctx context.Context, function string, p params, result api.OKEr
|
|||||||
ContentType: "application/x-www-form-urlencoded",
|
ContentType: "application/x-www-form-urlencoded",
|
||||||
Options: options,
|
Options: options,
|
||||||
}
|
}
|
||||||
try := 0
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
try++
|
|
||||||
// Refresh the body each retry
|
// Refresh the body each retry
|
||||||
opts.Body = strings.NewReader(data.Encode())
|
opts.Body = strings.NewReader(data.Encode())
|
||||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, result)
|
resp, err = f.srv.CallJSON(ctx, &opts, nil, result)
|
||||||
return f.shouldRetry(ctx, resp, err, result, try)
|
return f.shouldRetry(ctx, resp, err, result)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return resp, err
|
return resp, err
|
||||||
@@ -492,7 +485,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
// Root is a dir - cache its ID
|
// Root is a dir - cache its ID
|
||||||
f.dirCache.Put(f.root, info.ID)
|
f.dirCache.Put(f.root, info.ID)
|
||||||
}
|
}
|
||||||
//} else {
|
} else {
|
||||||
// Root is not found so a directory
|
// Root is not found so a directory
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -564,7 +557,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
|
|||||||
"fi_name": f.opt.Enc.FromStandardName(leaf),
|
"fi_name": f.opt.Enc.FromStandardName(leaf),
|
||||||
}, &info, nil)
|
}, &info, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", fmt.Errorf("failed to create directory: %w", err)
|
return "", errors.Wrap(err, "failed to create directory")
|
||||||
}
|
}
|
||||||
// fmt.Printf("...Id %q\n", *info.Id)
|
// fmt.Printf("...Id %q\n", *info.Id)
|
||||||
return info.Item.ID, nil
|
return info.Item.ID, nil
|
||||||
@@ -597,7 +590,7 @@ OUTER:
|
|||||||
var info api.GetFolderContentsResponse
|
var info api.GetFolderContentsResponse
|
||||||
_, err = f.rpc(ctx, "getFolderContents", p, &info, nil)
|
_, err = f.rpc(ctx, "getFolderContents", p, &info, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, fmt.Errorf("failed to list directory: %w", err)
|
return false, errors.Wrap(err, "failed to list directory")
|
||||||
}
|
}
|
||||||
for i := range info.Items {
|
for i := range info.Items {
|
||||||
item := &info.Items[i]
|
item := &info.Items[i]
|
||||||
@@ -680,7 +673,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
// Creates from the parameters passed in a half finished Object which
|
// Creates from the parameters passed in a half finished Object which
|
||||||
// must have setMetaData called on it
|
// must have setMetaData called on it
|
||||||
//
|
//
|
||||||
// Returns the object, leaf, directoryID and error.
|
// Returns the object, leaf, directoryID and error
|
||||||
//
|
//
|
||||||
// Used to create new objects
|
// Used to create new objects
|
||||||
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
|
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
|
||||||
@@ -699,7 +692,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
|
|||||||
|
|
||||||
// Put the object
|
// Put the object
|
||||||
//
|
//
|
||||||
// Copy the reader in to the new object which is returned.
|
// Copy the reader in to the new object which is returned
|
||||||
//
|
//
|
||||||
// The new object may have been created if an error is returned
|
// The new object may have been created if an error is returned
|
||||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
@@ -728,7 +721,7 @@ func (f *Fs) deleteObject(ctx context.Context, id string) (err error) {
|
|||||||
"completedeletion": "n",
|
"completedeletion": "n",
|
||||||
}, &info, nil)
|
}, &info, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to delete file: %w", err)
|
return errors.Wrap(err, "failed to delete file")
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -765,7 +758,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
|||||||
}, &info, nil)
|
}, &info, nil)
|
||||||
f.dirCache.FlushDir(dir)
|
f.dirCache.FlushDir(dir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to remove directory: %w", err)
|
return errors.Wrap(err, "failed to remove directory")
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -785,9 +778,9 @@ func (f *Fs) Precision() time.Duration {
|
|||||||
|
|
||||||
// Copy src to this remote using server side copy operations.
|
// Copy src to this remote using server side copy operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given.
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
// It returns the destination Object and a possible error.
|
// It returns the destination Object and a possible error
|
||||||
//
|
//
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
@@ -827,7 +820,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
}
|
}
|
||||||
_, err = f.rpc(ctx, "doCopyFile", p, &info, nil)
|
_, err = f.rpc(ctx, "doCopyFile", p, &info, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to copy file: %w", err)
|
return nil, errors.Wrap(err, "failed to copy file")
|
||||||
}
|
}
|
||||||
err = dstObj.setMetaData(&info.Item)
|
err = dstObj.setMetaData(&info.Item)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -845,8 +838,8 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
|
|||||||
return f.purgeCheck(ctx, dir, false)
|
return f.purgeCheck(ctx, dir, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Wait for the background task to complete if necessary
|
// Wait for the the background task to complete if necessary
|
||||||
func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID api.String) (err error) {
|
func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID string) (err error) {
|
||||||
if taskID == "" || taskID == "0" {
|
if taskID == "" || taskID == "0" {
|
||||||
// No task to wait for
|
// No task to wait for
|
||||||
return nil
|
return nil
|
||||||
@@ -859,7 +852,7 @@ func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID api.String) (err
|
|||||||
"taskid": taskID,
|
"taskid": taskID,
|
||||||
}, &info, nil)
|
}, &info, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to wait for task %s to complete: %w", taskID, err)
|
return errors.Wrapf(err, "failed to wait for task %s to complete", taskID)
|
||||||
}
|
}
|
||||||
if len(info.Tasks) == 0 {
|
if len(info.Tasks) == 0 {
|
||||||
// task has finished
|
// task has finished
|
||||||
@@ -892,7 +885,7 @@ func (f *Fs) renameLeaf(ctx context.Context, isDir bool, id string, newLeaf stri
|
|||||||
"fi_name": newLeaf,
|
"fi_name": newLeaf,
|
||||||
}, &info, nil)
|
}, &info, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to rename leaf: %w", err)
|
return nil, errors.Wrap(err, "failed to rename leaf")
|
||||||
}
|
}
|
||||||
err = f.waitForBackgroundTask(ctx, info.Status.TaskID)
|
err = f.waitForBackgroundTask(ctx, info.Status.TaskID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -936,7 +929,7 @@ func (f *Fs) move(ctx context.Context, isDir bool, id, oldLeaf, newLeaf, oldDire
|
|||||||
"dir_id": newDirectoryID,
|
"dir_id": newDirectoryID,
|
||||||
}, &info, nil)
|
}, &info, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to move file to new directory: %w", err)
|
return nil, errors.Wrap(err, "failed to move file to new directory")
|
||||||
}
|
}
|
||||||
item = &info.Item
|
item = &info.Item
|
||||||
err = f.waitForBackgroundTask(ctx, info.Status.TaskID)
|
err = f.waitForBackgroundTask(ctx, info.Status.TaskID)
|
||||||
@@ -958,9 +951,9 @@ func (f *Fs) move(ctx context.Context, isDir bool, id, oldLeaf, newLeaf, oldDire
|
|||||||
|
|
||||||
// Move src to this remote using server side move operations.
|
// Move src to this remote using server side move operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given.
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
// It returns the destination Object and a possible error.
|
// It returns the destination Object and a possible error
|
||||||
//
|
//
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
@@ -1039,7 +1032,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
|
|||||||
var info api.EmptyResponse
|
var info api.EmptyResponse
|
||||||
_, err = f.rpc(ctx, "emptyTrashInBackground", params{}, &info, nil)
|
_, err = f.rpc(ctx, "emptyTrashInBackground", params{}, &info, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to empty trash: %w", err)
|
return errors.Wrap(err, "failed to empty trash")
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -1096,7 +1089,7 @@ func (o *Object) Size() int64 {
|
|||||||
// setMetaData sets the metadata from info
|
// setMetaData sets the metadata from info
|
||||||
func (o *Object) setMetaData(info *api.Item) (err error) {
|
func (o *Object) setMetaData(info *api.Item) (err error) {
|
||||||
if info.Type != api.ItemTypeFile {
|
if info.Type != api.ItemTypeFile {
|
||||||
return fs.ErrorIsDir
|
return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
|
||||||
}
|
}
|
||||||
o.hasMetaData = true
|
o.hasMetaData = true
|
||||||
o.size = info.Size
|
o.size = info.Size
|
||||||
@@ -1137,6 +1130,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
|
|||||||
|
|
||||||
// ModTime returns the modification time of the object
|
// ModTime returns the modification time of the object
|
||||||
//
|
//
|
||||||
|
//
|
||||||
// It attempts to read the objects mtime and if that isn't present the
|
// It attempts to read the objects mtime and if that isn't present the
|
||||||
// LastModified returned in the http headers
|
// LastModified returned in the http headers
|
||||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||||
@@ -1165,7 +1159,7 @@ func (o *Object) modifyFile(ctx context.Context, keyValues [][2]string) error {
|
|||||||
"data": data.String(),
|
"data": data.String(),
|
||||||
}, &info, nil)
|
}, &info, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to update metadata: %w", err)
|
return errors.Wrap(err, "failed to update metadata")
|
||||||
}
|
}
|
||||||
return o.setMetaData(&info.Item)
|
return o.setMetaData(&info.Item)
|
||||||
}
|
}
|
||||||
@@ -1188,7 +1182,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
|||||||
return nil, errors.New("can't download - no id")
|
return nil, errors.New("can't download - no id")
|
||||||
}
|
}
|
||||||
if o.contentType == emptyMimeType {
|
if o.contentType == emptyMimeType {
|
||||||
return io.NopCloser(bytes.NewReader([]byte{})), nil
|
return ioutil.NopCloser(bytes.NewReader([]byte{})), nil
|
||||||
}
|
}
|
||||||
fs.FixRangeOption(options, o.size)
|
fs.FixRangeOption(options, o.size)
|
||||||
resp, err := o.fs.rpc(ctx, "getFile", params{
|
resp, err := o.fs.rpc(ctx, "getFile", params{
|
||||||
@@ -1202,7 +1196,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
|||||||
|
|
||||||
// Update the object with the contents of the io.Reader, modTime and size
|
// Update the object with the contents of the io.Reader, modTime and size
|
||||||
//
|
//
|
||||||
// If existing is set then it updates the object rather than creating a new one.
|
// If existing is set then it updates the object rather than creating a new one
|
||||||
//
|
//
|
||||||
// The new object may have been created if an error is returned
|
// The new object may have been created if an error is returned
|
||||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||||
@@ -1248,7 +1242,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
}
|
}
|
||||||
_, err = o.fs.rpc(ctx, "doInitUpload", p, &upload, nil)
|
_, err = o.fs.rpc(ctx, "doInitUpload", p, &upload, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to initialize upload: %w", err)
|
return errors.Wrap(err, "failed to initialize upload")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Cancel the upload if aborted or it fails
|
// Cancel the upload if aborted or it fails
|
||||||
@@ -1284,20 +1278,18 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
var contentLength = size
|
var contentLength = size
|
||||||
opts.ContentLength = &contentLength // NB CallJSON scribbles on this which is naughty
|
opts.ContentLength = &contentLength // NB CallJSON scribbles on this which is naughty
|
||||||
}
|
}
|
||||||
try := 0
|
|
||||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||||
try++
|
|
||||||
resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &uploader)
|
resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &uploader)
|
||||||
return o.fs.shouldRetry(ctx, resp, err, nil, try)
|
return o.fs.shouldRetry(ctx, resp, err, nil)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to upload: %w", err)
|
return errors.Wrap(err, "failed to upload")
|
||||||
}
|
}
|
||||||
if uploader.Success != "y" {
|
if uploader.Success != "y" {
|
||||||
return fmt.Errorf("upload failed")
|
return errors.Errorf("upload failed")
|
||||||
}
|
}
|
||||||
if size > 0 && uploader.FileSize != size {
|
if size > 0 && uploader.FileSize != size {
|
||||||
return fmt.Errorf("upload failed: size mismatch: want %d got %d", size, uploader.FileSize)
|
return errors.Errorf("upload failed: size mismatch: want %d got %d", size, uploader.FileSize)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Now finalize the file
|
// Now finalize the file
|
||||||
@@ -1309,7 +1301,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
}
|
}
|
||||||
_, err = o.fs.rpc(ctx, "doCompleteUpload", p, &finalize, nil)
|
_, err = o.fs.rpc(ctx, "doCompleteUpload", p, &finalize, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to finalize upload: %w", err)
|
return errors.Wrap(err, "failed to finalize upload")
|
||||||
}
|
}
|
||||||
finalized = true
|
finalized = true
|
||||||
|
|
||||||
|
|||||||
@@ -4,8 +4,6 @@ package ftp
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
"io"
|
||||||
"net"
|
"net"
|
||||||
"net/textproto"
|
"net/textproto"
|
||||||
@@ -16,6 +14,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/jlaffaye/ftp"
|
"github.com/jlaffaye/ftp"
|
||||||
|
"github.com/pkg/errors"
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fs/accounting"
|
"github.com/rclone/rclone/fs/accounting"
|
||||||
"github.com/rclone/rclone/fs/config"
|
"github.com/rclone/rclone/fs/config"
|
||||||
@@ -28,7 +27,6 @@ import (
|
|||||||
"github.com/rclone/rclone/lib/encoder"
|
"github.com/rclone/rclone/lib/encoder"
|
||||||
"github.com/rclone/rclone/lib/env"
|
"github.com/rclone/rclone/lib/env"
|
||||||
"github.com/rclone/rclone/lib/pacer"
|
"github.com/rclone/rclone/lib/pacer"
|
||||||
"github.com/rclone/rclone/lib/proxy"
|
|
||||||
"github.com/rclone/rclone/lib/readers"
|
"github.com/rclone/rclone/lib/readers"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -46,96 +44,66 @@ const (
|
|||||||
func init() {
|
func init() {
|
||||||
fs.Register(&fs.RegInfo{
|
fs.Register(&fs.RegInfo{
|
||||||
Name: "ftp",
|
Name: "ftp",
|
||||||
Description: "FTP",
|
Description: "FTP Connection",
|
||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Options: []fs.Option{{
|
Options: []fs.Option{{
|
||||||
Name: "host",
|
Name: "host",
|
||||||
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
|
Help: "FTP host to connect to",
|
||||||
Required: true,
|
Required: true,
|
||||||
Sensitive: true,
|
Examples: []fs.OptionExample{{
|
||||||
|
Value: "ftp.example.com",
|
||||||
|
Help: "Connect to ftp.example.com",
|
||||||
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: "user",
|
Name: "user",
|
||||||
Help: "FTP username.",
|
Help: "FTP username, leave blank for current username, " + currentUser,
|
||||||
Default: currentUser,
|
|
||||||
Sensitive: true,
|
|
||||||
}, {
|
}, {
|
||||||
Name: "port",
|
Name: "port",
|
||||||
Help: "FTP port number.",
|
Help: "FTP port, leave blank to use default (21)",
|
||||||
Default: 21,
|
|
||||||
}, {
|
}, {
|
||||||
Name: "pass",
|
Name: "pass",
|
||||||
Help: "FTP password.",
|
Help: "FTP password",
|
||||||
IsPassword: true,
|
IsPassword: true,
|
||||||
|
Required: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "tls",
|
Name: "tls",
|
||||||
Help: `Use Implicit FTPS (FTP over TLS).
|
Help: `Use Implicit FTPS (FTP over TLS)
|
||||||
|
|
||||||
When using implicit FTP over TLS the client connects using TLS
|
When using implicit FTP over TLS the client connects using TLS
|
||||||
right from the start which breaks compatibility with
|
right from the start which breaks compatibility with
|
||||||
non-TLS-aware servers. This is usually served over port 990 rather
|
non-TLS-aware servers. This is usually served over port 990 rather
|
||||||
than port 21. Cannot be used in combination with explicit FTPS.`,
|
than port 21. Cannot be used in combination with explicit FTP.`,
|
||||||
Default: false,
|
Default: false,
|
||||||
}, {
|
}, {
|
||||||
Name: "explicit_tls",
|
Name: "explicit_tls",
|
||||||
Help: `Use Explicit FTPS (FTP over TLS).
|
Help: `Use Explicit FTPS (FTP over TLS)
|
||||||
|
|
||||||
When using explicit FTP over TLS the client explicitly requests
|
When using explicit FTP over TLS the client explicitly requests
|
||||||
security from the server in order to upgrade a plain text connection
|
security from the server in order to upgrade a plain text connection
|
||||||
to an encrypted one. Cannot be used in combination with implicit FTPS.`,
|
to an encrypted one. Cannot be used in combination with implicit FTP.`,
|
||||||
Default: false,
|
Default: false,
|
||||||
}, {
|
}, {
|
||||||
Name: "concurrency",
|
Name: "concurrency",
|
||||||
Help: strings.Replace(`Maximum number of FTP simultaneous connections, 0 for unlimited.
|
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited",
|
||||||
|
|
||||||
Note that setting this is very likely to cause deadlocks so it should
|
|
||||||
be used with care.
|
|
||||||
|
|
||||||
If you are doing a sync or copy then make sure concurrency is one more
|
|
||||||
than the sum of |--transfers| and |--checkers|.
|
|
||||||
|
|
||||||
If you use |--check-first| then it just needs to be one more than the
|
|
||||||
maximum of |--checkers| and |--transfers|.
|
|
||||||
|
|
||||||
So for |concurrency 3| you'd use |--checkers 2 --transfers 2
|
|
||||||
--check-first| or |--checkers 1 --transfers 1|.
|
|
||||||
|
|
||||||
`, "|", "`", -1),
|
|
||||||
Default: 0,
|
Default: 0,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "no_check_certificate",
|
Name: "no_check_certificate",
|
||||||
Help: "Do not verify the TLS certificate of the server.",
|
Help: "Do not verify the TLS certificate of the server",
|
||||||
Default: false,
|
Default: false,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "disable_epsv",
|
Name: "disable_epsv",
|
||||||
Help: "Disable using EPSV even if server advertises support.",
|
Help: "Disable using EPSV even if server advertises support",
|
||||||
Default: false,
|
Default: false,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "disable_mlsd",
|
Name: "disable_mlsd",
|
||||||
Help: "Disable using MLSD even if server advertises support.",
|
Help: "Disable using MLSD even if server advertises support",
|
||||||
Default: false,
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
|
||||||
Name: "disable_utf8",
|
|
||||||
Help: "Disable using UTF-8 even if server advertises support.",
|
|
||||||
Default: false,
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
|
||||||
Name: "writing_mdtm",
|
|
||||||
Help: "Use MDTM to set modification time (VsFtpd quirk)",
|
|
||||||
Default: false,
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
|
||||||
Name: "force_list_hidden",
|
|
||||||
Help: "Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD.",
|
|
||||||
Default: false,
|
Default: false,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "idle_timeout",
|
Name: "idle_timeout",
|
||||||
Default: fs.Duration(60 * time.Second),
|
Default: fs.Duration(60 * time.Second),
|
||||||
Help: `Max time before closing idle connections.
|
Help: `Max time before closing idle connections
|
||||||
|
|
||||||
If no connections have been returned to the connection pool in the time
|
If no connections have been returned to the connection pool in the time
|
||||||
given, rclone will empty the connection pool.
|
given, rclone will empty the connection pool.
|
||||||
@@ -148,63 +116,17 @@ Set to 0 to keep connections indefinitely.
|
|||||||
Help: "Maximum time to wait for a response to close.",
|
Help: "Maximum time to wait for a response to close.",
|
||||||
Default: fs.Duration(60 * time.Second),
|
Default: fs.Duration(60 * time.Second),
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
|
||||||
Name: "tls_cache_size",
|
|
||||||
Help: `Size of TLS session cache for all control and data connections.
|
|
||||||
|
|
||||||
TLS cache allows to resume TLS sessions and reuse PSK between connections.
|
|
||||||
Increase if default size is not enough resulting in TLS resumption errors.
|
|
||||||
Enabled by default. Use 0 to disable.`,
|
|
||||||
Default: 32,
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
|
||||||
Name: "disable_tls13",
|
|
||||||
Help: "Disable TLS 1.3 (workaround for FTP servers with buggy TLS)",
|
|
||||||
Default: false,
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
|
||||||
Name: "shut_timeout",
|
|
||||||
Help: "Maximum time to wait for data connection closing status.",
|
|
||||||
Default: fs.Duration(60 * time.Second),
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
|
||||||
Name: "ask_password",
|
|
||||||
Default: false,
|
|
||||||
Help: `Allow asking for FTP password when needed.
|
|
||||||
|
|
||||||
If this is set and no password is supplied then rclone will ask for a password
|
|
||||||
`,
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
|
||||||
Name: "socks_proxy",
|
|
||||||
Default: "",
|
|
||||||
Help: `Socks 5 proxy host.
|
|
||||||
|
|
||||||
Supports the format user:pass@host:port, user@host:port, host:port.
|
|
||||||
|
|
||||||
Example:
|
|
||||||
|
|
||||||
myUser:myPass@localhost:9005
|
|
||||||
`,
|
|
||||||
Advanced: true,
|
|
||||||
}, {
|
}, {
|
||||||
Name: config.ConfigEncoding,
|
Name: config.ConfigEncoding,
|
||||||
Help: config.ConfigEncodingHelp,
|
Help: config.ConfigEncodingHelp,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
// The FTP protocol can't handle trailing spaces
|
// The FTP protocol can't handle trailing spaces (for instance
|
||||||
// (for instance, pureftpd turns them into '_')
|
// pureftpd turns them into _)
|
||||||
|
//
|
||||||
|
// proftpd can't handle '*' in file names
|
||||||
|
// pureftpd can't handle '[', ']' or '*'
|
||||||
Default: (encoder.Display |
|
Default: (encoder.Display |
|
||||||
encoder.EncodeRightSpace),
|
encoder.EncodeRightSpace),
|
||||||
Examples: []fs.OptionExample{{
|
|
||||||
Value: "Asterisk,Ctl,Dot,Slash",
|
|
||||||
Help: "ProFTPd can't handle '*' in file names",
|
|
||||||
}, {
|
|
||||||
Value: "BackSlash,Ctl,Del,Dot,RightSpace,Slash,SquareBracket",
|
|
||||||
Help: "PureFTPd can't handle '[]' or '*' in file names",
|
|
||||||
}, {
|
|
||||||
Value: "Ctl,LeftPeriod,Slash",
|
|
||||||
Help: "VsFTPd can't handle file names starting with dot",
|
|
||||||
}},
|
|
||||||
}},
|
}},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -217,21 +139,13 @@ type Options struct {
|
|||||||
Port string `config:"port"`
|
Port string `config:"port"`
|
||||||
TLS bool `config:"tls"`
|
TLS bool `config:"tls"`
|
||||||
ExplicitTLS bool `config:"explicit_tls"`
|
ExplicitTLS bool `config:"explicit_tls"`
|
||||||
TLSCacheSize int `config:"tls_cache_size"`
|
|
||||||
DisableTLS13 bool `config:"disable_tls13"`
|
|
||||||
Concurrency int `config:"concurrency"`
|
Concurrency int `config:"concurrency"`
|
||||||
SkipVerifyTLSCert bool `config:"no_check_certificate"`
|
SkipVerifyTLSCert bool `config:"no_check_certificate"`
|
||||||
DisableEPSV bool `config:"disable_epsv"`
|
DisableEPSV bool `config:"disable_epsv"`
|
||||||
DisableMLSD bool `config:"disable_mlsd"`
|
DisableMLSD bool `config:"disable_mlsd"`
|
||||||
DisableUTF8 bool `config:"disable_utf8"`
|
|
||||||
WritingMDTM bool `config:"writing_mdtm"`
|
|
||||||
ForceListHidden bool `config:"force_list_hidden"`
|
|
||||||
IdleTimeout fs.Duration `config:"idle_timeout"`
|
IdleTimeout fs.Duration `config:"idle_timeout"`
|
||||||
CloseTimeout fs.Duration `config:"close_timeout"`
|
CloseTimeout fs.Duration `config:"close_timeout"`
|
||||||
ShutTimeout fs.Duration `config:"shut_timeout"`
|
|
||||||
AskPassword bool `config:"ask_password"`
|
|
||||||
Enc encoder.MultiEncoder `config:"encoding"`
|
Enc encoder.MultiEncoder `config:"encoding"`
|
||||||
SocksProxy string `config:"socks_proxy"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fs represents a remote FTP server
|
// Fs represents a remote FTP server
|
||||||
@@ -249,10 +163,8 @@ type Fs struct {
|
|||||||
pool []*ftp.ServerConn
|
pool []*ftp.ServerConn
|
||||||
drain *time.Timer // used to drain the pool when we stop using the connections
|
drain *time.Timer // used to drain the pool when we stop using the connections
|
||||||
tokens *pacer.TokenDispenser
|
tokens *pacer.TokenDispenser
|
||||||
|
tlsConf *tls.Config
|
||||||
pacer *fs.Pacer // pacer for FTP connections
|
pacer *fs.Pacer // pacer for FTP connections
|
||||||
fGetTime bool // true if the ftp library accepts GetTime
|
|
||||||
fSetTime bool // true if the ftp library accepts SetTime
|
|
||||||
fLstTime bool // true if the List call returns precise time
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Object describes an FTP file
|
// Object describes an FTP file
|
||||||
@@ -267,7 +179,6 @@ type FileInfo struct {
|
|||||||
Name string
|
Name string
|
||||||
Size uint64
|
Size uint64
|
||||||
ModTime time.Time
|
ModTime time.Time
|
||||||
precise bool // true if the time is precise
|
|
||||||
IsDir bool
|
IsDir bool
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -330,23 +241,21 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
 	return len(p), nil
 }

-// Return a *textproto.Error if err contains one or nil otherwise
-func textprotoError(err error) (errX *textproto.Error) {
-	if errors.As(err, &errX) {
-		return errX
-	}
-	return nil
+type dialCtx struct {
+	f   *Fs
+	ctx context.Context
 }

-// returns true if this FTP error should be retried
-func isRetriableFtpError(err error) bool {
-	if errX := textprotoError(err); errX != nil {
-		switch errX.Code {
-		case ftp.StatusNotAvailable, ftp.StatusTransfertAborted:
-			return true
-		}
+// dial a new connection with fshttp dialer
+func (d *dialCtx) dial(network, address string) (net.Conn, error) {
+	conn, err := fshttp.NewDialer(d.ctx).Dial(network, address)
+	if err != nil {
+		return nil, err
 	}
-	return false
+	if d.f.tlsConf != nil {
+		conn = tls.Client(conn, d.f.tlsConf)
+	}
+	return conn, err
 }

 // shouldRetry returns a boolean as to whether this err deserve to be
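The added dial helper upgrades every connection it returns with tls.Client whenever a TLS config is present, instead of leaving TLS to the ftp library dial options. A minimal standalone sketch of that wrapping (not part of the change; the dialer, host and address here are placeholders, rclone itself dials through fshttp.NewDialer):

// Standalone sketch: upgrading an existing net.Conn to TLS with tls.Client,
// as the added dial function above does for implicit FTPS.
package sketch

import (
	"crypto/tls"
	"net"
)

func dialTLS(network, address, serverName string, insecure bool) (net.Conn, error) {
	conn, err := net.Dial(network, address) // placeholder for fshttp.NewDialer(ctx).Dial
	if err != nil {
		return nil, err
	}
	tlsConf := &tls.Config{
		ServerName:         serverName,
		InsecureSkipVerify: insecure,
	}
	// tls.Client wraps the plain connection; the handshake happens lazily on
	// the first read or write unless Handshake is called explicitly.
	return tls.Client(conn, tlsConf), nil
}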
@@ -355,93 +264,29 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
|
|||||||
if fserrors.ContextError(ctx, &err) {
|
if fserrors.ContextError(ctx, &err) {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
if isRetriableFtpError(err) {
|
switch errX := err.(type) {
|
||||||
return true, err
|
case *textproto.Error:
|
||||||
|
switch errX.Code {
|
||||||
|
case ftp.StatusNotAvailable:
|
||||||
|
return true, err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return fserrors.ShouldRetry(err), err
|
return fserrors.ShouldRetry(err), err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get a TLS config with a unique session cache.
|
|
||||||
//
|
|
||||||
// We can't share session caches between connections.
|
|
||||||
//
|
|
||||||
// See: https://github.com/rclone/rclone/issues/7234
|
|
||||||
func (f *Fs) tlsConfig() *tls.Config {
|
|
||||||
var tlsConfig *tls.Config
|
|
||||||
if f.opt.TLS || f.opt.ExplicitTLS {
|
|
||||||
tlsConfig = &tls.Config{
|
|
||||||
ServerName: f.opt.Host,
|
|
||||||
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
|
|
||||||
}
|
|
||||||
if f.opt.TLSCacheSize > 0 {
|
|
||||||
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(f.opt.TLSCacheSize)
|
|
||||||
}
|
|
||||||
if f.opt.DisableTLS13 {
|
|
||||||
tlsConfig.MaxVersion = tls.VersionTLS12
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return tlsConfig
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open a new connection to the FTP server.
|
// Open a new connection to the FTP server.
|
||||||
func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
|
func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
|
||||||
fs.Debugf(f, "Connecting to FTP server")
|
fs.Debugf(f, "Connecting to FTP server")
|
||||||
|
dCtx := dialCtx{f, ctx}
|
||||||
// tls.Config for this connection only. Will be used for data
|
ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dCtx.dial)}
|
||||||
// and control connections.
|
if f.opt.ExplicitTLS {
|
||||||
tlsConfig := f.tlsConfig()
|
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
|
||||||
|
// Initial connection needs to be cleartext for explicit TLS
|
||||||
// Make ftp library dial with fshttp dialer optionally using TLS
|
conn, err := fshttp.NewDialer(ctx).Dial("tcp", f.dialAddr)
|
||||||
initialConnection := true
|
|
||||||
dial := func(network, address string) (conn net.Conn, err error) {
|
|
||||||
fs.Debugf(f, "dial(%q,%q)", network, address)
|
|
||||||
defer func() {
|
|
||||||
fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
|
|
||||||
}()
|
|
||||||
baseDialer := fshttp.NewDialer(ctx)
|
|
||||||
if f.opt.SocksProxy != "" {
|
|
||||||
conn, err = proxy.SOCKS5Dial(network, address, f.opt.SocksProxy, baseDialer)
|
|
||||||
} else {
|
|
||||||
conn, err = baseDialer.Dial(network, address)
|
|
||||||
}
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
// Connect using cleartext only for non TLS
|
ftpConfig = append(ftpConfig, ftp.DialWithNetConn(conn))
|
||||||
if tlsConfig == nil {
|
|
||||||
return conn, nil
|
|
||||||
}
|
|
||||||
// Initial connection only needs to be cleartext for explicit TLS
|
|
||||||
if f.opt.ExplicitTLS && initialConnection {
|
|
||||||
initialConnection = false
|
|
||||||
return conn, nil
|
|
||||||
}
|
|
||||||
// Upgrade connection to TLS
|
|
||||||
tlsConn := tls.Client(conn, tlsConfig)
|
|
||||||
// Do the initial handshake - tls.Client doesn't do it for us
|
|
||||||
// If we do this then connections to proftpd/pureftpd lock up
|
|
||||||
// See: https://github.com/rclone/rclone/issues/6426
|
|
||||||
// See: https://github.com/jlaffaye/ftp/issues/282
|
|
||||||
if false {
|
|
||||||
err = tlsConn.HandshakeContext(ctx)
|
|
||||||
if err != nil {
|
|
||||||
_ = conn.Close()
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return tlsConn, nil
|
|
||||||
}
|
|
||||||
ftpConfig := []ftp.DialOption{
|
|
||||||
ftp.DialWithContext(ctx),
|
|
||||||
ftp.DialWithDialFunc(dial),
|
|
||||||
}
|
|
||||||
|
|
||||||
if f.opt.TLS {
|
|
||||||
// Our dialer takes care of TLS but ftp library also needs tlsConf
|
|
||||||
// as a trigger for sending PSBZ and PROT options to server.
|
|
||||||
ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
|
|
||||||
} else if f.opt.ExplicitTLS {
|
|
||||||
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(tlsConfig))
|
|
||||||
}
|
}
|
||||||
if f.opt.DisableEPSV {
|
if f.opt.DisableEPSV {
|
||||||
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
|
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
|
||||||
@@ -449,18 +294,6 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
|
|||||||
if f.opt.DisableMLSD {
|
if f.opt.DisableMLSD {
|
||||||
ftpConfig = append(ftpConfig, ftp.DialWithDisabledMLSD(true))
|
ftpConfig = append(ftpConfig, ftp.DialWithDisabledMLSD(true))
|
||||||
}
|
}
|
||||||
if f.opt.DisableUTF8 {
|
|
||||||
ftpConfig = append(ftpConfig, ftp.DialWithDisabledUTF8(true))
|
|
||||||
}
|
|
||||||
if f.opt.ShutTimeout != 0 && f.opt.ShutTimeout != fs.DurationOff {
|
|
||||||
ftpConfig = append(ftpConfig, ftp.DialWithShutTimeout(time.Duration(f.opt.ShutTimeout)))
|
|
||||||
}
|
|
||||||
if f.opt.WritingMDTM {
|
|
||||||
ftpConfig = append(ftpConfig, ftp.DialWithWritingMDTM(true))
|
|
||||||
}
|
|
||||||
if f.opt.ForceListHidden {
|
|
||||||
ftpConfig = append(ftpConfig, ftp.DialWithForceListHidden(true))
|
|
||||||
}
|
|
||||||
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
|
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
|
||||||
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
|
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
|
||||||
}
|
}
|
||||||
@@ -477,7 +310,7 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
|
|||||||
return false, nil
|
return false, nil
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
err = fmt.Errorf("failed to make FTP connection to %q: %w", f.dialAddr, err)
|
err = errors.Wrapf(err, "failed to make FTP connection to %q", f.dialAddr)
|
||||||
}
|
}
|
||||||
return c, err
|
return c, err
|
||||||
}
|
}
|
||||||
@@ -524,7 +357,8 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
|
|||||||
*pc = nil
|
*pc = nil
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// If not a regular FTP error code then check the connection
|
// If not a regular FTP error code then check the connection
|
||||||
if tpErr := textprotoError(err); tpErr != nil {
|
_, isRegularError := errors.Cause(err).(*textproto.Error)
|
||||||
|
if !isRegularError {
|
||||||
nopErr := c.NoOp()
|
nopErr := c.NoOp()
|
||||||
if nopErr != nil {
|
if nopErr != nil {
|
||||||
fs.Debugf(f, "Connection failed, closing: %v", nopErr)
|
fs.Debugf(f, "Connection failed, closing: %v", nopErr)
|
||||||
@@ -570,14 +404,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
 	if err != nil {
 		return nil, err
 	}
-	pass := ""
-	if opt.AskPassword && opt.Pass == "" {
-		pass = config.GetPassword("FTP server password")
-	} else {
-		pass, err = obscure.Reveal(opt.Pass)
-		if err != nil {
-			return nil, fmt.Errorf("NewFS decrypt password: %w", err)
-		}
+	pass, err := obscure.Reveal(opt.Pass)
+	if err != nil {
+		return nil, errors.Wrap(err, "NewFS decrypt password")
 	}
 	user := opt.User
 	if user == "" {
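Both sides decode the configured password with obscure.Reveal; the value stored in the config file is obscured (reversible obfuscation), not strong encryption. A small standalone sketch of the round trip, assuming rclone's lib/obscure package:

// Standalone sketch: obscuring and revealing a password with rclone's
// lib/obscure, as NewFs does for the FTP "pass" option. The password value
// is a placeholder.
package main

import (
	"fmt"

	"github.com/rclone/rclone/lib/obscure"
)

func main() {
	obscured := obscure.MustObscure("example-password") // placeholder value
	plain, err := obscure.Reveal(obscured)
	if err != nil {
		fmt.Println("reveal failed:", err)
		return
	}
	fmt.Println(plain) // prints example-password
}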
@@ -594,7 +423,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
|
|||||||
protocol = "ftps://"
|
protocol = "ftps://"
|
||||||
}
|
}
|
||||||
if opt.TLS && opt.ExplicitTLS {
|
if opt.TLS && opt.ExplicitTLS {
|
||||||
return nil, errors.New("implicit TLS and explicit TLS are mutually incompatible, please revise your config")
|
return nil, errors.New("Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
|
||||||
|
}
|
||||||
|
var tlsConfig *tls.Config
|
||||||
|
if opt.TLS || opt.ExplicitTLS {
|
||||||
|
tlsConfig = &tls.Config{
|
||||||
|
ServerName: opt.Host,
|
||||||
|
InsecureSkipVerify: opt.SkipVerifyTLSCert,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
u := protocol + path.Join(dialAddr+"/", root)
|
u := protocol + path.Join(dialAddr+"/", root)
|
||||||
ci := fs.GetConfig(ctx)
|
ci := fs.GetConfig(ctx)
|
||||||
@@ -608,11 +444,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
|
|||||||
pass: pass,
|
pass: pass,
|
||||||
dialAddr: dialAddr,
|
dialAddr: dialAddr,
|
||||||
tokens: pacer.NewTokenDispenser(opt.Concurrency),
|
tokens: pacer.NewTokenDispenser(opt.Concurrency),
|
||||||
|
tlsConf: tlsConfig,
|
||||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||||
}
|
}
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
CanHaveEmptyDirectories: true,
|
CanHaveEmptyDirectories: true,
|
||||||
PartialUploads: true,
|
|
||||||
}).Fill(ctx, f)
|
}).Fill(ctx, f)
|
||||||
// set the pool drainer timer going
|
// set the pool drainer timer going
|
||||||
if f.opt.IdleTimeout > 0 {
|
if f.opt.IdleTimeout > 0 {
|
||||||
@@ -621,13 +457,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
|
|||||||
// Make a connection and pool it to return errors early
|
// Make a connection and pool it to return errors early
|
||||||
c, err := f.getFtpConnection(ctx)
|
c, err := f.getFtpConnection(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("NewFs: %w", err)
|
return nil, errors.Wrap(err, "NewFs")
|
||||||
}
|
|
||||||
f.fGetTime = c.IsGetTimeSupported()
|
|
||||||
f.fSetTime = c.IsSetTimeSupported()
|
|
||||||
f.fLstTime = c.IsTimePreciseInList()
|
|
||||||
if !f.fLstTime && f.fGetTime {
|
|
||||||
f.features.SlowModTime = true
|
|
||||||
}
|
}
|
||||||
f.putFtpConnection(&c, nil)
|
f.putFtpConnection(&c, nil)
|
||||||
if root != "" {
|
if root != "" {
|
||||||
@@ -639,7 +469,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
|
|||||||
}
|
}
|
||||||
_, err := f.NewObject(ctx, remote)
|
_, err := f.NewObject(ctx, remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == fs.ErrorObjectNotFound || errors.Is(err, fs.ErrorNotAFile) {
|
if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
|
||||||
// File doesn't exist so return old f
|
// File doesn't exist so return old f
|
||||||
f.root = root
|
f.root = root
|
||||||
return f, nil
|
return f, nil
|
||||||
@@ -660,7 +490,8 @@ func (f *Fs) Shutdown(ctx context.Context) error {
|
|||||||
|
|
||||||
// translateErrorFile turns FTP errors into rclone errors if possible for a file
|
// translateErrorFile turns FTP errors into rclone errors if possible for a file
|
||||||
func translateErrorFile(err error) error {
|
func translateErrorFile(err error) error {
|
||||||
if errX := textprotoError(err); errX != nil {
|
switch errX := err.(type) {
|
||||||
|
case *textproto.Error:
|
||||||
switch errX.Code {
|
switch errX.Code {
|
||||||
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
|
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
|
||||||
err = fs.ErrorObjectNotFound
|
err = fs.ErrorObjectNotFound
|
||||||
@@ -671,7 +502,8 @@ func translateErrorFile(err error) error {
|
|||||||
|
|
||||||
// translateErrorDir turns FTP errors into rclone errors if possible for a directory
|
// translateErrorDir turns FTP errors into rclone errors if possible for a directory
|
||||||
func translateErrorDir(err error) error {
|
func translateErrorDir(err error) error {
|
||||||
if errX := textprotoError(err); errX != nil {
|
switch errX := err.(type) {
|
||||||
|
case *textproto.Error:
|
||||||
switch errX.Code {
|
switch errX.Code {
|
||||||
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
|
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
|
||||||
err = fs.ErrorDirNotFound
|
err = fs.ErrorDirNotFound
|
||||||
@@ -702,7 +534,8 @@ func (f *Fs) dirFromStandardPath(dir string) string {
 // findItem finds a directory entry for the name in its parent directory
 func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err error) {
 // defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
-if remote == "" || remote == "." || remote == "/" {
+fullPath := path.Join(f.root, remote)
+if fullPath == "" || fullPath == "." || fullPath == "/" {
 // if root, assume exists and synthesize an entry
 return &ftp.Entry{
 Name: "",
@@ -710,38 +543,13 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
 Time: time.Now(),
 }, nil
 }
+dir := path.Dir(fullPath)
+base := path.Base(fullPath)

 c, err := f.getFtpConnection(ctx)
 if err != nil {
-return nil, fmt.Errorf("findItem: %w", err)
+return nil, errors.Wrap(err, "findItem")
 }

-// returns TRUE if MLST is supported which is required to call GetEntry
-if c.IsTimePreciseInList() {
-entry, err := c.GetEntry(f.opt.Enc.FromStandardPath(remote))
-f.putFtpConnection(&c, err)
-if err != nil {
-err = translateErrorFile(err)
-if err == fs.ErrorObjectNotFound {
-return nil, nil
-}
-if errX := textprotoError(err); errX != nil {
-switch errX.Code {
-case ftp.StatusBadArguments:
-err = nil
-}
-}
-return nil, err
-}
-if entry != nil {
-f.entryToStandard(entry)
-}
-return entry, nil
-}

-dir := path.Dir(remote)
-base := path.Base(remote)

 files, err := c.List(f.dirFromStandardPath(dir))
 f.putFtpConnection(&c, err)
 if err != nil {
@@ -760,7 +568,7 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
 // it returns the error fs.ErrorObjectNotFound.
 func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
 // defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
-entry, err := f.findItem(ctx, path.Join(f.root, remote))
+entry, err := f.findItem(ctx, remote)
 if err != nil {
 return nil, err
 }
@@ -769,12 +577,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
 fs: f,
 remote: remote,
 }
-o.info = &FileInfo{
+info := &FileInfo{
 Name: remote,
 Size: entry.Size,
 ModTime: entry.Time,
-precise: f.fLstTime,
 }
+o.info = info

 return o, nil
 }
 return nil, fs.ErrorObjectNotFound
@@ -782,9 +591,9 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err

 // dirExists checks the directory pointed to by remote exists or not
 func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) {
-entry, err := f.findItem(ctx, path.Join(f.root, remote))
+entry, err := f.findItem(ctx, remote)
 if err != nil {
-return false, fmt.Errorf("dirExists: %w", err)
+return false, errors.Wrap(err, "dirExists")
 }
 if entry != nil && entry.Type == ftp.EntryTypeFolder {
 return true, nil
@@ -805,7 +614,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 // defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
 c, err := f.getFtpConnection(ctx)
 if err != nil {
-return nil, fmt.Errorf("list: %w", err)
+return nil, errors.Wrap(err, "list")
 }

 var listErr error
@@ -834,7 +643,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 case <-timer.C:
 // if timer fired assume no error but connection dead
 fs.Errorf(f, "Timeout when waiting for List")
-return nil, errors.New("timeout when waiting for List")
+return nil, errors.New("Timeout when waiting for List")
 }

 // Annoyingly FTP returns success for a directory which
@@ -843,7 +652,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 if len(files) == 0 {
 exists, err := f.dirExists(ctx, dir)
 if err != nil {
-return nil, fmt.Errorf("list: %w", err)
+return nil, errors.Wrap(err, "list")
 }
 if !exists {
 return nil, fs.ErrorDirNotFound
@@ -869,7 +678,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 Name: newremote,
 Size: object.Size,
 ModTime: object.Time,
-precise: f.fLstTime,
 }
 o.info = info
 entries = append(entries, o)
@@ -883,19 +691,8 @@ func (f *Fs) Hashes() hash.Set {
 return 0
 }

-// Precision shows whether modified time is supported or not depending on the
-// FTP server capabilities, namely whether FTP server:
-// - accepts the MDTM command to get file time (fGetTime)
-// or supports MLSD returning precise file time in the list (fLstTime)
-// - accepts the MFMT command to set file time (fSetTime)
-// or non-standard form of the MDTM command (fSetTime, too)
-// used by VsFtpd for the same purpose (WritingMDTM)
-//
-// See "mdtm_write" in https://security.appspot.com/vsftpd/vsftpd_conf.html
+// Precision shows Modified Time not supported
 func (f *Fs) Precision() time.Duration {
-if (f.fGetTime || f.fLstTime) && f.fSetTime {
-return time.Second
-}
 return fs.ModTimeNotSupported
 }

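The removed Precision implementation above gates modification-time support on the capability flags probed in NewFs. A small self-contained restatement of that check, kept as a sketch (the flag names come from the diff and `fs.ModTimeNotSupported` is the rclone sentinel used above):

```go
package ftp

import (
	"time"

	"github.com/rclone/rclone/fs"
)

// precisionFor mirrors the removed Precision logic: times are only usable
// when the server can read them (MDTM or precise MLSD listings) and also
// write them (MFMT, or VsFtpd-style two-argument MDTM).
func precisionFor(fGetTime, fLstTime, fSetTime bool) time.Duration {
	if (fGetTime || fLstTime) && fSetTime {
		return time.Second
	}
	return fs.ModTimeNotSupported
}
```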
@@ -908,7 +705,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 // fs.Debugf(f, "Trying to put file %s", src.Remote())
 err := f.mkParentDir(ctx, src.Remote())
 if err != nil {
-return nil, fmt.Errorf("Put mkParentDir failed: %w", err)
+return nil, errors.Wrap(err, "Put mkParentDir failed")
 }
 o := &Object{
 fs: f,
@@ -926,18 +723,31 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
 // getInfo reads the FileInfo for a path
 func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err error) {
 // defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err)
-file, err := f.findItem(ctx, remote)
+dir := path.Dir(remote)
+base := path.Base(remote)
+
+c, err := f.getFtpConnection(ctx)
 if err != nil {
-return nil, err
-} else if file != nil {
-info := &FileInfo{
-Name: remote,
-Size: file.Size,
-ModTime: file.Time,
-precise: f.fLstTime,
-IsDir: file.Type == ftp.EntryTypeFolder,
+return nil, errors.Wrap(err, "getInfo")
+}
+files, err := c.List(f.dirFromStandardPath(dir))
+f.putFtpConnection(&c, err)
+if err != nil {
+return nil, translateErrorFile(err)
+}
+for i := range files {
+file := files[i]
+f.entryToStandard(file)
+if file.Name == base {
+info := &FileInfo{
+Name: remote,
+Size: file.Size,
+ModTime: file.Time,
+IsDir: file.Type == ftp.EntryTypeFolder,
+}
+return info, nil
 }
-return info, nil
 }
 return nil, fs.ErrorObjectNotFound
 }
@@ -955,7 +765,7 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
 }
 return fs.ErrorIsFile
 } else if err != fs.ErrorObjectNotFound {
-return fmt.Errorf("mkdir %q failed: %w", abspath, err)
+return errors.Wrapf(err, "mkdir %q failed", abspath)
 }
 parent := path.Dir(abspath)
 err = f.mkdir(ctx, parent)
@@ -964,11 +774,12 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
 }
 c, connErr := f.getFtpConnection(ctx)
 if connErr != nil {
-return fmt.Errorf("mkdir: %w", connErr)
+return errors.Wrap(connErr, "mkdir")
 }
 err = c.MakeDir(f.dirFromStandardPath(abspath))
 f.putFtpConnection(&c, err)
-if errX := textprotoError(err); errX != nil {
+switch errX := err.(type) {
+case *textproto.Error:
 switch errX.Code {
 case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
 err = nil
@@ -999,7 +810,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
 func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 c, err := f.getFtpConnection(ctx)
 if err != nil {
-return fmt.Errorf("Rmdir: %w", translateErrorFile(err))
+return errors.Wrap(translateErrorFile(err), "Rmdir")
 }
 err = c.RemoveDir(f.dirFromStandardPath(path.Join(f.root, dir)))
 f.putFtpConnection(&c, err)
@@ -1015,11 +826,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 }
 err := f.mkParentDir(ctx, remote)
 if err != nil {
-return nil, fmt.Errorf("Move mkParentDir failed: %w", err)
+return nil, errors.Wrap(err, "Move mkParentDir failed")
 }
 c, err := f.getFtpConnection(ctx)
 if err != nil {
-return nil, fmt.Errorf("Move: %w", err)
+return nil, errors.Wrap(err, "Move")
 }
 err = c.Rename(
 f.opt.Enc.FromStandardPath(path.Join(srcObj.fs.root, srcObj.remote)),
@@ -1027,11 +838,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 )
 f.putFtpConnection(&c, err)
 if err != nil {
-return nil, fmt.Errorf("Move Rename failed: %w", err)
+return nil, errors.Wrap(err, "Move Rename failed")
 }
 dstObj, err := f.NewObject(ctx, remote)
 if err != nil {
-return nil, fmt.Errorf("Move NewObject failed: %w", err)
+return nil, errors.Wrap(err, "Move NewObject failed")
 }
 return dstObj, nil
 }
@@ -1061,19 +872,19 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 }
 return fs.ErrorIsFile
 } else if err != fs.ErrorObjectNotFound {
-return fmt.Errorf("DirMove getInfo failed: %w", err)
+return errors.Wrapf(err, "DirMove getInfo failed")
 }

 // Make sure the parent directory exists
 err = f.mkdir(ctx, path.Dir(dstPath))
 if err != nil {
-return fmt.Errorf("DirMove mkParentDir dst failed: %w", err)
+return errors.Wrap(err, "DirMove mkParentDir dst failed")
 }

 // Do the move
 c, err := f.getFtpConnection(ctx)
 if err != nil {
-return fmt.Errorf("DirMove: %w", err)
+return errors.Wrap(err, "DirMove")
 }
 err = c.Rename(
 f.dirFromStandardPath(srcPath),
@@ -1081,7 +892,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 )
 f.putFtpConnection(&c, err)
 if err != nil {
-return fmt.Errorf("DirMove Rename(%q,%q) failed: %w", srcPath, dstPath, err)
+return errors.Wrapf(err, "DirMove Rename(%q,%q) failed", srcPath, dstPath)
 }
 return nil
 }
@@ -1118,41 +929,12 @@ func (o *Object) Size() int64 {

 // ModTime returns the modification time of the object
 func (o *Object) ModTime(ctx context.Context) time.Time {
-if !o.info.precise && o.fs.fGetTime {
-c, err := o.fs.getFtpConnection(ctx)
-if err == nil {
-path := path.Join(o.fs.root, o.remote)
-path = o.fs.opt.Enc.FromStandardPath(path)
-modTime, err := c.GetTime(path)
-if err == nil && o.info != nil {
-o.info.ModTime = modTime
-o.info.precise = true
-}
-o.fs.putFtpConnection(&c, err)
-}
-}
 return o.info.ModTime
 }

 // SetModTime sets the modification time of the object
 func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
-if !o.fs.fSetTime {
-fs.Debugf(o.fs, "SetModTime is not supported")
-return nil
-}
-c, err := o.fs.getFtpConnection(ctx)
-if err != nil {
-return err
-}
-path := path.Join(o.fs.root, o.remote)
-path = o.fs.opt.Enc.FromStandardPath(path)
-err = c.SetTime(path, modTime.In(time.UTC))
-if err == nil && o.info != nil {
-o.info.ModTime = modTime
-o.info.precise = true
-}
-o.fs.putFtpConnection(&c, err)
-return err
+return nil
 }

 // Storable returns a boolean as to whether this object is storable
@@ -1185,11 +967,7 @@ func (f *ftpReadCloser) Close() error {
 errchan <- f.rc.Close()
 }()
 // Wait for Close for up to 60 seconds by default
-closeTimeout := f.f.opt.CloseTimeout
-if closeTimeout == 0 {
-closeTimeout = fs.DurationOff
-}
-timer := time.NewTimer(time.Duration(closeTimeout))
+timer := time.NewTimer(time.Duration(f.f.opt.CloseTimeout))
 select {
 case err = <-errchan:
 timer.Stop()
@@ -1209,7 +987,8 @@ func (f *ftpReadCloser) Close() error {
 // mask the error if it was caused by a premature close
 // NB StatusAboutToSend is to work around a bug in pureftpd
 // See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
-if errX := textprotoError(err); errX != nil {
+switch errX := err.(type) {
+case *textproto.Error:
 switch errX.Code {
 case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend:
 err = nil
@@ -1235,33 +1014,22 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
 }
 }
 }
-var (
-fd *ftp.Response
-c  *ftp.ServerConn
-)
-err = o.fs.pacer.Call(func() (bool, error) {
-c, err = o.fs.getFtpConnection(ctx)
-if err != nil {
-return false, err // getFtpConnection has retries already
-}
-fd, err = c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
-if err != nil {
-o.fs.putFtpConnection(&c, err)
-}
-return shouldRetry(ctx, err)
-})
+c, err := o.fs.getFtpConnection(ctx)
 if err != nil {
-return nil, fmt.Errorf("open: %w", err)
+return nil, errors.Wrap(err, "open")
+}
+fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
+if err != nil {
+o.fs.putFtpConnection(&c, err)
+return nil, errors.Wrap(err, "open")
 }

 rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
 return rc, nil
 }

 // Update the already existing object
 //
-// Copy the reader into the object updating modTime and size.
+// Copy the reader into the object updating modTime and size
 //
 // The new object may have been created if an error is returned
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
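The Open hunk above shows one side wrapping the connection and `RetrFrom` call in `o.fs.pacer.Call`, which retries while the callback returns `(true, err)`. A hedged, self-contained sketch of that retry contract using rclone's pacer; `fs.NewPacer`, `pacer.Call` and `pacer.MinSleep` appear in this diff, while `pacer.NewDefault` is an assumption about the library used only for illustration:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/pacer"
)

func main() {
	ctx := context.Background()
	p := fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(10*time.Millisecond)))

	attempts := 0
	err := p.Call(func() (bool, error) {
		attempts++
		if attempts < 3 {
			// Returning true asks the pacer to retry after a back-off.
			return true, errors.New("transient FTP failure")
		}
		return false, nil // success, stop retrying
	})
	fmt.Println("attempts:", attempts, "err:", err)
}
```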
@@ -1283,30 +1051,19 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 }
 c, err := o.fs.getFtpConnection(ctx)
 if err != nil {
-return fmt.Errorf("Update: %w", err)
+return errors.Wrap(err, "Update")
 }
 err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
-// Ignore error 250 here - send by some servers
-if errX := textprotoError(err); errX != nil {
-switch errX.Code {
-case ftp.StatusRequestedFileActionOK:
-err = nil
-}
-}
 if err != nil {
 _ = c.Quit() // toss this connection to avoid sync errors
-// recycle connection in advance to let remove() find free token
-o.fs.putFtpConnection(nil, err)
 remove()
-return fmt.Errorf("update stor: %w", err)
+o.fs.putFtpConnection(nil, err)
+return errors.Wrap(err, "update stor")
 }
 o.fs.putFtpConnection(&c, nil)
-if err = o.SetModTime(ctx, src.ModTime(ctx)); err != nil {
-return fmt.Errorf("SetModTime: %w", err)
-}
 o.info, err = o.fs.getInfo(ctx, path)
 if err != nil {
-return fmt.Errorf("update getinfo: %w", err)
+return errors.Wrap(err, "update getinfo")
 }
 return nil
 }
@@ -1325,7 +1082,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
 } else {
 c, err := o.fs.getFtpConnection(ctx)
 if err != nil {
-return fmt.Errorf("Remove: %w", err)
+return errors.Wrap(err, "Remove")
 }
 err = c.Delete(o.fs.opt.Enc.FromStandardPath(path))
 o.fs.putFtpConnection(&c, err)

@@ -1,115 +0,0 @@
-package ftp
-
-import (
-"context"
-"fmt"
-"strings"
-"testing"
-"time"
-
-"github.com/rclone/rclone/fs"
-"github.com/rclone/rclone/fs/config/configmap"
-"github.com/rclone/rclone/fs/object"
-"github.com/rclone/rclone/fstest"
-"github.com/rclone/rclone/fstest/fstests"
-"github.com/rclone/rclone/lib/readers"
-"github.com/stretchr/testify/assert"
-"github.com/stretchr/testify/require"
-)
-
-type settings map[string]interface{}
-
-func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
-fsName := strings.Split(f.Name(), "{")[0] // strip off hash
-configMap := configmap.Simple{}
-for key, val := range opts {
-configMap[key] = fmt.Sprintf("%v", val)
-}
-remote := fmt.Sprintf("%s,%s:%s", fsName, configMap.String(), f.Root())
-fixFs, err := fs.NewFs(ctx, remote)
-require.NoError(t, err)
-return fixFs
-}
-
-// test that big file uploads do not cause network i/o timeout
-func (f *Fs) testUploadTimeout(t *testing.T) {
-const (
-fileSize = 100000000 // 100 MiB
-idleTimeout = 1 * time.Second // small because test server is local
-maxTime = 10 * time.Second // prevent test hangup
-)
-
-if testing.Short() {
-t.Skip("not running with -short")
-}
-
-ctx := context.Background()
-ci := fs.GetConfig(ctx)
-saveLowLevelRetries := ci.LowLevelRetries
-saveTimeout := ci.Timeout
-defer func() {
-ci.LowLevelRetries = saveLowLevelRetries
-ci.Timeout = saveTimeout
-}()
-ci.LowLevelRetries = 1
-ci.Timeout = idleTimeout
-
-upload := func(concurrency int, shutTimeout time.Duration) (obj fs.Object, err error) {
-fixFs := deriveFs(ctx, t, f, settings{
-"concurrency": concurrency,
-"shut_timeout": shutTimeout,
-})
-
-// Make test object
-fileTime := fstest.Time("2020-03-08T09:30:00.000000000Z")
-meta := object.NewStaticObjectInfo("upload-timeout.test", fileTime, int64(fileSize), true, nil, nil)
-data := readers.NewPatternReader(int64(fileSize))
-
-// Run upload and ensure maximum time
-done := make(chan bool)
-deadline := time.After(maxTime)
-go func() {
-obj, err = fixFs.Put(ctx, data, meta)
-done <- true
-}()
-select {
-case <-done:
-case <-deadline:
-t.Fatalf("Upload got stuck for %v !", maxTime)
-}
-
-return obj, err
-}
-
-// non-zero shut_timeout should fix i/o errors
-obj, err := upload(f.opt.Concurrency, time.Second)
-assert.NoError(t, err)
-assert.NotNil(t, obj)
-if obj != nil {
-_ = obj.Remove(ctx)
-}
-}
-
-// rclone must support precise time with ProFtpd and PureFtpd out of the box.
-// The VsFtpd server does not support the MFMT command to set file time like
-// other servers but by default supports the MDTM command in the non-standard
-// two-argument form for the same purpose.
-// See "mdtm_write" in https://security.appspot.com/vsftpd/vsftpd_conf.html
-func (f *Fs) testTimePrecision(t *testing.T) {
-name := f.Name()
-if pos := strings.Index(name, "{"); pos != -1 {
-name = name[:pos]
-}
-switch name {
-case "TestFTPProftpd", "TestFTPPureftpd", "TestFTPVsftpd":
-assert.LessOrEqual(t, f.Precision(), time.Second)
-}
-}
-
-// InternalTest dispatches all internal tests
-func (f *Fs) InternalTest(t *testing.T) {
-t.Run("UploadTimeout", f.testUploadTimeout)
-t.Run("TimePrecision", f.testTimePrecision)
-}
-
-var _ fstests.InternalTester = (*Fs)(nil)
@@ -9,27 +9,25 @@ import (
 "github.com/rclone/rclone/fstest/fstests"
 )

-// TestIntegration runs integration tests against rclone FTP server
+// TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
-fstests.Run(t, &fstests.Opt{
-RemoteName: "TestFTPRclone:",
-NilObject: (*ftp.Object)(nil),
-})
-}
-
-// TestIntegrationProftpd runs integration tests against proFTPd
-func TestIntegrationProftpd(t *testing.T) {
-if *fstest.RemoteName != "" {
-t.Skip("skipping as -remote is set")
-}
 fstests.Run(t, &fstests.Opt{
 RemoteName: "TestFTPProftpd:",
 NilObject: (*ftp.Object)(nil),
 })
 }

-// TestIntegrationPureftpd runs integration tests against pureFTPd
-func TestIntegrationPureftpd(t *testing.T) {
+func TestIntegration2(t *testing.T) {
+if *fstest.RemoteName != "" {
+t.Skip("skipping as -remote is set")
+}
+fstests.Run(t, &fstests.Opt{
+RemoteName: "TestFTPRclone:",
+NilObject: (*ftp.Object)(nil),
+})
+}
+
+func TestIntegration3(t *testing.T) {
 if *fstest.RemoteName != "" {
 t.Skip("skipping as -remote is set")
 }
@@ -39,13 +37,12 @@ func TestIntegrationPureftpd(t *testing.T) {
 })
 }

-// TestIntegrationVsftpd runs integration tests against vsFTPd
-func TestIntegrationVsftpd(t *testing.T) {
-if *fstest.RemoteName != "" {
-t.Skip("skipping as -remote is set")
-}
-fstests.Run(t, &fstests.Opt{
-RemoteName: "TestFTPVsftpd:",
-NilObject: (*ftp.Object)(nil),
-})
-}
+// func TestIntegration4(t *testing.T) {
+// if *fstest.RemoteName != "" {
+// t.Skip("skipping as -remote is set")
+// }
+// fstests.Run(t, &fstests.Opt{
+// RemoteName: "TestFTPVsftpd:",
+// NilObject: (*ftp.Object)(nil),
+// })
+// }

@@ -16,17 +16,16 @@ import (
 "context"
 "encoding/base64"
 "encoding/hex"
-"errors"
 "fmt"
 "io"
+"io/ioutil"
+"log"
 "net/http"
-"os"
 "path"
-"strconv"
 "strings"
-"sync"
 "time"

+"github.com/pkg/errors"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/config"
 "github.com/rclone/rclone/fs/config/configmap"
@@ -44,7 +43,6 @@ import (
 "golang.org/x/oauth2"
 "golang.org/x/oauth2/google"
 "google.golang.org/api/googleapi"
-option "google.golang.org/api/option"

 // NOTE: This API is deprecated
 storage "google.golang.org/api/storage/v1"
@@ -53,10 +51,10 @@ const (
 const (
 rcloneClientID = "202264815644.apps.googleusercontent.com"
 rcloneEncryptedClientSecret = "Uj7C9jGfb9gmeaV70Lh058cNkWvepr-Es9sBm0zdgil7JaOWF1VySw"
-timeFormat = time.RFC3339Nano
-metaMtime = "mtime" // key to store mtime in metadata
-metaMtimeGsutil = "goog-reserved-file-mtime" // key used by GSUtil to store mtime in metadata
+timeFormatIn = time.RFC3339
+timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
+metaMtime = "mtime" // key to store mtime under in metadata
 listChunks = 1000 // chunk size to read directory listings
 minSleep = 10 * time.Millisecond
 )

@@ -67,7 +65,7 @@ var (
 Endpoint: google.Endpoint,
 ClientID: rcloneClientID,
 ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
-RedirectURL: oauthutil.RedirectURL,
+RedirectURL: oauthutil.TitleBarRedirectURL,
 }
 )

@@ -78,78 +76,72 @@ func init() {
 Prefix: "gcs",
 Description: "Google Cloud Storage (this is not Google Drive)",
 NewFs: NewFs,
-Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
+Config: func(ctx context.Context, name string, m configmap.Mapper) {
 saFile, _ := m.Get("service_account_file")
 saCreds, _ := m.Get("service_account_credentials")
 anonymous, _ := m.Get("anonymous")
-envAuth, _ := m.Get("env_auth")
-if saFile != "" || saCreds != "" || anonymous == "true" || envAuth == "true" {
-return nil, nil
+if saFile != "" || saCreds != "" || anonymous == "true" {
+return
+}
+err := oauthutil.Config(ctx, "google cloud storage", name, m, storageConfig, nil)
+if err != nil {
+log.Fatalf("Failed to configure token: %v", err)
 }
-return oauthutil.ConfigOut("", &oauthutil.Options{
-OAuth2Config: storageConfig,
-})
 },
 Options: append(oauthutil.SharedOptions, []fs.Option{{
 Name: "project_number",
-Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
-Sensitive: true,
-}, {
-Name: "user_project",
-Help: "User project.\n\nOptional - needed only for requester pays.",
-Sensitive: true,
+Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
 }, {
 Name: "service_account_file",
-Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
+Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
 }, {
 Name: "service_account_credentials",
-Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
+Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
 Hide: fs.OptionHideBoth,
-Sensitive: true,
 }, {
 Name: "anonymous",
-Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
+Help: "Access public buckets and objects without credentials\nSet to 'true' if you just want to download files and don't configure credentials.",
 Default: false,
 }, {
 Name: "object_acl",
 Help: "Access Control List for new objects.",
 Examples: []fs.OptionExample{{
 Value: "authenticatedRead",
-Help: "Object owner gets OWNER access.\nAll Authenticated Users get READER access.",
+Help: "Object owner gets OWNER access, and all Authenticated Users get READER access.",
 }, {
 Value: "bucketOwnerFullControl",
-Help: "Object owner gets OWNER access.\nProject team owners get OWNER access.",
+Help: "Object owner gets OWNER access, and project team owners get OWNER access.",
 }, {
 Value: "bucketOwnerRead",
-Help: "Object owner gets OWNER access.\nProject team owners get READER access.",
+Help: "Object owner gets OWNER access, and project team owners get READER access.",
 }, {
 Value: "private",
-Help: "Object owner gets OWNER access.\nDefault if left blank.",
+Help: "Object owner gets OWNER access [default if left blank].",
 }, {
 Value: "projectPrivate",
-Help: "Object owner gets OWNER access.\nProject team members get access according to their roles.",
+Help: "Object owner gets OWNER access, and project team members get access according to their roles.",
 }, {
 Value: "publicRead",
-Help: "Object owner gets OWNER access.\nAll Users get READER access.",
+Help: "Object owner gets OWNER access, and all Users get READER access.",
 }},
 }, {
 Name: "bucket_acl",
 Help: "Access Control List for new buckets.",
 Examples: []fs.OptionExample{{
 Value: "authenticatedRead",
-Help: "Project team owners get OWNER access.\nAll Authenticated Users get READER access.",
+Help: "Project team owners get OWNER access, and all Authenticated Users get READER access.",
 }, {
 Value: "private",
-Help: "Project team owners get OWNER access.\nDefault if left blank.",
+Help: "Project team owners get OWNER access [default if left blank].",
 }, {
 Value: "projectPrivate",
 Help: "Project team members get access according to their roles.",
 }, {
 Value: "publicRead",
-Help: "Project team owners get OWNER access.\nAll Users get READER access.",
+Help: "Project team owners get OWNER access, and all Users get READER access.",
 }, {
 Value: "publicReadWrite",
-Help: "Project team owners get OWNER access.\nAll Users get WRITER access.",
+Help: "Project team owners get OWNER access, and all Users get WRITER access.",
 }},
 }, {
 Name: "bucket_policy_only",
@@ -172,112 +164,64 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
 Help: "Location for the newly created buckets.",
 Examples: []fs.OptionExample{{
 Value: "",
-Help: "Empty for default location (US)",
+Help: "Empty for default location (US).",
 }, {
 Value: "asia",
-Help: "Multi-regional location for Asia",
+Help: "Multi-regional location for Asia.",
 }, {
 Value: "eu",
-Help: "Multi-regional location for Europe",
+Help: "Multi-regional location for Europe.",
 }, {
 Value: "us",
-Help: "Multi-regional location for United States",
+Help: "Multi-regional location for United States.",
 }, {
 Value: "asia-east1",
-Help: "Taiwan",
+Help: "Taiwan.",
 }, {
 Value: "asia-east2",
-Help: "Hong Kong",
+Help: "Hong Kong.",
 }, {
 Value: "asia-northeast1",
-Help: "Tokyo",
-}, {
-Value: "asia-northeast2",
-Help: "Osaka",
-}, {
-Value: "asia-northeast3",
-Help: "Seoul",
+Help: "Tokyo.",
 }, {
 Value: "asia-south1",
-Help: "Mumbai",
-}, {
-Value: "asia-south2",
-Help: "Delhi",
+Help: "Mumbai.",
 }, {
 Value: "asia-southeast1",
-Help: "Singapore",
-}, {
-Value: "asia-southeast2",
-Help: "Jakarta",
+Help: "Singapore.",
 }, {
 Value: "australia-southeast1",
-Help: "Sydney",
-}, {
-Value: "australia-southeast2",
-Help: "Melbourne",
+Help: "Sydney.",
 }, {
 Value: "europe-north1",
-Help: "Finland",
+Help: "Finland.",
 }, {
 Value: "europe-west1",
-Help: "Belgium",
+Help: "Belgium.",
 }, {
 Value: "europe-west2",
-Help: "London",
+Help: "London.",
 }, {
 Value: "europe-west3",
-Help: "Frankfurt",
+Help: "Frankfurt.",
 }, {
 Value: "europe-west4",
-Help: "Netherlands",
-}, {
-Value: "europe-west6",
-Help: "Zürich",
-}, {
-Value: "europe-central2",
-Help: "Warsaw",
+Help: "Netherlands.",
 }, {
 Value: "us-central1",
-Help: "Iowa",
+Help: "Iowa.",
 }, {
 Value: "us-east1",
-Help: "South Carolina",
+Help: "South Carolina.",
 }, {
 Value: "us-east4",
-Help: "Northern Virginia",
+Help: "Northern Virginia.",
 }, {
 Value: "us-west1",
-Help: "Oregon",
+Help: "Oregon.",
 }, {
 Value: "us-west2",
-Help: "California",
-}, {
-Value: "us-west3",
-Help: "Salt Lake City",
-}, {
-Value: "us-west4",
-Help: "Las Vegas",
-}, {
-Value: "northamerica-northeast1",
-Help: "Montréal",
-}, {
-Value: "northamerica-northeast2",
-Help: "Toronto",
-}, {
-Value: "southamerica-east1",
-Help: "São Paulo",
-}, {
-Value: "southamerica-west1",
-Help: "Santiago",
-}, {
-Value: "asia1",
-Help: "Dual region: asia-northeast1 and asia-northeast2.",
-}, {
-Value: "eur4",
-Help: "Dual region: europe-north1 and europe-west4.",
-}, {
-Value: "nam4",
-Help: "Dual region: us-central1 and us-east1.",
+Help: "California.",
 }},
 }, {
 Name: "storage_class",
@@ -304,41 +248,6 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
 Value: "DURABLE_REDUCED_AVAILABILITY",
 Help: "Durable reduced availability storage class",
 }},
-}, {
-Name: "directory_markers",
-Default: false,
-Advanced: true,
-Help: `Upload an empty object with a trailing slash when a new directory is created
-
-Empty folders are unsupported for bucket based remotes, this option creates an empty
-object ending with "/", to persist the folder.
-`,
-}, {
-Name: "no_check_bucket",
-Help: `If set, don't attempt to check the bucket exists or create it.
-
-This can be useful when trying to minimise the number of transactions
-rclone does if you know the bucket exists already.
-`,
-Default: false,
-Advanced: true,
-}, {
-Name: "decompress",
-Help: `If set this will decompress gzip encoded objects.
-
-It is possible to upload objects to GCS with "Content-Encoding: gzip"
-set. Normally rclone will download these files as compressed objects.
-
-If this flag is set then rclone will decompress these files with
-"Content-Encoding: gzip" as they are received. This means that rclone
-can't check the size and hash but the file contents will be decompressed.
-`,
-Advanced: true,
-Default: false,
-}, {
-Name: "endpoint",
-Help: "Endpoint for the service.\n\nLeave blank normally.",
-Advanced: true,
 }, {
 Name: config.ConfigEncoding,
 Help: config.ConfigEncodingHelp,
@@ -346,17 +255,6 @@ can't check the size and hash but the file contents will be decompressed.
 Default: (encoder.Base |
 encoder.EncodeCrLf |
 encoder.EncodeInvalidUtf8),
-}, {
-Name: "env_auth",
-Help: "Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
-Default: false,
-Examples: []fs.OptionExample{{
-Value: "false",
-Help: "Enter credentials in the next step.",
-}, {
-Value: "true",
-Help: "Get GCP IAM credentials from the environment (env vars or IAM).",
-}},
 }}...),
 })
 }
@@ -364,7 +262,6 @@ can't check the size and hash but the file contents will be decompressed.
 // Options defines the configuration for this backend
 type Options struct {
 ProjectNumber string `config:"project_number"`
-UserProject string `config:"user_project"`
 ServiceAccountFile string `config:"service_account_file"`
 ServiceAccountCredentials string `config:"service_account_credentials"`
 Anonymous bool `config:"anonymous"`
@@ -373,27 +270,21 @@ type Options struct {
 BucketPolicyOnly bool `config:"bucket_policy_only"`
 Location string `config:"location"`
 StorageClass string `config:"storage_class"`
-NoCheckBucket bool `config:"no_check_bucket"`
-Decompress bool `config:"decompress"`
-Endpoint string `config:"endpoint"`
 Enc encoder.MultiEncoder `config:"encoding"`
-EnvAuth bool `config:"env_auth"`
-DirectoryMarkers bool `config:"directory_markers"`
 }

 // Fs represents a remote storage server
 type Fs struct {
 name string // name of this remote
 root string // the path we are working on if any
 opt Options // parsed options
 features *fs.Features // optional features
 svc *storage.Service // the connection to the storage server
 client *http.Client // authorized client
 rootBucket string // bucket part of root (if any)
 rootDirectory string // directory part of root (if any)
 cache *bucket.Cache // cache of bucket status
 pacer *fs.Pacer // To pace the API calls
-warnCompressed sync.Once // warn once about compressed files
 }

 // Object describes a storage object
@@ -407,7 +298,6 @@ type Object struct {
 bytes int64 // Bytes in the object
 modTime time.Time // Modified time of the object
 mimeType string
-gzipped bool // set if object has Content-Encoding: gzip
 }

 // ------------------------------------------------------------
@@ -425,7 +315,7 @@ func (f *Fs) Root() string {
 // String converts this Fs to a string
 func (f *Fs) String() string {
 if f.rootBucket == "" {
-return "GCS root"
+return fmt.Sprintf("GCS root")
 }
 if f.rootDirectory == "" {
 return fmt.Sprintf("GCS bucket %s", f.rootBucket)
@@ -474,7 +364,7 @@ func parsePath(path string) (root string) {
 // split returns bucket and bucketPath from the rootRelativePath
 // relative to f.root
 func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
-bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
+bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
 return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
 }

@@ -486,7 +376,7 @@ func (o *Object) split() (bucket, bucketPath string) {
 func getServiceAccountClient(ctx context.Context, credentialsData []byte) (*http.Client, error) {
 conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
 if err != nil {
-return nil, fmt.Errorf("error processing credentials: %w", err)
+return nil, errors.Wrap(err, "error processing credentials")
 }
 ctxWithSpecialClient := oauthutil.Context(ctx, fshttp.NewClient(ctx))
 return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
@@ -517,9 +407,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

 // try loading service account credentials from env variable, then from a file
 if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
-loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
+loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
 if err != nil {
-return nil, fmt.Errorf("error opening service account credentials file: %w", err)
+return nil, errors.Wrap(err, "error opening service account credentials file")
 }
 opt.ServiceAccountCredentials = string(loadedCreds)
 }
@@ -528,12 +418,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 } else if opt.ServiceAccountCredentials != "" {
 oAuthClient, err = getServiceAccountClient(ctx, []byte(opt.ServiceAccountCredentials))
 if err != nil {
-return nil, fmt.Errorf("failed configuring Google Cloud Storage Service Account: %w", err)
-}
-} else if opt.EnvAuth {
-oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
-if err != nil {
-return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
+return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
 }
 } else {
 oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
@@ -541,7 +426,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 ctx := context.Background()
 oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
 if err != nil {
-return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
+return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
 }
 }
 }
@@ -550,7 +435,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 name: name,
 root: root,
 opt: *opt,
-pacer: fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep))),
+pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
 cache: bucket.NewCache(),
 }
 f.setRoot(root)
@@ -560,30 +445,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 BucketBased: true,
 BucketBasedRootOK: true,
 }).Fill(ctx, f)
-if opt.DirectoryMarkers {
-f.features.CanHaveEmptyDirectories = true
-}

 // Create a new authorized Drive client.
 f.client = oAuthClient
-gcsOpts := []option.ClientOption{option.WithHTTPClient(f.client)}
-if opt.Endpoint != "" {
-gcsOpts = append(gcsOpts, option.WithEndpoint(opt.Endpoint))
-}
-f.svc, err = storage.NewService(context.Background(), gcsOpts...)
+f.svc, err = storage.New(f.client)
 if err != nil {
-return nil, fmt.Errorf("couldn't create Google Cloud Storage client: %w", err)
+return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
 }

 if f.rootBucket != "" && f.rootDirectory != "" {
 // Check to see if the object exists
 encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
 err = f.pacer.Call(func() (bool, error) {
-get := f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx)
-if f.opt.UserProject != "" {
-get = get.UserProject(f.opt.UserProject)
-}
-_, err = get.Do()
+_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
 return shouldRetry(ctx, err)
 })
 if err == nil {
@@ -631,7 +505,7 @@ type listFn func(remote string, object *storage.Object, isDirectory bool) error
 //
 // dir is the starting directory, "" for root
 //
-// Set recurse to read sub directories.
+// Set recurse to read sub directories
 //
 // The remote has prefix removed from it and if addBucket is set
 // then it adds the bucket to the start.
@@ -643,13 +517,9 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 directory += "/"
 }
 list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks)
-if f.opt.UserProject != "" {
-list = list.UserProject(f.opt.UserProject)
-}
 if !recurse {
 list = list.Delimiter("/")
 }
-foundItems := 0
 for {
 var objects *storage.Objects
 err = f.pacer.Call(func() (bool, error) {
@@ -665,7 +535,6 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 return err
 }
 if !recurse {
-foundItems += len(objects.Prefixes)
 var object storage.Object
 for _, remote := range objects.Prefixes {
 if !strings.HasSuffix(remote, "/") {
@@ -686,29 +555,22 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 }
 }
 }
-foundItems += len(objects.Items)
 for _, object := range objects.Items {
 remote := f.opt.Enc.ToStandardPath(object.Name)
 if !strings.HasPrefix(remote, prefix) {
 fs.Logf(f, "Odd name received %q", object.Name)
 continue
 }
-isDirectory := remote == "" || strings.HasSuffix(remote, "/")
-// is this a directory marker?
-if isDirectory {
-// Don't insert the root directory
-if remote == directory {
-continue
-}
-// process directory markers as directories
-remote = strings.TrimRight(remote, "/")
-}
 remote = remote[len(prefix):]
+isDirectory := remote == "" || strings.HasSuffix(remote, "/")
 if addBucket {
 remote = path.Join(bucket, remote)
 }
-err = fn(remote, object, isDirectory)
+// is this a directory marker?
+if isDirectory {
+continue // skip directory marker
+}
+err = fn(remote, object, false)
 if err != nil {
 return err
 }
@@ -718,17 +580,6 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
|
|||||||
}
|
}
|
||||||
list.PageToken(objects.NextPageToken)
|
list.PageToken(objects.NextPageToken)
|
||||||
}
|
}
|
||||||
if f.opt.DirectoryMarkers && foundItems == 0 && directory != "" {
|
|
||||||
// Determine whether the directory exists or not by whether it has a marker
|
|
||||||
_, err := f.readObjectInfo(ctx, bucket, directory)
|
|
||||||
if err != nil {
|
|
||||||
if err == fs.ErrorObjectNotFound {
|
|
||||||
return fs.ErrorDirNotFound
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -772,9 +623,6 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
|
|||||||
return nil, errors.New("can't list buckets without project number")
|
return nil, errors.New("can't list buckets without project number")
|
||||||
}
|
}
|
||||||
listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks)
|
listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks)
|
||||||
if f.opt.UserProject != "" {
|
|
||||||
listBuckets = listBuckets.UserProject(f.opt.UserProject)
|
|
||||||
}
|
|
||||||
for {
|
for {
|
||||||
var buckets *storage.Buckets
|
var buckets *storage.Buckets
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
@@ -875,7 +723,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
|
|||||||
|
|
||||||
// Put the object into the bucket
|
// Put the object into the bucket
|
||||||
//
|
//
|
||||||
// Copy the reader in to the new object which is returned.
|
// Copy the reader in to the new object which is returned
|
||||||
//
|
//
|
||||||
// The new object may have been created if an error is returned
|
// The new object may have been created if an error is returned
|
||||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
@@ -892,69 +740,10 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
|
|||||||
return f.Put(ctx, in, src, options...)
|
return f.Put(ctx, in, src, options...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create directory marker file and parents
|
|
||||||
func (f *Fs) createDirectoryMarker(ctx context.Context, bucket, dir string) error {
|
|
||||||
if !f.opt.DirectoryMarkers || bucket == "" {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Object to be uploaded
|
|
||||||
o := &Object{
|
|
||||||
fs: f,
|
|
||||||
modTime: time.Now(),
|
|
||||||
}
|
|
||||||
|
|
||||||
for {
|
|
||||||
_, bucketPath := f.split(dir)
|
|
||||||
// Don't create the directory marker if it is the bucket or at the very root
|
|
||||||
if bucketPath == "" {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
o.remote = dir + "/"
|
|
||||||
|
|
||||||
// Check to see if object already exists
|
|
||||||
_, err := o.readObjectInfo(ctx)
|
|
||||||
if err == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Upload it if not
|
|
||||||
fs.Debugf(o, "Creating directory marker")
|
|
||||||
content := io.Reader(strings.NewReader(""))
|
|
||||||
err = o.Update(ctx, content, o)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("creating directory marker failed: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Now check parent directory exists
|
|
||||||
dir = path.Dir(dir)
|
|
||||||
if dir == "/" || dir == "." {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mkdir creates the bucket if it doesn't exist
|
// Mkdir creates the bucket if it doesn't exist
|
||||||
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
|
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
|
||||||
bucket, _ := f.split(dir)
|
bucket, _ := f.split(dir)
|
||||||
e := f.checkBucket(ctx, bucket)
|
return f.makeBucket(ctx, bucket)
|
||||||
if e != nil {
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
return f.createDirectoryMarker(ctx, bucket, dir)
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// mkdirParent creates the parent bucket/directory if it doesn't exist
|
|
||||||
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
|
|
||||||
remote = strings.TrimRight(remote, "/")
|
|
||||||
dir := path.Dir(remote)
|
|
||||||
if dir == "/" || dir == "." {
|
|
||||||
dir = ""
|
|
||||||
}
|
|
||||||
return f.Mkdir(ctx, dir)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// makeBucket creates the bucket if it doesn't exist
|
// makeBucket creates the bucket if it doesn't exist
|
||||||
@@ -963,11 +752,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
|
|||||||
// List something from the bucket to see if it exists. Doing it like this enables the use of a
|
// List something from the bucket to see if it exists. Doing it like this enables the use of a
|
||||||
// service account that only has the "Storage Object Admin" role. See #2193 for details.
|
// service account that only has the "Storage Object Admin" role. See #2193 for details.
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
list := f.svc.Objects.List(bucket).MaxResults(1).Context(ctx)
|
_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
|
||||||
if f.opt.UserProject != "" {
|
|
||||||
list = list.UserProject(f.opt.UserProject)
|
|
||||||
}
|
|
||||||
_, err = list.Do()
|
|
||||||
return shouldRetry(ctx, err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err == nil {
|
if err == nil {
|
||||||
@@ -975,10 +760,10 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
|
|||||||
return nil
|
return nil
|
||||||
} else if gErr, ok := err.(*googleapi.Error); ok {
|
} else if gErr, ok := err.(*googleapi.Error); ok {
|
||||||
if gErr.Code != http.StatusNotFound {
|
if gErr.Code != http.StatusNotFound {
|
||||||
return fmt.Errorf("failed to get bucket: %w", err)
|
return errors.Wrap(err, "failed to get bucket")
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
return fmt.Errorf("failed to get bucket: %w", err)
|
return errors.Wrap(err, "failed to get bucket")
|
||||||
}
|
}
|
||||||
|
|
||||||
if f.opt.ProjectNumber == "" {
|
if f.opt.ProjectNumber == "" {
|
||||||
@@ -1002,52 +787,24 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
|
|||||||
if !f.opt.BucketPolicyOnly {
|
if !f.opt.BucketPolicyOnly {
|
||||||
insertBucket.PredefinedAcl(f.opt.BucketACL)
|
insertBucket.PredefinedAcl(f.opt.BucketACL)
|
||||||
}
|
}
|
||||||
insertBucket = insertBucket.Context(ctx)
|
_, err = insertBucket.Context(ctx).Do()
|
||||||
if f.opt.UserProject != "" {
|
|
||||||
insertBucket = insertBucket.UserProject(f.opt.UserProject)
|
|
||||||
}
|
|
||||||
_, err = insertBucket.Do()
|
|
||||||
return shouldRetry(ctx, err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
}, nil)
|
}, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// checkBucket creates the bucket if it doesn't exist unless NoCheckBucket is true
|
|
||||||
func (f *Fs) checkBucket(ctx context.Context, bucket string) error {
|
|
||||||
if f.opt.NoCheckBucket {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return f.makeBucket(ctx, bucket)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rmdir deletes the bucket if the fs is at the root
|
// Rmdir deletes the bucket if the fs is at the root
|
||||||
//
|
//
|
||||||
// Returns an error if it isn't empty: Error 409: The bucket you tried
|
// Returns an error if it isn't empty: Error 409: The bucket you tried
|
||||||
// to delete was not empty.
|
// to delete was not empty.
|
||||||
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
|
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
|
||||||
bucket, directory := f.split(dir)
|
bucket, directory := f.split(dir)
|
||||||
// Remove directory marker file
|
|
||||||
if f.opt.DirectoryMarkers && bucket != "" && dir != "" {
|
|
||||||
o := &Object{
|
|
||||||
fs: f,
|
|
||||||
remote: dir + "/",
|
|
||||||
}
|
|
||||||
fs.Debugf(o, "Removing directory marker")
|
|
||||||
err := o.Remove(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("removing directory marker failed: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if bucket == "" || directory != "" {
|
if bucket == "" || directory != "" {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return f.cache.Remove(bucket, func() error {
|
return f.cache.Remove(bucket, func() error {
|
||||||
return f.pacer.Call(func() (bool, error) {
|
return f.pacer.Call(func() (bool, error) {
|
||||||
deleteBucket := f.svc.Buckets.Delete(bucket).Context(ctx)
|
err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
|
||||||
if f.opt.UserProject != "" {
|
|
||||||
deleteBucket = deleteBucket.UserProject(f.opt.UserProject)
|
|
||||||
}
|
|
||||||
err = deleteBucket.Do()
|
|
||||||
return shouldRetry(ctx, err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
@@ -1060,16 +817,16 @@ func (f *Fs) Precision() time.Duration {
|
|||||||
|
|
||||||
// Copy src to this remote using server-side copy operations.
|
// Copy src to this remote using server-side copy operations.
|
||||||
//
|
//
|
||||||
// This is stored with the remote path given.
|
// This is stored with the remote path given
|
||||||
//
|
//
|
||||||
// It returns the destination Object and a possible error.
|
// It returns the destination Object and a possible error
|
||||||
//
|
//
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
// If it isn't possible then return fs.ErrorCantCopy
|
// If it isn't possible then return fs.ErrorCantCopy
|
||||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||||
dstBucket, dstPath := f.split(remote)
|
dstBucket, dstPath := f.split(remote)
|
||||||
err := f.mkdirParent(ctx, remote)
|
err := f.makeBucket(ctx, dstBucket)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -1093,11 +850,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
var rewriteResponse *storage.RewriteResponse
|
var rewriteResponse *storage.RewriteResponse
|
||||||
for {
|
for {
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
err = f.pacer.Call(func() (bool, error) {
|
||||||
rewriteRequest = rewriteRequest.Context(ctx)
|
rewriteResponse, err = rewriteRequest.Context(ctx).Do()
|
||||||
if f.opt.UserProject != "" {
|
|
||||||
rewriteRequest.UserProject(f.opt.UserProject)
|
|
||||||
}
|
|
||||||
rewriteResponse, err = rewriteRequest.Do()
|
|
||||||
return shouldRetry(ctx, err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -1157,7 +910,6 @@ func (o *Object) setMetaData(info *storage.Object) {
|
|||||||
o.url = info.MediaLink
|
o.url = info.MediaLink
|
||||||
o.bytes = int64(info.Size)
|
o.bytes = int64(info.Size)
|
||||||
o.mimeType = info.ContentType
|
o.mimeType = info.ContentType
|
||||||
o.gzipped = info.ContentEncoding == "gzip"
|
|
||||||
|
|
||||||
// Read md5sum
|
// Read md5sum
|
||||||
md5sumData, err := base64.StdEncoding.DecodeString(info.Md5Hash)
|
md5sumData, err := base64.StdEncoding.DecodeString(info.Md5Hash)
|
||||||
@@ -1170,7 +922,7 @@ func (o *Object) setMetaData(info *storage.Object) {
|
|||||||
// read mtime out of metadata if available
|
// read mtime out of metadata if available
|
||||||
mtimeString, ok := info.Metadata[metaMtime]
|
mtimeString, ok := info.Metadata[metaMtime]
|
||||||
if ok {
|
if ok {
|
||||||
modTime, err := time.Parse(timeFormat, mtimeString)
|
modTime, err := time.Parse(timeFormatIn, mtimeString)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
o.modTime = modTime
|
o.modTime = modTime
|
||||||
return
|
return
|
||||||
@@ -1178,46 +930,20 @@ func (o *Object) setMetaData(info *storage.Object) {
|
|||||||
fs.Debugf(o, "Failed to read mtime from metadata: %s", err)
|
fs.Debugf(o, "Failed to read mtime from metadata: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fallback to GSUtil mtime
|
|
||||||
mtimeGsutilString, ok := info.Metadata[metaMtimeGsutil]
|
|
||||||
if ok {
|
|
||||||
unixTimeSec, err := strconv.ParseInt(mtimeGsutilString, 10, 64)
|
|
||||||
if err == nil {
|
|
||||||
o.modTime = time.Unix(unixTimeSec, 0)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
fs.Debugf(o, "Failed to read GSUtil mtime from metadata: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fallback to the Updated time
|
// Fallback to the Updated time
|
||||||
modTime, err := time.Parse(timeFormat, info.Updated)
|
modTime, err := time.Parse(timeFormatIn, info.Updated)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Logf(o, "Bad time decode: %v", err)
|
fs.Logf(o, "Bad time decode: %v", err)
|
||||||
} else {
|
} else {
|
||||||
o.modTime = modTime
|
o.modTime = modTime
|
||||||
}
|
}
|
||||||
|
|
||||||
// If gunzipping then size and md5sum are unknown
|
|
||||||
if o.gzipped && o.fs.opt.Decompress {
|
|
||||||
o.bytes = -1
|
|
||||||
o.md5sum = ""
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// readObjectInfo reads the definition for an object
|
// readObjectInfo reads the definition for an object
|
||||||
func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) {
|
func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) {
|
||||||
bucket, bucketPath := o.split()
|
bucket, bucketPath := o.split()
|
||||||
return o.fs.readObjectInfo(ctx, bucket, bucketPath)
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
}
|
object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
|
||||||
|
|
||||||
// readObjectInfo reads the definition for an object
|
|
||||||
func (f *Fs) readObjectInfo(ctx context.Context, bucket, bucketPath string) (object *storage.Object, err error) {
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
get := f.svc.Objects.Get(bucket, bucketPath).Context(ctx)
|
|
||||||
if f.opt.UserProject != "" {
|
|
||||||
get = get.UserProject(f.opt.UserProject)
|
|
||||||
}
|
|
||||||
object, err = get.Do()
|
|
||||||
return shouldRetry(ctx, err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -1262,8 +988,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
|
|||||||
// Returns metadata for an object
|
// Returns metadata for an object
|
||||||
func metadataFromModTime(modTime time.Time) map[string]string {
|
func metadataFromModTime(modTime time.Time) map[string]string {
|
||||||
metadata := make(map[string]string, 1)
|
metadata := make(map[string]string, 1)
|
||||||
metadata[metaMtime] = modTime.Format(timeFormat)
|
metadata[metaMtime] = modTime.Format(timeFormatOut)
|
||||||
metadata[metaMtimeGsutil] = strconv.FormatInt(modTime.Unix(), 10)
|
|
||||||
return metadata
|
return metadata
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1275,11 +1000,11 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// Add the mtime to the existing metadata
|
// Add the mtime to the existing metadata
|
||||||
|
mtime := modTime.Format(timeFormatOut)
|
||||||
if object.Metadata == nil {
|
if object.Metadata == nil {
|
||||||
object.Metadata = make(map[string]string, 1)
|
object.Metadata = make(map[string]string, 1)
|
||||||
}
|
}
|
||||||
object.Metadata[metaMtime] = modTime.Format(timeFormat)
|
object.Metadata[metaMtime] = mtime
|
||||||
object.Metadata[metaMtimeGsutil] = strconv.FormatInt(modTime.Unix(), 10)
|
|
||||||
// Copy the object to itself to update the metadata
|
// Copy the object to itself to update the metadata
|
||||||
// Using PATCH requires too many permissions
|
// Using PATCH requires too many permissions
|
||||||
bucket, bucketPath := o.split()
|
bucket, bucketPath := o.split()
|
||||||
@@ -1289,11 +1014,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
|
|||||||
if !o.fs.opt.BucketPolicyOnly {
|
if !o.fs.opt.BucketPolicyOnly {
|
||||||
copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
|
copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
|
||||||
}
|
}
|
||||||
copyObject = copyObject.Context(ctx)
|
newObject, err = copyObject.Context(ctx).Do()
|
||||||
if o.fs.opt.UserProject != "" {
|
|
||||||
copyObject = copyObject.UserProject(o.fs.opt.UserProject)
|
|
||||||
}
|
|
||||||
newObject, err = copyObject.Do()
|
|
||||||
return shouldRetry(ctx, err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -1310,27 +1031,11 @@ func (o *Object) Storable() bool {
|
|||||||
|
|
||||||
// Open an object for read
|
// Open an object for read
|
||||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||||
url := o.url
|
req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil)
|
||||||
if o.fs.opt.UserProject != "" {
|
|
||||||
url += "&userProject=" + o.fs.opt.UserProject
|
|
||||||
}
|
|
||||||
req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
fs.FixRangeOption(options, o.bytes)
|
fs.FixRangeOption(options, o.bytes)
|
||||||
if o.gzipped && !o.fs.opt.Decompress {
|
|
||||||
// Allow files which are stored on the cloud storage system
|
|
||||||
// compressed to be downloaded without being decompressed. Note
|
|
||||||
// that setting this here overrides the automatic decompression
|
|
||||||
// in the Transport.
|
|
||||||
//
|
|
||||||
// See: https://cloud.google.com/storage/docs/transcoding
|
|
||||||
req.Header.Set("Accept-Encoding", "gzip")
|
|
||||||
o.fs.warnCompressed.Do(func() {
|
|
||||||
fs.Logf(o, "Not decompressing 'Content-Encoding: gzip' compressed file. Use --gcs-decompress to override")
|
|
||||||
})
|
|
||||||
}
|
|
||||||
fs.OpenOptionAddHTTPHeaders(req.Header, options)
|
fs.OpenOptionAddHTTPHeaders(req.Header, options)
|
||||||
var res *http.Response
|
var res *http.Response
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
@@ -1349,7 +1054,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
|||||||
_, isRanging := req.Header["Range"]
|
_, isRanging := req.Header["Range"]
|
||||||
if !(res.StatusCode == http.StatusOK || (isRanging && res.StatusCode == http.StatusPartialContent)) {
|
if !(res.StatusCode == http.StatusOK || (isRanging && res.StatusCode == http.StatusPartialContent)) {
|
||||||
_ = res.Body.Close() // ignore error
|
_ = res.Body.Close() // ignore error
|
||||||
return nil, fmt.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
|
return nil, errors.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
|
||||||
}
|
}
|
||||||
return res.Body, nil
|
return res.Body, nil
|
||||||
}
|
}
|
||||||
@@ -1357,14 +1062,11 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
|||||||
// Update the object with the contents of the io.Reader, modTime and size
|
// Update the object with the contents of the io.Reader, modTime and size
|
||||||
//
|
//
|
||||||
// The new object may have been created if an error is returned
|
// The new object may have been created if an error is returned
|
||||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||||
bucket, bucketPath := o.split()
|
bucket, bucketPath := o.split()
|
||||||
// Create parent dir/bucket if not saving directory marker
|
err := o.fs.makeBucket(ctx, bucket)
|
||||||
if !strings.HasSuffix(o.remote, "/") {
|
if err != nil {
|
||||||
err = o.fs.mkdirParent(ctx, o.remote)
|
return err
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
modTime := src.ModTime(ctx)
|
modTime := src.ModTime(ctx)
|
||||||
|
|
||||||
@@ -1409,11 +1111,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
if !o.fs.opt.BucketPolicyOnly {
|
if !o.fs.opt.BucketPolicyOnly {
|
||||||
insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
|
insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
|
||||||
}
|
}
|
||||||
insertObject = insertObject.Context(ctx)
|
newObject, err = insertObject.Context(ctx).Do()
|
||||||
if o.fs.opt.UserProject != "" {
|
|
||||||
insertObject = insertObject.UserProject(o.fs.opt.UserProject)
|
|
||||||
}
|
|
||||||
newObject, err = insertObject.Do()
|
|
||||||
return shouldRetry(ctx, err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -1428,11 +1126,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
func (o *Object) Remove(ctx context.Context) (err error) {
|
func (o *Object) Remove(ctx context.Context) (err error) {
|
||||||
bucket, bucketPath := o.split()
|
bucket, bucketPath := o.split()
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
deleteBucket := o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx)
|
err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
|
||||||
if o.fs.opt.UserProject != "" {
|
|
||||||
deleteBucket = deleteBucket.UserProject(o.fs.opt.UserProject)
|
|
||||||
}
|
|
||||||
err = deleteBucket.Do()
|
|
||||||
return shouldRetry(ctx, err)
|
return shouldRetry(ctx, err)
|
||||||
})
|
})
|
||||||
return err
|
return err
|
||||||
|
|||||||
@@ -6,7 +6,6 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/rclone/rclone/backend/googlecloudstorage"
|
"github.com/rclone/rclone/backend/googlecloudstorage"
|
||||||
"github.com/rclone/rclone/fstest"
|
|
||||||
"github.com/rclone/rclone/fstest/fstests"
|
"github.com/rclone/rclone/fstest/fstests"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -17,17 +16,3 @@ func TestIntegration(t *testing.T) {
|
|||||||
NilObject: (*googlecloudstorage.Object)(nil),
|
NilObject: (*googlecloudstorage.Object)(nil),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestIntegration2(t *testing.T) {
|
|
||||||
if *fstest.RemoteName != "" {
|
|
||||||
t.Skip("Skipping as -remote set")
|
|
||||||
}
|
|
||||||
name := "TestGoogleCloudStorage"
|
|
||||||
fstests.Run(t, &fstests.Opt{
|
|
||||||
RemoteName: name + ":",
|
|
||||||
NilObject: (*googlecloudstorage.Object)(nil),
|
|
||||||
ExtraConfig: []fstests.ExtraConfigItem{
|
|
||||||
{Name: name, Key: "directory_markers", Value: "true"},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
// Package api provides types used by the Google Photos API.
|
|
||||||
package api
|
package api
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
|||||||
@@ -6,9 +6,9 @@ package googlephotos
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
golog "log"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"path"
|
"path"
|
||||||
@@ -18,9 +18,9 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
"github.com/rclone/rclone/backend/googlephotos/api"
|
"github.com/rclone/rclone/backend/googlephotos/api"
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fs/config"
|
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
"github.com/rclone/rclone/fs/config/configmap"
|
||||||
"github.com/rclone/rclone/fs/config/configstruct"
|
"github.com/rclone/rclone/fs/config/configstruct"
|
||||||
"github.com/rclone/rclone/fs/config/obscure"
|
"github.com/rclone/rclone/fs/config/obscure"
|
||||||
@@ -29,8 +29,6 @@ import (
|
|||||||
"github.com/rclone/rclone/fs/fshttp"
|
"github.com/rclone/rclone/fs/fshttp"
|
||||||
"github.com/rclone/rclone/fs/hash"
|
"github.com/rclone/rclone/fs/hash"
|
||||||
"github.com/rclone/rclone/fs/log"
|
"github.com/rclone/rclone/fs/log"
|
||||||
"github.com/rclone/rclone/lib/batcher"
|
|
||||||
"github.com/rclone/rclone/lib/encoder"
|
|
||||||
"github.com/rclone/rclone/lib/oauthutil"
|
"github.com/rclone/rclone/lib/oauthutil"
|
||||||
"github.com/rclone/rclone/lib/pacer"
|
"github.com/rclone/rclone/lib/pacer"
|
||||||
"github.com/rclone/rclone/lib/rest"
|
"github.com/rclone/rclone/lib/rest"
|
||||||
@@ -56,7 +54,6 @@ const (
|
|||||||
minSleep = 10 * time.Millisecond
|
minSleep = 10 * time.Millisecond
|
||||||
scopeReadOnly = "https://www.googleapis.com/auth/photoslibrary.readonly"
|
scopeReadOnly = "https://www.googleapis.com/auth/photoslibrary.readonly"
|
||||||
scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary"
|
scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary"
|
||||||
scopeAccess = 2 // position of access scope in list
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -65,20 +62,12 @@ var (
|
|||||||
Scopes: []string{
|
Scopes: []string{
|
||||||
"openid",
|
"openid",
|
||||||
"profile",
|
"profile",
|
||||||
scopeReadWrite, // this must be at position scopeAccess
|
scopeReadWrite,
|
||||||
},
|
},
|
||||||
Endpoint: google.Endpoint,
|
Endpoint: google.Endpoint,
|
||||||
ClientID: rcloneClientID,
|
ClientID: rcloneClientID,
|
||||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||||
RedirectURL: oauthutil.RedirectURL,
|
RedirectURL: oauthutil.TitleBarRedirectURL,
|
||||||
}
|
|
||||||
|
|
||||||
// Configure the batcher
|
|
||||||
defaultBatcherOptions = batcher.Options{
|
|
||||||
MaxBatchSize: 50,
|
|
||||||
DefaultTimeoutSync: 1000 * time.Millisecond,
|
|
||||||
DefaultTimeoutAsync: 10 * time.Second,
|
|
||||||
DefaultBatchSizeAsync: 50,
|
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -89,38 +78,38 @@ func init() {
|
|||||||
Prefix: "gphotos",
|
Prefix: "gphotos",
|
||||||
Description: "Google Photos",
|
Description: "Google Photos",
|
||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
Config: func(ctx context.Context, name string, m configmap.Mapper) {
|
||||||
// Parse config into Options struct
|
// Parse config into Options struct
|
||||||
opt := new(Options)
|
opt := new(Options)
|
||||||
err := configstruct.Set(m, opt)
|
err := configstruct.Set(m, opt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("couldn't parse config into struct: %w", err)
|
fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
switch config.State {
|
// Fill in the scopes
|
||||||
case "":
|
if opt.ReadOnly {
|
||||||
// Fill in the scopes
|
oauthConfig.Scopes[0] = scopeReadOnly
|
||||||
if opt.ReadOnly {
|
} else {
|
||||||
oauthConfig.Scopes[scopeAccess] = scopeReadOnly
|
oauthConfig.Scopes[0] = scopeReadWrite
|
||||||
} else {
|
|
||||||
oauthConfig.Scopes[scopeAccess] = scopeReadWrite
|
|
||||||
}
|
|
||||||
return oauthutil.ConfigOut("warning", &oauthutil.Options{
|
|
||||||
OAuth2Config: oauthConfig,
|
|
||||||
})
|
|
||||||
case "warning":
|
|
||||||
// Warn the user as required by google photos integration
|
|
||||||
return fs.ConfigConfirm("warning_done", true, "config_warning", `Warning
|
|
||||||
|
|
||||||
IMPORTANT: All media items uploaded to Google Photos with rclone
|
|
||||||
are stored in full resolution at original quality. These uploads
|
|
||||||
will count towards storage in your Google Account.`)
|
|
||||||
case "warning_done":
|
|
||||||
return nil, nil
|
|
||||||
}
|
}
|
||||||
return nil, fmt.Errorf("unknown state %q", config.State)
|
|
||||||
|
// Do the oauth
|
||||||
|
err = oauthutil.Config(ctx, "google photos", name, m, oauthConfig, nil)
|
||||||
|
if err != nil {
|
||||||
|
golog.Fatalf("Failed to configure token: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Warn the user
|
||||||
|
fmt.Print(`
|
||||||
|
*** IMPORTANT: All media items uploaded to Google Photos with rclone
|
||||||
|
*** are stored in full resolution at original quality. These uploads
|
||||||
|
*** will count towards storage in your Google Account.
|
||||||
|
|
||||||
|
`)
|
||||||
|
|
||||||
},
|
},
|
||||||
Options: append(append(oauthutil.SharedOptions, []fs.Option{{
|
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||||
Name: "read_only",
|
Name: "read_only",
|
||||||
Default: false,
|
Default: false,
|
||||||
Help: `Set to make the Google Photos backend read only.
|
Help: `Set to make the Google Photos backend read only.
|
||||||
@@ -141,14 +130,14 @@ you want to read the media.`,
|
|||||||
}, {
|
}, {
|
||||||
Name: "start_year",
|
Name: "start_year",
|
||||||
Default: 2000,
|
Default: 2000,
|
||||||
Help: `Year limits the photos to be downloaded to those which are uploaded after the given year.`,
|
Help: `Year limits the photos to be downloaded to those which are uploaded after the given year`,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: "include_archived",
|
Name: "include_archived",
|
||||||
Default: false,
|
Default: false,
|
||||||
Help: `Also view and download archived media.
|
Help: `Also view and download archived media.
|
||||||
|
|
||||||
By default, rclone does not request archived media. Thus, when syncing,
|
By default rclone does not request archived media. Thus, when syncing,
|
||||||
archived media is not visible in directory listings or transferred.
|
archived media is not visible in directory listings or transferred.
|
||||||
|
|
||||||
Note that media in albums is always visible and synced, no matter
|
Note that media in albums is always visible and synced, no matter
|
||||||
@@ -160,27 +149,16 @@ listings and transferred.
|
|||||||
Without this flag, archived media will not be visible in directory
|
Without this flag, archived media will not be visible in directory
|
||||||
listings and won't be transferred.`,
|
listings and won't be transferred.`,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
}, {
|
}}...),
|
||||||
Name: config.ConfigEncoding,
|
|
||||||
Help: config.ConfigEncodingHelp,
|
|
||||||
Advanced: true,
|
|
||||||
Default: (encoder.Base |
|
|
||||||
encoder.EncodeCrLf |
|
|
||||||
encoder.EncodeInvalidUtf8),
|
|
||||||
}}...), defaultBatcherOptions.FsOptions("")...),
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Options defines the configuration for this backend
|
// Options defines the configuration for this backend
|
||||||
type Options struct {
|
type Options struct {
|
||||||
ReadOnly bool `config:"read_only"`
|
ReadOnly bool `config:"read_only"`
|
||||||
ReadSize bool `config:"read_size"`
|
ReadSize bool `config:"read_size"`
|
||||||
StartYear int `config:"start_year"`
|
StartYear int `config:"start_year"`
|
||||||
IncludeArchived bool `config:"include_archived"`
|
IncludeArchived bool `config:"include_archived"`
|
||||||
Enc encoder.MultiEncoder `config:"encoding"`
|
|
||||||
BatchMode string `config:"batch_mode"`
|
|
||||||
BatchSize int `config:"batch_size"`
|
|
||||||
BatchTimeout fs.Duration `config:"batch_timeout"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fs represents a remote storage server
|
// Fs represents a remote storage server
|
||||||
@@ -190,7 +168,7 @@ type Fs struct {
|
|||||||
opt Options // parsed options
|
opt Options // parsed options
|
||||||
features *fs.Features // optional features
|
features *fs.Features // optional features
|
||||||
unAuth *rest.Client // unauthenticated http client
|
unAuth *rest.Client // unauthenticated http client
|
||||||
srv *rest.Client // the connection to the server
|
srv *rest.Client // the connection to the one drive server
|
||||||
ts *oauthutil.TokenSource // token source for oauth2
|
ts *oauthutil.TokenSource // token source for oauth2
|
||||||
pacer *fs.Pacer // To pace the API calls
|
pacer *fs.Pacer // To pace the API calls
|
||||||
startTime time.Time // time Fs was started - used for datestamps
|
startTime time.Time // time Fs was started - used for datestamps
|
||||||
@@ -199,7 +177,6 @@ type Fs struct {
|
|||||||
uploadedMu sync.Mutex // to protect the below
|
uploadedMu sync.Mutex // to protect the below
|
||||||
uploaded dirtree.DirTree // record of uploaded items
|
uploaded dirtree.DirTree // record of uploaded items
|
||||||
createMu sync.Mutex // held when creating albums to prevent dupes
|
createMu sync.Mutex // held when creating albums to prevent dupes
|
||||||
batcher *batcher.Batcher[uploadedItem, *api.MediaItem]
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Object describes a storage object
|
// Object describes a storage object
|
||||||
@@ -305,7 +282,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
baseClient := fshttp.NewClient(ctx)
|
baseClient := fshttp.NewClient(ctx)
|
||||||
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
|
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to configure Box: %w", err)
|
return nil, errors.Wrap(err, "failed to configure Box")
|
||||||
}
|
}
|
||||||
|
|
||||||
root = strings.Trim(path.Clean(root), "/")
|
root = strings.Trim(path.Clean(root), "/")
|
||||||
@@ -325,14 +302,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
albums: map[bool]*albums{},
|
albums: map[bool]*albums{},
|
||||||
uploaded: dirtree.New(),
|
uploaded: dirtree.New(),
|
||||||
}
|
}
|
||||||
batcherOptions := defaultBatcherOptions
|
|
||||||
batcherOptions.Mode = f.opt.BatchMode
|
|
||||||
batcherOptions.Size = f.opt.BatchSize
|
|
||||||
batcherOptions.Timeout = time.Duration(f.opt.BatchTimeout)
|
|
||||||
f.batcher, err = batcher.New(ctx, f, f.commitBatch, batcherOptions)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
ReadMimeType: true,
|
ReadMimeType: true,
|
||||||
}).Fill(ctx, f)
|
}).Fill(ctx, f)
|
||||||
@@ -366,13 +335,13 @@ func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, e
|
|||||||
return shouldRetry(ctx, resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", fmt.Errorf("couldn't read openID config: %w", err)
|
return "", errors.Wrap(err, "couldn't read openID config")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Find userinfo endpoint
|
// Find userinfo endpoint
|
||||||
endpoint, ok := openIDconfig[name].(string)
|
endpoint, ok := openIDconfig[name].(string)
|
||||||
if !ok {
|
if !ok {
|
||||||
return "", fmt.Errorf("couldn't find %q from openID config", name)
|
return "", errors.Errorf("couldn't find %q from openID config", name)
|
||||||
}
|
}
|
||||||
|
|
||||||
return endpoint, nil
|
return endpoint, nil
|
||||||
@@ -395,7 +364,7 @@ func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err erro
|
|||||||
return shouldRetry(ctx, resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("couldn't read user info: %w", err)
|
return nil, errors.Wrap(err, "couldn't read user info")
|
||||||
}
|
}
|
||||||
return userInfo, nil
|
return userInfo, nil
|
||||||
}
|
}
|
||||||
@@ -426,7 +395,7 @@ func (f *Fs) Disconnect(ctx context.Context) (err error) {
|
|||||||
return shouldRetry(ctx, resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("couldn't revoke token: %w", err)
|
return errors.Wrap(err, "couldn't revoke token")
|
||||||
}
|
}
|
||||||
fs.Infof(f, "res = %+v", res)
|
fs.Infof(f, "res = %+v", res)
|
||||||
return nil
|
return nil
|
||||||
@@ -513,7 +482,7 @@ func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err erro
|
|||||||
return shouldRetry(ctx, resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("couldn't list albums: %w", err)
|
return nil, errors.Wrap(err, "couldn't list albums")
|
||||||
}
|
}
|
||||||
newAlbums := result.Albums
|
newAlbums := result.Albums
|
||||||
if shared {
|
if shared {
|
||||||
@@ -527,9 +496,7 @@ func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err erro
|
|||||||
lastID = newAlbums[len(newAlbums)-1].ID
|
lastID = newAlbums[len(newAlbums)-1].ID
|
||||||
}
|
}
|
||||||
for i := range newAlbums {
|
for i := range newAlbums {
|
||||||
anAlbum := newAlbums[i]
|
all.add(&newAlbums[i])
|
||||||
anAlbum.Title = f.opt.Enc.FromStandardPath(anAlbum.Title)
|
|
||||||
all.add(&anAlbum)
|
|
||||||
}
|
}
|
||||||
if result.NextPageToken == "" {
|
if result.NextPageToken == "" {
|
||||||
break
|
break
|
||||||
@@ -570,7 +537,7 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
|
|||||||
return shouldRetry(ctx, resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("couldn't list files: %w", err)
|
return errors.Wrap(err, "couldn't list files")
|
||||||
}
|
}
|
||||||
items := result.MediaItems
|
items := result.MediaItems
|
||||||
if len(items) > 0 && items[0].ID == lastID {
|
if len(items) > 0 && items[0].ID == lastID {
|
||||||
@@ -583,7 +550,7 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
|
|||||||
for i := range items {
|
for i := range items {
|
||||||
item := &result.MediaItems[i]
|
item := &result.MediaItems[i]
|
||||||
remote := item.Filename
|
remote := item.Filename
|
||||||
remote = strings.ReplaceAll(remote, "/", "/")
|
remote = strings.Replace(remote, "/", "/", -1)
|
||||||
err = fn(remote, item, false)
|
err = fn(remote, item, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -682,7 +649,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
|
|
||||||
// Put the object into the bucket
|
// Put the object into the bucket
|
||||||
//
|
//
|
||||||
// Copy the reader in to the new object which is returned.
|
// Copy the reader in to the new object which is returned
|
||||||
//
|
//
|
||||||
// The new object may have been created if an error is returned
|
// The new object may have been created if an error is returned
|
||||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
@@ -714,7 +681,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
|
|||||||
return shouldRetry(ctx, resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("couldn't create album: %w", err)
|
return nil, errors.Wrap(err, "couldn't create album")
|
||||||
}
|
}
|
||||||
f.albums[false].add(&result)
|
f.albums[false].add(&result)
|
||||||
return &result, nil
|
return &result, nil
|
||||||
@@ -802,13 +769,6 @@ func (f *Fs) Hashes() hash.Set {
|
|||||||
return hash.Set(hash.None)
|
return hash.Set(hash.None)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Shutdown the backend, closing any background tasks and any
|
|
||||||
// cached connections.
|
|
||||||
func (f *Fs) Shutdown(ctx context.Context) error {
|
|
||||||
f.batcher.Shutdown()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ------------------------------------------------------------
|
// ------------------------------------------------------------
|
||||||
|
|
||||||
// Fs returns the parent Fs
|
// Fs returns the parent Fs
|
||||||
@@ -907,7 +867,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
|
|||||||
return shouldRetry(ctx, resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("couldn't get media item: %w", err)
|
return errors.Wrap(err, "couldn't get media item")
|
||||||
}
|
}
|
||||||
o.setMetaData(&item)
|
o.setMetaData(&item)
|
||||||
return nil
|
return nil
|
||||||
@@ -989,82 +949,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
|||||||
return resp.Body, err
|
return resp.Body, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// input to the batcher
|
|
||||||
type uploadedItem struct {
|
|
||||||
AlbumID string // desired album
|
|
||||||
UploadToken string // upload ID
|
|
||||||
}
|
|
||||||
|
|
||||||
// Commit a batch of items to albumID returning the errors in errors
|
|
||||||
func (f *Fs) commitBatchAlbumID(ctx context.Context, items []uploadedItem, results []*api.MediaItem, errors []error, albumID string) {
|
|
||||||
// Create the media item from an UploadToken, optionally adding to an album
|
|
||||||
opts := rest.Opts{
|
|
||||||
Method: "POST",
|
|
||||||
Path: "/mediaItems:batchCreate",
|
|
||||||
}
|
|
||||||
var request = api.BatchCreateRequest{
|
|
||||||
AlbumID: albumID,
|
|
||||||
}
|
|
||||||
itemsInBatch := 0
|
|
||||||
for i := range items {
|
|
||||||
if items[i].AlbumID == albumID {
|
|
||||||
request.NewMediaItems = append(request.NewMediaItems, api.NewMediaItem{
|
|
||||||
SimpleMediaItem: api.SimpleMediaItem{
|
|
||||||
UploadToken: items[i].UploadToken,
|
|
||||||
},
|
|
||||||
})
|
|
||||||
itemsInBatch++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
var result api.BatchCreateResponse
|
|
||||||
var resp *http.Response
|
|
||||||
var err error
|
|
||||||
err = f.pacer.Call(func() (bool, error) {
|
|
||||||
resp, err = f.srv.CallJSON(ctx, &opts, request, &result)
|
|
||||||
return shouldRetry(ctx, resp, err)
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
err = fmt.Errorf("failed to create media item: %w", err)
|
|
||||||
}
|
|
||||||
if err == nil && len(result.NewMediaItemResults) != itemsInBatch {
|
|
||||||
err = fmt.Errorf("bad response to BatchCreate expecting %d items but got %d", itemsInBatch, len(result.NewMediaItemResults))
|
|
||||||
}
|
|
||||||
j := 0
|
|
||||||
for i := range items {
|
|
||||||
if items[i].AlbumID == albumID {
|
|
||||||
if err == nil {
|
|
||||||
media := &result.NewMediaItemResults[j]
|
|
||||||
if media.Status.Code != 0 {
|
|
||||||
errors[i] = fmt.Errorf("upload failed: %s (%d)", media.Status.Message, media.Status.Code)
|
|
||||||
} else {
|
|
||||||
results[i] = &media.MediaItem
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
errors[i] = err
|
|
||||||
}
|
|
||||||
j++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Called by the batcher to commit a batch
|
|
||||||
func (f *Fs) commitBatch(ctx context.Context, items []uploadedItem, results []*api.MediaItem, errors []error) (err error) {
|
|
||||||
// Discover all the AlbumIDs as we have to upload these separately
|
|
||||||
//
|
|
||||||
// Should maybe have one batcher per AlbumID
|
|
||||||
albumIDs := map[string]struct{}{}
|
|
||||||
for i := range items {
|
|
||||||
albumIDs[items[i].AlbumID] = struct{}{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// batch the albums
|
|
||||||
for albumID := range albumIDs {
|
|
||||||
// errors returned in errors
|
|
||||||
f.commitBatchAlbumID(ctx, items, results, errors, albumID)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update the object with the contents of the io.Reader, modTime and size
|
// Update the object with the contents of the io.Reader, modTime and size
|
||||||
//
|
//
|
||||||
// The new object may have been created if an error is returned
|
// The new object may have been created if an error is returned
|
||||||
@@ -1118,36 +1002,44 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
return shouldRetry(ctx, resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("couldn't upload file: %w", err)
|
return errors.Wrap(err, "couldn't upload file")
|
||||||
}
|
}
|
||||||
uploadToken := strings.TrimSpace(string(token))
|
uploadToken := strings.TrimSpace(string(token))
|
||||||
if uploadToken == "" {
|
if uploadToken == "" {
|
||||||
return errors.New("empty upload token")
|
return errors.New("empty upload token")
|
||||||
}
|
}
|
||||||
|
|
||||||
uploaded := uploadedItem{
|
// Create the media item from an UploadToken, optionally adding to an album
|
||||||
AlbumID: albumID,
|
opts = rest.Opts{
|
||||||
UploadToken: uploadToken,
|
Method: "POST",
|
||||||
|
Path: "/mediaItems:batchCreate",
|
||||||
}
|
}
|
||||||
|
var request = api.BatchCreateRequest{
|
||||||
// Save the upload into an album
|
AlbumID: albumID,
|
||||||
var info *api.MediaItem
|
NewMediaItems: []api.NewMediaItem{
|
||||||
if o.fs.batcher.Batching() {
|
{
|
||||||
info, err = o.fs.batcher.Commit(ctx, o.remote, uploaded)
|
SimpleMediaItem: api.SimpleMediaItem{
|
||||||
} else {
|
UploadToken: uploadToken,
|
||||||
errors := make([]error, 1)
|
},
|
||||||
results := make([]*api.MediaItem, 1)
|
},
|
||||||
err = o.fs.commitBatch(ctx, []uploadedItem{uploaded}, results, errors)
|
},
|
||||||
if err != nil {
|
|
||||||
err = errors[0]
|
|
||||||
info = results[0]
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
var result api.BatchCreateResponse
|
||||||
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
|
resp, err = o.fs.srv.CallJSON(ctx, &opts, request, &result)
|
||||||
|
return shouldRetry(ctx, resp, err)
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to commit batch: %w", err)
|
return errors.Wrap(err, "failed to create media item")
|
||||||
}
|
}
|
||||||
|
if len(result.NewMediaItemResults) != 1 {
|
||||||
o.setMetaData(info)
|
return errors.New("bad response to BatchCreate wrong number of items")
|
||||||
|
}
|
||||||
|
mediaItemResult := result.NewMediaItemResults[0]
|
||||||
|
if mediaItemResult.Status.Code != 0 {
|
||||||
|
return errors.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code)
|
||||||
|
}
|
||||||
|
o.setMetaData(&mediaItemResult.MediaItem)
|
||||||
|
|
||||||
// Add upload to internal storage
|
// Add upload to internal storage
|
||||||
if pattern.isUpload {
|
if pattern.isUpload {
|
||||||
@@ -1167,7 +1059,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
|
|||||||
albumTitle, fileName := match[1], match[2]
|
albumTitle, fileName := match[1], match[2]
|
||||||
album, ok := o.fs.albums[false].get(albumTitle)
|
album, ok := o.fs.albums[false].get(albumTitle)
|
||||||
if !ok {
|
if !ok {
|
||||||
return fmt.Errorf("couldn't file %q in album %q for delete", fileName, albumTitle)
|
return errors.Errorf("couldn't file %q in album %q for delete", fileName, albumTitle)
|
||||||
}
|
}
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
Method: "POST",
|
Method: "POST",
|
||||||
@@ -1183,7 +1075,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
|
|||||||
return shouldRetry(ctx, resp, err)
|
return shouldRetry(ctx, resp, err)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("couldn't delete item from album: %w", err)
|
return errors.Wrap(err, "couldn't delete item from album")
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ package googlephotos
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io/ioutil"
|
||||||
"net/http"
|
"net/http"
|
||||||
"path"
|
"path"
|
||||||
"testing"
|
"testing"
|
||||||
@@ -12,6 +12,7 @@ import (
|
|||||||
_ "github.com/rclone/rclone/backend/local"
|
_ "github.com/rclone/rclone/backend/local"
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
"github.com/rclone/rclone/fs/hash"
|
"github.com/rclone/rclone/fs/hash"
|
||||||
|
"github.com/rclone/rclone/fs/operations"
|
||||||
"github.com/rclone/rclone/fstest"
|
"github.com/rclone/rclone/fstest"
|
||||||
"github.com/rclone/rclone/lib/random"
|
"github.com/rclone/rclone/lib/random"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
@@ -36,7 +37,7 @@ func TestIntegration(t *testing.T) {
|
|||||||
}
|
}
|
||||||
f, err := fs.NewFs(ctx, *fstest.RemoteName)
|
f, err := fs.NewFs(ctx, *fstest.RemoteName)
|
||||||
if err == fs.ErrorNotFoundInConfigFile {
|
if err == fs.ErrorNotFoundInConfigFile {
|
||||||
t.Skipf("Couldn't create google photos backend - skipping tests: %v", err)
|
t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err))
|
||||||
}
|
}
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
@@ -55,7 +56,7 @@ func TestIntegration(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
in, err := srcObj.Open(ctx)
|
in, err := srcObj.Open(ctx)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
dstObj, err := f.Put(ctx, in, fs.NewOverrideRemote(srcObj, remote))
|
dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, remote, dstObj.Remote())
|
assert.Equal(t, remote, dstObj.Remote())
|
||||||
_ = in.Close()
|
_ = in.Close()
|
||||||
@@ -98,7 +99,7 @@ func TestIntegration(t *testing.T) {
|
|||||||
t.Run("ObjectOpen", func(t *testing.T) {
|
t.Run("ObjectOpen", func(t *testing.T) {
|
||||||
in, err := dstObj.Open(ctx)
|
in, err := dstObj.Open(ctx)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
buf, err := io.ReadAll(in)
|
buf, err := ioutil.ReadAll(in)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.NoError(t, in.Close())
|
require.NoError(t, in.Close())
|
||||||
assert.True(t, len(buf) > 1000)
|
assert.True(t, len(buf) > 1000)
|
||||||
@@ -220,7 +221,7 @@ func TestIntegration(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
in, err := srcObj.Open(ctx)
|
in, err := srcObj.Open(ctx)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
dstObj, err := f.Put(ctx, in, fs.NewOverrideRemote(srcObj, remote))
|
dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, remote, dstObj.Remote())
|
assert.Equal(t, remote, dstObj.Remote())
|
||||||
_ = in.Close()
|
_ = in.Close()
|
||||||
|
|||||||
@@ -11,6 +11,7 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
"github.com/rclone/rclone/backend/googlephotos/api"
|
"github.com/rclone/rclone/backend/googlephotos/api"
|
||||||
"github.com/rclone/rclone/fs"
|
"github.com/rclone/rclone/fs"
|
||||||
)
|
)
|
||||||
@@ -269,7 +270,7 @@ func days(ctx context.Context, f lister, prefix string, match []string) (entries
|
|||||||
year := match[1]
|
year := match[1]
|
||||||
current, err := time.Parse("2006", year)
|
current, err := time.Parse("2006", year)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("bad year %q", match[1])
|
return nil, errors.Errorf("bad year %q", match[1])
|
||||||
}
|
}
|
||||||
currentYear := current.Year()
|
currentYear := current.Year()
|
||||||
for current.Year() == currentYear {
|
for current.Year() == currentYear {
|
||||||
@@ -283,7 +284,7 @@ func days(ctx context.Context, f lister, prefix string, match []string) (entries
|
|||||||
func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter, err error) {
|
func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter, err error) {
|
||||||
year, err := strconv.Atoi(match[1])
|
year, err := strconv.Atoi(match[1])
|
||||||
if err != nil || year < 1000 || year > 3000 {
|
if err != nil || year < 1000 || year > 3000 {
|
||||||
return sf, fmt.Errorf("bad year %q", match[1])
|
return sf, errors.Errorf("bad year %q", match[1])
|
||||||
}
|
}
|
||||||
sf = api.SearchFilter{
|
sf = api.SearchFilter{
|
||||||
Filters: &api.Filters{
|
Filters: &api.Filters{
|
||||||
@@ -299,14 +300,14 @@ func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.S
 	if len(match) >= 3 {
 		month, err := strconv.Atoi(match[2])
 		if err != nil || month < 1 || month > 12 {
-			return sf, fmt.Errorf("bad month %q", match[2])
+			return sf, errors.Errorf("bad month %q", match[2])
 		}
 		sf.Filters.DateFilter.Dates[0].Month = month
 	}
 	if len(match) >= 4 {
 		day, err := strconv.Atoi(match[3])
 		if err != nil || day < 1 || day > 31 {
-			return sf, fmt.Errorf("bad day %q", match[3])
+			return sf, errors.Errorf("bad day %q", match[3])
 		}
 		sf.Filters.DateFilter.Dates[0].Day = day
 	}
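These hunks replace the standard library `fmt.Errorf` (v1.65.1 side) with `errors.Errorf` from `github.com/pkg/errors` on the older branch, which is why the earlier import hunk adds that package. A short sketch of the two styles, assuming the `github.com/pkg/errors` module is available:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// stdlib form used on the v1.65.1 side of the diff
func badYear(year string) error {
	return fmt.Errorf("bad year %q", year)
}

// github.com/pkg/errors form used on the older branch; it also records a stack trace
func badYearPkg(year string) error {
	return errors.Errorf("bad year %q", year)
}

func main() {
	fmt.Println(badYear("20x5"), badYearPkg("20x5"))
}
```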
@@ -315,7 +316,7 @@ func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.S
 
 // featureFilter creates a filter for the Feature enum
 //
-// The API only supports one feature, FAVORITES, so hardcode that feature.
+// The API only supports one feature, FAVORITES, so hardcode that feature
 //
 // https://developers.google.com/photos/library/reference/rest/v1/mediaItems/search#FeatureFilter
 func featureFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter) {
@@ -50,7 +50,7 @@ func (f *testLister) listAlbums(ctx context.Context, shared bool) (all *albums,
 
 // mock listUploads for testing
 func (f *testLister) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	entries = f.uploaded[dir]
+	entries, _ = f.uploaded[dir]
 	return entries, nil
 }
 
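The one-line change above only swaps between the plain and the comma-ok form of a Go map read; both return the zero value for a missing key, and assigning the flag to `_` discards it. A tiny, self-contained illustration:

```go
package main

import "fmt"

func main() {
	uploaded := map[string][]string{"dir": {"a", "b"}}
	entries := uploaded["missing"]     // plain index: zero value (nil) when absent
	entries2, _ := uploaded["missing"] // comma-ok form with the flag discarded: identical result
	fmt.Println(entries == nil, entries2 == nil) // true true
}
```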
@@ -1,188 +0,0 @@
package hasher

import (
	"context"
	"errors"
	"fmt"
	"path"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/cache"
	"github.com/rclone/rclone/fs/fspath"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/lib/kv"
)

// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
	switch name {
	case "drop":
		return nil, f.db.Stop(true)
	case "dump", "fulldump":
		return nil, f.dbDump(ctx, name == "fulldump", "")
	case "import", "stickyimport":
		sticky := name == "stickyimport"
		if len(arg) != 2 {
			return nil, errors.New("please provide checksum type and path to sum file")
		}
		return nil, f.dbImport(ctx, arg[0], arg[1], sticky)
	default:
		return nil, fs.ErrorCommandNotFound
	}
}

var commandHelp = []fs.CommandHelp{{
	Name:  "drop",
	Short: "Drop cache",
	Long: `Completely drop checksum cache.
Usage Example:
    rclone backend drop hasher:
`,
}, {
	Name:  "dump",
	Short: "Dump the database",
	Long:  "Dump cache records covered by the current remote",
}, {
	Name:  "fulldump",
	Short: "Full dump of the database",
	Long:  "Dump all cache records in the database",
}, {
	Name:  "import",
	Short: "Import a SUM file",
	Long: `Amend hash cache from a SUM file and bind checksums to files by size/time.
Usage Example:
    rclone backend import hasher:subdir md5 /path/to/sum.md5
`,
}, {
	Name:  "stickyimport",
	Short: "Perform fast import of a SUM file",
	Long: `Fill hash cache from a SUM file without verifying file fingerprints.
Usage Example:
    rclone backend stickyimport hasher:subdir md5 remote:path/to/sum.md5
`,
}}

func (f *Fs) dbDump(ctx context.Context, full bool, root string) error {
	if root == "" {
		remoteFs, err := cache.Get(ctx, f.opt.Remote)
		if err != nil {
			return err
		}
		root = fspath.JoinRootPath(remoteFs.Root(), f.Root())
	}
	if f.db == nil {
		if f.opt.MaxAge == 0 {
			fs.Errorf(f, "db not found. (disabled with max_age = 0)")
		} else {
			fs.Errorf(f, "db not found.")
		}
		return kv.ErrInactive
	}
	op := &kvDump{
		full: full,
		root: root,
		path: f.db.Path(),
		fs:   f,
	}
	err := f.db.Do(false, op)
	if err == kv.ErrEmpty {
		fs.Infof(op.path, "empty")
		err = nil
	}
	return err
}

func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bool) error {
	var hashType hash.Type
	if err := hashType.Set(hashName); err != nil {
		return err
	}
	if hashType == hash.None {
		return errors.New("please provide a valid hash type")
	}
	if !f.suppHashes.Contains(hashType) {
		return errors.New("unsupported hash type")
	}
	if !f.keepHashes.Contains(hashType) {
		fs.Infof(nil, "Need not import hashes of this type")
		return nil
	}

	_, sumPath, err := fspath.SplitFs(sumRemote)
	if err != nil {
		return err
	}
	sumFs, err := cache.Get(ctx, sumRemote)
	switch err {
	case fs.ErrorIsFile:
		// ok
	case nil:
		return fmt.Errorf("not a file: %s", sumRemote)
	default:
		return err
	}

	sumObj, err := sumFs.NewObject(ctx, path.Base(sumPath))
	if err != nil {
		return fmt.Errorf("cannot open sum file: %w", err)
	}
	hashes, err := operations.ParseSumFile(ctx, sumObj)
	if err != nil {
		return fmt.Errorf("failed to parse sum file: %w", err)
	}

	if sticky {
		rootPath := f.Fs.Root()
		for remote, hashVal := range hashes {
			key := path.Join(rootPath, remote)
			hashSums := operations.HashSums{hashName: hashVal}
			if err := f.putRawHashes(ctx, key, anyFingerprint, hashSums); err != nil {
				fs.Errorf(nil, "%s: failed to import: %v", remote, err)
			}
		}
		fs.Infof(nil, "Summary: %d checksum(s) imported", len(hashes))
		return nil
	}

	const longImportThreshold = 100
	if len(hashes) > longImportThreshold {
		fs.Infof(nil, "Importing %d checksums. Please wait...", len(hashes))
	}

	doneCount := 0
	err = operations.ListFn(ctx, f, func(obj fs.Object) {
		remote := obj.Remote()
		hash := hashes[remote]
		hashes[remote] = "" // mark as handled
		o, ok := obj.(*Object)
		if ok && hash != "" {
			if err := o.putHashes(ctx, hashMap{hashType: hash}); err != nil {
				fs.Errorf(nil, "%s: failed to import: %v", remote, err)
			}
			accounting.Stats(ctx).NewCheckingTransfer(obj, "importing").Done(ctx, err)
			doneCount++
		}
	})
	if err != nil {
		fs.Errorf(nil, "Import failed: %v", err)
	}
	skipCount := 0
	for remote, emptyOrDone := range hashes {
		if emptyOrDone != "" {
			fs.Infof(nil, "Skip vanished object: %s", remote)
			skipCount++
		}
	}
	fs.Infof(nil, "Summary: %d imported, %d skipped", doneCount, skipCount)
	return err
}
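The `Command` method above is how `rclone backend <command> remote:` requests reach the hasher backend. A hypothetical sketch of calling it from Go code; the helper name and error handling are illustrative only:

```go
// Hypothetical helper: invoke the "drop" backend command on an already
// constructed hasher Fs, equivalent to `rclone backend drop hasher:`.
func dropHasherCache(ctx context.Context, f *Fs) error {
	_, err := f.Command(ctx, "drop", nil, nil)
	return err
}
```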
@@ -1,540 +0,0 @@
// Package hasher implements a checksum handling overlay backend
package hasher

import (
	"context"
	"encoding/gob"
	"errors"
	"fmt"
	"io"
	"path"
	"strings"
	"sync"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/cache"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fspath"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/kv"
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "hasher",
		Description: "Better checksums for other remotes",
		NewFs:       NewFs,
		MetadataInfo: &fs.MetadataInfo{
			Help: `Any metadata supported by the underlying remote is read and written.`,
		},
		CommandHelp: commandHelp,
		Options: []fs.Option{{
			Name:     "remote",
			Required: true,
			Help:     "Remote to cache checksums for (e.g. myRemote:path).",
		}, {
			Name:     "hashes",
			Default:  fs.CommaSepList{"md5", "sha1"},
			Advanced: false,
			Help:     "Comma separated list of supported checksum types.",
		}, {
			Name:     "max_age",
			Advanced: false,
			Default:  fs.DurationOff,
			Help:     "Maximum time to keep checksums in cache (0 = no cache, off = cache forever).",
		}, {
			Name:     "auto_size",
			Advanced: true,
			Default:  fs.SizeSuffix(0),
			Help:     "Auto-update checksum for files smaller than this size (disabled by default).",
		}},
	})
}

// Options defines the configuration for this backend
type Options struct {
	Remote   string          `config:"remote"`
	Hashes   fs.CommaSepList `config:"hashes"`
	AutoSize fs.SizeSuffix   `config:"auto_size"`
	MaxAge   fs.Duration     `config:"max_age"`
}

// Fs represents a wrapped fs.Fs
type Fs struct {
	fs.Fs
	name     string
	root     string
	wrapper  fs.Fs
	features *fs.Features
	opt      *Options
	db       *kv.DB
	// fingerprinting
	fpTime bool      // true if using time in fingerprints
	fpHash hash.Type // hash type to use in fingerprints or None
	// hash types triaged by groups
	suppHashes hash.Set // all supported checksum types
	passHashes hash.Set // passed directly to the base without caching
	slowHashes hash.Set // passed to the base and then cached
	autoHashes hash.Set // calculated in-house and cached
	keepHashes hash.Set // checksums to keep in cache (slow + auto)
}

var warnExperimental sync.Once

// NewFs constructs an Fs from the remote:path string
func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs.Fs, error) {
	if !kv.Supported() {
		return nil, errors.New("hasher is not supported on this OS")
	}
	warnExperimental.Do(func() {
		fs.Infof(nil, "Hasher is EXPERIMENTAL!")
	})

	opt := &Options{}
	err := configstruct.Set(cmap, opt)
	if err != nil {
		return nil, err
	}

	if strings.HasPrefix(opt.Remote, fsname+":") {
		return nil, errors.New("can't point remote at itself")
	}
	remotePath := fspath.JoinRootPath(opt.Remote, rpath)
	baseFs, err := cache.Get(ctx, remotePath)
	if err != nil && err != fs.ErrorIsFile {
		return nil, fmt.Errorf("failed to derive base remote %q: %w", opt.Remote, err)
	}

	f := &Fs{
		Fs:   baseFs,
		name: fsname,
		root: rpath,
		opt:  opt,
	}
	// Correct root if definitely pointing to a file
	if err == fs.ErrorIsFile {
		f.root = path.Dir(f.root)
		if f.root == "." || f.root == "/" {
			f.root = ""
		}
	}
	baseFeatures := baseFs.Features()
	f.fpTime = baseFs.Precision() != fs.ModTimeNotSupported

	if baseFeatures.SlowHash {
		f.slowHashes = f.Fs.Hashes()
	} else {
		f.passHashes = f.Fs.Hashes()
		f.fpHash = f.passHashes.GetOne()
	}

	f.suppHashes = f.passHashes
	f.suppHashes.Add(f.slowHashes.Array()...)

	for _, hashName := range opt.Hashes {
		var ht hash.Type
		if err := ht.Set(hashName); err != nil {
			return nil, fmt.Errorf("invalid token %q in hash string %q", hashName, opt.Hashes.String())
		}
		if !f.slowHashes.Contains(ht) {
			f.autoHashes.Add(ht)
		}
		f.keepHashes.Add(ht)
		f.suppHashes.Add(ht)
	}

	fs.Debugf(f, "Groups by usage: cached %s, passed %s, auto %s, slow %s, supported %s",
		f.keepHashes, f.passHashes, f.autoHashes, f.slowHashes, f.suppHashes)

	var nilSet hash.Set
	if f.keepHashes == nilSet {
		return nil, errors.New("configured hash_names have nothing to keep in cache")
	}

	if f.opt.MaxAge > 0 {
		gob.Register(hashRecord{})
		db, err := kv.Start(ctx, "hasher", f.Fs)
		if err != nil {
			return nil, err
		}
		f.db = db
	}

	stubFeatures := &fs.Features{
		CanHaveEmptyDirectories: true,
		IsLocal:                 true,
		ReadMimeType:            true,
		WriteMimeType:           true,
		SetTier:                 true,
		GetTier:                 true,
		ReadMetadata:            true,
		WriteMetadata:           true,
		UserMetadata:            true,
		PartialUploads:          true,
	}
	f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)

	cache.PinUntilFinalized(f.Fs, f)
	return f, err
}

//
// Filesystem
//

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string { return f.name }

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string { return f.root }

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features { return f.features }

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set { return f.suppHashes }

// String returns a description of the FS
// The "hasher::" prefix is a distinctive feature.
func (f *Fs) String() string {
	return fmt.Sprintf("hasher::%s:%s", f.name, f.root)
}

// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs { return f.Fs }

// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs { return f.wrapper }

// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) { f.wrapper = wrapper }

// Wrap base entries into hasher entries.
func (f *Fs) wrapEntries(baseEntries fs.DirEntries) (hashEntries fs.DirEntries, err error) {
	hashEntries = baseEntries[:0] // work inplace
	for _, entry := range baseEntries {
		switch x := entry.(type) {
		case fs.Object:
			obj, err := f.wrapObject(x, nil)
			if err != nil {
				return nil, err
			}
			hashEntries = append(hashEntries, obj)
		default:
			hashEntries = append(hashEntries, entry) // trash in - trash out
		}
	}
	return hashEntries, nil
}

// List the objects and directories in dir into entries.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	if entries, err = f.Fs.List(ctx, dir); err != nil {
		return nil, err
	}
	return f.wrapEntries(entries)
}

// ListR lists the objects and directories recursively into out.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	return f.Fs.Features().ListR(ctx, dir, func(baseEntries fs.DirEntries) error {
		hashEntries, err := f.wrapEntries(baseEntries)
		if err != nil {
			return err
		}
		return callback(hashEntries)
	})
}

// Purge a directory
func (f *Fs) Purge(ctx context.Context, dir string) error {
	if do := f.Fs.Features().Purge; do != nil {
		if err := do(ctx, dir); err != nil {
			return err
		}
		err := f.db.Do(true, &kvPurge{
			dir: path.Join(f.Fs.Root(), dir),
		})
		if err != nil {
			fs.Errorf(f, "Failed to purge some hashes: %v", err)
		}
		return nil
	}
	return fs.ErrorCantPurge
}

// PutStream uploads to the remote path with undeterminate size.
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	if do := f.Fs.Features().PutStream; do != nil {
		_ = f.pruneHash(src.Remote())
		oResult, err := do(ctx, in, src, options...)
		return f.wrapObject(oResult, err)
	}
	return nil, errors.New("PutStream not supported")
}

// PutUnchecked uploads the object, allowing duplicates.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	if do := f.Fs.Features().PutUnchecked; do != nil {
		_ = f.pruneHash(src.Remote())
		oResult, err := do(ctx, in, src, options...)
		return f.wrapObject(oResult, err)
	}
	return nil, errors.New("PutUnchecked not supported")
}

// pruneHash deletes hash for a path
func (f *Fs) pruneHash(remote string) error {
	return f.db.Do(true, &kvPrune{
		key: path.Join(f.Fs.Root(), remote),
	})
}

// CleanUp the trash in the Fs
func (f *Fs) CleanUp(ctx context.Context) error {
	if do := f.Fs.Features().CleanUp; do != nil {
		return do(ctx)
	}
	return errors.New("not supported by underlying remote")
}

// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	if do := f.Fs.Features().About; do != nil {
		return do(ctx)
	}
	return nil, errors.New("not supported by underlying remote")
}

// ChangeNotify calls the passed function with a path that has had changes.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
	if do := f.Fs.Features().ChangeNotify; do != nil {
		do(ctx, notifyFunc, pollIntervalChan)
	}
}

// UserInfo returns info about the connected user
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
	if do := f.Fs.Features().UserInfo; do != nil {
		return do(ctx)
	}
	return nil, fs.ErrorNotImplemented
}

// Disconnect the current user
func (f *Fs) Disconnect(ctx context.Context) error {
	if do := f.Fs.Features().Disconnect; do != nil {
		return do(ctx)
	}
	return fs.ErrorNotImplemented
}

// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
	if do := f.Fs.Features().MergeDirs; do != nil {
		return do(ctx, dirs)
	}
	return errors.New("MergeDirs not supported")
}

// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
	if do := f.Fs.Features().DirCacheFlush; do != nil {
		do()
	}
}

// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
	if do := f.Fs.Features().PublicLink; do != nil {
		return do(ctx, remote, expire, unlink)
	}
	return "", errors.New("PublicLink not supported")
}

// Copy src to this remote using server-side copy operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	do := f.Fs.Features().Copy
	if do == nil {
		return nil, fs.ErrorCantCopy
	}
	o, ok := src.(*Object)
	if !ok {
		return nil, fs.ErrorCantCopy
	}
	oResult, err := do(ctx, o.Object, remote)
	return f.wrapObject(oResult, err)
}

// Move src to this remote using server-side move operations.
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	do := f.Fs.Features().Move
	if do == nil {
		return nil, fs.ErrorCantMove
	}
	o, ok := src.(*Object)
	if !ok {
		return nil, fs.ErrorCantMove
	}
	oResult, err := do(ctx, o.Object, remote)
	if err != nil {
		return nil, err
	}
	_ = f.db.Do(true, &kvMove{
		src: path.Join(f.Fs.Root(), src.Remote()),
		dst: path.Join(f.Fs.Root(), remote),
		dir: false,
		fs:  f,
	})
	return f.wrapObject(oResult, nil)
}

// DirMove moves src, srcRemote to this remote at dstRemote using server-side move operations.
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	do := f.Fs.Features().DirMove
	if do == nil {
		return fs.ErrorCantDirMove
	}
	srcFs, ok := src.(*Fs)
	if !ok {
		return fs.ErrorCantDirMove
	}
	err := do(ctx, srcFs.Fs, srcRemote, dstRemote)
	if err == nil {
		_ = f.db.Do(true, &kvMove{
			src: path.Join(srcFs.Fs.Root(), srcRemote),
			dst: path.Join(f.Fs.Root(), dstRemote),
			dir: true,
			fs:  f,
		})
	}
	return err
}

// Shutdown the backend, closing any background tasks and any cached connections.
func (f *Fs) Shutdown(ctx context.Context) (err error) {
	if f.db != nil {
		err = f.db.Stop(false)
	}
	if do := f.Fs.Features().Shutdown; do != nil {
		if err2 := do(ctx); err2 != nil {
			err = err2
		}
	}
	return
}

// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	o, err := f.Fs.NewObject(ctx, remote)
	return f.wrapObject(o, err)
}

//
// Object
//

// Object represents a composite file wrapping one or more data chunks
type Object struct {
	fs.Object
	f *Fs
}

// Wrap base object into hasher object
func (f *Fs) wrapObject(o fs.Object, err error) (obj fs.Object, outErr error) {
	// log.Trace(o, "err=%v", err)("obj=%#v, outErr=%v", &obj, &outErr)
	if err != nil {
		return nil, err
	}
	if o == nil {
		return nil, fs.ErrorObjectNotFound
	}
	return &Object{Object: o, f: f}, nil
}

// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info { return o.f }

// UnWrap returns the wrapped Object
func (o *Object) UnWrap() fs.Object { return o.Object }

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.Object.String()
}

// ID returns the ID of the Object if possible
func (o *Object) ID() string {
	if doer, ok := o.Object.(fs.IDer); ok {
		return doer.ID()
	}
	return ""
}

// GetTier returns the Tier of the Object if possible
func (o *Object) GetTier() string {
	if doer, ok := o.Object.(fs.GetTierer); ok {
		return doer.GetTier()
	}
	return ""
}

// SetTier set the Tier of the Object if possible
func (o *Object) SetTier(tier string) error {
	if doer, ok := o.Object.(fs.SetTierer); ok {
		return doer.SetTier(tier)
	}
	return errors.New("SetTier not supported")
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
	if doer, ok := o.Object.(fs.MimeTyper); ok {
		return doer.MimeType(ctx)
	}
	return ""
}

// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
	do, ok := o.Object.(fs.Metadataer)
	if !ok {
		return nil, nil
	}
	return do.Metadata(ctx)
}

// Check the interfaces are satisfied
var (
	_ fs.Fs              = (*Fs)(nil)
	_ fs.Purger          = (*Fs)(nil)
	_ fs.Copier          = (*Fs)(nil)
	_ fs.Mover           = (*Fs)(nil)
	_ fs.DirMover        = (*Fs)(nil)
	_ fs.Commander       = (*Fs)(nil)
	_ fs.PutUncheckeder  = (*Fs)(nil)
	_ fs.PutStreamer     = (*Fs)(nil)
	_ fs.CleanUpper      = (*Fs)(nil)
	_ fs.UnWrapper       = (*Fs)(nil)
	_ fs.ListRer         = (*Fs)(nil)
	_ fs.Abouter         = (*Fs)(nil)
	_ fs.Wrapper         = (*Fs)(nil)
	_ fs.MergeDirser     = (*Fs)(nil)
	_ fs.DirCacheFlusher = (*Fs)(nil)
	_ fs.ChangeNotifier  = (*Fs)(nil)
	_ fs.PublicLinker    = (*Fs)(nil)
	_ fs.UserInfoer      = (*Fs)(nil)
	_ fs.Disconnecter    = (*Fs)(nil)
	_ fs.Shutdowner      = (*Fs)(nil)
	_ fs.FullObject      = (*Object)(nil)
)
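The `var ( _ fs.Fs = (*Fs)(nil) ... )` block at the end of the file above is Go's compile-time interface check pattern. A generic, stdlib-only sketch of the same idea:

```go
package main

import "io"

type Thing struct{}

func (t *Thing) Close() error { return nil }

// Assigning a typed nil pointer to a blank interface variable makes the
// build fail if *Thing ever stops satisfying io.Closer.
var _ io.Closer = (*Thing)(nil)

func main() {}
```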
@@ -1,80 +0,0 @@
package hasher

import (
	"context"
	"fmt"
	"os"
	"testing"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
	"github.com/rclone/rclone/lib/kv"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func putFile(ctx context.Context, t *testing.T, f fs.Fs, name, data string) fs.Object {
	mtime1 := fstest.Time("2001-02-03T04:05:06.499999999Z")
	item := fstest.Item{Path: name, ModTime: mtime1}
	o := fstests.PutTestContents(ctx, t, f, &item, data, true)
	require.NotNil(t, o)
	return o
}

func (f *Fs) testUploadFromCrypt(t *testing.T) {
	// make a temporary local remote
	tempRoot, err := fstest.LocalRemote()
	require.NoError(t, err)
	defer func() {
		_ = os.RemoveAll(tempRoot)
	}()

	// make a temporary crypt remote
	ctx := context.Background()
	pass := obscure.MustObscure("crypt")
	remote := fmt.Sprintf(`:crypt,remote="%s",password="%s":`, tempRoot, pass)
	cryptFs, err := fs.NewFs(ctx, remote)
	require.NoError(t, err)

	// make a test file on the crypt remote
	const dirName = "from_crypt_1"
	const fileName = dirName + "/file_from_crypt_1"
	const longTime = fs.ModTimeNotSupported
	src := putFile(ctx, t, cryptFs, fileName, "doggy froggy")

	// ensure that hash does not exist yet
	_ = f.pruneHash(fileName)
	hashType := f.keepHashes.GetOne()
	hash, err := f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime)
	assert.Error(t, err)
	assert.Empty(t, hash)

	// upload file to hasher
	in, err := src.Open(ctx)
	require.NoError(t, err)
	dst, err := f.Put(ctx, in, src)
	require.NoError(t, err)
	assert.NotNil(t, dst)

	// check that hash was created
	if f.opt.MaxAge > 0 {
		hash, err = f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime)
		assert.NoError(t, err)
		assert.NotEmpty(t, hash)
	}
	//t.Logf("hash is %q", hash)
	_ = operations.Purge(ctx, f, dirName)
}

// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
	if !kv.Supported() {
		t.Skip("hasher is not supported on this OS")
	}
	t.Run("UploadFromCrypt", f.testUploadFromCrypt)
}

var _ fstests.InternalTester = (*Fs)(nil)
@@ -1,45 +0,0 @@
package hasher_test

import (
	"os"
	"path/filepath"
	"testing"

	"github.com/rclone/rclone/backend/hasher"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
	"github.com/rclone/rclone/lib/kv"

	_ "github.com/rclone/rclone/backend/all" // for integration tests
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	if !kv.Supported() {
		t.Skip("hasher is not supported on this OS")
	}
	opt := fstests.Opt{
		RemoteName: *fstest.RemoteName,
		NilObject:  (*hasher.Object)(nil),
		UnimplementableFsMethods: []string{
			"OpenWriterAt",
			"OpenChunkWriter",
		},
		UnimplementableObjectMethods: []string{},
	}
	if *fstest.RemoteName == "" {
		tempDir := filepath.Join(os.TempDir(), "rclone-hasher-test")
		opt.ExtraConfig = []fstests.ExtraConfigItem{
			{Name: "TestHasher", Key: "type", Value: "hasher"},
			{Name: "TestHasher", Key: "remote", Value: tempDir},
		}
		opt.RemoteName = "TestHasher:"
		opt.QuickTestOK = true
	}
	fstests.Run(t, &opt)
	// test again with MaxAge = 0
	if *fstest.RemoteName == "" {
		opt.ExtraConfig = append(opt.ExtraConfig, fstests.ExtraConfigItem{Name: "TestHasher", Key: "max_age", Value: "0"})
		fstests.Run(t, &opt)
	}
}
@@ -1,315 +0,0 @@
package hasher

import (
	"bytes"
	"context"
	"encoding/gob"
	"errors"
	"fmt"
	"strings"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/lib/kv"
)

const (
	timeFormat     = "2006-01-02T15:04:05.000000000-0700"
	anyFingerprint = "*"
)

type hashMap map[hash.Type]string

type hashRecord struct {
	Fp      string // fingerprint
	Hashes  operations.HashSums
	Created time.Time
}

func (r *hashRecord) encode(key string) ([]byte, error) {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(r); err != nil {
		fs.Debugf(key, "hasher encoding %v: %v", r, err)
		return nil, err
	}
	return buf.Bytes(), nil
}

func (r *hashRecord) decode(key string, data []byte) error {
	if err := gob.NewDecoder(bytes.NewBuffer(data)).Decode(r); err != nil {
		fs.Debugf(key, "hasher decoding %q failed: %v", data, err)
		return err
	}
	return nil
}

// kvPrune: prune a single hash
type kvPrune struct {
	key string
}

func (op *kvPrune) Do(ctx context.Context, b kv.Bucket) error {
	return b.Delete([]byte(op.key))
}

// kvPurge: delete a subtree
type kvPurge struct {
	dir string
}

func (op *kvPurge) Do(ctx context.Context, b kv.Bucket) error {
	dir := op.dir
	if !strings.HasSuffix(dir, "/") {
		dir += "/"
	}
	var items []string
	cur := b.Cursor()
	bkey, _ := cur.Seek([]byte(dir))
	for bkey != nil {
		key := string(bkey)
		if !strings.HasPrefix(key, dir) {
			break
		}
		items = append(items, key[len(dir):])
		bkey, _ = cur.Next()
	}
	nerr := 0
	for _, sub := range items {
		if err := b.Delete([]byte(dir + sub)); err != nil {
			nerr++
		}
	}
	fs.Debugf(dir, "%d hashes purged, %d failed", len(items)-nerr, nerr)
	return nil
}

// kvMove: assign hashes to new path
type kvMove struct {
	src string
	dst string
	dir bool
	fs  *Fs
}

func (op *kvMove) Do(ctx context.Context, b kv.Bucket) error {
	src, dst := op.src, op.dst
	if !op.dir {
		err := moveHash(b, src, dst)
		fs.Debugf(op.fs, "moving cached hash %s to %s (err: %v)", src, dst, err)
		return err
	}

	if !strings.HasSuffix(src, "/") {
		src += "/"
	}
	if !strings.HasSuffix(dst, "/") {
		dst += "/"
	}

	var items []string
	cur := b.Cursor()
	bkey, _ := cur.Seek([]byte(src))
	for bkey != nil {
		key := string(bkey)
		if !strings.HasPrefix(key, src) {
			break
		}
		items = append(items, key[len(src):])
		bkey, _ = cur.Next()
	}

	nerr := 0
	for _, suffix := range items {
		srcKey, dstKey := src+suffix, dst+suffix
		err := moveHash(b, srcKey, dstKey)
		fs.Debugf(op.fs, "Rename cache record %s -> %s (err: %v)", srcKey, dstKey, err)
		if err != nil {
			nerr++
		}
	}
	fs.Debugf(op.fs, "%d hashes moved, %d failed", len(items)-nerr, nerr)
	return nil
}

func moveHash(b kv.Bucket, src, dst string) error {
	data := b.Get([]byte(src))
	err := b.Delete([]byte(src))
	if err != nil || len(data) == 0 {
		return err
	}
	return b.Put([]byte(dst), data)
}

// kvGet: get single hash from database
type kvGet struct {
	key  string
	fp   string
	hash string
	val  string
	age  time.Duration
}

func (op *kvGet) Do(ctx context.Context, b kv.Bucket) error {
	data := b.Get([]byte(op.key))
	if len(data) == 0 {
		return errors.New("no record")
	}
	var r hashRecord
	if err := r.decode(op.key, data); err != nil {
		return errors.New("invalid record")
	}
	if !(r.Fp == anyFingerprint || op.fp == anyFingerprint || r.Fp == op.fp) {
		return errors.New("fingerprint changed")
	}
	if time.Since(r.Created) > op.age {
		return errors.New("record timed out")
	}
	if r.Hashes != nil {
		op.val = r.Hashes[op.hash]
	}
	return nil
}

// kvPut: set hashes for an object by key
type kvPut struct {
	key    string
	fp     string
	hashes operations.HashSums
	age    time.Duration
}

func (op *kvPut) Do(ctx context.Context, b kv.Bucket) (err error) {
	data := b.Get([]byte(op.key))
	var r hashRecord
	if len(data) > 0 {
		err = r.decode(op.key, data)
		if err != nil || r.Fp != op.fp || time.Since(r.Created) > op.age {
			r.Hashes = nil
		}
	}
	if len(r.Hashes) == 0 {
		r.Created = time.Now()
		r.Hashes = operations.HashSums{}
		r.Fp = op.fp
	}

	for hashType, hashVal := range op.hashes {
		r.Hashes[hashType] = hashVal
	}
	if data, err = r.encode(op.key); err != nil {
		return fmt.Errorf("marshal failed: %w", err)
	}
	if err = b.Put([]byte(op.key), data); err != nil {
		return fmt.Errorf("put failed: %w", err)
	}
	return err
}

// kvDump: dump the database.
// Note: long dump can cause concurrent operations to fail.
type kvDump struct {
	full  bool
	root  string
	path  string
	fs    *Fs
	num   int
	total int
}

func (op *kvDump) Do(ctx context.Context, b kv.Bucket) error {
	f, baseRoot, dbPath := op.fs, op.root, op.path

	if op.full {
		total := 0
		num := 0
		_ = b.ForEach(func(bkey, data []byte) error {
			total++
			key := string(bkey)
			include := (baseRoot == "" || key == baseRoot || strings.HasPrefix(key, baseRoot+"/"))
			var r hashRecord
			if err := r.decode(key, data); err != nil {
				fs.Errorf(nil, "%s: invalid record: %v", key, err)
				return nil
			}
			fmt.Println(f.dumpLine(&r, key, include, nil))
			if include {
				num++
			}
			return nil
		})
		fs.Infof(dbPath, "%d records out of %d", num, total)
		op.num, op.total = num, total // for unit tests
		return nil
	}

	num := 0
	cur := b.Cursor()
	var bkey, data []byte
	if baseRoot != "" {
		bkey, data = cur.Seek([]byte(baseRoot))
	} else {
		bkey, data = cur.First()
	}
	for bkey != nil {
		key := string(bkey)
		if !(baseRoot == "" || key == baseRoot || strings.HasPrefix(key, baseRoot+"/")) {
			break
		}
		var r hashRecord
		if err := r.decode(key, data); err != nil {
			fs.Errorf(nil, "%s: invalid record: %v", key, err)
			continue
		}
		if key = strings.TrimPrefix(key[len(baseRoot):], "/"); key == "" {
			key = "/"
		}
		fmt.Println(f.dumpLine(&r, key, true, nil))
		num++
		bkey, data = cur.Next()
	}
	fs.Infof(dbPath, "%d records", num)
	op.num = num // for unit tests
	return nil
}

func (f *Fs) dumpLine(r *hashRecord, path string, include bool, err error) string {
	var status string
	switch {
	case !include:
		status = "ext"
	case err != nil:
		status = "bad"
	case r.Fp == anyFingerprint:
		status = "stk"
	default:
		status = "ok "
	}

	var hashes []string
	for _, hashType := range f.keepHashes.Array() {
		hashName := hashType.String()
		hashVal := r.Hashes[hashName]
		if hashVal == "" || err != nil {
			hashVal = "-"
		}
		hashVal = fmt.Sprintf("%-*s", hash.Width(hashType, false), hashVal)
		hashes = append(hashes, hashName+":"+hashVal)
	}
	hashesStr := strings.Join(hashes, " ")

	age := time.Since(r.Created).Round(time.Second)
	if age > 24*time.Hour {
		age = age.Round(time.Hour)
	}
	if err != nil {
		age = 0
	}
	ageStr := age.String()
	if strings.HasSuffix(ageStr, "h0m0s") {
		ageStr = strings.TrimSuffix(ageStr, "0m0s")
	}

	return fmt.Sprintf("%s %s %9s %s", status, hashesStr, ageStr, path)
}
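The `encode`/`decode` helpers above serialise `hashRecord` values with `encoding/gob` before they are stored in the key-value bucket. A standalone sketch of the same round trip, with a simplified record type standing in for `hashRecord`:

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"time"
)

type record struct {
	Fp      string
	Hashes  map[string]string
	Created time.Time
}

func main() {
	in := record{Fp: "12,-,abc", Hashes: map[string]string{"md5": "d41d8..."}, Created: time.Now()}

	// encode the record into a byte slice suitable for storing in a bucket
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(&in); err != nil {
		panic(err)
	}

	// decode it back into a fresh value
	var out record
	if err := gob.NewDecoder(bytes.NewReader(buf.Bytes())).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.Fp, out.Hashes["md5"])
}
```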
@@ -1,304 +0,0 @@
package hasher

import (
	"context"
	"errors"
	"fmt"
	"io"
	"path"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
)

// obtain hash for an object
func (o *Object) getHash(ctx context.Context, hashType hash.Type) (string, error) {
	maxAge := time.Duration(o.f.opt.MaxAge)
	if maxAge <= 0 {
		return "", nil
	}
	fp := o.fingerprint(ctx)
	if fp == "" {
		return "", errors.New("fingerprint failed")
	}
	return o.f.getRawHash(ctx, hashType, o.Remote(), fp, maxAge)
}

// obtain hash for a path
func (f *Fs) getRawHash(ctx context.Context, hashType hash.Type, remote, fp string, age time.Duration) (string, error) {
	key := path.Join(f.Fs.Root(), remote)
	op := &kvGet{
		key:  key,
		fp:   fp,
		hash: hashType.String(),
		age:  age,
	}
	err := f.db.Do(false, op)
	return op.val, err
}

// put new hashes for an object
func (o *Object) putHashes(ctx context.Context, rawHashes hashMap) error {
	if o.f.opt.MaxAge <= 0 {
		return nil
	}
	fp := o.fingerprint(ctx)
	if fp == "" {
		return nil
	}
	key := path.Join(o.f.Fs.Root(), o.Remote())
	hashes := operations.HashSums{}
	for hashType, hashVal := range rawHashes {
		hashes[hashType.String()] = hashVal
	}
	return o.f.putRawHashes(ctx, key, fp, hashes)
}

// set hashes for a path without any validation
func (f *Fs) putRawHashes(ctx context.Context, key, fp string, hashes operations.HashSums) error {
	return f.db.Do(true, &kvPut{
		key:    key,
		fp:     fp,
		hashes: hashes,
		age:    time.Duration(f.opt.MaxAge),
	})
}

// Hash returns the selected checksum of the file or "" if unavailable.
func (o *Object) Hash(ctx context.Context, hashType hash.Type) (hashVal string, err error) {
	f := o.f
	if f.passHashes.Contains(hashType) {
		fs.Debugf(o, "pass %s", hashType)
		return o.Object.Hash(ctx, hashType)
	}
	if !f.suppHashes.Contains(hashType) {
		fs.Debugf(o, "unsupp %s", hashType)
		return "", hash.ErrUnsupported
	}
	if hashVal, err = o.getHash(ctx, hashType); err != nil {
		fs.Debugf(o, "getHash: %v", err)
		err = nil
		hashVal = ""
	}
	if hashVal != "" {
		fs.Debugf(o, "cached %s = %q", hashType, hashVal)
		return hashVal, nil
	}
	if f.slowHashes.Contains(hashType) {
		fs.Debugf(o, "slow %s", hashType)
		hashVal, err = o.Object.Hash(ctx, hashType)
		if err == nil && hashVal != "" && f.keepHashes.Contains(hashType) {
			if err = o.putHashes(ctx, hashMap{hashType: hashVal}); err != nil {
				fs.Debugf(o, "putHashes: %v", err)
				err = nil
			}
		}
		return hashVal, err
	}
	if f.autoHashes.Contains(hashType) && o.Size() < int64(f.opt.AutoSize) {
		_ = o.updateHashes(ctx)
		if hashVal, err = o.getHash(ctx, hashType); err != nil {
			fs.Debugf(o, "auto %s = %q (%v)", hashType, hashVal, err)
			err = nil
		}
	}
	return hashVal, err
}

// updateHashes performs implicit "rclone hashsum --download" and updates cache.
func (o *Object) updateHashes(ctx context.Context) error {
	r, err := o.Open(ctx)
	if err != nil {
		fs.Infof(o, "update failed (open): %v", err)
		return err
	}
	defer func() {
		_ = r.Close()
	}()
	if _, err = io.Copy(io.Discard, r); err != nil {
		fs.Infof(o, "update failed (copy): %v", err)
		return err
	}
	return nil
}

// Update the object with the given data, time and size.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	_ = o.f.pruneHash(src.Remote())
	return o.Object.Update(ctx, in, src, options...)
}

// Remove an object.
func (o *Object) Remove(ctx context.Context) error {
	_ = o.f.pruneHash(o.Remote())
	return o.Object.Remove(ctx)
}

// SetModTime sets the modification time of the file.
// Also prunes the cache entry when modtime changes so that
// touching a file will trigger checksum recalculation even
// on backends that don't provide modTime with fingerprint.
func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
	if mtime != o.Object.ModTime(ctx) {
		_ = o.f.pruneHash(o.Remote())
	}
	return o.Object.SetModTime(ctx, mtime)
}

// Open opens the file for read.
// Full reads will also update object hashes.
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (r io.ReadCloser, err error) {
	size := o.Size()
	var offset, limit int64 = 0, -1
	for _, option := range options {
		switch opt := option.(type) {
		case *fs.SeekOption:
			offset = opt.Offset
		case *fs.RangeOption:
			offset, limit = opt.Decode(size)
		}
	}
	if offset < 0 {
		return nil, errors.New("invalid offset")
	}
	if limit < 0 {
		limit = size - offset
	}
	if r, err = o.Object.Open(ctx, options...); err != nil {
		return nil, err
	}
	if offset != 0 || limit < size {
		// It's a partial read
		return r, err
	}
	return o.f.newHashingReader(ctx, r, func(sums hashMap) {
		if err := o.putHashes(ctx, sums); err != nil {
			fs.Infof(o, "auto hashing error: %v", err)
		}
	})
}

// Put data into the remote path with given modTime and size
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	var (
		o      fs.Object
		common hash.Set
		rehash bool
		hashes hashMap
	)
	if fsrc := src.Fs(); fsrc != nil {
		common = fsrc.Hashes().Overlap(f.keepHashes)
		// Rehash if source does not have all required hashes or hashing is slow
		rehash = fsrc.Features().SlowHash || common != f.keepHashes
	}

	wrapIn := in
	if rehash {
		r, err := f.newHashingReader(ctx, in, func(sums hashMap) {
			hashes = sums
		})
		fs.Debugf(src, "Rehash in-fly due to incomplete or slow source set %v (err: %v)", common, err)
		if err == nil {
			wrapIn = r
		} else {
			rehash = false
		}
	}

	_ = f.pruneHash(src.Remote())
	oResult, err := f.Fs.Put(ctx, wrapIn, src, options...)
	o, err = f.wrapObject(oResult, err)
	if err != nil {
		return nil, err
	}

	if !rehash {
		hashes = hashMap{}
		for _, ht := range common.Array() {
			if h, e := src.Hash(ctx, ht); e == nil && h != "" {
				hashes[ht] = h
			}
		}
	}
	if len(hashes) > 0 {
		err := o.(*Object).putHashes(ctx, hashes)
		fs.Debugf(o, "Applied %d source hashes, err: %v", len(hashes), err)
	}
	return o, err
}

type hashingReader struct {
	rd     io.Reader
	hasher *hash.MultiHasher
	fun    func(hashMap)
}

func (f *Fs) newHashingReader(ctx context.Context, rd io.Reader, fun func(hashMap)) (*hashingReader, error) {
	hasher, err := hash.NewMultiHasherTypes(f.keepHashes)
	if err != nil {
		return nil, err
	}
	hr := &hashingReader{
		rd:     rd,
		hasher: hasher,
		fun:    fun,
	}
	return hr, nil
}

func (r *hashingReader) Read(p []byte) (n int, err error) {
	n, err = r.rd.Read(p)
	if err != nil && err != io.EOF {
		r.hasher = nil
	}
	if r.hasher != nil {
		if _, errHash := r.hasher.Write(p[:n]); errHash != nil {
			r.hasher = nil
			err = errHash
		}
	}
	if err == io.EOF && r.hasher != nil {
		r.fun(r.hasher.Sums())
		r.hasher = nil
	}
	return
}

func (r *hashingReader) Close() error {
	if rc, ok := r.rd.(io.ReadCloser); ok {
		return rc.Close()
	}
	return nil
}

// Return object fingerprint or empty string in case of errors
//
// Note that we can't use the generic `fs.Fingerprint` here because
// this fingerprint is used to pick _derived hashes_ that are slow
// to calculate or completely unsupported by the base remote.
//
// The hasher fingerprint must be based on `fsHash`, the first _fast_
// hash supported _by the underlying remote_ (if there is one),
// while `fs.Fingerprint` would select a hash _produced by hasher_
// creating unresolvable fingerprint loop.
func (o *Object) fingerprint(ctx context.Context) string {
	size := o.Object.Size()
	timeStr := "-"
	if o.f.fpTime {
		timeStr = o.Object.ModTime(ctx).UTC().Format(timeFormat)
		if timeStr == "" {
			return ""
		}
	}
	hashStr := "-"
	if o.f.fpHash != hash.None {
		var err error
		hashStr, err = o.Object.Hash(ctx, o.f.fpHash)
		if hashStr == "" || err != nil {
			return ""
		}
	}
	return fmt.Sprintf("%d,%s,%s", size, timeStr, hashStr)
}
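The `hashingReader` above computes checksums as the data streams through a full read and reports them via a callback once EOF is reached. A simplified, stdlib-only sketch of the same idea (single SHA-1 instead of a multi-hasher):

```go
package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"hash"
	"io"
	"strings"
)

type hashingReader struct {
	rd  io.Reader
	h   hash.Hash
	fun func(sum string)
}

func (r *hashingReader) Read(p []byte) (int, error) {
	n, err := r.rd.Read(p)
	if n > 0 && r.h != nil {
		r.h.Write(p[:n]) // hash.Hash.Write never returns an error
	}
	if err == io.EOF && r.h != nil {
		r.fun(hex.EncodeToString(r.h.Sum(nil))) // report the sum exactly once, at EOF
		r.h = nil
	}
	return n, err
}

func main() {
	hr := &hashingReader{
		rd:  strings.NewReader("doggy froggy"),
		h:   sha1.New(),
		fun: func(sum string) { fmt.Println("sha1:", sum) },
	}
	_, _ = io.Copy(io.Discard, hr) // a full read triggers the callback
}
```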
@@ -1,4 +1,3 @@
-//go:build !plan9
 // +build !plan9
 
 package hdfs
@@ -21,7 +20,6 @@ import (
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/lib/pacer"
 )
 
 // Fs represents a HDFS server
@@ -32,15 +30,8 @@ type Fs struct {
 	opt    Options        // options for this backend
 	ci     *fs.ConfigInfo // global config
 	client *hdfs.Client
-	pacer  *fs.Pacer      // pacer for API calls
 }
 
-const (
-	minSleep      = 20 * time.Millisecond
-	maxSleep      = 10 * time.Second
-	decayConstant = 2 // bigger for slower decay, exponential
-)
-
 // copy-paste from https://github.com/colinmarc/hdfs/blob/master/cmd/hdfs/kerberos.go
 func getKerberosClient() (*krb.Client, error) {
 	configPath := os.Getenv("KRB5_CONFIG")
@@ -93,14 +84,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}
 
 	options := hdfs.ClientOptions{
-		Addresses:           opt.Namenode,
+		Addresses:           []string{opt.Namenode},
 		UseDatanodeHostname: false,
 	}
 
 	if opt.ServicePrincipalName != "" {
 		options.KerberosClient, err = getKerberosClient()
 		if err != nil {
-			return nil, fmt.Errorf("problem with kerberos authentication: %w", err)
+			return nil, fmt.Errorf("Problem with kerberos authentication: %s", err)
 		}
 		options.KerberosServicePrincipleName = opt.ServicePrincipalName
 
@@ -122,7 +113,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		opt:    *opt,
 		ci:     fs.GetConfig(ctx),
 		client: client,
-		pacer:  fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
 
 	f.features = (&fs.Features{
@@ -272,98 +262,6 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
|
|||||||
return f.client.RemoveAll(realpath)
|
return f.client.RemoveAll(realpath)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Move src to this remote using server-side move operations.
|
|
||||||
//
|
|
||||||
// This is stored with the remote path given.
|
|
||||||
//
|
|
||||||
// It returns the destination Object and a possible error.
|
|
||||||
//
|
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
|
||||||
//
|
|
||||||
// If it isn't possible then return fs.ErrorCantMove
|
|
||||||
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
|
||||||
srcObj, ok := src.(*Object)
|
|
||||||
if !ok {
|
|
||||||
fs.Debugf(src, "Can't move - not same remote type")
|
|
||||||
return nil, fs.ErrorCantMove
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get the real paths from the remote specs:
|
|
||||||
sourcePath := srcObj.fs.realpath(srcObj.remote)
|
|
||||||
targetPath := f.realpath(remote)
|
|
||||||
fs.Debugf(f, "rename [%s] to [%s]", sourcePath, targetPath)
|
|
||||||
|
|
||||||
// Make sure the target folder exists:
|
|
||||||
dirname := path.Dir(targetPath)
|
|
||||||
err := f.client.MkdirAll(dirname, 0755)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Do the move
|
|
||||||
// Note that the underlying HDFS library hard-codes Overwrite=True, but this is expected rclone behaviour.
|
|
||||||
err = f.client.Rename(sourcePath, targetPath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Look up the resulting object
|
|
||||||
info, err := f.client.Stat(targetPath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// And return it:
|
|
||||||
return &Object{
|
|
||||||
fs: f,
|
|
||||||
remote: remote,
|
|
||||||
size: info.Size(),
|
|
||||||
modTime: info.ModTime(),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
|
||||||
// using server-side move operations.
|
|
||||||
//
|
|
||||||
// Will only be called if src.Fs().Name() == f.Name()
|
|
||||||
//
|
|
||||||
// If it isn't possible then return fs.ErrorCantDirMove
|
|
||||||
//
|
|
||||||
// If destination exists then return fs.ErrorDirExists
|
|
||||||
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
|
|
||||||
srcFs, ok := src.(*Fs)
|
|
||||||
if !ok {
|
|
||||||
return fs.ErrorCantDirMove
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get the real paths from the remote specs:
|
|
||||||
sourcePath := srcFs.realpath(srcRemote)
|
|
||||||
targetPath := f.realpath(dstRemote)
|
|
||||||
fs.Debugf(f, "rename [%s] to [%s]", sourcePath, targetPath)
|
|
||||||
|
|
||||||
// Check if the destination exists:
|
|
||||||
info, err := f.client.Stat(targetPath)
|
|
||||||
if err == nil {
|
|
||||||
fs.Debugf(f, "target directory already exits, IsDir = [%t]", info.IsDir())
|
|
||||||
return fs.ErrorDirExists
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make sure the targets parent folder exists:
|
|
||||||
dirname := path.Dir(targetPath)
|
|
||||||
err = f.client.MkdirAll(dirname, 0755)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Do the move
|
|
||||||
err = f.client.Rename(sourcePath, targetPath)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// About gets quota information from the Fs
|
// About gets quota information from the Fs
|
||||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||||
info, err := f.client.StatFs()
|
info, err := f.client.StatFs()
|
@@ -419,6 +317,4 @@ var (
 	_ fs.Purger      = (*Fs)(nil)
 	_ fs.PutStreamer = (*Fs)(nil)
 	_ fs.Abouter     = (*Fs)(nil)
-	_ fs.Mover       = (*Fs)(nil)
-	_ fs.DirMover    = (*Fs)(nil)
 )
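Dropping Move and DirMove is also why the two assertions disappear from the var block above: lines like `var _ fs.Mover = (*Fs)(nil)` are compile-time checks that fail the build as soon as *Fs stops implementing the interface. A minimal, self-contained illustration of the pattern, using stand-in types rather than rclone's:

package main

import "fmt"

// Mover is a stand-in for an optional-feature interface such as fs.Mover.
type Mover interface {
	Move(src, dst string) error
}

// Fs is a stand-in backend type.
type Fs struct{}

// Move implements Mover; delete it and the assertion below stops compiling.
func (f *Fs) Move(src, dst string) error {
	fmt.Printf("moving %s to %s\n", src, dst)
	return nil
}

// Compile-time check, same shape as rclone's `var _ fs.Mover = (*Fs)(nil)`.
var _ Mover = (*Fs)(nil)

func main() {
	var m Mover = &Fs{}
	_ = m.Move("a.txt", "b.txt")
}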
@@ -1,7 +1,5 @@
-//go:build !plan9
 // +build !plan9
 
-// Package hdfs provides an interface to the HDFS storage system.
 package hdfs
 
 import (
@@ -19,36 +17,42 @@ func init() {
 		Description: "Hadoop distributed file system",
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name: "namenode",
-			Help: "Hadoop name nodes and ports.\n\nE.g. \"namenode-1:8020,namenode-2:8020,...\" to connect to host namenodes at port 8020.",
+			Help: "hadoop name node and port",
 			Required: true,
-			Sensitive: true,
-			Default:   fs.CommaSepList{},
+			Examples: []fs.OptionExample{{
+				Value: "namenode:8020",
+				Help:  "Connect to host namenode at port 8020",
+			}},
 		}, {
 			Name: "username",
-			Help: "Hadoop user name.",
+			Help:     "hadoop user name",
+			Required: false,
 			Examples: []fs.OptionExample{{
 				Value: "root",
-				Help:  "Connect to hdfs as root.",
+				Help:  "Connect to hdfs as root",
 			}},
-			Sensitive: true,
 		}, {
 			Name: "service_principal_name",
-			Help: `Kerberos service principal name for the namenode.
+			Help: `Kerberos service principal name for the namenode
 
 Enables KERBEROS authentication. Specifies the Service Principal Name
-(SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
-for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
-			Advanced:  true,
-			Sensitive: true,
+(<SERVICE>/<FQDN>) for the namenode.`,
+			Required: false,
+			Examples: []fs.OptionExample{{
+				Value: "hdfs/namenode.hadoop.docker",
+				Help:  "Namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.",
+			}},
+			Advanced: true,
 		}, {
 			Name: "data_transfer_protection",
-			Help: `Kerberos data transfer protection: authentication|integrity|privacy.
+			Help: `Kerberos data transfer protection: authentication|integrity|privacy
 
 Specifies whether or not authentication, data signature integrity
-checks, and wire encryption are required when communicating with
-the datanodes. Possible values are 'authentication', 'integrity'
-and 'privacy'. Used only with KERBEROS enabled.`,
+checks, and wire encryption is required when communicating the the
+datanodes. Possible values are 'authentication', 'integrity' and
+'privacy'. Used only with KERBEROS enabled.`,
+			Required: false,
 			Examples: []fs.OptionExample{{
 				Value: "privacy",
 				Help:  "Ensure authentication, integrity and encryption enabled.",
@@ -66,7 +70,7 @@ and 'privacy'. Used only with KERBEROS enabled.`,
 
 // Options for this backend
 type Options struct {
-	Namenode               fs.CommaSepList `config:"namenode"`
+	Namenode               string `config:"namenode"`
 	Username               string `config:"username"`
 	ServicePrincipalName   string `config:"service_principal_name"`
 	DataTransferProtection string `config:"data_transfer_protection"`
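The `config:"..."` struct tags above are what link the option names registered in init() to the Options fields; NewFs reads the remote's saved config into this struct via configstruct before building the HDFS client. A rough, self-contained sketch of that mechanism (the config values are invented and error handling is reduced to a panic):

package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
)

// Options mirrors the shape of the backend's Options struct on the older
// branch (single namenode string); the values below are made up.
type Options struct {
	Namenode               string `config:"namenode"`
	Username               string `config:"username"`
	ServicePrincipalName   string `config:"service_principal_name"`
	DataTransferProtection string `config:"data_transfer_protection"`
}

func main() {
	// configmap.Simple is a plain map[string]string that satisfies
	// configmap.Getter, standing in for the remote's saved config.
	m := configmap.Simple{
		"namenode": "namenode:8020",
		"username": "root",
	}

	opt := new(Options)
	if err := configstruct.Set(m, opt); err != nil {
		panic(err)
	}
	fmt.Printf("namenode=%q username=%q\n", opt.Namenode, opt.Username)
}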
Some files were not shown because too many files have changed in this diff.