mirror of https://github.com/rclone/rclone.git synced 2026-01-08 11:33:33 +00:00

Compare commits


17 Commits

Author SHA1 Message Date
Nick Craig-Wood
1bdab29eab Version v1.49.3 2019-09-15 16:42:10 +01:00
Nick Craig-Wood
f77027e6b7 fs/accounting: Fix "file already closed" on transfer retries
This was caused by the recent reworking of the accounting interface.
The Transfer object was recycling the Accounting object without
resetting the stream.

See: https://forum.rclone.org/t/error-file-already-closed/11469/
See: https://forum.rclone.org/t/rclone-b2-sync-post-error-method-not-supported/11718/
2019-09-13 18:37:01 +01:00
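A minimal Go sketch of the failure mode the commit above describes, using hypothetical names (Account, Reset) rather than rclone's actual accounting types: a wrapper recycled across retries must be re-pointed at a fresh stream, otherwise the retry reads from the closed reader of the previous attempt.

```go
package accounting

import "io"

// Account is a hypothetical stand-in for an accounting wrapper that is
// recycled between transfer attempts; it is not rclone's actual type.
type Account struct {
	in io.ReadCloser // the stream currently being accounted
}

func (a *Account) Read(p []byte) (int, error) { return a.in.Read(p) }
func (a *Account) Close() error               { return a.in.Close() }

// Reset points a recycled Account at a fresh stream before a retry.
// Skipping this step reproduces the bug above: the retry keeps reading
// from the previous attempt's closed reader and fails with
// "file already closed".
func (a *Account) Reset(in io.ReadCloser) {
	a.in = in
}
```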
Aleksandar Jankovic
f73d0eb920 accounting: fix total duration calculation
Fixes: #3498
2019-09-12 12:33:57 +01:00
Nick Craig-Wood
f1a9d821e4 Version v1.49.2 2019-09-08 16:48:54 +01:00
Nick Craig-Wood
5fe78936d5 test_all: write index.json and add branch, commit and Go version to report 2019-09-08 11:38:18 +01:00
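A rough illustration of the report metadata this commit describes; the field names and JSON layout are assumptions based on the commit message, not taken from rclone's test_all source.

```go
package testreport

import (
	"encoding/json"
	"os"
	"runtime"
)

// Report holds the metadata the commit message mentions; the field and
// key names here are assumptions, not rclone's test_all source.
type Report struct {
	Branch    string `json:"branch"`
	Commit    string `json:"commit"`
	GoVersion string `json:"goversion"`
}

// writeIndex writes the report metadata as index.json.
func writeIndex(path, branch, commit string) error {
	r := Report{Branch: branch, Commit: commit, GoVersion: runtime.Version()}
	data, err := json.MarshalIndent(r, "", "  ")
	if err != nil {
		return err
	}
	return os.WriteFile(path, data, 0o644)
}
```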
Nick Craig-Wood
4f3eee8d65 build: make sure we add version info to test_all build 2019-09-08 11:38:11 +01:00
Nick Craig-Wood
f2c05bc239 operations: fix -u/--update with google photos / files of unknown size
Before this change, if -u/--update was in effect, we compared the size of the files to see if the transfer should go ahead. Since this compared -1 with an actual size, the transfer always proceeded.

After this change we use the existing `sizeDiffers` function which
does the correct comparison with -1 for files of unknown length.

See: https://forum.rclone.org/t/sync-with-google-photos-to-local-drive-will-result-in-recoping/11605
2019-09-06 10:11:59 +01:00
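A hedged sketch of the comparison described above; the name sizeDiffers follows the commit message, but the signature here is an assumption, not rclone's exact code.

```go
package sizecheck

// sizeDiffers reports whether the source and destination sizes are known
// to differ. A size of -1 means "unknown" (google photos never reports a
// length), so unknown sizes never force or veto a transfer on their own.
// The name follows the commit message; the signature is an assumption.
func sizeDiffers(srcSize, dstSize int64) bool {
	if srcSize < 0 || dstSize < 0 {
		return false // unknown length: cannot say the sizes differ
	}
	return srcSize != dstSize
}
```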
Nick Craig-Wood
b463032901 accounting: fix locking in Transfer to avoid deadlock with --progress
Before this change, using -P occasionally deadlocked between the Transfer mutex and the StatsInfo mutex when Transfer.Done() was called with a non-nil error, since methods on the two objects call each other.

This was fixed by making sure that the Transfer mutex is always
released before calling any StatsInfo methods.

This improves on: 6f87267b34

Fixes #3505
2019-09-06 10:10:53 +01:00
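An illustration of the locking rule behind this fix, with hypothetical Transfer/Stats types standing in for rclone's: update your own state under your lock, release it, and only then call into the object that may call back.

```go
package locking

import "sync"

// Stats and Transfer are hypothetical stand-ins for the two objects whose
// methods call each other in the commit above.
type Stats struct {
	mu     sync.Mutex
	errors int
}

func (s *Stats) Error(err error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if err != nil {
		s.errors++
	}
}

type Transfer struct {
	mu    sync.Mutex
	done  bool
	stats *Stats
}

// Done marks the transfer finished. The Transfer mutex is released
// *before* calling into Stats, so a Stats method that calls back into
// Transfer can never end up waiting on both locks at once.
func (t *Transfer) Done(err error) {
	t.mu.Lock()
	t.done = true
	t.mu.Unlock() // release before touching StatsInfo-like state

	t.stats.Error(err)
}
```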
Nick Craig-Wood
358decb933 rc: fix docs for config/create /update /password 2019-09-03 08:33:56 +01:00
Nick Craig-Wood
cefa2df3b2 docs: add info on how to build and use the docker images 2019-09-02 14:31:19 +01:00
Alfonso Montero
52efb7e6d0 Add Docker workflow support #3460
* Use a multi-stage build to reduce final image size.
* Run 'quicktest' make target before building.
* Built binary won't run on Alpine unless statically linked.
2019-09-02 14:31:10 +01:00
Nick Craig-Wood
01fa6835c7 gcs: fix need for elevated permissions on SetModTime - fixes #3493
Before this change we used PATCH on the object to update the metadata.

Apparently this requires the "full_control" scope, which Google was unhappy with in their OAuth review.

This changes it to update the metadata by copying the object on top of itself (which is the way S3 works). This can be done with normal permissions.
2019-09-02 12:04:45 +01:00
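A hedged sketch of the copy-onto-itself approach using the official Google Cloud Storage Go client (rclone itself talks to the API through its own rest layer); the "mtime" metadata key is an assumption for illustration.

```go
package gcsmeta

import (
	"context"

	"cloud.google.com/go/storage"
)

// setModTime rewrites an object's metadata by copying the object onto
// itself, as the commit describes. This sketch uses the official Go
// client (rclone talks to the API through its own rest layer instead),
// and the "mtime" metadata key is assumed for illustration.
func setModTime(ctx context.Context, client *storage.Client, bucket, object, mtime string) error {
	obj := client.Bucket(bucket).Object(object)
	copier := obj.CopierFrom(obj) // destination == source
	copier.Metadata = map[string]string{"mtime": mtime}
	_, err := copier.Run(ctx) // server-side rewrite with the new metadata
	return err
}
```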
Cnly
8adf22e294 docs: fix template argument for mktemp in install.sh 2019-09-02 12:04:33 +01:00
Nick Craig-Wood
45f7c687e2 Version v1.49.1 2019-08-28 17:51:23 +01:00
Nick Craig-Wood
a05dd6fc27 config: Fix generated passwords being stored as empty password - Fixes #3492 2019-08-28 14:24:18 +01:00
Nick Craig-Wood
642cb03121 googlephotos,onedrive: fix crash on error response - fixes #3491
This fixes a crash on the google photos backend when an error is
returned from the rest.Call function.

This turned out to be a misunderstanding of the rest docs, so:
- improved the rest.Call docs
- fixed the misunderstanding in the google photos backend
- fixed a similar misunderstanding in the onedrive backend
2019-08-28 14:24:08 +01:00
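A small sketch of the calling pattern at issue; the rest.Call-style helper signature here is an assumption. The point is that when a call can return a non-nil error alongside a nil response, the error path must nil-check the response before using it.

```go
package restcheck

import "net/http"

// fetch wraps a rest.Call-style helper that may return a nil
// *http.Response together with a non-nil error (this signature is an
// assumption for illustration).
func fetch(do func() (*http.Response, error)) (int, error) {
	resp, err := do()
	if err != nil {
		// The crash in the commit above came from using resp on the
		// error path; resp can be nil here and must be checked first.
		if resp != nil {
			return resp.StatusCode, err
		}
		return 0, err
	}
	defer resp.Body.Close()
	return resp.StatusCode, nil
}
```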
Chaitanya
da4dfdc3ec rcd: Added missing parameter for web-gui info logs. 2019-08-28 14:24:04 +01:00
2855 changed files with 760552 additions and 213543 deletions

.appveyor.yml (new file, 49 lines)

@@ -0,0 +1,49 @@
version: "{build}"
os: Windows Server 2012 R2
clone_folder: c:\gopath\src\github.com\rclone\rclone
cache:
- '%LocalAppData%\go-build'
environment:
GOPATH: C:\gopath
CPATH: C:\Program Files (x86)\WinFsp\inc\fuse
ORIGPATH: '%PATH%'
NOCCPATH: C:\MinGW\bin;%GOPATH%\bin;%PATH%
PATHCC64: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin;%NOCCPATH%
PATHCC32: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%NOCCPATH%
PATH: '%PATHCC64%'
RCLONE_CONFIG_PASS:
secure: sq9CPBbwaeKJv+yd24U44neORYPQVy6jsjnQptC+5yk=
install:
- choco install winfsp -y
- choco install zip -y
- copy c:\MinGW\bin\mingw32-make.exe c:\MinGW\bin\make.exe
build_script:
- echo %PATH%
- echo %GOPATH%
- go version
- go env
- go install
- go build
- make log_since_last_release > %TEMP%\git-log.txt
- make version > %TEMP%\version
- set /p RCLONE_VERSION=<%TEMP%\version
- set PATH=%PATHCC32%
- go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/386" -cgo -tags cmount %RCLONE_VERSION%
- set PATH=%PATHCC64%
- go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/amd64" -cgo -no-clean -tags cmount %RCLONE_VERSION%
test_script:
- make GOTAGS=cmount quicktest
artifacts:
- path: rclone.exe
- path: build/*-v*.zip
deploy_script:
- IF "%APPVEYOR_REPO_NAME%" == "rclone/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload

.circleci/config.yml (new file, 50 lines)

@@ -0,0 +1,50 @@
---
version: 2
jobs:
build:
machine: true
working_directory: ~/.go_workspace/src/github.com/rclone/rclone
steps:
- checkout
- run:
name: Cross-compile rclone
command: |
docker pull rclone/xgo-cgofuse
go get -v github.com/karalabe/xgo
xgo \
--image=rclone/xgo-cgofuse \
--targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
-tags cmount \
.
xgo \
--targets=android/*,ios/* \
.
- run:
name: Prepare artifacts
command: |
mkdir -p /tmp/rclone.dist
cp -R rclone-* /tmp/rclone.dist
mkdir build
cp -R rclone-* build/
- run:
name: Build rclone
command: |
go version
go build
- run:
name: Upload artifacts
command: |
if [[ $CIRCLE_PULL_REQUEST != "" ]]; then
make circleci_upload
fi
- store_artifacts:
path: /tmp/rclone.dist

.github/FUNDING.yml (vendored, 4 lines deleted)

@@ -1,4 +0,0 @@
github: [ncw]
patreon: njcw
liberapay: ncw
custom: ["https://rclone.org/donate/"]


@@ -5,31 +5,19 @@ about: Report a problem with rclone
 <!--
-We understand you are having a problem with rclone; we want to help you with that! Welcome :-)
+We understand you are having a problem with rclone; we want to help you with that!
-**STOP and READ**
+If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:
-**YOUR POST WILL BE REMOVED IF IT IS LOW QUALITY**:
-Please show the effort you've put in to solving the problem and please be specific.
-People are volunteering their time to help! Low effort posts are not likely to get good answers!
-If you think you might have found a bug, try to replicate it with the latest beta (or stable).
-The update instructions are available at https://rclone.org/commands/rclone_selfupdate/
-If you can still replicate it or just got a question then please use the rclone forum:
     https://forum.rclone.org/
-for a quick response instead of filing an issue on this repo.
+instead of filing an issue for a quick response.
-If nothing else helps, then please fill in the info below which helps us help you.
+If you think you might have found a bug, please can you try to replicate it with the latest beta?
-**DO NOT REDACT** any information except passwords/keys/personal info.
+    https://beta.rclone.org/
-You should use 3 backticks to begin and end your paste to make it readable.
+If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)
-Make sure to include a log obtained with '-vv'.
-You can also use '-vv --log-file bug.log' and a service such as https://pastebin.com or https://gist.github.com/
 Thank you
@@ -37,10 +25,6 @@ The Rclone Developers
 -->
-#### The associated forum post URL from `https://forum.rclone.org`
 #### What is the problem you are having with rclone?
@@ -49,26 +33,18 @@ The Rclone Developers
-#### Which OS you are using and how many bits (e.g. Windows 7, 64 bit)
+#### Which OS you are using and how many bits (eg Windows 7, 64 bit)
-#### Which cloud storage system are you using? (e.g. Google Drive)
+#### Which cloud storage system are you using? (eg Google Drive)
-#### The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
+#### The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
-#### A log from the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
+#### A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
-<!--- Please keep the note below for others who read your bug report. -->
-#### How to use GitHub
-* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are affected by the same issue.
-* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
-* Subscribe to receive notifications on status change and new comments.


@@ -7,16 +7,12 @@ about: Suggest a new feature or enhancement for rclone
 Welcome :-)
-So you've got an idea to improve rclone? We love that!
-You'll be glad to hear we've incorporated hundreds of ideas from contributors already.
-Probably the latest beta (or stable) release has your feature, so try to update your rclone.
-The update instructions are available at https://rclone.org/commands/rclone_selfupdate/
-If it still isn't there, here is a checklist of things to do:
-1. Search the old issues for your idea and +1 or comment on an existing issue if possible.
-2. Discuss on the forum: https://forum.rclone.org/
+So you've got an idea to improve rclone? We love that! You'll be glad to hear we've incorporated hundreds of ideas from contributors already.
+Here is a checklist of things to do:
+1. Please search the old issues first for your idea and +1 or comment on an existing issue if possible.
+2. Discuss on the forum first: https://forum.rclone.org/
 3. Make a feature request issue (this is the right place!).
 4. Be prepared to get involved making the feature :-)
@@ -26,9 +22,6 @@ The Rclone Developers
 -->
-#### The associated forum post URL from `https://forum.rclone.org`
 #### What is your current rclone version (output from `rclone version`)?
@@ -41,11 +34,3 @@ The Rclone Developers
 #### How do you think rclone should be changed to solve that?
-<!--- Please keep the note below for others who read your feature request. -->
-#### How to use GitHub
-* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are affected by the same issue.
-* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
-* Subscribe to receive notifications on status change and new comments.


@@ -1,5 +0,0 @@
blank_issues_enabled: false
contact_links:
- name: Rclone Forum Community Support
url: https://forum.rclone.org/
about: Please ask and answer questions here.


@@ -1,337 +0,0 @@
---
# Github Actions build for rclone
# -*- compile-command: "yamllint -f parsable build.yml" -*-
name: build
# Trigger the workflow on push or pull request
on:
push:
branches:
- '*'
tags:
- '*'
pull_request:
workflow_dispatch:
inputs:
manual:
required: true
default: true
jobs:
build:
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.13', 'go1.14', 'go1.15']
include:
- job_name: linux
os: ubuntu-latest
go: '1.16.x'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
quicktest: true
racequicktest: true
librclonetest: true
deploy: true
- job_name: mac_amd64
os: macOS-latest
go: '1.16.x'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
racequicktest: true
deploy: true
- job_name: mac_arm64
os: macOS-latest
go: '1.16.x'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -macos-sdk macosx11.1 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows_amd64
os: windows-latest
go: '1.16.x'
gotags: cmount
build_flags: '-include "^windows/amd64" -cgo'
build_args: '-buildmode exe'
quicktest: true
racequicktest: true
deploy: true
- job_name: windows_386
os: windows-latest
go: '1.16.x'
gotags: cmount
goarch: '386'
cgo: '1'
build_flags: '-include "^windows/386" -cgo'
build_args: '-buildmode exe'
quicktest: true
deploy: true
- job_name: other_os
os: ubuntu-latest
go: '1.16.x'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true
- job_name: go1.13
os: ubuntu-latest
go: '1.13.x'
quicktest: true
- job_name: go1.14
os: ubuntu-latest
go: '1.14.x'
quicktest: true
racequicktest: true
- job_name: go1.15
os: ubuntu-latest
go: '1.15.x'
quicktest: true
racequicktest: true
name: ${{ matrix.job_name }}
runs-on: ${{ matrix.os }}
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Install Go
uses: actions/setup-go@v2
with:
stable: 'false'
go-version: ${{ matrix.go }}
- name: Set environment variables
shell: bash
run: |
echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
echo 'BUILD_ARGS=${{ matrix.build_args }}' >> $GITHUB_ENV
if [[ "${{ matrix.goarch }}" != "" ]]; then echo 'GOARCH=${{ matrix.goarch }}' >> $GITHUB_ENV ; fi
if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi
- name: Install Libraries on Linux
shell: bash
run: |
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
sudo apt-get install fuse libfuse-dev rpm pkg-config
if: matrix.os == 'ubuntu-latest'
- name: Install Libraries on macOS
shell: bash
run: |
brew update
brew install --cask macfuse
if: matrix.os == 'macOS-latest'
- name: Install Libraries on Windows
shell: powershell
run: |
$ProgressPreference = 'SilentlyContinue'
choco install -y winfsp zip
echo "CPATH=C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
if ($env:GOARCH -eq "386") {
choco install -y mingw --forcex86 --force
echo "C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
}
# Copy mingw32-make.exe to make.exe so the same command line
# can be used on Windows as on macOS and Linux
$path = (get-command mingw32-make.exe).Path
Copy-Item -Path $path -Destination (Join-Path (Split-Path -Path $path) 'make.exe')
if: matrix.os == 'windows-latest'
- name: Print Go version and environment
shell: bash
run: |
printf "Using go at: $(which go)\n"
printf "Go version: $(go version)\n"
printf "\n\nGo environment:\n\n"
go env
printf "\n\nRclone environment:\n\n"
make vars
printf "\n\nSystem environment:\n\n"
env
- name: Go module cache
uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Build rclone
shell: bash
run: |
make
- name: Run tests
shell: bash
run: |
make quicktest
if: matrix.quicktest
- name: Race test
shell: bash
run: |
make racequicktest
if: matrix.racequicktest
- name: Run librclone tests
shell: bash
run: |
make -C librclone/ctest test
make -C librclone/ctest clean
librclone/python/test_rclone.py
if: matrix.librclonetest
- name: Code quality test
shell: bash
run: |
make build_dep
make check
if: matrix.check
- name: Compile all architectures test
shell: bash
run: |
make
make compile_all
if: matrix.compile_all
- name: Deploy built binaries
shell: bash
run: |
if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep_linux ; fi
if [[ "${{ matrix.os }}" == "windows-latest" ]]; then make release_dep_windows ; fi
make ci_beta
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# working-directory: '$(modulePath)'
# Deploy binaries if enabled in config && not a PR && not a fork
if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
android:
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
timeout-minutes: 30
name: "android-all"
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0
# Upgrade together with NDK version
- name: Set up Go 1.14
uses: actions/setup-go@v1
with:
go-version: 1.14
# Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
- name: Force NDK version
run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;21.4.7075529" | grep -v = || true
- name: Go module cache
uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Set global environment variables
shell: bash
run: |
echo "VERSION=$(make version)" >> $GITHUB_ENV
- name: build native rclone
run: |
make
- name: install gomobile
run: |
go get golang.org/x/mobile/cmd/gobind
go get golang.org/x/mobile/cmd/gomobile
env PATH=$PATH:~/go/bin gomobile init
- name: arm-v7a gomobile build
run: env PATH=$PATH:~/go/bin gomobile bind -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
- name: arm-v7a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm' >> $GITHUB_ENV
echo 'GOARM=7' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm-v7a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-armv7a .
- name: arm64-v8a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm64-v8a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-armv8a .
- name: x86 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=386' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x86 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-x86 .
- name: x64 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=amd64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x64 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-x64 .
- name: Upload artifacts
run: |
make ci_upload
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# Upload artifacts if not a PR && not a fork
if: github.head_ref == '' && github.repository == 'rclone/rclone'


@@ -1,26 +0,0 @@
name: Docker beta build
on:
push:
branches:
- master
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Build and publish image
uses: ilteoood/docker_buildx@1.1.0
with:
tag: beta
imageName: rclone/rclone
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}


@@ -1,34 +0,0 @@
name: Docker release build
on:
release:
types: [published]
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Get actual patch version
id: actual_patch_version
run: echo ::set-output name=ACTUAL_PATCH_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g')
- name: Get actual minor version
id: actual_minor_version
run: echo ::set-output name=ACTUAL_MINOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1,2)
- name: Get actual major version
id: actual_major_version
run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
- name: Build and publish image
uses: ilteoood/docker_buildx@1.1.0
with:
tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
imageName: rclone/rclone
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}

.gitignore (vendored, 7 lines changed)

@@ -1,15 +1,10 @@
 *~
 _junk/
 rclone
-rclone.exe
 build
 docs/public
 rclone.iml
 .idea
 .history
 *.test
 *.log
-*.iml
-fuzz-build.zip
-*.orig
-*.rej

.pkgr.yml (new file, 2 lines)

@@ -0,0 +1,2 @@
default_dependencies: false
cli: rclone

.travis.yml (new file, 128 lines)

@@ -0,0 +1,128 @@
---
language: go
sudo: required
dist: xenial
os:
- linux
go_import_path: github.com/rclone/rclone
before_install:
- git fetch --unshallow --tags
- |
if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
fi
if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
brew update
brew tap caskroom/cask
brew cask install osxfuse
fi
if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then
choco install -y winfsp zip make
cd ../.. # fix crlf in git checkout
mv $TRAVIS_REPO_SLUG _old
git config --global core.autocrlf false
git clone _old $TRAVIS_REPO_SLUG
cd $TRAVIS_REPO_SLUG
fi
install:
- make vars
env:
global:
- GOTAGS=cmount
- GOMAXPROCS=8 # workaround for cmd/mount tests locking up - see #3154
- GO111MODULE=off
- GITHUB_USER=ncw
- secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
- secure: Uaiveq+/rvQjO03GzvQZV2J6pZfedoFuhdXrLVhhHSeP4ZBca0olw7xaqkabUyP3LkVYXMDSX8EbyeuQT1jfEe5wp5sBdfaDtuYW6heFyjiHIIIbVyBfGXon6db4ETBjOaX/Xt8uktrgNge6qFlj+kpnmpFGxf0jmDLw1zgg7tk=
addons:
apt:
packages:
- fuse
- libfuse-dev
- rpm
- pkg-config
cache:
directories:
- $HOME/.cache/go-build
matrix:
allow_failures:
- go: tip
include:
- go: 1.9.x
script:
- make quicktest
- go: 1.10.x
script:
- make quicktest
- go: 1.11.x
script:
- make quicktest
- go: 1.12.x
name: Linux
env:
- GOTAGS=cmount
- BUILD_FLAGS='-include "^linux/"'
- DEPLOY=true
script:
- make build_dep
- make check
- make quicktest
- go: 1.12.x
name: Go Modules / Race
env:
- GO111MODULE=on
- GOPROXY=https://proxy.golang.org
script:
- make quicktest
- make racequicktest
- go: 1.12.x
name: Other OS
env:
- DEPLOY=true
- BUILD_FLAGS='-exclude "^(windows|darwin|linux)/"'
script:
- make
- make compile_all
- go: 1.12.x
name: macOS
os: osx
env:
- GOTAGS= # cmount doesn't work on osx travis for some reason
- BUILD_FLAGS='-include "^darwin/" -cgo'
- DEPLOY=true
cache:
directories:
- $HOME/Library/Caches/go-build
script:
- make
- make quicktest
- make racequicktest
# - os: windows
# name: Windows
# go: 1.12.x
# env:
# - GOTAGS=cmount
# - CPATH='C:\Program Files (x86)\WinFsp\inc\fuse'
# - BUILD_FLAGS='-include "^windows/amd64" -cgo' # 386 doesn't build yet
# #filter_secrets: false # works around a problem with secrets under windows
# cache:
# directories:
# - ${LocalAppData}/go-build
# script:
# - make
# - make quicktest
# - make racequicktest
- go: tip
script:
- make quicktest
deploy:
provider: script
script: make travis_beta
skip_cleanup: true
on:
repo: rclone/rclone
all_branches: true
condition: $TRAVIS_PULL_REQUEST == false && $DEPLOY == true


@@ -12,10 +12,10 @@ When filing an issue, please include the following information if
 possible as well as a description of the problem. Make sure you test
 with the [latest beta of rclone](https://beta.rclone.org/):
-* Rclone version (e.g. output from `rclone -V`)
+* Rclone version (eg output from `rclone -V`)
-* Which OS you are using and how many bits (e.g. Windows 7, 64 bit)
+* Which OS you are using and how many bits (eg Windows 7, 64 bit)
-* The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
+* The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
-* A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
+* A log of the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
 * if the log contains secrets then edit the file with a text editor first to obscure them
 ## Submitting a pull request ##
@@ -33,11 +33,10 @@ page](https://github.com/rclone/rclone).
 Now in your terminal
-    git clone https://github.com/rclone/rclone.git
+    go get -u github.com/rclone/rclone
-    cd rclone
+    cd $GOPATH/src/github.com/rclone/rclone
     git remote rename origin upstream
     git remote add origin git@github.com:YOURUSER/rclone.git
-    go build
 Make a branch to add your new feature
@@ -49,7 +48,7 @@ When ready - run the unit tests for the code you changed
     go test -v
-Note that you may need to make a test remote, e.g. `TestSwift` for some
+Note that you may need to make a test remote, eg `TestSwift` for some
 of the unit tests.
 Note the top level Makefile targets
@@ -73,26 +72,23 @@ Make sure you
 When you are done with that
-    git push -u origin my-new-feature
+    git push origin my-new-feature
 Go to the GitHub website and click [Create pull
 request](https://help.github.com/articles/creating-a-pull-request/).
 You patch will get reviewed and you might get asked to fix some stuff.
-If so, then make the changes in the same branch, squash the commits (make multiple commits one commit) by running:
+If so, then make the changes in the same branch, squash the commits,
+rebase it to master then push it to GitHub with `--force`.
-```
-git log # See how many commits you want to squash
-git reset --soft HEAD~2 # This squashes the 2 latest commits together.
-git status # Check what will happen, if you made a mistake resetting, you can run git reset 'HEAD@{1}' to undo.
-git commit # Add a new commit message.
-git push --force # Push the squashed commit to your GitHub repo.
-# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also recommends wizardzines.com
-```
-## CI for your fork ##
+## Enabling CI for your fork ##
-rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
+The CI config files for rclone have taken care of forks of the project, so you can enable CI for your fork repo easily.
+rclone currently uses [Travis CI](https://travis-ci.org/), [AppVeyor](https://ci.appveyor.com/), and
+[Circle CI](https://circleci.com/) to build the project. To enable them for your fork, simply go into their
+websites, find your fork of rclone, and enable building there.
 ## Testing ##
@@ -100,7 +96,7 @@ rclone's tests are run from the go testing framework, so at the top
 level you can run this to run all the tests.
     go test -v ./...
 rclone contains a mixture of unit tests and integration tests.
 Because it is difficult (and in some respects pointless) to test cloud
 storage systems by mocking all their interfaces, rclone unit tests can
@@ -116,8 +112,8 @@ are skipped if `TestDrive:` isn't defined.
     cd backend/drive
     go test -v
-You can then run the integration tests which test all of rclone's
+You can then run the integration tests which tests all of rclone's
-operations. Normally these get run against the local file system,
+operations. Normally these get run against the local filing system,
 but they can be run against any of the remotes.
     cd fs/sync
@@ -128,7 +124,7 @@ but they can be run against any of the remotes.
     go test -v -remote TestDrive:
 If you want to use the integration test framework to run these tests
-altogether with an HTML report and test retries then from the
+all together with an HTML report and test retries then from the
 project root:
     go install github.com/rclone/rclone/fstest/test_all
@@ -156,7 +152,6 @@ with modules beneath.
 * ...commands
 * docs - the documentation and website
 * content - adjust these docs only - everything else is autogenerated
-* command - these are auto generated - edit the corresponding .go file
 * fs - main rclone definitions - minimal amount of code
 * accounting - bandwidth limiting and statistics
 * asyncreader - an io.Reader which reads ahead
@@ -166,12 +161,12 @@ with modules beneath.
 * fserrors - rclone specific error handling
 * fshttp - http handling for rclone
 * fspath - path handling for rclone
-* hash - defines rclone's hash types and functions
+* hash - defines rclones hash types and functions
 * list - list a remote
 * log - logging facilities
 * march - iterates directories in lock step
 * object - in memory Fs objects
-* operations - primitives for sync, e.g. Copy, Move
+* operations - primitives for sync, eg Copy, Move
 * sync - sync directories
 * walk - walk a directory
 * fstest - provides integration test framework
@@ -179,7 +174,7 @@ with modules beneath.
 * mockdir - mocks an fs.Directory
 * mockobject - mocks an fs.Object
 * test_all - Runs integration tests for everything
-* graphics - the images used in the website, etc.
+* graphics - the images used in the website etc
 * lib - libraries used by the backend
 * atexit - register functions to run when rclone exits
 * dircache - directory ID to name caching
@@ -187,6 +182,7 @@ with modules beneath.
 * pacer - retries with backoff and paces operations
 * readers - a selection of useful io.Readers
 * rest - a thin abstraction over net/http for REST
+* vendor - 3rd party code managed by `go mod`
 * vfs - Virtual FileSystem layer for implementing rclone mount and similar
 ## Writing Documentation ##
@@ -203,17 +199,14 @@ for the flag help, the remainder is shown to the user in `rclone
 config` and is added to the docs with `make backenddocs`.
 The only documentation you need to edit are the `docs/content/*.md`
-files. The `MANUAL.*`, `rclone.1`, web site, etc. are all auto generated
+files. The MANUAL.*, rclone.1, web site etc are all auto generated
 from those during the release process. See the `make doc` and `make
 website` targets in the Makefile if you are interested in how. You
 don't need to run these when adding a feature.
-Documentation for rclone sub commands is with their code, e.g.
+Documentation for rclone sub commands is with their code, eg
 `cmd/ls/ls.go`.
-Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
-for small changes in the docs which makes it very easy.
 ## Making a release ##
 There are separate instructions for making a release in the RELEASE.md
@@ -266,27 +259,43 @@ rclone uses the [go
 modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
 support in go1.11 and later to manage its dependencies.
-rclone can be built with modules outside of the `GOPATH`.
+**NB** you must be using go1.11 or above to add a dependency to
+rclone. Rclone will still build with older versions of go, but we use
+the `go mod` command for dependencies which is only in go1.11 and
+above.
+rclone can be built with modules outside of the GOPATH, but for
+backwards compatibility with older go versions, rclone also maintains
+a `vendor` directory with all the external code rclone needs for
+building.
+The `vendor` directory is entirely managed by the `go mod` tool, do
+not add things manually.
 To add a dependency `github.com/ncw/new_dependency` see the
-instructions below. These will fetch the dependency and add it to
+instructions below. These will fetch the dependency, add it to
-`go.mod` and `go.sum`.
+`go.mod` and `go.sum` and vendor it for older go versions.
     GO111MODULE=on go get github.com/ncw/new_dependency
+    GO111MODULE=on go mod vendor
 You can add constraints on that package when doing `go get` (see the
 go docs linked above), but don't unless you really need to.
-Please check in the changes generated by `go mod` including `go.mod`
+Please check in the changes generated by `go mod` including the
-and `go.sum` in the same commit as your other changes.
+`vendor` directory and `go.mod` and `go.sum` in a single commit
+separate from any other code changes with the title "vendor: add
+github.com/ncw/new_dependency". Remember to `git add` any new files
+in `vendor`.
 ## Updating a dependency ##
 If you need to update a dependency then run
     GO111MODULE=on go get -u github.com/pkg/errors
+    GO111MODULE=on go mod vendor
-Check in a single commit as above.
+Check in in a single commit as above.
 ## Updating all the dependencies ##
@@ -332,11 +341,6 @@ Getting going
 * Add your remote to the imports in `backend/all/all.go`
 * HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
 * Try to implement as many optional methods as possible as it makes the remote more usable.
-* Use lib/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
-  * `rclone purge -v TestRemote:rclone-info`
-  * `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
-  * `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
-  * open `remote.csv` in a spreadsheet and examine
 Unit tests
@@ -349,7 +353,7 @@ Integration tests
 * Add your backend to `fstest/test_all/config.yaml`
 * Once you've done that then you can use the integration test framework from the project root:
   * go install ./...
-  * test_all -backends remote
+  * test_all -backend remote
 Or if you want to run the integration tests manually:
@@ -363,54 +367,12 @@ Or if you want to run the integration tests manually:
 See the [testing](#testing) section for more information on integration tests.
-Add your fs to the docs - you'll need to pick an icon for it from
-[fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
-alphabetical order of full name of remote (e.g. `drive` is ordered as
-`Google Drive`) but with the local file system last.
+Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in alphabetical order but with the local file system last.
 * `README.md` - main GitHub page
 * `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
-  * make sure this has the `autogenerated options` comments in (see your reference backend docs)
-  * update them with `make backenddocs` - revert any changes in other backends
 * `docs/content/overview.md` - overview docs
 * `docs/content/docs.md` - list of remotes in config section
-* `docs/content/_index.md` - front page of rclone.org
+* `docs/content/about.md` - front page of rclone.org
 * `docs/layouts/chrome/navbar.html` - add it to the website navigation
 * `bin/make_manual.py` - add the page to the `docs` constant
-Once you've written the docs, run `make serve` and check they look OK
-in the web browser and the links (internal and external) all work.
-## Writing a plugin ##
-New features (backends, commands) can also be added "out-of-tree", through Go plugins.
-Changes will be kept in a dynamically loaded file instead of being compiled into the main binary.
-This is useful if you can't merge your changes upstream or don't want to maintain a fork of rclone.
-Usage
-- Naming
-  - Plugins names must have the pattern `librcloneplugin_KIND_NAME.so`.
-  - `KIND` should be one of `backend`, `command` or `bundle`.
-  - Example: A plugin with backend support for PiFS would be called
-    `librcloneplugin_backend_pifs.so`.
-- Loading
-  - Supported on macOS & Linux as of now. ([Go issue for Windows support](https://github.com/golang/go/issues/19282))
-  - Supported on rclone v1.50 or greater.
-  - All plugins in the folder specified by variable `$RCLONE_PLUGIN_PATH` are loaded.
-  - If this variable doesn't exist, plugin support is disabled.
-  - Plugins must be compiled against the exact version of rclone to work.
-    (The rclone used during building the plugin must be the same as the source of rclone)
-Building
-To turn your existing additions into a Go plugin, move them to an external repository
-and change the top-level package name to `main`.
-Check `rclone --version` and make sure that the plugin's rclone dependency and host Go version match.
-Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.
-[Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)
-[Minimal example](https://gist.github.com/terorie/21b517ee347828e899e1913efc1d684f)


@@ -3,22 +3,19 @@ FROM golang AS builder
 COPY . /go/src/github.com/rclone/rclone/
 WORKDIR /go/src/github.com/rclone/rclone/
+RUN make quicktest
 RUN \
-  CGO_ENABLED=0 \
+  CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \
   make
 RUN ./rclone version
 # Begin final image
 FROM alpine:latest
-RUN apk --no-cache add ca-certificates fuse tzdata && \
-    echo "user_allow_other" >> /etc/fuse.conf
-COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
-RUN addgroup -g 1009 rclone && adduser -u 1009 -Ds /bin/sh -G rclone rclone
-ENTRYPOINT [ "rclone" ]
-WORKDIR /data
-ENV XDG_CONFIG_HOME=/config
+RUN apk --no-cache add ca-certificates
+WORKDIR /root/
+COPY --from=builder /go/src/github.com/rclone/rclone/rclone .
+ENTRYPOINT [ "./rclone" ]


@@ -2,20 +2,16 @@
 Current active maintainers of rclone are:
 | Name | GitHub ID | Specific Responsibilities |
-| :--------------- | :---------------- | :-------------------------- |
+| :--------------- | :---------- | :-------------------------- |
 | Nick Craig-Wood | @ncw | overall project health |
 | Stefan Breunig | @breunigs | |
 | Ishuah Kariuki | @ishuah | |
 | Remus Bunduc | @remusb | cache backend |
 | Fabian Möller | @B4dM4n | |
 | Alex Chen | @Cnly | onedrive backend |
 | Sandeep Ummadi | @sandeepkru | azureblob backend |
-| Sebastian Bünger | @buengese | jottacloud, yandex & compress backends |
-| Ivan Andreev | @ivandeex | chunker & mailru backends |
-| Max Sum | @Max-Sum | union backend |
-| Fred | @creativeprojects | seafile backend |
-| Caleb Case | @calebcase | tardigrade backend |
+| Sebastian Bünger | @buengese | jottacloud & yandex backends |
 **This is a work in progress Draft**
@@ -33,11 +29,11 @@ Rclone uses the labels like this:
 * `duplicate` - normally close these and ask the user to subscribe to the original
 * `enhancement: new remote` - a new rclone backend
 * `enhancement` - a new feature
-* `FUSE` - to do with `rclone mount` command
+* `FUSE` - do do with `rclone mount` command
 * `good first issue` - mark these if you find a small self contained issue - these get shown to new visitors to the project
 * `help` wanted - mark these if you find a self contained issue - these get shown to new visitors to the project
 * `IMPORTANT` - note to maintainers not to forget to fix this for the release
-* `maintenance` - internal enhancement, code re-organisation, etc.
+* `maintenance` - internal enhancement, code re-organisation etc
 * `Needs Go 1.XX` - waiting for that version of Go to be released
 * `question` - not a `bug` or `enhancement` - direct to the forum for next time
 * `Remote: XXX` - which rclone backend this affects
@@ -45,7 +41,7 @@ Rclone uses the labels like this:
 If it turns out to be a bug or an enhancement it should be tagged as such, with the appropriate other tags. Don't forget the "good first issue" tag to give new contributors something easy to do to get going.
-When a ticket is tagged it should be added to a milestone, either the next release, the one after, Soon or Help Wanted. Bugs can be added to the "Known Bugs" milestone if they aren't planned to be fixed or need to wait for something (e.g. the next go release).
+When a ticket is tagged it should be added to a milestone, either the next release, the one after, Soon or Help Wanted. Bugs can be added to the "Known Bugs" milestone if they aren't planned to be fixed or need to wait for something (eg the next go release).
 The milestones have these meanings:

MANUAL.html (generated, 17489 lines): file diff suppressed because it is too large

MANUAL.md (generated, 17355 lines): file diff suppressed because it is too large

MANUAL.txt (generated, 20291 lines): file diff suppressed because it is too large

Makefile (160 lines changed)

@@ -1,35 +1,22 @@
 SHELL = bash
-# Branch we are working on
-BRANCH := $(or $(BUILD_SOURCEBRANCHNAME),$(lastword $(subst /, ,$(GITHUB_REF))),$(shell git rev-parse --abbrev-ref HEAD))
-# Tag of the current commit, if any. If this is not "" then we are building a release
-RELEASE_TAG := $(shell git tag -l --points-at HEAD)
-# Version of last release (may not be on this branch)
-VERSION := $(shell cat VERSION)
-# Last tag on this branch
+BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(BUILD_SOURCEBRANCHNAME),$(shell git rev-parse --abbrev-ref HEAD))
 LAST_TAG := $(shell git describe --tags --abbrev=0)
-# Next version
-NEXT_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2+1,0}')
-NEXT_PATCH_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2,$$3+1}')
-# If we are working on a release, override branch to master
-ifdef RELEASE_TAG
+ifeq ($(BRANCH),$(LAST_TAG))
 BRANCH := master
-LAST_TAG := $(shell git describe --abbrev=0 --tags $(VERSION)^)
 endif
-TAG_BRANCH := .$(BRANCH)
-BRANCH_PATH := branch/$(BRANCH)/
-# If building HEAD or master then unset TAG_BRANCH and BRANCH_PATH
+TAG_BRANCH := -$(BRANCH)
+BRANCH_PATH := branch/
 ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
 TAG_BRANCH :=
 BRANCH_PATH :=
 endif
-# Make version suffix -beta.NNNN.CCCCCCCC (N=Commit number, C=Commit)
-VERSION_SUFFIX := -beta.$(shell git rev-list --count HEAD).$(shell git show --no-patch --no-notes --pretty='%h' HEAD)
-# TAG is current version + commit number + commit + branch
-TAG := $(VERSION)$(VERSION_SUFFIX)$(TAG_BRANCH)
-ifdef RELEASE_TAG
-TAG := $(RELEASE_TAG)
+TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
+NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
+ifneq ($(TAG),$(LAST_TAG))
+TAG := $(TAG)-beta
 endif
 GO_VERSION := $(shell go version)
+GO_FILES := $(shell go list ./... | grep -v /vendor/ )
 ifdef BETA_SUBDIR
 BETA_SUBDIR := /$(BETA_SUBDIR)
 endif
@@ -46,26 +33,21 @@ endif
 .PHONY: rclone test_all vars version
 rclone:
-	go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS)
-	mkdir -p `go env GOPATH`/bin/
-	cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
-	mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`
+	go install -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
+	cp -av `go env GOPATH`/bin/rclone .
 test_all:
-	go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all
+	go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
 vars:
 	@echo SHELL="'$(SHELL)'"
 	@echo BRANCH="'$(BRANCH)'"
 	@echo TAG="'$(TAG)'"
-	@echo VERSION="'$(VERSION)'"
+	@echo LAST_TAG="'$(LAST_TAG)'"
+	@echo NEW_TAG="'$(NEW_TAG)'"
 	@echo GO_VERSION="'$(GO_VERSION)'"
 	@echo BETA_URL="'$(BETA_URL)'"
-btest:
-	@echo "[$(TAG)]($(BETA_URL)) on branch [$(BRANCH)](https://github.com/rclone/rclone/tree/$(BRANCH)) (uploaded in 15-30 mins)" | xclip -r -sel clip
-	@echo "Copied markdown of beta release to clip board"
 version:
 	@echo '$(TAG)'
@@ -76,10 +58,10 @@ test: rclone test_all
 # Quick test
 quicktest:
-	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) ./...
+	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) $(GO_FILES)
 racequicktest:
-	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...
+	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race $(GO_FILES)
 # Do source code quality checks
 check: rclone
@@ -91,42 +73,30 @@ check: rclone
 build_dep:
 	go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
-# Get the release dependencies we only install on linux
-release_dep_linux:
-	go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64\.tar\.gz'
-# Get the release dependencies we only install on Windows
-release_dep_windows:
-	GO111MODULE=off GOOS="" GOARCH="" go get github.com/josephspurrier/goversioninfo/cmd/goversioninfo
+# Get the release dependencies
+release_dep:
+	go get -u github.com/goreleaser/nfpm/...
+	go get -u github.com/aktau/github-release
 # Update dependencies
-showupdates:
-	@echo "*** Direct dependencies that could be updated ***"
-	@GO111MODULE=on go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null
-# Update direct and indirect dependencies and test dependencies
 update:
-	GO111MODULE=on go get -u -t ./...
-	#GO111MODULE=on go get -d $(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
+	GO111MODULE=on go get -u ./...
-# Tidy the module dependencies
-tidy:
 	GO111MODULE=on go mod tidy
+	GO111MODULE=on go mod vendor
 doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
 rclone.1: MANUAL.md
-	pandoc -s --from markdown-smart --to man MANUAL.md -o rclone.1
+	pandoc -s --from markdown --to man MANUAL.md -o rclone.1
-MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs rcdocs
+MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs
 	./bin/make_manual.py
 MANUAL.html: MANUAL.md
-	pandoc -s --from markdown-smart --to html MANUAL.md -o MANUAL.html
+	pandoc -s --from markdown --to html MANUAL.md -o MANUAL.html
 MANUAL.txt: MANUAL.md
-	pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt
+	pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt
 commanddocs: rclone
 	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/
@@ -148,27 +118,14 @@ clean:
 	rm -f rclone fs/operations/operations.test fs/sync/sync.test fs/test_all.log test.log
 website:
-	rm -rf docs/public
 	cd docs && hugo
-	@if grep -R "raw HTML omitted" docs/public ; then echo "ERROR: found unescaped HTML - fix the markdown source" ; fi
 upload_website: website
 	rclone -v sync docs/public memstore:www-rclone-org
-upload_test_website: website
-	rclone -P sync docs/public test-rclone-org:
-validate_website: website
-	find docs/public -type f -name "*.html" | xargs tidy --mute-id yes -errors --gnu-emacs yes --drop-empty-elements no --warn-proprietary-attributes no --mute MISMATCHED_ATTRIBUTE_WARN
 tarball:
 	git archive -9 --format=tar.gz --prefix=rclone-$(TAG)/ -o build/rclone-$(TAG).tar.gz $(TAG)
-vendorball:
-	go mod vendor
-	tar -zcf build/rclone-$(TAG)-vendor.tar.gz vendor
-	rm -rf vendor
 sign_upload:
 	cd build && md5sum rclone-v* | gpg --clearsign > MD5SUMS
 	cd build && sha1sum rclone-v* | gpg --clearsign > SHA1SUMS
@@ -187,10 +144,10 @@ upload_github:
 	./bin/upload-github $(TAG)
 cross: doc
-	go run bin/cross-compile.go -release current $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
+	go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)
 beta:
-	go run bin/cross-compile.go $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
+	go run bin/cross-compile.go $(BUILDTAGS) $(TAG)
 	rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
 	@echo Beta release ready at https://pub.rclone.org/$(TAG)/
@@ -198,61 +155,60 @@ log_since_last_release:
 	git log $(LAST_TAG)..
 compile_all:
-	go run bin/cross-compile.go -compile-only $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
+	go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG)
-ci_upload:
-	sudo chown -R $$USER build
-	find build -type l -delete
-	gzip -r9v build
+appveyor_upload:
+	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
+ifndef BRANCH_PATH
+	rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
+endif
+	@echo Beta release ready at $(BETA_URL)
+circleci_upload:
 	./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
-ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
+ifndef BRANCH_PATH
 	./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
 endif
 	@echo Beta release ready at $(BETA_URL)/testbuilds
-ci_beta:
+travis_beta:
+ifeq (linux,$(filter linux,$(subst Linux,linux,$(TRAVIS_OS_NAME) $(AGENT_OS))))
+	go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar.gz'
+endif
 	git log $(LAST_TAG).. > /tmp/git-log.txt
-	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
+	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)
 	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
-ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
+ifndef BRANCH_PATH
 	rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
 endif
 	@echo Beta release ready at $(BETA_URL)
-# Fetch the binary builds from GitHub actions
+# Fetch the binary builds from travis and appveyor
 fetch_binaries:
 	rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/
 serve: website
-	cd docs && hugo server -v -w --disableFastRender
+	cd docs && hugo server -v -w
-tag: retag doc
-	bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new
+tag: doc
+	@echo "Old tag is $(LAST_TAG)"
+	@echo "New tag is $(NEW_TAG)"
+	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
+	echo -n "$(NEW_TAG)" > docs/layouts/partials/version.html
+	git tag -s -m "Version $(NEW_TAG)" $(NEW_TAG)
+	bin/make_changelog.py $(LAST_TAG) $(NEW_TAG) > docs/content/changelog.md.new
 	mv docs/content/changelog.md.new docs/content/changelog.md
 	@echo "Edit the new changelog in docs/content/changelog.md"
 	@echo "Then commit all the changes"
-	@echo git commit -m \"Version $(VERSION)\" -a -v
+	@echo git commit -m \"Version $(NEW_TAG)\" -a -v
-	@echo "And finally run make retag before make cross, etc."
+	@echo "And finally run make retag before make cross etc"
 retag:
-	@echo "Version is $(VERSION)"
-	git tag -f -s -m "Version $(VERSION)" $(VERSION)
+	git tag -f -s -m "Version $(LAST_TAG)" $(LAST_TAG)
 startdev:
-	@echo "Version is $(VERSION)"
-	@echo "Next version is $(NEXT_VERSION)"
-	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)-DEV\"\n" | gofmt > fs/version.go
+	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(LAST_TAG)-DEV\"\n" | gofmt > fs/version.go
+	git commit -m "Start $(LAST_TAG)-DEV development" fs/version.go
echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_VERSION)" > VERSION
git commit -m "Start $(NEXT_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
startstable:
@echo "Version is $(VERSION)"
@echo "Next stable version is $(NEXT_PATCH_VERSION)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_PATCH_VERSION)-DEV\"\n" | gofmt > fs/version.go
echo -n "$(NEXT_PATCH_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_PATCH_VERSION)" > VERSION
git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
winzip: winzip:
zip -9 rclone-$(TAG).zip rclone.exe zip -9 rclone-$(TAG).zip rclone.exe
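For reference, the `echo -e ... | gofmt > fs/version.go` recipe used by the `tag` and `startdev` targets on both sides of this diff writes a file shaped like the following (the tag `v1.49.4` below is a made-up placeholder for whatever `$(NEW_TAG)` or `$(LAST_TAG)` expands to):

```go
// fs/version.go as generated by the tag target, assuming a hypothetical tag v1.49.4
package fs

// Version of rclone
var Version = "v1.49.4"
```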

View File

@@ -8,10 +8,12 @@
[Installation](https://rclone.org/install/) |
[Forum](https://forum.rclone.org/)
-[![Build Status](https://github.com/rclone/rclone/workflows/build/badge.svg)](https://github.com/rclone/rclone/actions?query=workflow%3Abuild)
+[![Build Status](https://travis-ci.org/rclone/rclone.svg?branch=master)](https://travis-ci.org/rclone/rclone)
+[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/rclone/rclone?branch=master&passingText=windows%20-%20ok&svg=true)](https://ci.appveyor.com/project/rclone/rclone)
+[![Build Status](https://dev.azure.com/rclone/rclone/_apis/build/status/rclone.rclone?branchName=master)](https://dev.azure.com/rclone/rclone/_build/latest?definitionId=2&branchName=master)
+[![CircleCI](https://circleci.com/gh/rclone/rclone/tree/master.svg?style=svg)](https://circleci.com/gh/rclone/rclone/tree/master)
[![Go Report Card](https://goreportcard.com/badge/github.com/rclone/rclone)](https://goreportcard.com/report/github.com/rclone/rclone)
[![GoDoc](https://godoc.org/github.com/rclone/rclone?status.svg)](https://godoc.org/github.com/rclone/rclone)
-[![Docker Pulls](https://img.shields.io/docker/pulls/rclone/rclone)](https://hub.docker.com/r/rclone/rclone)
# Rclone
@@ -19,33 +21,27 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
## Storage providers
-* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
+* 1Fichier [:page_facing_up:](https://rclone.org/ficher/)
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
* Box [:page_facing_up:](https://rclone.org/box/)
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
-* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
-* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
-* GetSky [:page_facing_up:](https://rclone.org/jottacloud/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
-* HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
-* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
-* Memory [:page_facing_up:](https://rclone.org/memory/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
@@ -61,17 +57,10 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
-* Seafile [:page_facing_up:](https://rclone.org/seafile/)
-* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
-* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
-* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
-* Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
-* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
-* Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
* The local filesystem [:page_facing_up:](https://rclone.org/local/)
Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
@@ -85,9 +74,8 @@ Please see [the full list of all storage providers and their features](https://r
* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
* Can sync to and from network, e.g. two different cloud accounts
-* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
-* Optional transparent compression ([Compress](https://rclone.org/compress/))
* Optional encryption ([Crypt](https://rclone.org/crypt/))
+* Optional cache ([Cache](https://rclone.org/cache/))
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
* Multi-threaded downloads to local disk
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna

View File

@@ -1,104 +1,84 @@
-# Release
-This file describes how to make the various kinds of releases
-## Extra required software for making a release
-* [gh the github cli](https://github.com/cli/cli) for uploading packages
+Extra required software for making a release
+* [github-release](https://github.com/aktau/github-release) for uploading packages
* pandoc for making the html and man pages
-## Making a release
-* git checkout master # see below for stable branch
-* git pull
+Making a release
* git status - make sure everything is checked in
-* Check GitHub actions build for master is Green
+* Check travis & appveyor builds are green
+* make check
* make test # see integration test server or run locally
* make tag
-* edit docs/content/changelog.md # make sure to remove duplicate logs from point releases
-* make tidy
+* edit docs/content/changelog.md
* make doc
* git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX.0"
* make retag
-* git push --follow-tags origin
-* # Wait for the GitHub builds to complete then...
+* git push --tags origin master
+* # Wait for the appveyor and travis builds to complete then...
* make fetch_binaries
* make tarball
-* make vendorball
* make sign_upload
* make check_sign
* make upload
* make upload_website
* make upload_github
-* make startdev # make startstable for stable branch
-* # announce with forum post, twitter post, patreon post
+* make startdev
+* # announce with forum post, twitter post, G+ post
-Early in the next release cycle update the dependencies
+Early in the next release cycle update the vendored dependencies
* Review any pinned packages in go.mod and remove if possible
+* GO111MODULE=on go get -u github.com/spf13/cobra@master
* make update
* git status
* git add new files
* git commit -a -v
-## Making a point release
-If rclone needs a point release due to some horrendous bug:
-Set vars
-* BASE_TAG=v1.XX # e.g. v1.52
-* NEW_TAG=${BASE_TAG}.Y # e.g. v1.52.1
-* echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1
-First make the release branch. If this is a second point release then
-this will be done already.
-* git branch ${BASE_TAG} ${BASE_TAG}-stable
-* git co ${BASE_TAG}-stable
-* make startstable
-Now
-* git co ${BASE_TAG}-stable
+If `make update` fails with errors like this:
+```
+# github.com/cpuguy83/go-md2man/md2man
+../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:11:16: undefined: blackfriday.EXTENSION_NO_INTRA_EMPHASIS
+../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:12:16: undefined: blackfriday.EXTENSION_TABLES
+```
+Can be fixed with
+* GO111MODULE=on go get -u github.com/russross/blackfriday@v1.5.2
+* GO111MODULE=on go mod tidy
+* GO111MODULE=on go mod vendor
+Making a point release. If rclone needs a point release due to some
+horrendous bug, then
+* git branch v1.XX v1.XX-fixes
* git cherry-pick any fixes
-* Do the steps as above
-* make startstable
-* git co master
-* `#` cherry pick the changes to the changelog - check the diff to make sure it is correct
-* git checkout ${BASE_TAG}-stable docs/content/changelog.md
-* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
-* git push
+* Test (see above)
+* make NEW_TAG=v1.XX.1 tag
+* edit docs/content/changelog.md
+* make TAG=v1.43.1 doc
+* git commit -a -v -m "Version v1.XX.1"
+* git tag -d -v1.XX.1
+* git tag -s -m "Version v1.XX.1" v1.XX.1
+* git push --tags -u origin v1.XX-fixes
+* make BRANCH_PATH= TAG=v1.43.1 fetch_binaries
+* make TAG=v1.43.1 tarball
+* make TAG=v1.43.1 sign_upload
+* make TAG=v1.43.1 check_sign
+* make TAG=v1.43.1 upload
+* make TAG=v1.43.1 upload_website
+* make TAG=v1.43.1 upload_github
+* NB this overwrites the current beta so after the release, rebuild the last travis build
+* Announce!
## Making a manual build of docker
-The rclone docker image should autobuild on via GitHub actions. If it doesn't
+The rclone docker image should autobuild on docker hub. If it doesn't
or needs to be updated then rebuild like this.
-See: https://github.com/ilteoood/docker_buildx/issues/19
-See: https://github.com/ilteoood/docker_buildx/blob/master/scripts/install_buildx.sh
```
-git co v1.54.1
-docker pull golang
-export DOCKER_CLI_EXPERIMENTAL=enabled
-docker buildx create --name actions_builder --use
-docker run --rm --privileged docker/binfmt:820fdd95a9972a5308930a2bdfb8573dd4447ad3
-docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
-SUPPORTED_PLATFORMS=$(docker buildx inspect --bootstrap | grep 'Platforms:*.*' | cut -d : -f2,3)
-echo "Supported platforms: $SUPPORTED_PLATFORMS"
-docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
-docker buildx stop actions_builder
-```
-### Old build for linux/amd64 only
-```
-docker pull golang
-docker build --rm --ulimit memlock=67108864 -t rclone/rclone:1.52.0 -t rclone/rclone:1.52 -t rclone/rclone:1 -t rclone/rclone:latest .
-docker push rclone/rclone:1.52.0
-docker push rclone/rclone:1.52
+docker build -t rclone/rclone:1.49.1 -t rclone/rclone:1.49 -t rclone/rclone:1 -t rclone/rclone:latest .
+docker push rclone/rclone:1.49.1
+docker push rclone/rclone:1.49
docker push rclone/rclone:1
docker push rclone/rclone:latest
```

View File

@@ -1 +0,0 @@
-v1.56.0

azure-pipelines.yml Normal file
View File

@@ -0,0 +1,239 @@
---
# Azure pipelines build for rclone
# Parts stolen shamelessly from all round the Internet, especially Caddy
# -*- compile-command: "yamllint -f parsable azure-pipelines.yml" -*-
trigger:
branches:
include:
- '*'
tags:
include:
- '*'
variables:
GOROOT: $(gorootDir)/go
GOPATH: $(system.defaultWorkingDirectory)/gopath
GOCACHE: $(system.defaultWorkingDirectory)/gocache
GOBIN: $(GOPATH)/bin
modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)'
GO111MODULE: 'off'
GOTAGS: cmount
GO_LATEST: false
CPATH: ''
GO_INSTALL_ARCH: amd64
strategy:
matrix:
linux:
imageName: ubuntu-16.04
gorootDir: /usr/local
GO_VERSION: latest
GOTAGS: cmount
BUILD_FLAGS: '-include "^linux/"'
MAKE_CHECK: true
MAKE_QUICKTEST: true
DEPLOY: true
mac:
imageName: macos-10.13
gorootDir: /usr/local
GO_VERSION: latest
GOTAGS: "" # cmount doesn't work on osx travis for some reason
BUILD_FLAGS: '-include "^darwin/" -cgo'
MAKE_QUICKTEST: true
MAKE_RACEQUICKTEST: true
DEPLOY: true
windows_amd64:
imageName: windows-2019
gorootDir: C:\
GO_VERSION: latest
BUILD_FLAGS: '-include "^windows/amd64" -cgo'
MAKE_QUICKTEST: true
DEPLOY: true
windows_386:
imageName: windows-2019
gorootDir: C:\
GO_VERSION: latest
GO_INSTALL_ARCH: 386
BUILD_FLAGS: '-include "^windows/386" -cgo'
MAKE_QUICKTEST: true
DEPLOY: true
other_os:
imageName: ubuntu-16.04
gorootDir: /usr/local
GO_VERSION: latest
BUILD_FLAGS: '-exclude "^(windows|darwin|linux)/"'
MAKE_COMPILE_ALL: true
DEPLOY: true
modules_race:
imageName: ubuntu-16.04
gorootDir: /usr/local
GO_VERSION: latest
GO111MODULE: on
GOPROXY: https://proxy.golang.org
MAKE_QUICKTEST: true
MAKE_RACEQUICKTEST: true
go1.9:
imageName: ubuntu-16.04
gorootDir: /usr/local
GOCACHE: '' # build caching only came in go1.10
GO_VERSION: go1.9.7
MAKE_QUICKTEST: true
go1.10:
imageName: ubuntu-16.04
gorootDir: /usr/local
GO_VERSION: go1.10.8
MAKE_QUICKTEST: true
go1.11:
imageName: ubuntu-16.04
gorootDir: /usr/local
GO_VERSION: go1.11.12
MAKE_QUICKTEST: true
pool:
vmImage: $(imageName)
steps:
- bash: |
latestGo=$(curl "https://golang.org/VERSION?m=text")
echo "##vso[task.setvariable variable=GO_VERSION]$latestGo"
echo "##vso[task.setvariable variable=GO_LATEST]true"
echo "Latest Go version: $latestGo"
condition: eq( variables['GO_VERSION'], 'latest' )
continueOnError: false
displayName: "Get latest Go version"
- bash: |
sudo rm -f $(which go)
echo '##vso[task.prependpath]$(GOBIN)'
echo '##vso[task.prependpath]$(GOROOT)/bin'
mkdir -p '$(modulePath)'
shopt -s extglob
shopt -s dotglob
mv !(gopath) '$(modulePath)'
continueOnError: false
displayName: Remove old Go, set GOBIN/GOROOT, and move project into GOPATH
- task: CacheBeta@0
inputs:
key: go-build-cache | "$(Agent.JobName)"
path: $(GOCACHE)
continueOnError: true
displayName: Cache go build
condition: ne( variables['GOCACHE'], '' )
# Install Libraries (varies by platform)
- bash: |
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
sudo apt-get install fuse libfuse-dev rpm pkg-config
condition: eq( variables['Agent.OS'], 'Linux' )
continueOnError: false
displayName: Install Libraries on Linux
- bash: |
brew update
brew tap caskroom/cask
brew cask install osxfuse
condition: eq( variables['Agent.OS'], 'Darwin' )
continueOnError: false
displayName: Install Libraries on macOS
- powershell: |
$ProgressPreference = 'SilentlyContinue'
choco install -y winfsp zip
Write-Host "##vso[task.setvariable variable=CPATH]C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
if ($env:GO_INSTALL_ARCH -eq "386") {
choco install -y mingw --forcex86 --force
Write-Host "##vso[task.prependpath]C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
}
# Copy mingw32-make.exe to make.exe so the same command line
# can be used on Windows as on macOS and Linux
$path = (get-command mingw32-make.exe).Path
Copy-Item -Path $path -Destination (Join-Path (Split-Path -Path $path) 'make.exe')
condition: eq( variables['Agent.OS'], 'Windows_NT' )
continueOnError: false
displayName: Install Libraries on Windows
# Install Go (this varies by platform)
- bash: |
wget "https://dl.google.com/go/$(GO_VERSION).linux-$(GO_INSTALL_ARCH).tar.gz"
sudo mkdir $(gorootDir)
sudo chown ${USER}:${USER} $(gorootDir)
tar -C $(gorootDir) -xzf "$(GO_VERSION).linux-$(GO_INSTALL_ARCH).tar.gz"
condition: eq( variables['Agent.OS'], 'Linux' )
continueOnError: false
displayName: Install Go on Linux
- bash: |
wget "https://dl.google.com/go/$(GO_VERSION).darwin-$(GO_INSTALL_ARCH).tar.gz"
sudo tar -C $(gorootDir) -xzf "$(GO_VERSION).darwin-$(GO_INSTALL_ARCH).tar.gz"
condition: eq( variables['Agent.OS'], 'Darwin' )
continueOnError: false
displayName: Install Go on macOS
- powershell: |
$ProgressPreference = 'SilentlyContinue'
Write-Host "Downloading Go $(GO_VERSION) for $(GO_INSTALL_ARCH)"
(New-Object System.Net.WebClient).DownloadFile("https://dl.google.com/go/$(GO_VERSION).windows-$(GO_INSTALL_ARCH).zip", "$(GO_VERSION).windows-$(GO_INSTALL_ARCH).zip")
Write-Host "Extracting Go"
Expand-Archive "$(GO_VERSION).windows-$(GO_INSTALL_ARCH).zip" -DestinationPath "$(gorootDir)"
condition: eq( variables['Agent.OS'], 'Windows_NT' )
continueOnError: false
displayName: Install Go on Windows
# Display environment for debugging
- bash: |
printf "Using go at: $(which go)\n"
printf "Go version: $(go version)\n"
printf "\n\nGo environment:\n\n"
go env
printf "\n\nRclone environment:\n\n"
make vars
printf "\n\nSystem environment:\n\n"
env
workingDirectory: '$(modulePath)'
displayName: Print Go version and environment
# Run Tests
- bash: |
make
make quicktest
workingDirectory: '$(modulePath)'
displayName: Run tests
condition: eq( variables['MAKE_QUICKTEST'], 'true' )
- bash: |
make racequicktest
workingDirectory: '$(modulePath)'
displayName: Race test
condition: eq( variables['MAKE_RACEQUICKTEST'], 'true' )
- bash: |
make build_dep
make check
workingDirectory: '$(modulePath)'
displayName: Code quality test
condition: eq( variables['MAKE_CHECK'], 'true' )
- bash: |
make
make compile_all
workingDirectory: '$(modulePath)'
displayName: Compile all architectures test
condition: eq( variables['MAKE_COMPILE_ALL'], 'true' )
- bash: |
make travis_beta
env:
RCLONE_CONFIG_PASS: $(RCLONE_CONFIG_PASS)
BETA_SUBDIR: 'azure_pipelines' # FIXME remove when removing travis/appveyor
workingDirectory: '$(modulePath)'
displayName: Deploy built binaries
condition: and( eq( variables['DEPLOY'], 'true' ), ne( variables['Build.Reason'], 'PullRequest' ) )
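The "Get latest Go version" step near the top of this pipeline simply curls https://golang.org/VERSION?m=text and exports the result as the `GO_VERSION` pipeline variable. A minimal Go sketch of the same lookup (illustration only; the real pipeline does this in bash):

```go
// Fetch the latest Go version string the same way the pipeline step does.
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	resp, err := http.Get("https://golang.org/VERSION?m=text")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	// Prints something like "go1.13" - the pipeline stores this in GO_VERSION.
	fmt.Printf("Latest Go version: %s\n", body)
}
```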

View File

@@ -1,12 +1,10 @@
package alias
import (
-"context"
"errors"
"strings"
"github.com/rclone/rclone/fs"
-"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
@@ -35,7 +33,7 @@ type Options struct {
// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
-func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -48,5 +46,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if strings.HasPrefix(opt.Remote, name+":") {
return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
}
-return cache.Get(ctx, fspath.JoinRootPath(opt.Remote, root))
+fsInfo, configName, fsPath, config, err := fs.ConfigFs(opt.Remote)
+if err != nil {
+return nil, err
+}
+return fsInfo.NewFs(configName, fspath.JoinRootPath(fsPath, root), config)
}
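Both sides of this hunk do the same job in the end: expand the alias's configured `remote` and hand the combined root to the underlying backend. A rough, self-contained sketch of that path arithmetic (`joinRootPath` below is a simplified stand-in for `fspath.JoinRootPath`, not the real implementation):

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// joinRootPath is a simplified stand-in for fspath.JoinRootPath: it joins
// the root stored in the alias config with the path the user asked for.
func joinRootPath(remoteRoot, root string) string {
	return path.Join(remoteRoot, root)
}

func main() {
	// An alias remote configured as remote = "drive:backup" being opened
	// as "myalias:photos/2019" resolves to "drive:backup/photos/2019".
	remote := "drive:backup"
	colon := strings.IndexByte(remote, ':')
	backend, remoteRoot := remote[:colon], remote[colon+1:]
	fmt.Println(backend + ":" + joinRootPath(remoteRoot, "photos/2019"))
}
```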

View File

@@ -11,7 +11,6 @@ import (
_ "github.com/rclone/rclone/backend/local" // pull in test backend _ "github.com/rclone/rclone/backend/local" // pull in test backend
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@@ -20,7 +19,7 @@ var (
) )
func prepare(t *testing.T, root string) { func prepare(t *testing.T, root string) {
configfile.Install() config.LoadConfig()
// Configure the remote // Configure the remote
config.FileSet(remoteName, "type", "alias") config.FileSet(remoteName, "type", "alias")
@@ -55,22 +54,21 @@ func TestNewFS(t *testing.T) {
{"four/under four.txt", 9, false}, {"four/under four.txt", 9, false},
}}, }},
{"four", "..", "", true, []testEntry{ {"four", "..", "", true, []testEntry{
{"five", -1, true}, {"four", -1, true},
{"under four.txt", 9, false}, {"one%.txt", 6, false},
{"three", -1, true},
{"two.html", 7, false},
}}, }},
{"", "../../three", "", true, []testEntry{ {"four", "../three", "", true, []testEntry{
{"underthree.txt", 9, false}, {"underthree.txt", 9, false},
}}, }},
{"four", "../../five", "", true, []testEntry{
{"underfive.txt", 6, false},
}},
} { } {
what := fmt.Sprintf("test %d remoteRoot=%q, fsRoot=%q, fsList=%q", testi, test.remoteRoot, test.fsRoot, test.fsList) what := fmt.Sprintf("test %d remoteRoot=%q, fsRoot=%q, fsList=%q", testi, test.remoteRoot, test.fsRoot, test.fsList)
remoteRoot, err := filepath.Abs(filepath.FromSlash(path.Join("test/files", test.remoteRoot))) remoteRoot, err := filepath.Abs(filepath.FromSlash(path.Join("test/files", test.remoteRoot)))
require.NoError(t, err, what) require.NoError(t, err, what)
prepare(t, remoteRoot) prepare(t, remoteRoot)
f, err := fs.NewFs(context.Background(), fmt.Sprintf("%s:%s", remoteName, test.fsRoot)) f, err := fs.NewFs(fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
require.NoError(t, err, what) require.NoError(t, err, what)
gotEntries, err := f.List(context.Background(), test.fsList) gotEntries, err := f.List(context.Background(), test.fsList)
require.NoError(t, err, what) require.NoError(t, err, what)
@@ -92,7 +90,7 @@ func TestNewFS(t *testing.T) {
func TestNewFSNoRemote(t *testing.T) { func TestNewFSNoRemote(t *testing.T) {
prepare(t, "") prepare(t, "")
f, err := fs.NewFs(context.Background(), fmt.Sprintf("%s:", remoteName)) f, err := fs.NewFs(fmt.Sprintf("%s:", remoteName))
require.Error(t, err) require.Error(t, err)
require.Nil(t, f) require.Nil(t, f)
@@ -100,7 +98,7 @@ func TestNewFSNoRemote(t *testing.T) {
func TestNewFSInvalidRemote(t *testing.T) { func TestNewFSInvalidRemote(t *testing.T) {
prepare(t, "not_existing_test_remote:") prepare(t, "not_existing_test_remote:")
f, err := fs.NewFs(context.Background(), fmt.Sprintf("%s:", remoteName)) f, err := fs.NewFs(fmt.Sprintf("%s:", remoteName))
require.Error(t, err) require.Error(t, err)
require.Nil(t, f) require.Nil(t, f)

View File

@@ -8,25 +8,19 @@ import (
_ "github.com/rclone/rclone/backend/b2" _ "github.com/rclone/rclone/backend/b2"
_ "github.com/rclone/rclone/backend/box" _ "github.com/rclone/rclone/backend/box"
_ "github.com/rclone/rclone/backend/cache" _ "github.com/rclone/rclone/backend/cache"
_ "github.com/rclone/rclone/backend/chunker"
_ "github.com/rclone/rclone/backend/compress"
_ "github.com/rclone/rclone/backend/crypt" _ "github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/drive" _ "github.com/rclone/rclone/backend/drive"
_ "github.com/rclone/rclone/backend/dropbox" _ "github.com/rclone/rclone/backend/dropbox"
_ "github.com/rclone/rclone/backend/fichier" _ "github.com/rclone/rclone/backend/fichier"
_ "github.com/rclone/rclone/backend/filefabric"
_ "github.com/rclone/rclone/backend/ftp" _ "github.com/rclone/rclone/backend/ftp"
_ "github.com/rclone/rclone/backend/googlecloudstorage" _ "github.com/rclone/rclone/backend/googlecloudstorage"
_ "github.com/rclone/rclone/backend/googlephotos" _ "github.com/rclone/rclone/backend/googlephotos"
_ "github.com/rclone/rclone/backend/hdfs"
_ "github.com/rclone/rclone/backend/http" _ "github.com/rclone/rclone/backend/http"
_ "github.com/rclone/rclone/backend/hubic" _ "github.com/rclone/rclone/backend/hubic"
_ "github.com/rclone/rclone/backend/jottacloud" _ "github.com/rclone/rclone/backend/jottacloud"
_ "github.com/rclone/rclone/backend/koofr" _ "github.com/rclone/rclone/backend/koofr"
_ "github.com/rclone/rclone/backend/local" _ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/mailru"
_ "github.com/rclone/rclone/backend/mega" _ "github.com/rclone/rclone/backend/mega"
_ "github.com/rclone/rclone/backend/memory"
_ "github.com/rclone/rclone/backend/onedrive" _ "github.com/rclone/rclone/backend/onedrive"
_ "github.com/rclone/rclone/backend/opendrive" _ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/pcloud" _ "github.com/rclone/rclone/backend/pcloud"
@@ -34,15 +28,9 @@ import (
_ "github.com/rclone/rclone/backend/putio" _ "github.com/rclone/rclone/backend/putio"
_ "github.com/rclone/rclone/backend/qingstor" _ "github.com/rclone/rclone/backend/qingstor"
_ "github.com/rclone/rclone/backend/s3" _ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/seafile"
_ "github.com/rclone/rclone/backend/sftp" _ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift" _ "github.com/rclone/rclone/backend/swift"
_ "github.com/rclone/rclone/backend/tardigrade"
_ "github.com/rclone/rclone/backend/union" _ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/uptobox"
_ "github.com/rclone/rclone/backend/webdav" _ "github.com/rclone/rclone/backend/webdav"
_ "github.com/rclone/rclone/backend/yandex" _ "github.com/rclone/rclone/backend/yandex"
_ "github.com/rclone/rclone/backend/zoho"
) )
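These blank imports exist only for their side effects: each backend package registers itself with the core in an `init` function, so importing it is enough to make it available. A minimal sketch of the pattern (hypothetical registry, not rclone's actual `fs.Register` signature):

```go
package main

import "fmt"

// registry maps backend names to constructors; rclone's fs package keeps
// a similar table that each backend fills in at startup.
var registry = map[string]func() string{}

// register is what a backend's init function would call.
func register(name string, newFs func() string) {
	registry[name] = newFs
}

// A backend package would do this in its init(), which runs as a side
// effect of the blank import - no other reference to the package needed.
func init() {
	register("local", func() string { return "local filesystem" })
}

func main() {
	fmt.Println(registry["local"]()) // local filesystem
}
```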

View File

@@ -16,6 +16,7 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"log"
"net/http" "net/http"
"path" "path"
"strings" "strings"
@@ -31,7 +32,6 @@ import (
"github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache" "github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil" "github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/pacer"
"golang.org/x/oauth2" "golang.org/x/oauth2"
@@ -69,28 +69,45 @@ func init() {
Prefix: "acd", Prefix: "acd",
Description: "Amazon Drive", Description: "Amazon Drive",
NewFs: NewFs, NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) { Config: func(name string, m configmap.Mapper) {
return oauthutil.ConfigOut("", &oauthutil.Options{ err := oauthutil.Config("amazon cloud drive", name, m, acdConfig)
OAuth2Config: acdConfig, if err != nil {
}) log.Fatalf("Failed to configure token: %v", err)
}
}, },
Options: append(oauthutil.SharedOptions, []fs.Option{{ Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Amazon Application Client ID.",
Required: true,
}, {
Name: config.ConfigClientSecret,
Help: "Amazon Application Client Secret.",
Required: true,
}, {
Name: config.ConfigAuthURL,
Help: "Auth server URL.\nLeave blank to use Amazon's.",
Advanced: true,
}, {
Name: config.ConfigTokenURL,
Help: "Token server url.\nleave blank to use Amazon's.",
Advanced: true,
}, {
Name: "checkpoint", Name: "checkpoint",
Help: "Checkpoint for internal polling (debug).", Help: "Checkpoint for internal polling (debug).",
Hide: fs.OptionHideBoth, Hide: fs.OptionHideBoth,
Advanced: true, Advanced: true,
}, { }, {
Name: "upload_wait_per_gb", Name: "upload_wait_per_gb",
Help: `Additional time per GiB to wait after a failed complete upload to see if it appears. Help: `Additional time per GB to wait after a failed complete upload to see if it appears.
Sometimes Amazon Drive gives an error when a file has been fully Sometimes Amazon Drive gives an error when a file has been fully
uploaded but the file appears anyway after a little while. This uploaded but the file appears anyway after a little while. This
happens sometimes for files over 1 GiB in size and nearly every time for happens sometimes for files over 1GB in size and nearly every time for
files bigger than 10 GiB. This parameter controls the time rclone waits files bigger than 10GB. This parameter controls the time rclone waits
for the file to appear. for the file to appear.
The default value for this parameter is 3 minutes per GiB, so by The default value for this parameter is 3 minutes per GB, so by
default it will wait 3 minutes for every GiB uploaded to see if the default it will wait 3 minutes for every GB uploaded to see if the
file appears. file appears.
You can disable this feature by setting it to 0. This may cause You can disable this feature by setting it to 0. This may cause
@@ -110,7 +127,7 @@ in this situation.`,
Files this size or more will be downloaded via their "tempLink". This
is to work around a problem with Amazon Drive which blocks downloads
-of files bigger than about 10 GiB. The default for this is 9 GiB which
+of files bigger than about 10GB. The default for this is 9GB which
shouldn't need to be changed.
To download files above this threshold, rclone requests a "tempLink"
@@ -118,23 +135,15 @@ which downloads the file through a temporary URL directly from the
underlying S3 storage.`,
Default: defaultTempLinkThreshold,
Advanced: true,
-}, {
-Name: config.ConfigEncoding,
-Help: config.ConfigEncodingHelp,
-Advanced: true,
-// Encode invalid UTF-8 bytes as json doesn't handle them properly.
-Default: (encoder.Base |
-encoder.EncodeInvalidUtf8),
-}}...),
+}},
})
}
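The `upload_wait_per_gb` help text above implies a simple calculation: the wait is the upload size in gigabytes multiplied by the configured duration (3 minutes per GB by default). A worked sketch of that arithmetic (illustrative only, not the backend's actual code):

```go
package main

import (
	"fmt"
	"time"
)

// uploadWait mirrors the documented behaviour: wait waitPerGB for every
// gigabyte uploaded, so a 10GB file waits 30 minutes by default.
func uploadWait(sizeBytes int64, waitPerGB time.Duration) time.Duration {
	const gb = 1 << 30
	return time.Duration(float64(sizeBytes) / gb * float64(waitPerGB))
}

func main() {
	fmt.Println(uploadWait(10*(1<<30), 3*time.Minute))  // 30m0s for a 10GB file
	fmt.Println(uploadWait(512*(1<<20), 3*time.Minute)) // 1m30s for 512MB
}
```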
// Options defines the configuration for this backend
type Options struct {
Checkpoint string `config:"checkpoint"`
UploadWaitPerGB fs.Duration `config:"upload_wait_per_gb"`
TempLinkThreshold fs.SizeSuffix `config:"templink_threshold"`
-Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote acd server
@@ -142,7 +151,6 @@ type Fs struct {
name string // name of this remote
features *fs.Features // optional features
opt Options // options for this Fs
-ci *fs.ConfigInfo // global config
c *acd.Client // the connection to the acd server
noAuthClient *http.Client // unauthenticated http client
root string // the path we are working on
@@ -152,7 +160,7 @@ type Fs struct {
tokenRenewer *oauthutil.Renew // renew the token on expiry
}
-// Object describes an acd object
+// Object describes a acd object
//
// Will definitely have info but maybe not meta
type Object struct {
@@ -203,10 +211,7 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
-func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
-if fserrors.ContextError(ctx, &err) {
-return false, err
-}
+func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
if resp != nil {
if resp.StatusCode == 401 {
f.tokenRenewer.Invalidate()
@@ -215,7 +220,7 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
}
// Work around receiving this error sporadically on authentication
//
-// HTTP code 403: "403 Forbidden", response body: {"message":"Authorization header requires 'Credential' parameter. Authorization header requires 'Signature' parameter. Authorization header requires 'SignedHeaders' parameter. Authorization header requires existence of either a 'X-Amz-Date' or a 'Date' header. Authorization=Bearer"}
+// HTTP code 403: "403 Forbidden", reponse body: {"message":"Authorization header requires 'Credential' parameter. Authorization header requires 'Signature' parameter. Authorization header requires 'SignedHeaders' parameter. Authorization header requires existence of either a 'X-Amz-Date' or a 'Date' header. Authorization=Bearer"}
if resp.StatusCode == 403 && strings.Contains(err.Error(), "Authorization header requires") {
fs.Debugf(f, "403 \"Authorization header requires...\" error received - retry")
return true, err
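The three `-` lines at the top of `shouldRetry` are the interesting part of this hunk: the newer code refuses to retry once the request's context has been cancelled. A self-contained sketch of that guard using only the standard library (rclone's `fserrors.ContextError` does roughly this plus error wrapping):

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

var errTemporary = errors.New("temporary failure")

// shouldRetry reports whether err is worth retrying. Once the context is
// cancelled or timed out, retrying would only spin, so give up immediately.
func shouldRetry(ctx context.Context, err error) bool {
	if ctx.Err() != nil {
		return false // context cancelled or deadline exceeded
	}
	return errors.Is(err, errTemporary)
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	fmt.Println(shouldRetry(ctx, errTemporary)) // true
	cancel()
	fmt.Println(shouldRetry(ctx, errTemporary)) // false - stop retrying
}
```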
@@ -241,7 +246,8 @@ func filterRequest(req *http.Request) {
}
// NewFs constructs an Fs from the path, container:path
-func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+ctx := context.Background()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -249,7 +255,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, err
}
root = parsePath(root)
-baseClient := fshttp.NewClient(ctx)
+baseClient := fshttp.NewClient(fs.Config)
if do, ok := baseClient.Transport.(interface {
SetRequestFilter(f func(req *http.Request))
}); ok {
@@ -257,31 +263,29 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
} else {
fs.Debugf(name+":", "Couldn't add request filter - large file downloads will fail")
}
-oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, acdConfig, baseClient)
+oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, acdConfig, baseClient)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Amazon Drive")
}
c := acd.NewClient(oAuthClient)
-ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
-ci: ci,
c: c,
-pacer: fs.NewPacer(ctx, pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
-noAuthClient: fshttp.NewClient(ctx),
+pacer: fs.NewPacer(pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
+noAuthClient: fshttp.NewClient(fs.Config),
}
f.features = (&fs.Features{
CaseInsensitive: true,
ReadMimeType: true,
CanHaveEmptyDirectories: true,
-}).Fill(ctx, f)
+}).Fill(f)
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
-_, err := f.getRootInfo(ctx)
+_, err := f.getRootInfo()
return err
})
@@ -289,14 +293,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
_, resp, err = f.c.Account.GetEndpoints()
-return f.shouldRetry(ctx, resp, err)
+return f.shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get endpoints")
}
// Get rootID
-rootInfo, err := f.getRootInfo(ctx)
+rootInfo, err := f.getRootInfo()
if err != nil || rootInfo.Id == nil {
return nil, errors.Wrap(err, "failed to get root")
}
@@ -338,11 +342,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
// getRootInfo gets the root folder info
-func (f *Fs) getRootInfo(ctx context.Context) (rootInfo *acd.Folder, err error) {
+func (f *Fs) getRootInfo() (rootInfo *acd.Folder, err error) {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
rootInfo, resp, err = f.c.Nodes.GetRoot()
-return f.shouldRetry(ctx, resp, err)
+return f.shouldRetry(resp, err)
})
return rootInfo, err
}
@@ -380,8 +384,8 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
var resp *http.Response
var subFolder *acd.Folder
err = f.pacer.Call(func() (bool, error) {
-subFolder, resp, err = folder.GetFolder(f.opt.Enc.FromStandardName(leaf))
-return f.shouldRetry(ctx, resp, err)
+subFolder, resp, err = folder.GetFolder(leaf)
+return f.shouldRetry(resp, err)
})
if err != nil {
if err == acd.ErrorNodeNotFound {
@@ -407,8 +411,8 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
var resp *http.Response
var info *acd.Folder
err = f.pacer.Call(func() (bool, error) {
-info, resp, err = folder.CreateFolder(f.opt.Enc.FromStandardName(leaf))
-return f.shouldRetry(ctx, resp, err)
+info, resp, err = folder.CreateFolder(leaf)
+return f.shouldRetry(resp, err)
})
if err != nil {
//fmt.Printf("...Error %v\n", err)
@@ -429,7 +433,7 @@ type listAllFn func(*acd.Node) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
-func (f *Fs) listAll(ctx context.Context, dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
+func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
query := "parents:" + dirID
if directoriesOnly {
query += " AND kind:" + folderKind
@@ -450,7 +454,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, title string, directorie
var resp *http.Response
err = f.pacer.CallNoRetry(func() (bool, error) {
nodes, resp, err = f.c.Nodes.GetNodes(&opts)
-return f.shouldRetry(ctx, resp, err)
+return f.shouldRetry(resp, err)
})
if err != nil {
return false, err
@@ -475,7 +479,6 @@ func (f *Fs) listAll(ctx context.Context, dirID string, title string, directorie
if !hasValidParent {
continue
}
-*node.Name = f.opt.Enc.ToStandardName(*node.Name)
// Store the nodes up in case we have to retry the listing
out = append(out, node)
}
@@ -501,15 +504,19 @@ func (f *Fs) listAll(ctx context.Context, dirID string, title string, directorie
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+err = f.dirCache.FindRoot(ctx, false)
+if err != nil {
+return nil, err
+}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
}
-maxTries := f.ci.LowLevelRetries
+maxTries := fs.Config.LowLevelRetries
var iErr error
for tries := 1; tries <= maxTries; tries++ {
entries = nil
-_, err = f.listAll(ctx, directoryID, "", false, false, func(node *acd.Node) bool {
+_, err = f.listAll(directoryID, "", false, false, func(node *acd.Node) bool {
remote := path.Join(dir, *node.Name)
switch *node.Kind {
case folderKind:
@@ -526,7 +533,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
entries = append(entries, o)
default:
-// ignore ASSET, etc.
+// ignore ASSET etc
}
return false
})
@@ -648,7 +655,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
return nil, err
}
// If not create it
-leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
+leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return nil, err
}
@@ -661,14 +668,14 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
err = f.pacer.CallNoRetry(func() (bool, error) {
start := time.Now()
f.tokenRenewer.Start()
-info, resp, err = folder.Put(in, f.opt.Enc.FromStandardName(leaf))
+info, resp, err = folder.Put(in, leaf)
f.tokenRenewer.Stop()
var ok bool
ok, info, err = f.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
if ok {
return false, nil
}
-return f.shouldRetry(ctx, resp, err)
+return f.shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -679,11 +686,17 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
-_, err := f.dirCache.FindDir(ctx, dir, true)
+err := f.dirCache.FindRoot(ctx, true)
+if err != nil {
+return err
+}
+if dir != "" {
+_, err = f.dirCache.FindDir(ctx, dir, true)
+}
return err
}
-// Move src to this remote using server-side move operations.
+// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
@@ -701,6 +714,10 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// create the destination directory if necessary
+err := f.dirCache.FindRoot(ctx, true)
+if err != nil {
+return nil, err
+}
srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
if err != nil {
return nil, err
@@ -709,7 +726,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if err != nil {
return nil, err
}
-err = f.moveNode(ctx, srcObj.remote, dstLeaf, dstDirectoryID, srcObj.info, srcLeaf, srcDirectoryID, false)
+err = f.moveNode(srcObj.remote, dstLeaf, dstDirectoryID, srcObj.info, srcLeaf, srcDirectoryID, false)
if err != nil {
return nil, err
}
@@ -720,7 +737,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
dstObj fs.Object
srcErr, dstErr error
)
-for i := 1; i <= f.ci.LowLevelRetries; i++ {
+for i := 1; i <= fs.Config.LowLevelRetries; i++ {
_, srcErr = srcObj.fs.NewObject(ctx, srcObj.remote) // try reading the object
if srcErr != nil && srcErr != fs.ErrorObjectNotFound {
// exit if error on source
@@ -735,7 +752,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// finished if src not found and dst found
break
}
-fs.Debugf(src, "Wait for directory listing to update after move %d/%d", i, f.ci.LowLevelRetries)
+fs.Debugf(src, "Wait for directory listing to update after move %d/%d", i, fs.Config.LowLevelRetries)
time.Sleep(1 * time.Second)
}
return dstObj, dstErr
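The loop above polls because Amazon Drive's directory listings are eventually consistent: after a move the old entry can linger and the new one can take a while to appear. A simplified, self-contained sketch of the same wait-for-visibility pattern (the `lookup` function is hypothetical; the fixed one-second poll matches the diff):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

var errNotFound = errors.New("object not found")

// waitForMove polls until src has disappeared and dst is visible, giving up
// after maxTries attempts - the same shape as the loop in Move above.
func waitForMove(lookup func(string) error, src, dst string, maxTries int) error {
	for i := 1; i <= maxTries; i++ {
		if lookup(src) == errNotFound && lookup(dst) == nil {
			return nil // source gone, destination listed: move is visible
		}
		fmt.Printf("waiting for listing to update %d/%d\n", i, maxTries)
		time.Sleep(1 * time.Second)
	}
	return errors.New("move never became visible")
}

func main() {
	moved, calls := false, 0
	lookup := func(name string) error {
		calls++
		if calls > 4 { // pretend the backend caught up after two polls
			moved = true
		}
		if (name == "old") == moved {
			return errNotFound
		}
		return nil
	}
	fmt.Println(waitForMove(lookup, "old", "new", 10))
}
```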
@@ -748,7 +765,7 @@ func (f *Fs) DirCacheFlush() {
}
// DirMove moves src, srcRemote to this remote at dstRemote
-// using server-side move operations.
+// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -770,24 +787,54 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return errors.New("can't move root directory")
}
+// find the root src directory
+err = srcFs.dirCache.FindRoot(ctx, false)
+if err != nil {
+return err
+}
+// find the root dst directory
+if dstRemote != "" {
+err = f.dirCache.FindRoot(ctx, true)
+if err != nil {
+return err
+}
+} else {
+if f.dirCache.FoundRoot() {
+return fs.ErrorDirExists
+}
+}
// Find ID of dst parent, creating subdirs if necessary
-dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, dstRemote, true)
+findPath := dstRemote
+if dstRemote == "" {
+findPath = f.root
+}
+dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, findPath, true)
if err != nil {
return err
}
// Check destination does not exist
-_, err = f.dirCache.FindDir(ctx, dstRemote, false)
-if err == fs.ErrorDirNotFound {
-// OK
-} else if err != nil {
-return err
-} else {
-return fs.ErrorDirExists
+if dstRemote != "" {
+_, err = f.dirCache.FindDir(ctx, dstRemote, false)
+if err == fs.ErrorDirNotFound {
+// OK
+} else if err != nil {
+return err
+} else {
+return fs.ErrorDirExists
+}
}
// Find ID of src parent
-_, srcDirectoryID, err := srcFs.dirCache.FindPath(ctx, srcRemote, false)
+findPath = srcRemote
+var srcDirectoryID string
+if srcRemote == "" {
+srcDirectoryID, err = srcFs.dirCache.RootParentID()
+} else {
+_, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, findPath, false)
+}
if err != nil {
return err
}
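However it is restructured, this hunk preserves one rule: a server-side directory move must fail with `fs.ErrorDirExists` if the destination is already present, and treat "not found" as the green light. A compact sketch of that decision (hypothetical `findDir`, standing in for `dirCache.FindDir`):

```go
package main

import (
	"errors"
	"fmt"
)

var (
	errDirNotFound = errors.New("directory not found")
	errDirExists   = errors.New("directory already exists")
)

// checkDestination mirrors the logic in DirMove: not-found is fine (we can
// move there), any other lookup error aborts, and an existing directory is
// a hard failure so the move never overwrites it.
func checkDestination(findDir func(string) error, dst string) error {
	err := findDir(dst)
	switch {
	case errors.Is(err, errDirNotFound):
		return nil // OK - destination free
	case err != nil:
		return err // some other lookup failure
	default:
		return errDirExists
	}
}

func main() {
	free := func(string) error { return errDirNotFound }
	taken := func(string) error { return nil }
	fmt.Println(checkDestination(free, "dst"))  // <nil>
	fmt.Println(checkDestination(taken, "dst")) // directory already exists
}
```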
@@ -804,7 +851,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
var jsonStr string var jsonStr string
err = srcFs.pacer.Call(func() (bool, error) { err = srcFs.pacer.Call(func() (bool, error) {
jsonStr, err = srcInfo.GetMetadata() jsonStr, err = srcInfo.GetMetadata()
return srcFs.shouldRetry(ctx, nil, err) return srcFs.shouldRetry(nil, err)
}) })
if err != nil { if err != nil {
fs.Debugf(src, "DirMove error: error reading src metadata: %v", err) fs.Debugf(src, "DirMove error: error reading src metadata: %v", err)
@@ -816,7 +863,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return err return err
} }
err = f.moveNode(ctx, srcPath, dstLeaf, dstDirectoryID, srcInfo, srcLeaf, srcDirectoryID, true) err = f.moveNode(srcPath, dstLeaf, dstDirectoryID, srcInfo, srcLeaf, srcDirectoryID, true)
if err != nil { if err != nil {
return err return err
} }
@@ -833,6 +880,10 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return errors.New("can't purge root directory") return errors.New("can't purge root directory")
} }
dc := f.dirCache dc := f.dirCache
err := dc.FindRoot(ctx, false)
if err != nil {
return err
}
rootID, err := dc.FindDir(ctx, dir, false) rootID, err := dc.FindDir(ctx, dir, false)
if err != nil { if err != nil {
return err return err
@@ -841,7 +892,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
if check {
// check directory is empty
empty := true
-_, err = f.listAll(ctx, rootID, "", false, false, func(node *acd.Node) bool {
+_, err = f.listAll(rootID, "", false, false, func(node *acd.Node) bool {
switch *node.Kind {
case folderKind:
empty = false
@@ -866,7 +917,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = node.Trash()
-return f.shouldRetry(ctx, resp, err)
+return f.shouldRetry(resp, err)
})
if err != nil {
return err
@@ -896,7 +947,7 @@ func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
-// Copy src to this remote using server-side copy operations.
+// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
@@ -924,8 +975,8 @@ func (f *Fs) Hashes() hash.Set {
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
-func (f *Fs) Purge(ctx context.Context, dir string) error {
-return f.purgeCheck(ctx, dir, false)
+func (f *Fs) Purge(ctx context.Context) error {
+return f.purgeCheck(ctx, "", false)
}
// ------------------------------------------------------------
@@ -976,7 +1027,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
if o.info != nil {
return nil
}
-leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, o.remote, false)
+leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, o.remote, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return fs.ErrorObjectNotFound
@@ -987,8 +1038,8 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
var resp *http.Response
var info *acd.File
err = o.fs.pacer.Call(func() (bool, error) {
-info, resp, err = folder.GetFile(o.fs.opt.Enc.FromStandardName(leaf))
-return o.fs.shouldRetry(ctx, resp, err)
+info, resp, err = folder.GetFile(leaf)
+return o.fs.shouldRetry(resp, err)
})
if err != nil {
if err == acd.ErrorNodeNotFound {
@@ -1045,7 +1096,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
} else {
in, resp, err = file.OpenTempURLHeaders(o.fs.noAuthClient, headers)
}
-return o.fs.shouldRetry(ctx, resp, err)
+return o.fs.shouldRetry(resp, err)
})
return in, err
}
@@ -1068,7 +1119,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if ok {
return false, nil
}
-return o.fs.shouldRetry(ctx, resp, err)
+return o.fs.shouldRetry(resp, err)
})
if err != nil {
return err
@@ -1078,70 +1129,70 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// Remove a node
-func (f *Fs) removeNode(ctx context.Context, info *acd.Node) error {
+func (f *Fs) removeNode(info *acd.Node) error {
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = info.Trash()
-return f.shouldRetry(ctx, resp, err)
+return f.shouldRetry(resp, err)
})
return err
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
-return o.fs.removeNode(ctx, o.info)
+return o.fs.removeNode(o.info)
}
// Restore a node
-func (f *Fs) restoreNode(ctx context.Context, info *acd.Node) (newInfo *acd.Node, err error) {
+func (f *Fs) restoreNode(info *acd.Node) (newInfo *acd.Node, err error) {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
newInfo, resp, err = info.Restore()
-return f.shouldRetry(ctx, resp, err)
+return f.shouldRetry(resp, err)
})
return newInfo, err
}
// Changes name of given node
-func (f *Fs) renameNode(ctx context.Context, info *acd.Node, newName string) (newInfo *acd.Node, err error) {
+func (f *Fs) renameNode(info *acd.Node, newName string) (newInfo *acd.Node, err error) {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
-newInfo, resp, err = info.Rename(f.opt.Enc.FromStandardName(newName))
-return f.shouldRetry(ctx, resp, err)
+newInfo, resp, err = info.Rename(newName)
+return f.shouldRetry(resp, err)
})
return newInfo, err
}
// Replaces one parent with another, effectively moving the file. Leaves other
// parents untouched. ReplaceParent cannot be used when the file is trashed.
-func (f *Fs) replaceParent(ctx context.Context, info *acd.Node, oldParentID string, newParentID string) error {
+func (f *Fs) replaceParent(info *acd.Node, oldParentID string, newParentID string) error {
return f.pacer.Call(func() (bool, error) {
resp, err := info.ReplaceParent(oldParentID, newParentID)
-return f.shouldRetry(ctx, resp, err)
+return f.shouldRetry(resp, err)
})
}
// Adds one additional parent to object.
-func (f *Fs) addParent(ctx context.Context, info *acd.Node, newParentID string) error {
+func (f *Fs) addParent(info *acd.Node, newParentID string) error {
return f.pacer.Call(func() (bool, error) {
resp, err := info.AddParent(newParentID)
-return f.shouldRetry(ctx, resp, err)
+return f.shouldRetry(resp, err)
})
}
// Remove given parent from object, leaving the other possible
// parents untouched. Object can end up having no parents.
-func (f *Fs) removeParent(ctx context.Context, info *acd.Node, parentID string) error {
+func (f *Fs) removeParent(info *acd.Node, parentID string) error {
return f.pacer.Call(func() (bool, error) {
resp, err := info.RemoveParent(parentID)
-return f.shouldRetry(ctx, resp, err)
+return f.shouldRetry(resp, err)
})
}
// moveNode moves the node given from the srcLeaf,srcDirectoryID to
// the dstLeaf,dstDirectoryID
-func (f *Fs) moveNode(ctx context.Context, name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, srcLeaf, srcDirectoryID string, useDirErrorMsgs bool) (err error) {
+func (f *Fs) moveNode(name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, srcLeaf, srcDirectoryID string, useDirErrorMsgs bool) (err error) {
// fs.Debugf(name, "moveNode dst(%q,%s) <- src(%q,%s)", dstLeaf, dstDirectoryID, srcLeaf, srcDirectoryID)
cantMove := fs.ErrorCantMove
if useDirErrorMsgs {
@@ -1155,7 +1206,7 @@ func (f *Fs) moveNode(ctx context.Context, name, dstLeaf, dstDirectoryID string,
if srcLeaf != dstLeaf {
// fs.Debugf(name, "renaming")
-_, err = f.renameNode(ctx, srcInfo, dstLeaf)
+_, err = f.renameNode(srcInfo, dstLeaf)
if err != nil {
fs.Debugf(name, "Move: quick path rename failed: %v", err)
goto OnConflict
@@ -1163,7 +1214,7 @@ func (f *Fs) moveNode(ctx context.Context, name, dstLeaf, dstDirectoryID string,
}
if srcDirectoryID != dstDirectoryID {
// fs.Debugf(name, "trying parent replace: %s -> %s", oldParentID, newParentID)
-err = f.replaceParent(ctx, srcInfo, srcDirectoryID, dstDirectoryID)
+err = f.replaceParent(srcInfo, srcDirectoryID, dstDirectoryID)
if err != nil {
fs.Debugf(name, "Move: quick path parent replace failed: %v", err)
return err
@@ -1176,13 +1227,13 @@ OnConflict:
fs.Debugf(name, "Could not directly rename file, presumably because there was a file with the same name already. Instead, the file will now be trashed where such operations do not cause errors. It will be restored to the correct parent after. If any of the subsequent calls fails, the rename/move will be in an invalid state.") fs.Debugf(name, "Could not directly rename file, presumably because there was a file with the same name already. Instead, the file will now be trashed where such operations do not cause errors. It will be restored to the correct parent after. If any of the subsequent calls fails, the rename/move will be in an invalid state.")
// fs.Debugf(name, "Trashing file") // fs.Debugf(name, "Trashing file")
err = f.removeNode(ctx, srcInfo) err = f.removeNode(srcInfo)
if err != nil { if err != nil {
fs.Debugf(name, "Move: remove node failed: %v", err) fs.Debugf(name, "Move: remove node failed: %v", err)
return err return err
} }
// fs.Debugf(name, "Renaming file") // fs.Debugf(name, "Renaming file")
_, err = f.renameNode(ctx, srcInfo, dstLeaf) _, err = f.renameNode(srcInfo, dstLeaf)
if err != nil { if err != nil {
fs.Debugf(name, "Move: rename node failed: %v", err) fs.Debugf(name, "Move: rename node failed: %v", err)
return err return err
@@ -1190,19 +1241,19 @@ OnConflict:
// note: replacing parent is forbidden by API, modifying them individually is
// okay though
// fs.Debugf(name, "Adding target parent")
-err = f.addParent(ctx, srcInfo, dstDirectoryID)
+err = f.addParent(srcInfo, dstDirectoryID)
if err != nil {
fs.Debugf(name, "Move: addParent failed: %v", err)
return err
}
// fs.Debugf(name, "removing original parent")
-err = f.removeParent(ctx, srcInfo, srcDirectoryID)
+err = f.removeParent(srcInfo, srcDirectoryID)
if err != nil {
fs.Debugf(name, "Move: removeParent failed: %v", err)
return err
}
// fs.Debugf(name, "Restoring")
-_, err = f.restoreNode(ctx, srcInfo)
+_, err = f.restoreNode(srcInfo)
if err != nil {
fs.Debugf(name, "Move: restoreNode node failed: %v", err)
return err
@@ -1303,11 +1354,10 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoin
if len(node.Parents) > 0 {
if path, ok := f.dirCache.GetInv(node.Parents[0]); ok {
// and append the drive file name to compute the full file name
-name := f.opt.Enc.ToStandardName(*node.Name)
if len(path) > 0 {
-path = path + "/" + name
+path = path + "/" + *node.Name
} else {
-path = name
+path = *node.Name
}
// this will now clear the actual file too
pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})

File diff suppressed because it is too large


@@ -1,4 +1,4 @@
-// +build !plan9,!solaris,!js,go1.14
+// +build !plan9,!solaris
package azureblob
@@ -16,20 +16,3 @@ func (f *Fs) InternalTest(t *testing.T) {
enabled = f.Features().GetTier
assert.True(t, enabled)
}
func TestIncrement(t *testing.T) {
for _, test := range []struct {
in []byte
want []byte
}{
{[]byte{0, 0, 0, 0}, []byte{1, 0, 0, 0}},
{[]byte{0xFE, 0, 0, 0}, []byte{0xFF, 0, 0, 0}},
{[]byte{0xFF, 0, 0, 0}, []byte{0, 1, 0, 0}},
{[]byte{0, 1, 0, 0}, []byte{1, 1, 0, 0}},
{[]byte{0xFF, 0xFF, 0xFF, 0xFE}, []byte{0, 0, 0, 0xFF}},
{[]byte{0xFF, 0xFF, 0xFF, 0xFF}, []byte{0, 0, 0, 0}},
} {
increment(test.in)
assert.Equal(t, test.want, test.in)
}
}
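The increment function exercised by this table is not shown in the diff. A minimal sketch consistent with the test vectors, assuming the slice is treated as a little-endian counter that wraps to zero on overflow:

func increment(xs []byte) {
    for i, digit := range xs {
        newDigit := digit + 1
        xs[i] = newDigit
        if newDigit >= digit {
            // no carry, so the higher-order bytes stay as they are
            break
        }
    }
}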


@@ -1,16 +1,14 @@
// Test AzureBlob filesystem interface
-// +build !plan9,!solaris,!js,go1.14
+// +build !plan9,!solaris
package azureblob
import (
-"context"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
-"github.com/stretchr/testify/assert"
)
// TestIntegration runs integration tests against the remote
@@ -29,36 +27,11 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
-func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
-return f.setUploadCutoff(cs)
-}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
-_ fstests.SetUploadCutoffer = (*Fs)(nil)
)
// TestServicePrincipalFileSuccess checks that, given a proper JSON file, we can create a token.
func TestServicePrincipalFileSuccess(t *testing.T) {
ctx := context.TODO()
credentials := `
{
"appId": "my application (client) ID",
"password": "my secret",
"tenant": "my active directory tenant ID"
}
`
tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
if assert.NoError(t, err) {
assert.NotNil(t, tokenRefresher)
}
}
// TestServicePrincipalFileFailure checks that, given a JSON file with a missing secret, it returns an error.
func TestServicePrincipalFileFailure(t *testing.T) {
ctx := context.TODO()
credentials := `
{
"appId": "my application (client) ID",
"tenant": "my active directory tenant ID"
}
`
_, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
assert.Error(t, err)
assert.EqualError(t, err, "error creating service principal token: parameter 'secret' cannot be empty")
}
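newServicePrincipalTokenRefresher itself is not shown in this diff. A hedged sketch of what these two tests exercise, assuming the credentials JSON is unmarshalled and handed to adal; the helper name, the azure.PublicCloud endpoint, and the direct *adal.ServicePrincipalToken return are illustrative assumptions, and encoding/json plus github.com/Azure/go-autorest/autorest/azure are assumed imports:

type servicePrincipalCredentials struct {
    AppID    string `json:"appId"`
    Password string `json:"password"`
    Tenant   string `json:"tenant"`
}

// newServicePrincipalTokenSketch is an illustrative stand-in, not the real function.
func newServicePrincipalTokenSketch(_ context.Context, credentialsData []byte) (*adal.ServicePrincipalToken, error) {
    var spCredentials servicePrincipalCredentials
    if err := json.Unmarshal(credentialsData, &spCredentials); err != nil {
        return nil, errors.Wrap(err, "error parsing credentials")
    }
    oauthConfig, err := adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, spCredentials.Tenant)
    if err != nil {
        return nil, errors.Wrap(err, "error creating oauth config")
    }
    // adal validates its parameters and rejects an empty secret, which is
    // what the failure test above relies on.
    token, err := adal.NewServicePrincipalToken(*oauthConfig, spCredentials.AppID, spCredentials.Password, azureResource)
    if err != nil {
        return nil, errors.Wrap(err, "error creating service principal token")
    }
    return token, nil
}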


@@ -1,6 +1,6 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "
-// +build plan9 solaris js !go1.14
+// +build plan9 solaris
package azureblob


@@ -1,137 +0,0 @@
// +build !plan9,!solaris,!js,go1.14
package azureblob
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fshttp"
)
const (
azureResource = "https://storage.azure.com"
imdsAPIVersion = "2018-02-01"
msiEndpointDefault = "http://169.254.169.254/metadata/identity/oauth2/token"
)
// This custom type is used to add the port the test server has bound to
// to the request context.
type testPortKey string
type msiIdentifierType int
const (
msiClientID msiIdentifierType = iota
msiObjectID
msiResourceID
)
type userMSI struct {
Type msiIdentifierType
Value string
}
type httpError struct {
Response *http.Response
}
func (e httpError) Error() string {
return fmt.Sprintf("HTTP error %v (%v)", e.Response.StatusCode, e.Response.Status)
}
// GetMSIToken attempts to obtain an MSI token from the Azure Instance
// Metadata Service.
func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
// Attempt to get an MSI token; silently continue if unsuccessful.
// This code has been lovingly stolen from azcopy's OAuthTokenManager.
result := adal.Token{}
req, err := http.NewRequestWithContext(ctx, "GET", msiEndpointDefault, nil)
if err != nil {
fs.Debugf(nil, "Failed to create request: %v", err)
return result, err
}
params := req.URL.Query()
params.Set("resource", azureResource)
params.Set("api-version", imdsAPIVersion)
// Specify user-assigned identity if requested.
if identity != nil {
switch identity.Type {
case msiClientID:
params.Set("client_id", identity.Value)
case msiObjectID:
params.Set("object_id", identity.Value)
case msiResourceID:
params.Set("mi_res_id", identity.Value)
default:
// If this happens, the calling function and this one don't agree on
// what valid ID types exist.
return result, fmt.Errorf("unknown MSI identity type specified")
}
}
req.URL.RawQuery = params.Encode()
// The Metadata header is required by all calls to IMDS.
req.Header.Set("Metadata", "true")
// If this function is run in a test, query the test server instead of IMDS.
testPort, isTest := ctx.Value(testPortKey("testPort")).(int)
if isTest {
req.URL.Host = fmt.Sprintf("localhost:%d", testPort)
req.Host = req.URL.Host
}
// Send request
httpClient := fshttp.NewClient(ctx)
resp, err := httpClient.Do(req)
if err != nil {
return result, errors.Wrap(err, "MSI is not enabled on this VM")
}
defer func() { // resp and Body should not be nil
_, err = io.Copy(ioutil.Discard, resp.Body)
if err != nil {
fs.Debugf(nil, "Unable to drain IMDS response: %v", err)
}
err = resp.Body.Close()
if err != nil {
fs.Debugf(nil, "Unable to close IMDS response: %v", err)
}
}()
// Check if the status code indicates success
// The request returns 200 currently, add 201 and 202 as well for possible extension.
switch resp.StatusCode {
case 200, 201, 202:
break
default:
body, _ := ioutil.ReadAll(resp.Body)
fs.Errorf(nil, "Couldn't obtain OAuth token from IMDS; server returned status code %d and body: %v", resp.StatusCode, string(body))
return result, httpError{Response: resp}
}
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return result, errors.Wrap(err, "Couldn't read IMDS response")
}
// Remove BOM, if any. azcopy does this so I'm following along.
b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
// This would be a good place to persist the token if a large number of rclone
// invocations are being made in a short amount of time. If the token is
// persisted, the azureblob code will need to check for expiry before every
// storage API call.
err = json.Unmarshal(b, &result)
if err != nil {
return result, errors.Wrap(err, "Couldn't unmarshal IMDS response")
}
return result, nil
}
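A usage sketch, not part of the diff: called from inside this package, a nil identity selects the system-assigned identity (see the switch above), and the bearer token comes back in the adal.Token's AccessToken field.

func msiTokenExample(ctx context.Context) error {
    token, err := GetMSIToken(ctx, nil)
    if err != nil {
        return err // e.g. not running on an Azure VM with MSI enabled
    }
    fs.Debugf(nil, "got MSI access token of length %d", len(token.AccessToken))
    return nil
}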


@@ -1,117 +0,0 @@
// +build !plan9,!solaris,!js,go1.14
package azureblob
import (
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func handler(t *testing.T, actual *map[string]string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
require.NoError(t, err)
parameters := r.URL.Query()
(*actual)["path"] = r.URL.Path
(*actual)["Metadata"] = r.Header.Get("Metadata")
(*actual)["method"] = r.Method
for paramName := range parameters {
(*actual)[paramName] = parameters.Get(paramName)
}
// Make response.
response := adal.Token{}
responseBytes, err := json.Marshal(response)
require.NoError(t, err)
_, err = w.Write(responseBytes)
require.NoError(t, err)
}
}
func TestManagedIdentity(t *testing.T) {
// test user-assigned identity specifiers to use
testMSIClientID := "d859b29f-5c9c-42f8-a327-ec1bc6408d79"
testMSIObjectID := "9ffeb650-3ca0-4278-962b-5a38d520591a"
testMSIResourceID := "/subscriptions/fe714c49-b8a4-4d49-9388-96a20daa318f/resourceGroups/somerg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/someidentity"
tests := []struct {
identity *userMSI
identityParameterName string
expectedAbsent []string
}{
{&userMSI{msiClientID, testMSIClientID}, "client_id", []string{"object_id", "mi_res_id"}},
{&userMSI{msiObjectID, testMSIObjectID}, "object_id", []string{"client_id", "mi_res_id"}},
{&userMSI{msiResourceID, testMSIResourceID}, "mi_res_id", []string{"object_id", "client_id"}},
{nil, "(default)", []string{"object_id", "client_id", "mi_res_id"}},
}
alwaysExpected := map[string]string{
"path": "/metadata/identity/oauth2/token",
"resource": "https://storage.azure.com",
"Metadata": "true",
"api-version": "2018-02-01",
"method": "GET",
}
for _, test := range tests {
actual := make(map[string]string, 10)
testServer := httptest.NewServer(handler(t, &actual))
defer testServer.Close()
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
_, err = GetMSIToken(ctx, test.identity)
require.NoError(t, err)
// Validate expected query parameters present
expected := make(map[string]string)
for k, v := range alwaysExpected {
expected[k] = v
}
if test.identity != nil {
expected[test.identityParameterName] = test.identity.Value
}
for key := range expected {
value, exists := actual[key]
if assert.Truef(t, exists, "test of %s: query parameter %s was not passed",
test.identityParameterName, key) {
assert.Equalf(t, expected[key], value,
"test of %s: parameter %s has incorrect value", test.identityParameterName, key)
}
}
// Validate unexpected query parameters absent
for _, key := range test.expectedAbsent {
_, exists := actual[key]
assert.Falsef(t, exists, "query parameter %s was unexpectedly passed")
}
}
}
func errorHandler(resultCode int) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Test error generated", resultCode)
}
}
func TestIMDSErrors(t *testing.T) {
errorCodes := []int{404, 429, 500}
for _, code := range errorCodes {
testServer := httptest.NewServer(errorHandler(code))
defer testServer.Close()
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
_, err = GetMSIToken(ctx, nil)
require.Error(t, err)
httpErr, ok := err.(httpError)
require.Truef(t, ok, "HTTP error %d did not result in an httpError object", code)
assert.Equalf(t, httpErr.Response.StatusCode, code, "desired error %d but didn't get it", code)
}
}


@@ -2,11 +2,12 @@ package api
import (
"fmt"
+"path"
"strconv"
+"strings"
"time"
"github.com/rclone/rclone/fs/fserrors"
-"github.com/rclone/rclone/lib/version"
)
// Error describes a B2 error response
@@ -49,7 +50,7 @@ type Timestamp time.Time
// MarshalJSON turns a Timestamp into JSON (in UTC)
func (t *Timestamp) MarshalJSON() (out []byte, err error) {
timestamp := (*time.Time)(t).UTC().UnixNano()
-return []byte(strconv.FormatInt(timestamp/1e6, 10)), nil
+return []byte(strconv.FormatInt(timestamp/1E6, 10)), nil
}
// UnmarshalJSON turns JSON into a Timestamp
@@ -58,21 +59,20 @@ func (t *Timestamp) UnmarshalJSON(data []byte) error {
if err != nil {
return err
}
-*t = Timestamp(time.Unix(timestamp/1e3, (timestamp%1e3)*1e6).UTC())
+*t = Timestamp(time.Unix(timestamp/1E3, (timestamp%1E3)*1E6).UTC())
return nil
}
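Worked example of the two conversions above (arithmetic added for illustration): B2 stores times as milliseconds since the Unix epoch, so 2001-02-03T04:05:06.123Z marshals to 981173106123 (its UnixNano value divided by 1e6), and unmarshalling splits that back into 981173106 whole seconds plus 123*1e6 nanoseconds.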
-// HasVersion returns true if it looks like the passed filename has a timestamp on it.
-//
-// Note that the passed filename's timestamp may still be invalid even if this
-// function returns true.
-func HasVersion(remote string) bool {
-return version.Match(remote)
-}
+const versionFormat = "-v2006-01-02-150405.000"
// AddVersion adds the timestamp as a version string into the filename passed in.
func (t Timestamp) AddVersion(remote string) string {
-return version.Add(remote, time.Time(t))
+ext := path.Ext(remote)
+base := remote[:len(remote)-len(ext)]
+s := time.Time(t).Format(versionFormat)
+// Replace the '.' with a '-'
+s = strings.Replace(s, ".", "-", -1)
+return base + s + ext
}
// RemoveVersion removes the timestamp from a filename as a version string.
@@ -80,9 +80,24 @@ func (t Timestamp) AddVersion(remote string) string {
// It returns the new file name and a timestamp, or the old filename
// and a zero timestamp.
func RemoveVersion(remote string) (t Timestamp, newRemote string) {
-time, newRemote := version.Remove(remote)
-t = Timestamp(time)
-return
+newRemote = remote
+ext := path.Ext(remote)
+base := remote[:len(remote)-len(ext)]
+if len(base) < len(versionFormat) {
+return
+}
+versionStart := len(base) - len(versionFormat)
+// Check it ends in -xxx
+if base[len(base)-4] != '-' {
+return
+}
+// Replace with .xxx for parsing
+base = base[:len(base)-4] + "." + base[len(base)-3:]
+newT, err := time.Parse(versionFormat, base[versionStart:])
+if err != nil {
+return
+}
+return Timestamp(newT), base[:versionStart] + ext
}
// IsZero returns true if the timestamp is uninitialized
@@ -322,11 +337,3 @@ type CopyFileRequest struct {
Info map[string]string `json:"fileInfo,omitempty"` // This field stores the metadata that will be stored with the file. (REPLACE only)
DestBucketID string `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used
}
// CopyPartRequest is the request for b2_copy_part - the response is UploadPartResponse
type CopyPartRequest struct {
SourceID string `json:"sourceFileId"` // The ID of the source file being copied.
LargeFileID string `json:"largeFileId"` // The ID of the large file the part will belong to, as returned by b2_start_large_file.
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
}


@@ -13,6 +13,7 @@ import (
var (
emptyT api.Timestamp
t0 = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123456789Z"))
+t0r = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123000000Z"))
t1 = api.Timestamp(fstest.Time("2001-02-03T04:05:06.123000000Z"))
)
@@ -35,6 +36,40 @@ func TestTimestampUnmarshalJSON(t *testing.T) {
assert.Equal(t, (time.Time)(t1), (time.Time)(tActual))
}
func TestTimestampAddVersion(t *testing.T) {
for _, test := range []struct {
t api.Timestamp
in string
expected string
}{
{t0, "potato.txt", "potato-v1970-01-01-010101-123.txt"},
{t1, "potato", "potato-v2001-02-03-040506-123"},
{t1, "", "-v2001-02-03-040506-123"},
} {
actual := test.t.AddVersion(test.in)
assert.Equal(t, test.expected, actual, test.in)
}
}
func TestTimestampRemoveVersion(t *testing.T) {
for _, test := range []struct {
in string
expectedT api.Timestamp
expectedRemote string
}{
{"potato.txt", emptyT, "potato.txt"},
{"potato-v1970-01-01-010101-123.txt", t0r, "potato.txt"},
{"potato-v2001-02-03-040506-123", t1, "potato"},
{"-v2001-02-03-040506-123", t1, ""},
{"potato-v2A01-02-03-040506-123", emptyT, "potato-v2A01-02-03-040506-123"},
{"potato-v2001-02-03-040506=123", emptyT, "potato-v2001-02-03-040506=123"},
} {
actualT, actualRemote := api.RemoveVersion(test.in)
assert.Equal(t, test.expectedT, actualT, test.in)
assert.Equal(t, test.expectedRemote, actualRemote, test.in)
}
}
func TestTimestampIsZero(t *testing.T) {
assert.True(t, emptyT.IsZero())
assert.False(t, t0.IsZero())

File diff suppressed because it is too large


@@ -20,9 +20,7 @@ import (
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/rest" "github.com/rclone/rclone/lib/rest"
"golang.org/x/sync/errgroup"
) )
type hashAppendingReader struct { type hashAppendingReader struct {
@@ -70,26 +68,20 @@ func newHashAppendingReader(in io.Reader, h gohash.Hash) *hashAppendingReader {
// largeUpload is used to control the upload of large files which need chunking
type largeUpload struct {
f *Fs // parent Fs
o *Object // object being uploaded
-doCopy bool // doing copy rather than upload
-what string // text name of operation for logs
in io.Reader // read the data from here
wrap accounting.WrapFn // account parts being transferred
id string // ID of the file being uploaded
size int64 // total size
parts int64 // calculated number of parts, if known
sha1s []string // slice of SHA1s for each part
uploadMu sync.Mutex // lock for upload variable
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
-chunkSize int64 // chunk size to use
-src *Object // if copying, object we are reading from
}
// newLargeUpload starts an upload of object o from in with metadata in src
-//
-// If newInfo is set then metadata from that will be used instead of reading it from src
-func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, chunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
+func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
remote := o.remote
size := src.Size()
parts := int64(0)
@@ -97,8 +89,8 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
if size == -1 {
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
} else {
-parts = size / int64(chunkSize)
-if size%int64(chunkSize) != 0 {
+parts = size / int64(o.fs.opt.ChunkSize)
+if size%int64(o.fs.opt.ChunkSize) != 0 {
parts++
}
if parts > maxParts {
if parts > maxParts { if parts > maxParts {
@@ -107,61 +99,50 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
sha1SliceSize = parts
}
+modTime := src.ModTime(ctx)
opts := rest.Opts{
Method: "POST",
Path: "/b2_start_large_file",
}
bucket, bucketPath := o.split()
-bucketID, err := f.getBucketID(ctx, bucket)
+bucketID, err := f.getBucketID(bucket)
if err != nil {
return nil, err
}
var request = api.StartLargeFileRequest{
BucketID: bucketID,
-Name: f.opt.Enc.FromStandardPath(bucketPath),
-}
-if newInfo == nil {
-modTime := src.ModTime(ctx)
-request.ContentType = fs.MimeType(ctx, src)
-request.Info = map[string]string{
-timeKey: timeString(modTime),
-}
-// Set the SHA1 if known
-if !o.fs.opt.DisableCheckSum || doCopy {
-if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
-request.Info[sha1Key] = calculatedSha1
-}
-}
-} else {
-request.ContentType = newInfo.ContentType
-request.Info = newInfo.Info
-}
+Name: bucketPath,
+ContentType: fs.MimeType(ctx, src),
+Info: map[string]string{
+timeKey: timeString(modTime),
+},
+}
+// Set the SHA1 if known
+if !o.fs.opt.DisableCheckSum {
+if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
+request.Info[sha1Key] = calculatedSha1
+}
+}
var response api.StartLargeFileResponse
err = f.pacer.Call(func() (bool, error) {
-resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
-return f.shouldRetry(ctx, resp, err)
+resp, err := f.srv.CallJSON(&opts, &request, &response)
+return f.shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
// unwrap the accounting from the input, we use wrap to put it
// back on after the buffering
-up = &largeUpload{
-f: f,
-o: o,
-doCopy: doCopy,
-what: "upload",
-id: response.ID,
-size: size,
-parts: parts,
-sha1s: make([]string, sha1SliceSize),
-chunkSize: int64(chunkSize),
-}
-if doCopy {
-up.what = "copy"
-up.src = src.(*Object)
-} else {
-up.in, up.wrap = accounting.UnWrap(in)
-}
+in, wrap := accounting.UnWrap(in)
+up = &largeUpload{
+f: f,
+o: o,
+in: in,
+wrap: wrap,
+id: response.ID,
+size: size,
+parts: parts,
+sha1s: make([]string, sha1SliceSize),
+}
return up, nil
}
@@ -169,7 +150,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
//
// This should be returned with returnUploadURL when finished
-func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
+func (up *largeUpload) getUploadURL() (upload *api.GetUploadPartURLResponse, err error) {
up.uploadMu.Lock()
defer up.uploadMu.Unlock()
if len(up.uploads) == 0 {
@@ -181,8 +162,8 @@ func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadP
ID: up.id,
}
err := up.f.pacer.Call(func() (bool, error) {
-resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
-return up.f.shouldRetry(ctx, resp, err)
+resp, err := up.f.srv.CallJSON(&opts, &request, &upload)
+return up.f.shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get upload URL")
@@ -203,13 +184,20 @@ func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
up.uploadMu.Unlock()
}
// clearUploadURL clears the current UploadURL and the AuthorizationToken
func (up *largeUpload) clearUploadURL() {
up.uploadMu.Lock()
up.uploads = nil
up.uploadMu.Unlock()
}
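The three helpers above manage a small pool of part-upload URLs. A sketch of the intended calling pattern, assembled from the functions shown rather than quoted from the source:

upload, err := up.getUploadURL()
if err != nil {
    return err
}
// ... POST the part body to upload.UploadURL using upload.AuthorizationToken ...
// On success, return the URL to the pool for reuse by the next part:
up.returnUploadURL(upload)
// After certain failures the pool can be dropped so fresh URLs are fetched:
// up.clearUploadURL()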
// Transfer a chunk
-func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
+func (up *largeUpload) transferChunk(part int64, body []byte) error {
err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
// Get upload URL
-upload, err := up.getUploadURL(ctx)
+upload, err := up.getUploadURL()
if err != nil {
return false, err
}
@@ -230,14 +218,14 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
//
// The number of bytes in the file being uploaded. Note that
// this header is required; you cannot leave it out and just
// use chunked encoding. The minimum size of every part but
-// the last one is 100 MB (100,000,000 bytes)
+// the last one is 100MB.
//
// X-Bz-Content-Sha1
//
// The SHA1 checksum of the this part of the file. B2 will
// check this when the part is uploaded, to make sure that the
// data arrived correctly. The same SHA1 checksum must be
// passed to b2_finish_large_file.
opts := rest.Opts{
Method: "POST",
@@ -253,8 +241,8 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
var response api.UploadPartResponse
-resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
-retry, err := up.f.shouldRetry(ctx, resp, err)
+resp, err := up.f.srv.CallJSON(&opts, nil, &response)
+retry, err := up.f.shouldRetry(resp, err)
if err != nil {
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
}
@@ -275,41 +263,9 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
return err
}
// Copy a chunk
func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64) error {
err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize)
opts := rest.Opts{
Method: "POST",
Path: "/b2_copy_part",
}
offset := (part - 1) * up.chunkSize // where we are in the source file
var request = api.CopyPartRequest{
SourceID: up.src.id,
LargeFileID: up.id,
PartNumber: part,
Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
}
var response api.UploadPartResponse
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
retry, err := up.f.shouldRetry(ctx, resp, err)
if err != nil {
fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err)
}
up.sha1s[part-1] = response.SHA1
return retry, err
})
if err != nil {
fs.Debugf(up.o, "Error copying chunk %d: %v", part, err)
} else {
fs.Debugf(up.o, "Done copying chunk %d", part)
}
return err
}
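Worked example of the Range header built above (arithmetic only, for illustration): with a chunk size of 100,000,000 bytes, part 3 starts at offset (3-1)*100,000,000 = 200,000,000, so a full part sends Range: bytes=200000000-299999999; the end offset is inclusive, hence offset+partSize-1.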
// finish closes off the large upload
-func (up *largeUpload) finish(ctx context.Context) error {
-fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts)
+func (up *largeUpload) finish() error {
+fs.Debugf(up.o, "Finishing large file upload with %d parts", up.parts)
opts := rest.Opts{
Method: "POST",
Path: "/b2_finish_large_file",
@@ -320,8 +276,8 @@ func (up *largeUpload) finish(ctx context.Context) error {
}
var response api.FileInfo
err := up.f.pacer.Call(func() (bool, error) {
-resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
-return up.f.shouldRetry(ctx, resp, err)
+resp, err := up.f.srv.CallJSON(&opts, &request, &response)
+return up.f.shouldRetry(resp, err)
})
if err != nil {
return err
@@ -330,8 +286,7 @@ func (up *largeUpload) finish(ctx context.Context) error {
}
// cancel aborts the large upload
-func (up *largeUpload) cancel(ctx context.Context) error {
-fs.Debugf(up.o, "Cancelling large file %s", up.what)
+func (up *largeUpload) cancel() error {
opts := rest.Opts{
Method: "POST",
Path: "/b2_cancel_large_file",
@@ -341,142 +296,142 @@ func (up *largeUpload) cancel(ctx context.Context) error {
}
var response api.CancelLargeFileResponse
err := up.f.pacer.Call(func() (bool, error) {
-resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
-return up.f.shouldRetry(ctx, resp, err)
+resp, err := up.f.srv.CallJSON(&opts, &request, &response)
+return up.f.shouldRetry(resp, err)
})
-if err != nil {
-fs.Errorf(up.o, "Failed to cancel large file %s: %v", up.what, err)
-}
return err
}
func (up *largeUpload) managedTransferChunk(wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
wg.Add(1)
go func(part int64, buf []byte) {
defer wg.Done()
defer up.f.putUploadBlock(buf)
err := up.transferChunk(part, buf)
if err != nil {
select {
case errs <- err:
default:
}
}
}(part, buf)
}
func (up *largeUpload) finishOrCancelOnError(err error, errs chan error) error {
if err == nil {
select {
case err = <-errs:
default:
}
}
if err != nil {
fs.Debugf(up.o, "Cancelling large file upload due to error: %v", err)
cancelErr := up.cancel()
if cancelErr != nil {
fs.Errorf(up.o, "Failed to cancel large file upload: %v", cancelErr)
}
return err
}
return up.finish()
}
// Stream uploads the chunks from the input, starting with a required initial
// chunk. Assumes the file size is unknown and will upload until the input
// reaches EOF.
-//
-// Note that initialUploadBlock must be returned to f.putBuf()
-func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
-defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
+func (up *largeUpload) Stream(initialUploadBlock []byte) (err error) {
fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
-var (
-g, gCtx = errgroup.WithContext(ctx)
-hasMoreParts = true
-)
+errs := make(chan error, 1)
+hasMoreParts := true
+var wg sync.WaitGroup
+// Transfer initial chunk
up.size = int64(len(initialUploadBlock))
-g.Go(func() error {
-for part := int64(1); hasMoreParts; part++ {
-// Get a block of memory from the pool and token which limits concurrency.
-var buf []byte
-if part == 1 {
-buf = initialUploadBlock
-} else {
-buf = up.f.getBuf(false)
-}
-// Fail fast, in case an errgroup managed function returns an error
-// gCtx is cancelled. There is no point in uploading all the other parts.
-if gCtx.Err() != nil {
-up.f.putBuf(buf, false)
-return nil
-}
-// Read the chunk
-var n int
-if part == 1 {
-n = len(buf)
-} else {
-n, err = io.ReadFull(up.in, buf)
-if err == io.ErrUnexpectedEOF {
-fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
-buf = buf[:n]
-hasMoreParts = false
-} else if err == io.EOF {
-fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
-up.f.putBuf(buf, false)
-return nil
-} else if err != nil {
-// other kinds of errors indicate failure
-up.f.putBuf(buf, false)
-return err
-}
-}
-// Keep stats up to date
-up.parts = part
-up.size += int64(n)
-if part > maxParts {
-up.f.putBuf(buf, false)
-return errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
-}
-part := part // for the closure
-g.Go(func() (err error) {
-defer up.f.putBuf(buf, false)
-return up.transferChunk(gCtx, part, buf)
-})
-}
-return nil
-})
-err = g.Wait()
-if err != nil {
-return err
-}
+up.managedTransferChunk(&wg, errs, 1, initialUploadBlock)
+outer:
+for part := int64(2); hasMoreParts; part++ {
+// Check any errors
+select {
+case err = <-errs:
+break outer
+default:
+}
+// Get a block of memory
+buf := up.f.getUploadBlock()
+// Read the chunk
+var n int
+n, err = io.ReadFull(up.in, buf)
+if err == io.ErrUnexpectedEOF {
+fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
+buf = buf[:n]
+hasMoreParts = false
+err = nil
+} else if err == io.EOF {
+fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
+up.f.putUploadBlock(buf)
+err = nil
+break outer
+} else if err != nil {
+// other kinds of errors indicate failure
+up.f.putUploadBlock(buf)
+break outer
+}
+// Keep stats up to date
+up.parts = part
+up.size += int64(n)
+if part > maxParts {
+err = errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
+break outer
+}
+// Transfer the chunk
+up.managedTransferChunk(&wg, errs, part, buf)
+}
+wg.Wait()
up.sha1s = up.sha1s[:up.parts]
-return up.finish(ctx)
+return up.finishOrCancelOnError(err, errs)
}
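Both versions of Stream rely on the same io.ReadFull idiom to find the last chunk. A minimal standalone sketch of that loop, assuming nothing beyond the standard library (illustrative, not part of the diff):

func readChunks(r io.Reader, chunkSize int) ([][]byte, error) {
    var chunks [][]byte
    for {
        buf := make([]byte, chunkSize)
        n, err := io.ReadFull(r, buf)
        switch err {
        case nil:
            chunks = append(chunks, buf) // full chunk, keep reading
        case io.ErrUnexpectedEOF:
            return append(chunks, buf[:n]), nil // short final chunk
        case io.EOF:
            return chunks, nil // input ended exactly on a chunk boundary
        default:
            return chunks, err
        }
    }
}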
// Upload uploads the chunks from the input
-func (up *largeUpload) Upload(ctx context.Context) (err error) {
-defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
-fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
-var (
-g, gCtx = errgroup.WithContext(ctx)
-remaining = up.size
-)
-g.Go(func() error {
-for part := int64(1); part <= up.parts; part++ {
-// Get a block of memory from the pool and token which limits concurrency.
-buf := up.f.getBuf(up.doCopy)
-// Fail fast, in case an errgroup managed function returns an error
-// gCtx is cancelled. There is no point in uploading all the other parts.
-if gCtx.Err() != nil {
-up.f.putBuf(buf, up.doCopy)
-return nil
-}
-reqSize := remaining
-if reqSize >= up.chunkSize {
-reqSize = up.chunkSize
-}
-if !up.doCopy {
-// Read the chunk
-buf = buf[:reqSize]
-_, err = io.ReadFull(up.in, buf)
-if err != nil {
-up.f.putBuf(buf, up.doCopy)
-return err
-}
-}
-part := part // for the closure
-g.Go(func() (err error) {
-defer up.f.putBuf(buf, up.doCopy)
-if !up.doCopy {
-err = up.transferChunk(gCtx, part, buf)
-} else {
-err = up.copyChunk(gCtx, part, reqSize)
-}
-return err
-})
-remaining -= reqSize
-}
-return nil
-})
-err = g.Wait()
-if err != nil {
-return err
-}
-return up.finish(ctx)
+func (up *largeUpload) Upload() error {
+fs.Debugf(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
+remaining := up.size
+errs := make(chan error, 1)
+var wg sync.WaitGroup
+var err error
+outer:
+for part := int64(1); part <= up.parts; part++ {
+// Check any errors
+select {
+case err = <-errs:
+break outer
+default:
+}
+reqSize := remaining
+if reqSize >= int64(up.f.opt.ChunkSize) {
+reqSize = int64(up.f.opt.ChunkSize)
+}
+// Get a block of memory
+buf := up.f.getUploadBlock()[:reqSize]
+// Read the chunk
+_, err = io.ReadFull(up.in, buf)
+if err != nil {
+up.f.putUploadBlock(buf)
+break outer
+}
+// Transfer the chunk
+up.managedTransferChunk(&wg, errs, part, buf)
+remaining -= reqSize
+}
+wg.Wait()
+return up.finishOrCancelOnError(err, errs)
}


@@ -36,13 +36,13 @@ func (t *Time) UnmarshalJSON(data []byte) error {
// Error is returned from box when things go wrong
type Error struct {
Type string `json:"type"`
Status int `json:"status"`
Code string `json:"code"`
-ContextInfo json.RawMessage `json:"context_info"`
+ContextInfo json.RawMessage
HelpURL string `json:"help_url"`
Message string `json:"message"`
RequestID string `json:"request_id"`
}
// Error returns a string for the error and satisfies the error interface
@@ -132,38 +132,6 @@ type UploadFile struct {
ContentModifiedAt Time `json:"content_modified_at"`
}
// PreUploadCheck is the request for upload preflight check
type PreUploadCheck struct {
Name string `json:"name"`
Parent Parent `json:"parent"`
Size *int64 `json:"size,omitempty"`
}
// PreUploadCheckResponse is the response from upload preflight check
// if successful
type PreUploadCheckResponse struct {
UploadToken string `json:"upload_token"`
UploadURL string `json:"upload_url"`
}
// PreUploadCheckConflict is returned in the ContextInfo error field
// from PreUploadCheck when the error code is "item_name_in_use"
type PreUploadCheckConflict struct {
Conflicts struct {
Type string `json:"type"`
ID string `json:"id"`
FileVersion struct {
Type string `json:"type"`
ID string `json:"id"`
Sha1 string `json:"sha1"`
} `json:"file_version"`
SequenceID string `json:"sequence_id"`
Etag string `json:"etag"`
Sha1 string `json:"sha1"`
Name string `json:"name"`
} `json:"conflicts"`
}
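For illustration, and as an assumption rather than something stated in this diff: Box's preflight check is an OPTIONS request to /files/content, so a PreUploadCheck for a 1024-byte file in the root folder would marshal to {"name":"file.txt","parent":{"id":"0"},"size":1024}, and a name clash is reported as an Error with Code "item_name_in_use" whose ContextInfo field unmarshals into the PreUploadCheckConflict shape above.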
// UpdateFileModTime is used in Update File Info
type UpdateFileModTime struct {
ContentModifiedAt Time `json:"content_modified_at"`
@@ -234,43 +202,3 @@ type CommitUpload struct {
ContentModifiedAt Time `json:"content_modified_at"`
} `json:"attributes"`
}
// ConfigJSON defines the shape of a box config.json
type ConfigJSON struct {
BoxAppSettings AppSettings `json:"boxAppSettings"`
EnterpriseID string `json:"enterpriseID"`
}
// AppSettings defines the shape of the boxAppSettings within box config.json
type AppSettings struct {
ClientID string `json:"clientID"`
ClientSecret string `json:"clientSecret"`
AppAuth AppAuth `json:"appAuth"`
}
// AppAuth defines the shape of the appAuth within boxAppSettings in config.json
type AppAuth struct {
PublicKeyID string `json:"publicKeyID"`
PrivateKey string `json:"privateKey"`
Passphrase string `json:"passphrase"`
}
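An illustrative config.json matching these structs, with placeholder values inferred from the struct tags above:

{
    "boxAppSettings": {
        "clientID": "...",
        "clientSecret": "...",
        "appAuth": {
            "publicKeyID": "...",
            "privateKey": "-----BEGIN ENCRYPTED PRIVATE KEY-----\n...",
            "passphrase": "..."
        }
    },
    "enterpriseID": "123456"
}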
// User is returned from /users/me
type User struct {
Type string `json:"type"`
ID string `json:"id"`
Name string `json:"name"`
Login string `json:"login"`
CreatedAt time.Time `json:"created_at"`
ModifiedAt time.Time `json:"modified_at"`
Language string `json:"language"`
Timezone string `json:"timezone"`
SpaceAmount int64 `json:"space_amount"`
SpaceUsed int64 `json:"space_used"`
MaxUploadSize int64 `json:"max_upload_size"`
Status string `json:"status"`
JobTitle string `json:"job_title"`
Phone string `json:"phone"`
Address string `json:"address"`
AvatarURL string `json:"avatar_url"`
}


@@ -11,12 +11,9 @@ package box
import (
"context"
-"crypto/rsa"
-"encoding/json"
-"encoding/pem"
"fmt"
"io"
-"io/ioutil"
+"log"
"net/http"
"net/url"
"path"
@@ -24,12 +21,6 @@ import (
"strings" "strings"
"time" "time"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/jwtutil"
"github.com/youmark/pkcs8"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/backend/box/api" "github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
@@ -38,14 +29,12 @@ import (
"github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache" "github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/oauthutil" "github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest" "github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2" "golang.org/x/oauth2"
"golang.org/x/oauth2/jws"
) )
const (
@@ -53,13 +42,13 @@ const (
rcloneEncryptedClientSecret = "sYbJYm99WB8jzeaLPU0OPDMJKIkZvD2qOn3SyEMfiJr03RdtDt3xcZEIudRhbIDL" rcloneEncryptedClientSecret = "sYbJYm99WB8jzeaLPU0OPDMJKIkZvD2qOn3SyEMfiJr03RdtDt3xcZEIudRhbIDL"
minSleep = 10 * time.Millisecond minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential decayConstant = 2 // bigger for slower decay, exponential
rootID = "0" // ID of root folder is always this
rootURL = "https://api.box.com/2.0" rootURL = "https://api.box.com/2.0"
uploadURL = "https://upload.box.com/api/2.0" uploadURL = "https://upload.box.com/api/2.0"
listChunks = 1000 // chunk size to read directory listings listChunks = 1000 // chunk size to read directory listings
minUploadCutoff = 50000000 // upload cutoff can be no lower than this minUploadCutoff = 50000000 // upload cutoff can be no lower than this
defaultUploadCutoff = 50 * 1024 * 1024 defaultUploadCutoff = 50 * 1024 * 1024
tokenURL = "https://api.box.com/oauth2/token"
) )
// Globals
@@ -83,49 +72,21 @@ func init() {
Name: "box", Name: "box",
Description: "Box", Description: "Box",
NewFs: NewFs, NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) { Config: func(name string, m configmap.Mapper) {
jsonFile, ok := m.Get("box_config_file") err := oauthutil.Config("box", name, m, oauthConfig)
boxSubType, boxSubTypeOk := m.Get("box_sub_type") if err != nil {
boxAccessToken, boxAccessTokenOk := m.Get("access_token") log.Fatalf("Failed to configure token: %v", err)
var err error
// If using box config.json, use JWT auth
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
if err != nil {
return nil, errors.Wrap(err, "failed to configure token with jwt authentication")
}
// Else, if not using an access token, use oauth2
} else if boxAccessToken == "" || !boxAccessTokenOk {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
} }
return nil, nil
}, },
Options: append(oauthutil.SharedOptions, []fs.Option{{ Options: []fs.Option{{
Name: "root_folder_id", Name: config.ConfigClientID,
Help: "Fill in for rclone to use a non root folder as its starting point.", Help: "Box App Client Id.\nLeave blank normally.",
Default: "0",
Advanced: true,
}, { }, {
Name: "box_config_file", Name: config.ConfigClientSecret,
Help: "Box App config.json location\nLeave blank normally." + env.ShellExpandHelp, Help: "Box App Client Secret\nLeave blank normally.",
}, {
Name: "access_token",
Help: "Box App Primary Access Token\nLeave blank normally.",
}, {
Name: "box_sub_type",
Default: "user",
Examples: []fs.OptionExample{{
Value: "user",
Help: "Rclone should act on behalf of a user",
}, {
Value: "enterprise",
Help: "Rclone should act on behalf of a service account",
}},
}, { }, {
Name: "upload_cutoff", Name: "upload_cutoff",
Help: "Cutoff for switching to multipart upload (>= 50 MiB).", Help: "Cutoff for switching to multipart upload (>= 50MB).",
Default: fs.SizeSuffix(defaultUploadCutoff), Default: fs.SizeSuffix(defaultUploadCutoff),
Advanced: true, Advanced: true,
}, { }, {
@@ -133,120 +94,14 @@ func init() {
Help: "Max number of times to try committing a multipart file.", Help: "Max number of times to try committing a multipart file.",
Default: 100, Default: 100,
Advanced: true, Advanced: true,
}, { }},
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// From https://developer.box.com/docs/error-codes#section-400-bad-request :
// > Box only supports file or folder names that are 255 characters or less.
// > File names containing non-printable ascii, "/" or "\", names with leading
// > or trailing spaces, and the special names “.” and “..” are also unsupported.
//
// Testing revealed names with leading spaces work fine.
// Also encode invalid UTF-8 bytes as json doesn't handle them properly.
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8),
}}...),
}) })
} }
-func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
-	jsonFile = env.ShellExpand(jsonFile)
-	boxConfig, err := getBoxConfig(jsonFile)
-	if err != nil {
-		return errors.Wrap(err, "get box config")
-	}
-	privateKey, err := getDecryptedPrivateKey(boxConfig)
-	if err != nil {
-		return errors.Wrap(err, "get decrypted private key")
-	}
-	claims, err := getClaims(boxConfig, boxSubType)
-	if err != nil {
-		return errors.Wrap(err, "get claims")
-	}
-	signingHeaders := getSigningHeaders(boxConfig)
-	queryParams := getQueryParams(boxConfig)
-	client := fshttp.NewClient(ctx)
-	err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
-	return err
-}
-
-func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
-	file, err := ioutil.ReadFile(configFile)
-	if err != nil {
-		return nil, errors.Wrap(err, "box: failed to read Box config")
-	}
-	err = json.Unmarshal(file, &boxConfig)
-	if err != nil {
-		return nil, errors.Wrap(err, "box: failed to parse Box config")
-	}
-	return boxConfig, nil
-}
-
-func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
-	val, err := jwtutil.RandomHex(20)
-	if err != nil {
-		return nil, errors.Wrap(err, "box: failed to generate random string for jti")
-	}
-	claims = &jws.ClaimSet{
-		Iss: boxConfig.BoxAppSettings.ClientID,
-		Sub: boxConfig.EnterpriseID,
-		Aud: tokenURL,
-		Exp: time.Now().Add(time.Second * 45).Unix(),
-		PrivateClaims: map[string]interface{}{
-			"box_sub_type": boxSubType,
-			"aud":          tokenURL,
-			"jti":          val,
-		},
-	}
-	return claims, nil
-}
-
-func getSigningHeaders(boxConfig *api.ConfigJSON) *jws.Header {
-	signingHeaders := &jws.Header{
-		Algorithm: "RS256",
-		Typ:       "JWT",
-		KeyID:     boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
-	}
-	return signingHeaders
-}
-
-func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
-	queryParams := map[string]string{
-		"client_id":     boxConfig.BoxAppSettings.ClientID,
-		"client_secret": boxConfig.BoxAppSettings.ClientSecret,
-	}
-	return queryParams
-}
-
-func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
-	block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
-	if len(rest) > 0 {
-		return nil, errors.Wrap(err, "box: extra data included in private key")
-	}
-	rsaKey, err := pkcs8.ParsePKCS8PrivateKey(block.Bytes, []byte(boxConfig.BoxAppSettings.AppAuth.Passphrase))
-	if err != nil {
-		return nil, errors.Wrap(err, "box: failed to decrypt private key")
-	}
-	return rsaKey.(*rsa.PrivateKey), nil
-}
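For context on the JWT helpers removed above: they read the Box config.json, build an RS256-signed claim set, and exchange it for an OAuth2 access token at the token endpoint. A minimal standalone sketch of that exchange, assuming golang.org/x/oauth2/jws and Box's documented jwt-bearer grant; the function and its parameters are illustrative, not rclone's API:

    package boxauth

    import (
    	"crypto/rsa"
    	"fmt"
    	"net/http"
    	"net/url"
    	"time"

    	"golang.org/x/oauth2/jws"
    )

    // BoxJWTToken signs a JWT assertion with the app's private key and
    // trades it for an access token at the Box token endpoint. The
    // parameter names mirror fields of the Box config.json shown above.
    func BoxJWTToken(clientID, clientSecret, enterpriseID, keyID, jti string, key *rsa.PrivateKey) (*http.Response, error) {
    	claims := &jws.ClaimSet{
    		Iss: clientID,
    		Sub: enterpriseID,
    		Aud: "https://api.box.com/oauth2/token",
    		Exp: time.Now().Add(45 * time.Second).Unix(),
    		PrivateClaims: map[string]interface{}{
    			"box_sub_type": "enterprise",
    			"jti":          jti, // must be unique per request
    		},
    	}
    	header := &jws.Header{Algorithm: "RS256", Typ: "JWT", KeyID: keyID}
    	assertion, err := jws.Encode(header, claims, key)
    	if err != nil {
    		return nil, fmt.Errorf("sign assertion: %w", err)
    	}
    	// RFC 7523 jwt-bearer grant, as used by Box server authentication.
    	return http.PostForm("https://api.box.com/oauth2/token", url.Values{
    		"grant_type":    {"urn:ietf:params:oauth:grant-type:jwt-bearer"},
    		"assertion":     {assertion},
    		"client_id":     {clientID},
    		"client_secret": {clientSecret},
    	})
    }

Box rejects assertions that expire too far in the future and requires a unique jti per request, which is presumably why getClaims above uses a 45 second expiry and a random hex jti.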
// Options defines the configuration for this backend
type Options struct {
	UploadCutoff  fs.SizeSuffix        `config:"upload_cutoff"`
	CommitRetries int                  `config:"commit_retries"`
-	Enc           encoder.MultiEncoder `config:"encoding"`
-	RootFolderID  string               `config:"root_folder_id"`
-	AccessToken   string               `config:"access_token"`
}

// Fs represents a remote box
@@ -298,7 +153,7 @@ func (f *Fs) Features() *fs.Features {
	return f.features
}

-// parsePath parses a box 'url'
+// parsePath parses an box 'url'
func parsePath(path string) (root string) {
	root = strings.Trim(path, "/")
	return
@@ -316,23 +171,32 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried.  It returns the err as a convenience
-func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
-	if fserrors.ContextError(ctx, &err) {
-		return false, err
-	}
+func shouldRetry(resp *http.Response, err error) (bool, error) {
	authRetry := false

-	if resp != nil && resp.StatusCode == 401 && strings.Contains(resp.Header.Get("Www-Authenticate"), "expired_token") {
+	if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
		authRetry = true
		fs.Debugf(nil, "Should retry: %v", err)
	}
	return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

+// substitute reserved characters for box
+func replaceReservedChars(x string) string {
+	// Backslash for FULLWIDTH REVERSE SOLIDUS
+	return strings.Replace(x, "\\", "＼", -1)
+}
+
+// restore reserved characters for box
+func restoreReservedChars(x string) string {
+	// FULLWIDTH REVERSE SOLIDUS for Backslash
+	return strings.Replace(x, "＼", "\\", -1)
+}
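The two helpers above are the v1.49 name encoding in its entirety: Box rejects names containing "\", so it is swapped for the visually similar U+FF3C FULLWIDTH REVERSE SOLIDUS on the way in and swapped back on listing. A tiny self-contained illustration of the round trip (the sample name is made up):

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	// "\" is forbidden in Box names, so store it as U+FF3C
    	// (FULLWIDTH REVERSE SOLIDUS) and undo the mapping on listing.
    	name := `dir\file.txt`
    	encoded := strings.Replace(name, `\`, "＼", -1)
    	decoded := strings.Replace(encoded, "＼", `\`, -1)
    	fmt.Println(encoded)         // dir＼file.txt
    	fmt.Println(decoded == name) // true
    }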
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
	// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
-	leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false)
+	leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, path, false)
	if err != nil {
		if err == fs.ErrorDirNotFound {
			return nil, fs.ErrorObjectNotFound
@@ -340,8 +204,8 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
		return nil, err
	}

-	found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
-		if strings.EqualFold(item.Name, leaf) {
+	found, err := f.listAll(directoryID, false, true, func(item *api.Item) bool {
+		if item.Name == leaf {
			info = item
			return true
		}
@@ -374,7 +238,8 @@ func errorHandler(resp *http.Response) error {
}

// NewFs constructs an Fs from the path, container:path
-func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	ctx := context.Background()
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
@@ -387,60 +252,32 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	}
	root = parsePath(root)

-	client := fshttp.NewClient(ctx)
-	var ts *oauthutil.TokenSource
-	// If not using an accessToken, create an oauth client and tokensource
-	if opt.AccessToken == "" {
-		client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
-		if err != nil {
-			return nil, errors.Wrap(err, "failed to configure Box")
-		}
-	}
+	oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to configure Box")
+	}

-	ci := fs.GetConfig(ctx)
	f := &Fs{
		name:        name,
		root:        root,
		opt:         *opt,
-		srv:         rest.NewClient(client).SetRoot(rootURL),
-		pacer:       fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
-		uploadToken: pacer.NewTokenDispenser(ci.Transfers),
+		srv:         rest.NewClient(oAuthClient).SetRoot(rootURL),
+		pacer:       fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
	}
	f.features = (&fs.Features{
		CaseInsensitive:         true,
		CanHaveEmptyDirectories: true,
-	}).Fill(ctx, f)
+	}).Fill(f)
	f.srv.SetErrorHandler(errorHandler)

-	// If using an accessToken, set the Authorization header
-	if f.opt.AccessToken != "" {
-		f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken)
-	}
-
-	jsonFile, ok := m.Get("box_config_file")
-	boxSubType, boxSubTypeOk := m.Get("box_sub_type")
-	if ts != nil {
-		// If using box config.json and JWT, renewing should just refresh the token and
-		// should do so whether there are uploads pending or not.
-		if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
-			f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
-				err := refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
-				return err
-			})
-			f.tokenRenewer.Start()
-		} else {
-			// Renew the token in the background
-			f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
-				_, err := f.readMetaDataForPath(ctx, "")
-				return err
-			})
-		}
-	}
-
-	// Get rootFolderID
-	rootID := f.opt.RootFolderID
+	// Renew the token in the background
+	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
+		_, err := f.readMetaDataForPath(ctx, "")
+		return err
+	})
+
+	// Get rootID
	f.dirCache = dircache.New(root, rootID, f)

	// Find the current root
@@ -465,7 +302,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		}
		return nil, err
	}
-	f.features.Fill(ctx, &tempF)
+	f.features.Fill(&tempF)
	// XXX: update the old f here instead of returning tempF, since
	// `features` were already filled with functions having *f as a receiver.
	// See https://github.com/rclone/rclone/issues/2182
@@ -515,8 +352,8 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
	// Find the leaf in pathID
-	found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
-		if strings.EqualFold(item.Name, leaf) {
+	found, err = f.listAll(pathID, true, false, func(item *api.Item) bool {
+		if item.Name == leaf {
			pathIDOut = item.ID
			return true
		}
@@ -543,14 +380,14 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
		Parameters: fieldsValue(),
	}
	mkdir := api.CreateFolder{
-		Name: f.opt.Enc.FromStandardName(leaf),
+		Name: replaceReservedChars(leaf),
		Parent: api.Parent{
			ID: pathID,
		},
	}
	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
-		return shouldRetry(ctx, resp, err)
+		resp, err = f.srv.CallJSON(&opts, &mkdir, &info)
+		return shouldRetry(resp, err)
	})
	if err != nil {
		//fmt.Printf("...Error %v\n", err)
@@ -571,7 +408,7 @@ type listAllFn func(*api.Item) bool

// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
-func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
+func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
	opts := rest.Opts{
		Method: "GET",
		Path:   "/folders/" + dirID + "/items",
@@ -586,8 +423,8 @@ OUTER:
		var result api.FolderItems
		var resp *http.Response
		err = f.pacer.Call(func() (bool, error) {
-			resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-			return shouldRetry(ctx, resp, err)
+			resp, err = f.srv.CallJSON(&opts, nil, &result)
+			return shouldRetry(resp, err)
		})
		if err != nil {
			return found, errors.Wrap(err, "couldn't list files")
@@ -609,7 +446,7 @@ OUTER:
			if item.ItemStatus != api.ItemStatusActive {
				continue
			}
-			item.Name = f.opt.Enc.ToStandardName(item.Name)
+			item.Name = restoreReservedChars(item.Name)
			if fn(item) {
				found = true
				break OUTER
@@ -633,12 +470,16 @@ OUTER:
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	err = f.dirCache.FindRoot(ctx, false)
+	if err != nil {
+		return nil, err
+	}
	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return nil, err
	}

	var iErr error
-	_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
+	_, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
		remote := path.Join(dir, info.Name)
		if info.Type == api.ItemTypeFolder {
			// cache the directory ID for later lookups
@@ -673,7 +514,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
	// Create the directory for the object if it doesn't exist
-	leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true)
+	leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true)
	if err != nil {
		return
	}
@@ -685,80 +526,22 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
	return o, leaf, directoryID, nil
}
-// preUploadCheck checks to see if a file can be uploaded
-//
-// It returns "", nil if the file is good to go
-// It returns "ID", nil if the file must be updated
-func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (ID string, err error) {
-	check := api.PreUploadCheck{
-		Name: f.opt.Enc.FromStandardName(leaf),
-		Parent: api.Parent{
-			ID: directoryID,
-		},
-	}
-	if size >= 0 {
-		check.Size = &size
-	}
-	opts := rest.Opts{
-		Method: "OPTIONS",
-		Path:   "/files/content/",
-	}
-	var result api.PreUploadCheckResponse
-	var resp *http.Response
-	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(ctx, &opts, &check, &result)
-		return shouldRetry(ctx, resp, err)
-	})
-	if err != nil {
-		if apiErr, ok := err.(*api.Error); ok && apiErr.Code == "item_name_in_use" {
-			var conflict api.PreUploadCheckConflict
-			err = json.Unmarshal(apiErr.ContextInfo, &conflict)
-			if err != nil {
-				return "", errors.Wrap(err, "pre-upload check: JSON decode failed")
-			}
-			if conflict.Conflicts.Type != api.ItemTypeFile {
-				return "", errors.Wrap(err, "pre-upload check: can't overwrite non file with file")
-			}
-			return conflict.Conflicts.ID, nil
-		}
-		return "", errors.Wrap(err, "pre-upload check")
-	}
-	return "", nil
-}
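The preflight check above boils down to a single Box API call: OPTIONS on /files/content with the candidate name, parent ID and size, where a 409 conflict with code item_name_in_use signals an existing item. A bare net/http sketch of the same request; the endpoint and status code follow Box's public API, while the function and its parameters are illustrative:

    package boxcheck

    import (
    	"bytes"
    	"encoding/json"
    	"fmt"
    	"net/http"
    )

    // PreflightCheck asks Box whether a file called name could be created
    // in parentID. Box answers 200 if the upload would be accepted, and
    // 409 ("item_name_in_use") if an item with that name already exists.
    func PreflightCheck(token, name, parentID string, size int64) error {
    	body, err := json.Marshal(map[string]interface{}{
    		"name":   name,
    		"parent": map[string]string{"id": parentID},
    		"size":   size,
    	})
    	if err != nil {
    		return err
    	}
    	req, err := http.NewRequest("OPTIONS", "https://api.box.com/2.0/files/content", bytes.NewReader(body))
    	if err != nil {
    		return err
    	}
    	req.Header.Set("Authorization", "Bearer "+token)
    	req.Header.Set("Content-Type", "application/json")
    	resp, err := http.DefaultClient.Do(req)
    	if err != nil {
    		return err
    	}
    	defer resp.Body.Close()
    	if resp.StatusCode == http.StatusConflict {
    		return fmt.Errorf("name %q already in use", name)
    	}
    	return nil
    }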
// Put the object
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	// If directory doesn't exist, file doesn't exist so can upload
-	remote := src.Remote()
-	leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
-	if err != nil {
-		if err == fs.ErrorDirNotFound {
-			return f.PutUnchecked(ctx, in, src, options...)
-		}
-		return nil, err
-	}
-
-	// Preflight check the upload, which returns the ID if the
-	// object already exists
-	ID, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
-	if err != nil {
-		return nil, err
-	}
-	if ID == "" {
-		return f.PutUnchecked(ctx, in, src, options...)
-	}
-
-	// If object exists then create a skeleton one with just id
-	o := &Object{
-		fs:     f,
-		remote: remote,
-		id:     ID,
-	}
-	return o, o.Update(ctx, in, src, options...)
+	existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil)
+	switch err {
+	case nil:
+		return existingObj, existingObj.Update(ctx, in, src, options...)
+	case fs.ErrorObjectNotFound:
+		// Not found so create it
+		return f.PutUnchecked(ctx, in, src)
+	default:
+		return nil, err
+	}
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
@@ -787,20 +570,26 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
-	_, err := f.dirCache.FindDir(ctx, dir, true)
+	err := f.dirCache.FindRoot(ctx, true)
+	if err != nil {
+		return err
+	}
+	if dir != "" {
+		_, err = f.dirCache.FindDir(ctx, dir, true)
+	}
	return err
}

// deleteObject removes an object by ID
-func (f *Fs) deleteObject(ctx context.Context, id string) error {
+func (f *Fs) deleteObject(id string) error {
	opts := rest.Opts{
		Method:     "DELETE",
		Path:       "/files/" + id,
		NoResponse: true,
	}
	return f.pacer.Call(func() (bool, error) {
-		resp, err := f.srv.Call(ctx, &opts)
-		return shouldRetry(ctx, resp, err)
+		resp, err := f.srv.Call(&opts)
+		return shouldRetry(resp, err)
	})
}
@@ -812,6 +601,10 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
		return errors.New("can't purge root directory")
	}
	dc := f.dirCache
+	err := dc.FindRoot(ctx, false)
+	if err != nil {
+		return err
+	}
	rootID, err := dc.FindDir(ctx, dir, false)
	if err != nil {
		return err
@@ -826,8 +619,8 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
	opts.Parameters.Set("recursive", strconv.FormatBool(!check))
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.Call(ctx, &opts)
-		return shouldRetry(ctx, resp, err)
+		resp, err = f.srv.Call(&opts)
+		return shouldRetry(resp, err)
	})
	if err != nil {
		return errors.Wrap(err, "rmdir failed")
@@ -851,7 +644,7 @@ func (f *Fs) Precision() time.Duration {
	return time.Second
}

-// Copy src to this remote using server-side copy operations.
+// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
@@ -889,8 +682,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
		Path:       "/files/" + srcObj.id + "/copy",
		Parameters: fieldsValue(),
	}
+	replacedLeaf := replaceReservedChars(leaf)
	copyFile := api.CopyFile{
-		Name: f.opt.Enc.FromStandardName(leaf),
+		Name: replacedLeaf,
		Parent: api.Parent{
			ID: directoryID,
		},
@@ -898,8 +692,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
	var resp *http.Response
	var info *api.Item
	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(ctx, &opts, &copyFile, &info)
-		return shouldRetry(ctx, resp, err)
+		resp, err = f.srv.CallJSON(&opts, &copyFile, &info)
+		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
@@ -916,12 +710,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
-func (f *Fs) Purge(ctx context.Context, dir string) error {
-	return f.purgeCheck(ctx, dir, false)
+func (f *Fs) Purge(ctx context.Context) error {
+	return f.purgeCheck(ctx, "", false)
}

// move a file or folder
-func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (info *api.Item, err error) {
+func (f *Fs) move(endpoint, id, leaf, directoryID string) (info *api.Item, err error) {
	// Move the object
	opts := rest.Opts{
		Method:     "PUT",
@@ -929,15 +723,15 @@ func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (
		Parameters: fieldsValue(),
	}
	move := api.UpdateFileMove{
-		Name: f.opt.Enc.FromStandardName(leaf),
+		Name: replaceReservedChars(leaf),
		Parent: api.Parent{
			ID: directoryID,
		},
	}
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
-		return shouldRetry(ctx, resp, err)
+		resp, err = f.srv.CallJSON(&opts, &move, &info)
+		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
@@ -945,31 +739,7 @@ func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (
	return info, nil
}
-// About gets quota information
-func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
-	opts := rest.Opts{
-		Method: "GET",
-		Path:   "/users/me",
-	}
-	var user api.User
-	var resp *http.Response
-	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(ctx, &opts, nil, &user)
-		return shouldRetry(ctx, resp, err)
-	})
-	if err != nil {
-		return nil, errors.Wrap(err, "failed to read user info")
-	}
-	// FIXME max upload size would be useful to use in Update
-	usage = &fs.Usage{
-		Used:  fs.NewUsageValue(user.SpaceUsed),                    // bytes in use
-		Total: fs.NewUsageValue(user.SpaceAmount),                  // bytes total
-		Free:  fs.NewUsageValue(user.SpaceAmount - user.SpaceUsed), // bytes free
-	}
-	return usage, nil
-}
-
-// Move src to this remote using server-side move operations.
+// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
@@ -992,7 +762,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
	}

	// Do the move
-	info, err := f.move(ctx, "/files/", srcObj.id, leaf, directoryID)
+	info, err := f.move("/files/", srcObj.id, leaf, directoryID)
	if err != nil {
		return nil, err
	}
@@ -1005,7 +775,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}

// DirMove moves src, srcRemote to this remote at dstRemote
-// using server-side move operations.
+// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -1018,14 +788,64 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}
+	srcPath := path.Join(srcFs.root, srcRemote)
+	dstPath := path.Join(f.root, dstRemote)

-	srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
+	// Refuse to move to or from the root
+	if srcPath == "" || dstPath == "" {
+		fs.Debugf(src, "DirMove error: Can't move root")
+		return errors.New("can't move root directory")
+	}
+
+	// find the root src directory
+	err := srcFs.dirCache.FindRoot(ctx, false)
+	if err != nil {
+		return err
+	}
+
+	// find the root dst directory
+	if dstRemote != "" {
+		err = f.dirCache.FindRoot(ctx, true)
+		if err != nil {
+			return err
+		}
+	} else {
+		if f.dirCache.FoundRoot() {
+			return fs.ErrorDirExists
+		}
+	}
+
+	// Find ID of dst parent, creating subdirs if necessary
+	var leaf, directoryID string
+	findPath := dstRemote
+	if dstRemote == "" {
+		findPath = f.root
+	}
+	leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true)
+	if err != nil {
+		return err
+	}
+
+	// Check destination does not exist
+	if dstRemote != "" {
+		_, err = f.dirCache.FindDir(ctx, dstRemote, false)
+		if err == fs.ErrorDirNotFound {
+			// OK
+		} else if err != nil {
+			return err
+		} else {
+			return fs.ErrorDirExists
+		}
+	}
+
+	// Find ID of src
+	srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
	if err != nil {
		return err
	}

	// Do the move
-	_, err = f.move(ctx, "/folders/", srcID, dstLeaf, dstDirectoryID)
+	_, err = f.move("/folders/", srcID, leaf, directoryID)
	if err != nil {
		return err
	}
@@ -1034,7 +854,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
-func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
+func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
	id, err := f.dirCache.FindDir(ctx, remote, false)
	var opts rest.Opts
	if err == nil {
@@ -1067,72 +887,12 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
	var info api.Item
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(ctx, &opts, &shareLink, &info)
-		return shouldRetry(ctx, resp, err)
+		resp, err = f.srv.CallJSON(&opts, &shareLink, &info)
+		return shouldRetry(resp, err)
	})
	return info.SharedLink.URL, err
}
-// deletePermanently permanently deletes a trashed file
-func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
-	opts := rest.Opts{
-		Method:     "DELETE",
-		NoResponse: true,
-	}
-	if itemType == api.ItemTypeFile {
-		opts.Path = "/files/" + id + "/trash"
-	} else {
-		opts.Path = "/folders/" + id + "/trash"
-	}
-	return f.pacer.Call(func() (bool, error) {
-		resp, err := f.srv.Call(ctx, &opts)
-		return shouldRetry(ctx, resp, err)
-	})
-}
-
-// CleanUp empties the trash
-func (f *Fs) CleanUp(ctx context.Context) (err error) {
-	opts := rest.Opts{
-		Method: "GET",
-		Path:   "/folders/trash/items",
-		Parameters: url.Values{
-			"fields": []string{"type", "id"},
-		},
-	}
-	opts.Parameters.Set("limit", strconv.Itoa(listChunks))
-	offset := 0
-	for {
-		opts.Parameters.Set("offset", strconv.Itoa(offset))
-		var result api.FolderItems
-		var resp *http.Response
-		err = f.pacer.Call(func() (bool, error) {
-			resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
-			return shouldRetry(ctx, resp, err)
-		})
-		if err != nil {
-			return errors.Wrap(err, "couldn't list trash")
-		}
-		for i := range result.Entries {
-			item := &result.Entries[i]
-			if item.Type == api.ItemTypeFolder || item.Type == api.ItemTypeFile {
-				err := f.deletePermanently(ctx, item.Type, item.ID)
-				if err != nil {
-					return errors.Wrap(err, "failed to delete file")
-				}
-			} else {
-				fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
-				continue
-			}
-		}
-		offset += result.Limit
-		if offset >= result.TotalCount {
-			break
-		}
-	}
-	return
-}
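CleanUp above pages through the trash with Box's offset/limit scheme: fetch a page, process its entries, advance the offset by the returned Limit, and stop once it reaches TotalCount. The same loop in isolation, as a generic runnable sketch where the page type stands in for api.FolderItems:

    package main

    import "fmt"

    // page mirrors the offset/limit envelope Box returns for listings.
    type page struct {
    	Entries    []string
    	Limit      int
    	TotalCount int
    }

    // forEachPage drives offset pagination: fetch, process, advance by
    // the server-reported Limit, stop once offset passes TotalCount.
    func forEachPage(fetch func(offset int) (page, error), process func(string) error) error {
    	for offset := 0; ; {
    		p, err := fetch(offset)
    		if err != nil {
    			return err
    		}
    		for _, e := range p.Entries {
    			if err := process(e); err != nil {
    				return err
    			}
    		}
    		offset += p.Limit
    		if offset >= p.TotalCount {
    			return nil
    		}
    	}
    }

    func main() {
    	items := []string{"a", "b", "c", "d", "e"}
    	fetch := func(offset int) (page, error) {
    		end := offset + 2
    		if end > len(items) {
    			end = len(items)
    		}
    		return page{Entries: items[offset:end], Limit: 2, TotalCount: len(items)}, nil
    	}
    	_ = forEachPage(fetch, func(s string) error { fmt.Println(s); return nil })
    }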
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
@@ -1164,6 +924,11 @@ func (o *Object) Remote() string {
	return o.remote
}

+// srvPath returns a path for use in server
+func (o *Object) srvPath() string {
+	return replaceReservedChars(o.fs.rootSlash() + o.remote)
+}
+
// Hash returns the SHA-1 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.SHA1 {
@@ -1241,8 +1006,8 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
	}
	var info *api.Item
	err := o.fs.pacer.Call(func() (bool, error) {
-		resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
-		return shouldRetry(ctx, resp, err)
+		resp, err := o.fs.srv.CallJSON(&opts, &update, &info)
+		return shouldRetry(resp, err)
	})
	return info, err
}
@@ -1274,8 +1039,8 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
		Options: options,
	}
	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.Call(ctx, &opts)
-		return shouldRetry(ctx, resp, err)
+		resp, err = o.fs.srv.Call(&opts)
+		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
@@ -1285,10 +1050,10 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// upload does a single non-multipart upload
//
-// This is recommended for less than 50 MiB of content
-func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time, options ...fs.OpenOption) (err error) {
+// This is recommended for less than 50 MB of content
+func (o *Object) upload(in io.Reader, leaf, directoryID string, modTime time.Time) (err error) {
	upload := api.UploadFile{
-		Name:              o.fs.opt.Enc.FromStandardName(leaf),
+		Name:              replaceReservedChars(leaf),
		ContentModifiedAt: api.Time(modTime),
		ContentCreatedAt:  api.Time(modTime),
		Parent: api.Parent{
@@ -1305,7 +1070,6 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
		MultipartContentName: "contents",
		MultipartFileName:    upload.Name,
		RootURL:              uploadURL,
-		Options:              options,
	}
	// If object has an ID then it is existing so create a new version
	if o.id != "" {
@@ -1314,8 +1078,8 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
		opts.Path = "/files/content"
	}
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-		resp, err = o.fs.srv.CallJSON(ctx, &opts, &upload, &result)
-		return shouldRetry(ctx, resp, err)
+		resp, err = o.fs.srv.CallJSON(&opts, &upload, &result)
+		return shouldRetry(resp, err)
	})
	if err != nil {
		return err
@@ -1332,33 +1096,31 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
-	if o.fs.tokenRenewer != nil {
-		o.fs.tokenRenewer.Start()
-		defer o.fs.tokenRenewer.Stop()
-	}
+	o.fs.tokenRenewer.Start()
+	defer o.fs.tokenRenewer.Stop()

	size := src.Size()
	modTime := src.ModTime(ctx)
	remote := o.Remote()

	// Create the directory for the object if it doesn't exist
-	leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, remote, true)
+	leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, remote, true)
	if err != nil {
		return err
	}

	// Upload with simple or multipart
	if size <= int64(o.fs.opt.UploadCutoff) {
-		err = o.upload(ctx, in, leaf, directoryID, modTime, options...)
+		err = o.upload(in, leaf, directoryID, modTime)
	} else {
-		err = o.uploadMultipart(ctx, in, leaf, directoryID, size, modTime, options...)
+		err = o.uploadMultipart(in, leaf, directoryID, size, modTime)
	}
	return err
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
-	return o.fs.deleteObject(ctx, o.id)
+	return o.fs.deleteObject(o.id)
}

// ID returns the ID of the Object if known, or "" if not
@@ -1372,12 +1134,10 @@ var (
	_ fs.Purger          = (*Fs)(nil)
	_ fs.PutStreamer     = (*Fs)(nil)
	_ fs.Copier          = (*Fs)(nil)
-	_ fs.Abouter         = (*Fs)(nil)
	_ fs.Mover           = (*Fs)(nil)
	_ fs.DirMover        = (*Fs)(nil)
	_ fs.DirCacheFlusher = (*Fs)(nil)
	_ fs.PublicLinker    = (*Fs)(nil)
-	_ fs.CleanUpper      = (*Fs)(nil)
	_ fs.Object          = (*Object)(nil)
	_ fs.IDer            = (*Object)(nil)
)


@@ -1,10 +1,9 @@
-// multipart upload for box
+// multpart upload for box

package box

import (
	"bytes"
-	"context"
	"crypto/sha1"
	"encoding/base64"
	"encoding/json"
@@ -19,12 +18,11 @@ import (
	"github.com/rclone/rclone/backend/box/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
-	"github.com/rclone/rclone/lib/atexit"
	"github.com/rclone/rclone/lib/rest"
)
// createUploadSession creates an upload session for the object
-func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) {
+func (o *Object) createUploadSession(leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) {
	opts := rest.Opts{
		Method: "POST",
		Path:   "/files/upload_sessions",
@@ -39,12 +37,12 @@ func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID stri
	} else {
		opts.Path = "/files/upload_sessions"
		request.FolderID = directoryID
-		request.FileName = o.fs.opt.Enc.FromStandardName(leaf)
+		request.FileName = replaceReservedChars(leaf)
	}
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, &response)
-		return shouldRetry(ctx, resp, err)
+		resp, err = o.fs.srv.CallJSON(&opts, &request, &response)
+		return shouldRetry(resp, err)
	})
	return
}
@@ -55,7 +53,7 @@ func sha1Digest(digest []byte) string {
}

// uploadPart uploads a part in an upload session
-func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn, options ...fs.OpenOption) (response *api.UploadPartResponse, err error) {
+func (o *Object) uploadPart(SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
	chunkSize := int64(len(chunk))
	sha1sum := sha1.Sum(chunk)
	opts := rest.Opts{
@@ -65,7 +63,6 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
		ContentType:   "application/octet-stream",
		ContentLength: &chunkSize,
		ContentRange:  fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, totalSize),
-		Options:       options,
		ExtraHeaders: map[string]string{
			"Digest": sha1Digest(sha1sum[:]),
		},
@@ -73,8 +70,8 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		opts.Body = wrap(bytes.NewReader(chunk))
-		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &response)
-		return shouldRetry(ctx, resp, err)
+		resp, err = o.fs.srv.CallJSON(&opts, nil, &response)
+		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
@@ -83,7 +80,7 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
}
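uploadPart above authenticates each chunk two ways: a Digest header carrying the SHA-1 of the part, and a Content-Range giving the part's byte span within the whole file. A self-contained sketch of building both values; the sha= prefix follows Box's documented Digest format, which is presumably what the sha1Digest helper produces:

    package main

    import (
    	"crypto/sha1"
    	"encoding/base64"
    	"fmt"
    )

    func main() {
    	chunk := []byte("example part contents")
    	offset, totalSize := int64(0), int64(len(chunk))

    	// Box wants the part's SHA-1, base64 encoded, in a Digest header.
    	sum := sha1.Sum(chunk)
    	digest := "sha=" + base64.StdEncoding.EncodeToString(sum[:])

    	// And the byte span of the part in Content-Range.
    	contentRange := fmt.Sprintf("bytes %d-%d/%d", offset, offset+int64(len(chunk))-1, totalSize)

    	fmt.Println("Digest:", digest)
    	fmt.Println("Content-Range:", contentRange)
    }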
// commitUpload finishes an upload session
-func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) {
+func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) {
	opts := rest.Opts{
		Method: "POST",
		Path:   "/files/upload_sessions/" + SessionID + "/commit",
@@ -107,12 +104,12 @@ func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api
outer:
	for tries = 0; tries < maxTries; tries++ {
		err = o.fs.pacer.Call(func() (bool, error) {
-			resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
+			resp, err = o.fs.srv.CallJSON(&opts, &request, nil)
			if err != nil {
-				return shouldRetry(ctx, resp, err)
+				return shouldRetry(resp, err)
			}
			body, err = rest.ReadBody(resp)
-			return shouldRetry(ctx, resp, err)
+			return shouldRetry(resp, err)
		})
		delay := defaultDelay
		var why string
@@ -157,7 +154,7 @@ outer:
}

// abortUpload cancels an upload session
-func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error) {
+func (o *Object) abortUpload(SessionID string) (err error) {
	opts := rest.Opts{
		Method:     "DELETE",
		Path:       "/files/upload_sessions/" + SessionID,
@@ -166,16 +163,16 @@ func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error)
	}
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.Call(ctx, &opts)
-		return shouldRetry(ctx, resp, err)
+		resp, err = o.fs.srv.Call(&opts)
+		return shouldRetry(resp, err)
	})
	return err
}
// uploadMultipart uploads a file using multipart upload
-func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, directoryID string, size int64, modTime time.Time, options ...fs.OpenOption) (err error) {
+func (o *Object) uploadMultipart(in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
	// Create upload session
-	session, err := o.createUploadSession(ctx, leaf, directoryID, size)
+	session, err := o.createUploadSession(leaf, directoryID, size)
	if err != nil {
		return errors.Wrap(err, "multipart upload create session failed")
	}
@@ -183,13 +180,15 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
	fs.Debugf(o, "Multipart upload session started for %d parts of size %v", session.TotalParts, fs.SizeSuffix(chunkSize))

	// Cancel the session if something went wrong
-	defer atexit.OnError(&err, func() {
-		fs.Debugf(o, "Cancelling multipart upload: %v", err)
-		cancelErr := o.abortUpload(ctx, session.ID)
-		if cancelErr != nil {
-			fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr)
+	defer func() {
+		if err != nil {
+			fs.Debugf(o, "Cancelling multipart upload: %v", err)
+			cancelErr := o.abortUpload(session.ID)
+			if cancelErr != nil {
+				fs.Logf(o, "Failed to cancel multipart upload: %v", err)
+			}
		}
-	})()
+	}()

	// unwrap the accounting from the input, we use wrap to put it
	// back on after the buffering
@@ -236,7 +235,7 @@ outer:
			defer wg.Done()
			defer o.fs.uploadToken.Put()
			fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
-			partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap, options...)
+			partResponse, err := o.uploadPart(session.ID, position, size, buf, wrap)
			if err != nil {
				err = errors.Wrap(err, "multipart upload failed to upload part")
				select {
@@ -264,7 +263,7 @@ outer:
	}

	// Finalise the upload session
-	result, err := o.commitUpload(ctx, session.ID, parts, modTime, hash.Sum(nil))
+	result, err := o.commitUpload(session.ID, parts, modTime, hash.Sum(nil))
	if err != nil {
		return errors.Wrap(err, "multipart upload failed to finalize")
	}
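The part uploads above run in parallel but are throttled by the uploadToken dispenser: each goroutine takes a token before uploading and returns it when done, so at most --transfers parts are in flight at once. The same pattern with a plain buffered channel, as a generic sketch rather than rclone's pacer package:

    package main

    import (
    	"fmt"
    	"sync"
    )

    func main() {
    	const parts = 8
    	const maxInFlight = 3

    	tokens := make(chan struct{}, maxInFlight) // the token dispenser
    	var wg sync.WaitGroup

    	for part := 0; part < parts; part++ {
    		tokens <- struct{}{} // Get: blocks while maxInFlight uploads run
    		wg.Add(1)
    		go func(part int) {
    			defer wg.Done()
    			defer func() { <-tokens }() // Put: free the slot for the next part
    			fmt.Printf("uploading part %d\n", part)
    		}(part)
    	}
    	wg.Wait()
    }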

backend/cache/cache.go vendored

@@ -1,4 +1,4 @@
-// +build !plan9,!js
+// +build !plan9

package cache
@@ -65,10 +65,9 @@ func init() {
		Name:        "cache",
		Description: "Cache a remote",
		NewFs:       NewFs,
-		CommandHelp: commandHelp,
		Options: []fs.Option{{
			Name:     "remote",
-			Help:     "Remote to cache.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
+			Help:     "Remote to cache.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
			Required: true,
		}, {
			Name: "plex_url",
@@ -87,7 +86,7 @@ func init() {
			Advanced: true,
		}, {
			Name:     "plex_insecure",
-			Help:     "Skip all certificate verification when connecting to the Plex server",
+			Help:     "Skip all certificate verifications when connecting to the Plex server",
			Advanced: true,
		}, {
			Name: "chunk_size",
@@ -98,18 +97,18 @@ changed, any downloaded chunks will be invalid and cache-chunk-path
will need to be cleared or unexpected EOF errors will occur.`,
			Default: DefCacheChunkSize,
			Examples: []fs.OptionExample{{
-				Value: "1M",
-				Help:  "1 MiB",
+				Value: "1m",
+				Help:  "1MB",
			}, {
				Value: "5M",
-				Help:  "5 MiB",
+				Help:  "5 MB",
			}, {
				Value: "10M",
-				Help:  "10 MiB",
+				Help:  "10 MB",
			}},
		}, {
			Name: "info_age",
-			Help: `How long to cache file structure information (directory listings, file size, times, etc.).
+			Help: `How long to cache file structure information (directory listings, file size, times etc).
If all write operations are done through the cache then you can safely make
this value very large as the cache store will also be updated in real time.`,
			Default: DefCacheInfoAge,
@@ -132,13 +131,13 @@ oldest chunks until it goes under this value.`,
			Default: DefCacheTotalChunkSize,
			Examples: []fs.OptionExample{{
				Value: "500M",
-				Help:  "500 MiB",
+				Help:  "500 MB",
			}, {
				Value: "1G",
-				Help:  "1 GiB",
+				Help:  "1 GB",
			}, {
				Value: "10G",
-				Help:  "10 GiB",
+				Help:  "10 GB",
			}},
		}, {
			Name: "db_path",
@@ -339,14 +338,8 @@ func parseRootPath(path string) (string, error) {
	return strings.Trim(path, "/"), nil
}

-var warnDeprecated sync.Once
-
-// NewFs constructs an Fs from the path, container:path
-func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
-	warnDeprecated.Do(func() {
-		fs.Logf(nil, "WARNING: Cache backend is deprecated and may be removed in future. Please use VFS instead.")
-	})
+// NewFs constructs a Fs from the path, container:path
+func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
@@ -367,10 +360,15 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
		return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
	}

-	remotePath := fspath.JoinRootPath(opt.Remote, rootPath)
-	wrappedFs, wrapErr := cache.Get(ctx, remotePath)
+	wInfo, wName, wPath, wConfig, err := fs.ConfigFs(opt.Remote)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", opt.Remote)
+	}
+	remotePath := fspath.JoinRootPath(wPath, rootPath)
+	wrappedFs, wrapErr := wInfo.NewFs(wName, remotePath, wConfig)
	if wrapErr != nil && wrapErr != fs.ErrorIsFile {
-		return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
+		return nil, errors.Wrapf(wrapErr, "failed to make remote %s:%s to wrap", wName, remotePath)
	}
	var fsErr error
	fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
@@ -391,7 +389,6 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
		cleanupChan:     make(chan bool, 1),
		notifiedRemotes: make(map[string]bool),
	}
-	cache.PinUntilFinalized(f.Fs, f)
	f.rateLimiter = rate.NewLimiter(rate.Limit(float64(opt.Rps)), opt.TotalWorkers)

	f.plexConnector = &plexConnector{}
@@ -485,7 +482,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
			return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
		}
		f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
-		f.tempFs, err = cache.Get(ctx, f.opt.TempWritePath)
+		f.tempFs, err = cache.Get(f.opt.TempWritePath)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
		}
@@ -512,16 +509,19 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
	if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
		pollInterval := make(chan time.Duration, 1)
		pollInterval <- time.Duration(f.opt.ChunkCleanInterval)
-		doChangeNotify(ctx, f.receiveChangeNotify, pollInterval)
+		doChangeNotify(context.Background(), f.receiveChangeNotify, pollInterval)
	}

	f.features = (&fs.Features{
		CanHaveEmptyDirectories: true,
		DuplicateFiles:          false, // storage doesn't permit this
-	}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
+	}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
	// override only those features that use a temp fs and it doesn't support them
	//f.features.ChangeNotify = f.ChangeNotify
	if f.opt.TempWritePath != "" {
+		if f.tempFs.Features().Copy == nil {
+			f.features.Copy = nil
+		}
		if f.tempFs.Features().Move == nil {
			f.features.Move = nil
		}
@@ -587,7 +587,7 @@ Some valid examples are:
"0:10" -> the first ten chunks

Any parameter with a key that starts with "file" can be used to
-specify files to fetch, e.g.
+specify files to fetch, eg

    rclone rc cache/fetch chunks=0 file=hello file2=home/goodbye
@@ -1242,7 +1242,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
}

// DirMove moves src, srcRemote to this remote at dstRemote
-// using server-side move operations.
+// using server side move operations.
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	fs.Debugf(f, "move dir '%s'/'%s' -> '%s'/'%s'", src.Root(), srcRemote, f.Root(), dstRemote)
@@ -1523,7 +1523,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
	return f.put(ctx, in, src, options, do)
}

-// Copy src to this remote using server-side copy operations.
+// Copy src to this remote using server side copy operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	fs.Debugf(f, "copy obj '%s' -> '%s'", src, remote)
@@ -1532,9 +1532,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
		fs.Errorf(src, "source remote (%v) doesn't support Copy", src.Fs())
		return nil, fs.ErrorCantCopy
	}
-	if f.opt.TempWritePath != "" && src.Fs() == f.tempFs {
-		return nil, fs.ErrorCantCopy
-	}

	// the source must be a cached object or we abort
	srcObj, ok := src.(*Object)
	if !ok {
@@ -1600,7 +1597,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
	return co, nil
}

-// Move src to this remote using server-side move operations.
+// Move src to this remote using server side move operations.
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	fs.Debugf(f, "moving obj '%s' -> %s", src, remote)
@@ -1704,20 +1701,17 @@ func (f *Fs) Hashes() hash.Set {
	return f.Fs.Hashes()
}

-// Purge all files in the directory
-func (f *Fs) Purge(ctx context.Context, dir string) error {
-	if dir == "" {
-		// FIXME this isn't quite right as it should purge the dir prefix
-		fs.Infof(f, "purging cache")
-		f.cache.Purge()
-	}
+// Purge all files in the root and the root directory
+func (f *Fs) Purge(ctx context.Context) error {
+	fs.Infof(f, "purging cache")
+	f.cache.Purge()

	do := f.Fs.Features().Purge
	if do == nil {
-		return fs.ErrorCantPurge
+		return nil
	}

-	err := do(ctx, dir)
+	err := do(ctx)
	if err != nil {
		return err
	}
@@ -1834,19 +1828,6 @@ func (f *Fs) isRootInPath(p string) bool {
	return strings.HasPrefix(p, f.Root()+"/")
}

-// MergeDirs merges the contents of all the directories passed
-// in into the first one and rmdirs the other directories.
-func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
-	do := f.Fs.Features().MergeDirs
-	if do == nil {
-		return errors.New("MergeDirs not supported")
-	}
-	for _, dir := range dirs {
-		_ = f.cache.RemoveDir(dir.Remote())
-	}
-	return do(ctx, dirs)
-}
-
// DirCacheFlush flushes the dir cache
func (f *Fs) DirCacheFlush() {
	_ = f.cache.RemoveDir("")
@@ -1901,41 +1882,6 @@ func (f *Fs) Disconnect(ctx context.Context) error {
	return do(ctx)
}

-// Shutdown the backend, closing any background tasks and any
-// cached connections.
-func (f *Fs) Shutdown(ctx context.Context) error {
-	do := f.Fs.Features().Shutdown
-	if do == nil {
-		return nil
-	}
-	return do(ctx)
-}
-
-var commandHelp = []fs.CommandHelp{
-	{
-		Name:  "stats",
-		Short: "Print stats on the cache backend in JSON format.",
-	},
-}
-
-// Command the backend to run a named command
-//
-// The command run is name
-// args may be used to read arguments from
-// opts may be used to read optional arguments from
-//
-// The result should be capable of being JSON encoded
-// If it is a string or a []string it will be shown to the user
-// otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
-	switch name {
-	case "stats":
-		return f.Stats()
-	default:
-		return nil, fs.ErrorCommandNotFound
-	}
-}
-
// Check the interfaces are satisfied
var (
	_ fs.Fs = (*Fs)(nil)
@@ -1953,7 +1899,4 @@ var (
	_ fs.Abouter      = (*Fs)(nil)
	_ fs.UserInfoer   = (*Fs)(nil)
	_ fs.Disconnecter = (*Fs)(nil)
-	_ fs.Commander    = (*Fs)(nil)
-	_ fs.MergeDirser  = (*Fs)(nil)
-	_ fs.Shutdowner   = (*Fs)(nil)
)


@@ -1,5 +1,4 @@
-// +build !plan9,!js
-// +build !race
+// +build !plan9
 package cache_test
@@ -16,7 +15,9 @@ import (
 	"os"
 	"path"
 	"path/filepath"
+	"runtime"
 	"runtime/debug"
+	"strconv"
 	"strings"
 	"testing"
 	"time"
@@ -30,10 +31,12 @@ import (
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/object"
+	"github.com/rclone/rclone/fs/rc"
 	"github.com/rclone/rclone/fstest"
-	"github.com/rclone/rclone/fstest/testy"
 	"github.com/rclone/rclone/lib/random"
+	"github.com/rclone/rclone/vfs"
 	"github.com/rclone/rclone/vfs/vfsflags"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -49,7 +52,9 @@ const (
 var (
 	remoteName string
+	mountDir   string
 	uploadDir  string
+	useMount   bool
 	runInstance *run
 	errNotSupported = errors.New("not supported")
 	decryptedToEncryptedRemotes = map[string]string{
@@ -85,7 +90,9 @@ var (
 func init() {
 	goflag.StringVar(&remoteName, "remote-internal", "TestInternalCache", "Remote to test with, defaults to local filesystem")
+	goflag.StringVar(&mountDir, "mount-dir-internal", "", "")
 	goflag.StringVar(&uploadDir, "upload-dir-internal", "", "")
+	goflag.BoolVar(&useMount, "cache-use-mount", false, "Test only with mount")
 }
 // TestMain drives the tests
@@ -93,7 +100,7 @@ func TestMain(m *testing.M) {
 	goflag.Parse()
 	var rc int
-	log.Printf("Running with the following params: \n remote: %v", remoteName)
+	log.Printf("Running with the following params: \n remote: %v, \n mount: %v", remoteName, useMount)
 	runInstance = newRun()
 	rc = m.Run()
 	os.Exit(rc)
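Editor's note: the two new flags registered in init above (mount-dir-internal and cache-use-mount) are what switch the internal suite into mount mode. A hedged example invocation, following rclone's usual pass-through-flags test pattern, with placeholder paths and the default remote from the flag definitions:

	go test -v ./backend/cache -remote-internal TestInternalCache: -cache-use-mount -mount-dir-internal /tmp/cache-mnt -upload-dir-internal /tmp/cache-up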
@@ -266,8 +273,32 @@ func TestInternalObjNotFound(t *testing.T) {
 	require.Nil(t, obj)
 }
+func TestInternalRemoteWrittenFileFoundInMount(t *testing.T) {
+	if !runInstance.useMount {
+		t.Skip("test needs mount mode")
+	}
+	id := fmt.Sprintf("tirwffim%v", time.Now().Unix())
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
+	cfs, err := runInstance.getCacheFs(rootFs)
+	require.NoError(t, err)
+	var testData []byte
+	if runInstance.rootIsCrypt {
+		testData, err = base64.StdEncoding.DecodeString(cryptedTextBase64)
+		require.NoError(t, err)
+	} else {
+		testData = []byte("test content")
+	}
+	runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test"), testData)
+	data, err := runInstance.readDataFromRemote(t, rootFs, "test", 0, int64(len([]byte("test content"))), false)
+	require.NoError(t, err)
+	require.Equal(t, "test content", string(data))
+}
 func TestInternalCachedWrittenContentMatches(t *testing.T) {
-	testy.SkipUnreliable(t)
 	id := fmt.Sprintf("ticwcm%v", time.Now().Unix())
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
@@ -311,7 +342,6 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
 }
 func TestInternalCachedUpdatedContentMatches(t *testing.T) {
-	testy.SkipUnreliable(t)
 	id := fmt.Sprintf("ticucm%v", time.Now().Unix())
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
@@ -661,6 +691,79 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
 	require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
 }
+func TestInternalChangeSeenAfterRc(t *testing.T) {
+	cacheExpire := rc.Calls.Get("cache/expire")
+	assert.NotNil(t, cacheExpire)
+	id := fmt.Sprintf("ticsarc%v", time.Now().Unix())
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
+	if !runInstance.useMount {
+		t.Skipf("needs mount")
+	}
+	if !runInstance.wrappedIsExternal {
+		t.Skipf("needs drive")
+	}
+	cfs, err := runInstance.getCacheFs(rootFs)
+	require.NoError(t, err)
+	chunkSize := cfs.ChunkSize()
+	// create some rand test data
+	testData := randStringBytes(int(chunkSize*4 + chunkSize/2))
+	runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
+	// update in the wrapped fs
+	o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
+	require.NoError(t, err)
+	wrappedTime := time.Now().Add(-1 * time.Hour)
+	err = o.SetModTime(context.Background(), wrappedTime)
+	require.NoError(t, err)
+	// get a new instance from the cache
+	co, err := rootFs.NewObject(context.Background(), "data.bin")
+	require.NoError(t, err)
+	require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String())
+	// Call the rc function
+	m, err := cacheExpire.Fn(context.Background(), rc.Params{"remote": "data.bin"})
+	require.NoError(t, err)
+	require.Contains(t, m, "status")
+	require.Contains(t, m, "message")
+	require.Equal(t, "ok", m["status"])
+	require.Contains(t, m["message"], "cached file cleared")
+	// get a new instance from the cache
+	co, err = rootFs.NewObject(context.Background(), "data.bin")
+	require.NoError(t, err)
+	require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
+	_, err = runInstance.list(t, rootFs, "")
+	require.NoError(t, err)
+	// create some rand test data
+	testData2 := randStringBytes(int(chunkSize))
+	runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test2"), testData2)
+	// list should have 1 item only
+	li1, err := runInstance.list(t, rootFs, "")
+	require.NoError(t, err)
+	require.Len(t, li1, 1)
+	// Call the rc function
+	m, err = cacheExpire.Fn(context.Background(), rc.Params{"remote": "/"})
+	require.NoError(t, err)
+	require.Contains(t, m, "status")
+	require.Contains(t, m, "message")
+	require.Equal(t, "ok", m["status"])
+	require.Contains(t, m["message"], "cached directory cleared")
+	// list should have 2 items now
+	li2, err := runInstance.list(t, rootFs, "")
+	require.NoError(t, err)
+	require.Len(t, li2, 2)
+}
 func TestInternalCacheWrites(t *testing.T) {
 	id := "ticw"
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
@@ -808,9 +911,15 @@ func TestInternalBug2117(t *testing.T) {
 type run struct {
 	okDiff            time.Duration
 	runDefaultCfgMap  configmap.Simple
+	mntDir            string
 	tmpUploadDir      string
+	useMount          bool
+	isMounted         bool
 	rootIsCrypt       bool
 	wrappedIsExternal bool
+	unmountFn         func() error
+	unmountRes        chan error
+	vfs               *vfs.VFS
 	tempFiles         []*os.File
 	dbPath            string
 	chunkPath         string
@@ -820,7 +929,9 @@ type run struct {
 func newRun() *run {
 	var err error
 	r := &run{
 		okDiff: time.Second * 9, // really big diff here but the build machines seem to be slow. need a different way for this
+		useMount:  useMount,
+		isMounted: false,
 	}
 	// Read in all the defaults for all the options
@@ -833,10 +944,36 @@ func newRun() *run {
 		r.runDefaultCfgMap.Set(option.Name, fmt.Sprint(option.Default))
 	}
+	if mountDir == "" {
+		if runtime.GOOS != "windows" {
+			r.mntDir, err = ioutil.TempDir("", "rclonecache-mount")
+			if err != nil {
+				log.Fatalf("Failed to create mount dir: %v", err)
+				return nil
+			}
+		} else {
+			// Find a free drive letter
+			drive := ""
+			for letter := 'E'; letter <= 'Z'; letter++ {
+				drive = string(letter) + ":"
+				_, err := os.Stat(drive + "\\")
+				if os.IsNotExist(err) {
+					goto found
+				}
+			}
+			log.Print("Couldn't find free drive letter for test")
+		found:
+			r.mntDir = drive
+		}
+	} else {
+		r.mntDir = mountDir
+	}
+	log.Printf("Mount Dir: %v", r.mntDir)
 	if uploadDir == "" {
 		r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
 		if err != nil {
-			panic(fmt.Sprintf("Failed to create temp dir: %v", err))
+			log.Fatalf("Failed to create temp dir: %v", err)
 		}
 	} else {
 		r.tmpUploadDir = uploadDir
@@ -872,15 +1009,6 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 		return nil, nil
 	}
-	// Config to pass to NewFs
-	m := configmap.Simple{}
-	for k, v := range r.runDefaultCfgMap {
-		m.Set(k, v)
-	}
-	for k, v := range flags {
-		m.Set(k, v)
-	}
 	// if the remote doesn't exist, create a new one with a local one for it
 	// identify which is the cache remote (it can be wrapped by a crypt too)
 	rootIsCrypt := false
@@ -889,10 +1017,10 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 		localRemote := remote + "-local"
 		config.FileSet(localRemote, "type", "local")
 		config.FileSet(localRemote, "nounc", "true")
-		m.Set("type", "cache")
-		m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
+		config.FileSet(remote, "type", "cache")
+		config.FileSet(remote, "remote", localRemote+":/var/tmp/"+localRemote)
 	} else {
-		remoteType := config.FileGet(remote, "type")
+		remoteType := config.FileGet(remote, "type", "")
 		if remoteType == "" {
 			t.Skipf("skipped due to invalid remote type for %v", remote)
 			return nil, nil
@@ -900,17 +1028,17 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 	if remoteType != "cache" {
 		if remoteType == "crypt" {
 			rootIsCrypt = true
-			m.Set("password", cryptPassword1)
-			m.Set("password2", cryptPassword2)
+			config.FileSet(remote, "password", cryptPassword1)
+			config.FileSet(remote, "password2", cryptPassword2)
 		}
-		remoteRemote := config.FileGet(remote, "remote")
+		remoteRemote := config.FileGet(remote, "remote", "")
 		if remoteRemote == "" {
 			t.Skipf("skipped due to invalid remote wrapper for %v", remote)
 			return nil, nil
 		}
 		remoteRemoteParts := strings.Split(remoteRemote, ":")
 		remoteWrapping := remoteRemoteParts[0]
-		remoteType := config.FileGet(remoteWrapping, "type")
+		remoteType := config.FileGet(remoteWrapping, "type", "")
 		if remoteType != "cache" {
 			t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
 			return nil, nil
@@ -925,15 +1053,22 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 	boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true})
 	require.NoError(t, err)
-	ci := fs.GetConfig(context.Background())
-	ci.LowLevelRetries = 1
+	fs.Config.LowLevelRetries = 1
+	m := configmap.Simple{}
+	for k, v := range r.runDefaultCfgMap {
+		m.Set(k, v)
+	}
+	for k, v := range flags {
+		m.Set(k, v)
+	}
 	// Instantiate root
 	if purge {
 		boltDb.PurgeTempUploads()
 		_ = os.RemoveAll(path.Join(runInstance.tmpUploadDir, id))
 	}
-	f, err := cache.NewFs(context.Background(), remote, id, m)
+	f, err := cache.NewFs(remote, id, m)
 	require.NoError(t, err)
 	cfs, err := r.getCacheFs(f)
 	require.NoError(t, err)
@@ -947,21 +1082,33 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 	}
 	if purge {
-		_ = f.Features().Purge(context.Background(), "")
+		_ = f.Features().Purge(context.Background())
 		require.NoError(t, err)
 	}
 	err = f.Mkdir(context.Background(), "")
 	require.NoError(t, err)
+	if r.useMount && !r.isMounted {
+		r.mountFs(t, f)
+	}
 	return f, boltDb
 }
 func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
-	err := f.Features().Purge(context.Background(), "")
+	if r.useMount && r.isMounted {
+		r.unmountFs(t, f)
+	}
+	err := f.Features().Purge(context.Background())
 	require.NoError(t, err)
 	cfs, err := r.getCacheFs(f)
 	require.NoError(t, err)
 	cfs.StopBackgroundRunners()
+	if r.useMount && runtime.GOOS != "windows" {
+		err = os.RemoveAll(r.mntDir)
+		require.NoError(t, err)
+	}
 	err = os.RemoveAll(r.tmpUploadDir)
 	require.NoError(t, err)
@@ -992,6 +1139,23 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
 	return f
 }
+func (r *run) writeRemoteRandomBytes(t *testing.T, f fs.Fs, p string, size int64) string {
+	remote := path.Join(p, strconv.Itoa(rand.Int())+".bin")
+	// create some rand test data
+	testData := randStringBytes(int(size))
+	r.writeRemoteBytes(t, f, remote, testData)
+	return remote
+}
+func (r *run) writeObjectRandomBytes(t *testing.T, f fs.Fs, p string, size int64) fs.Object {
+	remote := path.Join(p, strconv.Itoa(rand.Int())+".bin")
+	// create some rand test data
+	testData := randStringBytes(int(size))
+	return r.writeObjectBytes(t, f, remote, testData)
+}
 func (r *run) writeRemoteString(t *testing.T, f fs.Fs, remote, content string) {
 	r.writeRemoteBytes(t, f, remote, []byte(content))
 }
@@ -1001,11 +1165,37 @@ func (r *run) writeObjectString(t *testing.T, f fs.Fs, remote, content string) f
 }
 func (r *run) writeRemoteBytes(t *testing.T, f fs.Fs, remote string, data []byte) {
-	r.writeObjectBytes(t, f, remote, data)
+	var err error
+	if r.useMount {
+		err = r.retryBlock(func() error {
+			return ioutil.WriteFile(path.Join(r.mntDir, remote), data, 0600)
+		}, 3, time.Second*3)
+		require.NoError(t, err)
+		r.vfs.WaitForWriters(10 * time.Second)
+	} else {
+		r.writeObjectBytes(t, f, remote, data)
+	}
 }
 func (r *run) writeRemoteReader(t *testing.T, f fs.Fs, remote string, in io.ReadCloser) {
-	r.writeObjectReader(t, f, remote, in)
+	defer func() {
+		_ = in.Close()
+	}()
+	if r.useMount {
+		out, err := os.Create(path.Join(r.mntDir, remote))
+		require.NoError(t, err)
+		defer func() {
+			_ = out.Close()
+		}()
+		_, err = io.Copy(out, in)
+		require.NoError(t, err)
+		r.vfs.WaitForWriters(10 * time.Second)
+	} else {
+		r.writeObjectReader(t, f, remote, in)
+	}
 }
 func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object {
@@ -1022,6 +1212,10 @@ func (r *run) writeObjectReader(t *testing.T, f fs.Fs, remote string, in io.Read
 	objInfo := object.NewStaticObjectInfo(remote, modTime, -1, true, nil, f)
 	obj, err := f.Put(context.Background(), in, objInfo)
 	require.NoError(t, err)
+	if r.useMount {
+		r.vfs.WaitForWriters(10 * time.Second)
+	}
 	return obj
 }
@@ -1029,16 +1223,26 @@ func (r *run) updateObjectRemote(t *testing.T, f fs.Fs, remote string, data1 []b
 	var err error
 	var obj fs.Object
-	in1 := bytes.NewReader(data1)
-	in2 := bytes.NewReader(data2)
-	objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
-	objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
-	_, err = f.Put(context.Background(), in1, objInfo1)
-	require.NoError(t, err)
-	obj, err = f.NewObject(context.Background(), remote)
-	require.NoError(t, err)
-	err = obj.Update(context.Background(), in2, objInfo2)
+	if r.useMount {
+		err = ioutil.WriteFile(path.Join(r.mntDir, remote), data1, 0600)
+		require.NoError(t, err)
+		r.vfs.WaitForWriters(10 * time.Second)
+		err = ioutil.WriteFile(path.Join(r.mntDir, remote), data2, 0600)
+		require.NoError(t, err)
+		r.vfs.WaitForWriters(10 * time.Second)
+		obj, err = f.NewObject(context.Background(), remote)
+	} else {
+		in1 := bytes.NewReader(data1)
+		in2 := bytes.NewReader(data2)
+		objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
+		objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
+		obj, err = f.Put(context.Background(), in1, objInfo1)
+		require.NoError(t, err)
+		obj, err = f.NewObject(context.Background(), remote)
+		require.NoError(t, err)
+		err = obj.Update(context.Background(), in2, objInfo2)
+	}
 	require.NoError(t, err)
 	return obj
@@ -1048,12 +1252,30 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
 	size := end - offset
 	checkSample := make([]byte, size)
-	co, err := f.NewObject(context.Background(), remote)
-	if err != nil {
-		return checkSample, err
+	if r.useMount {
+		f, err := os.Open(path.Join(r.mntDir, remote))
+		defer func() {
+			_ = f.Close()
+		}()
+		if err != nil {
+			return checkSample, err
+		}
+		_, _ = f.Seek(offset, io.SeekStart)
+		totalRead, err := io.ReadFull(f, checkSample)
+		checkSample = checkSample[:totalRead]
+		if err == io.EOF || err == io.ErrUnexpectedEOF {
+			err = nil
+		}
+		if err != nil {
+			return checkSample, err
+		}
+	} else {
+		co, err := f.NewObject(context.Background(), remote)
+		if err != nil {
+			return checkSample, err
+		}
+		checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
 	}
-	checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
 	if !noLengthCheck && size != int64(len(checkSample)) {
 		return checkSample, errors.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
 	}
@@ -1076,19 +1298,28 @@ func (r *run) readDataFromObj(t *testing.T, o fs.Object, offset, end int64, noLe
 }
 func (r *run) mkdir(t *testing.T, f fs.Fs, remote string) {
-	err := f.Mkdir(context.Background(), remote)
+	var err error
+	if r.useMount {
+		err = os.Mkdir(path.Join(r.mntDir, remote), 0700)
+	} else {
+		err = f.Mkdir(context.Background(), remote)
+	}
 	require.NoError(t, err)
 }
 func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
 	var err error
-	var obj fs.Object
-	obj, err = f.NewObject(context.Background(), remote)
-	if err != nil {
-		err = f.Rmdir(context.Background(), remote)
+	if r.useMount {
+		err = os.Remove(path.Join(r.mntDir, remote))
 	} else {
-		err = obj.Remove(context.Background())
+		var obj fs.Object
+		obj, err = f.NewObject(context.Background(), remote)
+		if err != nil {
+			err = f.Rmdir(context.Background(), remote)
+		} else {
+			err = obj.Remove(context.Background())
+		}
 	}
 	return err
@@ -1097,14 +1328,42 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
 func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error) {
 	var err error
 	var l []interface{}
-	var list fs.DirEntries
-	list, err = f.List(context.Background(), remote)
-	for _, ll := range list {
-		l = append(l, ll)
+	if r.useMount {
+		var list []os.FileInfo
+		list, err = ioutil.ReadDir(path.Join(r.mntDir, remote))
+		for _, ll := range list {
+			l = append(l, ll)
+		}
+	} else {
+		var list fs.DirEntries
+		list, err = f.List(context.Background(), remote)
+		for _, ll := range list {
+			l = append(l, ll)
+		}
 	}
 	return l, err
 }
+func (r *run) listPath(t *testing.T, f fs.Fs, remote string) []string {
+	var err error
+	var l []string
+	if r.useMount {
+		var list []os.FileInfo
+		list, err = ioutil.ReadDir(path.Join(r.mntDir, remote))
+		for _, ll := range list {
+			l = append(l, ll.Name())
+		}
+	} else {
+		var list fs.DirEntries
+		list, err = f.List(context.Background(), remote)
+		for _, ll := range list {
+			l = append(l, ll.Remote())
+		}
+	}
+	require.NoError(t, err)
+	return l
+}
 func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
 	in, err := os.Open(src)
 	if err != nil {
@@ -1129,7 +1388,13 @@ func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
 func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
 	var err error
-	if rootFs.Features().DirMove != nil {
+	if runInstance.useMount {
+		err = os.Rename(path.Join(runInstance.mntDir, src), path.Join(runInstance.mntDir, dst))
+		if err != nil {
+			return err
+		}
+		r.vfs.WaitForWriters(10 * time.Second)
+	} else if rootFs.Features().DirMove != nil {
 		err = rootFs.Features().DirMove(context.Background(), rootFs, src, dst)
 		if err != nil {
 			return err
@@ -1145,7 +1410,13 @@ func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
 func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
 	var err error
-	if rootFs.Features().Move != nil {
+	if runInstance.useMount {
+		err = os.Rename(path.Join(runInstance.mntDir, src), path.Join(runInstance.mntDir, dst))
+		if err != nil {
+			return err
+		}
+		r.vfs.WaitForWriters(10 * time.Second)
+	} else if rootFs.Features().Move != nil {
 		obj1, err := rootFs.NewObject(context.Background(), src)
 		if err != nil {
 			return err
@@ -1165,7 +1436,13 @@ func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
 func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
 	var err error
-	if rootFs.Features().Copy != nil {
+	if r.useMount {
+		err = r.copyFile(t, rootFs, path.Join(r.mntDir, src), path.Join(r.mntDir, dst))
+		if err != nil {
+			return err
+		}
+		r.vfs.WaitForWriters(10 * time.Second)
+	} else if rootFs.Features().Copy != nil {
 		obj, err := rootFs.NewObject(context.Background(), src)
 		if err != nil {
 			return err
@@ -1185,6 +1462,13 @@ func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
 func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error) {
 	var err error
+	if r.useMount {
+		fi, err := os.Stat(path.Join(runInstance.mntDir, src))
+		if err != nil {
+			return time.Time{}, err
+		}
+		return fi.ModTime(), nil
+	}
 	obj1, err := rootFs.NewObject(context.Background(), src)
 	if err != nil {
 		return time.Time{}, err
@@ -1195,6 +1479,13 @@ func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error)
 func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
 	var err error
+	if r.useMount {
+		fi, err := os.Stat(path.Join(runInstance.mntDir, src))
+		if err != nil {
+			return int64(0), err
+		}
+		return fi.Size(), nil
+	}
 	obj1, err := rootFs.NewObject(context.Background(), src)
 	if err != nil {
 		return int64(0), err
@@ -1205,15 +1496,28 @@ func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
 func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) error {
 	var err error
-	var obj1 fs.Object
-	obj1, err = rootFs.NewObject(context.Background(), src)
-	if err != nil {
-		return err
+	if r.useMount {
+		var f *os.File
+		f, err = os.OpenFile(path.Join(runInstance.mntDir, src), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
+		if err != nil {
+			return err
+		}
+		defer func() {
+			_ = f.Close()
+			r.vfs.WaitForWriters(10 * time.Second)
+		}()
+		_, err = f.WriteString(data + append)
+	} else {
+		var obj1 fs.Object
+		obj1, err = rootFs.NewObject(context.Background(), src)
+		if err != nil {
+			return err
+		}
+		data1 := []byte(data + append)
+		r := bytes.NewReader(data1)
+		objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
+		err = obj1.Update(context.Background(), r, objInfo1)
 	}
-	data1 := []byte(data + append)
-	reader := bytes.NewReader(data1)
-	objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
-	err = obj1.Update(context.Background(), reader, objInfo1)
 	return err
 }
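Editor's note: TestInternalChangeSeenAfterRc above exercises the cache backend's cache/expire remote control call in-process through rc.Calls. Against a running rclone remote control server the same call should be reachable from the CLI; a hedged equivalent (the remote parameter matches the test, withData is taken from the cache backend's rc documentation):

	rclone rc cache/expire remote=/ withData=true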

backend/cache/cache_mount_unix_test.go (new file, 78 lines)

@@ -0,0 +1,78 @@
// +build !plan9,!windows
package cache_test
import (
"os"
"testing"
"time"
"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
"github.com/rclone/rclone/cmd/mount"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/require"
)
func (r *run) mountFs(t *testing.T, f fs.Fs) {
device := f.Name() + ":" + f.Root()
var options = []fuse.MountOption{
fuse.MaxReadahead(uint32(mountlib.MaxReadAhead)),
fuse.Subtype("rclone"),
fuse.FSName(device), fuse.VolumeName(device),
fuse.NoAppleDouble(),
fuse.NoAppleXattr(),
//fuse.AllowOther(),
}
err := os.MkdirAll(r.mntDir, os.ModePerm)
require.NoError(t, err)
c, err := fuse.Mount(r.mntDir, options...)
require.NoError(t, err)
filesys := mount.NewFS(f)
server := fusefs.New(c, nil)
// Serve the mount point in the background returning error to errChan
r.unmountRes = make(chan error, 1)
go func() {
err := server.Serve(filesys)
closeErr := c.Close()
if err == nil {
err = closeErr
}
r.unmountRes <- err
}()
// check if the mount process has an error to report
<-c.Ready
require.NoError(t, c.MountError)
r.unmountFn = func() error {
// Shutdown the VFS
filesys.VFS.Shutdown()
return fuse.Unmount(r.mntDir)
}
r.vfs = filesys.VFS
r.isMounted = true
}
func (r *run) unmountFs(t *testing.T, f fs.Fs) {
var err error
for i := 0; i < 4; i++ {
err = r.unmountFn()
if err != nil {
//log.Printf("signal to umount failed - retrying: %v", err)
time.Sleep(3 * time.Second)
continue
}
break
}
require.NoError(t, err)
err = <-r.unmountRes
require.NoError(t, err)
err = r.vfs.CleanUp()
require.NoError(t, err)
r.isMounted = false
}
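Editor's note: the FUSE harness above only runs when the suite is started in mount mode, and this file is excluded on plan9 and windows by its build tag. A hedged way to exercise it via the mount-only test added in cache_internal_test.go:

	go test -v ./backend/cache -run TestInternalRemoteWrittenFileFoundInMount -cache-use-mount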


@@ -0,0 +1,124 @@
// +build windows
package cache_test
import (
"fmt"
"os"
"testing"
"time"
"github.com/billziss-gh/cgofuse/fuse"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd/cmount"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/require"
)
// waitFor runs fn() until it returns true or the timeout expires
func waitFor(fn func() bool) (ok bool) {
const totalWait = 10 * time.Second
const individualWait = 10 * time.Millisecond
for i := 0; i < int(totalWait/individualWait); i++ {
ok = fn()
if ok {
return ok
}
time.Sleep(individualWait)
}
return false
}
func (r *run) mountFs(t *testing.T, f fs.Fs) {
// FIXME implement cmount
t.Skip("windows not supported yet")
device := f.Name() + ":" + f.Root()
options := []string{
"-o", "fsname=" + device,
"-o", "subtype=rclone",
"-o", fmt.Sprintf("max_readahead=%d", mountlib.MaxReadAhead),
"-o", "uid=-1",
"-o", "gid=-1",
"-o", "allow_other",
// This causes FUSE to supply O_TRUNC with the Open
// call which is more efficient for cmount. However
// it does not work with cgofuse on Windows with
// WinFSP so cmount must work with or without it.
"-o", "atomic_o_trunc",
"--FileSystemName=rclone",
}
fsys := cmount.NewFS(f)
host := fuse.NewFileSystemHost(fsys)
// Serve the mount point in the background returning error to errChan
r.unmountRes = make(chan error, 1)
go func() {
var err error
ok := host.Mount(r.mntDir, options)
if !ok {
err = errors.New("mount failed")
}
r.unmountRes <- err
}()
// unmount
r.unmountFn = func() error {
// Shutdown the VFS
fsys.VFS.Shutdown()
if host.Unmount() {
if !waitFor(func() bool {
_, err := os.Stat(r.mntDir)
return err != nil
}) {
t.Fatalf("mountpoint %q didn't disappear after unmount - continuing anyway", r.mntDir)
}
return nil
}
return errors.New("host unmount failed")
}
// Wait for the filesystem to become ready, checking the file
// system didn't blow up before starting
select {
case err := <-r.unmountRes:
require.NoError(t, err)
case <-time.After(time.Second * 3):
}
// Wait for the mount point to be available on Windows
// On Windows the Init signal comes slightly before the mount is ready
if !waitFor(func() bool {
_, err := os.Stat(r.mntDir)
return err == nil
}) {
t.Errorf("mountpoint %q didn't became available on mount", r.mntDir)
}
r.vfs = fsys.VFS
r.isMounted = true
}
func (r *run) unmountFs(t *testing.T, f fs.Fs) {
// FIXME implement cmount
t.Skip("windows not supported yet")
var err error
for i := 0; i < 4; i++ {
err = r.unmountFn()
if err != nil {
//log.Printf("signal to umount failed - retrying: %v", err)
time.Sleep(3 * time.Second)
continue
}
break
}
require.NoError(t, err)
err = <-r.unmountRes
require.NoError(t, err)
err = r.vfs.CleanUp()
require.NoError(t, err)
r.isMounted = false
}


@@ -1,7 +1,6 @@
 // Test Cache filesystem interface
-// +build !plan9,!js
-// +build !race
+// +build !plan9
 package cache_test
@@ -18,8 +17,7 @@ func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: "TestCache:",
 		NilObject:  (*cache.Object)(nil),
-		UnimplementableFsMethods:     []string{"PublicLink", "OpenWriterAt"},
+		UnimplementableFsMethods:     []string{"PublicLink", "MergeDirs", "OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
-		SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
 	})
 }


@@ -1,6 +1,6 @@
 // Build for cache for unsupported platforms to stop go complaining
 // about "no buildable Go source files "
-// +build plan9 js
+// +build plan9
 package cache


@@ -1,5 +1,4 @@
-// +build !plan9,!js
-// +build !race
+// +build !plan9
 package cache_test


@@ -1,4 +1,4 @@
-// +build !plan9,!js
+// +build !plan9
 package cache
@@ -101,6 +101,15 @@ func (d *Directory) abs() string {
 	return cleanPath(path.Join(d.Dir, d.Name))
 }
+// parentRemote returns the absolute path parent remote
+func (d *Directory) parentRemote() string {
+	absPath := d.abs()
+	if absPath == "" {
+		return ""
+	}
+	return cleanPath(path.Dir(absPath))
+}
 // ModTime returns the cached ModTime
 func (d *Directory) ModTime(ctx context.Context) time.Time {
 	return time.Unix(0, d.CacheModTime)


@@ -1,4 +1,4 @@
-// +build !plan9,!js
+// +build !plan9
 package cache


@@ -1,4 +1,4 @@
-// +build !plan9,!js
+// +build !plan9
 package cache
@@ -24,16 +24,15 @@ const (
 type Object struct {
 	fs.Object `json:"-"`
 	ParentFs      fs.Fs     `json:"-"`        // parent fs
 	CacheFs       *Fs       `json:"-"`        // cache fs
 	Name          string    `json:"name"`     // name of the directory
 	Dir           string    `json:"dir"`      // abs path of the object
 	CacheModTime  int64     `json:"modTime"`  // modification or creation time - IsZero for unknown
 	CacheSize     int64     `json:"size"`     // size of directory and contents or -1 if unknown
 	CacheStorable bool      `json:"storable"` // says whether this object can be stored
 	CacheType     string    `json:"cacheType"`
 	CacheTs       time.Time `json:"cacheTs"`
-	cacheHashesMu sync.Mutex
 	CacheHashes map[hash.Type]string // all supported hashes cached
 	refreshMutex sync.Mutex
@@ -104,9 +103,7 @@ func (o *Object) updateData(ctx context.Context, source fs.Object) {
 	o.CacheSize = source.Size()
 	o.CacheStorable = source.Storable()
 	o.CacheTs = time.Now()
-	o.cacheHashesMu.Lock()
 	o.CacheHashes = make(map[hash.Type]string)
-	o.cacheHashesMu.Unlock()
 }
 // Fs returns its FS info
@@ -271,9 +268,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	o.CacheModTime = src.ModTime(ctx).UnixNano()
 	o.CacheSize = src.Size()
-	o.cacheHashesMu.Lock()
 	o.CacheHashes = make(map[hash.Type]string)
-	o.cacheHashesMu.Unlock()
 	o.CacheTs = time.Now()
 	o.persist()
@@ -314,12 +309,11 @@ func (o *Object) Remove(ctx context.Context) error {
 // since it might or might not be called, this is lazy loaded
 func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
 	_ = o.refresh(ctx)
-	o.cacheHashesMu.Lock()
 	if o.CacheHashes == nil {
 		o.CacheHashes = make(map[hash.Type]string)
 	}
 	cachedHash, found := o.CacheHashes[ht]
-	o.cacheHashesMu.Unlock()
 	if found {
 		return cachedHash, nil
 	}
@@ -330,9 +324,7 @@ func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
 	if err != nil {
 		return "", err
 	}
-	o.cacheHashesMu.Lock()
 	o.CacheHashes[ht] = liveHash
-	o.cacheHashesMu.Unlock()
 	o.persist()
 	fs.Debugf(o, "object hash cached: %v", liveHash)


@@ -1,4 +1,4 @@
-// +build !plan9,!js
+// +build !plan9
 package cache


@@ -1,4 +1,4 @@
-// +build !plan9,!js
+// +build !plan9
 package cache


@@ -1,4 +1,4 @@
-// +build !plan9,!js
+// +build !plan9
 package cache
@@ -16,10 +16,10 @@ import (
 	"sync"
 	"time"
+	bolt "github.com/coreos/bbolt"
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/walk"
-	bolt "go.etcd.io/bbolt"
 )
 // Constants
@@ -767,6 +767,31 @@ func (b *Persistent) iterateBuckets(buk *bolt.Bucket, bucketFn func(name string)
 	return err
 }
+func (b *Persistent) dumpRoot() string {
+	var itBuckets func(buk *bolt.Bucket) map[string]interface{}
+	itBuckets = func(buk *bolt.Bucket) map[string]interface{} {
+		m := make(map[string]interface{})
+		c := buk.Cursor()
+		for k, v := c.First(); k != nil; k, v = c.Next() {
+			if v == nil {
+				buk2 := buk.Bucket(k)
+				m[string(k)] = itBuckets(buk2)
+			} else {
+				m[string(k)] = "-"
+			}
+		}
+		return m
+	}
+	var mm map[string]interface{}
+	_ = b.db.View(func(tx *bolt.Tx) error {
+		mm = itBuckets(tx.Bucket([]byte(RootBucket)))
+		return nil
+	})
+	raw, _ := json.MarshalIndent(mm, "", "  ")
+	return string(raw)
+}
 // addPendingUpload adds a new file to the pending queue of uploads
 func (b *Persistent) addPendingUpload(destPath string, started bool) error {
 	return b.db.Update(func(tx *bolt.Tx) error {
@@ -980,6 +1005,15 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
 	})
 }
+// SetPendingUploadToStarted is a way to mark an entry as started (even if it's not already)
+// TO BE USED IN TESTING ONLY
+func (b *Persistent) SetPendingUploadToStarted(remote string) error {
+	return b.updatePendingUpload(remote, func(item *tempUploadInfo) error {
+		item.Started = true
+		return nil
+	})
+}
 // ReconcileTempUploads will recursively look for all the files in the temp directory and add them to the queue
 func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) error {
 	return b.db.Update(func(tx *bolt.Tx) error {
@@ -1027,6 +1061,19 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) erro
 	})
 }
+// PurgeTempUploads will remove all the pending uploads from the queue
+// TO BE USED IN TESTING ONLY
+func (b *Persistent) PurgeTempUploads() {
+	b.tempQueueMux.Lock()
+	defer b.tempQueueMux.Unlock()
+	_ = b.db.Update(func(tx *bolt.Tx) error {
+		_ = tx.DeleteBucket([]byte(tempBucket))
+		_, _ = tx.CreateBucketIfNotExists([]byte(tempBucket))
+		return nil
+	})
+}
 // Close should be called when the program ends gracefully
 func (b *Persistent) Close() {
 	b.cleanupMux.Lock()
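Editor's note: the dumpRoot helper added above walks the bolt bucket tree with a recursive closure and renders it as indented JSON, mapping each leaf key to "-". It is purely a debugging aid; a hedged usage sketch (the call site is illustrative, not part of the diff):

	fs.Debugf(b, "cache db contents:\n%s", b.dumpRoot())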


@@ -1,23 +0,0 @@
package cache
import bolt "go.etcd.io/bbolt"
// PurgeTempUploads will remove all the pending uploads from the queue
func (b *Persistent) PurgeTempUploads() {
b.tempQueueMux.Lock()
defer b.tempQueueMux.Unlock()
_ = b.db.Update(func(tx *bolt.Tx) error {
_ = tx.DeleteBucket([]byte(tempBucket))
_, _ = tx.CreateBucketIfNotExists([]byte(tempBucket))
return nil
})
}
// SetPendingUploadToStarted is a way to mark an entry as started (even if it's not already)
func (b *Persistent) SetPendingUploadToStarted(remote string) error {
return b.updatePendingUpload(remote, func(item *tempUploadInfo) error {
item.Started = true
return nil
})
}

File diff suppressed because it is too large


@@ -1,881 +0,0 @@
package chunker
import (
"bytes"
"context"
"flag"
"fmt"
"io/ioutil"
"path"
"regexp"
"strings"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Command line flags
var (
UploadKilobytes = flag.Int("upload-kilobytes", 0, "Upload size in Kilobytes, set this to test large uploads")
)
// test that chunking does not break large uploads
func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
t.Run(fmt.Sprintf("PutLarge%dk", kilobytes), func(t *testing.T) {
fstests.TestPutLarge(context.Background(), t, f, &fstest.Item{
ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
Path: fmt.Sprintf("chunker-upload-%dk", kilobytes),
Size: int64(kilobytes) * int64(fs.Kibi),
})
})
}
// test chunk name parser
func testChunkNameFormat(t *testing.T, f *Fs) {
saveOpt := f.opt
defer func() {
// restore original settings (f is pointer, f.opt is struct)
f.opt = saveOpt
_ = f.setChunkNameFormat(f.opt.NameFormat)
}()
assertFormat := func(pattern, wantDataFormat, wantCtrlFormat, wantNameRegexp string) {
err := f.setChunkNameFormat(pattern)
assert.NoError(t, err)
assert.Equal(t, wantDataFormat, f.dataNameFmt)
assert.Equal(t, wantCtrlFormat, f.ctrlNameFmt)
assert.Equal(t, wantNameRegexp, f.nameRegexp.String())
}
assertFormatValid := func(pattern string) {
err := f.setChunkNameFormat(pattern)
assert.NoError(t, err)
}
assertFormatInvalid := func(pattern string) {
err := f.setChunkNameFormat(pattern)
assert.Error(t, err)
}
assertMakeName := func(wantChunkName, mainName string, chunkNo int, ctrlType, xactID string) {
gotChunkName := ""
assert.NotPanics(t, func() {
gotChunkName = f.makeChunkName(mainName, chunkNo, ctrlType, xactID)
}, "makeChunkName(%q,%d,%q,%q) must not panic", mainName, chunkNo, ctrlType, xactID)
if gotChunkName != "" {
assert.Equal(t, wantChunkName, gotChunkName)
}
}
assertMakeNamePanics := func(mainName string, chunkNo int, ctrlType, xactID string) {
assert.Panics(t, func() {
_ = f.makeChunkName(mainName, chunkNo, ctrlType, xactID)
}, "makeChunkName(%q,%d,%q,%q) should panic", mainName, chunkNo, ctrlType, xactID)
}
assertParseName := func(fileName, wantMainName string, wantChunkNo int, wantCtrlType, wantXactID string) {
gotMainName, gotChunkNo, gotCtrlType, gotXactID := f.parseChunkName(fileName)
assert.Equal(t, wantMainName, gotMainName)
assert.Equal(t, wantChunkNo, gotChunkNo)
assert.Equal(t, wantCtrlType, gotCtrlType)
assert.Equal(t, wantXactID, gotXactID)
}
const newFormatSupported = false // support for patterns not starting with base name (*)
// valid formats
assertFormat(`*.rclone_chunk.###`, `%s.rclone_chunk.%03d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]{3,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*.rclone_chunk.#`, `%s.rclone_chunk.%d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*_chunk_#####`, `%s_chunk_%05d`, `%s_chunk__%s`, `^(.+?)_chunk_(?:([0-9]{5,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*-chunk-#`, `%s-chunk-%d`, `%s-chunk-_%s`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*-chunk-#-%^$()[]{}.+-!?:\`, `%s-chunk-%d-%%^$()[]{}.+-!?:\`, `%s-chunk-_%s-%%^$()[]{}.+-!?:\`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))-%\^\$\(\)\[\]\{\}\.\+-!\?:\\(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
if newFormatSupported {
assertFormat(`_*-chunk-##,`, `_%s-chunk-%02d,`, `_%s-chunk-_%s,`, `^_(.+?)-chunk-(?:([0-9]{2,})|_([a-z][a-z0-9]{2,6})),(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
}
// invalid formats
assertFormatInvalid(`chunk-#`)
assertFormatInvalid(`*-chunk`)
assertFormatInvalid(`*-*-chunk-#`)
assertFormatInvalid(`*-chunk-#-#`)
assertFormatInvalid(`#-chunk-*`)
assertFormatInvalid(`*/#`)
assertFormatValid(`*#`)
assertFormatInvalid(`**#`)
assertFormatInvalid(`#*`)
assertFormatInvalid(``)
assertFormatInvalid(`-`)
// quick tests
if newFormatSupported {
assertFormat(`part_*_#`, `part_%s_%d`, `part_%s__%s`, `^part_(.+?)_(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9][0-9a-z]{3,8})\.\.tmp_([0-9]{10,13}))?$`)
f.opt.StartFrom = 1
assertMakeName(`part_fish_1`, "fish", 0, "", "")
assertParseName(`part_fish_43`, "fish", 42, "", "")
assertMakeName(`part_fish__locks`, "fish", -2, "locks", "")
assertParseName(`part_fish__locks`, "fish", -1, "locks", "")
assertMakeName(`part_fish__x2y`, "fish", -2, "x2y", "")
assertParseName(`part_fish__x2y`, "fish", -1, "x2y", "")
assertMakeName(`part_fish_3_0004`, "fish", 2, "", "4")
assertParseName(`part_fish_4_0005`, "fish", 3, "", "0005")
assertMakeName(`part_fish__blkinfo_jj5fvo3wr`, "fish", -3, "blkinfo", "jj5fvo3wr")
assertParseName(`part_fish__blkinfo_zz9fvo3wr`, "fish", -1, "blkinfo", "zz9fvo3wr")
// old-style temporary suffix (parse only)
assertParseName(`part_fish_4..tmp_0000000011`, "fish", 3, "", "000b")
assertParseName(`part_fish__blkinfo_jj5fvo3wr`, "fish", -1, "blkinfo", "jj5fvo3wr")
}
// prepare format for long tests
assertFormat(`*.chunk.###`, `%s.chunk.%03d`, `%s.chunk._%s`, `^(.+?)\.chunk\.(?:([0-9]{3,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
f.opt.StartFrom = 2
// valid data chunks
assertMakeName(`fish.chunk.003`, "fish", 1, "", "")
assertParseName(`fish.chunk.003`, "fish", 1, "", "")
assertMakeName(`fish.chunk.021`, "fish", 19, "", "")
assertParseName(`fish.chunk.021`, "fish", 19, "", "")
// valid temporary data chunks
assertMakeName(`fish.chunk.011_4321`, "fish", 9, "", "4321")
assertParseName(`fish.chunk.011_4321`, "fish", 9, "", "4321")
assertMakeName(`fish.chunk.011_00bc`, "fish", 9, "", "00bc")
assertParseName(`fish.chunk.011_00bc`, "fish", 9, "", "00bc")
assertMakeName(`fish.chunk.1916_5jjfvo3wr`, "fish", 1914, "", "5jjfvo3wr")
assertParseName(`fish.chunk.1916_5jjfvo3wr`, "fish", 1914, "", "5jjfvo3wr")
assertMakeName(`fish.chunk.1917_zz9fvo3wr`, "fish", 1915, "", "zz9fvo3wr")
assertParseName(`fish.chunk.1917_zz9fvo3wr`, "fish", 1915, "", "zz9fvo3wr")
// valid temporary data chunks (old temporary suffix, only parse)
assertParseName(`fish.chunk.004..tmp_0000000047`, "fish", 2, "", "001b")
assertParseName(`fish.chunk.323..tmp_9994567890123`, "fish", 321, "", "3jjfvo3wr")
// parsing invalid data chunk names
assertParseName(`fish.chunk.3`, "", -1, "", "")
assertParseName(`fish.chunk.001`, "", -1, "", "")
assertParseName(`fish.chunk.21`, "", -1, "", "")
assertParseName(`fish.chunk.-21`, "", -1, "", "")
assertParseName(`fish.chunk.004abcd`, "", -1, "", "") // missing underscore delimiter
assertParseName(`fish.chunk.004__1234`, "", -1, "", "") // extra underscore delimiter
assertParseName(`fish.chunk.004_123`, "", -1, "", "") // too short temporary suffix
assertParseName(`fish.chunk.004_1234567890`, "", -1, "", "") // too long temporary suffix
assertParseName(`fish.chunk.004_-1234`, "", -1, "", "") // temporary suffix must be positive
assertParseName(`fish.chunk.004_123E`, "", -1, "", "") // uppercase not allowed
assertParseName(`fish.chunk.004_12.3`, "", -1, "", "") // punctuation not allowed
// parsing invalid data chunk names (old temporary suffix)
assertParseName(`fish.chunk.004.tmp_0000000021`, "", -1, "", "")
assertParseName(`fish.chunk.003..tmp_123456789`, "", -1, "", "")
assertParseName(`fish.chunk.003..tmp_012345678901234567890123456789`, "", -1, "", "")
assertParseName(`fish.chunk.323..tmp_12345678901234`, "", -1, "", "")
assertParseName(`fish.chunk.003..tmp_-1`, "", -1, "", "")
// valid control chunks
assertMakeName(`fish.chunk._info`, "fish", -1, "info", "")
assertMakeName(`fish.chunk._locks`, "fish", -2, "locks", "")
assertMakeName(`fish.chunk._blkinfo`, "fish", -3, "blkinfo", "")
assertMakeName(`fish.chunk._x2y`, "fish", -4, "x2y", "")
assertParseName(`fish.chunk._info`, "fish", -1, "info", "")
assertParseName(`fish.chunk._locks`, "fish", -1, "locks", "")
assertParseName(`fish.chunk._blkinfo`, "fish", -1, "blkinfo", "")
assertParseName(`fish.chunk._x2y`, "fish", -1, "x2y", "")
// valid temporary control chunks
assertMakeName(`fish.chunk._info_0001`, "fish", -1, "info", "1")
assertMakeName(`fish.chunk._locks_4321`, "fish", -2, "locks", "4321")
assertMakeName(`fish.chunk._uploads_abcd`, "fish", -3, "uploads", "abcd")
assertMakeName(`fish.chunk._blkinfo_xyzabcdef`, "fish", -4, "blkinfo", "xyzabcdef")
assertMakeName(`fish.chunk._x2y_1aaa`, "fish", -5, "x2y", "1aaa")
assertParseName(`fish.chunk._info_0001`, "fish", -1, "info", "0001")
assertParseName(`fish.chunk._locks_4321`, "fish", -1, "locks", "4321")
assertParseName(`fish.chunk._uploads_9abc`, "fish", -1, "uploads", "9abc")
assertParseName(`fish.chunk._blkinfo_xyzabcdef`, "fish", -1, "blkinfo", "xyzabcdef")
assertParseName(`fish.chunk._x2y_1aaa`, "fish", -1, "x2y", "1aaa")
// valid temporary control chunks (old temporary suffix, parse only)
assertParseName(`fish.chunk._info..tmp_0000000047`, "fish", -1, "info", "001b")
assertParseName(`fish.chunk._locks..tmp_0000054321`, "fish", -1, "locks", "15wx")
assertParseName(`fish.chunk._uploads..tmp_0000000000`, "fish", -1, "uploads", "0000")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123`, "fish", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._x2y..tmp_0000000000`, "fish", -1, "x2y", "0000")
// parsing invalid control chunk names
assertParseName(`fish.chunk.metadata`, "", -1, "", "") // must be prepended by underscore
assertParseName(`fish.chunk.info`, "", -1, "", "")
assertParseName(`fish.chunk.locks`, "", -1, "", "")
assertParseName(`fish.chunk.uploads`, "", -1, "", "")
assertParseName(`fish.chunk._os`, "", -1, "", "") // too short
assertParseName(`fish.chunk._metadata`, "", -1, "", "") // too long
assertParseName(`fish.chunk._blockinfo`, "", -1, "", "") // way too long
assertParseName(`fish.chunk._4me`, "", -1, "", "") // cannot start with digit
assertParseName(`fish.chunk._567`, "", -1, "", "") // cannot be all digits
assertParseName(`fish.chunk._me_ta`, "", -1, "", "") // punctuation not allowed
assertParseName(`fish.chunk._in-fo`, "", -1, "", "")
assertParseName(`fish.chunk._.bin`, "", -1, "", "")
assertParseName(`fish.chunk._.2xy`, "", -1, "", "")
// parsing invalid temporary control chunks
assertParseName(`fish.chunk._blkinfo1234`, "", -1, "", "") // missing underscore delimiter
assertParseName(`fish.chunk._info__1234`, "", -1, "", "") // extra underscore delimiter
assertParseName(`fish.chunk._info_123`, "", -1, "", "") // too short temporary suffix
assertParseName(`fish.chunk._info_1234567890`, "", -1, "", "") // too long temporary suffix
assertParseName(`fish.chunk._info_-1234`, "", -1, "", "") // temporary suffix must be positive
assertParseName(`fish.chunk._info_123E`, "", -1, "", "") // uppercase not allowed
assertParseName(`fish.chunk._info_12.3`, "", -1, "", "") // punctuation not allowed
assertParseName(`fish.chunk._locks..tmp_123456789`, "", -1, "", "")
assertParseName(`fish.chunk._meta..tmp_-1`, "", -1, "", "")
assertParseName(`fish.chunk._blockinfo..tmp_012345678901234567890123456789`, "", -1, "", "")
// short control chunk names: 3 letters ok, 1-2 letters not allowed
assertMakeName(`fish.chunk._ext`, "fish", -1, "ext", "")
assertParseName(`fish.chunk._int`, "fish", -1, "int", "")
assertMakeNamePanics("fish", -1, "in", "")
assertMakeNamePanics("fish", -1, "up", "4")
assertMakeNamePanics("fish", -1, "x", "")
assertMakeNamePanics("fish", -1, "c", "1z")
assertMakeName(`fish.chunk._ext_0000`, "fish", -1, "ext", "0")
assertMakeName(`fish.chunk._ext_0026`, "fish", -1, "ext", "26")
assertMakeName(`fish.chunk._int_0abc`, "fish", -1, "int", "abc")
assertMakeName(`fish.chunk._int_9xyz`, "fish", -1, "int", "9xyz")
assertMakeName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
assertMakeName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
assertParseName(`fish.chunk._ext_0000`, "fish", -1, "ext", "0000")
assertParseName(`fish.chunk._ext_0026`, "fish", -1, "ext", "0026")
assertParseName(`fish.chunk._int_0abc`, "fish", -1, "int", "0abc")
assertParseName(`fish.chunk._int_9xyz`, "fish", -1, "int", "9xyz")
assertParseName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
assertParseName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
// base file name can sometimes look like a valid chunk name
assertParseName(`fish.chunk.003.chunk.004`, "fish.chunk.003", 2, "", "")
assertParseName(`fish.chunk.003.chunk._info`, "fish.chunk.003", -1, "info", "")
assertParseName(`fish.chunk.003.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk._info.chunk.004`, "fish.chunk._info", 2, "", "")
assertParseName(`fish.chunk._info.chunk._info`, "fish.chunk._info", -1, "info", "")
assertParseName(`fish.chunk._info.chunk._info.chunk._Meta`, "", -1, "", "")
// base file name looking like a valid chunk name (old temporary suffix)
assertParseName(`fish.chunk.003.chunk.005..tmp_0000000022`, "fish.chunk.003", 3, "", "000m")
assertParseName(`fish.chunk.003.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._info.chunk.005..tmp_0000000023`, "fish.chunk._info", 3, "", "000n")
assertParseName(`fish.chunk._info.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk.003.chunk._blkinfo..tmp_9994567890123`, "fish.chunk.003", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._info.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._info", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.004`, "fish.chunk.004..tmp_0000000021", 2, "", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.005..tmp_0000000025`, "fish.chunk.004..tmp_0000000021", 3, "", "000p")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._info`, "fish.chunk.004..tmp_0000000021", -1, "info", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._blkinfo..tmp_9994567890123`, "fish.chunk.004..tmp_0000000021", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk.004`, "fish.chunk._blkinfo..tmp_9994567890123", 2, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk.005..tmp_0000000026`, "fish.chunk._blkinfo..tmp_9994567890123", 3, "", "000q")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info`, "fish.chunk._blkinfo..tmp_9994567890123", -1, "info", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._blkinfo..tmp_9994567890123", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk.004`, "fish.chunk._blkinfo..tmp_1234567890123456789", 2, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk.005..tmp_0000000022`, "fish.chunk._blkinfo..tmp_1234567890123456789", 3, "", "000m")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info`, "fish.chunk._blkinfo..tmp_1234567890123456789", -1, "info", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._blkinfo..tmp_1234567890123456789", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")
// attempts to make invalid chunk names
assertMakeNamePanics("fish", -1, "", "") // neither data nor control
assertMakeNamePanics("fish", 0, "info", "") // both data and control
assertMakeNamePanics("fish", -1, "metadata", "") // control type too long
assertMakeNamePanics("fish", -1, "blockinfo", "") // control type way too long
assertMakeNamePanics("fish", -1, "2xy", "") // first digit not allowed
assertMakeNamePanics("fish", -1, "123", "") // all digits not allowed
assertMakeNamePanics("fish", -1, "Meta", "") // only lower case letters allowed
assertMakeNamePanics("fish", -1, "in-fo", "") // punctuation not allowed
assertMakeNamePanics("fish", -1, "_info", "")
assertMakeNamePanics("fish", -1, "info_", "")
assertMakeNamePanics("fish", -2, ".bind", "")
assertMakeNamePanics("fish", -2, "bind.", "")
assertMakeNamePanics("fish", -1, "", "1") // neither data nor control
assertMakeNamePanics("fish", 0, "info", "23") // both data and control
assertMakeNamePanics("fish", -1, "metadata", "45") // control type too long
assertMakeNamePanics("fish", -1, "blockinfo", "7") // control type way too long
assertMakeNamePanics("fish", -1, "2xy", "abc") // first digit not allowed
assertMakeNamePanics("fish", -1, "123", "def") // all digits not allowed
assertMakeNamePanics("fish", -1, "Meta", "mnk") // only lower case letters allowed
assertMakeNamePanics("fish", -1, "in-fo", "xyz") // punctuation not allowed
assertMakeNamePanics("fish", -1, "_info", "5678")
assertMakeNamePanics("fish", -1, "info_", "999")
assertMakeNamePanics("fish", -2, ".bind", "0")
assertMakeNamePanics("fish", -2, "bind.", "0")
assertMakeNamePanics("fish", 0, "", "1234567890") // temporary suffix too long
assertMakeNamePanics("fish", 0, "", "123F4") // uppercase not allowed
assertMakeNamePanics("fish", 0, "", "123.") // punctuation not allowed
assertMakeNamePanics("fish", 0, "", "_123")
}
func testSmallFileInternals(t *testing.T, f *Fs) {
const dir = "small"
ctx := context.Background()
saveOpt := f.opt
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
}()
f.opt.FailHard = false
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
checkSmallFileInternals := func(obj fs.Object) {
assert.NotNil(t, obj)
o, ok := obj.(*Object)
assert.True(t, ok)
assert.NotNil(t, o)
if o == nil {
return
}
switch {
case !f.useMeta:
// If the meta format is "none", a non-chunked file (even an empty one)
// is internally a single chunk without a meta object.
assert.Nil(t, o.main)
assert.True(t, o.isComposite()) // sorry, sometimes a name is misleading
assert.Equal(t, 1, len(o.chunks))
case f.hashAll:
// Consistent hashing forces meta object on small files too
assert.NotNil(t, o.main)
assert.True(t, o.isComposite())
assert.Equal(t, 1, len(o.chunks))
default:
// normally a non-chunked file is kept in the Object's main field
assert.NotNil(t, o.main)
assert.False(t, o.isComposite())
assert.Equal(t, 0, len(o.chunks))
}
}
checkContents := func(obj fs.Object, contents string) {
assert.NotNil(t, obj)
assert.Equal(t, int64(len(contents)), obj.Size())
r, err := obj.Open(ctx)
assert.NoError(t, err)
assert.NotNil(t, r)
if r == nil {
return
}
data, err := ioutil.ReadAll(r)
assert.NoError(t, err)
assert.Equal(t, contents, string(data))
_ = r.Close()
}
checkHashsum := func(obj fs.Object) {
var ht hash.Type
switch {
case !f.hashAll:
return
case f.useMD5:
ht = hash.MD5
case f.useSHA1:
ht = hash.SHA1
default:
return
}
// even empty files must have a hashsum in consistent hashing mode
sum, err := obj.Hash(ctx, ht)
assert.NoError(t, err)
assert.NotEqual(t, sum, "")
}
checkSmallFile := func(name, contents string) {
filename := path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
_, put := fstests.PutTestContents(ctx, t, f, &item, contents, false)
assert.NotNil(t, put)
checkSmallFileInternals(put)
checkContents(put, contents)
checkHashsum(put)
// objects returned by Put and NewObject must have similar structure
obj, err := f.NewObject(ctx, filename)
assert.NoError(t, err)
assert.NotNil(t, obj)
checkSmallFileInternals(obj)
checkContents(obj, contents)
checkHashsum(obj)
_ = obj.Remove(ctx)
_ = put.Remove(ctx) // for good
}
checkSmallFile("emptyfile", "")
checkSmallFile("smallfile", "Ok")
}
func testPreventCorruption(t *testing.T, f *Fs) {
if f.opt.ChunkSize > 50 {
t.Skip("this test requires small chunks")
}
const dir = "corrupted"
ctx := context.Background()
saveOpt := f.opt
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
}()
f.opt.FailHard = true
contents := random.String(250)
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
const overlapMessage = "chunk overlap"
assertOverlapError := func(err error) {
assert.Error(t, err)
if err != nil {
assert.Contains(t, err.Error(), overlapMessage)
}
}
newFile := func(name string) fs.Object {
item := fstest.Item{Path: path.Join(dir, name), ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
return obj
}
billyObj := newFile("billy")
billyTxn := billyObj.(*Object).xactID
if f.useNoRename {
require.True(t, billyTxn != "")
} else {
require.True(t, billyTxn == "")
}
billyChunkName := func(chunkNo int) string {
return f.makeChunkName(billyObj.Remote(), chunkNo, "", billyTxn)
}
err := f.Mkdir(ctx, billyChunkName(1))
assertOverlapError(err)
_, err = f.Move(ctx, newFile("silly1"), billyChunkName(2))
assert.Error(t, err)
assert.True(t, err == fs.ErrorCantMove || (err != nil && strings.Contains(err.Error(), overlapMessage)))
_, err = f.Copy(ctx, newFile("silly2"), billyChunkName(3))
assert.Error(t, err)
assert.True(t, err == fs.ErrorCantCopy || (err != nil && strings.Contains(err.Error(), overlapMessage)))
// accessing chunks in strict mode is prohibited
f.opt.FailHard = true
billyChunk4Name := billyChunkName(4)
_, err = f.base.NewObject(ctx, billyChunk4Name)
require.NoError(t, err)
_, err = f.NewObject(ctx, billyChunk4Name)
assertOverlapError(err)
f.opt.FailHard = false
billyChunk4, err := f.NewObject(ctx, billyChunk4Name)
assert.NoError(t, err)
require.NotNil(t, billyChunk4)
f.opt.FailHard = true
_, err = f.Put(ctx, bytes.NewBufferString(contents), billyChunk4)
assertOverlapError(err)
// you can freely read chunks (if you have an object)
r, err := billyChunk4.Open(ctx)
assert.NoError(t, err)
var chunkContents []byte
assert.NotPanics(t, func() {
chunkContents, err = ioutil.ReadAll(r)
_ = r.Close()
})
assert.NoError(t, err)
assert.NotEqual(t, contents, string(chunkContents))
// but you can't change them
err = billyChunk4.Update(ctx, bytes.NewBufferString(contents), newFile("silly3"))
assertOverlapError(err)
// Remove isn't special: you can't corrupt files even if you have an object
err = billyChunk4.Remove(ctx)
assertOverlapError(err)
// create a fresh file in case billy was corrupted by the tests above
willyObj := newFile("willy")
willyTxn := willyObj.(*Object).xactID
willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", willyTxn)
f.opt.FailHard = false
willyChunk, err := f.NewObject(ctx, willyChunkName)
f.opt.FailHard = true
assert.NoError(t, err)
require.NotNil(t, willyChunk)
_, err = operations.Copy(ctx, f, willyChunk, willyChunkName, newFile("silly4"))
assertOverlapError(err)
// operations.Move will return an error when chunker's Move refuses
// to corrupt the target file, but it falls back to the copy/delete method,
// which still tries to delete the target chunk. Chunker must come to the rescue.
_, err = operations.Move(ctx, f, willyChunk, willyChunkName, newFile("silly5"))
assertOverlapError(err)
r, err = willyChunk.Open(ctx)
assert.NoError(t, err)
assert.NotPanics(t, func() {
_, err = ioutil.ReadAll(r)
_ = r.Close()
})
assert.NoError(t, err)
}
func testChunkNumberOverflow(t *testing.T, f *Fs) {
if f.opt.ChunkSize > 50 {
t.Skip("this test requires small chunks")
}
const dir = "wreaked"
const wreakNumber = 10200300
ctx := context.Background()
saveOpt := f.opt
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
}()
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
contents := random.String(100)
newFile := func(f fs.Fs, name string) (obj fs.Object, filename string, txnID string) {
filename = path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
_, obj = fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
if chunkObj, isChunkObj := obj.(*Object); isChunkObj {
txnID = chunkObj.xactID
}
return
}
f.opt.FailHard = false
file, fileName, fileTxn := newFile(f, "wreaker")
wreak, _, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", fileTxn))
f.opt.FailHard = false
fstest.CheckListingWithRoot(t, f, dir, nil, nil, f.Precision())
_, err := f.NewObject(ctx, fileName)
assert.Error(t, err)
f.opt.FailHard = true
_, err = f.List(ctx, dir)
assert.Error(t, err)
_, err = f.NewObject(ctx, fileName)
assert.Error(t, err)
f.opt.FailHard = false
_ = wreak.Remove(ctx)
_ = file.Remove(ctx)
}
func testMetadataInput(t *testing.T, f *Fs) {
const minChunkForTest = 50
if f.opt.ChunkSize < minChunkForTest {
t.Skip("this test requires chunks that fit metadata")
}
const dir = "usermeta"
ctx := context.Background()
saveOpt := f.opt
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
}()
f.opt.FailHard = false
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
putFile := func(f fs.Fs, name, contents, message string, check bool) fs.Object {
item := fstest.Item{Path: name, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
assert.NotNil(t, obj, message)
return obj
}
runSubtest := func(contents, name string) {
description := fmt.Sprintf("file with %s metadata", name)
filename := path.Join(dir, name)
require.True(t, len(contents) > 2 && len(contents) < minChunkForTest, description+" test data is correct")
part := putFile(f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
_ = putFile(f, filename, contents, "upload "+description, false)
obj, err := f.NewObject(ctx, filename)
assert.NoError(t, err, "access "+description)
assert.NotNil(t, obj)
assert.Equal(t, int64(len(contents)), obj.Size(), "size "+description)
o, ok := obj.(*Object)
assert.True(t, ok)
if o != nil {
assert.True(t, o.isComposite() && len(o.chunks) == 1, description+" is forced composite")
o = nil
}
defer func() {
_ = obj.Remove(ctx)
_ = part.Remove(ctx)
}()
r, err := obj.Open(ctx)
assert.NoError(t, err, "open "+description)
assert.NotNil(t, r, "open stream of "+description)
if err == nil && r != nil {
data, err := ioutil.ReadAll(r)
assert.NoError(t, err, "read all of "+description)
assert.Equal(t, contents, string(data), description+" contents is ok")
_ = r.Close()
}
}
metaData, err := marshalSimpleJSON(ctx, 3, 1, "", "", "")
require.NoError(t, err)
todaysMeta := string(metaData)
runSubtest(todaysMeta, "today")
pastMeta := regexp.MustCompile(`"ver":[0-9]+`).ReplaceAllLiteralString(todaysMeta, `"ver":1`)
pastMeta = regexp.MustCompile(`"size":[0-9]+`).ReplaceAllLiteralString(pastMeta, `"size":0`)
runSubtest(pastMeta, "past")
futureMeta := regexp.MustCompile(`"ver":[0-9]+`).ReplaceAllLiteralString(todaysMeta, `"ver":999`)
futureMeta = regexp.MustCompile(`"nchunks":[0-9]+`).ReplaceAllLiteralString(futureMeta, `"nchunks":0,"x":"y"`)
runSubtest(futureMeta, "future")
}
// Test that chunker refuses to change on objects with future/unknown metadata
func testFutureProof(t *testing.T, f *Fs) {
if f.opt.MetaFormat == "none" {
t.Skip("this test requires metadata support")
}
saveOpt := f.opt
ctx := context.Background()
f.opt.FailHard = true
const dir = "future"
const file = dir + "/test"
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
}()
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
putPart := func(name string, part int, data, msg string) {
if part > 0 {
name = f.makeChunkName(name, part-1, "", "")
}
item := fstest.Item{Path: name, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f.base, &item, data, true)
assert.NotNil(t, obj, msg)
}
// simulate chunked object from future
meta := `{"ver":999,"nchunks":3,"size":9,"garbage":"litter","sha1":"0707f2970043f9f7c22029482db27733deaec029"}`
putPart(file, 0, meta, "metaobject")
putPart(file, 1, "abc", "chunk1")
putPart(file, 2, "def", "chunk2")
putPart(file, 3, "ghi", "chunk3")
// List should succeed
ls, err := f.List(ctx, dir)
assert.NoError(t, err)
assert.Equal(t, 1, len(ls))
assert.Equal(t, int64(9), ls[0].Size())
// NewObject should succeed
obj, err := f.NewObject(ctx, file)
assert.NoError(t, err)
assert.Equal(t, file, obj.Remote())
assert.Equal(t, int64(9), obj.Size())
// Hash must fail
_, err = obj.Hash(ctx, hash.SHA1)
assert.Equal(t, ErrMetaUnknown, err)
// Move must fail
mobj, err := operations.Move(ctx, f, nil, file+"2", obj)
assert.Nil(t, mobj)
assert.Error(t, err)
if err != nil {
assert.Contains(t, err.Error(), "please upgrade rclone")
}
// Put must fail
oi := object.NewStaticObjectInfo(file, modTime, 3, true, nil, nil)
buf := bytes.NewBufferString("abc")
_, err = f.Put(ctx, buf, oi)
assert.Error(t, err)
// Rcat must fail
in := ioutil.NopCloser(bytes.NewBufferString("abc"))
robj, err := operations.Rcat(ctx, f, file, in, modTime)
assert.Nil(t, robj)
assert.NotNil(t, err)
if err != nil {
assert.Contains(t, err.Error(), "please upgrade rclone")
}
}
// The newer method of doing transactions without renaming should still be able
// to correctly process chunks that were created with renaming.
// If you attempt the inverse, however, the data chunks will be ignored,
// causing commands to perform incorrectly.
func testBackwardsCompatibility(t *testing.T, f *Fs) {
if !f.useMeta {
t.Skip("Can't do norename transactions without metadata")
}
const dir = "backcomp"
ctx := context.Background()
saveOpt := f.opt
saveUseNoRename := f.useNoRename
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
f.useNoRename = saveUseNoRename
}()
f.opt.ChunkSize = fs.SizeSuffix(10)
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
contents := random.String(250)
newFile := func(f fs.Fs, name string) (fs.Object, string) {
filename := path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
return obj, filename
}
f.opt.FailHard = false
f.useNoRename = false
file, fileName := newFile(f, "renamefile")
f.opt.FailHard = false
item := fstest.NewItem(fileName, contents, modTime)
var items []fstest.Item
items = append(items, item)
f.useNoRename = true
fstest.CheckListingWithRoot(t, f, dir, items, nil, f.Precision())
_, err := f.NewObject(ctx, fileName)
assert.NoError(t, err)
f.opt.FailHard = true
_, err = f.List(ctx, dir)
assert.NoError(t, err)
f.opt.FailHard = false
_ = file.Remove(ctx)
}
func testChunkerServerSideMove(t *testing.T, f *Fs) {
if !f.useMeta {
t.Skip("Can't test norename transactions without metadata")
}
ctx := context.Background()
const dir = "servermovetest"
subRemote := fmt.Sprintf("%s:%s/%s", f.Name(), f.Root(), dir)
subFs1, err := fs.NewFs(ctx, subRemote+"/subdir1")
assert.NoError(t, err)
fs1, isChunkerFs := subFs1.(*Fs)
assert.True(t, isChunkerFs)
fs1.useNoRename = false
fs1.opt.ChunkSize = fs.SizeSuffix(3)
subFs2, err := fs.NewFs(ctx, subRemote+"/subdir2")
assert.NoError(t, err)
fs2, isChunkerFs := subFs2.(*Fs)
assert.True(t, isChunkerFs)
fs2.useNoRename = true
fs2.opt.ChunkSize = fs.SizeSuffix(3)
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
item := fstest.Item{Path: "movefile", ModTime: modTime}
contents := "abcdef"
_, file := fstests.PutTestContents(ctx, t, fs1, &item, contents, true)
dstOverwritten, _ := fs2.NewObject(ctx, "movefile")
dstFile, err := operations.Move(ctx, fs2, dstOverwritten, "movefile", file)
assert.NoError(t, err)
assert.Equal(t, int64(len(contents)), dstFile.Size())
r, err := dstFile.Open(ctx)
assert.NoError(t, err)
assert.NotNil(t, r)
data, err := ioutil.ReadAll(r)
assert.NoError(t, err)
assert.Equal(t, contents, string(data))
_ = r.Close()
_ = operations.Purge(ctx, f.base, dir)
}
// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
t.Run("PutLarge", func(t *testing.T) {
if *UploadKilobytes <= 0 {
t.Skip("-upload-kilobytes is not set")
}
testPutLarge(t, f, *UploadKilobytes)
})
t.Run("ChunkNameFormat", func(t *testing.T) {
testChunkNameFormat(t, f)
})
t.Run("SmallFileInternals", func(t *testing.T) {
testSmallFileInternals(t, f)
})
t.Run("PreventCorruption", func(t *testing.T) {
testPreventCorruption(t, f)
})
t.Run("ChunkNumberOverflow", func(t *testing.T) {
testChunkNumberOverflow(t, f)
})
t.Run("MetadataInput", func(t *testing.T) {
testMetadataInput(t, f)
})
t.Run("FutureProof", func(t *testing.T) {
testFutureProof(t, f)
})
t.Run("BackwardsCompatibility", func(t *testing.T) {
testBackwardsCompatibility(t, f)
})
t.Run("ChunkerServerSideMove", func(t *testing.T) {
testChunkerServerSideMove(t, f)
})
}
var _ fstests.InternalTester = (*Fs)(nil)
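
For orientation, the naming convention these assertions exercise can be summarised with a small stand-alone parser. This is an illustrative sketch only, not chunker's actual implementation: the helper names are invented, the old "..tmp_NNN" temporary-suffix form is ignored, and it assumes the tests run with start_from = 2 (an inference from the assertions above, which would explain why stored "004" parses to chunk number 2).

// chunknames.go - hypothetical sketch, not part of rclone
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// Data chunks are "<base>.chunk.NNN[_txn]", control chunks are
// "<base>.chunk._ctrl[_txn]": ctrl is 3-7 characters starting with a
// lower-case letter (digits after the first letter are assumed),
// txn is a 4-9 character base-36 transaction ID.
var chunkNameRe = regexp.MustCompile(
	`^(.+)\.chunk\.(?:([0-9]{3,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9}))?$`)

// parseChunkName mirrors what assertParseName expects: base name,
// chunk number relative to startFrom (-1 for control chunks),
// control word and transaction ID.
func parseChunkName(name string, startFrom int) (base string, chunkNo int, ctrl, txn string, ok bool) {
	m := chunkNameRe.FindStringSubmatch(name)
	if m == nil {
		return "", -1, "", "", false
	}
	base, ctrl, txn = m[1], m[3], m[4]
	chunkNo = -1
	if m[2] != "" {
		n, err := strconv.Atoi(m[2])
		if err != nil || n < startFrom {
			return "", -1, "", "", false
		}
		chunkNo = n - startFrom
	}
	return base, chunkNo, ctrl, txn, true
}

func main() {
	fmt.Println(parseChunkName("fish.chunk.004", 2))            // "fish" 2 "" "" true
	fmt.Println(parseChunkName("fish.chunk._info", 2))          // "fish" -1 "info" "" true
	fmt.Println(parseChunkName("fish.chunk._out_jj5fvo3wr", 2)) // "fish" -1 "out" "jj5fvo3wr" true
	fmt.Println(parseChunkName("fish.chunk._Meta", 2))          // rejected: upper case control word
}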


@@ -1,58 +0,0 @@
// Test the Chunker filesystem interface
package chunker_test
import (
"flag"
"os"
"path/filepath"
"testing"
_ "github.com/rclone/rclone/backend/all" // for integration tests
"github.com/rclone/rclone/backend/chunker"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
// Command line flags
var (
// Invalid characters are not supported by some remotes, e.g. Mailru.
// We enable testing with invalid characters when -remote is not set, so
// chunker overlays a local directory, but invalid characters are disabled
// by default when -remote is set, e.g. when test_all runs backend tests.
// You can still test with invalid characters using the below flag.
UseBadChars = flag.Bool("bad-chars", false, "Set to test bad characters in file names when -remote is set")
)
// TestIntegration runs integration tests against a concrete remote
// set by the -remote flag. If the flag is not set, it creates a
// dynamic chunker overlay wrapping a local temporary directory.
func TestIntegration(t *testing.T) {
opt := fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*chunker.Object)(nil),
SkipBadWindowsCharacters: !*UseBadChars,
UnimplementableObjectMethods: []string{
"MimeType",
"GetTier",
"SetTier",
},
UnimplementableFsMethods: []string{
"PublicLink",
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"UserInfo",
"Disconnect",
},
}
if *fstest.RemoteName == "" {
name := "TestChunker"
opt.RemoteName = name + ":"
tempDir := filepath.Join(os.TempDir(), "rclone-chunker-test-standard")
opt.ExtraConfig = []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "chunker"},
{Name: name, Key: "remote", Value: tempDir},
}
}
fstests.Run(t, &opt)
}


@@ -1 +0,0 @@
test

File diff suppressed because it is too large


@@ -1,65 +0,0 @@
// Test Crypt filesystem interface
package compress
import (
"os"
"path/filepath"
"testing"
_ "github.com/rclone/rclone/backend/drive"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/swift"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
opt := fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
UnimplementableObjectMethods: []string{}}
fstests.Run(t, &opt)
}
// TestRemoteGzip tests GZIP compression
func TestRemoteGzip(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip")
name := "TestCompressGzip"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
UnimplementableObjectMethods: []string{
"GetTier",
"SetTier",
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "compress"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "compression_mode", Value: "gzip"},
},
})
}


@@ -12,14 +12,12 @@ import (
 	"strconv"
 	"strings"
 	"sync"
-	"time"
 	"unicode/utf8"
 
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/crypt/pkcs7"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
-	"github.com/rclone/rclone/lib/version"
 	"github.com/rfjakob/eme"
 	"golang.org/x/crypto/nacl/secretbox"
 	"golang.org/x/crypto/scrypt"
@@ -73,6 +71,30 @@ type ReadSeekCloser interface {
 // OpenRangeSeek opens the file handle at the offset with the limit given
 type OpenRangeSeek func(ctx context.Context, offset, limit int64) (io.ReadCloser, error)
 
+// Cipher is used to swap out the encryption implementations
+type Cipher interface {
+	// EncryptFileName encrypts a file path
+	EncryptFileName(string) string
+	// DecryptFileName decrypts a file path, returns error if decrypt was invalid
+	DecryptFileName(string) (string, error)
+	// EncryptDirName encrypts a directory path
+	EncryptDirName(string) string
+	// DecryptDirName decrypts a directory path, returns error if decrypt was invalid
+	DecryptDirName(string) (string, error)
+	// EncryptData
+	EncryptData(io.Reader) (io.Reader, error)
+	// DecryptData
+	DecryptData(io.ReadCloser) (io.ReadCloser, error)
+	// DecryptDataSeek decrypt at a given position
+	DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error)
+	// EncryptedSize calculates the size of the data when encrypted
+	EncryptedSize(int64) int64
+	// DecryptedSize calculates the size of the data when decrypted
+	DecryptedSize(int64) (int64, error)
+	// NameEncryptionMode returns the used mode for name handling
+	NameEncryptionMode() NameEncryptionMode
+}
+
 // NameEncryptionMode is the type of file name encryption in use
 type NameEncryptionMode int
@@ -114,8 +136,7 @@ func (mode NameEncryptionMode) String() (out string) {
 	return out
 }
 
-// Cipher defines an encoding and decoding cipher for the crypt backend
-type Cipher struct {
+type cipher struct {
 	dataKey   [32]byte                  // Key for secretbox
 	nameKey   [32]byte                  // 16,24 or 32 bytes
 	nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
@@ -127,8 +148,8 @@ type Cipher struct {
 }
 
 // newCipher initialises the cipher. If salt is "" then it uses a built in salt val
-func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*Cipher, error) {
-	c := &Cipher{
+func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*cipher, error) {
+	c := &cipher{
 		mode:           mode,
 		cryptoRand:     rand.Reader,
 		dirNameEncrypt: dirNameEncrypt,
@@ -149,9 +170,9 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
 // If salt is "" we use a fixed salt just to make attackers lives
 // slighty harder than using no salt.
 //
-// Note that empty password makes all 0x00 keys which is used in the
+// Note that empty passsword makes all 0x00 keys which is used in the
 // tests.
-func (c *Cipher) Key(password, salt string) (err error) {
+func (c *cipher) Key(password, salt string) (err error) {
 	const keySize = len(c.dataKey) + len(c.nameKey) + len(c.nameTweak)
 	var saltBytes = defaultSalt
 	if salt != "" {
@@ -175,18 +196,33 @@ func (c *Cipher) Key(password, salt string) (err error) {
 }
 
 // getBlock gets a block from the pool of size blockSize
-func (c *Cipher) getBlock() []byte {
+func (c *cipher) getBlock() []byte {
 	return c.buffers.Get().([]byte)
 }
 
 // putBlock returns a block to the pool of size blockSize
-func (c *Cipher) putBlock(buf []byte) {
+func (c *cipher) putBlock(buf []byte) {
 	if len(buf) != blockSize {
 		panic("bad blocksize returned to pool")
 	}
 	c.buffers.Put(buf)
 }
 
+// check to see if the byte string is valid with no control characters
+// from 0x00 to 0x1F and is a valid UTF-8 string
+func checkValidString(buf []byte) error {
+	for i := range buf {
+		c := buf[i]
+		if c >= 0x00 && c < 0x20 || c == 0x7F {
+			return ErrorBadDecryptControlChar
+		}
+	}
+	if !utf8.Valid(buf) {
+		return ErrorBadDecryptUTF8
+	}
+	return nil
+}
+
 // encodeFileName encodes a filename using a modified version of
 // standard base32 as described in RFC4648
 //
@@ -219,13 +255,13 @@ func decodeFileName(in string) ([]byte, error) {
 // 2003 paper "A Parallelizable Enciphering Mode" by Halevi and
 // Rogaway.
 //
-// This makes for deterministic encryption which is what we want - the
+// This makes for determinstic encryption which is what we want - the
 // same filename must encrypt to the same thing.
 //
 // This means that
 // * filenames with the same name will encrypt the same
 // * filenames which start the same won't have a common prefix
-func (c *Cipher) encryptSegment(plaintext string) string {
+func (c *cipher) encryptSegment(plaintext string) string {
 	if plaintext == "" {
 		return ""
 	}
@@ -235,7 +271,7 @@ func (c *Cipher) encryptSegment(plaintext string) string {
 }
 
 // decryptSegment decrypts a path segment
-func (c *Cipher) decryptSegment(ciphertext string) (string, error) {
+func (c *cipher) decryptSegment(ciphertext string) (string, error) {
 	if ciphertext == "" {
 		return "", nil
 	}
@@ -258,11 +294,15 @@ func (c *Cipher) decryptSegment(ciphertext string) (string, error) {
 	if err != nil {
 		return "", err
 	}
+	err = checkValidString(plaintext)
+	if err != nil {
+		return "", err
+	}
 	return string(plaintext), err
 }
 
 // Simple obfuscation routines
-func (c *Cipher) obfuscateSegment(plaintext string) string {
+func (c *cipher) obfuscateSegment(plaintext string) string {
 	if plaintext == "" {
 		return ""
 	}
@@ -349,7 +389,7 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
 	return result.String()
 }
 
-func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
+func (c *cipher) deobfuscateSegment(ciphertext string) (string, error) {
 	if ciphertext == "" {
 		return "", nil
 	}
@@ -436,7 +476,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
 }
 
 // encryptFileName encrypts a file path
-func (c *Cipher) encryptFileName(in string) string {
+func (c *cipher) encryptFileName(in string) string {
 	segments := strings.Split(in, "/")
 	for i := range segments {
 		// Skip directory name encryption if the user chose to
@@ -444,38 +484,17 @@ func (c *Cipher) encryptFileName(in string) string {
 		if !c.dirNameEncrypt && i != (len(segments)-1) {
 			continue
 		}
-		// Strip version string so that only the non-versioned part
-		// of the file name gets encrypted/obfuscated
-		hasVersion := false
-		var t time.Time
-		if i == (len(segments)-1) && version.Match(segments[i]) {
-			var s string
-			t, s = version.Remove(segments[i])
-			// version.Remove can fail, in which case it returns segments[i]
-			if s != segments[i] {
-				segments[i] = s
-				hasVersion = true
-			}
-		}
 		if c.mode == NameEncryptionStandard {
 			segments[i] = c.encryptSegment(segments[i])
 		} else {
 			segments[i] = c.obfuscateSegment(segments[i])
 		}
-		// Add back a version to the encrypted/obfuscated
-		// file name, if we stripped it off earlier
-		if hasVersion {
-			segments[i] = version.Add(segments[i], t)
-		}
 	}
 	return strings.Join(segments, "/")
 }
 
 // EncryptFileName encrypts a file path
-func (c *Cipher) EncryptFileName(in string) string {
+func (c *cipher) EncryptFileName(in string) string {
 	if c.mode == NameEncryptionOff {
 		return in + encryptedSuffix
 	}
@@ -483,7 +502,7 @@ func (c *Cipher) EncryptFileName(in string) string {
 }
 
 // EncryptDirName encrypts a directory path
-func (c *Cipher) EncryptDirName(in string) string {
+func (c *cipher) EncryptDirName(in string) string {
 	if c.mode == NameEncryptionOff || !c.dirNameEncrypt {
 		return in
 	}
@@ -491,7 +510,7 @@ func (c *Cipher) EncryptDirName(in string) string {
 }
 
 // decryptFileName decrypts a file path
-func (c *Cipher) decryptFileName(in string) (string, error) {
+func (c *cipher) decryptFileName(in string) (string, error) {
 	segments := strings.Split(in, "/")
 	for i := range segments {
 		var err error
@@ -500,21 +519,6 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
 		if !c.dirNameEncrypt && i != (len(segments)-1) {
 			continue
 		}
-		// Strip version string so that only the non-versioned part
-		// of the file name gets decrypted/deobfuscated
-		hasVersion := false
-		var t time.Time
-		if i == (len(segments)-1) && version.Match(segments[i]) {
-			var s string
-			t, s = version.Remove(segments[i])
-			// version.Remove can fail, in which case it returns segments[i]
-			if s != segments[i] {
-				segments[i] = s
-				hasVersion = true
-			}
-		}
 		if c.mode == NameEncryptionStandard {
 			segments[i], err = c.decryptSegment(segments[i])
 		} else {
@@ -524,46 +528,31 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
 		if err != nil {
 			return "", err
 		}
-		// Add back a version to the decrypted/deobfuscated
-		// file name, if we stripped it off earlier
-		if hasVersion {
-			segments[i] = version.Add(segments[i], t)
-		}
 	}
 	return strings.Join(segments, "/"), nil
 }
 
 // DecryptFileName decrypts a file path
-func (c *Cipher) DecryptFileName(in string) (string, error) {
+func (c *cipher) DecryptFileName(in string) (string, error) {
 	if c.mode == NameEncryptionOff {
 		remainingLength := len(in) - len(encryptedSuffix)
-		if remainingLength == 0 || !strings.HasSuffix(in, encryptedSuffix) {
-			return "", ErrorNotAnEncryptedFile
+		if remainingLength > 0 && strings.HasSuffix(in, encryptedSuffix) {
+			return in[:remainingLength], nil
 		}
-		decrypted := in[:remainingLength]
-		if version.Match(decrypted) {
-			_, unversioned := version.Remove(decrypted)
-			if unversioned == "" {
-				return "", ErrorNotAnEncryptedFile
-			}
-		}
-		// Leave the version string on, if it was there
-		return decrypted, nil
+		return "", ErrorNotAnEncryptedFile
 	}
 	return c.decryptFileName(in)
 }
 
 // DecryptDirName decrypts a directory path
-func (c *Cipher) DecryptDirName(in string) (string, error) {
+func (c *cipher) DecryptDirName(in string) (string, error) {
 	if c.mode == NameEncryptionOff || !c.dirNameEncrypt {
 		return in, nil
 	}
 	return c.decryptFileName(in)
 }
 
-// NameEncryptionMode returns the encryption mode in use for names
-func (c *Cipher) NameEncryptionMode() NameEncryptionMode {
+func (c *cipher) NameEncryptionMode() NameEncryptionMode {
 	return c.mode
 }
@@ -611,7 +600,7 @@ func (n *nonce) increment() {
 	n.carry(0)
 }
 
-// add a uint64 to the nonce
+// add an uint64 to the nonce
 func (n *nonce) add(x uint64) {
 	carry := uint16(0)
 	for i := 0; i < 8; i++ {
@@ -631,7 +620,7 @@ func (n *nonce) add(x uint64) {
 type encrypter struct {
 	mu      sync.Mutex
 	in      io.Reader
-	c       *Cipher
+	c       *cipher
 	nonce   nonce
 	buf     []byte
 	readBuf []byte
@@ -641,7 +630,7 @@ type encrypter struct {
 }
 
 // newEncrypter creates a new file handle encrypting on the fly
-func (c *Cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
+func (c *cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
 	fh := &encrypter{
 		in: in,
 		c:  c,
@@ -685,8 +674,11 @@ func (fh *encrypter) Read(p []byte) (n int, err error) {
 	}
 	// possibly err != nil here, but we will process the
 	// data and the next call to ReadFull will return 0, err
+	// Write nonce to start of block
+	copy(fh.buf, fh.nonce[:])
 	// Encrypt the block using the nonce
-	secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
+	block := fh.buf
+	secretbox.Seal(block[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
 	fh.bufIndex = 0
 	fh.bufSize = blockHeaderSize + n
 	fh.nonce.increment()
@@ -710,19 +702,13 @@ func (fh *encrypter) finish(err error) (int, error) {
 }
 
 // Encrypt data encrypts the data stream
-func (c *Cipher) encryptData(in io.Reader) (io.Reader, *encrypter, error) {
+func (c *cipher) EncryptData(in io.Reader) (io.Reader, error) {
 	in, wrap := accounting.UnWrap(in) // unwrap the accounting off the Reader
 	out, err := c.newEncrypter(in, nil)
 	if err != nil {
-		return nil, nil, err
+		return nil, err
 	}
-	return wrap(out), out, nil // and wrap the accounting back on
-}
-
-// EncryptData encrypts the data stream
-func (c *Cipher) EncryptData(in io.Reader) (io.Reader, error) {
-	out, _, err := c.encryptData(in)
-	return out, err
+	return wrap(out), nil // and wrap the accounting back on
 }
 
 // decrypter decrypts an io.ReaderCloser on the fly
@@ -731,7 +717,7 @@ type decrypter struct {
 	rc           io.ReadCloser
 	nonce        nonce
 	initialNonce nonce
-	c            *Cipher
+	c            *cipher
 	buf          []byte
 	readBuf      []byte
 	bufIndex     int
@@ -742,7 +728,7 @@ type decrypter struct {
 }
 
 // newDecrypter creates a new file handle decrypting on the fly
-func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
+func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
 	fh := &decrypter{
 		rc: rc,
 		c:  c,
@@ -770,7 +756,7 @@ func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
 }
 
 // newDecrypterSeek creates a new file handle decrypting on the fly
-func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
+func (c *cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
 	var rc io.ReadCloser
 	doRangeSeek := false
 	setLimit := false
@@ -831,7 +817,8 @@ func (fh *decrypter) fillBuffer() (err error) {
 		return ErrorEncryptedFileBadHeader
 	}
 	// Decrypt the block using the nonce
-	_, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
+	block := fh.buf
+	_, ok := secretbox.Open(block[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
 	if !ok {
 		if err != nil {
 			return err // return pending error as it is likely more accurate
@@ -1044,7 +1031,7 @@ func (fh *decrypter) finishAndClose(err error) error {
 }
 
 // DecryptData decrypts the data stream
-func (c *Cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
+func (c *cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
 	out, err := c.newDecrypter(rc)
 	if err != nil {
 		return nil, err
@@ -1057,7 +1044,7 @@ func (c *Cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
 // The open function must return a ReadCloser opened to the offset supplied
 //
 // You must use this form of DecryptData if you might want to Seek the file handle
-func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
+func (c *cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
	out, err := c.newDecrypterSeek(ctx, open, offset, limit)
 	if err != nil {
 		return nil, err
@@ -1066,7 +1053,7 @@ func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset
 }
 
 // EncryptedSize calculates the size of the data when encrypted
-func (c *Cipher) EncryptedSize(size int64) int64 {
+func (c *cipher) EncryptedSize(size int64) int64 {
 	blocks, residue := size/blockDataSize, size%blockDataSize
 	encryptedSize := int64(fileHeaderSize) + blocks*(blockHeaderSize+blockDataSize)
 	if residue != 0 {
@@ -1076,7 +1063,7 @@ func (c *Cipher) EncryptedSize(size int64) int64 {
 }
 
 // DecryptedSize calculates the size of the data when decrypted
-func (c *Cipher) DecryptedSize(size int64) (int64, error) {
+func (c *cipher) DecryptedSize(size int64) (int64, error) {
 	size -= int64(fileHeaderSize)
 	if size < 0 {
 		return 0, ErrorEncryptedFileTooShort
@@ -1095,6 +1082,7 @@ func (c *Cipher) DecryptedSize(size int64) (int64, error) {
 // check interfaces
 var (
+	_ Cipher         = (*cipher)(nil)
 	_ io.ReadCloser  = (*decrypter)(nil)
 	_ io.Seeker      = (*decrypter)(nil)
 	_ fs.RangeSeeker = (*decrypter)(nil)
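
The hunks above only rename the receiver from *Cipher to *cipher, so the size arithmetic itself is unchanged. As a worked example of what EncryptedSize computes, here is a self-contained sketch; the three constants are quoted from memory of cipher.go (they do not appear in this diff), so treat them as assumptions:

package main

import "fmt"

const (
	fileHeaderSize  = 32        // 8 byte magic + 24 byte nonce (assumed)
	blockHeaderSize = 16        // secretbox Poly1305 overhead per block (assumed)
	blockDataSize   = 64 * 1024 // plaintext bytes per block (assumed)
)

// encryptedSize mirrors the arithmetic in the EncryptedSize hunk above:
// a fixed file header, full blocks with per-block overhead, and one
// short block for any residue.
func encryptedSize(size int64) int64 {
	blocks, residue := size/blockDataSize, size%blockDataSize
	encryptedSize := int64(fileHeaderSize) + blocks*(blockHeaderSize+blockDataSize)
	if residue != 0 {
		encryptedSize += blockHeaderSize + residue
	}
	return encryptedSize
}

func main() {
	fmt.Println(encryptedSize(0))       // 32: header only
	fmt.Println(encryptedSize(1))       // 49: header + one 17 byte block
	fmt.Println(encryptedSize(1 << 20)) // 1048864: header + 16 full blocks
}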


@@ -12,7 +12,6 @@ import (
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/crypt/pkcs7"
-	"github.com/rclone/rclone/lib/readers"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -45,6 +44,69 @@ func TestNewNameEncryptionModeString(t *testing.T) {
 	assert.Equal(t, NameEncryptionMode(3).String(), "Unknown mode #3")
 }
 
+func TestValidString(t *testing.T) {
+	for _, test := range []struct {
+		in       string
+		expected error
+	}{
+		{"", nil},
+		{"\x01", ErrorBadDecryptControlChar},
+		{"a\x02", ErrorBadDecryptControlChar},
+		{"abc\x03", ErrorBadDecryptControlChar},
+		{"abc\x04def", ErrorBadDecryptControlChar},
+		{"\x05d", ErrorBadDecryptControlChar},
+		{"\x06def", ErrorBadDecryptControlChar},
+		{"\x07", ErrorBadDecryptControlChar},
+		{"\x08", ErrorBadDecryptControlChar},
+		{"\x09", ErrorBadDecryptControlChar},
+		{"\x0A", ErrorBadDecryptControlChar},
+		{"\x0B", ErrorBadDecryptControlChar},
+		{"\x0C", ErrorBadDecryptControlChar},
+		{"\x0D", ErrorBadDecryptControlChar},
+		{"\x0E", ErrorBadDecryptControlChar},
+		{"\x0F", ErrorBadDecryptControlChar},
+		{"\x10", ErrorBadDecryptControlChar},
+		{"\x11", ErrorBadDecryptControlChar},
+		{"\x12", ErrorBadDecryptControlChar},
+		{"\x13", ErrorBadDecryptControlChar},
+		{"\x14", ErrorBadDecryptControlChar},
+		{"\x15", ErrorBadDecryptControlChar},
+		{"\x16", ErrorBadDecryptControlChar},
+		{"\x17", ErrorBadDecryptControlChar},
+		{"\x18", ErrorBadDecryptControlChar},
+		{"\x19", ErrorBadDecryptControlChar},
+		{"\x1A", ErrorBadDecryptControlChar},
+		{"\x1B", ErrorBadDecryptControlChar},
+		{"\x1C", ErrorBadDecryptControlChar},
+		{"\x1D", ErrorBadDecryptControlChar},
+		{"\x1E", ErrorBadDecryptControlChar},
+		{"\x1F", ErrorBadDecryptControlChar},
+		{"\x20", nil},
+		{"\x7E", nil},
+		{"\x7F", ErrorBadDecryptControlChar},
+		{"£100", nil},
+		{`hello? sausage/êé/Hello, 世界/ " ' @ < > & ?/z.txt`, nil},
+		{"£100", nil},
+		// Following tests from https://secure.php.net/manual/en/reference.pcre.pattern.modifiers.php#54805
+		{"a", nil},                                        // Valid ASCII
+		{"\xc3\xb1", nil},                                 // Valid 2 Octet Sequence
+		{"\xc3\x28", ErrorBadDecryptUTF8},                 // Invalid 2 Octet Sequence
+		{"\xa0\xa1", ErrorBadDecryptUTF8},                 // Invalid Sequence Identifier
+		{"\xe2\x82\xa1", nil},                             // Valid 3 Octet Sequence
+		{"\xe2\x28\xa1", ErrorBadDecryptUTF8},             // Invalid 3 Octet Sequence (in 2nd Octet)
+		{"\xe2\x82\x28", ErrorBadDecryptUTF8},             // Invalid 3 Octet Sequence (in 3rd Octet)
+		{"\xf0\x90\x8c\xbc", nil},                         // Valid 4 Octet Sequence
+		{"\xf0\x28\x8c\xbc", ErrorBadDecryptUTF8},         // Invalid 4 Octet Sequence (in 2nd Octet)
+		{"\xf0\x90\x28\xbc", ErrorBadDecryptUTF8},         // Invalid 4 Octet Sequence (in 3rd Octet)
+		{"\xf0\x28\x8c\x28", ErrorBadDecryptUTF8},         // Invalid 4 Octet Sequence (in 4th Octet)
+		{"\xf8\xa1\xa1\xa1\xa1", ErrorBadDecryptUTF8},     // Valid 5 Octet Sequence (but not Unicode!)
+		{"\xfc\xa1\xa1\xa1\xa1\xa1", ErrorBadDecryptUTF8}, // Valid 6 Octet Sequence (but not Unicode!)
+	} {
+		actual := checkValidString([]byte(test.in))
+		assert.Equal(t, actual, test.expected, fmt.Sprintf("in=%q", test.in))
+	}
+}
+
 func TestEncodeFileName(t *testing.T) {
 	for _, test := range []struct {
 		in string
@@ -148,6 +210,8 @@ func TestDecryptSegment(t *testing.T) {
 		{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
 		{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
 		{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
+		{c.encryptSegment("\x01"), ErrorBadDecryptControlChar},
+		{c.encryptSegment("\xc3\x28"), ErrorBadDecryptUTF8},
 	} {
 		actual, actualErr := c.decryptSegment(test.in)
 		assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
@@ -160,29 +224,22 @@ func TestEncryptFileName(t *testing.T) {
 	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
 	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
 	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
-	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123"))
-	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
 	// Standard mode with directory name encryption off
 	c, _ = newCipher(NameEncryptionStandard, "", "", false)
 	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
 	assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
 	assert.Equal(t, "1/12/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
-	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123"))
-	assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
 	// Now off mode
 	c, _ = newCipher(NameEncryptionOff, "", "", true)
 	assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
 	// Obfuscation mode
 	c, _ = newCipher(NameEncryptionObfuscated, "", "", true)
 	assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
-	assert.Equal(t, "49.6/99.23/150.890/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
-	assert.Equal(t, "49.6/99.23/150.890/162.uryyB-v2001-02-03-040506-123.GKG", c.EncryptFileName("1/12/123/hello-v2001-02-03-040506-123.txt"))
 	assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
 	assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
 	// Obfuscation mode with directory name encryption off
 	c, _ = newCipher(NameEncryptionObfuscated, "", "", false)
 	assert.Equal(t, "1/12/123/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
-	assert.Equal(t, "1/12/123/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
 	assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
 	assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
 }
@@ -201,19 +258,14 @@ func TestDecryptFileName(t *testing.T) {
 		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
 		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
 		{NameEncryptionStandard, false, "1/12/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
-		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", "1-v2001-02-03-040506-123", nil},
 		{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
 		{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
 		{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
-		{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
-		{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
-		{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
 		{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
 		{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
 		{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
 		{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
 		{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
-		{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
 	} {
 		c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt)
 		actual, actualErr := c.DecryptFileName(test.in)
@@ -653,16 +705,16 @@ var (
 
 // Test test infrastructure first!
 func TestRandomSource(t *testing.T) {
-	source := newRandomSource(1e8)
-	sink := newRandomSource(1e8)
+	source := newRandomSource(1E8)
+	sink := newRandomSource(1E8)
 	n, err := io.Copy(sink, source)
 	assert.NoError(t, err)
-	assert.Equal(t, int64(1e8), n)
+	assert.Equal(t, int64(1E8), n)
 
-	source = newRandomSource(1e8)
+	source = newRandomSource(1E8)
 	buf := make([]byte, 16)
 	_, _ = source.Read(buf)
-	sink = newRandomSource(1e8)
+	sink = newRandomSource(1E8)
 	_, err = io.Copy(sink, source)
 	assert.Error(t, err, "Error in stream")
 }
@@ -702,23 +754,23 @@ func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
 }
 
 func TestEncryptDecrypt1(t *testing.T) {
-	testEncryptDecrypt(t, 1, 1e7)
+	testEncryptDecrypt(t, 1, 1E7)
 }
 
 func TestEncryptDecrypt32(t *testing.T) {
-	testEncryptDecrypt(t, 32, 1e8)
+	testEncryptDecrypt(t, 32, 1E8)
 }
 
 func TestEncryptDecrypt4096(t *testing.T) {
-	testEncryptDecrypt(t, 4096, 1e8)
+	testEncryptDecrypt(t, 4096, 1E8)
 }
 
 func TestEncryptDecrypt65536(t *testing.T) {
-	testEncryptDecrypt(t, 65536, 1e8)
+	testEncryptDecrypt(t, 65536, 1E8)
 }
 
 func TestEncryptDecrypt65537(t *testing.T) {
-	testEncryptDecrypt(t, 65537, 1e8)
+	testEncryptDecrypt(t, 65537, 1E8)
 }
 
 var (
@@ -751,7 +803,7 @@ func TestEncryptData(t *testing.T) {
 	} {
 		c, err := newCipher(NameEncryptionStandard, "", "", true)
 		assert.NoError(t, err)
-		c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
+		c.cryptoRand = newRandomSource(1E8) // nodge the crypto rand generator
 
 		// Check encode works
 		buf := bytes.NewBuffer(test.in)
@@ -774,7 +826,7 @@ func TestEncryptData(t *testing.T) {
 func TestNewEncrypter(t *testing.T) {
 	c, err := newCipher(NameEncryptionStandard, "", "", true)
 	assert.NoError(t, err)
-	c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
+	c.cryptoRand = newRandomSource(1E8) // nodge the crypto rand generator
 
 	z := &zeroes{}
@@ -797,15 +849,23 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
 	c, err := newCipher(NameEncryptionStandard, "", "", true)
 	assert.NoError(t, err)
 
-	in := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
+	in := &errorReader{io.ErrUnexpectedEOF}
 	fh, err := c.newEncrypter(in, nil)
 	assert.NoError(t, err)
 
-	n, err := io.CopyN(ioutil.Discard, fh, 1e6)
+	n, err := io.CopyN(ioutil.Discard, fh, 1E6)
 	assert.Equal(t, io.ErrUnexpectedEOF, err)
 	assert.Equal(t, int64(32), n)
 }
 
+type errorReader struct {
+	err error
+}
+
+func (er errorReader) Read(p []byte) (n int, err error) {
+	return 0, er.err
+}
+
 type closeDetector struct {
 	io.Reader
 	closed int
@@ -825,7 +885,7 @@ func (c *closeDetector) Close() error {
 func TestNewDecrypter(t *testing.T) {
 	c, err := newCipher(NameEncryptionStandard, "", "", true)
 	assert.NoError(t, err)
-	c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
+	c.cryptoRand = newRandomSource(1E8) // nodge the crypto rand generator
 
 	cd := newCloseDetector(bytes.NewBuffer(file0))
 	fh, err := c.newDecrypter(cd)
@@ -843,7 +903,7 @@ func TestNewDecrypter(t *testing.T) {
 		assert.Equal(t, 1, cd.closed)
 	}
 
-	er := &readers.ErrorReader{Err: errors.New("potato")}
+	er := &errorReader{errors.New("potato")}
 	cd = newCloseDetector(er)
 	fh, err = c.newDecrypter(cd)
 	assert.Nil(t, fh)
@@ -869,14 +929,14 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
 	c, err := newCipher(NameEncryptionStandard, "", "", true)
 	assert.NoError(t, err)
 
-	in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
+	in2 := &errorReader{io.ErrUnexpectedEOF}
 	in1 := bytes.NewBuffer(file16)
 	in := ioutil.NopCloser(io.MultiReader(in1, in2))
 	fh, err := c.newDecrypter(in)
 	assert.NoError(t, err)
 
-	n, err := io.CopyN(ioutil.Discard, fh, 1e6)
+	n, err := io.CopyN(ioutil.Discard, fh, 1E6)
 	assert.Equal(t, io.ErrUnexpectedEOF, err)
 	assert.Equal(t, int64(16), n)
 }
@@ -941,7 +1001,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 		assert.Equal(t, 0, n)
 	}
 
-	// Now try decoding it with an open/seek
+	// Now try decoding it with a open/seek
 	for _, offset := range trials {
 		for _, limit := range limits {
 			if offset+limit > len(plaintext) {
@@ -1123,7 +1183,7 @@ func TestDecrypterRead(t *testing.T) {
 	// Test producing an error on the file on Read the underlying file
 	in1 := bytes.NewBuffer(file1)
-	in2 := &readers.ErrorReader{Err: errors.New("potato")}
+	in2 := &errorReader{errors.New("potato")}
 	in := io.MultiReader(in1, in2)
 	cd := newCloseDetector(in)
 	fh, err := c.newDecrypter(cd)

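A note on the error-injection pattern in the tests above: one side of this diff uses the shared readers.ErrorReader helper while the other carries a private errorReader type, but both exist to prove that the encrypter and decrypter surface an underlying stream failure after emitting only the file header. A minimal standalone sketch of the same idea using just the standard library (all names here are illustrative, not from the rclone source):

package main

import (
    "bytes"
    "errors"
    "fmt"
    "io"
    "io/ioutil"
)

// failingReader simulates a stream that dies with a fixed error.
type failingReader struct{ err error }

func (r failingReader) Read(p []byte) (int, error) { return 0, r.err }

func main() {
    // 16 good bytes followed by a hard failure, mirroring the
    // TestNewDecrypterErrUnexpectedEOF shape above.
    in := io.MultiReader(bytes.NewReader(make([]byte, 16)), failingReader{errors.New("potato")})
    n, err := io.CopyN(ioutil.Discard, in, 1e6)
    fmt.Println(n, err) // prints: 16 potato
}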

@@ -5,14 +5,12 @@ import (
    "context"
    "fmt"
    "io"
-   "path"
    "strings"
    "time"

    "github.com/pkg/errors"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/accounting"
-   "github.com/rclone/rclone/fs/cache"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/config/obscure"
@@ -27,10 +25,9 @@ func init() {
        Name:        "crypt",
        Description: "Encrypt/Decrypt a remote",
        NewFs:       NewFs,
-       CommandHelp: commandHelp,
        Options: []fs.Option{{
            Name: "remote",
-           Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
+           Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
            Required: true,
        }, {
            Name: "filename_encryption",
@@ -38,21 +35,19 @@ func init() {
            Default: "standard",
            Examples: []fs.OptionExample{
                {
-                   Value: "off",
-                   Help:  "Don't encrypt the file names. Adds a \".bin\" extension only.",
-               }, {
                    Value: "standard",
                    Help:  "Encrypt the filenames see the docs for the details.",
                }, {
                    Value: "obfuscate",
                    Help:  "Very simple filename obfuscation.",
+               }, {
+                   Value: "off",
+                   Help:  "Don't encrypt the file names. Adds a \".bin\" extension only.",
                },
            },
        }, {
            Name: "directory_name_encryption",
-           Help: `Option to either encrypt directory names or leave them intact.
-NB If filename_encryption is "off" then this option will do nothing.`,
+           Help:    "Option to either encrypt directory names or leave them intact.",
            Default: true,
            Examples: []fs.OptionExample{
                {
@@ -68,25 +63,10 @@ NB If filename_encryption is "off" then this option will do nothing.`,
            Name:       "password",
            Help:       "Password or pass phrase for encryption.",
            IsPassword: true,
-           Required:   true,
        }, {
            Name:       "password2",
            Help:       "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
            IsPassword: true,
-       }, {
-           Name:    "server_side_across_configs",
-           Default: false,
-           Help: `Allow server-side operations (e.g. copy) to work across different crypt configs.
-
-Normally this option is not what you want, but if you have two crypts
-pointing to the same backend you can use it.
-
-This can be used, for example, to change file name encryption type
-without re-uploading all the data. Just make two crypt backends
-pointing to two different directories with the single changed
-parameter and use rclone move to move the files between the crypt
-remotes.`,
-           Advanced: true,
        }, {
            Name: "show_mapping",
            Help: `For all files listed show how the names encrypt.
@@ -101,27 +81,12 @@ names, or for debugging purposes.`,
            Default:  false,
            Hide:     fs.OptionHideConfigurator,
            Advanced: true,
-       }, {
-           Name:     "no_data_encryption",
-           Help:     "Option to either encrypt file data or leave it unencrypted.",
-           Default:  false,
-           Advanced: true,
-           Examples: []fs.OptionExample{
-               {
-                   Value: "true",
-                   Help:  "Don't encrypt file data, leave it unencrypted.",
-               },
-               {
-                   Value: "false",
-                   Help:  "Encrypt file data.",
-               },
-           },
        }},
    })
}

// newCipherForConfig constructs a Cipher for the given config name
-func newCipherForConfig(opt *Options) (*Cipher, error) {
+func newCipherForConfig(opt *Options) (Cipher, error) {
    mode, err := NewNameEncryptionMode(opt.FilenameEncryption)
    if err != nil {
        return nil, err
@@ -148,7 +113,7 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
}

// NewCipher constructs a Cipher for the given config
-func NewCipher(m configmap.Mapper) (*Cipher, error) {
+func NewCipher(m configmap.Mapper) (Cipher, error) {
    // Parse config into Options struct
    opt := new(Options)
    err := configstruct.Set(m, opt)
@@ -159,7 +124,7 @@ func NewCipher(m configmap.Mapper) (*Cipher, error) {
}

// NewFs constructs an Fs from the path, container:path
-func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) {
+func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
    // Parse config into Options struct
    opt := new(Options)
    err := configstruct.Set(m, opt)
@@ -174,25 +139,20 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
    if strings.HasPrefix(remote, name+":") {
        return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
    }
-   // Make sure to remove trailing . referring to the current dir
-   if path.Base(rpath) == "." {
-       rpath = strings.TrimSuffix(rpath, ".")
-   }
+   wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
+   if err != nil {
+       return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
+   }
    // Look for a file first
-   var wrappedFs fs.Fs
-   if rpath == "" {
-       wrappedFs, err = cache.Get(ctx, remote)
-   } else {
-       remotePath := fspath.JoinRootPath(remote, cipher.EncryptFileName(rpath))
-       wrappedFs, err = cache.Get(ctx, remotePath)
-       // if that didn't produce a file, look for a directory
-       if err != fs.ErrorIsFile {
-           remotePath = fspath.JoinRootPath(remote, cipher.EncryptDirName(rpath))
-           wrappedFs, err = cache.Get(ctx, remotePath)
-       }
-   }
+   remotePath := fspath.JoinRootPath(wPath, cipher.EncryptFileName(rpath))
+   wrappedFs, err := wInfo.NewFs(wName, remotePath, wConfig)
+   // if that didn't produce a file, look for a directory
+   if err != fs.ErrorIsFile {
+       remotePath = fspath.JoinRootPath(wPath, cipher.EncryptDirName(rpath))
+       wrappedFs, err = wInfo.NewFs(wName, remotePath, wConfig)
+   }
    if err != fs.ErrorIsFile && err != nil {
-       return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remote)
+       return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", wName, remotePath)
    }
    f := &Fs{
        Fs: wrappedFs,
@@ -201,7 +161,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
        opt:    *opt,
        cipher: cipher,
    }
-   cache.PinUntilFinalized(f.Fs, f)
    // the features here are ones we could support, and they are
    // ANDed with the ones from wrappedFs
    f.features = (&fs.Features{
@@ -213,8 +172,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
        CanHaveEmptyDirectories: true,
        SetTier:                 true,
        GetTier:                 true,
-       ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
-   }).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
+   }).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)

    return f, err
}
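Both sides of the NewFs hunk above implement the same probe: encrypt rpath as a file name, try to construct the wrapped Fs, and if that attempt does not report fs.ErrorIsFile, retry with the encrypted directory name. A schematic sketch of just that control flow, with newFs standing in for whichever constructor is in use (cache.Get on one side of the diff, wInfo.NewFs on the other); the helper names are invented for illustration:

package sketch

import (
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/fspath"
)

// fileNameCipher is the subset of the cipher the probe needs.
type fileNameCipher interface {
    EncryptFileName(string) string
    EncryptDirName(string) string
}

// probeWrapped tries the encrypted file name first, then the
// encrypted directory name, exactly as crypt's NewFs does.
func probeWrapped(newFs func(string) (fs.Fs, error), c fileNameCipher, root, rpath string) (fs.Fs, error) {
    wrapped, err := newFs(fspath.JoinRootPath(root, c.EncryptFileName(rpath)))
    if err != fs.ErrorIsFile {
        // Not a single file - retry with the encrypted directory name.
        wrapped, err = newFs(fspath.JoinRootPath(root, c.EncryptDirName(rpath)))
    }
    if err != nil && err != fs.ErrorIsFile {
        return nil, err
    }
    // fs.ErrorIsFile is passed up so callers know the remote is a file.
    return wrapped, err
}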
@@ -224,10 +182,8 @@ type Options struct {
    Remote                  string `config:"remote"`
    FilenameEncryption      string `config:"filename_encryption"`
    DirectoryNameEncryption bool   `config:"directory_name_encryption"`
-   NoDataEncryption        bool   `config:"no_data_encryption"`
    Password                string `config:"password"`
    Password2               string `config:"password2"`
-   ServerSideAcrossConfigs bool   `config:"server_side_across_configs"`
    ShowMapping             bool   `config:"show_mapping"`
}
@@ -239,7 +195,7 @@ type Fs struct {
    root     string
    opt      Options
    features *fs.Features // optional features
-   cipher   *Cipher
+   cipher   Cipher
}

// Name of the remote (as passed into NewFs)
@@ -276,7 +232,7 @@ func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
    *entries = append(*entries, f.newObject(obj))
}

-// Encrypt a directory file name to entries.
+// Encrypt an directory file name to entries.
func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) {
    remote := dir.Remote()
    decryptedRemote, err := f.cipher.DecryptDirName(remote)
@@ -362,12 +318,8 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
// put implements Put or PutStream
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
-   if f.opt.NoDataEncryption {
-       return put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
-   }
    // Encrypt the data into wrappedIn
-   wrappedIn, encrypter, err := f.cipher.encryptData(in)
+   wrappedIn, err := f.cipher.EncryptData(in)
    if err != nil {
        return nil, err
    }
@@ -391,7 +343,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
    }

    // Transfer the data
-   o, err := put(ctx, wrappedIn, f.newObjectInfo(src, encrypter.nonce), options...)
+   o, err := put(ctx, wrappedIn, f.newObjectInfo(src), options...)
    if err != nil {
        return nil, err
    }
@@ -404,16 +356,13 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
        if err != nil {
            return nil, errors.Wrap(err, "failed to read destination hash")
        }
-       if srcHash != "" && dstHash != "" {
-           if srcHash != dstHash {
-               // remove object
-               err = o.Remove(ctx)
-               if err != nil {
-                   fs.Errorf(o, "Failed to remove corrupted object: %v", err)
-               }
-               return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
-           }
-           fs.Debugf(src, "%v = %s OK", ht, srcHash)
-       }
+       if srcHash != "" && dstHash != "" && srcHash != dstHash {
+           // remove object
+           err = o.Remove(ctx)
+           if err != nil {
+               fs.Errorf(o, "Failed to remove corrupted object: %v", err)
+           }
+           return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
+       }
    }
}
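Both sides of that hunk implement the same safety net: if the source and destination hashes are both known and they disagree, delete the freshly written object and fail the transfer. Stripped of the crypt specifics, the pattern reduces to the following (a hedged sketch, not the rclone API):

package sketch

import "fmt"

// verifyTransfer fails and cleans up when both hashes are known and
// disagree; unknown ("") hashes are deliberately not compared.
func verifyTransfer(srcHash, dstHash string, remove func() error) error {
    if srcHash == "" || dstHash == "" || srcHash == dstHash {
        return nil // nothing to compare, or the hashes agree
    }
    if err := remove(); err != nil {
        // Removal failure is reported, but the corruption error wins.
        fmt.Println("failed to remove corrupted object:", err)
    }
    return fmt.Errorf("corrupted on transfer: hash differs %q vs %q", srcHash, dstHash)
}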
@@ -453,21 +402,21 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    return f.Fs.Rmdir(ctx, f.cipher.EncryptDirName(dir))
}

-// Purge all files in the directory specified
+// Purge all files in the root and the root directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
-func (f *Fs) Purge(ctx context.Context, dir string) error {
+func (f *Fs) Purge(ctx context.Context) error {
    do := f.Fs.Features().Purge
    if do == nil {
        return fs.ErrorCantPurge
    }
-   return do(ctx, f.cipher.EncryptDirName(dir))
+   return do(ctx)
}

-// Copy src to this remote using server-side copy operations.
+// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
@@ -492,7 +441,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
    return f.newObject(oResult), nil
}

-// Move src to this remote using server-side move operations.
+// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
@@ -518,7 +467,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}

// DirMove moves src, srcRemote to this remote at dstRemote
-// using server-side move operations.
+// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
@@ -547,11 +496,11 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
    if do == nil {
        return nil, errors.New("can't PutUnchecked")
    }
-   wrappedIn, encrypter, err := f.cipher.encryptData(in)
+   wrappedIn, err := f.cipher.EncryptData(in)
    if err != nil {
        return nil, err
    }
-   o, err := do(ctx, wrappedIn, f.newObjectInfo(src, encrypter.nonce))
+   o, err := do(ctx, wrappedIn, f.newObjectInfo(src))
    if err != nil {
        return nil, err
    }
@@ -604,53 +553,18 @@ func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
    return f.cipher.DecryptFileName(encryptedFileName)
}

-// computeHashWithNonce takes the nonce and encrypts the contents of
-// src with it, and calculates the hash given by HashType on the fly
-//
-// Note that we break lots of encapsulation in this function.
-func (f *Fs) computeHashWithNonce(ctx context.Context, nonce nonce, src fs.Object, hashType hash.Type) (hashStr string, err error) {
-   // Open the src for input
-   in, err := src.Open(ctx)
-   if err != nil {
-       return "", errors.Wrap(err, "failed to open src")
-   }
-   defer fs.CheckClose(in, &err)
-
-   // Now encrypt the src with the nonce
-   out, err := f.cipher.newEncrypter(in, &nonce)
-   if err != nil {
-       return "", errors.Wrap(err, "failed to make encrypter")
-   }
-
-   // pipe into hash
-   m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
-   if err != nil {
-       return "", errors.Wrap(err, "failed to make hasher")
-   }
-   _, err = io.Copy(m, out)
-   if err != nil {
-       return "", errors.Wrap(err, "failed to hash data")
-   }
-
-   return m.Sums()[hashType], nil
-}
-
// ComputeHash takes the nonce from o, and encrypts the contents of
// src with it, and calculates the hash given by HashType on the fly
//
// Note that we break lots of encapsulation in this function.
func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
-   if f.opt.NoDataEncryption {
-       return src.Hash(ctx, hashType)
-   }
    // Read the nonce - opening the file is sufficient to read the nonce in
    // use a limited read so we only read the header
    in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
    if err != nil {
        return "", errors.Wrap(err, "failed to open object to read nonce")
    }
-   d, err := f.cipher.newDecrypter(in)
+   d, err := f.cipher.(*cipher).newDecrypter(in)
    if err != nil {
        _ = in.Close()
        return "", errors.Wrap(err, "failed to open object to read nonce")
@@ -675,7 +589,30 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
        return "", errors.Wrap(err, "failed to close nonce read")
    }

-   return f.computeHashWithNonce(ctx, nonce, src, hashType)
+   // Open the src for input
+   in, err = src.Open(ctx)
+   if err != nil {
+       return "", errors.Wrap(err, "failed to open src")
+   }
+   defer fs.CheckClose(in, &err)
+
+   // Now encrypt the src with the nonce
+   out, err := f.cipher.(*cipher).newEncrypter(in, &nonce)
+   if err != nil {
+       return "", errors.Wrap(err, "failed to make encrypter")
+   }
+
+   // pipe into hash
+   m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
+   if err != nil {
+       return "", errors.Wrap(err, "failed to make hasher")
+   }
+   _, err = io.Copy(m, out)
+   if err != nil {
+       return "", errors.Wrap(err, "failed to hash data")
+   }
+
+   return m.Sums()[hashType], nil
}
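Either version of ComputeHash does the same three things, whether inlined or split into a helper: recover the nonce from the remote object's header, re-encrypt the local source with that nonce, and hash the resulting ciphertext stream on the fly. The hashing step reduces to piping an encrypting reader into a hasher, roughly as below (an illustrative sketch; encrypt stands in for the cipher's newEncrypter):

package sketch

import (
    "crypto/md5"
    "fmt"
    "io"
)

// hashEncrypted re-encrypts src through encrypt and returns the MD5 of
// the ciphertext without buffering it - the same streaming shape as
// crypt's ComputeHash.
func hashEncrypted(src io.Reader, encrypt func(io.Reader) (io.Reader, error)) (string, error) {
    out, err := encrypt(src)
    if err != nil {
        return "", err
    }
    h := md5.New()
    if _, err := io.Copy(h, out); err != nil {
        return "", err
    }
    return fmt.Sprintf("%x", h.Sum(nil)), nil
}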
// MergeDirs merges the contents of all the directories passed // MergeDirs merges the contents of all the directories passed
@@ -702,7 +639,7 @@ func (f *Fs) DirCacheFlush() {
}

// PublicLink generates a public link to the remote path (usually readable by anyone)
-func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
+func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
    do := f.Fs.Features().PublicLink
    if do == nil {
        return "", errors.New("PublicLink not supported")
@@ -710,9 +647,9 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
    o, err := f.NewObject(ctx, remote)
    if err != nil {
        // assume it is a directory
-       return do(ctx, f.cipher.EncryptDirName(remote), expire, unlink)
+       return do(ctx, f.cipher.EncryptDirName(remote))
    }
-   return do(ctx, o.(*Object).Object.Remote(), expire, unlink)
+   return do(ctx, o.(*Object).Object.Remote())
}
// ChangeNotify calls the passed function with a path // ChangeNotify calls the passed function with a path
@@ -747,67 +684,6 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
    do(ctx, wrappedNotifyFunc, pollIntervalChan)
}
var commandHelp = []fs.CommandHelp{
{
Name: "encode",
Short: "Encode the given filename(s)",
Long: `This encodes the filenames given as arguments returning a list of
strings of the encoded results.
Usage Example:
rclone backend encode crypt: file1 [file2...]
rclone rc backend/command command=encode fs=crypt: file1 [file2...]
`,
},
{
Name: "decode",
Short: "Decode the given filename(s)",
Long: `This decodes the filenames given as arguments returning a list of
strings of the decoded results. It will return an error if any of the
inputs are invalid.
Usage Example:
rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
`,
},
}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "decode":
out := make([]string, 0, len(arg))
for _, encryptedFileName := range arg {
fileName, err := f.DecryptFileName(encryptedFileName)
if err != nil {
return out, errors.Wrap(err, fmt.Sprintf("Failed to decrypt : %s", encryptedFileName))
}
out = append(out, fileName)
}
return out, nil
case "encode":
out := make([]string, 0, len(arg))
for _, fileName := range arg {
encryptedFileName := f.EncryptFileName(fileName)
out = append(out, encryptedFileName)
}
return out, nil
default:
return nil, fs.ErrorCommandNotFound
}
}
// Object describes a wrapped for being read from the Fs
//
// This decrypts the remote name and decrypts the data
@@ -849,13 +725,9 @@ func (o *Object) Remote() string {
// Size returns the size of the file
func (o *Object) Size() int64 {
-   size := o.Object.Size()
-   if !o.f.opt.NoDataEncryption {
-       var err error
-       size, err = o.f.cipher.DecryptedSize(size)
-       if err != nil {
-           fs.Debugf(o, "Bad size for decrypt: %v", err)
-       }
-   }
+   size, err := o.f.cipher.DecryptedSize(o.Object.Size())
+   if err != nil {
+       fs.Debugf(o, "Bad size for decrypt: %v", err)
+   }
    return size
}
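DecryptedSize (and its inverse) is pure arithmetic over the crypt file format: a 32-byte header (8 bytes of magic plus a 24-byte nonce) followed by 64 KiB blocks, each of which carries a 16-byte Poly1305 authenticator. Restating those documented constants locally (they are assumptions here, not imports from the backend), the forward calculation looks like this:

package sketch

const (
    fileHeaderLen   = 32        // 8 byte magic + 24 byte nonce
    blockDataLen    = 64 * 1024 // plaintext bytes per encrypted block
    blockTrailerLen = 16        // Poly1305 authenticator per block
)

// encryptedSize computes the on-remote size of n plaintext bytes, so
// e.g. a 1 byte file becomes 32+16+1 = 49 bytes on the wrapped remote.
func encryptedSize(n int64) int64 {
    blocks, residue := n/blockDataLen, n%blockDataLen
    size := int64(fileHeaderLen) + blocks*(blockDataLen+blockTrailerLen)
    if residue != 0 {
        size += blockTrailerLen + residue
    }
    return size
}

DecryptedSize is the inverse of this, with error returns for sizes that cannot correspond to any whole number of blocks.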
@@ -873,10 +745,6 @@ func (o *Object) UnWrap() fs.Object {
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
-   if o.f.opt.NoDataEncryption {
-       return o.Object.Open(ctx, options...)
-   }
    var openOptions []fs.OpenOption
    var offset, limit int64 = 0, -1
    for _, option := range options {
@@ -952,30 +820,18 @@ func (f *Fs) Disconnect(ctx context.Context) error {
    return do(ctx)
}

-// Shutdown the backend, closing any background tasks and any
-// cached connections.
-func (f *Fs) Shutdown(ctx context.Context) error {
-   do := f.Fs.Features().Shutdown
-   if do == nil {
-       return nil
-   }
-   return do(ctx)
-}
-
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source
//
// This encrypts the remote name and adjusts the size
type ObjectInfo struct {
    fs.ObjectInfo
    f *Fs
-   nonce nonce
}

-func (f *Fs) newObjectInfo(src fs.ObjectInfo, nonce nonce) *ObjectInfo {
+func (f *Fs) newObjectInfo(src fs.ObjectInfo) *ObjectInfo {
    return &ObjectInfo{
        ObjectInfo: src,
        f:          f,
-       nonce:      nonce,
    }
}
@@ -1001,23 +857,6 @@ func (o *ObjectInfo) Size() int64 {
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
-   var srcObj fs.Object
-   var ok bool
-   // Get the underlying object if there is one
-   if srcObj, ok = o.ObjectInfo.(fs.Object); ok {
-       // Prefer direct interface assertion
-   } else if do, ok := o.ObjectInfo.(fs.ObjectUnWrapper); ok {
-       // Otherwise likely is an operations.OverrideRemote
-       srcObj = do.UnWrap()
-   } else {
-       return "", nil
-   }
-   // if this is wrapping a local object then we work out the hash
-   if srcObj.Fs().Features().IsLocal {
-       // Read the data and encrypt it to calculate the hash
-       fs.Debugf(o, "Computing %v hash of encrypted source", hash)
-       return o.f.computeHashWithNonce(ctx, o.nonce, srcObj, hash)
-   }
    return "", nil
}
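The deleted Hash body is an instance of a recurring rclone idiom: an fs.ObjectInfo handed to a backend may be a real fs.Object, or it may wrap one behind the fs.ObjectUnWrapper interface (as operations.OverrideRemote does). A minimal sketch of that unwrap step on its own:

package sketch

import "github.com/rclone/rclone/fs"

// underlyingObject digs the concrete fs.Object out of an fs.ObjectInfo,
// returning nil when there is nothing to unwrap.
func underlyingObject(oi fs.ObjectInfo) fs.Object {
    if o, ok := oi.(fs.Object); ok {
        return o // already an Object - prefer the direct assertion
    }
    if do, ok := oi.(fs.ObjectUnWrapper); ok {
        return do.UnWrap() // e.g. an operations.OverrideRemote
    }
    return nil
}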
@@ -1056,7 +895,6 @@ var (
    _ fs.Copier          = (*Fs)(nil)
    _ fs.Mover           = (*Fs)(nil)
    _ fs.DirMover        = (*Fs)(nil)
-   _ fs.Commander       = (*Fs)(nil)
    _ fs.PutUncheckeder  = (*Fs)(nil)
    _ fs.PutStreamer     = (*Fs)(nil)
    _ fs.CleanUpper      = (*Fs)(nil)
@@ -1070,7 +908,6 @@ var (
    _ fs.PublicLinker    = (*Fs)(nil)
    _ fs.UserInfoer      = (*Fs)(nil)
    _ fs.Disconnecter    = (*Fs)(nil)
-   _ fs.Shutdowner      = (*Fs)(nil)
    _ fs.ObjectInfo      = (*ObjectInfo)(nil)
    _ fs.Object          = (*Object)(nil)
    _ fs.ObjectUnWrapper = (*Object)(nil)


@@ -1,143 +0,0 @@
package crypt
import (
"bytes"
"context"
"crypto/md5"
"fmt"
"io"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type testWrapper struct {
fs.ObjectInfo
}
// UnWrap returns the Object that this Object is wrapping or nil if it
// isn't wrapping anything
func (o testWrapper) UnWrap() fs.Object {
if o, ok := o.ObjectInfo.(fs.Object); ok {
return o
}
return nil
}
// Create a temporary local fs to upload things from
func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
localFs, err := fs.TemporaryLocalFs(context.Background())
require.NoError(t, err)
cleanup = func() {
require.NoError(t, localFs.Rmdir(context.Background(), ""))
}
return localFs, cleanup
}
// Upload a file to a remote
func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object, cleanup func()) {
inBuf := bytes.NewBufferString(contents)
t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC)
upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil)
obj, err := f.Put(context.Background(), inBuf, upSrc)
require.NoError(t, err)
cleanup = func() {
require.NoError(t, obj.Remove(context.Background()))
}
return obj, cleanup
}
// Test the ObjectInfo
func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
var (
contents = random.String(100)
path = "hash_test_object"
ctx = context.Background()
)
if wrap {
path = "_wrap"
}
localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
obj, cleanupObj := uploadFile(t, localFs, path, contents)
defer cleanupObj()
// encrypt the data
inBuf := bytes.NewBufferString(contents)
var outBuf bytes.Buffer
enc, err := f.cipher.newEncrypter(inBuf, nil)
require.NoError(t, err)
nonce := enc.nonce // read the nonce at the start
_, err = io.Copy(&outBuf, enc)
require.NoError(t, err)
var oi fs.ObjectInfo = obj
if wrap {
// wrap the object in an fs.ObjectUnwrapper if required
oi = testWrapper{oi}
}
// wrap the object in a crypt for upload using the nonce we
// saved from the encrypter
src := f.newObjectInfo(oi, nonce)
// Test ObjectInfo methods
assert.Equal(t, int64(outBuf.Len()), src.Size())
assert.Equal(t, f, src.Fs())
assert.NotEqual(t, path, src.Remote())
// Test ObjectInfo.Hash
wantHash := md5.Sum(outBuf.Bytes())
gotHash, err := src.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, fmt.Sprintf("%x", wantHash), gotHash)
}
func testComputeHash(t *testing.T, f *Fs) {
var (
contents = random.String(100)
path = "compute_hash_test"
ctx = context.Background()
hashType = f.Fs.Hashes().GetOne()
)
if hashType == hash.None {
t.Skipf("%v: does not support hashes", f.Fs)
}
localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
// Upload a file to localFs as a test object
localObj, cleanupLocalObj := uploadFile(t, localFs, path, contents)
defer cleanupLocalObj()
// Upload the same data to the remote Fs also
remoteObj, cleanupRemoteObj := uploadFile(t, f, path, contents)
defer cleanupRemoteObj()
// Calculate the expected Hash of the remote object
computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType)
require.NoError(t, err)
// Test computed hash matches remote object hash
remoteObjHash, err := remoteObj.(*Object).Object.Hash(ctx, hashType)
require.NoError(t, err)
assert.Equal(t, remoteObjHash, computedHash)
}
// InternalTest is called by fstests.Run to extra tests
func (f *Fs) InternalTest(t *testing.T) {
t.Run("ObjectInfo", func(t *testing.T) { testObjectInfo(t, f, false) })
t.Run("ObjectInfoWrap", func(t *testing.T) { testObjectInfo(t, f, true) })
t.Run("ComputeHash", func(t *testing.T) { testComputeHash(t, f) })
}


@@ -91,26 +91,3 @@ func TestObfuscate(t *testing.T) {
        UnimplementableObjectMethods: []string{"MimeType"},
    })
}
// TestNoDataObfuscate runs integration tests against the remote
func TestNoDataObfuscate(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
name := "TestCrypt4"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*crypt.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "crypt"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
{Name: name, Key: "filename_encryption", Value: "obfuscate"},
{Name: name, Key: "no_data_encryption", Value: "true"},
},
SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}

backend/drive/drive.go: 2020 changed lines (Executable file → Normal file); file diff suppressed because it is too large


@@ -7,21 +7,15 @@ import (
    "io"
    "io/ioutil"
    "mime"
-   "os"
-   "path"
    "path/filepath"
    "strings"
    "testing"
-   "time"

    "github.com/pkg/errors"
    _ "github.com/rclone/rclone/backend/local"
    "github.com/rclone/rclone/fs"
-   "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/operations"
-   "github.com/rclone/rclone/fstest"
    "github.com/rclone/rclone/fstest/fstests"
-   "github.com/rclone/rclone/lib/random"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "google.golang.org/api/drive/v3"
@@ -111,7 +105,6 @@ func TestInternalParseExtensions(t *testing.T) {
}

func TestInternalFindExportFormat(t *testing.T) {
-   ctx := context.Background()
    item := &drive.File{
        Name:     "file",
        MimeType: "application/vnd.google-apps.document",
@@ -129,7 +122,7 @@ func TestInternalFindExportFormat(t *testing.T) {
    } {
        f := new(Fs)
        f.exportExtensions = test.extensions
-       gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(ctx, item)
+       gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(item)
        assert.Equal(t, test.wantExtension, gotExtension)
        if test.wantExtension != "" {
            assert.Equal(t, item.Name+gotExtension, gotFilename)
@@ -197,7 +190,7 @@ func (f *Fs) InternalTestDocumentImport(t *testing.T) {
    testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
    require.NoError(t, err)

-   testFilesFs, err := fs.NewFs(context.Background(), testFilesPath)
+   testFilesFs, err := fs.NewFs(testFilesPath)
    require.NoError(t, err)

    _, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
@@ -211,7 +204,7 @@ func (f *Fs) InternalTestDocumentUpdate(t *testing.T) {
    testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
    require.NoError(t, err)

-   testFilesFs, err := fs.NewFs(context.Background(), testFilesPath)
+   testFilesFs, err := fs.NewFs(testFilesPath)
    require.NoError(t, err)

    _, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
@@ -275,192 +268,6 @@ func (f *Fs) InternalTestDocumentLink(t *testing.T) {
    }
}
const (
// from fstest/fstests/fstests.go
existingDir = "hello? sausage"
existingFile = `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`
existingSubDir = "êé"
)
// TestIntegration/FsMkdir/FsPutFiles/Internal/Shortcuts
func (f *Fs) InternalTestShortcuts(t *testing.T) {
ctx := context.Background()
srcObj, err := f.NewObject(ctx, existingFile)
require.NoError(t, err)
srcHash, err := srcObj.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.NotEqual(t, "", srcHash)
t.Run("Errors", func(t *testing.T) {
_, err := f.makeShortcut(ctx, "", f, "")
assert.Error(t, err)
assert.Contains(t, err.Error(), "can't be root")
_, err = f.makeShortcut(ctx, "notfound", f, "dst")
assert.Error(t, err)
assert.Contains(t, err.Error(), "can't find source")
_, err = f.makeShortcut(ctx, existingFile, f, existingFile)
assert.Error(t, err)
assert.Contains(t, err.Error(), "not overwriting")
assert.Contains(t, err.Error(), "existing file")
_, err = f.makeShortcut(ctx, existingFile, f, existingDir)
assert.Error(t, err)
assert.Contains(t, err.Error(), "not overwriting")
assert.Contains(t, err.Error(), "existing directory")
})
t.Run("File", func(t *testing.T) {
dstObj, err := f.makeShortcut(ctx, existingFile, f, "shortcut.txt")
require.NoError(t, err)
require.NotNil(t, dstObj)
assert.Equal(t, "shortcut.txt", dstObj.Remote())
dstHash, err := dstObj.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, srcHash, dstHash)
require.NoError(t, dstObj.Remove(ctx))
})
t.Run("Dir", func(t *testing.T) {
dstObj, err := f.makeShortcut(ctx, existingDir, f, "shortcutdir")
require.NoError(t, err)
require.Nil(t, dstObj)
entries, err := f.List(ctx, "shortcutdir")
require.NoError(t, err)
require.Equal(t, 1, len(entries))
require.Equal(t, "shortcutdir/"+existingSubDir, entries[0].Remote())
require.NoError(t, f.Rmdir(ctx, "shortcutdir"))
})
t.Run("Command", func(t *testing.T) {
_, err := f.Command(ctx, "shortcut", []string{"one"}, nil)
require.Error(t, err)
require.Contains(t, err.Error(), "need exactly 2 arguments")
_, err = f.Command(ctx, "shortcut", []string{"one", "two"}, map[string]string{
"target": "doesnotexistremote:",
})
require.Error(t, err)
require.Contains(t, err.Error(), "couldn't find target")
_, err = f.Command(ctx, "shortcut", []string{"one", "two"}, map[string]string{
"target": ".",
})
require.Error(t, err)
require.Contains(t, err.Error(), "target is not a drive backend")
dstObjI, err := f.Command(ctx, "shortcut", []string{existingFile, "shortcut2.txt"}, map[string]string{
"target": fs.ConfigString(f),
})
require.NoError(t, err)
dstObj := dstObjI.(*Object)
assert.Equal(t, "shortcut2.txt", dstObj.Remote())
dstHash, err := dstObj.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, srcHash, dstHash)
require.NoError(t, dstObj.Remove(ctx))
dstObjI, err = f.Command(ctx, "shortcut", []string{existingFile, "shortcut3.txt"}, nil)
require.NoError(t, err)
dstObj = dstObjI.(*Object)
assert.Equal(t, "shortcut3.txt", dstObj.Remote())
dstHash, err = dstObj.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, srcHash, dstHash)
require.NoError(t, dstObj.Remove(ctx))
})
}
// TestIntegration/FsMkdir/FsPutFiles/Internal/UnTrash
func (f *Fs) InternalTestUnTrash(t *testing.T) {
ctx := context.Background()
// Make some objects, one in a subdir
contents := random.String(100)
file1 := fstest.NewItem("trashDir/toBeTrashed", contents, time.Now())
_, obj1 := fstests.PutTestContents(ctx, t, f, &file1, contents, false)
file2 := fstest.NewItem("trashDir/subdir/toBeTrashed", contents, time.Now())
_, _ = fstests.PutTestContents(ctx, t, f, &file2, contents, false)
// Check objects
checkObjects := func() {
fstest.CheckListingWithRoot(t, f, "trashDir", []fstest.Item{
file1,
file2,
}, []string{
"trashDir/subdir",
}, f.Precision())
}
checkObjects()
// Make sure we are using the trash
require.Equal(t, true, f.opt.UseTrash)
// Remove the object and the dir
require.NoError(t, obj1.Remove(ctx))
require.NoError(t, f.Purge(ctx, "trashDir/subdir"))
// Check objects gone
fstest.CheckListingWithRoot(t, f, "trashDir", []fstest.Item{}, []string{}, f.Precision())
// Restore the object and directory
r, err := f.unTrashDir(ctx, "trashDir", true)
require.NoError(t, err)
assert.Equal(t, unTrashResult{Errors: 0, Untrashed: 2}, r)
// Check objects restored
checkObjects()
// Remove the test dir
require.NoError(t, f.Purge(ctx, "trashDir"))
}
// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyID
func (f *Fs) InternalTestCopyID(t *testing.T) {
ctx := context.Background()
obj, err := f.NewObject(ctx, existingFile)
require.NoError(t, err)
o := obj.(*Object)
dir, err := ioutil.TempDir("", "rclone-drive-copyid-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(dir)
}()
checkFile := func(name string) {
filePath := filepath.Join(dir, name)
fi, err := os.Stat(filePath)
require.NoError(t, err)
assert.Equal(t, int64(100), fi.Size())
err = os.Remove(filePath)
require.NoError(t, err)
}
t.Run("BadID", func(t *testing.T) {
err = f.copyID(ctx, "ID-NOT-FOUND", dir+"/")
require.Error(t, err)
assert.Contains(t, err.Error(), "couldn't find id")
})
t.Run("Directory", func(t *testing.T) {
rootID, err := f.dirCache.RootID(ctx, false)
require.NoError(t, err)
err = f.copyID(ctx, rootID, dir+"/")
require.Error(t, err)
assert.Contains(t, err.Error(), "can't copy directory")
})
t.Run("WithoutDestName", func(t *testing.T) {
err = f.copyID(ctx, o.id, dir+"/")
require.NoError(t, err)
checkFile(path.Base(existingFile))
})
t.Run("WithDestName", func(t *testing.T) {
err = f.copyID(ctx, o.id, dir+"/potato.txt")
require.NoError(t, err)
checkFile("potato.txt")
})
}
func (f *Fs) InternalTest(t *testing.T) {
    // These tests all depend on each other so run them as nested tests
    t.Run("DocumentImport", func(t *testing.T) {
@@ -475,9 +282,6 @@ func (f *Fs) InternalTest(t *testing.T) {
            })
        })
    })
-   t.Run("Shortcuts", f.InternalTestShortcuts)
-   t.Run("UnTrash", f.InternalTestUnTrash)
-   t.Run("CopyID", f.InternalTestCopyID)
}

var _ fstests.InternalTester = (*Fs)(nil)


@@ -11,15 +11,15 @@
package drive

import (
-   "bytes"
-   "context"
    "encoding/json"
    "fmt"
    "io"
    "net/http"
    "net/url"
+   "regexp"
    "strconv"

+   "github.com/pkg/errors"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/lib/readers"
@@ -50,7 +50,7 @@ type resumableUpload struct {
}

// Upload the io.Reader in of size bytes with contentType and info
-func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) {
+func (f *Fs) Upload(in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) {
    params := url.Values{
        "alt":        {"json"},
        "uploadType": {"resumable"},
@@ -77,7 +77,7 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
            return false, err
        }
        var req *http.Request
-       req, err = http.NewRequestWithContext(ctx, method, urls, body)
+       req, err = http.NewRequest(method, urls, body)
        if err != nil {
            return false, err
        }
@@ -86,15 +86,13 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
        })
        req.Header.Set("Content-Type", "application/json; charset=UTF-8")
        req.Header.Set("X-Upload-Content-Type", contentType)
-       if size >= 0 {
-           req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
-       }
+       req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
        res, err = f.client.Do(req)
        if err == nil {
            defer googleapi.CloseBody(res)
            err = googleapi.CheckResponse(res)
        }
-       return f.shouldRetry(ctx, err)
+       return shouldRetry(err)
    })
    if err != nil {
        return nil, err
@@ -108,30 +106,60 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
        MediaType:     contentType,
        ContentLength: size,
    }
-   return rx.Upload(ctx)
+   return rx.Upload()
}

// Make an http.Request for the range passed in
-func (rx *resumableUpload) makeRequest(ctx context.Context, start int64, body io.ReadSeeker, reqSize int64) *http.Request {
-   req, _ := http.NewRequestWithContext(ctx, "POST", rx.URI, body)
+func (rx *resumableUpload) makeRequest(start int64, body io.ReadSeeker, reqSize int64) *http.Request {
+   req, _ := http.NewRequest("POST", rx.URI, body)
    req.ContentLength = reqSize
-   totalSize := "*"
-   if rx.ContentLength >= 0 {
-       totalSize = strconv.FormatInt(rx.ContentLength, 10)
-   }
    if reqSize != 0 {
-       req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, totalSize))
+       req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
    } else {
-       req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", totalSize))
+       req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", rx.ContentLength))
    }
    req.Header.Set("Content-Type", rx.MediaType)
    return req
}
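The only behavioural change in makeRequest is the total-size field of the Content-Range header. Google's resumable upload protocol takes "bytes start-end/total" for a data chunk and "bytes */total" for a status probe, and the newer side substitutes "*" for total while the stream length is unknown. A compact restatement of that logic with worked values (a sketch of the header math, not the rclone function itself):

package sketch

import (
    "fmt"
    "strconv"
)

// contentRange mirrors the header logic above: reqSize == 0 means a
// status probe, total < 0 means the stream length is not yet known.
func contentRange(start, reqSize, total int64) string {
    totalStr := "*"
    if total >= 0 {
        totalStr = strconv.FormatInt(total, 10)
    }
    if reqSize == 0 {
        return fmt.Sprintf("bytes */%v", totalStr)
    }
    return fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, totalStr)
}

// contentRange(0, 8<<20, 100<<20) == "bytes 0-8388607/104857600"
// contentRange(0, 8<<20, -1)      == "bytes 0-8388607/*"
// contentRange(0, 0, 100<<20)     == "bytes */104857600"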
// rangeRE matches the transfer status response from the server. $1 is
// the last byte index uploaded.
var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)
// Query drive for the amount transferred so far
//
// If error is nil, then start should be valid
func (rx *resumableUpload) transferStatus() (start int64, err error) {
req := rx.makeRequest(0, nil, 0)
res, err := rx.f.client.Do(req)
if err != nil {
return 0, err
}
defer googleapi.CloseBody(res)
if res.StatusCode == http.StatusCreated || res.StatusCode == http.StatusOK {
return rx.ContentLength, nil
}
if res.StatusCode != statusResumeIncomplete {
err = googleapi.CheckResponse(res)
if err != nil {
return 0, err
}
return 0, errors.Errorf("unexpected http return code %v", res.StatusCode)
}
Range := res.Header.Get("Range")
if m := rangeRE.FindStringSubmatch(Range); len(m) == 2 {
start, err = strconv.ParseInt(m[1], 10, 64)
if err == nil {
return start, nil
}
}
return 0, errors.Errorf("unable to parse range %q", Range)
}
// Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
-func (rx *resumableUpload) transferChunk(ctx context.Context, start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
+func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
    _, _ = chunk.Seek(0, io.SeekStart)
-   req := rx.makeRequest(ctx, start, chunk, chunkSize)
+   req := rx.makeRequest(start, chunk, chunkSize)
    res, err := rx.f.client.Do(req)
    if err != nil {
        return 599, err
@@ -164,45 +192,23 @@ func (rx *resumableUpload) transferChunk(ctx context.Context, start int64, chunk
// Upload uploads the chunks from the input
// It retries each chunk using the pacer and --low-level-retries
-func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
+func (rx *resumableUpload) Upload() (*drive.File, error) {
    start := int64(0)
    var StatusCode int
    var err error
    buf := make([]byte, int(rx.f.opt.ChunkSize))
-   for finished := false; !finished; {
-       var reqSize int64
-       var chunk io.ReadSeeker
-       if rx.ContentLength >= 0 {
-           // If size known use repeatable reader for smoother bwlimit
-           if start >= rx.ContentLength {
-               break
-           }
-           reqSize = rx.ContentLength - start
-           if reqSize >= int64(rx.f.opt.ChunkSize) {
-               reqSize = int64(rx.f.opt.ChunkSize)
-           }
-           chunk = readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
-       } else {
-           // If size unknown read into buffer
-           var n int
-           n, err = readers.ReadFill(rx.Media, buf)
-           if err == io.EOF {
-               // Send the last chunk with the correct ContentLength
-               // otherwise Google doesn't know we've finished
-               rx.ContentLength = start + int64(n)
-               finished = true
-           } else if err != nil {
-               return nil, err
-           }
-           reqSize = int64(n)
-           chunk = bytes.NewReader(buf[:reqSize])
-       }
+   for start < rx.ContentLength {
+       reqSize := rx.ContentLength - start
+       if reqSize >= int64(rx.f.opt.ChunkSize) {
+           reqSize = int64(rx.f.opt.ChunkSize)
+       }
+       chunk := readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)

        // Transfer the chunk
        err = rx.f.pacer.Call(func() (bool, error) {
            fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
-           StatusCode, err = rx.transferChunk(ctx, start, chunk, reqSize)
-           again, err := rx.f.shouldRetry(ctx, err)
+           StatusCode, err = rx.transferChunk(start, chunk, reqSize)
+           again, err := shouldRetry(err)
            if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
                again = false
                err = nil
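The branch that disappears on the right-hand side handles streams of unknown length: read up to a buffer's worth, and only when the reader reports EOF can ContentLength finally be fixed so Google knows the upload is finished. That read-until-EOF chunking is easy to get subtly wrong, so here it is in isolation (a sketch with io.ReadFull standing in for readers.ReadFill):

package sketch

import "io"

// forEachChunk feeds fn successive chunks of at most len(buf) bytes and
// sets last on the final (possibly empty) one, which is the signal the
// uploader needs in order to send the closing Content-Range.
func forEachChunk(in io.Reader, buf []byte, fn func(chunk []byte, last bool) error) error {
    for {
        n, err := io.ReadFull(in, buf)
        last := err == io.EOF || err == io.ErrUnexpectedEOF
        if err != nil && !last {
            return err
        }
        if err := fn(buf[:n], last); err != nil {
            return err
        }
        if last {
            return nil
        }
    }
}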


@@ -1,350 +0,0 @@
// This file contains the implementation of the sync batcher for uploads
//
// Dropbox rules say you can start as many batches as you want, but
// you may only have one batch being committed and must wait for the
// batch to be finished before committing another.
package dropbox
import (
"context"
"fmt"
"sync"
"time"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/async"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/atexit"
)
const (
maxBatchSize = 1000 // max size the batch can be
defaultTimeoutSync = 500 * time.Millisecond // kick off the batch if nothing added for this long (sync)
defaultTimeoutAsync = 10 * time.Second // kick off the batch if nothing added for this long (async)
defaultBatchSizeAsync = 100 // default batch size if async
)
// batcher holds info about the current items waiting for upload
type batcher struct {
f *Fs // Fs this batch is part of
mode string // configured batch mode
size int // maximum size for batch
timeout time.Duration // idle timeout for batch
async bool // whether we are using async batching
in chan batcherRequest // incoming items to batch
closed chan struct{} // close to indicate batcher shut down
atexit atexit.FnHandle // atexit handle
shutOnce sync.Once // make sure we shutdown once only
wg sync.WaitGroup // wait for shutdown
}
// batcherRequest holds an incoming request with a place for a reply
type batcherRequest struct {
commitInfo *files.UploadSessionFinishArg
result chan<- batcherResponse
}
// Return true if batcherRequest is the quit request
func (br *batcherRequest) isQuit() bool {
return br.commitInfo == nil
}
// Send this to get the engine to quit
var quitRequest = batcherRequest{}
// batcherResponse holds a response to be delivered to clients waiting
// for a batch to complete.
type batcherResponse struct {
err error
entry *files.FileMetadata
}
// newBatcher creates a new batcher structure
func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.Duration) (*batcher, error) {
// fs.Debugf(f, "Creating batcher with mode %q, size %d, timeout %v", mode, size, timeout)
if size > maxBatchSize || size < 0 {
return nil, errors.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size)
}
async := false
switch mode {
case "sync":
if size <= 0 {
ci := fs.GetConfig(ctx)
size = ci.Transfers
}
if timeout <= 0 {
timeout = defaultTimeoutSync
}
case "async":
if size <= 0 {
size = defaultBatchSizeAsync
}
if timeout <= 0 {
timeout = defaultTimeoutAsync
}
async = true
case "off":
size = 0
default:
return nil, errors.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
}
b := &batcher{
f: f,
mode: mode,
size: size,
timeout: timeout,
async: async,
in: make(chan batcherRequest, size),
closed: make(chan struct{}),
}
if b.Batching() {
b.atexit = atexit.Register(b.Shutdown)
b.wg.Add(1)
go b.commitLoop(context.Background())
}
return b, nil
}
// Batching returns true if batching is active
func (b *batcher) Batching() bool {
return b.size > 0
}
// finishBatch commits the batch, returning a batch status to poll or maybe complete
func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (batchStatus *files.UploadSessionFinishBatchLaunch, err error) {
var arg = &files.UploadSessionFinishBatchArg{
Entries: items,
}
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatch(arg)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
err = fserrors.NoRetryError(err)
return false, err
}
}
// after the first chunk is uploaded, we retry everything
return err != nil, err
})
if err != nil {
return nil, errors.Wrap(err, "batch commit failed")
}
return batchStatus, nil
}
// finishBatchJobStatus waits for the batch to complete returning completed entries
func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *files.UploadSessionFinishBatchLaunch) (complete *files.UploadSessionFinishBatchResult, err error) {
if launchBatchStatus.AsyncJobId == "" {
return nil, errors.New("wait for batch completion: empty job ID")
}
var batchStatus *files.UploadSessionFinishBatchJobStatus
sleepTime := 100 * time.Millisecond
const maxTries = 120
for try := 1; try <= maxTries; try++ {
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
AsyncJobId: launchBatchStatus.AsyncJobId,
})
return shouldRetry(ctx, err)
})
if err != nil {
fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d/%d", sleepTime, err, try, maxTries)
} else {
if batchStatus.Tag == "complete" {
return batchStatus.Complete, nil
}
fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d/%d", sleepTime, batchStatus.Tag, try, maxTries)
}
time.Sleep(sleepTime)
sleepTime *= 2
if sleepTime > time.Second {
sleepTime = time.Second
}
}
if err == nil {
err = errors.New("batch didn't complete")
}
return nil, errors.Wrapf(err, "wait for batch failed after %d tries", maxTries)
}
// commit a batch
func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
// If commit fails then signal clients if sync
var signalled = b.async
defer func() {
if err != nil && signalled {
// Signal to clients that there was an error
for _, result := range results {
result <- batcherResponse{err: err}
}
}
}()
desc := fmt.Sprintf("%s batch length %d starting with: %s", b.mode, len(items), items[0].Commit.Path)
fs.Debugf(b.f, "Committing %s", desc)
// finalise the batch getting either a result or a job id to poll
batchStatus, err := b.finishBatch(ctx, items)
if err != nil {
return err
}
// check whether batch is complete
var complete *files.UploadSessionFinishBatchResult
switch batchStatus.Tag {
case "async_job_id":
// wait for batch to complete
complete, err = b.finishBatchJobStatus(ctx, batchStatus)
if err != nil {
return err
}
case "complete":
complete = batchStatus.Complete
default:
return errors.Errorf("batch returned unknown status %q", batchStatus.Tag)
}
// Check we got the right number of entries
entries := complete.Entries
if len(entries) != len(results) {
return errors.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
}
// Report results to clients
var (
errorTag = ""
errorCount = 0
)
for i := range results {
item := entries[i]
resp := batcherResponse{}
if item.Tag == "success" {
resp.entry = item.Success
} else {
errorCount++
errorTag = item.Tag
if item.Failure != nil {
errorTag = item.Failure.Tag
if item.Failure.LookupFailed != nil {
errorTag += "/" + item.Failure.LookupFailed.Tag
}
if item.Failure.Path != nil {
errorTag += "/" + item.Failure.Path.Tag
}
if item.Failure.PropertiesError != nil {
errorTag += "/" + item.Failure.PropertiesError.Tag
}
}
resp.err = errors.Errorf("batch upload failed: %s", errorTag)
}
if !b.async {
results[i] <- resp
}
}
// Show signalled so no need to report error to clients from now on
signalled = true
// Report an error if any failed in the batch
if errorTag != "" {
return errors.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
}
fs.Debugf(b.f, "Committed %s", desc)
return nil
}
// commitLoop runs the commit engine in the background
func (b *batcher) commitLoop(ctx context.Context) {
var (
items []*files.UploadSessionFinishArg // current batch of uncommitted files
results []chan<- batcherResponse // current batch of clients awaiting results
idleTimer = time.NewTimer(b.timeout)
commit = func() {
err := b.commitBatch(ctx, items, results)
if err != nil {
fs.Errorf(b.f, "%s batch commit: failed to commit batch length %d: %v", b.mode, len(items), err)
}
items, results = nil, nil
}
)
defer b.wg.Done()
defer idleTimer.Stop()
idleTimer.Stop()
outer:
for {
select {
case req := <-b.in:
if req.isQuit() {
break outer
}
items = append(items, req.commitInfo)
results = append(results, req.result)
idleTimer.Stop()
if len(items) >= b.size {
commit()
} else {
idleTimer.Reset(b.timeout)
}
case <-idleTimer.C:
if len(items) > 0 {
fs.Debugf(b.f, "Batch idle for %v so committing", b.timeout)
commit()
}
}
}
// commit any remaining items
if len(items) > 0 {
commit()
}
}
// Shutdown finishes any pending batches then shuts everything down
//
// Can be called from atexit handler
func (b *batcher) Shutdown() {
b.shutOnce.Do(func() {
atexit.Unregister(b.atexit)
fs.Infof(b.f, "Commiting uploads - please wait...")
// show that batcher is shutting down
close(b.closed)
// quit the commitLoop by sending a quitRequest message
//
// Note that we don't close b.in because that will
// cause write to closed channel in Commit when we are
// exiting due to a signal.
b.in <- quitRequest
b.wg.Wait()
})
}
// Commit commits the file using a batch call, first adding it to the
// batch and then waiting for the batch to complete in a synchronous
// way if async is not set.
func (b *batcher) Commit(ctx context.Context, commitInfo *files.UploadSessionFinishArg) (entry *files.FileMetadata, err error) {
select {
case <-b.closed:
return nil, fserrors.FatalError(errors.New("batcher is shutting down"))
default:
}
fs.Debugf(b.f, "Adding %q to batch", commitInfo.Commit.Path)
resp := make(chan batcherResponse, 1)
b.in <- batcherRequest{
commitInfo: commitInfo,
result: resp,
}
// If running async then don't wait for the result
if b.async {
return nil, nil
}
result := <-resp
return result.entry, result.err
}
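The batcher above combines two flush triggers: a size threshold checked on every add, and an idle timer that restarts whenever an item arrives. The following stand-alone sketch shows the same pattern in miniature; all names here are illustrative, not part of rclone's API.

package main

import (
	"fmt"
	"time"
)

// batch collects items from in and flushes them when either maxSize
// items have accumulated or the batch has been idle for timeout -
// the same two triggers commitLoop uses above.
type batch struct {
	in      chan string
	maxSize int
	timeout time.Duration
}

func (b *batch) run() {
	var items []string
	idle := time.NewTimer(b.timeout)
	idle.Stop() // no idle deadline until the first item arrives
	defer idle.Stop()
	flush := func() {
		if len(items) > 0 {
			fmt.Printf("committing batch of %d items\n", len(items))
			items = nil
		}
	}
	for {
		select {
		case item, ok := <-b.in:
			if !ok {
				flush() // commit any remainder on shutdown
				return
			}
			items = append(items, item)
			idle.Stop()
			if len(items) >= b.maxSize {
				flush()
			} else {
				idle.Reset(b.timeout) // restart the idle clock
			}
		case <-idle.C:
			flush() // batch went idle - commit what we have
		}
	}
}

func main() {
	b := &batch{in: make(chan string), maxSize: 3, timeout: 100 * time.Millisecond}
	done := make(chan struct{})
	go func() { b.run(); close(done) }()
	for _, s := range []string{"a", "b", "c", "d"} {
		b.in <- s
	}
	close(b.in)
	<-done
}

Note one deliberate difference: this sketch closes its input channel to stop, whereas the batcher above sends an explicit quitRequest, precisely because a concurrent Commit may still be sending on b.in when a signal arrives and a write to a closed channel would panic.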

backend/dropbox/dropbox.go (1087 changed lines, Executable file → Normal file)

File diff suppressed because it is too large.
@@ -1,44 +0,0 @@
package dropbox
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestInternalCheckPathLength(t *testing.T) {
rep := func(n int, r rune) (out string) {
rs := make([]rune, n)
for i := range rs {
rs[i] = r
}
return string(rs)
}
for _, test := range []struct {
in string
ok bool
}{
{in: "", ok: true},
{in: rep(maxFileNameLength, 'a'), ok: true},
{in: rep(maxFileNameLength+1, 'a'), ok: false},
{in: rep(maxFileNameLength, '£'), ok: true},
{in: rep(maxFileNameLength+1, '£'), ok: false},
{in: rep(maxFileNameLength, '☺'), ok: true},
{in: rep(maxFileNameLength+1, '☺'), ok: false},
{in: rep(maxFileNameLength, '你'), ok: true},
{in: rep(maxFileNameLength+1, '你'), ok: false},
{in: "/ok/ok", ok: true},
{in: "/ok/" + rep(maxFileNameLength, 'a') + "/ok", ok: true},
{in: "/ok/" + rep(maxFileNameLength+1, 'a') + "/ok", ok: false},
{in: "/ok/" + rep(maxFileNameLength, '£') + "/ok", ok: true},
{in: "/ok/" + rep(maxFileNameLength+1, '£') + "/ok", ok: false},
{in: "/ok/" + rep(maxFileNameLength, '☺') + "/ok", ok: true},
{in: "/ok/" + rep(maxFileNameLength+1, '☺') + "/ok", ok: false},
{in: "/ok/" + rep(maxFileNameLength, '你') + "/ok", ok: true},
{in: "/ok/" + rep(maxFileNameLength+1, '你') + "/ok", ok: false},
} {
err := checkPathLength(test.in)
assert.Equal(t, test.ok, err == nil, test.in)
}
}
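The deleted test above exercises checkPathLength, which is not shown in this hunk. A minimal sketch consistent with what the test expects (each '/'-separated component limited to maxFileNameLength runes, regardless of how many bytes each rune occupies) might look like this; the real helper lives in backend/dropbox and may differ:

package dropbox

import (
	"fmt"
	"strings"
	"unicode/utf8"
)

const maxFileNameLength = 255 // assumed limit for this sketch

// checkPathLength rejects any path component longer than
// maxFileNameLength characters (runes, not bytes), which is why the
// multi-byte '£', '☺' and '你' cases pass at exactly the limit.
func checkPathLength(name string) error {
	for _, part := range strings.Split(name, "/") {
		if utf8.RuneCountInString(part) > maxFileNameLength {
			return fmt.Errorf("path component %q is too long (%d > %d characters)", part, utf8.RuneCountInString(part), maxFileNameLength)
		}
	}
	return nil
}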


@@ -4,10 +4,8 @@ import (
"context"
"io"
"net/http"
-"net/url"
"regexp"
"strconv"
-"strings"
"time"
"github.com/pkg/errors"
@@ -19,7 +17,6 @@ import (
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
-403, // Forbidden (may happen when request limit is exceeded)
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
@@ -29,69 +26,16 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
-func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
-if fserrors.ContextError(ctx, &err) {
-return false, err
-}
-// Detect this error which the integration tests provoke
-// error HTTP error 403 (403 Forbidden) returned body: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"
-//
-// https://1fichier.com/api.html
-//
-// file/ls.cgi is limited :
-//
-// Warning (can be changed in case of abuses) :
-// List all files of the account is limited to 1 request per hour.
-// List folders is limited to 5 000 results and 1 request per folder per 30s.
-if err != nil && strings.Contains(err.Error(), "Flood detected") {
-fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
-time.Sleep(30 * time.Second)
-}
+func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
var isAlphaNumeric = regexp.MustCompile(`^[a-zA-Z0-9]+$`).MatchString
-func (f *Fs) createObject(ctx context.Context, remote string) (o *Object, leaf string, directoryID string, err error) {
-// Create the directory for the object if it doesn't exist
-leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true)
-if err != nil {
-return
-}
-// Temporary Object under construction
-o = &Object{
-fs: f,
-remote: remote,
-}
-return o, leaf, directoryID, nil
-}
-func (f *Fs) readFileInfo(ctx context.Context, url string) (*File, error) {
-request := FileInfoRequest{
-URL: url,
-}
-opts := rest.Opts{
-Method: "POST",
-Path: "/file/info.cgi",
-}
-var file File
-err := f.pacer.Call(func() (bool, error) {
-resp, err := f.rest.CallJSON(ctx, &opts, &request, &file)
-return shouldRetry(ctx, resp, err)
-})
-if err != nil {
-return nil, errors.Wrap(err, "couldn't read file info")
-}
-return &file, err
-}
-func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
+func (f *Fs) getDownloadToken(url string) (*GetTokenResponse, error) {
request := DownloadRequest{
URL: url,
Single: 1,
-Pass: f.opt.FilePassword,
}
opts := rest.Opts{
Method: "POST",
@@ -100,8 +44,8 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
var token GetTokenResponse
err := f.pacer.Call(func() (bool, error) {
-resp, err := f.rest.CallJSON(ctx, &opts, &request, &token)
-return shouldRetry(ctx, resp, err)
+resp, err := f.rest.CallJSON(&opts, &request, &token)
+return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
@@ -120,22 +64,16 @@ func fileFromSharedFile(file *SharedFile) File {
func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntries, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: "https://1fichier.com/dir/",
Path: id,
Parameters: map[string][]string{"json": {"1"}},
-ContentType: "application/x-www-form-urlencoded",
-}
-if f.opt.FolderPassword != "" {
-opts.Method = "POST"
-opts.Parameters = nil
-opts.Body = strings.NewReader("json=1&pass=" + url.QueryEscape(f.opt.FolderPassword))
}
var sharedFiles SharedFolderResponse
err = f.pacer.Call(func() (bool, error) {
-resp, err := f.rest.CallJSON(ctx, &opts, nil, &sharedFiles)
-return shouldRetry(ctx, resp, err)
+resp, err := f.rest.CallJSON(&opts, nil, &sharedFiles)
+return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
@@ -150,7 +88,7 @@ func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntr
return entries, nil
}
-func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesList, err error) {
+func (f *Fs) listFiles(directoryID int) (filesList *FilesList, err error) {
// fs.Debugf(f, "Requesting files for dir `%s`", directoryID)
request := ListFilesRequest{
FolderID: directoryID,
@@ -163,21 +101,17 @@ func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesLi
filesList = &FilesList{}
err = f.pacer.Call(func() (bool, error) {
-resp, err := f.rest.CallJSON(ctx, &opts, &request, filesList)
-return shouldRetry(ctx, resp, err)
+resp, err := f.rest.CallJSON(&opts, &request, filesList)
+return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
}
-for i := range filesList.Items {
-item := &filesList.Items[i]
-item.Filename = f.opt.Enc.ToStandardName(item.Filename)
-}
return filesList, nil
}
-func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *FoldersList, err error) {
+func (f *Fs) listFolders(directoryID int) (foldersList *FoldersList, err error) {
// fs.Debugf(f, "Requesting folders for id `%s`", directoryID)
request := ListFolderRequest{
@@ -191,17 +125,12 @@ func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *Fol
foldersList = &FoldersList{}
err = f.pacer.Call(func() (bool, error) {
-resp, err := f.rest.CallJSON(ctx, &opts, &request, foldersList)
-return shouldRetry(ctx, resp, err)
+resp, err := f.rest.CallJSON(&opts, &request, foldersList)
+return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list folders")
}
-foldersList.Name = f.opt.Enc.ToStandardName(foldersList.Name)
-for i := range foldersList.SubFolders {
-folder := &foldersList.SubFolders[i]
-folder.Name = f.opt.Enc.ToStandardName(folder.Name)
-}
// fs.Debugf(f, "Got FoldersList for id `%s`", directoryID)
@@ -209,6 +138,11 @@ func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *Fol
}
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+err = f.dirCache.FindRoot(ctx, false)
+if err != nil {
+return nil, err
+}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
@@ -219,12 +153,12 @@ func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, er
return nil, err
}
-files, err := f.listFiles(ctx, folderID)
+files, err := f.listFiles(folderID)
if err != nil {
return nil, err
}
-folders, err := f.listFolders(ctx, folderID)
+folders, err := f.listFolders(folderID)
if err != nil {
return nil, err
}
@@ -232,6 +166,7 @@ func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, er
entries = make([]fs.DirEntry, len(files.Items)+len(folders.SubFolders))
for i, item := range files.Items {
+item.Filename = restoreReservedChars(item.Filename)
entries[i] = f.newObjectFromFile(ctx, dir, item)
}
@@ -241,6 +176,7 @@ func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, er
return nil, err
}
+folder.Name = restoreReservedChars(folder.Name)
fullPath := getRemote(dir, folder.Name)
folderID := strconv.Itoa(folder.ID)
@@ -269,8 +205,8 @@ func getRemote(dir, fileName string) string {
return dir + "/" + fileName
}
-func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (response *MakeFolderResponse, err error) {
-name := f.opt.Enc.FromStandardName(leaf)
+func (f *Fs) makeFolder(leaf string, folderID int) (response *MakeFolderResponse, err error) {
+name := replaceReservedChars(leaf)
// fs.Debugf(f, "Creating folder `%s` in id `%s`", name, directoryID)
request := MakeFolderRequest{
@@ -285,8 +221,8 @@ func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (respons
response = &MakeFolderResponse{}
err = f.pacer.Call(func() (bool, error) {
-resp, err := f.rest.CallJSON(ctx, &opts, &request, response)
-return shouldRetry(ctx, resp, err)
+resp, err := f.rest.CallJSON(&opts, &request, response)
+return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create folder")
@@ -297,7 +233,7 @@ func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (respons
return response, err
}
-func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (response *GenericOKResponse, err error) {
+func (f *Fs) removeFolder(name string, folderID int) (response *GenericOKResponse, err error) {
// fs.Debugf(f, "Removing folder with id `%s`", directoryID)
request := &RemoveFolderRequest{
@@ -312,14 +248,14 @@ func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (respo
response = &GenericOKResponse{}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
-resp, err = f.rest.CallJSON(ctx, &opts, request, response)
-return shouldRetry(ctx, resp, err)
+resp, err = f.rest.CallJSON(&opts, request, response)
+return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't remove folder")
}
if response.Status != "OK" {
-return nil, errors.Errorf("can't remove folder: %s", response.Message)
+return nil, errors.New("Can't remove non-empty dir")
}
// fs.Debugf(f, "Removed Folder with id `%s`", directoryID)
@@ -327,7 +263,7 @@ func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (respo
return response, nil
}
-func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKResponse, err error) {
+func (f *Fs) deleteFile(url string) (response *GenericOKResponse, err error) {
request := &RemoveFileRequest{
Files: []RmFile{
{url},
@@ -341,8 +277,8 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes
response = &GenericOKResponse{}
err = f.pacer.Call(func() (bool, error) {
-resp, err := f.rest.CallJSON(ctx, &opts, request, response)
-return shouldRetry(ctx, resp, err)
+resp, err := f.rest.CallJSON(&opts, request, response)
+return shouldRetry(resp, err)
})
if err != nil {
@@ -354,85 +290,7 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes
return response, nil
}
-func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename string) (response *MoveFileResponse, err error) {
-request := &MoveFileRequest{
-URLs: []string{url},
-FolderID: folderID,
-Rename: rename,
-}
-opts := rest.Opts{
-Method: "POST",
-Path: "/file/mv.cgi",
-}
-response = &MoveFileResponse{}
-err = f.pacer.Call(func() (bool, error) {
-resp, err := f.rest.CallJSON(ctx, &opts, request, response)
-return shouldRetry(ctx, resp, err)
-})
-if err != nil {
-return nil, errors.Wrap(err, "couldn't copy file")
-}
-return response, nil
-}
-func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename string) (response *CopyFileResponse, err error) {
-request := &CopyFileRequest{
-URLs: []string{url},
-FolderID: folderID,
-Rename: rename,
-}
-opts := rest.Opts{
-Method: "POST",
-Path: "/file/cp.cgi",
-}
-response = &CopyFileResponse{}
-err = f.pacer.Call(func() (bool, error) {
-resp, err := f.rest.CallJSON(ctx, &opts, request, response)
-return shouldRetry(ctx, resp, err)
-})
-if err != nil {
-return nil, errors.Wrap(err, "couldn't copy file")
-}
-return response, nil
-}
-func (f *Fs) renameFile(ctx context.Context, url string, newName string) (response *RenameFileResponse, err error) {
-request := &RenameFileRequest{
-URLs: []RenameFileURL{
-{
-URL: url,
-Filename: newName,
-},
-},
-}
-opts := rest.Opts{
-Method: "POST",
-Path: "/file/rename.cgi",
-}
-response = &RenameFileResponse{}
-err = f.pacer.Call(func() (bool, error) {
-resp, err := f.rest.CallJSON(ctx, &opts, request, response)
-return shouldRetry(ctx, resp, err)
-})
-if err != nil {
-return nil, errors.Wrap(err, "couldn't rename file")
-}
-return response, nil
-}
-func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse, err error) {
+func (f *Fs) getUploadNode() (response *GetUploadNodeResponse, err error) {
// fs.Debugf(f, "Requesting Upload node")
opts := rest.Opts{
@@ -443,8 +301,8 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
response = &GetUploadNodeResponse{}
err = f.pacer.Call(func() (bool, error) {
-resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
-return shouldRetry(ctx, resp, err)
+resp, err := f.rest.CallJSON(&opts, nil, response)
+return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "didnt got an upload node")
@@ -455,10 +313,10 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
return response, err
}
-func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName, folderID, uploadID, node string, options ...fs.OpenOption) (response *http.Response, err error) {
+func (f *Fs) uploadFile(in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
// fs.Debugf(f, "Uploading File `%s`", fileName)
-fileName = f.opt.Enc.FromStandardName(fileName)
+fileName = replaceReservedChars(fileName)
if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
return nil, errors.New("Invalid UploadID")
@@ -473,7 +331,6 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
NoResponse: true,
Body: in,
ContentLength: &size,
-Options: options,
MultipartContentName: "file[]",
MultipartFileName: fileName,
MultipartParams: map[string][]string{
@@ -486,8 +343,8 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
}
err = f.pacer.CallNoRetry(func() (bool, error) {
-resp, err := f.rest.CallJSON(ctx, &opts, nil, nil)
-return shouldRetry(ctx, resp, err)
+resp, err := f.rest.CallJSON(&opts, nil, nil)
+return shouldRetry(resp, err)
})
if err != nil {
@@ -499,7 +356,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
return response, err
}
-func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (response *EndFileUploadResponse, err error) {
+func (f *Fs) endUpload(uploadID string, nodeurl string) (response *EndFileUploadResponse, err error) {
// fs.Debugf(f, "Ending File Upload `%s`", uploadID)
if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
@@ -520,8 +377,8 @@ func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (re
response = &EndFileUploadResponse{}
err = f.pacer.Call(func() (bool, error) {
-resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
-return shouldRetry(ctx, resp, err)
+resp, err := f.rest.CallJSON(&opts, nil, response)
+return shouldRetry(resp, err)
})
if err != nil {


@@ -11,87 +11,49 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
-"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
-"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
const (
rootID = "0"
apiBaseURL = "https://api.1fichier.com/v1"
-minSleep = 400 * time.Millisecond // api is extremely rate limited now
+minSleep = 334 * time.Millisecond // 3 API calls per second is recommended
maxSleep = 5 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
-attackConstant = 0 // start with max sleep
)
func init() {
fs.Register(&fs.RegInfo{
Name: "fichier",
Description: "1Fichier",
-NewFs: NewFs,
-Options: []fs.Option{{
-Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
-Name: "api_key",
-}, {
-Help: "If you want to download a shared folder, add this parameter",
-Name: "shared_folder",
-Required: false,
-Advanced: true,
-}, {
-Help: "If you want to download a shared file that is password protected, add this parameter",
-Name: "file_password",
-Required: false,
-Advanced: true,
-IsPassword: true,
-}, {
-Help: "If you want to list the files in a shared folder that is password protected, add this parameter",
-Name: "folder_password",
-Required: false,
-Advanced: true,
-IsPassword: true,
-}, {
-Name: config.ConfigEncoding,
-Help: config.ConfigEncodingHelp,
-Advanced: true,
-// Characters that need escaping
-//
-// '\\': '＼', // FULLWIDTH REVERSE SOLIDUS
-// '<': '＜', // FULLWIDTH LESS-THAN SIGN
-// '>': '＞', // FULLWIDTH GREATER-THAN SIGN
-// '"': '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
-// '\'': '＇', // FULLWIDTH APOSTROPHE
-// '$': '＄', // FULLWIDTH DOLLAR SIGN
-// '`': '｀', // FULLWIDTH GRAVE ACCENT
-//
-// Leading space and trailing space
-Default: (encoder.Display |
-encoder.EncodeBackSlash |
-encoder.EncodeSingleQuote |
-encoder.EncodeBackQuote |
-encoder.EncodeDoubleQuote |
-encoder.EncodeLtGt |
-encoder.EncodeDollar |
-encoder.EncodeLeftSpace |
-encoder.EncodeRightSpace |
-encoder.EncodeInvalidUtf8),
-}},
+Config: func(name string, config configmap.Mapper) {
+},
+NewFs: NewFs,
+Options: []fs.Option{
+{
+Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
+Name: "api_key",
+},
+{
+Help: "If you want to download a shared folder, add this parameter",
+Name: "shared_folder",
+Required: false,
+Advanced: true,
+},
+},
})
}
// Options defines the configuration for this backend
type Options struct {
APIKey string `config:"api_key"`
SharedFolder string `config:"shared_folder"`
-FilePassword string `config:"file_password"`
-FolderPassword string `config:"folder_password"`
-Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs is the interface a cloud storage system must provide
@@ -99,9 +61,9 @@ type Fs struct {
root string
name string
features *fs.Features
-opt Options
dirCache *dircache.DirCache
baseClient *http.Client
+options *Options
pacer *fs.Pacer
rest *rest.Client
}
@@ -112,7 +74,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
if err != nil {
return "", false, err
}
-folders, err := f.listFolders(ctx, folderID)
+folders, err := f.listFolders(folderID)
if err != nil {
return "", false, err
}
@@ -133,7 +95,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
if err != nil {
return "", err
}
-resp, err := f.makeFolder(ctx, leaf, folderID)
+resp, err := f.makeFolder(leaf, folderID)
if err != nil {
return "", err
}
@@ -179,7 +141,8 @@ func (f *Fs) Features() *fs.Features {
//
// On Windows avoid single character remote names as they can be mixed
// up with drive letters.
-func NewFs(ctx context.Context, name string, root string, config configmap.Mapper) (fs.Fs, error) {
+func NewFs(name string, rootleaf string, config configmap.Mapper) (fs.Fs, error) {
+root := replaceReservedChars(rootleaf)
opt := new(Options)
err := configstruct.Set(config, opt)
if err != nil {
@@ -197,25 +160,26 @@ func NewFs(ctx context.Context, name string, root string, config configmap.Mappe
f := &Fs{
name: name,
root: root,
-opt: *opt,
-pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant), pacer.AttackConstant(attackConstant))),
+options: opt,
+pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
baseClient: &http.Client{},
}
f.features = (&fs.Features{
DuplicateFiles: true,
CanHaveEmptyDirectories: true,
-ReadMimeType: true,
-}).Fill(ctx, f)
+}).Fill(f)
-client := fshttp.NewClient(ctx)
+client := fshttp.NewClient(fs.Config)
f.rest = rest.NewClient(client).SetRoot(apiBaseURL)
-f.rest.SetHeader("Authorization", "Bearer "+f.opt.APIKey)
+f.rest.SetHeader("Authorization", "Bearer "+f.options.APIKey)
f.dirCache = dircache.New(root, rootID, f)
+ctx := context.Background()
// Find the current root
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
@@ -238,7 +202,7 @@ func NewFs(ctx context.Context, name string, root string, config configmap.Mappe
}
return nil, err
}
-f.features.Fill(ctx, &tempF)
+f.features.Fill(&tempF)
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/rclone/rclone/issues/2182
@@ -260,8 +224,8 @@ func NewFs(ctx context.Context, name string, root string, config configmap.Mappe
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-if f.opt.SharedFolder != "" {
-return f.listSharedFiles(ctx, f.opt.SharedFolder)
+if f.options.SharedFolder != "" {
+return f.listSharedFiles(ctx, f.options.SharedFolder)
}
dirContent, err := f.listDir(ctx, dir)
@@ -275,7 +239,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
-leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
+leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return nil, fs.ErrorObjectNotFound
@@ -287,7 +251,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if err != nil {
return nil, err
}
-files, err := f.listFiles(ctx, folderID)
+files, err := f.listFiles(folderID)
if err != nil {
return nil, err
}
@@ -309,7 +273,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// Put in to the remote path with the modTime given of the given size
//
-// When called from outside an Fs by rclone, src.Size() will always be >= 0.
+// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
// return an error or upload it properly (rather than e.g. calling panic).
//
@@ -317,10 +281,10 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-existingObj, err := f.NewObject(ctx, src.Remote())
+exisitingObj, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
-return existingObj, existingObj.Update(ctx, in, src, options...)
+return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src, options...)
@@ -334,36 +298,34 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
-if size > int64(300e9) {
+if size > int64(100E9) {
return nil, errors.New("File too big, cant upload")
} else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
-nodeResponse, err := f.getUploadNode(ctx)
+nodeResponse, err := f.getUploadNode()
if err != nil {
return nil, err
}
-leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
+leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return nil, err
}
-_, err = f.uploadFile(ctx, in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL, options...)
+_, err = f.uploadFile(in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL)
if err != nil {
return nil, err
}
-fileUploadResponse, err := f.endUpload(ctx, nodeResponse.ID, nodeResponse.URL)
+fileUploadResponse, err := f.endUpload(nodeResponse.ID, nodeResponse.URL)
if err != nil {
return nil, err
}
-if len(fileUploadResponse.Links) == 0 {
-return nil, errors.New("upload response not found")
-} else if len(fileUploadResponse.Links) > 1 {
-fs.Debugf(remote, "Multiple upload responses found, using the first")
+if len(fileUploadResponse.Links) != 1 {
+return nil, errors.New("unexpected amount of files")
}
link := fileUploadResponse.Links[0]
@@ -377,13 +339,14 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
fs: f,
remote: remote,
file: File{
+ACL: 0,
CDN: 0,
Checksum: link.Whirlpool,
ContentType: "",
Date: time.Now().Format("2006-01-02 15:04:05"),
Filename: link.Filename,
Pass: 0,
-Size: fileSize,
+Size: int(fileSize),
URL: link.Download,
},
}, nil
@@ -401,7 +364,13 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
-_, err := f.dirCache.FindDir(ctx, dir, true)
+err := f.dirCache.FindRoot(ctx, true)
+if err != nil {
+return err
+}
+if dir != "" {
+_, err = f.dirCache.FindDir(ctx, dir, true)
+}
return err
}
@@ -409,6 +378,11 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+err := f.dirCache.FindRoot(ctx, false)
+if err != nil {
+return err
+}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
@@ -419,7 +393,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return err
}
-_, err = f.removeFolder(ctx, dir, folderID)
+_, err = f.removeFolder(dir, folderID)
if err != nil {
return err
}
@@ -429,109 +403,9 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return nil
}
-// Move src to this remote using server side move operations.
-func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
-srcObj, ok := src.(*Object)
-if !ok {
-fs.Debugf(src, "Can't move - not same remote type")
-return nil, fs.ErrorCantMove
-}
-// Find current directory ID
-_, currentDirectoryID, err := f.dirCache.FindPath(ctx, remote, false)
-if err != nil {
-return nil, err
-}
-// Create temporary object
-dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
-if err != nil {
-return nil, err
-}
-// If it is in the correct directory, just rename it
-var url string
-if currentDirectoryID == directoryID {
-resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
-if err != nil {
-return nil, errors.Wrap(err, "couldn't rename file")
-}
-if resp.Status != "OK" {
-return nil, errors.Errorf("couldn't rename file: %s", resp.Message)
-}
-url = resp.URLs[0].URL
-} else {
-folderID, err := strconv.Atoi(directoryID)
-if err != nil {
-return nil, err
-}
-resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
-if err != nil {
-return nil, errors.Wrap(err, "couldn't move file")
-}
-if resp.Status != "OK" {
-return nil, errors.Errorf("couldn't move file: %s", resp.Message)
-}
-url = resp.URLs[0]
-}
-file, err := f.readFileInfo(ctx, url)
-if err != nil {
-return nil, errors.New("couldn't read file data")
-}
-dstObj.setMetaData(*file)
-return dstObj, nil
-}
-// Copy src to this remote using server side move operations.
-func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
-srcObj, ok := src.(*Object)
-if !ok {
-fs.Debugf(src, "Can't move - not same remote type")
-return nil, fs.ErrorCantMove
-}
-// Create temporary object
-dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
-if err != nil {
-return nil, err
-}
-folderID, err := strconv.Atoi(directoryID)
-if err != nil {
-return nil, err
-}
-resp, err := f.copyFile(ctx, srcObj.file.URL, folderID, leaf)
-if err != nil {
-return nil, errors.Wrap(err, "couldn't move file")
-}
-if resp.Status != "OK" {
-return nil, errors.Errorf("couldn't move file: %s", resp.Message)
-}
-file, err := f.readFileInfo(ctx, resp.URLs[0].ToURL)
-if err != nil {
-return nil, errors.New("couldn't read file data")
-}
-dstObj.setMetaData(*file)
-return dstObj, nil
-}
-// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
-func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
-o, err := f.NewObject(ctx, remote)
-if err != nil {
-return "", err
-}
-return o.(*Object).file.URL, nil
-}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
-_ fs.Mover = (*Fs)(nil)
-_ fs.Copier = (*Fs)(nil)
-_ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ dircache.DirCacher = (*Fs)(nil)
)


@@ -4,11 +4,13 @@ package fichier
import (
"testing"
+"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
+fs.Config.LogLevel = fs.LogLevelDebug
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFichier:",
})


@@ -43,7 +43,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// Size returns the size of the file
func (o *Object) Size() int64 {
-return o.file.Size
+return int64(o.file.Size)
}
// Fs returns read only access to the Fs that this object is part of
@@ -72,14 +72,10 @@ func (o *Object) SetModTime(context.Context, time.Time) error {
//return errors.New("setting modtime is not supported for 1fichier remotes")
}
-func (o *Object) setMetaData(file File) {
-o.file = file
-}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
-fs.FixRangeOption(options, o.file.Size)
-downloadToken, err := o.fs.getDownloadToken(ctx, o.file.URL)
+fs.FixRangeOption(options, int64(o.file.Size))
+downloadToken, err := o.fs.getDownloadToken(o.file.URL)
if err != nil {
return nil, err
@@ -93,8 +89,8 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
}
err = o.fs.pacer.Call(func() (bool, error) {
-resp, err = o.fs.rest.Call(ctx, &opts)
-return shouldRetry(ctx, resp, err)
+resp, err = o.fs.rest.Call(&opts)
+return shouldRetry(resp, err)
})
if err != nil {
@@ -105,7 +101,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
// Update in to the object with the modTime given of the given size
//
-// When called from outside an Fs by rclone, src.Size() will always be >= 0.
+// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
@@ -135,7 +131,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
func (o *Object) Remove(ctx context.Context) error {
// fs.Debugf(f, "Removing file `%s` with url `%s`", o.file.Filename, o.file.URL)
-_, err := o.fs.deleteFile(ctx, o.file.URL)
+_, err := o.fs.deleteFile(o.file.URL)
if err != nil {
return err


@@ -0,0 +1,71 @@
/*
Translate file names for 1fichier
1Fichier reserved characters
The following characters are 1Fichier reserved characters, and can't
be used in 1Fichier folder and file names.
*/
package fichier
import (
"regexp"
"strings"
)
// charMap holds replacements for characters
//
// 1Fichier has a restricted set of characters compared to other cloud
// storage systems, so we map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
charMap = map[rune]rune{
'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
'<': '＜', // FULLWIDTH LESS-THAN SIGN
'>': '＞', // FULLWIDTH GREATER-THAN SIGN
'"': '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
'\'': '＇', // FULLWIDTH APOSTROPHE
'$': '＄', // FULLWIDTH DOLLAR SIGN
'`': '｀', // FULLWIDTH GRAVE ACCENT
' ': '␠', // SYMBOL FOR SPACE
}
invCharMap map[rune]rune
fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
)
func init() {
// Create inverse charMap
invCharMap = make(map[rune]rune, len(charMap))
for k, v := range charMap {
invCharMap[v] = k
}
}
// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
// file names can't start with space either
in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
// Replace reserved characters
return strings.Map(func(c rune) rune {
if replacement, ok := charMap[c]; ok && c != ' ' {
return replacement
}
return c
}, in)
}
// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
return strings.Map(func(c rune) rune {
if replacement, ok := invCharMap[c]; ok {
return replacement
}
return c
}, in)
}


@@ -0,0 +1,24 @@
package fichier
import "testing"
func TestReplace(t *testing.T) {
for _, test := range []struct {
in string
out string
}{
{"", ""},
{"abc 123", "abc 123"},
{"\"'<>/\\$`", `/`},
{" leading space", "␠leading space"},
} {
got := replaceReservedChars(test.in)
if got != test.out {
t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
}
got2 := restoreReservedChars(got)
if got2 != test.in {
t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
}
}
}


@@ -1,10 +1,5 @@
package fichier
-// FileInfoRequest is the request structure of the corresponding request
-type FileInfoRequest struct {
-URL string `json:"url"`
-}
// ListFolderRequest is the request structure of the corresponding request
type ListFolderRequest struct {
FolderID int `json:"folder_id"`
@@ -19,7 +14,6 @@ type ListFilesRequest struct {
type DownloadRequest struct {
URL string `json:"url"`
Single int `json:"single"`
-Pass string `json:"pass,omitempty"`
}
// RemoveFolderRequest is the request structure of the corresponding request
@@ -55,65 +49,6 @@ type MakeFolderResponse struct {
FolderID int `json:"folder_id"`
}
-// MoveFileRequest is the request structure of the corresponding request
-type MoveFileRequest struct {
-URLs []string `json:"urls"`
-FolderID int `json:"destination_folder_id"`
-Rename string `json:"rename,omitempty"`
-}
-// MoveFileResponse is the response structure of the corresponding request
-type MoveFileResponse struct {
-Status string `json:"status"`
-Message string `json:"message"`
-URLs []string `json:"urls"`
-}
-// CopyFileRequest is the request structure of the corresponding request
-type CopyFileRequest struct {
-URLs []string `json:"urls"`
-FolderID int `json:"folder_id"`
-Rename string `json:"rename,omitempty"`
-}
-// CopyFileResponse is the response structure of the corresponding request
-type CopyFileResponse struct {
-Status string `json:"status"`
-Message string `json:"message"`
-Copied int `json:"copied"`
-URLs []FileCopy `json:"urls"`
-}
-// FileCopy is used in the CopyFileResponse
-type FileCopy struct {
-FromURL string `json:"from_url"`
-ToURL string `json:"to_url"`
-}
-// RenameFileURL is the data structure to rename a single file
-type RenameFileURL struct {
-URL string `json:"url"`
-Filename string `json:"filename"`
-}
-// RenameFileRequest is the request structure of the corresponding request
-type RenameFileRequest struct {
-URLs []RenameFileURL `json:"urls"`
-Pretty int `json:"pretty"`
-}
-// RenameFileResponse is the response structure of the corresponding request
-type RenameFileResponse struct {
-Status string `json:"status"`
-Message string `json:"message"`
-Renamed int `json:"renamed"`
-URLs []struct {
-URL string `json:"url"`
-OldFilename string `json:"old_filename"`
-NewFilename string `json:"new_filename"`
-} `json:"urls"`
-}
// GetUploadNodeResponse is the response structure of the corresponding request
type GetUploadNodeResponse struct {
ID string `json:"id"`
@@ -134,7 +69,7 @@ type SharedFolderResponse []SharedFile
type SharedFile struct {
Filename string `json:"filename"`
Link string `json:"link"`
-Size int64 `json:"size"`
+Size int `json:"size"`
}
// EndFileUploadResponse is the response structure of the corresponding request
@@ -151,13 +86,14 @@ type EndFileUploadResponse struct {
// File is the structure how 1Fichier returns a File
type File struct {
+ACL int `json:"acl"`
CDN int `json:"cdn"`
Checksum string `json:"checksum"`
ContentType string `json:"content-type"`
Date string `json:"date"`
Filename string `json:"filename"`
Pass int `json:"pass"`
-Size int64 `json:"size"`
+Size int `json:"size"`
URL string `json:"url"`
}


@@ -1,409 +0,0 @@
// Package api has type definitions for filefabric
//
// Converted from the API responses with help from https://mholt.github.io/json-to-go/
package api
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"strings"
"time"
)
const (
// TimeFormat for parameters (UTC)
timeFormatParameters = `2006-01-02 15:04:05`
// "2020-08-11 10:10:04" for JSON parsing
timeFormatJSON = `"` + timeFormatParameters + `"`
)
// Time represents date and time information for the
// filefabric API
type Time time.Time
// MarshalJSON turns a Time into JSON (in UTC)
func (t *Time) MarshalJSON() (out []byte, err error) {
timeString := (*time.Time)(t).UTC().Format(timeFormatJSON)
return []byte(timeString), nil
}
var zeroTime = []byte(`"0000-00-00 00:00:00"`)
// UnmarshalJSON turns JSON into a Time (in UTC)
func (t *Time) UnmarshalJSON(data []byte) error {
// Set a Zero time.Time if we receive a zero time input
if bytes.Equal(data, zeroTime) {
*t = Time(time.Time{})
return nil
}
newT, err := time.Parse(timeFormatJSON, string(data))
if err != nil {
return err
}
*t = Time(newT)
return nil
}
// String turns a Time into a string in UTC suitable for the API
// parameters
func (t Time) String() string {
return time.Time(t).UTC().Format(timeFormatParameters)
}
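The zero-value handling above matters because this API uses "0000-00-00 00:00:00" as a null timestamp, which time.Parse would reject. A small self-contained demonstration of the same round trip (illustrative only, not part of the backend):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"time"
)

type Time time.Time

var zeroTime = []byte(`"0000-00-00 00:00:00"`)

func (t *Time) UnmarshalJSON(data []byte) error {
	if bytes.Equal(data, zeroTime) {
		*t = Time(time.Time{}) // the API's sentinel maps to Go's zero time
		return nil
	}
	parsed, err := time.Parse(`"2006-01-02 15:04:05"`, string(data))
	if err != nil {
		return err
	}
	*t = Time(parsed)
	return nil
}

func main() {
	var resp struct {
		Last Time `json:"lastlogin"`
	}
	if err := json.Unmarshal([]byte(`{"lastlogin":"0000-00-00 00:00:00"}`), &resp); err != nil {
		panic(err)
	}
	fmt.Println(time.Time(resp.Last).IsZero()) // true
}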
// Int represents an integer which can be represented in JSON as a
// quoted integer or an integer.
type Int int
// MarshalJSON turns a Int into JSON
func (i *Int) MarshalJSON() (out []byte, err error) {
return json.Marshal((*int)(i))
}
// UnmarshalJSON turns JSON into a Int
func (i *Int) UnmarshalJSON(data []byte) error {
if len(data) >= 2 && data[0] == '"' && data[len(data)-1] == '"' {
data = data[1 : len(data)-1]
}
return json.Unmarshal(data, (*int)(i))
}
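UnmarshalJSON strips surrounding quotes before handing the bytes to encoding/json, so the same field decodes whether the API returns the number quoted or bare. A quick stand-alone illustration (hypothetical, not backend code):

package main

import (
	"encoding/json"
	"fmt"
)

type Int int

func (i *Int) UnmarshalJSON(data []byte) error {
	if len(data) >= 2 && data[0] == '"' && data[len(data)-1] == '"' {
		data = data[1 : len(data)-1] // strip the quotes, then parse as a number
	}
	return json.Unmarshal(data, (*int)(i))
}

func main() {
	var a, b Int
	if err := json.Unmarshal([]byte(`"42"`), &a); err != nil { // quoted integer
		panic(err)
	}
	if err := json.Unmarshal([]byte(`42`), &b); err != nil { // bare integer
		panic(err)
	}
	fmt.Println(a, b) // 42 42
}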
// Status is returned in all status responses
type Status struct {
Code string `json:"status"`
Message string `json:"statusmessage"`
TaskID string `json:"taskid"`
// Warning string `json:"warning"` // obsolete
}
// Status satisfies the error interface
func (e *Status) Error() string {
return fmt.Sprintf("%s (%s)", e.Message, e.Code)
}
// OK returns true if the status is all good
func (e *Status) OK() bool {
return e.Code == "ok"
}
// GetCode returns the status code if any
func (e *Status) GetCode() string {
return e.Code
}
// OKError defines an interface for items which can be OK or be an error
type OKError interface {
error
OK() bool
GetCode() string
}
// Check Status satisfies the OKError interface
var _ OKError = (*Status)(nil)
// EmptyResponse is a response which just returns the error condition
type EmptyResponse struct {
Status
}
// GetTokenByAuthTokenResponse is the response to getTokenByAuthToken
type GetTokenByAuthTokenResponse struct {
Status
Token string `json:"token"`
UserID string `json:"userid"`
AllowLoginRemember string `json:"allowloginremember"`
LastLogin Time `json:"lastlogin"`
AutoLoginCode string `json:"autologincode"`
}
// ApplianceInfo is the response to getApplianceInfo
type ApplianceInfo struct {
Status
Sitetitle string `json:"sitetitle"`
OauthLoginSupport string `json:"oauthloginsupport"`
IsAppliance string `json:"isappliance"`
SoftwareVersion string `json:"softwareversion"`
SoftwareVersionLabel string `json:"softwareversionlabel"`
}
// GetFolderContentsResponse is returned from getFolderContents
type GetFolderContentsResponse struct {
Status
Total int `json:"total,string"`
Items []Item `json:"filelist"`
Folder Item `json:"folder"`
From Int `json:"from"`
//Count int `json:"count"`
Pid string `json:"pid"`
RefreshResult Status `json:"refreshresult"`
// Curfolder Item `json:"curfolder"` - sometimes returned as "ROOT"?
Parents []Item `json:"parents"`
CustomPermissions CustomPermissions `json:"custompermissions"`
}
// ItemType determine whether it is a file or a folder
type ItemType uint8
// Types of things in Item
const (
ItemTypeFile ItemType = 0
ItemTypeFolder ItemType = 1
)
// Item is a File or a Folder
type Item struct {
ID string `json:"fi_id"`
PID string `json:"fi_pid"`
// UID string `json:"fi_uid"`
Name string `json:"fi_name"`
// S3Name string `json:"fi_s3name"`
// Extension string `json:"fi_extension"`
// Description string `json:"fi_description"`
Type ItemType `json:"fi_type,string"`
// Created Time `json:"fi_created"`
Size int64 `json:"fi_size,string"`
ContentType string `json:"fi_contenttype"`
// Tags string `json:"fi_tags"`
// MainCode string `json:"fi_maincode"`
// Public int `json:"fi_public,string"`
// Provider string `json:"fi_provider"`
// ProviderFolder string `json:"fi_providerfolder"` // folder
// Encrypted int `json:"fi_encrypted,string"`
// StructType string `json:"fi_structtype"`
// Bname string `json:"fi_bname"` // folder
// OrgID string `json:"fi_orgid"`
// Favorite int `json:"fi_favorite,string"`
// IspartOf string `json:"fi_ispartof"` // folder
Modified Time `json:"fi_modified"`
// LastAccessed Time `json:"fi_lastaccessed"`
// Hits int64 `json:"fi_hits,string"`
// IP string `json:"fi_ip"` // folder
// BigDescription string `json:"fi_bigdescription"`
LocalTime Time `json:"fi_localtime"`
// OrgfolderID string `json:"fi_orgfolderid"`
// StorageIP string `json:"fi_storageip"` // folder
// RemoteTime Time `json:"fi_remotetime"`
// ProviderOptions string `json:"fi_provideroptions"`
// Access string `json:"fi_access"`
// Hidden string `json:"fi_hidden"` // folder
// VersionOf string `json:"fi_versionof"`
Trash bool `json:"trash"`
// Isbucket string `json:"isbucket"` // filelist
SubFolders int64 `json:"subfolders"` // folder
}
// ItemFields is a | separated list of fields in Item
var ItemFields = mustFields(Item{})
// fields returns the JSON fields in use by opt as a | separated
// string.
func fields(opt interface{}) (pipeTags string, err error) {
var tags []string
def := reflect.ValueOf(opt)
defType := def.Type()
for i := 0; i < def.NumField(); i++ {
field := defType.Field(i)
tag, ok := field.Tag.Lookup("json")
if !ok {
continue
}
if comma := strings.IndexRune(tag, ','); comma >= 0 {
tag = tag[:comma]
}
if tag == "" {
continue
}
tags = append(tags, tag)
}
return strings.Join(tags, "|"), nil
}
// mustFields returns the JSON fields in use by opt as a | separated
// string. It panics on failure.
func mustFields(opt interface{}) string {
tags, err := fields(opt)
if err != nil {
panic(err)
}
return tags
}
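In other words, fields walks the struct's json tags, drops any options after the comma (such as ",string"), and joins the names with "|"; ItemFields is that string precomputed for Item. A stand-alone illustration with a hypothetical struct:

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// fields mirrors the helper above: collect the json tag names and
// join them with "|".
func fields(opt interface{}) string {
	var tags []string
	t := reflect.TypeOf(opt)
	for i := 0; i < t.NumField(); i++ {
		tag, ok := t.Field(i).Tag.Lookup("json")
		if !ok {
			continue
		}
		if comma := strings.IndexRune(tag, ','); comma >= 0 {
			tag = tag[:comma] // drop options such as ",string"
		}
		if tag != "" {
			tags = append(tags, tag)
		}
	}
	return strings.Join(tags, "|")
}

type example struct {
	ID   string `json:"fi_id"`
	Name string `json:"fi_name"`
	Size int64  `json:"fi_size,string"`
}

func main() {
	fmt.Println(fields(example{})) // fi_id|fi_name|fi_size
}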
// CustomPermissions is returned as part of GetFolderContentsResponse
type CustomPermissions struct {
Upload string `json:"upload"`
CreateSubFolder string `json:"createsubfolder"`
Rename string `json:"rename"`
Delete string `json:"delete"`
Move string `json:"move"`
ManagePermissions string `json:"managepermissions"`
ListOnly string `json:"listonly"`
VisibleInTrash string `json:"visibleintrash"`
}
// DoCreateNewFolderResponse is the response from doCreateNewFolder
type DoCreateNewFolderResponse struct {
Status
Item Item `json:"file"`
}
// DoInitUploadResponse is response from doInitUpload
type DoInitUploadResponse struct {
Status
ProviderID string `json:"providerid"`
UploadCode string `json:"uploadcode"`
FileType string `json:"filetype"`
DirectUploadSupport string `json:"directuploadsupport"`
ResumeAllowed string `json:"resumeallowed"`
}
// UploaderResponse is returned from /cgi-bin/uploader/uploader1.cgi
//
// Sometimes the response is returned as XML and sometimes as JSON
type UploaderResponse struct {
FileSize int64 `xml:"filesize" json:"filesize,string"`
MD5 string `xml:"md5" json:"md5"`
Success string `xml:"success" json:"success"`
}
// UploadStatus is returned from getUploadStatus
type UploadStatus struct {
Status
UploadCode string `json:"uploadcode"`
Metafile string `json:"metafile"`
Percent int `json:"percent,string"`
Uploaded int64 `json:"uploaded,string"`
Size int64 `json:"size,string"`
Filename string `json:"filename"`
Nofile string `json:"nofile"`
Completed string `json:"completed"`
Completsuccess string `json:"completsuccess"`
Completerror string `json:"completerror"`
}
// DoCompleteUploadResponse is the response to doCompleteUpload
type DoCompleteUploadResponse struct {
Status
UploadedSize int64 `json:"uploadedsize,string"`
StorageIP string `json:"storageip"`
UploadedName string `json:"uploadedname"`
// Versioned []interface{} `json:"versioned"`
// VersionedID int `json:"versionedid"`
// Comment interface{} `json:"comment"`
File Item `json:"file"`
// UsSize string `json:"us_size"`
// PaSize string `json:"pa_size"`
// SpaceInfo SpaceInfo `json:"spaceinfo"`
}
// Providers is returned as part of UploadResponse
type Providers struct {
Max string `json:"max"`
Used string `json:"used"`
ID string `json:"id"`
Private string `json:"private"`
Limit string `json:"limit"`
Percent int `json:"percent"`
}
// Total is returned as part of UploadResponse
type Total struct {
Max string `json:"max"`
Used string `json:"used"`
ID string `json:"id"`
Priused string `json:"priused"`
Primax string `json:"primax"`
Limit string `json:"limit"`
Percent int `json:"percent"`
Pripercent int `json:"pripercent"`
}
// UploadResponse is returned as part of SpaceInfo
type UploadResponse struct {
Providers []Providers `json:"providers"`
Total Total `json:"total"`
}
// SpaceInfo is returned as part of DoCompleteUploadResponse
type SpaceInfo struct {
Response UploadResponse `json:"response"`
Status string `json:"status"`
}
// DeleteResponse is returned from doDeleteFile
type DeleteResponse struct {
Status
Deleted []string `json:"deleted"`
Errors []interface{} `json:"errors"`
ID string `json:"fi_id"`
BackgroundTask int `json:"backgroundtask"`
UsSize string `json:"us_size"`
PaSize string `json:"pa_size"`
//SpaceInfo SpaceInfo `json:"spaceinfo"`
}
// FileResponse is returned from doRenameFile
type FileResponse struct {
Status
Item Item `json:"file"`
Exists string `json:"exists"`
}
// MoveFilesResponse is returned from doMoveFiles
type MoveFilesResponse struct {
Status
Filesleft string `json:"filesleft"`
Addedtobackground string `json:"addedtobackground"`
Moved string `json:"moved"`
Item Item `json:"file"`
IDs []string `json:"fi_ids"`
Length int `json:"length"`
DirID string `json:"dir_id"`
MovedObjects []Item `json:"movedobjects"`
// FolderTasks []interface{} `json:"foldertasks"`
}
// TasksResponse is the response to getUserBackgroundTasks
type TasksResponse struct {
Status
Tasks []Task `json:"tasks"`
Total string `json:"total"`
}
// BtData is part of TasksResponse
type BtData struct {
Callback string `json:"callback"`
}
// Task describes a task returned in TasksResponse
type Task struct {
BtID string `json:"bt_id"`
UsID string `json:"us_id"`
BtType string `json:"bt_type"`
BtData BtData `json:"bt_data"`
BtStatustext string `json:"bt_statustext"`
BtStatusdata string `json:"bt_statusdata"`
BtMessage string `json:"bt_message"`
BtProcent string `json:"bt_procent"`
BtAdded string `json:"bt_added"`
BtStatus string `json:"bt_status"`
BtCompleted string `json:"bt_completed"`
BtTitle string `json:"bt_title"`
BtCredentials string `json:"bt_credentials"`
BtHidden string `json:"bt_hidden"`
BtAutoremove string `json:"bt_autoremove"`
BtDevsite string `json:"bt_devsite"`
BtPriority string `json:"bt_priority"`
BtReport string `json:"bt_report"`
BtSitemarker string `json:"bt_sitemarker"`
BtExecuteafter string `json:"bt_executeafter"`
BtCompletestatus string `json:"bt_completestatus"`
BtSubtype string `json:"bt_subtype"`
BtCanceled string `json:"bt_canceled"`
Callback string `json:"callback"`
CanBeCanceled bool `json:"canbecanceled"`
CanBeRestarted bool `json:"canberestarted"`
Type string `json:"type"`
Status string `json:"status"`
Settings string `json:"settings"`
}

File diff suppressed because it is too large


@@ -1,17 +0,0 @@
-// Test filefabric filesystem interface
-package filefabric_test
-import (
-	"testing"
-	"github.com/rclone/rclone/backend/filefabric"
-	"github.com/rclone/rclone/fstest/fstests"
-)
-// TestIntegration runs integration tests against the remote
-func TestIntegration(t *testing.T) {
-	fstests.Run(t, &fstests.Opt{
-		RemoteName: "TestFileFabric:",
-		NilObject:  (*filefabric.Object)(nil),
-	})
-}


@@ -5,166 +5,92 @@ import (
"context" "context"
"crypto/tls" "crypto/tls"
"io" "io"
"net"
"net/textproto" "net/textproto"
"os"
"path" "path"
"runtime"
"strings"
"sync" "sync"
"time" "time"
"github.com/jlaffaye/ftp" "github.com/jlaffaye/ftp"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers" "github.com/rclone/rclone/lib/readers"
) )
var (
currentUser = env.CurrentUser()
)
const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
// Register with Fs // Register with Fs
func init() { func init() {
fs.Register(&fs.RegInfo{ fs.Register(&fs.RegInfo{
Name: "ftp", Name: "ftp",
Description: "FTP Connection", Description: "FTP Connection",
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{
Name: "host", {
Help: "FTP host to connect to", Name: "host",
Required: true, Help: "FTP host to connect to",
Examples: []fs.OptionExample{{ Required: true,
Value: "ftp.example.com", Examples: []fs.OptionExample{{
Help: "Connect to ftp.example.com", Value: "ftp.example.com",
}}, Help: "Connect to ftp.example.com",
}, { }},
Name: "user", }, {
Help: "FTP username, leave blank for current username, " + currentUser, Name: "user",
}, { Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
Name: "port", }, {
Help: "FTP port, leave blank to use default (21)", Name: "port",
}, { Help: "FTP port, leave blank to use default (21)",
Name: "pass", }, {
Help: "FTP password", Name: "pass",
IsPassword: true, Help: "FTP password",
Required: true, IsPassword: true,
}, { Required: true,
Name: "tls", }, {
Help: `Use Implicit FTPS (FTP over TLS) Name: "tls",
When using implicit FTP over TLS the client connects using TLS Help: "Use FTP over TLS (Implicit)",
right from the start which breaks compatibility with Default: false,
non-TLS-aware servers. This is usually served over port 990 rather }, {
than port 21. Cannot be used in combination with explicit FTP.`, Name: "concurrency",
Default: false, Help: "Maximum number of FTP simultaneous connections, 0 for unlimited",
}, { Default: 0,
Name: "explicit_tls", Advanced: true,
Help: `Use Explicit FTPS (FTP over TLS) }, {
When using explicit FTP over TLS the client explicitly requests Name: "no_check_certificate",
security from the server in order to upgrade a plain text connection Help: "Do not verify the TLS certificate of the server",
to an encrypted one. Cannot be used in combination with implicit FTP.`, Default: false,
Default: false, Advanced: true,
}, { },
Name: "concurrency", },
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited",
Default: 0,
Advanced: true,
}, {
Name: "no_check_certificate",
Help: "Do not verify the TLS certificate of the server",
Default: false,
Advanced: true,
}, {
Name: "disable_epsv",
Help: "Disable using EPSV even if server advertises support",
Default: false,
Advanced: true,
}, {
Name: "disable_mlsd",
Help: "Disable using MLSD even if server advertises support",
Default: false,
Advanced: true,
}, {
Name: "idle_timeout",
Default: fs.Duration(60 * time.Second),
Help: `Max time before closing idle connections
If no connections have been returned to the connection pool in the time
given, rclone will empty the connection pool.
Set to 0 to keep connections indefinitely.
`,
Advanced: true,
}, {
Name: "close_timeout",
Help: "Maximum time to wait for a response to close.",
Default: fs.Duration(60 * time.Second),
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// The FTP protocol can't handle trailing spaces (for instance
// pureftpd turns them into _)
//
// proftpd can't handle '*' in file names
// pureftpd can't handle '[', ']' or '*'
Default: (encoder.Display |
encoder.EncodeRightSpace),
}},
}) })
} }
// Options defines the configuration for this backend // Options defines the configuration for this backend
type Options struct { type Options struct {
Host string `config:"host"` Host string `config:"host"`
User string `config:"user"` User string `config:"user"`
Pass string `config:"pass"` Pass string `config:"pass"`
Port string `config:"port"` Port string `config:"port"`
TLS bool `config:"tls"` TLS bool `config:"tls"`
ExplicitTLS bool `config:"explicit_tls"` Concurrency int `config:"concurrency"`
Concurrency int `config:"concurrency"` SkipVerifyTLSCert bool `config:"no_check_certificate"`
SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"`
DisableMLSD bool `config:"disable_mlsd"`
IdleTimeout fs.Duration `config:"idle_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"`
Enc encoder.MultiEncoder `config:"encoding"`
} }
// Fs represents a remote FTP server // Fs represents a remote FTP server
type Fs struct { type Fs struct {
name string // name of this remote name string // name of this remote
root string // the path we are working on if any root string // the path we are working on if any
opt Options // parsed options opt Options // parsed options
ci *fs.ConfigInfo // global config features *fs.Features // optional features
features *fs.Features // optional features
url string url string
user string user string
pass string pass string
dialAddr string dialAddr string
poolMu sync.Mutex poolMu sync.Mutex
pool []*ftp.ServerConn pool []*ftp.ServerConn
drain *time.Timer // used to drain the pool when we stop using the connections
tokens *pacer.TokenDispenser tokens *pacer.TokenDispenser
tlsConf *tls.Config
pacer *fs.Pacer // pacer for FTP connections
} }
// Object describes an FTP file // Object describes an FTP file
@@ -204,119 +130,36 @@ func (f *Fs) Features() *fs.Features {
return f.features return f.features
} }
// Enable debugging output
type debugLog struct {
mu sync.Mutex
auth bool
}
// Write writes len(p) bytes from p to the underlying data stream. It returns
// the number of bytes written from p (0 <= n <= len(p)) and any error
// encountered that caused the write to stop early. Write must return a non-nil
// error if it returns n < len(p). Write must not modify the slice data, even
// temporarily.
//
// Implementations must not retain p.
//
// This writes debug info to the log
func (dl *debugLog) Write(p []byte) (n int, err error) {
dl.mu.Lock()
defer dl.mu.Unlock()
_, file, _, ok := runtime.Caller(1)
direction := "FTP Rx"
if ok && strings.Contains(file, "multi") {
direction = "FTP Tx"
}
lines := strings.Split(string(p), "\r\n")
if lines[len(lines)-1] == "" {
lines = lines[:len(lines)-1]
}
for _, line := range lines {
if !dl.auth && strings.HasPrefix(line, "PASS") {
fs.Debugf(direction, "PASS *****")
continue
}
fs.Debugf(direction, "%q", line)
}
return len(p), nil
}
// shouldRetry returns a boolean as to whether this err deserve to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusNotAvailable:
return true, err
}
}
return fserrors.ShouldRetry(err), err
}
// Open a new connection to the FTP server. // Open a new connection to the FTP server.
func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) { func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
fs.Debugf(f, "Connecting to FTP server") fs.Debugf(f, "Connecting to FTP server")
ftpConfig := []ftp.DialOption{ftp.DialWithTimeout(fs.Config.ConnectTimeout)}
// Make ftp library dial with fshttp dialer optionally using TLS
dial := func(network, address string) (conn net.Conn, err error) {
conn, err = fshttp.NewDialer(ctx).Dial(network, address)
if f.tlsConf != nil && err == nil {
conn = tls.Client(conn, f.tlsConf)
}
return
}
ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dial)}
if f.opt.TLS { if f.opt.TLS {
// Our dialer takes care of TLS but ftp library also needs tlsConf tlsConfig := &tls.Config{
// as a trigger for sending PBSZ and PROT options to server. ServerName: f.opt.Host,
ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf)) InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
} else if f.opt.ExplicitTLS {
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
// Initial connection needs to be cleartext for explicit TLS
conn, err := fshttp.NewDialer(ctx).Dial("tcp", f.dialAddr)
if err != nil {
return nil, err
} }
ftpConfig = append(ftpConfig, ftp.DialWithNetConn(conn)) ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
} }
if f.opt.DisableEPSV { c, err := ftp.Dial(f.dialAddr, ftpConfig...)
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
}
if f.opt.DisableMLSD {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledMLSD(true))
}
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
}
err = f.pacer.Call(func() (bool, error) {
c, err = ftp.Dial(f.dialAddr, ftpConfig...)
if err != nil {
return shouldRetry(ctx, err)
}
err = c.Login(f.user, f.pass)
if err != nil {
_ = c.Quit()
return shouldRetry(ctx, err)
}
return false, nil
})
if err != nil { if err != nil {
err = errors.Wrapf(err, "failed to make FTP connection to %q", f.dialAddr) fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
return nil, errors.Wrap(err, "ftpConnection Dial")
} }
return c, err err = c.Login(f.user, f.pass)
if err != nil {
_ = c.Quit()
fs.Errorf(f, "Error while Logging in into %s: %s", f.dialAddr, err)
return nil, errors.Wrap(err, "ftpConnection Login")
}
return c, nil
} }
// Get an FTP connection from the pool, or open a new one // Get an FTP connection from the pool, or open a new one
func (f *Fs) getFtpConnection(ctx context.Context) (c *ftp.ServerConn, err error) { func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
if f.opt.Concurrency > 0 { if f.opt.Concurrency > 0 {
f.tokens.Get() f.tokens.Get()
} }
accounting.LimitTPS(ctx)
f.poolMu.Lock() f.poolMu.Lock()
if len(f.pool) > 0 { if len(f.pool) > 0 {
c = f.pool[0] c = f.pool[0]
@@ -326,11 +169,7 @@ func (f *Fs) getFtpConnection(ctx context.Context) (c *ftp.ServerConn, err error
if c != nil { if c != nil {
return c, nil return c, nil
} }
c, err = f.ftpConnection(ctx) return f.ftpConnection()
if err != nil && f.opt.Concurrency > 0 {
f.tokens.Put()
}
return c, err
} }
// Return an FTP connection to the pool // Return an FTP connection to the pool
@@ -343,13 +182,7 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
if f.opt.Concurrency > 0 { if f.opt.Concurrency > 0 {
defer f.tokens.Put() defer f.tokens.Put()
} }
if pc == nil {
return
}
c := *pc c := *pc
if c == nil {
return
}
*pc = nil *pc = nil
if err != nil { if err != nil {
// If not a regular FTP error code then check the connection // If not a regular FTP error code then check the connection
@@ -365,34 +198,12 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
} }
f.poolMu.Lock() f.poolMu.Lock()
f.pool = append(f.pool, c) f.pool = append(f.pool, c)
if f.opt.IdleTimeout > 0 {
f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer
}
f.poolMu.Unlock() f.poolMu.Unlock()
} }
// Drain the pool of any connections
func (f *Fs) drainPool(ctx context.Context) (err error) {
f.poolMu.Lock()
defer f.poolMu.Unlock()
if f.opt.IdleTimeout > 0 {
f.drain.Stop()
}
if len(f.pool) != 0 {
fs.Debugf(f, "closing %d unused connections", len(f.pool))
}
for i, c := range f.pool {
if cErr := c.Quit(); cErr != nil {
err = cErr
}
f.pool[i] = nil
}
f.pool = nil
return err
}
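The idle_timeout option, the drain.Reset nudge in putFtpConnection and drainPool above add up to one pattern: connections are pooled, and a quiet period empties the pool. A stripped-down sketch under stated assumptions (hypothetical connPool type; "io", "sync" and "time" imports; not the backend's actual types):
// connPool drains itself after idleTimeout of inactivity; every
// returned connection pushes the timer back, as putFtpConnection does.
type connPool struct {
	mu      sync.Mutex
	conns   []io.Closer
	drain   *time.Timer
	timeout time.Duration
}

func newConnPool(idleTimeout time.Duration) *connPool {
	p := &connPool{timeout: idleTimeout}
	p.drain = time.AfterFunc(idleTimeout, func() {
		p.mu.Lock()
		defer p.mu.Unlock()
		for _, c := range p.conns {
			_ = c.Close() // best effort, mirroring drainPool collecting Quit errors
		}
		p.conns = nil
	})
	return p
}

func (p *connPool) put(c io.Closer) {
	p.mu.Lock()
	p.conns = append(p.conns, c)
	p.drain.Reset(p.timeout) // nudge the drain timer on every return
	p.mu.Unlock()
}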
// NewFs constructs an Fs from the path, container:path // NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs, err error) { func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
ctx := context.Background()
// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err) // defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
// Parse config into Options struct // Parse config into Options struct
opt := new(Options) opt := new(Options)
@@ -406,7 +217,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
} }
user := opt.User user := opt.User
if user == "" { if user == "" {
user = currentUser user = os.Getenv("USER")
} }
port := opt.Port port := opt.Port
if port == "" { if port == "" {
@@ -418,40 +229,22 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
if opt.TLS { if opt.TLS {
protocol = "ftps://" protocol = "ftps://"
} }
if opt.TLS && opt.ExplicitTLS {
return nil, errors.New("Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
}
var tlsConfig *tls.Config
if opt.TLS || opt.ExplicitTLS {
tlsConfig = &tls.Config{
ServerName: opt.Host,
InsecureSkipVerify: opt.SkipVerifyTLSCert,
}
}
u := protocol + path.Join(dialAddr+"/", root) u := protocol + path.Join(dialAddr+"/", root)
ci := fs.GetConfig(ctx)
f := &Fs{ f := &Fs{
name: name, name: name,
root: root, root: root,
opt: *opt, opt: *opt,
ci: ci,
url: u, url: u,
user: user, user: user,
pass: pass, pass: pass,
dialAddr: dialAddr, dialAddr: dialAddr,
tokens: pacer.NewTokenDispenser(opt.Concurrency), tokens: pacer.NewTokenDispenser(opt.Concurrency),
tlsConf: tlsConfig,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
} }
f.features = (&fs.Features{ f.features = (&fs.Features{
CanHaveEmptyDirectories: true, CanHaveEmptyDirectories: true,
}).Fill(ctx, f) }).Fill(f)
// set the pool drainer timer going
if f.opt.IdleTimeout > 0 {
f.drain = time.AfterFunc(time.Duration(opt.IdleTimeout), func() { _ = f.drainPool(ctx) })
}
// Make a connection and pool it to return errors early // Make a connection and pool it to return errors early
c, err := f.getFtpConnection(ctx) c, err := f.getFtpConnection()
if err != nil { if err != nil {
return nil, errors.Wrap(err, "NewFs") return nil, errors.Wrap(err, "NewFs")
} }
@@ -478,12 +271,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
return f, err return f, err
} }
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
return f.drainPool(ctx)
}
// translateErrorFile turns FTP errors into rclone errors if possible for a file // translateErrorFile turns FTP errors into rclone errors if possible for a file
func translateErrorFile(err error) error { func translateErrorFile(err error) error {
switch errX := err.(type) { switch errX := err.(type) {
@@ -508,51 +295,23 @@ func translateErrorDir(err error) error {
return err return err
} }
// entryToStandard converts an incoming ftp.Entry to Standard encoding
func (f *Fs) entryToStandard(entry *ftp.Entry) {
// Skip . and .. as we don't want these encoded
if entry.Name == "." || entry.Name == ".." {
return
}
entry.Name = f.opt.Enc.ToStandardName(entry.Name)
entry.Target = f.opt.Enc.ToStandardPath(entry.Target)
}
// dirFromStandardPath returns dir in encoded form.
func (f *Fs) dirFromStandardPath(dir string) string {
// Skip . and .. as we don't want these encoded
if dir == "." || dir == ".." {
return dir
}
return f.opt.Enc.FromStandardPath(dir)
}
// findItem finds a directory entry for the name in its parent directory // findItem finds a directory entry for the name in its parent directory
func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err error) { func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err) // defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
fullPath := path.Join(f.root, remote) fullPath := path.Join(f.root, remote)
if fullPath == "" || fullPath == "." || fullPath == "/" {
// if root, assume exists and synthesize an entry
return &ftp.Entry{
Name: "",
Type: ftp.EntryTypeFolder,
Time: time.Now(),
}, nil
}
dir := path.Dir(fullPath) dir := path.Dir(fullPath)
base := path.Base(fullPath) base := path.Base(fullPath)
c, err := f.getFtpConnection(ctx) c, err := f.getFtpConnection()
if err != nil { if err != nil {
return nil, errors.Wrap(err, "findItem") return nil, errors.Wrap(err, "findItem")
} }
files, err := c.List(f.dirFromStandardPath(dir)) files, err := c.List(dir)
f.putFtpConnection(&c, err) f.putFtpConnection(&c, err)
if err != nil { if err != nil {
return nil, translateErrorFile(err) return nil, translateErrorFile(err)
} }
for _, file := range files { for _, file := range files {
f.entryToStandard(file)
if file.Name == base { if file.Name == base {
return file, nil return file, nil
} }
@@ -564,7 +323,7 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
// it returns the error fs.ErrorObjectNotFound. // it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) { func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err) // defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
entry, err := f.findItem(ctx, remote) entry, err := f.findItem(remote)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -586,8 +345,8 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
} }
// dirExists checks the directory pointed to by remote exists or not // dirExists checks the directory pointed to by remote exists or not
func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) { func (f *Fs) dirExists(remote string) (exists bool, err error) {
entry, err := f.findItem(ctx, remote) entry, err := f.findItem(remote)
if err != nil { if err != nil {
return false, errors.Wrap(err, "dirExists") return false, errors.Wrap(err, "dirExists")
} }
@@ -607,8 +366,8 @@ func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err err
// This should return ErrDirNotFound if the directory isn't // This should return ErrDirNotFound if the directory isn't
// found. // found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err) // defer fs.Trace(dir, "curlevel=%d", curlevel)("")
c, err := f.getFtpConnection(ctx) c, err := f.getFtpConnection()
if err != nil { if err != nil {
return nil, errors.Wrap(err, "list") return nil, errors.Wrap(err, "list")
} }
@@ -619,7 +378,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
resultchan := make(chan []*ftp.Entry, 1) resultchan := make(chan []*ftp.Entry, 1)
errchan := make(chan error, 1) errchan := make(chan error, 1)
go func() { go func() {
result, err := c.List(f.dirFromStandardPath(path.Join(f.root, dir))) result, err := c.List(path.Join(f.root, dir))
f.putFtpConnection(&c, err) f.putFtpConnection(&c, err)
if err != nil { if err != nil {
errchan <- err errchan <- err
@@ -629,7 +388,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}() }()
// Wait for List for up to Timeout seconds // Wait for List for up to Timeout seconds
timer := time.NewTimer(f.ci.TimeoutOrInfinite()) timer := time.NewTimer(fs.Config.Timeout)
select { select {
case listErr = <-errchan: case listErr = <-errchan:
timer.Stop() timer.Stop()
@@ -646,7 +405,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// doesn't exist, so check it really doesn't exist if no // doesn't exist, so check it really doesn't exist if no
// entries found. // entries found.
if len(files) == 0 { if len(files) == 0 {
exists, err := f.dirExists(ctx, dir) exists, err := f.dirExists(dir)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "list") return nil, errors.Wrap(err, "list")
} }
@@ -656,7 +415,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
} }
for i := range files { for i := range files {
object := files[i] object := files[i]
f.entryToStandard(object)
newremote := path.Join(dir, object.Name) newremote := path.Join(dir, object.Name)
switch object.Type { switch object.Type {
case ftp.EntryTypeFolder: case ftp.EntryTypeFolder:
@@ -699,7 +457,7 @@ func (f *Fs) Precision() time.Duration {
// nil and the error // nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// fs.Debugf(f, "Trying to put file %s", src.Remote()) // fs.Debugf(f, "Trying to put file %s", src.Remote())
err := f.mkParentDir(ctx, src.Remote()) err := f.mkParentDir(src.Remote())
if err != nil { if err != nil {
return nil, errors.Wrap(err, "Put mkParentDir failed") return nil, errors.Wrap(err, "Put mkParentDir failed")
} }
@@ -717,30 +475,28 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
} }
// getInfo reads the FileInfo for a path // getInfo reads the FileInfo for a path
func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err error) { func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
// defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err) // defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err)
dir := path.Dir(remote) dir := path.Dir(remote)
base := path.Base(remote) base := path.Base(remote)
c, err := f.getFtpConnection(ctx) c, err := f.getFtpConnection()
if err != nil { if err != nil {
return nil, errors.Wrap(err, "getInfo") return nil, errors.Wrap(err, "getInfo")
} }
files, err := c.List(f.dirFromStandardPath(dir)) files, err := c.List(dir)
f.putFtpConnection(&c, err) f.putFtpConnection(&c, err)
if err != nil { if err != nil {
return nil, translateErrorFile(err) return nil, translateErrorFile(err)
} }
for i := range files { for i := range files {
file := files[i] if files[i].Name == base {
f.entryToStandard(file)
if file.Name == base {
info := &FileInfo{ info := &FileInfo{
Name: remote, Name: remote,
Size: file.Size, Size: files[i].Size,
ModTime: file.Time, ModTime: files[i].Time,
IsDir: file.Type == ftp.EntryTypeFolder, IsDir: files[i].Type == ftp.EntryTypeFolder,
} }
return info, nil return info, nil
} }
@@ -749,12 +505,11 @@ func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err erro
} }
// mkdir makes the directory and parents using unrooted paths // mkdir makes the directory and parents using unrooted paths
func (f *Fs) mkdir(ctx context.Context, abspath string) error { func (f *Fs) mkdir(abspath string) error {
abspath = path.Clean(abspath)
if abspath == "." || abspath == "/" { if abspath == "." || abspath == "/" {
return nil return nil
} }
fi, err := f.getInfo(ctx, abspath) fi, err := f.getInfo(abspath)
if err == nil { if err == nil {
if fi.IsDir { if fi.IsDir {
return nil return nil
@@ -764,15 +519,15 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
return errors.Wrapf(err, "mkdir %q failed", abspath) return errors.Wrapf(err, "mkdir %q failed", abspath)
} }
parent := path.Dir(abspath) parent := path.Dir(abspath)
err = f.mkdir(ctx, parent) err = f.mkdir(parent)
if err != nil { if err != nil {
return err return err
} }
c, connErr := f.getFtpConnection(ctx) c, connErr := f.getFtpConnection()
if connErr != nil { if connErr != nil {
return errors.Wrap(connErr, "mkdir") return errors.Wrap(connErr, "mkdir")
} }
err = c.MakeDir(f.dirFromStandardPath(abspath)) err = c.MakeDir(abspath)
f.putFtpConnection(&c, err) f.putFtpConnection(&c, err)
switch errX := err.(type) { switch errX := err.(type) {
case *textproto.Error: case *textproto.Error:
@@ -788,27 +543,27 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
// mkParentDir makes the parent of remote if necessary and any // mkParentDir makes the parent of remote if necessary and any
// directories above that // directories above that
func (f *Fs) mkParentDir(ctx context.Context, remote string) error { func (f *Fs) mkParentDir(remote string) error {
parent := path.Dir(remote) parent := path.Dir(remote)
return f.mkdir(ctx, path.Join(f.root, parent)) return f.mkdir(path.Join(f.root, parent))
} }
// Mkdir creates the directory if it doesn't exist // Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) { func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
// defer fs.Trace(dir, "")("err=%v", &err) // defer fs.Trace(dir, "")("err=%v", &err)
root := path.Join(f.root, dir) root := path.Join(f.root, dir)
return f.mkdir(ctx, root) return f.mkdir(root)
} }
// Rmdir removes the directory (container, bucket) if empty // Rmdir removes the directory (container, bucket) if empty
// //
// Return an error if it doesn't exist or isn't empty // Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error { func (f *Fs) Rmdir(ctx context.Context, dir string) error {
c, err := f.getFtpConnection(ctx) c, err := f.getFtpConnection()
if err != nil { if err != nil {
return errors.Wrap(translateErrorFile(err), "Rmdir") return errors.Wrap(translateErrorFile(err), "Rmdir")
} }
err = c.RemoveDir(f.dirFromStandardPath(path.Join(f.root, dir))) err = c.RemoveDir(path.Join(f.root, dir))
f.putFtpConnection(&c, err) f.putFtpConnection(&c, err)
return translateErrorDir(err) return translateErrorDir(err)
} }
@@ -820,17 +575,17 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't move - not same remote type") fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove return nil, fs.ErrorCantMove
} }
err := f.mkParentDir(ctx, remote) err := f.mkParentDir(remote)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "Move mkParentDir failed") return nil, errors.Wrap(err, "Move mkParentDir failed")
} }
c, err := f.getFtpConnection(ctx) c, err := f.getFtpConnection()
if err != nil { if err != nil {
return nil, errors.Wrap(err, "Move") return nil, errors.Wrap(err, "Move")
} }
err = c.Rename( err = c.Rename(
f.opt.Enc.FromStandardPath(path.Join(srcObj.fs.root, srcObj.remote)), path.Join(srcObj.fs.root, srcObj.remote),
f.opt.Enc.FromStandardPath(path.Join(f.root, remote)), path.Join(f.root, remote),
) )
f.putFtpConnection(&c, err) f.putFtpConnection(&c, err)
if err != nil { if err != nil {
@@ -844,7 +599,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
} }
// DirMove moves src, srcRemote to this remote at dstRemote // DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations. // using server side move operations.
// //
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
@@ -861,7 +616,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
dstPath := path.Join(f.root, dstRemote) dstPath := path.Join(f.root, dstRemote)
// Check if destination exists // Check if destination exists
fi, err := f.getInfo(ctx, dstPath) fi, err := f.getInfo(dstPath)
if err == nil { if err == nil {
if fi.IsDir { if fi.IsDir {
return fs.ErrorDirExists return fs.ErrorDirExists
@@ -872,19 +627,19 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
} }
// Make sure the parent directory exists // Make sure the parent directory exists
err = f.mkdir(ctx, path.Dir(dstPath)) err = f.mkdir(path.Dir(dstPath))
if err != nil { if err != nil {
return errors.Wrap(err, "DirMove mkParentDir dst failed") return errors.Wrap(err, "DirMove mkParentDir dst failed")
} }
// Do the move // Do the move
c, err := f.getFtpConnection(ctx) c, err := f.getFtpConnection()
if err != nil { if err != nil {
return errors.Wrap(err, "DirMove") return errors.Wrap(err, "DirMove")
} }
err = c.Rename( err = c.Rename(
f.dirFromStandardPath(srcPath), srcPath,
f.dirFromStandardPath(dstPath), dstPath,
) )
f.putFtpConnection(&c, err) f.putFtpConnection(&c, err)
if err != nil { if err != nil {
@@ -962,31 +717,27 @@ func (f *ftpReadCloser) Close() error {
go func() { go func() {
errchan <- f.rc.Close() errchan <- f.rc.Close()
}() }()
// Wait for Close for up to 60 seconds by default // Wait for Close for up to 60 seconds
timer := time.NewTimer(time.Duration(f.f.opt.CloseTimeout)) timer := time.NewTimer(60 * time.Second)
select { select {
case err = <-errchan: case err = <-errchan:
timer.Stop() timer.Stop()
case <-timer.C: case <-timer.C:
// if timer fired assume no error but connection dead // if timer fired assume no error but connection dead
fs.Errorf(f.f, "Timeout when waiting for connection Close") fs.Errorf(f.f, "Timeout when waiting for connection Close")
f.f.putFtpConnection(nil, nil)
return nil return nil
} }
// if errors while reading or closing, dump the connection // if errors while reading or closing, dump the connection
if err != nil || f.err != nil { if err != nil || f.err != nil {
_ = f.c.Quit() _ = f.c.Quit()
f.f.putFtpConnection(nil, nil)
} else { } else {
f.f.putFtpConnection(&f.c, nil) f.f.putFtpConnection(&f.c, nil)
} }
// mask the error if it was caused by a premature close // mask the error if it was caused by a premature close
// NB StatusAboutToSend is to work around a bug in pureftpd
// See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
switch errX := err.(type) { switch errX := err.(type) {
case *textproto.Error: case *textproto.Error:
switch errX.Code { switch errX.Code {
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend: case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable:
err = nil err = nil
} }
} }
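The Close handling above reduces to a reusable shape: run the blocking call in a goroutine and stop waiting after a deadline rather than hanging on a dead server. A minimal sketch (hypothetical closeWithTimeout helper; assumes "io" and "time" imports):
// closeWithTimeout closes c in the background and gives up after d,
// treating a timeout as "connection dead, nothing more to report".
func closeWithTimeout(c io.Closer, d time.Duration) error {
	errc := make(chan error, 1) // buffered so the goroutine never blocks forever
	go func() { errc <- c.Close() }()
	timer := time.NewTimer(d)
	defer timer.Stop()
	select {
	case err := <-errc:
		return err
	case <-timer.C:
		return nil // assume no error but the connection is dead
	}
}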
@@ -1010,11 +761,11 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
} }
} }
} }
c, err := o.fs.getFtpConnection(ctx) c, err := o.fs.getFtpConnection()
if err != nil { if err != nil {
return nil, errors.Wrap(err, "open") return nil, errors.Wrap(err, "open")
} }
fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset)) fd, err := c.RetrFrom(path, uint64(offset))
if err != nil { if err != nil {
o.fs.putFtpConnection(&c, err) o.fs.putFtpConnection(&c, err)
return nil, errors.Wrap(err, "open") return nil, errors.Wrap(err, "open")
@@ -1045,19 +796,18 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
fs.Debugf(o, "Removed after failed upload: %v", err) fs.Debugf(o, "Removed after failed upload: %v", err)
} }
} }
c, err := o.fs.getFtpConnection(ctx) c, err := o.fs.getFtpConnection()
if err != nil { if err != nil {
return errors.Wrap(err, "Update") return errors.Wrap(err, "Update")
} }
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in) err = c.Stor(path, in)
if err != nil { if err != nil {
_ = c.Quit() // toss this connection to avoid sync errors _ = c.Quit() // toss this connection to avoid sync errors
remove() remove()
o.fs.putFtpConnection(nil, err)
return errors.Wrap(err, "update stor") return errors.Wrap(err, "update stor")
} }
o.fs.putFtpConnection(&c, nil) o.fs.putFtpConnection(&c, nil)
o.info, err = o.fs.getInfo(ctx, path) o.info, err = o.fs.getInfo(path)
if err != nil { if err != nil {
return errors.Wrap(err, "update getinfo") return errors.Wrap(err, "update getinfo")
} }
@@ -1069,18 +819,18 @@ func (o *Object) Remove(ctx context.Context) (err error) {
// defer fs.Trace(o, "")("err=%v", &err) // defer fs.Trace(o, "")("err=%v", &err)
path := path.Join(o.fs.root, o.remote) path := path.Join(o.fs.root, o.remote)
// Check if it's a directory or a file // Check if it's a directory or a file
info, err := o.fs.getInfo(ctx, path) info, err := o.fs.getInfo(path)
if err != nil { if err != nil {
return err return err
} }
if info.IsDir { if info.IsDir {
err = o.fs.Rmdir(ctx, o.remote) err = o.fs.Rmdir(ctx, o.remote)
} else { } else {
c, err := o.fs.getFtpConnection(ctx) c, err := o.fs.getFtpConnection()
if err != nil { if err != nil {
return errors.Wrap(err, "Remove") return errors.Wrap(err, "Remove")
} }
err = c.Delete(o.fs.opt.Enc.FromStandardPath(path)) err = c.Delete(path)
o.fs.putFtpConnection(&c, err) o.fs.putFtpConnection(&c, err)
} }
return err return err
@@ -1092,6 +842,5 @@ var (
_ fs.Mover = &Fs{} _ fs.Mover = &Fs{}
_ fs.DirMover = &Fs{} _ fs.DirMover = &Fs{}
_ fs.PutStreamer = &Fs{} _ fs.PutStreamer = &Fs{}
_ fs.Shutdowner = &Fs{}
_ fs.Object = &Object{} _ fs.Object = &Object{}
) )


@@ -5,44 +5,13 @@ import (
 	"testing"
 	"github.com/rclone/rclone/backend/ftp"
-	"github.com/rclone/rclone/fstest"
 	"github.com/rclone/rclone/fstest/fstests"
 )
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
-		RemoteName: "TestFTPProftpd:",
+		RemoteName: "TestFTP:",
 		NilObject:  (*ftp.Object)(nil),
 	})
 }
-func TestIntegration2(t *testing.T) {
-	if *fstest.RemoteName != "" {
-		t.Skip("skipping as -remote is set")
-	}
-	fstests.Run(t, &fstests.Opt{
-		RemoteName: "TestFTPRclone:",
-		NilObject:  (*ftp.Object)(nil),
-	})
-}
-func TestIntegration3(t *testing.T) {
-	if *fstest.RemoteName != "" {
-		t.Skip("skipping as -remote is set")
-	}
-	fstests.Run(t, &fstests.Opt{
-		RemoteName: "TestFTPPureftpd:",
-		NilObject:  (*ftp.Object)(nil),
-	})
-}
-// func TestIntegration4(t *testing.T) {
-// 	if *fstest.RemoteName != "" {
-// 		t.Skip("skipping as -remote is set")
-// 	}
-// 	fstests.Run(t, &fstests.Opt{
-// 		RemoteName: "TestFTPVsftpd:",
-// 		NilObject:  (*ftp.Object)(nil),
-// 	})
-// }


@@ -19,9 +19,10 @@ import (
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"log"
"net/http" "net/http"
"os"
"path" "path"
"strconv"
"strings" "strings"
"time" "time"
@@ -36,8 +37,6 @@ import (
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk" "github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket" "github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/oauthutil" "github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/pacer"
"golang.org/x/oauth2" "golang.org/x/oauth2"
@@ -51,10 +50,10 @@ import (
const ( const (
rcloneClientID = "202264815644.apps.googleusercontent.com" rcloneClientID = "202264815644.apps.googleusercontent.com"
rcloneEncryptedClientSecret = "Uj7C9jGfb9gmeaV70Lh058cNkWvepr-Es9sBm0zdgil7JaOWF1VySw" rcloneEncryptedClientSecret = "Uj7C9jGfb9gmeaV70Lh058cNkWvepr-Es9sBm0zdgil7JaOWF1VySw"
timeFormat = time.RFC3339Nano timeFormatIn = time.RFC3339
metaMtime = "mtime" // key to store mtime in metadata timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
metaMtimeGsutil = "goog-reserved-file-mtime" // key used by GSUtil to store mtime in metadata metaMtime = "mtime" // key to store mtime under in metadata
listChunks = 1000 // chunk size to read directory listings listChunks = 1000 // chunk size to read directory listings
minSleep = 10 * time.Millisecond minSleep = 10 * time.Millisecond
) )
@@ -76,31 +75,33 @@ func init() {
Prefix: "gcs", Prefix: "gcs",
Description: "Google Cloud Storage (this is not Google Drive)", Description: "Google Cloud Storage (this is not Google Drive)",
NewFs: NewFs, NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) { Config: func(name string, m configmap.Mapper) {
saFile, _ := m.Get("service_account_file") saFile, _ := m.Get("service_account_file")
saCreds, _ := m.Get("service_account_credentials") saCreds, _ := m.Get("service_account_credentials")
anonymous, _ := m.Get("anonymous") if saFile != "" || saCreds != "" {
if saFile != "" || saCreds != "" || anonymous == "true" { return
return nil, nil }
err := oauthutil.Config("google cloud storage", name, m, storageConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
} }
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: storageConfig,
})
}, },
Options: append(oauthutil.SharedOptions, []fs.Option{{ Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Google Application Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Google Application Client Secret\nLeave blank normally.",
}, {
Name: "project_number", Name: "project_number",
Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.", Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
}, { }, {
Name: "service_account_file", Name: "service_account_file",
Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp, Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
}, { }, {
Name: "service_account_credentials", Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.", Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideBoth, Hide: fs.OptionHideBoth,
}, {
Name: "anonymous",
Help: "Access public buckets and objects without credentials\nSet to 'true' if you just want to download files and don't configure credentials.",
Default: false,
}, { }, {
Name: "object_acl", Name: "object_acl",
Help: "Access Control List for new objects.", Help: "Access Control List for new objects.",
@@ -240,36 +241,24 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
}, { }, {
Value: "COLDLINE", Value: "COLDLINE",
Help: "Coldline storage class", Help: "Coldline storage class",
}, {
Value: "ARCHIVE",
Help: "Archive storage class",
}, { }, {
Value: "DURABLE_REDUCED_AVAILABILITY", Value: "DURABLE_REDUCED_AVAILABILITY",
Help: "Durable reduced availability storage class", Help: "Durable reduced availability storage class",
}}, }},
}, { }},
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.Base |
encoder.EncodeCrLf |
encoder.EncodeInvalidUtf8),
}}...),
}) })
} }
// Options defines the configuration for this backend // Options defines the configuration for this backend
type Options struct { type Options struct {
ProjectNumber string `config:"project_number"` ProjectNumber string `config:"project_number"`
ServiceAccountFile string `config:"service_account_file"` ServiceAccountFile string `config:"service_account_file"`
ServiceAccountCredentials string `config:"service_account_credentials"` ServiceAccountCredentials string `config:"service_account_credentials"`
Anonymous bool `config:"anonymous"` ObjectACL string `config:"object_acl"`
ObjectACL string `config:"object_acl"` BucketACL string `config:"bucket_acl"`
BucketACL string `config:"bucket_acl"` BucketPolicyOnly bool `config:"bucket_policy_only"`
BucketPolicyOnly bool `config:"bucket_policy_only"` Location string `config:"location"`
Location string `config:"location"` StorageClass string `config:"storage_class"`
StorageClass string `config:"storage_class"`
Enc encoder.MultiEncoder `config:"encoding"`
} }
// Fs represents a remote storage server // Fs represents a remote storage server
@@ -328,10 +317,7 @@ func (f *Fs) Features() *fs.Features {
} }
// shouldRetry determines whether a given err rates being retried // shouldRetry determines whether a given err rates being retried
func shouldRetry(ctx context.Context, err error) (again bool, errOut error) { func shouldRetry(err error) (again bool, errOut error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
again = false again = false
if err != nil { if err != nil {
if fserrors.ShouldRetry(err) { if fserrors.ShouldRetry(err) {
@@ -363,8 +349,7 @@ func parsePath(path string) (root string) {
// split returns bucket and bucketPath from the rootRelativePath // split returns bucket and bucketPath from the rootRelativePath
// relative to f.root // relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) { func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath)) return bucket.Split(path.Join(f.root, rootRelativePath))
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
} }
// split returns bucket and bucketPath from the object // split returns bucket and bucketPath from the object
@@ -372,12 +357,12 @@ func (o *Object) split() (bucket, bucketPath string) {
return o.fs.split(o.remote) return o.fs.split(o.remote)
} }
func getServiceAccountClient(ctx context.Context, credentialsData []byte) (*http.Client, error) { func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...) conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "error processing credentials") return nil, errors.Wrap(err, "error processing credentials")
} }
ctxWithSpecialClient := oauthutil.Context(ctx, fshttp.NewClient(ctx)) ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config))
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
} }
@@ -388,7 +373,7 @@ func (f *Fs) setRoot(root string) {
} }
// NewFs constructs an Fs from the path, bucket:path // NewFs constructs an Fs from the path, bucket:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
var oAuthClient *http.Client var oAuthClient *http.Client
// Parse config into Options struct // Parse config into Options struct
@@ -406,21 +391,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// try loading service account credentials from env variable, then from a file // try loading service account credentials from env variable, then from a file
if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" { if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile)) loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
if err != nil { if err != nil {
return nil, errors.Wrap(err, "error opening service account credentials file") return nil, errors.Wrap(err, "error opening service account credentials file")
} }
opt.ServiceAccountCredentials = string(loadedCreds) opt.ServiceAccountCredentials = string(loadedCreds)
} }
if opt.Anonymous { if opt.ServiceAccountCredentials != "" {
oAuthClient = fshttp.NewClient(ctx) oAuthClient, err = getServiceAccountClient([]byte(opt.ServiceAccountCredentials))
} else if opt.ServiceAccountCredentials != "" {
oAuthClient, err = getServiceAccountClient(ctx, []byte(opt.ServiceAccountCredentials))
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account") return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
} }
} else { } else {
oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig) oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
if err != nil { if err != nil {
ctx := context.Background() ctx := context.Background()
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope) oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
@@ -434,7 +417,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
name: name, name: name,
root: root, root: root,
opt: *opt, opt: *opt,
pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))), pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
cache: bucket.NewCache(), cache: bucket.NewCache(),
} }
f.setRoot(root) f.setRoot(root)
@@ -443,7 +426,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
WriteMimeType: true, WriteMimeType: true,
BucketBased: true, BucketBased: true,
BucketBasedRootOK: true, BucketBasedRootOK: true,
}).Fill(ctx, f) }).Fill(f)
// Create a new authorized Drive client. // Create a new authorized Drive client.
f.client = oAuthClient f.client = oAuthClient
@@ -454,10 +437,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if f.rootBucket != "" && f.rootDirectory != "" { if f.rootBucket != "" && f.rootDirectory != "" {
// Check to see if the object exists // Check to see if the object exists
encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do() _, err = f.svc.Objects.Get(f.rootBucket, f.rootDirectory).Do()
return shouldRetry(ctx, err) return shouldRetry(err)
}) })
if err == nil { if err == nil {
newRoot := path.Dir(f.root) newRoot := path.Dir(f.root)
@@ -475,7 +457,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Return an Object from a path // Return an Object from a path
// //
// If it can't be found it returns the error fs.ErrorObjectNotFound. // If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *storage.Object) (fs.Object, error) { func (f *Fs) newObjectWithInfo(remote string, info *storage.Object) (fs.Object, error) {
o := &Object{ o := &Object{
fs: f, fs: f,
remote: remote, remote: remote,
@@ -483,7 +465,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *storage
if info != nil { if info != nil {
o.setMetaData(info) o.setMetaData(info)
} else { } else {
err := o.readMetaData(ctx) // reads info and meta, returning an error err := o.readMetaData() // reads info and meta, returning an error
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -494,7 +476,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *storage
// NewObject finds the Object at remote. If it can't be found // NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound. // it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil) return f.newObjectWithInfo(remote, nil)
} }
// listFn is called from list to handle an object. // listFn is called from list to handle an object.
@@ -522,8 +504,8 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
for { for {
var objects *storage.Objects var objects *storage.Objects
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
objects, err = list.Context(ctx).Do() objects, err = list.Do()
return shouldRetry(ctx, err) return shouldRetry(err)
}) })
if err != nil { if err != nil {
if gErr, ok := err.(*googleapi.Error); ok { if gErr, ok := err.(*googleapi.Error); ok {
@@ -539,7 +521,6 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
if !strings.HasSuffix(remote, "/") { if !strings.HasSuffix(remote, "/") {
continue continue
} }
remote = f.opt.Enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) { if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", remote) fs.Logf(f, "Odd name received %q", remote)
continue continue
@@ -555,18 +536,17 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
} }
} }
for _, object := range objects.Items { for _, object := range objects.Items {
remote := f.opt.Enc.ToStandardPath(object.Name) if !strings.HasPrefix(object.Name, prefix) {
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", object.Name) fs.Logf(f, "Odd name received %q", object.Name)
continue continue
} }
remote = remote[len(prefix):] remote := object.Name[len(prefix):]
isDirectory := remote == "" || strings.HasSuffix(remote, "/") isDirectory := strings.HasSuffix(remote, "/")
if addBucket { if addBucket {
remote = path.Join(bucket, remote) remote = path.Join(bucket, remote)
} }
// is this a directory marker? // is this a directory marker?
if isDirectory { if isDirectory && object.Size == 0 {
continue // skip directory marker continue // skip directory marker
} }
err = fn(remote, object, false) err = fn(remote, object, false)
@@ -583,12 +563,12 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
} }
// Convert a list item into a DirEntry // Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) { func (f *Fs) itemToDirEntry(remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) {
if isDirectory { if isDirectory {
d := fs.NewDir(remote, time.Time{}).SetSize(int64(object.Size)) d := fs.NewDir(remote, time.Time{}).SetSize(int64(object.Size))
return d, nil return d, nil
} }
o, err := f.newObjectWithInfo(ctx, remote, object) o, err := f.newObjectWithInfo(remote, object)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -599,7 +579,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *storage.
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) { func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
// List the objects // List the objects
err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *storage.Object, isDirectory bool) error { err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory) entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil { if err != nil {
return err return err
} }
@@ -625,14 +605,14 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
for { for {
var buckets *storage.Buckets var buckets *storage.Buckets
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
buckets, err = listBuckets.Context(ctx).Do() buckets, err = listBuckets.Do()
return shouldRetry(ctx, err) return shouldRetry(err)
}) })
if err != nil { if err != nil {
return nil, err return nil, err
} }
for _, bucket := range buckets.Items { for _, bucket := range buckets.Items {
d := fs.NewDir(f.opt.Enc.ToStandardName(bucket.Name), time.Time{}) d := fs.NewDir(bucket.Name, time.Time{})
entries = append(entries, d) entries = append(entries, d)
} }
if buckets.NextPageToken == "" { if buckets.NextPageToken == "" {
@@ -684,7 +664,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
list := walk.NewListRHelper(callback) list := walk.NewListRHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error { listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *storage.Object, isDirectory bool) error { return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory) entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil { if err != nil {
return err return err
} }
@@ -751,8 +731,8 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
// List something from the bucket to see if it exists. Doing it like this enables the use of a // List something from the bucket to see if it exists. Doing it like this enables the use of a
// service account that only has the "Storage Object Admin" role. See #2193 for details. // service account that only has the "Storage Object Admin" role. See #2193 for details.
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do() _, err = f.svc.Objects.List(bucket).MaxResults(1).Do()
return shouldRetry(ctx, err) return shouldRetry(err)
}) })
if err == nil { if err == nil {
// Bucket already exists // Bucket already exists
@@ -786,8 +766,8 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
if !f.opt.BucketPolicyOnly { if !f.opt.BucketPolicyOnly {
insertBucket.PredefinedAcl(f.opt.BucketACL) insertBucket.PredefinedAcl(f.opt.BucketACL)
} }
_, err = insertBucket.Context(ctx).Do() _, err = insertBucket.Do()
return shouldRetry(ctx, err) return shouldRetry(err)
}) })
}, nil) }, nil)
} }
@@ -803,8 +783,8 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
} }
return f.cache.Remove(bucket, func() error { return f.cache.Remove(bucket, func() error {
return f.pacer.Call(func() (bool, error) { return f.pacer.Call(func() (bool, error) {
err = f.svc.Buckets.Delete(bucket).Context(ctx).Do() err = f.svc.Buckets.Delete(bucket).Do()
return shouldRetry(ctx, err) return shouldRetry(err)
}) })
}) })
} }
@@ -814,7 +794,7 @@ func (f *Fs) Precision() time.Duration {
return time.Nanosecond return time.Nanosecond
} }
// Copy src to this remote using server-side copy operations. // Copy src to this remote using server-side copy operations.
// //
// This is stored with the remote path given // This is stored with the remote path given
// //
@@ -842,27 +822,20 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
remote: remote, remote: remote,
} }
rewriteRequest := f.svc.Objects.Rewrite(srcBucket, srcPath, dstBucket, dstPath, nil) var newObject *storage.Object
if !f.opt.BucketPolicyOnly { err = f.pacer.Call(func() (bool, error) {
rewriteRequest.DestinationPredefinedAcl(f.opt.ObjectACL) copyObject := f.svc.Objects.Copy(srcBucket, srcPath, dstBucket, dstPath, nil)
} if !f.opt.BucketPolicyOnly {
var rewriteResponse *storage.RewriteResponse copyObject.DestinationPredefinedAcl(f.opt.ObjectACL)
for {
err = f.pacer.Call(func() (bool, error) {
rewriteResponse, err = rewriteRequest.Context(ctx).Do()
return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
} }
if rewriteResponse.Done { newObject, err = copyObject.Do()
break return shouldRetry(err)
} })
rewriteRequest.RewriteToken(rewriteResponse.RewriteToken) if err != nil {
fs.Debugf(dstObj, "Continuing rewrite %d bytes done", rewriteResponse.TotalBytesRewritten) return nil, err
} }
// Set the metadata for the new object while we have it // Set the metadata for the new object while we have it
dstObj.setMetaData(rewriteResponse.Resource) dstObj.setMetaData(newObject)
return dstObj, nil return dstObj, nil
} }
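The left-hand (newer) side replaces the single Objects.Copy call with Objects.Rewrite, which may return before the copy is complete; the caller must loop, feeding the returned RewriteToken back in until Done is set (large or cross-location copies take several rounds). A condensed sketch of that loop under the same storage/v1 client, without the pacer:

    package sketch

    import (
        "context"
        "fmt"

        storage "google.golang.org/api/storage/v1"
    )

    // serverSideCopy copies src onto dst entirely inside GCS, resuming with
    // the RewriteToken until the service reports the rewrite as done.
    func serverSideCopy(ctx context.Context, svc *storage.Service,
        srcBucket, srcPath, dstBucket, dstPath string) (*storage.Object, error) {
        req := svc.Objects.Rewrite(srcBucket, srcPath, dstBucket, dstPath, nil)
        for {
            res, err := req.Context(ctx).Do()
            if err != nil {
                return nil, err
            }
            if res.Done {
                return res.Resource, nil
            }
            // Not finished yet - hand the continuation token back in.
            req.RewriteToken(res.RewriteToken)
            fmt.Printf("continuing rewrite: %d bytes done\n", res.TotalBytesRewritten)
        }
    }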
@@ -921,7 +894,7 @@ func (o *Object) setMetaData(info *storage.Object) {
// read mtime out of metadata if available // read mtime out of metadata if available
mtimeString, ok := info.Metadata[metaMtime] mtimeString, ok := info.Metadata[metaMtime]
if ok { if ok {
modTime, err := time.Parse(timeFormat, mtimeString) modTime, err := time.Parse(timeFormatIn, mtimeString)
if err == nil { if err == nil {
o.modTime = modTime o.modTime = modTime
return return
@@ -929,19 +902,8 @@ func (o *Object) setMetaData(info *storage.Object) {
fs.Debugf(o, "Failed to read mtime from metadata: %s", err) fs.Debugf(o, "Failed to read mtime from metadata: %s", err)
} }
// Fallback to GSUtil mtime
mtimeGsutilString, ok := info.Metadata[metaMtimeGsutil]
if ok {
unixTimeSec, err := strconv.ParseInt(mtimeGsutilString, 10, 64)
if err == nil {
o.modTime = time.Unix(unixTimeSec, 0)
return
}
fs.Debugf(o, "Failed to read GSUtil mtime from metadata: %s", err)
}
// Fallback to the Updated time // Fallback to the Updated time
modTime, err := time.Parse(timeFormat, info.Updated) modTime, err := time.Parse(timeFormatIn, info.Updated)
if err != nil { if err != nil {
fs.Logf(o, "Bad time decode: %v", err) fs.Logf(o, "Bad time decode: %v", err)
} else { } else {
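Taken together, the hunks above show a three-step fallback for the modification time: rclone's own mtime metadata entry, then (on the newer side) the epoch-seconds value gsutil writes, then the object's Updated field. A standalone sketch of that chain; the metadata key names and time layout are illustrative stand-ins for the backend's metaMtime/metaMtimeGsutil/timeFormat constants:

    package sketch

    import (
        "strconv"
        "time"
    )

    // modTimeFromMetadata picks the best available modification time.
    // The key names and time layout here are illustrative stand-ins.
    func modTimeFromMetadata(meta map[string]string, updated string) time.Time {
        const layout = time.RFC3339Nano
        if s, ok := meta["mtime"]; ok {
            if t, err := time.Parse(layout, s); err == nil {
                return t
            }
        }
        // gsutil stores whole seconds since the Unix epoch.
        if s, ok := meta["goog-reserved-file-mtime"]; ok {
            if sec, err := strconv.ParseInt(s, 10, 64); err == nil {
                return time.Unix(sec, 0)
            }
        }
        if t, err := time.Parse(layout, updated); err == nil {
            return t
        }
        return time.Now()
    }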
@@ -950,11 +912,11 @@ func (o *Object) setMetaData(info *storage.Object) {
} }
// readObjectInfo reads the definition for an object // readObjectInfo reads the definition for an object
func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) { func (o *Object) readObjectInfo() (object *storage.Object, err error) {
bucket, bucketPath := o.split() bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do() object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Do()
return shouldRetry(ctx, err) return shouldRetry(err)
}) })
if err != nil { if err != nil {
if gErr, ok := err.(*googleapi.Error); ok { if gErr, ok := err.(*googleapi.Error); ok {
@@ -970,11 +932,11 @@ func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, er
// readMetaData gets the metadata if it hasn't already been fetched // readMetaData gets the metadata if it hasn't already been fetched
// //
// it also sets the info // it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) { func (o *Object) readMetaData() (err error) {
if !o.modTime.IsZero() { if !o.modTime.IsZero() {
return nil return nil
} }
object, err := o.readObjectInfo(ctx) object, err := o.readObjectInfo()
if err != nil { if err != nil {
return err return err
} }
@@ -987,7 +949,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// It attempts to read the object's mtime and if that isn't present the // It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers // LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time { func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData(ctx) err := o.readMetaData()
if err != nil { if err != nil {
// fs.Logf(o, "Failed to read metadata: %v", err) // fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now() return time.Now()
@@ -998,24 +960,23 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// Returns metadata for an object // Returns metadata for an object
func metadataFromModTime(modTime time.Time) map[string]string { func metadataFromModTime(modTime time.Time) map[string]string {
metadata := make(map[string]string, 1) metadata := make(map[string]string, 1)
metadata[metaMtime] = modTime.Format(timeFormat) metadata[metaMtime] = modTime.Format(timeFormatOut)
metadata[metaMtimeGsutil] = strconv.FormatInt(modTime.Unix(), 10)
return metadata return metadata
} }
// SetModTime sets the modification time of the local fs object // SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) { func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
// read the complete existing object first // read the complete existing object first
object, err := o.readObjectInfo(ctx) object, err := o.readObjectInfo()
if err != nil { if err != nil {
return err return err
} }
// Add the mtime to the existing metadata // Add the mtime to the existing metadata
mtime := modTime.Format(timeFormatOut)
if object.Metadata == nil { if object.Metadata == nil {
object.Metadata = make(map[string]string, 1) object.Metadata = make(map[string]string, 1)
} }
object.Metadata[metaMtime] = modTime.Format(timeFormat) object.Metadata[metaMtime] = mtime
object.Metadata[metaMtimeGsutil] = strconv.FormatInt(modTime.Unix(), 10)
// Copy the object to itself to update the metadata // Copy the object to itself to update the metadata
// Using PATCH requires too many permissions // Using PATCH requires too many permissions
bucket, bucketPath := o.split() bucket, bucketPath := o.split()
@@ -1025,8 +986,8 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
if !o.fs.opt.BucketPolicyOnly { if !o.fs.opt.BucketPolicyOnly {
copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL) copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
} }
newObject, err = copyObject.Context(ctx).Do() newObject, err = copyObject.Do()
return shouldRetry(ctx, err) return shouldRetry(err)
}) })
if err != nil { if err != nil {
return err return err
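As the comments say, PATCHing metadata needs the full_control scope which Google objected to, so SetModTime instead copies the object onto itself carrying the amended metadata, which ordinary write permission allows (the same trick S3 uses). A sketch, again assuming the storage/v1 client; the metadata key and layout are illustrative:

    package sketch

    import (
        "context"
        "time"

        storage "google.golang.org/api/storage/v1"
    )

    // setModTime rewrites an object onto itself with updated metadata,
    // avoiding the elevated scope that Objects.Patch would require.
    func setModTime(ctx context.Context, svc *storage.Service, bucket, path string, modTime time.Time) error {
        object, err := svc.Objects.Get(bucket, path).Context(ctx).Do()
        if err != nil {
            return err
        }
        if object.Metadata == nil {
            object.Metadata = make(map[string]string, 1)
        }
        object.Metadata["mtime"] = modTime.Format(time.RFC3339Nano) // illustrative key and layout
        // Source and destination are identical: only the metadata changes.
        _, err = svc.Objects.Copy(bucket, path, bucket, path, object).Context(ctx).Do()
        return err
    }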
@@ -1042,7 +1003,7 @@ func (o *Object) Storable() bool {
// Open an object for read // Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
req, err := http.NewRequestWithContext(ctx, "GET", o.url, nil) req, err := http.NewRequest("GET", o.url, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -1057,7 +1018,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
_ = res.Body.Close() // ignore error _ = res.Body.Close() // ignore error
} }
} }
return shouldRetry(ctx, err) return shouldRetry(err)
}) })
if err != nil { if err != nil {
return nil, err return nil, err
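Open issues a plain HTTP GET against the object's media URL and then applies the fs.OpenOption values, typically a Range header when seeking, to the request. A minimal sketch of that header application using only the Header() method every fs.OpenOption provides; the helper name is hypothetical:

    package sketch

    import (
        "net/http"

        "github.com/rclone/rclone/fs"
    )

    // applyOptions copies each open option onto the request as an HTTP
    // header; an empty key means the option has no header representation.
    func applyOptions(req *http.Request, options ...fs.OpenOption) {
        for _, option := range options {
            key, value := option.Header()
            if key == "" {
                continue
            }
            req.Header.Add(key, value)
        }
    }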
@@ -1087,43 +1048,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
ContentType: fs.MimeType(ctx, src), ContentType: fs.MimeType(ctx, src),
Metadata: metadataFromModTime(modTime), Metadata: metadataFromModTime(modTime),
} }
// Apply upload options
for _, option := range options {
key, value := option.Header()
lowerKey := strings.ToLower(key)
switch lowerKey {
case "":
// ignore
case "cache-control":
object.CacheControl = value
case "content-disposition":
object.ContentDisposition = value
case "content-encoding":
object.ContentEncoding = value
case "content-language":
object.ContentLanguage = value
case "content-type":
object.ContentType = value
case "x-goog-storage-class":
object.StorageClass = value
default:
const googMetaPrefix = "x-goog-meta-"
if strings.HasPrefix(lowerKey, googMetaPrefix) {
metaKey := lowerKey[len(googMetaPrefix):]
object.Metadata[metaKey] = value
} else {
fs.Errorf(o, "Don't know how to set key %q on upload", key)
}
}
}
var newObject *storage.Object var newObject *storage.Object
err = o.fs.pacer.CallNoRetry(func() (bool, error) { err = o.fs.pacer.CallNoRetry(func() (bool, error) {
insertObject := o.fs.svc.Objects.Insert(bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name) insertObject := o.fs.svc.Objects.Insert(bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
if !o.fs.opt.BucketPolicyOnly { if !o.fs.opt.BucketPolicyOnly {
insertObject.PredefinedAcl(o.fs.opt.ObjectACL) insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
} }
newObject, err = insertObject.Context(ctx).Do() newObject, err = insertObject.Do()
return shouldRetry(ctx, err) return shouldRetry(err)
}) })
if err != nil { if err != nil {
return err return err
@@ -1137,8 +1069,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
func (o *Object) Remove(ctx context.Context) (err error) { func (o *Object) Remove(ctx context.Context) (err error) {
bucket, bucketPath := o.split() bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do() err = o.fs.svc.Objects.Delete(bucket, bucketPath).Do()
return shouldRetry(ctx, err) return shouldRetry(err)
}) })
return err return err
} }

View File

@@ -30,7 +30,7 @@ func TestAlbumsAdd(t *testing.T) {
albums.add(a1) albums.add(a1)
assert.Equal(t, map[string][]*api.Album{ assert.Equal(t, map[string][]*api.Album{
"one": {a1}, "one": []*api.Album{a1},
}, albums.dupes) }, albums.dupes)
assert.Equal(t, map[string]*api.Album{ assert.Equal(t, map[string]*api.Album{
"1": a1, "1": a1,
@@ -39,7 +39,7 @@ func TestAlbumsAdd(t *testing.T) {
"one": a1, "one": a1,
}, albums.byTitle) }, albums.byTitle)
assert.Equal(t, map[string][]string{ assert.Equal(t, map[string][]string{
"": {"one"}, "": []string{"one"},
}, albums.path) }, albums.path)
a2 := &api.Album{ a2 := &api.Album{
@@ -49,8 +49,8 @@ func TestAlbumsAdd(t *testing.T) {
albums.add(a2) albums.add(a2)
assert.Equal(t, map[string][]*api.Album{ assert.Equal(t, map[string][]*api.Album{
"one": {a1}, "one": []*api.Album{a1},
"two": {a2}, "two": []*api.Album{a2},
}, albums.dupes) }, albums.dupes)
assert.Equal(t, map[string]*api.Album{ assert.Equal(t, map[string]*api.Album{
"1": a1, "1": a1,
@@ -61,7 +61,7 @@ func TestAlbumsAdd(t *testing.T) {
"two": a2, "two": a2,
}, albums.byTitle) }, albums.byTitle)
assert.Equal(t, map[string][]string{ assert.Equal(t, map[string][]string{
"": {"one", "two"}, "": []string{"one", "two"},
}, albums.path) }, albums.path)
// Add a duplicate // Add a duplicate
@@ -72,8 +72,8 @@ func TestAlbumsAdd(t *testing.T) {
albums.add(a2a) albums.add(a2a)
assert.Equal(t, map[string][]*api.Album{ assert.Equal(t, map[string][]*api.Album{
"one": {a1}, "one": []*api.Album{a1},
"two": {a2, a2a}, "two": []*api.Album{a2, a2a},
}, albums.dupes) }, albums.dupes)
assert.Equal(t, map[string]*api.Album{ assert.Equal(t, map[string]*api.Album{
"1": a1, "1": a1,
@@ -86,7 +86,7 @@ func TestAlbumsAdd(t *testing.T) {
"two {2a}": a2a, "two {2a}": a2a,
}, albums.byTitle) }, albums.byTitle)
assert.Equal(t, map[string][]string{ assert.Equal(t, map[string][]string{
"": {"one", "two {2}", "two {2a}"}, "": []string{"one", "two {2}", "two {2a}"},
}, albums.path) }, albums.path)
// Add a sub directory // Add a sub directory
@@ -97,9 +97,9 @@ func TestAlbumsAdd(t *testing.T) {
albums.add(a1sub) albums.add(a1sub)
assert.Equal(t, map[string][]*api.Album{ assert.Equal(t, map[string][]*api.Album{
"one": {a1}, "one": []*api.Album{a1},
"two": {a2, a2a}, "two": []*api.Album{a2, a2a},
"one/sub": {a1sub}, "one/sub": []*api.Album{a1sub},
}, albums.dupes) }, albums.dupes)
assert.Equal(t, map[string]*api.Album{ assert.Equal(t, map[string]*api.Album{
"1": a1, "1": a1,
@@ -114,8 +114,8 @@ func TestAlbumsAdd(t *testing.T) {
"two {2a}": a2a, "two {2a}": a2a,
}, albums.byTitle) }, albums.byTitle)
assert.Equal(t, map[string][]string{ assert.Equal(t, map[string][]string{
"": {"one", "two {2}", "two {2a}"}, "": []string{"one", "two {2}", "two {2a}"},
"one": {"sub"}, "one": []string{"sub"},
}, albums.path) }, albums.path)
// Add a weird path // Add a weird path
@@ -126,10 +126,10 @@ func TestAlbumsAdd(t *testing.T) {
albums.add(a0) albums.add(a0)
assert.Equal(t, map[string][]*api.Album{ assert.Equal(t, map[string][]*api.Album{
"{0}": {a0}, "{0}": []*api.Album{a0},
"one": {a1}, "one": []*api.Album{a1},
"two": {a2, a2a}, "two": []*api.Album{a2, a2a},
"one/sub": {a1sub}, "one/sub": []*api.Album{a1sub},
}, albums.dupes) }, albums.dupes)
assert.Equal(t, map[string]*api.Album{ assert.Equal(t, map[string]*api.Album{
"0": a0, "0": a0,
@@ -146,8 +146,8 @@ func TestAlbumsAdd(t *testing.T) {
"two {2a}": a2a, "two {2a}": a2a,
}, albums.byTitle) }, albums.byTitle)
assert.Equal(t, map[string][]string{ assert.Equal(t, map[string][]string{
"": {"one", "two {2}", "two {2a}", "{0}"}, "": []string{"one", "two {2}", "two {2a}", "{0}"},
"one": {"sub"}, "one": []string{"sub"},
}, albums.path) }, albums.path)
} }
@@ -181,9 +181,9 @@ func TestAlbumsDel(t *testing.T) {
albums.add(a1sub) albums.add(a1sub)
assert.Equal(t, map[string][]*api.Album{ assert.Equal(t, map[string][]*api.Album{
"one": {a1}, "one": []*api.Album{a1},
"two": {a2, a2a}, "two": []*api.Album{a2, a2a},
"one/sub": {a1sub}, "one/sub": []*api.Album{a1sub},
}, albums.dupes) }, albums.dupes)
assert.Equal(t, map[string]*api.Album{ assert.Equal(t, map[string]*api.Album{
"1": a1, "1": a1,
@@ -198,16 +198,16 @@ func TestAlbumsDel(t *testing.T) {
"two {2a}": a2a, "two {2a}": a2a,
}, albums.byTitle) }, albums.byTitle)
assert.Equal(t, map[string][]string{ assert.Equal(t, map[string][]string{
"": {"one", "two {2}", "two {2a}"}, "": []string{"one", "two {2}", "two {2a}"},
"one": {"sub"}, "one": []string{"sub"},
}, albums.path) }, albums.path)
albums.del(a1) albums.del(a1)
assert.Equal(t, map[string][]*api.Album{ assert.Equal(t, map[string][]*api.Album{
"one": {a1}, "one": []*api.Album{a1},
"two": {a2, a2a}, "two": []*api.Album{a2, a2a},
"one/sub": {a1sub}, "one/sub": []*api.Album{a1sub},
}, albums.dupes) }, albums.dupes)
assert.Equal(t, map[string]*api.Album{ assert.Equal(t, map[string]*api.Album{
"2": a2, "2": a2,
@@ -220,16 +220,16 @@ func TestAlbumsDel(t *testing.T) {
"two {2a}": a2a, "two {2a}": a2a,
}, albums.byTitle) }, albums.byTitle)
assert.Equal(t, map[string][]string{ assert.Equal(t, map[string][]string{
"": {"one", "two {2}", "two {2a}"}, "": []string{"one", "two {2}", "two {2a}"},
"one": {"sub"}, "one": []string{"sub"},
}, albums.path) }, albums.path)
albums.del(a2) albums.del(a2)
assert.Equal(t, map[string][]*api.Album{ assert.Equal(t, map[string][]*api.Album{
"one": {a1}, "one": []*api.Album{a1},
"two": {a2, a2a}, "two": []*api.Album{a2, a2a},
"one/sub": {a1sub}, "one/sub": []*api.Album{a1sub},
}, albums.dupes) }, albums.dupes)
assert.Equal(t, map[string]*api.Album{ assert.Equal(t, map[string]*api.Album{
"2a": a2a, "2a": a2a,
@@ -240,16 +240,16 @@ func TestAlbumsDel(t *testing.T) {
"two {2a}": a2a, "two {2a}": a2a,
}, albums.byTitle) }, albums.byTitle)
assert.Equal(t, map[string][]string{ assert.Equal(t, map[string][]string{
"": {"one", "two {2a}"}, "": []string{"one", "two {2a}"},
"one": {"sub"}, "one": []string{"sub"},
}, albums.path) }, albums.path)
albums.del(a2a) albums.del(a2a)
assert.Equal(t, map[string][]*api.Album{ assert.Equal(t, map[string][]*api.Album{
"one": {a1}, "one": []*api.Album{a1},
"two": {a2, a2a}, "two": []*api.Album{a2, a2a},
"one/sub": {a1sub}, "one/sub": []*api.Album{a1sub},
}, albums.dupes) }, albums.dupes)
assert.Equal(t, map[string]*api.Album{ assert.Equal(t, map[string]*api.Album{
"1sub": a1sub, "1sub": a1sub,
@@ -258,16 +258,16 @@ func TestAlbumsDel(t *testing.T) {
"one/sub": a1sub, "one/sub": a1sub,
}, albums.byTitle) }, albums.byTitle)
assert.Equal(t, map[string][]string{ assert.Equal(t, map[string][]string{
"": {"one"}, "": []string{"one"},
"one": {"sub"}, "one": []string{"sub"},
}, albums.path) }, albums.path)
albums.del(a1sub) albums.del(a1sub)
assert.Equal(t, map[string][]*api.Album{ assert.Equal(t, map[string][]*api.Album{
"one": {a1}, "one": []*api.Album{a1},
"two": {a2, a2a}, "two": []*api.Album{a2, a2a},
"one/sub": {a1sub}, "one/sub": []*api.Album{a1sub},
}, albums.dupes) }, albums.dupes)
assert.Equal(t, map[string]*api.Album{}, albums.byID) assert.Equal(t, map[string]*api.Album{}, albums.byID)
assert.Equal(t, map[string]*api.Album{}, albums.byTitle) assert.Equal(t, map[string]*api.Album{}, albums.byTitle)

View File

@@ -17,7 +17,7 @@ type Error struct {
Details ErrorDetails `json:"error"` Details ErrorDetails `json:"error"`
} }
// Error satisfies error interface // Error satisfies error interface
func (e *Error) Error() string { func (e *Error) Error() string {
return fmt.Sprintf("%s (%d %s)", e.Details.Message, e.Details.Code, e.Details.Status) return fmt.Sprintf("%s (%d %s)", e.Details.Message, e.Details.Code, e.Details.Status)
} }

View File

@@ -8,6 +8,7 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
golog "log"
"net/http" "net/http"
"net/url" "net/url"
"path" "path"
@@ -20,6 +21,7 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/backend/googlephotos/api" "github.com/rclone/rclone/backend/googlephotos/api"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/config/obscure"
@@ -77,38 +79,44 @@ func init() {
Prefix: "gphotos", Prefix: "gphotos",
Description: "Google Photos", Description: "Google Photos",
NewFs: NewFs, NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) { Config: func(name string, m configmap.Mapper) {
// Parse config into Options struct // Parse config into Options struct
opt := new(Options) opt := new(Options)
err := configstruct.Set(m, opt) err := configstruct.Set(m, opt)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't parse config into struct") fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
return
} }
switch config.State { // Fill in the scopes
case "": if opt.ReadOnly {
// Fill in the scopes oauthConfig.Scopes[0] = scopeReadOnly
if opt.ReadOnly { } else {
oauthConfig.Scopes[0] = scopeReadOnly oauthConfig.Scopes[0] = scopeReadWrite
} else {
oauthConfig.Scopes[0] = scopeReadWrite
}
return oauthutil.ConfigOut("warning", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
case "warning":
// Warn the user as required by google photos integration
return fs.ConfigConfirm("warning_done", true, "config_warning", `Warning
IMPORTANT: All media items uploaded to Google Photos with rclone
are stored in full resolution at original quality. These uploads
will count towards storage in your Google Account.`)
case "warning_done":
return nil, nil
} }
return nil, fmt.Errorf("unknown state %q", config.State)
// Do the oauth
err = oauthutil.Config("google photos", name, m, oauthConfig)
if err != nil {
golog.Fatalf("Failed to configure token: %v", err)
}
// Warn the user
fmt.Print(`
*** IMPORTANT: All media items uploaded to Google Photos with rclone
*** are stored in full resolution at original quality. These uploads
*** will count towards storage in your Google Account.
`)
}, },
Options: append(oauthutil.SharedOptions, []fs.Option{{ Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Google Application Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Google Application Client Secret\nLeave blank normally.",
}, {
Name: "read_only", Name: "read_only",
Default: false, Default: false,
Help: `Set to make the Google Photos backend read only. Help: `Set to make the Google Photos backend read only.
@@ -126,38 +134,14 @@ rclone mount needs to know the size of files in advance of reading
them, so setting this flag when using rclone mount is recommended if them, so setting this flag when using rclone mount is recommended if
you want to read the media.`, you want to read the media.`,
Advanced: true, Advanced: true,
}, { }},
Name: "start_year",
Default: 2000,
Help: `Year limits the photos to be downloaded to those which are uploaded after the given year`,
Advanced: true,
}, {
Name: "include_archived",
Default: false,
Help: `Also view and download archived media.
By default rclone does not request archived media. Thus, when syncing,
archived media is not visible in directory listings or transferred.
Note that media in albums is always visible and synced, no matter
their archive status.
With this flag, archived media are always visible in directory
listings and transferred.
Without this flag, archived media will not be visible in directory
listings and won't be transferred.`,
Advanced: true,
}}...),
}) })
} }
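The left-hand column above is the newer state-machine style of backend configuration: instead of driving the terminal directly, the Config callback is re-invoked with config.State stepping from "" (kick off OAuth) to "warning" (show the storage notice) to "warning_done" (finished). A condensed sketch of that shape using the calls visible in the diff; the placeholder oauthConfig stands in for the backend's real OAuth2 settings:

    package sketch

    import (
        "context"
        "fmt"

        "github.com/rclone/rclone/fs"
        "github.com/rclone/rclone/fs/config/configmap"
        "github.com/rclone/rclone/lib/oauthutil"
        "golang.org/x/oauth2"
    )

    var oauthConfig = &oauth2.Config{} // stand-in; the real backend fills this in

    // configFlow condenses the state machine from the diff: each return
    // tells the config driver which state to call back with next.
    func configFlow(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
        switch config.State {
        case "": // run the OAuth flow, then continue at "warning"
            return oauthutil.ConfigOut("warning", &oauthutil.Options{
                OAuth2Config: oauthConfig,
            })
        case "warning": // surface the Google Photos storage notice
            return fs.ConfigConfirm("warning_done", true, "config_warning",
                "All uploads count towards storage in your Google Account.")
        case "warning_done":
            return nil, nil // finished
        }
        return nil, fmt.Errorf("unknown state %q", config.State)
    }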
// Options defines the configuration for this backend // Options defines the configuration for this backend
type Options struct { type Options struct {
ReadOnly bool `config:"read_only"` ReadOnly bool `config:"read_only"`
ReadSize bool `config:"read_size"` ReadSize bool `config:"read_size"`
StartYear int `config:"start_year"`
IncludeArchived bool `config:"include_archived"`
} }
// Fs represents a remote storage server // Fs represents a remote storage server
@@ -218,15 +202,6 @@ func (f *Fs) dirTime() time.Time {
return f.startTime return f.startTime
} }
// startYear returns the start year
func (f *Fs) startYear() int {
return f.opt.StartYear
}
func (f *Fs) includeArchived() bool {
return f.opt.IncludeArchived
}
// retryErrorCodes is a slice of error codes that we will retry // retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{ var retryErrorCodes = []int{
429, // Too Many Requests. 429, // Too Many Requests.
@@ -239,10 +214,7 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err // shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience // deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { func shouldRetry(resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
} }
@@ -252,10 +224,6 @@ func errorHandler(resp *http.Response) error {
if err != nil { if err != nil {
body = nil body = nil
} }
// Google sends 404 messages as images so be prepared for that
if strings.HasPrefix(resp.Header.Get("Content-Type"), "image/") {
body = []byte("Image not found or broken")
}
var e = api.Error{ var e = api.Error{
Details: api.ErrorDetails{ Details: api.ErrorDetails{
Code: resp.StatusCode, Code: resp.StatusCode,
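errorHandler has to allow for the quirk called out on the left: Google Photos can answer a 404 with an image body rather than JSON, so the body is replaced before any decoding is attempted. A trimmed-down sketch of that guard; the function name and output format are illustrative:

    package sketch

    import (
        "fmt"
        "io/ioutil"
        "net/http"
        "strings"
    )

    // errorBody returns something printable for an error response, replacing
    // image bodies (Google serves some 404s as pictures) with a fixed note.
    func errorBody(resp *http.Response) string {
        body, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            body = nil
        }
        if strings.HasPrefix(resp.Header.Get("Content-Type"), "image/") {
            body = []byte("Image not found or broken")
        }
        return fmt.Sprintf("HTTP %d: %s", resp.StatusCode, strings.TrimSpace(string(body)))
    }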
@@ -270,7 +238,7 @@ func errorHandler(resp *http.Response) error {
} }
// NewFs constructs an Fs from the path, bucket:path // NewFs constructs an Fs from the path, bucket:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct // Parse config into Options struct
opt := new(Options) opt := new(Options)
err := configstruct.Set(m, opt) err := configstruct.Set(m, opt)
@@ -278,8 +246,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, err return nil, err
} }
baseClient := fshttp.NewClient(ctx) baseClient := fshttp.NewClient(fs.Config)
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient) oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed to configure Box") return nil, errors.Wrap(err, "failed to configure Box")
} }
@@ -296,14 +264,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
unAuth: rest.NewClient(baseClient), unAuth: rest.NewClient(baseClient),
srv: rest.NewClient(oAuthClient).SetRoot(rootURL), srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
ts: ts, ts: ts,
pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))), pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
startTime: time.Now(), startTime: time.Now(),
albums: map[bool]*albums{}, albums: map[bool]*albums{},
uploaded: dirtree.New(), uploaded: dirtree.New(),
} }
f.features = (&fs.Features{ f.features = (&fs.Features{
ReadMimeType: true, ReadMimeType: true,
}).Fill(ctx, f) }).Fill(f)
f.srv.SetErrorHandler(errorHandler) f.srv.SetErrorHandler(errorHandler)
_, _, pattern := patterns.match(f.root, "", true) _, _, pattern := patterns.match(f.root, "", true)
@@ -312,7 +280,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
var leaf string var leaf string
f.root, leaf = path.Split(f.root) f.root, leaf = path.Split(f.root)
f.root = strings.TrimRight(f.root, "/") f.root = strings.TrimRight(f.root, "/")
_, err := f.NewObject(ctx, leaf) _, err := f.NewObject(context.TODO(), leaf)
if err == nil { if err == nil {
return f, fs.ErrorIsFile return f, fs.ErrorIsFile
} }
@@ -322,7 +290,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
} }
// fetchEndpoint gets the openid endpoint named from the Google config // fetchEndpoint gets the openid endpoint named from the Google config
func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, err error) { func (f *Fs) fetchEndpoint(name string) (endpoint string, err error) {
// Get openID config without auth // Get openID config without auth
opts := rest.Opts{ opts := rest.Opts{
Method: "GET", Method: "GET",
@@ -330,8 +298,8 @@ func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, e
} }
var openIDconfig map[string]interface{} var openIDconfig map[string]interface{}
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err := f.unAuth.CallJSON(ctx, &opts, nil, &openIDconfig) resp, err := f.unAuth.CallJSON(&opts, nil, &openIDconfig)
return shouldRetry(ctx, resp, err) return shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
return "", errors.Wrap(err, "couldn't read openID config") return "", errors.Wrap(err, "couldn't read openID config")
@@ -348,7 +316,7 @@ func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, e
// UserInfo fetches info about the current user with oauth2 // UserInfo fetches info about the current user with oauth2
func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err error) { func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err error) {
endpoint, err := f.fetchEndpoint(ctx, "userinfo_endpoint") endpoint, err := f.fetchEndpoint("userinfo_endpoint")
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -359,8 +327,8 @@ func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err erro
RootURL: endpoint, RootURL: endpoint,
} }
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &userInfo) resp, err := f.srv.CallJSON(&opts, nil, &userInfo)
return shouldRetry(ctx, resp, err) return shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't read user info") return nil, errors.Wrap(err, "couldn't read user info")
@@ -370,7 +338,7 @@ func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err erro
// Disconnect kills the token and refresh token // Disconnect kills the token and refresh token
func (f *Fs) Disconnect(ctx context.Context) (err error) { func (f *Fs) Disconnect(ctx context.Context) (err error) {
endpoint, err := f.fetchEndpoint(ctx, "revocation_endpoint") endpoint, err := f.fetchEndpoint("revocation_endpoint")
if err != nil { if err != nil {
return err return err
} }
@@ -390,8 +358,8 @@ func (f *Fs) Disconnect(ctx context.Context) (err error) {
} }
var res interface{} var res interface{}
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &res) resp, err := f.srv.CallJSON(&opts, nil, &res)
return shouldRetry(ctx, resp, err) return shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
return errors.Wrap(err, "couldn't revoke token") return errors.Wrap(err, "couldn't revoke token")
@@ -455,7 +423,7 @@ func findID(name string) string {
// list the albums into an internal cache // list the albums into an internal cache
// FIXME cache invalidation // FIXME cache invalidation
func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err error) { func (f *Fs) listAlbums(shared bool) (all *albums, err error) {
f.albumsMu.Lock() f.albumsMu.Lock()
defer f.albumsMu.Unlock() defer f.albumsMu.Unlock()
all, ok := f.albums[shared] all, ok := f.albums[shared]
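listAlbums memoises its result per shared flag under albumsMu, so repeated directory listings do not refetch every album; as the FIXME notes, nothing invalidates the cache yet. The caching shape reduced to a sketch, with a hypothetical fetch callback standing in for the paged album listing:

    package sketch

    import "sync"

    type albums struct{} // stand-in for the backend's albums type

    type albumCache struct {
        mu    sync.Mutex
        cache map[bool]*albums                  // keyed by the "shared" flag
        fetch func(shared bool) (*albums, error) // hypothetical loader
    }

    // get returns the cached album listing, fetching it on first use.
    // There is no invalidation: a stale entry lives until the Fs does.
    func (c *albumCache) get(shared bool) (*albums, error) {
        c.mu.Lock()
        defer c.mu.Unlock()
        if all, ok := c.cache[shared]; ok {
            return all, nil
        }
        all, err := c.fetch(shared)
        if err != nil {
            return nil, err
        }
        if c.cache == nil {
            c.cache = map[bool]*albums{}
        }
        c.cache[shared] = all
        return all, nil
    }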
@@ -477,8 +445,8 @@ func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err erro
var result api.ListAlbums var result api.ListAlbums
var resp *http.Response var resp *http.Response
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) resp, err = f.srv.CallJSON(&opts, nil, &result)
return shouldRetry(ctx, resp, err) return shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't list albums") return nil, errors.Wrap(err, "couldn't list albums")
@@ -514,26 +482,20 @@ type listFn func(remote string, object *api.MediaItem, isDirectory bool) error
// dir is the starting directory, "" for root // dir is the starting directory, "" for root
// //
// Set recurse to read sub directories // Set recurse to read sub directories
func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err error) { func (f *Fs) list(filter api.SearchFilter, fn listFn) (err error) {
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
Path: "/mediaItems:search", Path: "/mediaItems:search",
} }
filter.PageSize = listChunks filter.PageSize = listChunks
filter.PageToken = "" filter.PageToken = ""
if filter.AlbumID == "" { // album ID and filters cannot be set together, otherwise the API returns 400 INVALID_ARGUMENT
if filter.Filters == nil {
filter.Filters = &api.Filters{}
}
filter.Filters.IncludeArchivedMedia = &f.opt.IncludeArchived
}
lastID := "" lastID := ""
for { for {
var result api.MediaItems var result api.MediaItems
var resp *http.Response var resp *http.Response
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &filter, &result) resp, err = f.srv.CallJSON(&opts, &filter, &result)
return shouldRetry(ctx, resp, err) return shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
return errors.Wrap(err, "couldn't list files") return errors.Wrap(err, "couldn't list files")
@@ -581,7 +543,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, item *api.MediaI
// listDir lists a single directory // listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) { func (f *Fs) listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) {
// List the objects // List the objects
err = f.list(ctx, filter, func(remote string, item *api.MediaItem, isDirectory bool) error { err = f.list(filter, func(remote string, item *api.MediaItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, prefix+remote, item, isDirectory) entry, err := f.itemToDirEntry(ctx, prefix+remote, item, isDirectory)
if err != nil { if err != nil {
return err return err
@@ -676,8 +638,8 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
var result api.Album var result api.Album
var resp *http.Response var resp *http.Response
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, request, &result) resp, err = f.srv.CallJSON(&opts, request, &result)
return shouldRetry(ctx, resp, err) return shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't create album") return nil, errors.Wrap(err, "couldn't create album")
@@ -692,7 +654,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
func (f *Fs) getOrCreateAlbum(ctx context.Context, albumTitle string) (album *api.Album, err error) { func (f *Fs) getOrCreateAlbum(ctx context.Context, albumTitle string) (album *api.Album, err error) {
f.createMu.Lock() f.createMu.Lock()
defer f.createMu.Unlock() defer f.createMu.Unlock()
albums, err := f.listAlbums(ctx, false) albums, err := f.listAlbums(false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -746,7 +708,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
return err return err
} }
albumTitle := match[1] albumTitle := match[1]
allAlbums, err := f.listAlbums(ctx, false) allAlbums, err := f.listAlbums(false)
if err != nil { if err != nil {
return err return err
} }
@@ -811,8 +773,8 @@ func (o *Object) Size() int64 {
RootURL: o.downloadURL(), RootURL: o.downloadURL(),
} }
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts) resp, err = o.fs.srv.Call(&opts)
return shouldRetry(ctx, resp, err) return shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
fs.Debugf(o, "Reading size failed: %v", err) fs.Debugf(o, "Reading size failed: %v", err)
@@ -862,8 +824,8 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
var item api.MediaItem var item api.MediaItem
var resp *http.Response var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &item) resp, err = o.fs.srv.CallJSON(&opts, nil, &item)
return shouldRetry(ctx, resp, err) return shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
return errors.Wrap(err, "couldn't get media item") return errors.Wrap(err, "couldn't get media item")
@@ -939,8 +901,8 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
Options: options, Options: options,
} }
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts) resp, err = o.fs.srv.Call(&opts)
return shouldRetry(ctx, resp, err) return shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
return nil, err return nil, err
@@ -981,9 +943,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Upload the media item in exchange for an UploadToken // Upload the media item in exchange for an UploadToken
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
Path: "/uploads", Path: "/uploads",
Options: options,
ExtraHeaders: map[string]string{ ExtraHeaders: map[string]string{
"X-Goog-Upload-File-Name": fileName, "X-Goog-Upload-File-Name": fileName,
"X-Goog-Upload-Protocol": "raw", "X-Goog-Upload-Protocol": "raw",
@@ -993,12 +954,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var token []byte var token []byte
var resp *http.Response var resp *http.Response
err = o.fs.pacer.CallNoRetry(func() (bool, error) { err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts) resp, err = o.fs.srv.Call(&opts)
if err != nil { if err != nil {
return shouldRetry(ctx, resp, err) return shouldRetry(resp, err)
} }
token, err = rest.ReadBody(resp) token, err = rest.ReadBody(resp)
return shouldRetry(ctx, resp, err) return shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
return errors.Wrap(err, "couldn't upload file") return errors.Wrap(err, "couldn't upload file")
@@ -1025,8 +986,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} }
var result api.BatchCreateResponse var result api.BatchCreateResponse
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, request, &result) resp, err = o.fs.srv.CallJSON(&opts, request, &result)
return shouldRetry(ctx, resp, err) return shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
return errors.Wrap(err, "failed to create media item") return errors.Wrap(err, "failed to create media item")
@@ -1042,9 +1003,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Add upload to internal storage // Add upload to internal storage
if pattern.isUpload { if pattern.isUpload {
o.fs.uploadedMu.Lock()
o.fs.uploaded.AddEntry(o) o.fs.uploaded.AddEntry(o)
o.fs.uploadedMu.Unlock()
} }
return nil return nil
} }
@@ -1070,8 +1029,8 @@ func (o *Object) Remove(ctx context.Context) (err error) {
} }
var resp *http.Response var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil) resp, err = o.fs.srv.CallJSON(&opts, &request, nil)
return shouldRetry(ctx, resp, err) return shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
return errors.Wrap(err, "couldn't delete item from album") return errors.Wrap(err, "couldn't delete item from album")

View File

@@ -12,7 +12,6 @@ import (
_ "github.com/rclone/rclone/backend/local" _ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@@ -27,6 +26,17 @@ const (
fileNameUpload = "rclone-test-image2.jpg" fileNameUpload = "rclone-test-image2.jpg"
) )
// Wrapper to override the remote for an object
type overrideRemoteObject struct {
fs.Object
remote string
}
// Remote returns the overridden remote name
func (o *overrideRemoteObject) Remote() string {
return o.remote
}
func TestIntegration(t *testing.T) { func TestIntegration(t *testing.T) {
ctx := context.Background() ctx := context.Background()
fstest.Initialise() fstest.Initialise()
@@ -35,14 +45,14 @@ func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" { if *fstest.RemoteName == "" {
*fstest.RemoteName = "TestGooglePhotos:" *fstest.RemoteName = "TestGooglePhotos:"
} }
f, err := fs.NewFs(ctx, *fstest.RemoteName) f, err := fs.NewFs(*fstest.RemoteName)
if err == fs.ErrorNotFoundInConfigFile { if err == fs.ErrorNotFoundInConfigFile {
t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err)) t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err))
} }
require.NoError(t, err) require.NoError(t, err)
// Create local Fs pointing at testfiles // Create local Fs pointing at testfiles
localFs, err := fs.NewFs(ctx, "testfiles") localFs, err := fs.NewFs("testfiles")
require.NoError(t, err) require.NoError(t, err)
t.Run("CreateAlbum", func(t *testing.T) { t.Run("CreateAlbum", func(t *testing.T) {
@@ -56,7 +66,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
in, err := srcObj.Open(ctx) in, err := srcObj.Open(ctx)
require.NoError(t, err) require.NoError(t, err)
dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote)) dstObj, err := f.Put(ctx, in, &overrideRemoteObject{srcObj, remote})
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote()) assert.Equal(t, remote, dstObj.Remote())
_ = in.Close() _ = in.Close()
@@ -115,7 +125,7 @@ func TestIntegration(t *testing.T) {
assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String()) assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
}) })
// Check it is there in the date/month/year hierarchy // Check it is there in the date/month/year hierarchy
// 2013-07-13 is the creation date of the folder // 2013-07-13 is the creation date of the folder
checkPresent := func(t *testing.T, objPath string) { checkPresent := func(t *testing.T, objPath string) {
entries, err := f.List(ctx, objPath) entries, err := f.List(ctx, objPath)
@@ -155,7 +165,7 @@ func TestIntegration(t *testing.T) {
}) })
t.Run("NewFsIsFile", func(t *testing.T) { t.Run("NewFsIsFile", func(t *testing.T) {
fNew, err := fs.NewFs(ctx, *fstest.RemoteName+remote) fNew, err := fs.NewFs(*fstest.RemoteName + remote)
assert.Equal(t, fs.ErrorIsFile, err) assert.Equal(t, fs.ErrorIsFile, err)
leaf := path.Base(remote) leaf := path.Base(remote)
o, err := fNew.NewObject(ctx, leaf) o, err := fNew.NewObject(ctx, leaf)
@@ -221,7 +231,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
in, err := srcObj.Open(ctx) in, err := srcObj.Open(ctx)
require.NoError(t, err) require.NoError(t, err)
dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote)) dstObj, err := f.Put(ctx, in, &overrideRemoteObject{srcObj, remote})
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote()) assert.Equal(t, remote, dstObj.Remote())
_ = in.Close() _ = in.Close()

View File

@@ -20,11 +20,9 @@ import (
// file pattern parsing // file pattern parsing
type lister interface { type lister interface {
listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error)
listAlbums(ctx context.Context, shared bool) (all *albums, err error) listAlbums(shared bool) (all *albums, err error)
listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
dirTime() time.Time dirTime() time.Time
startYear() int
includeArchived() bool
} }
// dirPattern describes a single directory pattern // dirPattern describes a single directory pattern
@@ -54,7 +52,6 @@ var patterns = dirPatterns{
fs.NewDir(prefix+"album", f.dirTime()), fs.NewDir(prefix+"album", f.dirTime()),
fs.NewDir(prefix+"shared-album", f.dirTime()), fs.NewDir(prefix+"shared-album", f.dirTime()),
fs.NewDir(prefix+"upload", f.dirTime()), fs.NewDir(prefix+"upload", f.dirTime()),
fs.NewDir(prefix+"feature", f.dirTime()),
}, nil }, nil
}, },
}, },
@@ -192,28 +189,6 @@ var patterns = dirPatterns{
re: `^shared-album/(.+?)/([^/]+)$`, re: `^shared-album/(.+?)/([^/]+)$`,
isFile: true, isFile: true,
}, },
{
re: `^feature$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
return fs.DirEntries{
fs.NewDir(prefix+"favorites", f.dirTime()),
}, nil
},
},
{
re: `^feature/favorites$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
filter := featureFilter(ctx, f, match)
if err != nil {
return nil, err
}
return f.listDir(ctx, prefix, filter)
},
},
{
re: `^feature/favorites/([^/]+)$`,
isFile: true,
},
}.mustCompile() }.mustCompile()
// mustCompile compiles the regexps in the dirPatterns // mustCompile compiles the regexps in the dirPatterns
@@ -225,7 +200,7 @@ func (ds dirPatterns) mustCompile() dirPatterns {
return ds return ds
} }
// match finds the path passed in the matching structure and // match finds the path passed in the matching structure and
// returns the parameters and a pointer to the match, or nil. // returns the parameters and a pointer to the match, or nil.
func (ds dirPatterns) match(root string, itemPath string, isFile bool) (match []string, prefix string, pattern *dirPattern) { func (ds dirPatterns) match(root string, itemPath string, isFile bool) (match []string, prefix string, pattern *dirPattern) {
itemPath = strings.Trim(itemPath, "/") itemPath = strings.Trim(itemPath, "/")
@@ -247,10 +222,11 @@ func (ds dirPatterns) match(root string, itemPath string, isFile bool) (match []
return nil, "", nil return nil, "", nil
} }
// Return the years from startYear to today // Return the years from 2000 to today
// FIXME make configurable?
func years(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) { func years(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
currentYear := f.dirTime().Year() currentYear := f.dirTime().Year()
for year := f.startYear(); year <= currentYear; year++ { for year := 2000; year <= currentYear; year++ {
entries = append(entries, fs.NewDir(prefix+fmt.Sprint(year), f.dirTime())) entries = append(entries, fs.NewDir(prefix+fmt.Sprint(year), f.dirTime()))
} }
return entries, nil return entries, nil
@@ -314,31 +290,13 @@ func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.S
return sf, nil return sf, nil
} }
// featureFilter creates a filter for the Feature enum
//
// The API only supports one feature, FAVORITES, so hardcode that feature
//
// https://developers.google.com/photos/library/reference/rest/v1/mediaItems/search#FeatureFilter
func featureFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter) {
sf = api.SearchFilter{
Filters: &api.Filters{
FeatureFilter: &api.FeatureFilter{
IncludedFeatures: []string{
"FAVORITES",
},
},
},
}
return sf
}
// Turns an albumPath into entries // Turns an albumPath into entries
// //
// These can either be synthetic directory entries if the album path // These can either be synthetic directory entries if the album path
// is a prefix of another album, or actual files, or a combination of // is a prefix of another album, or actual files, or a combination of
// the two. // the two.
func albumsToEntries(ctx context.Context, f lister, shared bool, prefix string, albumPath string) (entries fs.DirEntries, err error) { func albumsToEntries(ctx context.Context, f lister, shared bool, prefix string, albumPath string) (entries fs.DirEntries, err error) {
albums, err := f.listAlbums(ctx, shared) albums, err := f.listAlbums(shared)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@@ -44,7 +44,7 @@ func (f *testLister) listDir(ctx context.Context, prefix string, filter api.Sear
} }
// mock listAlbums for testing // mock listAlbums for testing
func (f *testLister) listAlbums(ctx context.Context, shared bool) (all *albums, err error) { func (f *testLister) listAlbums(shared bool) (all *albums, err error) {
return f.albums, nil return f.albums, nil
} }
@@ -59,16 +59,6 @@ func (f *testLister) dirTime() time.Time {
return startTime return startTime
} }
// mock startYear for testing
func (f *testLister) startYear() int {
return 2000
}
// mock includeArchived for testing
func (f *testLister) includeArchived() bool {
return false
}
func TestPatternMatch(t *testing.T) { func TestPatternMatch(t *testing.T) {
for testNumber, test := range []struct { for testNumber, test := range []struct {
// input // input
@@ -160,38 +150,6 @@ func TestPatternMatch(t *testing.T) {
wantPrefix: "file.jpg/", wantPrefix: "file.jpg/",
wantPattern: &patterns[5], wantPattern: &patterns[5],
}, },
{
root: "",
itemPath: "feature",
isFile: false,
wantMatch: []string{"feature"},
wantPrefix: "feature/",
wantPattern: &patterns[23],
},
{
root: "feature/favorites",
itemPath: "",
isFile: false,
wantMatch: []string{"feature/favorites"},
wantPrefix: "",
wantPattern: &patterns[24],
},
{
root: "feature",
itemPath: "favorites",
isFile: false,
wantMatch: []string{"feature/favorites"},
wantPrefix: "favorites/",
wantPattern: &patterns[24],
},
{
root: "feature/favorites",
itemPath: "file.jpg",
isFile: true,
wantMatch: []string{"feature/favorites/file.jpg", "file.jpg"},
wantPrefix: "file.jpg/",
wantPattern: &patterns[25],
},
} { } {
t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q,isFile=%v", testNumber, test.root, test.itemPath, test.isFile), func(t *testing.T) { t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q,isFile=%v", testNumber, test.root, test.itemPath, test.isFile), func(t *testing.T) {
gotMatch, gotPrefix, gotPattern := patterns.match(test.root, test.itemPath, test.isFile) gotMatch, gotPrefix, gotPattern := patterns.match(test.root, test.itemPath, test.isFile)

View File

@@ -1,320 +0,0 @@
// +build !plan9
package hdfs
import (
"context"
"fmt"
"io"
"os"
"os/user"
"path"
"strings"
"time"
"github.com/colinmarc/hdfs/v2"
krb "github.com/jcmturner/gokrb5/v8/client"
"github.com/jcmturner/gokrb5/v8/config"
"github.com/jcmturner/gokrb5/v8/credentials"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
)
// Fs represents a HDFS server
type Fs struct {
name string
root string
features *fs.Features // optional features
opt Options // options for this backend
ci *fs.ConfigInfo // global config
client *hdfs.Client
}
// copy-paste from https://github.com/colinmarc/hdfs/blob/master/cmd/hdfs/kerberos.go
func getKerberosClient() (*krb.Client, error) {
configPath := os.Getenv("KRB5_CONFIG")
if configPath == "" {
configPath = "/etc/krb5.conf"
}
cfg, err := config.Load(configPath)
if err != nil {
return nil, err
}
// Determine the ccache location from the environment, falling back to the
// default location.
ccachePath := os.Getenv("KRB5CCNAME")
if strings.Contains(ccachePath, ":") {
if strings.HasPrefix(ccachePath, "FILE:") {
ccachePath = strings.SplitN(ccachePath, ":", 2)[1]
} else {
return nil, fmt.Errorf("unusable ccache: %s", ccachePath)
}
} else if ccachePath == "" {
u, err := user.Current()
if err != nil {
return nil, err
}
ccachePath = fmt.Sprintf("/tmp/krb5cc_%s", u.Uid)
}
ccache, err := credentials.LoadCCache(ccachePath)
if err != nil {
return nil, err
}
client, err := krb.NewFromCCache(ccache, cfg)
if err != nil {
return nil, err
}
return client, nil
}
// NewFs constructs an Fs from the path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
options := hdfs.ClientOptions{
Addresses: []string{opt.Namenode},
UseDatanodeHostname: false,
}
if opt.ServicePrincipalName != "" {
options.KerberosClient, err = getKerberosClient()
if err != nil {
return nil, fmt.Errorf("Problem with kerberos authentication: %s", err)
}
options.KerberosServicePrincipleName = opt.ServicePrincipalName
if opt.DataTransferProtection != "" {
options.DataTransferProtection = opt.DataTransferProtection
}
} else {
options.User = opt.Username
}
client, err := hdfs.NewClient(options)
if err != nil {
return nil, err
}
f := &Fs{
name: name,
root: root,
opt: *opt,
ci: fs.GetConfig(ctx),
client: client,
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
info, err := f.client.Stat(f.realpath(""))
if err == nil && !info.IsDir() {
f.root = path.Dir(f.root)
return f, fs.ErrorIsFile
}
return f, nil
}
// Name of this fs
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("hdfs://%s", f.opt.Namenode)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Precision returns the precision of this Fs
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Hashes are not supported
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.None)
}
// NewObject finds the file at remote or returns fs.ErrorObjectNotFound
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
realpath := f.realpath(remote)
fs.Debugf(f, "new [%s]", realpath)
info, err := f.ensureFile(realpath)
if err != nil {
return nil, err
}
return &Object{
fs: f,
remote: remote,
size: info.Size(),
modTime: info.ModTime(),
}, nil
}
// List the objects and directories in dir into entries.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
realpath := f.realpath(dir)
fs.Debugf(f, "list [%s]", realpath)
err = f.ensureDirectory(realpath)
if err != nil {
return nil, err
}
list, err := f.client.ReadDir(realpath)
if err != nil {
return nil, err
}
for _, x := range list {
stdName := f.opt.Enc.ToStandardName(x.Name())
remote := path.Join(dir, stdName)
if x.IsDir() {
entries = append(entries, fs.NewDir(remote, x.ModTime()))
} else {
entries = append(entries, &Object{
fs: f,
remote: remote,
size: x.Size(),
modTime: x.ModTime()})
}
}
return entries, nil
}
// Put the object
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o := &Object{
fs: f,
remote: src.Remote(),
}
err := o.Update(ctx, in, src, options...)
return o, err
}
// PutStream uploads to the remote path with the given modTime, for content of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
}
// Mkdir makes a directory
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
fs.Debugf(f, "mkdir [%s]", f.realpath(dir))
return f.client.MkdirAll(f.realpath(dir), 0755)
}
// Rmdir deletes the directory
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
realpath := f.realpath(dir)
fs.Debugf(f, "rmdir [%s]", realpath)
err := f.ensureDirectory(realpath)
if err != nil {
return err
}
// refuse to remove a non-empty directory
list, err := f.client.ReadDir(realpath)
if err != nil {
return err
}
if len(list) > 0 {
return fs.ErrorDirectoryNotEmpty
}
return f.client.Remove(realpath)
}
// Purge deletes all the files in the directory
func (f *Fs) Purge(ctx context.Context, dir string) error {
realpath := f.realpath(dir)
fs.Debugf(f, "purge [%s]", realpath)
err := f.ensureDirectory(realpath)
if err != nil {
return err
}
return f.client.RemoveAll(realpath)
}
// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
info, err := f.client.StatFs()
if err != nil {
return nil, err
}
return &fs.Usage{
Total: fs.NewUsageValue(int64(info.Capacity)),
Used: fs.NewUsageValue(int64(info.Used)),
Free: fs.NewUsageValue(int64(info.Remaining)),
}, nil
}
func (f *Fs) ensureDirectory(realpath string) error {
info, err := f.client.Stat(realpath)
if e, ok := err.(*os.PathError); ok && e.Err == os.ErrNotExist {
return fs.ErrorDirNotFound
}
if err != nil {
return err
}
if !info.IsDir() {
return fs.ErrorDirNotFound
}
return nil
}
func (f *Fs) ensureFile(realpath string) (os.FileInfo, error) {
info, err := f.client.Stat(realpath)
if e, ok := err.(*os.PathError); ok && e.Err == os.ErrNotExist {
return nil, fs.ErrorObjectNotFound
}
if err != nil {
return nil, err
}
if info.IsDir() {
return nil, fs.ErrorObjectNotFound
}
return info, nil
}
func (f *Fs) realpath(dir string) string {
return f.opt.Enc.FromStandardPath(xPath(f.Root(), dir))
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
)
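The ensureFile and ensureDirectory helpers fold the HDFS client's *os.PathError into rclone's sentinel errors, which is what lets callers compare directly against fs.ErrorObjectNotFound. A minimal caller sketch, assuming a remote named "TestHdfs:" already exists in rclone.conf and the context-aware fs.NewFs of the branch that still carries this backend (the path is invented):

package main

import (
	"context"
	"fmt"
	"log"

	_ "github.com/rclone/rclone/backend/hdfs" // register the backend via init()
	"github.com/rclone/rclone/fs"
)

func main() {
	ctx := context.Background()
	f, err := fs.NewFs(ctx, "TestHdfs:") // assumes the remote is configured
	if err != nil {
		log.Fatal(err)
	}
	obj, err := f.NewObject(ctx, "dir/file.txt") // hypothetical path
	switch err {
	case nil:
		fmt.Println("found", obj.Remote(), obj.Size())
	case fs.ErrorObjectNotFound:
		fmt.Println("missing: mapped from *os.PathError by ensureFile")
	default:
		log.Fatal(err) // other errors pass through unchanged
	}
}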


@@ -1,86 +0,0 @@
// +build !plan9

package hdfs

import (
	"path"
	"strings"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/lib/encoder"
)

func init() {
	fsi := &fs.RegInfo{
		Name:        "hdfs",
		Description: "Hadoop distributed file system",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name:     "namenode",
			Help:     "hadoop name node and port",
			Required: true,
			Examples: []fs.OptionExample{{
				Value: "namenode:8020",
				Help:  "Connect to host namenode at port 8020",
			}},
		}, {
			Name:     "username",
			Help:     "hadoop user name",
			Required: false,
			Examples: []fs.OptionExample{{
				Value: "root",
				Help:  "Connect to hdfs as root",
			}},
		}, {
			Name: "service_principal_name",
			Help: `Kerberos service principal name for the namenode

Enables KERBEROS authentication. Specifies the Service Principal Name
(<SERVICE>/<FQDN>) for the namenode.`,
			Required: false,
			Examples: []fs.OptionExample{{
				Value: "hdfs/namenode.hadoop.docker",
				Help:  "Namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.",
			}},
			Advanced: true,
		}, {
			Name: "data_transfer_protection",
			Help: `Kerberos data transfer protection: authentication|integrity|privacy

Specifies whether or not authentication, data signature integrity
checks, and wire encryption are required when communicating with the
datanodes. Possible values are 'authentication', 'integrity' and
'privacy'. Used only with KERBEROS enabled.`,
			Required: false,
			Examples: []fs.OptionExample{{
				Value: "privacy",
				Help:  "Ensure authentication, integrity and encryption enabled.",
			}},
			Advanced: true,
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			Default:  (encoder.Display | encoder.EncodeInvalidUtf8 | encoder.EncodeColon),
		}},
	}
	fs.Register(fsi)
}

// Options for this backend
type Options struct {
	Namenode               string               `config:"namenode"`
	Username               string               `config:"username"`
	ServicePrincipalName   string               `config:"service_principal_name"`
	DataTransferProtection string               `config:"data_transfer_protection"`
	Enc                    encoder.MultiEncoder `config:"encoding"`
}

// xPath makes a correct file path with a leading '/'
func xPath(root string, tail string) string {
	if !strings.HasPrefix(root, "/") {
		root = "/" + root
	}
	return path.Join(root, tail)
}
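Since xPath only normalises the root and then defers to path.Join, its behaviour is easy to pin down in a table test. A sketch in the same package (add "testing" to the imports; the cases are invented for illustration):

func TestXPath(t *testing.T) {
	for _, c := range []struct{ root, tail, want string }{
		{"user/data", "a/b.txt", "/user/data/a/b.txt"}, // leading '/' added to root
		{"/", "a.txt", "/a.txt"},                       // path.Join collapses the slashes
		{"", "", "/"},                                  // degenerate case still yields '/'
	} {
		if got := xPath(c.root, c.tail); got != c.want {
			t.Errorf("xPath(%q, %q) = %q, want %q", c.root, c.tail, got, c.want)
		}
	}
}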


@@ -1,20 +0,0 @@
// Test HDFS filesystem interface

// +build !plan9

package hdfs_test

import (
	"testing"

	"github.com/rclone/rclone/backend/hdfs"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestHdfs:",
		NilObject:  (*hdfs.Object)(nil),
	})
}


@@ -1,6 +0,0 @@
// Build for hdfs for unsupported platforms to stop go complaining
// about "no buildable Go source files"

// +build plan9

package hdfs


@@ -1,177 +0,0 @@
// +build !plan9

package hdfs

import (
	"context"
	"io"
	"path"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/readers"
)

// Object describes an HDFS file
type Object struct {
	fs      *Fs
	remote  string
	size    int64
	modTime time.Time
}

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.size
}

// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.modTime
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	realpath := o.fs.realpath(o.Remote())
	err := o.fs.client.Chtimes(realpath, modTime, modTime)
	if err != nil {
		return err
	}
	o.modTime = modTime
	return nil
}

// Storable returns whether this object is storable
func (o *Object) Storable() bool {
	return true
}

// String returns a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.Remote()
}

// Hash is not supported
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
	return "", hash.ErrUnsupported
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	realpath := o.realpath()
	fs.Debugf(o.fs, "open [%s]", realpath)
	f, err := o.fs.client.Open(realpath)
	if err != nil {
		return nil, err
	}

	var offset, limit int64 = 0, -1
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
			offset = x.Offset
		case *fs.RangeOption:
			offset, limit = x.Decode(o.Size())
		}
	}

	_, err = f.Seek(offset, io.SeekStart)
	if err != nil {
		return nil, err
	}

	if limit != -1 {
		in = readers.NewLimitedReadCloser(f, limit)
	} else {
		in = f
	}
	return in, err
}

// Update the object with the contents of the io.Reader
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	realpath := o.fs.realpath(src.Remote())
	dirname := path.Dir(realpath)
	fs.Debugf(o.fs, "update [%s]", realpath)

	err := o.fs.client.MkdirAll(dirname, 0755)
	if err != nil {
		return err
	}

	// remove the file if it already exists
	_, err = o.fs.client.Stat(realpath)
	if err == nil {
		err = o.fs.client.Remove(realpath)
		if err != nil {
			return err
		}
	}

	out, err := o.fs.client.Create(realpath)
	if err != nil {
		return err
	}

	cleanup := func() {
		rerr := o.fs.client.Remove(realpath)
		if rerr != nil {
			fs.Errorf(o.fs, "failed to remove [%v]: %v", realpath, rerr)
		}
	}

	_, err = io.Copy(out, in)
	if err != nil {
		cleanup()
		return err
	}

	err = out.Close()
	if err != nil {
		cleanup()
		return err
	}

	info, err := o.fs.client.Stat(realpath)
	if err != nil {
		return err
	}

	err = o.SetModTime(ctx, src.ModTime(ctx))
	if err != nil {
		return err
	}
	o.size = info.Size()

	return nil
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	realpath := o.fs.realpath(o.remote)
	fs.Debugf(o.fs, "remove [%s]", realpath)
	return o.fs.client.Remove(realpath)
}

func (o *Object) realpath() string {
	return o.fs.opt.Enc.FromStandardPath(xPath(o.Fs().Root(), o.remote))
}

// Check the interfaces are satisfied
var (
	_ fs.Object = (*Object)(nil)
)
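The option loop in Open above is the standard rclone pattern for honouring ranged reads: fs.RangeOption.Decode turns an HTTP-style byte range into an offset plus a byte count (or -1 for unbounded). A worked sketch with invented numbers:

package main

import (
	"fmt"

	"github.com/rclone/rclone/fs"
)

func main() {
	size := int64(1000) // pretend object size
	var offset, limit int64 = 0, -1
	for _, option := range []fs.OpenOption{&fs.RangeOption{Start: 100, End: 199}} {
		switch x := option.(type) {
		case *fs.SeekOption:
			offset = x.Offset
		case *fs.RangeOption:
			offset, limit = x.Decode(size)
		}
	}
	fmt.Println(offset, limit) // 100 100: seek to byte 100, then read 100 bytes
}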


@@ -13,7 +13,6 @@ import (
 	"path"
 	"strconv"
 	"strings"
-	"sync"
 	"time"
 
 	"github.com/pkg/errors"
@@ -58,7 +57,7 @@ The input format is comma separated list of key,value pairs. Standard
 For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
 
-You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.
+You can set multiple headers, eg '"Cookie","name=value","Authorization","xxx"'.
 `,
 			Default:  fs.CommaSepList{},
 			Advanced: true,
@@ -78,26 +77,6 @@ Note that this may cause rclone to confuse genuine HTML files with
 directories.`,
 			Default:  false,
 			Advanced: true,
-		}, {
-			Name: "no_head",
-			Help: `Don't use HEAD requests to find file sizes in dir listing
-
-If your site is being very slow to load then you can try this option.
-Normally rclone does a HEAD request for each potential file in a
-directory listing to:
-
-- find its size
-- check it really exists
-- check to see if it is a directory
-
-If you set this option, rclone will not do the HEAD request. This will mean
-
-- directory listings are much quicker
-- rclone won't have the times or sizes of any files
-- some files that don't exist may be in the listing
-`,
-			Default:  false,
-			Advanced: true,
 		}},
 	}
 	fs.Register(fsi)
@@ -107,7 +86,6 @@ If you set this option, rclone will not do the HEAD request. This will mean
 type Options struct {
 	Endpoint string          `config:"url"`
 	NoSlash  bool            `config:"no_slash"`
-	NoHead   bool            `config:"no_head"`
 	Headers  fs.CommaSepList `config:"headers"`
 }
@@ -115,9 +93,8 @@ type Options struct {
 type Fs struct {
 	name        string
 	root        string
 	features    *fs.Features   // optional features
 	opt         Options        // options for this backend
-	ci          *fs.ConfigInfo // global config
 	endpoint    *url.URL
 	endpointURL string // endpoint as a string
 	httpClient  *http.Client
@@ -146,7 +123,7 @@ func statusError(res *http.Response, err error) error {
 // NewFs creates a new Fs object from the name and root. It connects to
 // the host specified in the config file.
-func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
@@ -172,7 +149,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		return nil, err
 	}
-	client := fshttp.NewClient(ctx)
+	client := fshttp.NewClient(fs.Config)
 
 	var isFile = false
 	if !strings.HasSuffix(u.String(), "/") {
@@ -183,7 +160,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		return http.ErrUseLastResponse
 	}
 	// check to see if points to a file
-	req, err := http.NewRequestWithContext(ctx, "HEAD", u.String(), nil)
+	req, err := http.NewRequest("HEAD", u.String(), nil)
 	if err == nil {
 		addHeaders(req, opt)
 		res, err := noRedir.Do(req)
@@ -209,19 +186,17 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		return nil, err
 	}
-	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name:        name,
 		root:        root,
 		opt:         *opt,
-		ci:          ci,
 		httpClient:  client,
 		endpoint:    u,
 		endpointURL: u.String(),
 	}
 	f.features = (&fs.Features{
 		CanHaveEmptyDirectories: true,
-	}).Fill(ctx, f)
+	}).Fill(f)
 	if isFile {
 		return f, fs.ErrorIsFile
 	}
@@ -262,7 +237,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 		fs:     f,
 		remote: remote,
 	}
-	err := o.stat(ctx)
+	err := o.stat()
 	if err != nil {
 		return nil, err
 	}
@@ -380,7 +355,7 @@ func (f *Fs) addHeaders(req *http.Request) {
 }
 
 // Read the directory passed in
-func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error) {
+func (f *Fs) readDir(dir string) (names []string, err error) {
 	URL := f.url(dir)
 	u, err := url.Parse(URL)
 	if err != nil {
@@ -390,7 +365,7 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
 		return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
 	}
 	// Do the request
-	req, err := http.NewRequestWithContext(ctx, "GET", URL, nil)
+	req, err := http.NewRequest("GET", URL, nil)
 	if err != nil {
 		return nil, errors.Wrap(err, "readDir failed")
 	}
@@ -433,54 +408,34 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	if !strings.HasSuffix(dir, "/") && dir != "" {
 		dir += "/"
 	}
-	names, err := f.readDir(ctx, dir)
+	names, err := f.readDir(dir)
 	if err != nil {
 		return nil, errors.Wrapf(err, "error listing %q", dir)
 	}
-	var (
-		entriesMu sync.Mutex // to protect entries
-		wg        sync.WaitGroup
-		checkers  = f.ci.Checkers
-		in        = make(chan string, checkers)
-	)
-	add := func(entry fs.DirEntry) {
-		entriesMu.Lock()
-		entries = append(entries, entry)
-		entriesMu.Unlock()
-	}
-	for i := 0; i < checkers; i++ {
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
-			for remote := range in {
-				file := &Object{
-					fs:     f,
-					remote: remote,
-				}
-				switch err := file.stat(ctx); err {
-				case nil:
-					add(file)
-				case fs.ErrorNotAFile:
-					// ...found a directory not a file
-					add(fs.NewDir(remote, timeUnset))
-				default:
-					fs.Debugf(remote, "skipping because of error: %v", err)
-				}
-			}
-		}()
-	}
 	for _, name := range names {
 		isDir := name[len(name)-1] == '/'
 		name = strings.TrimRight(name, "/")
 		remote := path.Join(dir, name)
 		if isDir {
-			add(fs.NewDir(remote, timeUnset))
+			dir := fs.NewDir(remote, timeUnset)
+			entries = append(entries, dir)
 		} else {
-			in <- remote
+			file := &Object{
+				fs:     f,
+				remote: remote,
+			}
+			switch err = file.stat(); err {
+			case nil:
+				entries = append(entries, file)
+			case fs.ErrorNotAFile:
+				// ...found a directory not a file
+				dir := fs.NewDir(remote, timeUnset)
+				entries = append(entries, dir)
+			default:
+				fs.Debugf(remote, "skipping because of error: %v", err)
+			}
 		}
 	}
-	close(in)
-	wg.Wait()
 	return entries, nil
 }
@@ -537,15 +492,9 @@ func (o *Object) url() string {
 }
 
 // stat updates the info field in the Object
-func (o *Object) stat(ctx context.Context) error {
-	if o.fs.opt.NoHead {
-		o.size = -1
-		o.modTime = timeUnset
-		o.contentType = fs.MimeType(ctx, o)
-		return nil
-	}
+func (o *Object) stat() error {
 	url := o.url()
-	req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
+	req, err := http.NewRequest("HEAD", url, nil)
 	if err != nil {
 		return errors.Wrap(err, "stat failed")
 	}
@@ -585,7 +534,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	return errorReadOnly
 }
 
-// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc.)
+// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc)
 func (o *Object) Storable() bool {
 	return true
 }
@@ -593,7 +542,7 @@ func (o *Object) Storable() bool {
 // Open a remote http file object for reading. Seek is supported
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	url := o.url()
-	req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
+	req, err := http.NewRequest("GET", url, nil)
 	if err != nil {
 		return nil, errors.Wrap(err, "Open failed")
 	}
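The List on the removed side of this hunk replaces the sequential per-file stat with a bounded worker pool: checkers goroutines drain a channel of names while a mutex guards the shared entries slice. The shape of that pattern, reduced to a self-contained sketch with a stand-in for the HEAD request:

package main

import (
	"fmt"
	"sync"
)

// statName stands in for Object.stat: per-item work (a HEAD request
// in the real code) whose result we want to collect.
func statName(name string) string { return "stat:" + name }

func main() {
	names := []string{"a", "b", "c", "d"}
	const checkers = 2 // plays the role of f.ci.Checkers

	var (
		resultsMu sync.Mutex // protects results, as entriesMu protects entries
		results   []string
		wg        sync.WaitGroup
		in        = make(chan string, checkers)
	)
	for i := 0; i < checkers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for name := range in {
				r := statName(name)
				resultsMu.Lock()
				results = append(results, r)
				resultsMu.Unlock()
			}
		}()
	}
	for _, name := range names {
		in <- name // feed the pool
	}
	close(in) // let the workers drain and exit
	wg.Wait()
	fmt.Println(results) // completion order is nondeterministic
}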


@@ -15,7 +15,7 @@ import (
 	"time"
 
 	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/config/configfile"
+	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fstest"
 	"github.com/rclone/rclone/lib/rest"
@@ -47,7 +47,7 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
 	ts := httptest.NewServer(handler)
 
 	// Configure the remote
-	configfile.Install()
+	config.LoadConfig()
 	// fs.Config.LogLevel = fs.LogLevelDebug
 	// fs.Config.DumpHeaders = true
 	// fs.Config.DumpBodies = true
@@ -69,7 +69,7 @@ func prepare(t *testing.T) (fs.Fs, func()) {
 	m, tidy := prepareServer(t)
 
 	// Instantiate it
-	f, err := NewFs(context.Background(), remoteName, "", m)
+	f, err := NewFs(remoteName, "", m)
 	require.NoError(t, err)
 
 	return f, tidy
@@ -166,7 +166,8 @@ func TestNewObject(t *testing.T) {
 	require.NoError(t, err)
 	tFile := fi.ModTime()
-	fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)
+	dt, ok := fstest.CheckTimeEqualWithPrecision(tObj, tFile, time.Second)
+	assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))
 
 	// check object not found
 	o, err = f.NewObject(context.Background(), "not found.txt")
@@ -214,7 +215,7 @@ func TestIsAFileRoot(t *testing.T) {
 	m, tidy := prepareServer(t)
 	defer tidy()
 
-	f, err := NewFs(context.Background(), remoteName, "one%.txt", m)
+	f, err := NewFs(remoteName, "one%.txt", m)
 	assert.Equal(t, err, fs.ErrorIsFile)
 
 	testListRoot(t, f, false)
@@ -224,7 +225,7 @@ func TestIsAFileSubDir(t *testing.T) {
 	m, tidy := prepareServer(t)
 	defer tidy()
 
-	f, err := NewFs(context.Background(), remoteName, "three/underthree.txt", m)
+	f, err := NewFs(remoteName, "three/underthree.txt", m)
 	assert.Equal(t, err, fs.ErrorIsFile)
 
 	entries, err := f.List(context.Background(), "")
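Both versions of this test harness lean on net/http/httptest, which serves fixtures over a real local listener so NewFs exercises genuine HTTP. The core of that pattern, independent of rclone (the fixture directory and file name are invented; "one%.txt" echoes the test above, URL-escaped as %25):

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Serve fixture files over a real local HTTP listener.
	handler := http.FileServer(http.Dir("testdata")) // assumed fixture dir
	ts := httptest.NewServer(handler)
	defer ts.Close()

	res, err := http.Get(ts.URL + "/one%25.txt")
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	body, _ := ioutil.ReadAll(res.Body)
	fmt.Println(res.StatusCode, len(body)) // 404 if the fixture is absent
}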


@@ -1,11 +1,10 @@
 package hubic
 
 import (
-	"context"
 	"net/http"
 	"time"
 
-	"github.com/ncw/swift/v2"
+	"github.com/ncw/swift"
 	"github.com/rclone/rclone/fs"
 )
@@ -21,13 +20,13 @@ func newAuth(f *Fs) *auth {
 	}
 }
 
-// Request constructs an http.Request for authentication
+// Request constructs a http.Request for authentication
 //
 // returns nil for not needed
-func (a *auth) Request(ctx context.Context, c *swift.Connection) (r *http.Request, err error) {
+func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
 	const retries = 10
 	for try := 1; try <= retries; try++ {
-		err = a.f.getCredentials(context.TODO())
+		err = a.f.getCredentials()
 		if err == nil {
 			break
 		}
@@ -38,7 +37,7 @@ func (a *auth) Request(ctx context.Context, c *swift.Connection) (r *http.Reques
 }
 
 // Response parses the result of an http request
-func (a *auth) Response(ctx context.Context, resp *http.Response) error {
+func (a *auth) Response(resp *http.Response) error {
 	return nil
 }
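auth.Request retries the credential fetch up to a fixed count, keeping the last error if every attempt fails. That bounded-retry shape, extracted into a self-contained sketch (the pause between attempts is an assumption; the elided lines of the original may wait differently):

package main

import (
	"errors"
	"fmt"
	"time"
)

// withRetries is a hypothetical helper mirroring the loop in auth.Request.
func withRetries(attempts int, op func() error) (err error) {
	for try := 1; try <= attempts; try++ {
		if err = op(); err == nil {
			return nil
		}
		time.Sleep(100 * time.Millisecond) // assumed pause between attempts
	}
	return err // the last error wins, as in the original loop
}

func main() {
	calls := 0
	err := withRetries(10, func() error {
		calls++
		if calls < 3 {
			return errors.New("transient")
		}
		return nil
	})
	fmt.Println(calls, err) // 3 <nil>
}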


@@ -4,21 +4,22 @@ package hubic
 
 // This uses the normal swift mechanism to update the credentials and
 // ignores the expires field returned by the Hubic API. This may need
-// to be revisited after some actual experience.
+// to be revisted after some actual experience.
 
 import (
-	"context"
 	"encoding/json"
 	"fmt"
 	"io/ioutil"
+	"log"
 	"net/http"
 	"strings"
 	"time"
 
-	swiftLib "github.com/ncw/swift/v2"
+	swiftLib "github.com/ncw/swift"
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/backend/swift"
 	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/config/obscure"
@@ -37,7 +38,7 @@ var (
 	// Description of how to auth for this app
 	oauthConfig = &oauth2.Config{
 		Scopes: []string{
-			"credentials.r", // Read OpenStack credentials
+			"credentials.r", // Read Openstack credentials
 		},
 		Endpoint: oauth2.Endpoint{
 			AuthURL: "https://api.hubic.com/oauth/auth/",
@@ -55,21 +56,28 @@ func init() {
 		Name:        "hubic",
 		Description: "Hubic",
 		NewFs:       NewFs,
-		Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
-			return oauthutil.ConfigOut("", &oauthutil.Options{
-				OAuth2Config: oauthConfig,
-			})
+		Config: func(name string, m configmap.Mapper) {
+			err := oauthutil.Config("hubic", name, m, oauthConfig)
+			if err != nil {
+				log.Fatalf("Failed to configure token: %v", err)
+			}
 		},
-		Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
+		Options: append([]fs.Option{{
+			Name: config.ConfigClientID,
+			Help: "Hubic Client Id\nLeave blank normally.",
+		}, {
+			Name: config.ConfigClientSecret,
+			Help: "Hubic Client Secret\nLeave blank normally.",
+		}}, swift.SharedOptions...),
 	})
 }
 
 // credentials is the JSON returned from the Hubic API to read the
 // OpenStack credentials
 type credentials struct {
-	Token    string `json:"token"`    // OpenStack token
-	Endpoint string `json:"endpoint"` // OpenStack endpoint
-	Expires  string `json:"expires"`  // Expires date - e.g. "2015-11-09T14:24:56+01:00"
+	Token    string `json:"token"`    // Openstack token
+	Endpoint string `json:"endpoint"` // Openstack endpoint
+	Expires  string `json:"expires"`  // Expires date - eg "2015-11-09T14:24:56+01:00"
 }
 
 // Fs represents a remote hubic
@@ -107,8 +115,8 @@ func (f *Fs) String() string {
 // getCredentials reads the OpenStack Credentials using the Hubic API
 //
 // The credentials are read into the Fs
-func (f *Fs) getCredentials(ctx context.Context) (err error) {
-	req, err := http.NewRequestWithContext(ctx, "GET", "https://api.hubic.com/1.0/account/credentials", nil)
+func (f *Fs) getCredentials() (err error) {
+	req, err := http.NewRequest("GET", "https://api.hubic.com/1.0/account/credentials", nil)
 	if err != nil {
 		return err
 	}
@@ -143,8 +151,8 @@ func (f *Fs) getCredentials(ctx context.Context) (err error) {
 }
 
 // NewFs constructs an Fs from the path, container:path
-func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
-	client, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	client, _, err := oauthutil.NewClient(name, m, oauthConfig)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to configure Hubic")
 	}
@@ -154,14 +162,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}
 
 	// Make the swift Connection
-	ci := fs.GetConfig(ctx)
 	c := &swiftLib.Connection{
 		Auth:           newAuth(f),
-		ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
-		Timeout:        10 * ci.Timeout,        // Use the timeouts in the transport
-		Transport:      fshttp.NewTransport(ctx),
+		ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
+		Timeout:        10 * fs.Config.Timeout,        // Use the timeouts in the transport
+		Transport:      fshttp.NewTransport(fs.Config),
	}
-	err = c.Authenticate(ctx)
+	err = c.Authenticate()
 	if err != nil {
 		return nil, errors.Wrap(err, "error authenticating swift connection")
 	}
@@ -174,7 +181,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}
 
 	// Make inner swift Fs from the connection
-	swiftFs, err := swift.NewFsWithConnection(ctx, opt, name, root, c, true)
+	swiftFs, err := swift.NewFsWithConnection(opt, name, root, c, true)
 	if err != nil && err != fs.ErrorIsFile {
 		return nil, err
 	}
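getCredentials ultimately unmarshals the Hubic response into the credentials struct above. A self-contained sketch of that decode step, with an invented payload shaped like the documented example:

package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the credentials struct above; the payload below is an invented
// example matching the "expires" format noted in the field comment.
type credentials struct {
	Token    string `json:"token"`
	Endpoint string `json:"endpoint"`
	Expires  string `json:"expires"`
}

func main() {
	body := []byte(`{"token":"tk123","endpoint":"https://swift.example/v1/AUTH_x","expires":"2015-11-09T14:24:56+01:00"}`)
	var c credentials
	if err := json.Unmarshal(body, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Token, c.Endpoint, c.Expires)
}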


@@ -46,57 +46,13 @@ func (t Time) String() string { return time.Time(t).Format(timeFormat) }
 // APIString returns Time string in Jottacloud API format
 func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }
 
-// LoginToken is struct representing the login token generated in the WebUI
-type LoginToken struct {
-	Username      string `json:"username"`
-	Realm         string `json:"realm"`
-	WellKnownLink string `json:"well_known_link"`
-	AuthToken     string `json:"auth_token"`
-}
-
-// WellKnown contains some configuration parameters for setting up endpoints
-type WellKnown struct {
-	Issuer                                     string   `json:"issuer"`
-	AuthorizationEndpoint                      string   `json:"authorization_endpoint"`
-	TokenEndpoint                              string   `json:"token_endpoint"`
-	TokenIntrospectionEndpoint                 string   `json:"token_introspection_endpoint"`
-	UserinfoEndpoint                           string   `json:"userinfo_endpoint"`
-	EndSessionEndpoint                         string   `json:"end_session_endpoint"`
-	JwksURI                                    string   `json:"jwks_uri"`
-	CheckSessionIframe                         string   `json:"check_session_iframe"`
-	GrantTypesSupported                        []string `json:"grant_types_supported"`
-	ResponseTypesSupported                     []string `json:"response_types_supported"`
-	SubjectTypesSupported                      []string `json:"subject_types_supported"`
-	IDTokenSigningAlgValuesSupported           []string `json:"id_token_signing_alg_values_supported"`
-	UserinfoSigningAlgValuesSupported          []string `json:"userinfo_signing_alg_values_supported"`
-	RequestObjectSigningAlgValuesSupported     []string `json:"request_object_signing_alg_values_supported"`
-	ResponseNodesSupported                     []string `json:"response_modes_supported"`
-	RegistrationEndpoint                       string   `json:"registration_endpoint"`
-	TokenEndpointAuthMethodsSupported          []string `json:"token_endpoint_auth_methods_supported"`
-	TokenEndpointAuthSigningAlgValuesSupported []string `json:"token_endpoint_auth_signing_alg_values_supported"`
-	ClaimsSupported                            []string `json:"claims_supported"`
-	ClaimTypesSupported                        []string `json:"claim_types_supported"`
-	ClaimsParameterSupported                   bool     `json:"claims_parameter_supported"`
-	ScopesSupported                            []string `json:"scopes_supported"`
-	RequestParameterSupported                  bool     `json:"request_parameter_supported"`
-	RequestURIParameterSupported               bool     `json:"request_uri_parameter_supported"`
-	CodeChallengeMethodsSupported              []string `json:"code_challenge_methods_supported"`
-	TLSClientCertificateBoundAccessTokens      bool     `json:"tls_client_certificate_bound_access_tokens"`
-	IntrospectionEndpoint                      string   `json:"introspection_endpoint"`
-}
-
 // TokenJSON is the struct representing the HTTP response from OAuth2
 // providers returning a token in JSON form.
 type TokenJSON struct {
 	AccessToken      string `json:"access_token"`
-	ExpiresIn        int32  `json:"expires_in"` // at least PayPal returns string, while most return number
-	RefreshExpiresIn int32  `json:"refresh_expires_in"`
-	RefreshToken     string `json:"refresh_token"`
-	TokenType        string `json:"token_type"`
-	IDToken          string `json:"id_token"`
-	NotBeforePolicy  int32  `json:"not-before-policy"`
-	SessionState     string `json:"session_state"`
-	Scope            string `json:"scope"`
+	TokenType    string `json:"token_type"`
+	RefreshToken string `json:"refresh_token"`
+	ExpiresIn    int32  `json:"expires_in"` // at least PayPal returns string, while most return number
 }
 
 // JSON structures returned by new API
@@ -153,9 +109,9 @@ type CustomerInfo struct {
 	AccountType      string      `json:"account_type"`
 	SubscriptionType string      `json:"subscription_type"`
 	Usage            int64       `json:"usage"`
-	Quota            int64       `json:"quota"`
+	Qouta            int64       `json:"quota"`
 	BusinessUsage    int64       `json:"business_usage"`
-	BusinessQuota    int64       `json:"business_quota"`
+	BusinessQouta    int64       `json:"business_quota"`
 	WriteLocked      bool        `json:"write_locked"`
 	ReadLocked       bool        `json:"read_locked"`
 	LockedCause      interface{} `json:"locked_cause"`
@@ -164,12 +120,6 @@ type CustomerInfo struct {
 	IOSHash      string `json:"ios_hash"`
 }
 
-// TrashResponse is returned when emptying the Trash
-type TrashResponse struct {
-	Folders int64 `json:"folders"`
-	Files   int64 `json:"files"`
-}
-
 // XML structures returned by the old API
 
 // Flag is a hacky type for checking if an attribute is present
@@ -386,7 +336,7 @@ type Error struct {
 	Cause      string `xml:"cause"`
 }
 
-// Error returns a string for the error and satisfies the error interface
+// Error returns a string for the error and statistifes the error interface
 func (e *Error) Error() string {
 	out := fmt.Sprintf("error %d", e.StatusCode)
 	if e.Message != "" {
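The diff cuts off inside Error(), but the visible lines show the usual Go pattern: a struct with an Error() string method satisfies the error interface, so *api.Error values can be returned wherever an error is expected. A sketch with a trimmed copy of the type (the message formatting past the truncation point is an assumption):

package main

import (
	"errors"
	"fmt"
)

// apiError is a trimmed stand-in for the XML Error type above.
type apiError struct {
	StatusCode int
	Message    string
}

func (e *apiError) Error() string {
	out := fmt.Sprintf("error %d", e.StatusCode)
	if e.Message != "" {
		out += ": " + e.Message // assumed continuation of the truncated method
	}
	return out
}

func main() {
	var err error = &apiError{StatusCode: 404, Message: "no such file"}
	fmt.Println(err) // error 404: no such file

	var ae *apiError
	fmt.Println(errors.As(err, &ae)) // true: interoperates with the errors package
}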

Some files were not shown because too many files have changed in this diff.