1
0
mirror of https://github.com/rclone/rclone.git synced 2026-01-03 00:53:43 +00:00

Compare commits

..

1 Commits

1952 changed files with 98497 additions and 293450 deletions

49
.appveyor.yml Normal file
View File

@@ -0,0 +1,49 @@
version: "{build}"
os: Windows Server 2012 R2
clone_folder: c:\gopath\src\github.com\ncw\rclone
cache:
- '%LocalAppData%\go-build'
environment:
GOPATH: C:\gopath
CPATH: C:\Program Files (x86)\WinFsp\inc\fuse
ORIGPATH: '%PATH%'
NOCCPATH: C:\MinGW\bin;%GOPATH%\bin;%PATH%
PATHCC64: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin;%NOCCPATH%
PATHCC32: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%NOCCPATH%
PATH: '%PATHCC64%'
RCLONE_CONFIG_PASS:
secure: HbzxSy9zQ8NYWN9NNPf6ALQO9Q0mwRNqwehsLcOEHy0=
install:
- choco install winfsp -y
- choco install zip -y
- copy c:\MinGW\bin\mingw32-make.exe c:\MinGW\bin\make.exe
build_script:
- echo %PATH%
- echo %GOPATH%
- go version
- go env
- go install
- go build
- make log_since_last_release > %TEMP%\git-log.txt
- make version > %TEMP%\version
- set /p RCLONE_VERSION=<%TEMP%\version
- set PATH=%PATHCC32%
- go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/386" -cgo -tags cmount %RCLONE_VERSION%
- set PATH=%PATHCC64%
- go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/amd64" -cgo -no-clean -tags cmount %RCLONE_VERSION%
test_script:
- make GOTAGS=cmount quicktest
artifacts:
- path: rclone.exe
- path: build/*-v*.zip
deploy_script:
- IF "%APPVEYOR_REPO_NAME%" == "ncw/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload

50
.circleci/config.yml Normal file
View File

@@ -0,0 +1,50 @@
---
version: 2
jobs:
build:
machine: true
working_directory: ~/.go_workspace/src/github.com/ncw/rclone
steps:
- checkout
- run:
name: Cross-compile rclone
command: |
docker pull rclone/xgo-cgofuse
go get -v github.com/karalabe/xgo
xgo \
--image=rclone/xgo-cgofuse \
--targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
-tags cmount \
.
xgo \
--targets=android/*,ios/* \
.
- run:
name: Prepare artifacts
command: |
mkdir -p /tmp/rclone.dist
cp -R rclone-* /tmp/rclone.dist
mkdir build
cp -R rclone-* build/
- run:
name: Build rclone
command: |
go version
go build
- run:
name: Upload artifacts
command: |
if [[ $CIRCLE_PULL_REQUEST != "" ]]; then
make circleci_upload
fi
- store_artifacts:
path: /tmp/rclone.dist

7
.gitattributes vendored
View File

@@ -1,7 +0,0 @@
# Ignore generated files in GitHub language statistics and diffs
/MANUAL.* linguist-generated=true
/rclone.1 linguist-generated=true
# Don't fiddle with the line endings of test data
**/testdata/** -text
**/test/** -text

View File

@@ -10,7 +10,7 @@ instead of filing an issue for a quick response.
If you are reporting a bug or asking for a new feature then please use one of the templates here: If you are reporting a bug or asking for a new feature then please use one of the templates here:
https://github.com/rclone/rclone/issues/new https://github.com/ncw/rclone/issues/new
otherwise fill in the form below. otherwise fill in the form below.

View File

@@ -22,8 +22,8 @@ Link issues and relevant forum posts here.
#### Checklist #### Checklist
- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request). - [ ] I have read the [contribution guidelines](https://github.com/ncw/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
- [ ] I have added tests for all changes in this PR if appropriate. - [ ] I have added tests for all changes in this PR if appropriate.
- [ ] I have added documentation for the changes if appropriate. - [ ] I have added documentation for the changes if appropriate.
- [ ] All commit messages are in [house style](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#commit-messages). - [ ] All commit messages are in [house style](https://github.com/ncw/rclone/blob/master/CONTRIBUTING.md#commit-messages).
- [ ] I'm done, this Pull Request is ready for review :-) - [ ] I'm done, this Pull Request is ready for review :-)

View File

@@ -1,250 +0,0 @@
---
# Github Actions build for rclone
# -*- compile-command: "yamllint -f parsable build.yml" -*-
name: build
# Trigger the workflow on push or pull request
on:
push:
branches:
- '*'
tags:
- '*'
pull_request:
jobs:
build:
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'modules_race', 'go1.10', 'go1.11', 'go1.12']
include:
- job_name: linux
os: ubuntu-latest
go: '1.13.x'
modules: 'off'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
quicktest: true
deploy: true
- job_name: mac
os: macOS-latest
go: '1.13.x'
modules: 'off'
gotags: '' # cmount doesn't work on osx travis for some reason
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
racequicktest: true
deploy: true
- job_name: windows_amd64
os: windows-latest
go: '1.13.x'
modules: 'off'
gotags: cmount
build_flags: '-include "^windows/amd64" -cgo'
quicktest: true
racequicktest: true
deploy: true
- job_name: windows_386
os: windows-latest
go: '1.13.x'
modules: 'off'
gotags: cmount
goarch: '386'
cgo: '1'
build_flags: '-include "^windows/386" -cgo'
quicktest: true
deploy: true
- job_name: other_os
os: ubuntu-latest
go: '1.13.x'
modules: 'off'
build_flags: '-exclude "^(windows/|darwin/amd64|linux/)"'
compile_all: true
deploy: true
- job_name: modules_race
os: ubuntu-latest
go: '1.13.x'
modules: 'on'
quicktest: true
racequicktest: true
- job_name: go1.10
os: ubuntu-latest
go: '1.10.x'
modules: 'off'
quicktest: true
- job_name: go1.11
os: ubuntu-latest
go: '1.11.x'
modules: 'off'
quicktest: true
- job_name: go1.12
os: ubuntu-latest
go: '1.12.x'
modules: 'off'
quicktest: true
name: ${{ matrix.job_name }}
runs-on: ${{ matrix.os }}
steps:
- name: Checkout
uses: actions/checkout@v1
with:
path: ./src/github.com/${{ github.repository }}
- name: Install Go
uses: actions/setup-go@v1
with:
go-version: ${{ matrix.go }}
- name: Set environment variables
shell: bash
run: |
echo '::set-env name=GOPATH::${{ runner.workspace }}'
echo '::add-path::${{ runner.workspace }}/bin'
echo '::set-env name=GO111MODULE::${{ matrix.modules }}'
echo '::set-env name=GOTAGS::${{ matrix.gotags }}'
echo '::set-env name=BUILD_FLAGS::${{ matrix.build_flags }}'
if [[ "${{ matrix.goarch }}" != "" ]]; then echo '::set-env name=GOARCH::${{ matrix.goarch }}' ; fi
if [[ "${{ matrix.cgo }}" != "" ]]; then echo '::set-env name=CGO_ENABLED::${{ matrix.cgo }}' ; fi
- name: Install Libraries on Linux
shell: bash
run: |
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
sudo apt-get install fuse libfuse-dev rpm pkg-config
if: matrix.os == 'ubuntu-latest'
- name: Install Libraries on macOS
shell: bash
run: |
brew update
brew cask install osxfuse
if: matrix.os == 'macOS-latest'
- name: Install Libraries on Windows
shell: powershell
run: |
$ProgressPreference = 'SilentlyContinue'
choco install -y winfsp zip
Write-Host "::set-env name=CPATH::C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
if ($env:GOARCH -eq "386") {
choco install -y mingw --forcex86 --force
Write-Host "::add-path::C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
}
# Copy mingw32-make.exe to make.exe so the same command line
# can be used on Windows as on macOS and Linux
$path = (get-command mingw32-make.exe).Path
Copy-Item -Path $path -Destination (Join-Path (Split-Path -Path $path) 'make.exe')
if: matrix.os == 'windows-latest'
- name: Print Go version and environment
shell: bash
run: |
printf "Using go at: $(which go)\n"
printf "Go version: $(go version)\n"
printf "\n\nGo environment:\n\n"
go env
printf "\n\nRclone environment:\n\n"
make vars
printf "\n\nSystem environment:\n\n"
env
- name: Run tests
shell: bash
run: |
make
make quicktest
if: matrix.quicktest
- name: Race test
shell: bash
run: |
make racequicktest
if: matrix.racequicktest
- name: Code quality test
shell: bash
run: |
make build_dep
make check
if: matrix.check
- name: Compile all architectures test
shell: bash
run: |
make
make compile_all
if: matrix.compile_all
- name: Deploy built binaries
shell: bash
run: |
if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep ; fi
make travis_beta
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# working-directory: '$(modulePath)'
if: matrix.deploy && github.head_ref == ''
xgo:
timeout-minutes: 60
name: "xgo cross compile"
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v1
with:
path: ./src/github.com/${{ github.repository }}
- name: Set environment variables
shell: bash
run: |
echo '::set-env name=GOPATH::${{ runner.workspace }}'
echo '::add-path::${{ runner.workspace }}/bin'
- name: Cross-compile rclone
run: |
docker pull billziss/xgo-cgofuse
go get -v github.com/karalabe/xgo
xgo \
-image=billziss/xgo-cgofuse \
-targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
-tags cmount \
-dest build \
.
xgo \
-image=billziss/xgo-cgofuse \
-targets=android/*,ios/* \
-dest build \
.
- name: Build rclone
run: |
docker pull golang
docker run --rm -v "$PWD":/usr/src/rclone -w /usr/src/rclone golang go build -mod=vendor -v
- name: Upload artifacts
run: |
make circleci_upload
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
if: github.head_ref == ''

3
.gitignore vendored
View File

@@ -5,6 +5,3 @@ build
docs/public docs/public
rclone.iml rclone.iml
.idea .idea
.history
*.test
*.log

View File

@@ -1,5 +1,9 @@
# golangci-lint configuration options # golangci-lint configuration options
run:
build-tags:
- cmount
linters: linters:
enable: enable:
- deadcode - deadcode

2
.pkgr.yml Normal file
View File

@@ -0,0 +1,2 @@
default_dependencies: false
cli: rclone

109
.travis.yml Normal file
View File

@@ -0,0 +1,109 @@
---
language: go
sudo: required
dist: trusty
os:
- linux
go_import_path: github.com/ncw/rclone
before_install:
- git fetch --unshallow --tags
- |
if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
fi
if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
brew update
brew tap caskroom/cask
brew cask install osxfuse
fi
if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then
choco install -y winfsp zip make
cd ../.. # fix crlf in git checkout
mv $TRAVIS_REPO_SLUG _old
git config --global core.autocrlf false
git clone _old $TRAVIS_REPO_SLUG
cd $TRAVIS_REPO_SLUG
fi
install:
- make vars
env:
global:
- GOTAGS=cmount
- GO111MODULE=off
- secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
- secure: AMjrMAksDy3QwqGqnvtUg8FL/GNVgNqTqhntLF9HSU0njHhX6YurGGnfKdD9vNHlajPQOewvmBjwNLcDWGn2WObdvmh9Ohep0EmOjZ63kliaRaSSQueSd8y0idfqMQAxep0SObOYbEDVmQh0RCAE9wOVKRaPgw98XvgqWGDq5Tw=
- secure: Uaiveq+/rvQjO03GzvQZV2J6pZfedoFuhdXrLVhhHSeP4ZBca0olw7xaqkabUyP3LkVYXMDSX8EbyeuQT1jfEe5wp5sBdfaDtuYW6heFyjiHIIIbVyBfGXon6db4ETBjOaX/Xt8uktrgNge6qFlj+kpnmpFGxf0jmDLw1zgg7tk=
addons:
apt:
packages:
- fuse
- libfuse-dev
- rpm
- pkg-config
cache:
directories:
- $HOME/.cache/go-build
matrix:
allow_failures:
- go: tip
include:
- go: 1.8.x
script:
- make quicktest
- go: 1.9.x
script:
- make quicktest
- go: 1.10.x
script:
- make quicktest
- go: 1.11.x
script:
- make quicktest
- go: 1.12.x
env:
- GOTAGS=cmount
script:
- make build_dep
- make check
- make quicktest
- make racequicktest
- make compile_all
- os: osx
go: 1.12.x
env:
- GOTAGS= # cmount doesn't work on osx travis for some reason
cache:
directories:
- $HOME/Library/Caches/go-build
script:
- make
- make quicktest
- make racequicktest
# - os: windows
# go: 1.12.x
# env:
# - GOTAGS=cmount
# - CPATH='C:\Program Files (x86)\WinFsp\inc\fuse'
# #filter_secrets: false # works around a problem with secrets under windows
# cache:
# directories:
# - ${LocalAppData}/go-build
# script:
# - make
# - make quicktest
# - make racequicktest
- go: tip
script:
- make quicktest
deploy:
provider: script
script: make travis_beta
skip_cleanup: true
on:
repo: ncw/rclone
all_branches: true
go: 1.12.x
condition: $TRAVIS_PULL_REQUEST == false && $TRAVIS_OS_NAME != "windows"

View File

@@ -29,12 +29,12 @@ You'll need a Go environment set up with GOPATH set. See [the Go
getting started docs](https://golang.org/doc/install) for more info. getting started docs](https://golang.org/doc/install) for more info.
First in your web browser press the fork button on [rclone's GitHub First in your web browser press the fork button on [rclone's GitHub
page](https://github.com/rclone/rclone). page](https://github.com/ncw/rclone).
Now in your terminal Now in your terminal
go get -u github.com/rclone/rclone go get -u github.com/ncw/rclone
cd $GOPATH/src/github.com/rclone/rclone cd $GOPATH/src/github.com/ncw/rclone
git remote rename origin upstream git remote rename origin upstream
git remote add origin git@github.com:YOURUSER/rclone.git git remote add origin git@github.com:YOURUSER/rclone.git
@@ -118,7 +118,7 @@ but they can be run against any of the remotes.
cd fs/sync cd fs/sync
go test -v -remote TestDrive: go test -v -remote TestDrive:
go test -v -remote TestDrive: -fast-list go test -v -remote TestDrive: -subdir
cd fs/operations cd fs/operations
go test -v -remote TestDrive: go test -v -remote TestDrive:
@@ -127,7 +127,7 @@ If you want to use the integration test framework to run these tests
all together with an HTML report and test retries then from the all together with an HTML report and test retries then from the
project root: project root:
go install github.com/rclone/rclone/fstest/test_all go install github.com/ncw/rclone/fstest/test_all
test_all -backend drive test_all -backend drive
If you want to run all the integration tests against all the remotes, If you want to run all the integration tests against all the remotes,
@@ -135,7 +135,7 @@ then change into the project root and run
make test make test
This command is run daily on the integration test server. You can This command is run daily on the the integration test server. You can
find the results at https://pub.rclone.org/integration-tests/ find the results at https://pub.rclone.org/integration-tests/
## Code Organisation ## ## Code Organisation ##
@@ -341,12 +341,6 @@ Getting going
* Add your remote to the imports in `backend/all/all.go` * Add your remote to the imports in `backend/all/all.go`
* HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead. * HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
* Try to implement as many optional methods as possible as it makes the remote more usable. * Try to implement as many optional methods as possible as it makes the remote more usable.
* Use fs/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
* `go install -tags noencode`
* `rclone purge -v TestRemote:rclone-info`
* `rclone info -vv --write-json remote.json TestRemote:rclone-info`
* `go run cmd/info/internal/build_csv/main.go -o remote.csv remote.json`
* open `remote.csv` in a spreadsheet and examine
Unit tests Unit tests
@@ -368,59 +362,19 @@ Or if you want to run the integration tests manually:
* `go test -v -remote TestRemote:` * `go test -v -remote TestRemote:`
* `cd fs/sync` * `cd fs/sync`
* `go test -v -remote TestRemote:` * `go test -v -remote TestRemote:`
* If your remote defines `ListR` check with this also * If you are making a bucket based remote, then check with this also
* `go test -v -remote TestRemote: -subdir`
* And if your remote defines `ListR` this also
* `go test -v -remote TestRemote: -fast-list` * `go test -v -remote TestRemote: -fast-list`
See the [testing](#testing) section for more information on integration tests. See the [testing](#testing) section for more information on integration tests.
Add your fs to the docs - you'll need to pick an icon for it from Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in alphabetical order but with the local file system last.
[fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
alphabetical order of full name of remote (eg `drive` is ordered as
`Google Drive`) but with the local file system last.
* `README.md` - main GitHub page * `README.md` - main GitHub page
* `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`) * `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
* make sure this has the `autogenerated options` comments in (see your reference backend docs)
* update them with `make backenddocs` - revert any changes in other backends
* `docs/content/overview.md` - overview docs * `docs/content/overview.md` - overview docs
* `docs/content/docs.md` - list of remotes in config section * `docs/content/docs.md` - list of remotes in config section
* `docs/content/about.md` - front page of rclone.org * `docs/content/about.md` - front page of rclone.org
* `docs/layouts/chrome/navbar.html` - add it to the website navigation * `docs/layouts/chrome/navbar.html` - add it to the website navigation
* `bin/make_manual.py` - add the page to the `docs` constant * `bin/make_manual.py` - add the page to the `docs` constant
Once you've written the docs, run `make serve` and check they look OK
in the web browser and the links (internal and external) all work.
## Writing a plugin ##
New features (backends, commands) can also be added "out-of-tree", through Go plugins.
Changes will be kept in a dynamically loaded file instead of being compiled into the main binary.
This is useful if you can't merge your changes upstream or don't want to maintain a fork of rclone.
Usage
- Naming
- Plugins names must have the pattern `librcloneplugin_KIND_NAME.so`.
- `KIND` should be one of `backend`, `command` or `bundle`.
- Example: A plugin with backend support for PiFS would be called
`librcloneplugin_backend_pifs.so`.
- Loading
- Supported on macOS & Linux as of now. ([Go issue for Windows support](https://github.com/golang/go/issues/19282))
- Supported on rclone v1.50 or greater.
- All plugins in the folder specified by variable `$RCLONE_PLUGIN_PATH` are loaded.
- If this variable doesn't exist, plugin support is disabled.
- Plugins must be compiled against the exact version of rclone to work.
(The rclone used during building the plugin must be the same as the source of rclone)
Building
To turn your existing additions into a Go plugin, move them to an external repository
and change the top-level package name to `main`.
Check `rclone --version` and make sure that the plugin's rclone dependency and host Go version match.
Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.
[Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)
[Minimal example](https://gist.github.com/terorie/21b517ee347828e899e1913efc1d684f)

View File

@@ -1,22 +0,0 @@
FROM golang AS builder
COPY . /go/src/github.com/rclone/rclone/
WORKDIR /go/src/github.com/rclone/rclone/
RUN make quicktest
RUN \
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \
make
RUN ./rclone version
# Begin final image
FROM alpine:latest
RUN apk --no-cache add ca-certificates fuse
COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
ENTRYPOINT [ "rclone" ]
WORKDIR /data
ENV XDG_CONFIG_HOME=/config

View File

@@ -12,7 +12,6 @@ Current active maintainers of rclone are:
| Alex Chen | @Cnly | onedrive backend | | Alex Chen | @Cnly | onedrive backend |
| Sandeep Ummadi | @sandeepkru | azureblob backend | | Sandeep Ummadi | @sandeepkru | azureblob backend |
| Sebastian Bünger | @buengese | jottacloud & yandex backends | | Sebastian Bünger | @buengese | jottacloud & yandex backends |
| Ivan Andreev | @ivandeex | chunker & mailru backends |
**This is a work in progress Draft** **This is a work in progress Draft**
@@ -52,7 +51,7 @@ The milestones have these meanings:
* Help wanted - blue sky stuff that might get moved up, or someone could help with * Help wanted - blue sky stuff that might get moved up, or someone could help with
* Known bugs - bugs waiting on external factors or we aren't going to fix for the moment * Known bugs - bugs waiting on external factors or we aren't going to fix for the moment
Tickets [with no milestone](https://github.com/rclone/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up. Tickets [with no milestone](https://github.com/ncw/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.
## Closing Tickets ## ## Closing Tickets ##

17068
MANUAL.html

File diff suppressed because one or more lines are too long

6897
MANUAL.md

File diff suppressed because it is too large Load Diff

17724
MANUAL.txt

File diff suppressed because it is too large Load Diff

105
Makefile
View File

@@ -1,63 +1,44 @@
SHELL = bash SHELL = bash
# Branch we are working on BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(shell git rev-parse --abbrev-ref HEAD))
BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(BUILD_SOURCEBRANCHNAME),$(lastword $(subst /, ,$(GITHUB_REF))),$(shell git rev-parse --abbrev-ref HEAD))
# Tag of the current commit, if any. If this is not "" then we are building a release
RELEASE_TAG := $(shell git tag -l --points-at HEAD)
# Version of last release (may not be on this branch)
VERSION := $(shell cat VERSION)
# Last tag on this branch
LAST_TAG := $(shell git describe --tags --abbrev=0) LAST_TAG := $(shell git describe --tags --abbrev=0)
# If we are working on a release, override branch to master ifeq ($(BRANCH),$(LAST_TAG))
ifdef RELEASE_TAG
BRANCH := master BRANCH := master
endif endif
TAG_BRANCH := -$(BRANCH) TAG_BRANCH := -$(BRANCH)
BRANCH_PATH := branch/ BRANCH_PATH := branch/
# If building HEAD or master then unset TAG_BRANCH and BRANCH_PATH
ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),) ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
TAG_BRANCH := TAG_BRANCH :=
BRANCH_PATH := BRANCH_PATH :=
endif endif
# Make version suffix -DDD-gCCCCCCCC (D=commits since last relase, C=Commit) or blank TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
VERSION_SUFFIX := $(shell git describe --abbrev=8 --tags | perl -lpe 's/^v\d+\.\d+\.\d+//; s/^-(\d+)/"-".sprintf("%03d",$$1)/e;') NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
# TAG is current version + number of commits since last release + branch ifneq ($(TAG),$(LAST_TAG))
TAG := $(VERSION)$(VERSION_SUFFIX)$(TAG_BRANCH)
NEXT_VERSION := $(shell echo $(VERSION) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
ifndef RELEASE_TAG
TAG := $(TAG)-beta TAG := $(TAG)-beta
endif endif
GO_VERSION := $(shell go version) GO_VERSION := $(shell go version)
GO_FILES := $(shell go list ./... | grep -v /vendor/ ) GO_FILES := $(shell go list ./... | grep -v /vendor/ )
ifdef BETA_SUBDIR BETA_PATH := $(BRANCH_PATH)$(TAG)
BETA_SUBDIR := /$(BETA_SUBDIR)
endif
BETA_PATH := $(BRANCH_PATH)$(TAG)$(BETA_SUBDIR)
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/ BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
BETA_UPLOAD_ROOT := memstore:beta-rclone-org BETA_UPLOAD_ROOT := memstore:beta-rclone-org
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH) BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
# Pass in GOTAGS=xyz on the make command line to set build tags # Pass in GOTAGS=xyz on the make command line to set build tags
ifdef GOTAGS ifdef GOTAGS
BUILDTAGS=-tags "$(GOTAGS)" BUILDTAGS=-tags "$(GOTAGS)"
LINTTAGS=--build-tags "$(GOTAGS)"
endif endif
.PHONY: rclone test_all vars version .PHONY: rclone vars version
rclone: rclone:
go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) touch fs/version.go
mkdir -p `go env GOPATH`/bin/ go install -v --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new cp -av `go env GOPATH`/bin/rclone .
mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`
test_all:
go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
vars: vars:
@echo SHELL="'$(SHELL)'" @echo SHELL="'$(SHELL)'"
@echo BRANCH="'$(BRANCH)'" @echo BRANCH="'$(BRANCH)'"
@echo TAG="'$(TAG)'" @echo TAG="'$(TAG)'"
@echo VERSION="'$(VERSION)'" @echo LAST_TAG="'$(LAST_TAG)'"
@echo NEXT_VERSION="'$(NEXT_VERSION)'" @echo NEW_TAG="'$(NEW_TAG)'"
@echo GO_VERSION="'$(GO_VERSION)'" @echo GO_VERSION="'$(GO_VERSION)'"
@echo BETA_URL="'$(BETA_URL)'" @echo BETA_URL="'$(BETA_URL)'"
@@ -65,7 +46,8 @@ version:
@echo '$(TAG)' @echo '$(TAG)'
# Full suite of integration tests # Full suite of integration tests
test: rclone test_all test: rclone
go install --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/ncw/rclone/fstest/test_all
-test_all 2>&1 | tee test_all.log -test_all 2>&1 | tee test_all.log
@echo "Written logs in test_all.log" @echo "Written logs in test_all.log"
@@ -78,8 +60,11 @@ racequicktest:
# Do source code quality checks # Do source code quality checks
check: rclone check: rclone
@# we still run go vet for -printfuncs which golangci-lint doesn't do yet
@# see: https://github.com/golangci/golangci-lint/issues/204
@echo "-- START CODE QUALITY REPORT -------------------------------" @echo "-- START CODE QUALITY REPORT -------------------------------"
@golangci-lint run $(LINTTAGS) ./... @go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
@golangci-lint run ./...
@echo "-- END CODE QUALITY REPORT ---------------------------------" @echo "-- END CODE QUALITY REPORT ---------------------------------"
# Get the build dependencies # Get the build dependencies
@@ -88,8 +73,8 @@ build_dep:
# Get the release dependencies # Get the release dependencies
release_dep: release_dep:
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz' go get -u github.com/goreleaser/nfpm/...
go run bin/get-github-release.go -extract github-release aktau/github-release 'linux-amd64-github-release.tar.bz2' go get -u github.com/aktau/github-release
# Update dependencies # Update dependencies
update: update:
@@ -97,11 +82,6 @@ update:
GO111MODULE=on go mod tidy GO111MODULE=on go mod tidy
GO111MODULE=on go mod vendor GO111MODULE=on go mod vendor
# Tidy the module dependencies
tidy:
GO111MODULE=on go mod tidy
GO111MODULE=on go mod vendor
doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
rclone.1: MANUAL.md rclone.1: MANUAL.md
@@ -117,10 +97,10 @@ MANUAL.txt: MANUAL.md
pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt
commanddocs: rclone commanddocs: rclone
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/ XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/commands/
backenddocs: rclone bin/make_backend_docs.py backenddocs: rclone bin/make_backend_docs.py
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py ./bin/make_backend_docs.py
rcdocs: rclone rcdocs: rclone
bin/make_rc_docs.sh bin/make_rc_docs.sh
@@ -173,7 +153,7 @@ log_since_last_release:
git log $(LAST_TAG).. git log $(LAST_TAG)..
compile_all: compile_all:
go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG) go run bin/cross-compile.go -parallel 8 -compile-only $(BUILDTAGS) $(TAG)
appveyor_upload: appveyor_upload:
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD) rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
@@ -189,45 +169,54 @@ ifndef BRANCH_PATH
endif endif
@echo Beta release ready at $(BETA_URL)/testbuilds @echo Beta release ready at $(BETA_URL)/testbuilds
BUILD_FLAGS := -exclude "^(windows|darwin)/"
ifeq ($(TRAVIS_OS_NAME),osx)
BUILD_FLAGS := -include "^darwin/" -cgo
endif
ifeq ($(TRAVIS_OS_NAME),windows)
# BUILD_FLAGS := -include "^windows/" -cgo
# 386 doesn't build yet
BUILD_FLAGS := -include "^windows/amd64" -cgo
endif
travis_beta: travis_beta:
ifeq (linux,$(filter linux,$(subst Linux,linux,$(TRAVIS_OS_NAME) $(AGENT_OS)))) ifeq ($(TRAVIS_OS_NAME),linux)
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar.gz' go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar.gz'
endif endif
git log $(LAST_TAG).. > /tmp/git-log.txt git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG) go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) -parallel 8 $(BUILDTAGS) $(TAG)
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD) rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH ifndef BRANCH_PATH
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR) rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
endif endif
@echo Beta release ready at $(BETA_URL) @echo Beta release ready at $(BETA_URL)
# Fetch the binary builds from travis and appveyor # Fetch the binary builds from travis and appveyor
fetch_binaries: fetch_binaries:
rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/ rclone -P sync $(BETA_UPLOAD) build/
serve: website serve: website
cd docs && hugo server -v -w cd docs && hugo server -v -w
tag: doc tag: doc
@echo "Old tag is $(VERSION)" @echo "Old tag is $(LAST_TAG)"
@echo "New tag is $(NEXT_VERSION)" @echo "New tag is $(NEW_TAG)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)\"\n" | gofmt > fs/version.go echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html echo -n "$(NEW_TAG)" > docs/layouts/partials/version.html
echo "$(NEXT_VERSION)" > VERSION git tag -s -m "Version $(NEW_TAG)" $(NEW_TAG)
git tag -s -m "Version $(NEXT_VERSION)" $(NEXT_VERSION) bin/make_changelog.py $(LAST_TAG) $(NEW_TAG) > docs/content/changelog.md.new
bin/make_changelog.py $(LAST_TAG) $(NEXT_VERSION) > docs/content/changelog.md.new
mv docs/content/changelog.md.new docs/content/changelog.md mv docs/content/changelog.md.new docs/content/changelog.md
@echo "Edit the new changelog in docs/content/changelog.md" @echo "Edit the new changelog in docs/content/changelog.md"
@echo "Then commit all the changes" @echo "Then commit all the changes"
@echo git commit -m \"Version $(NEXT_VERSION)\" -a -v @echo git commit -m \"Version $(NEW_TAG)\" -a -v
@echo "And finally run make retag before make cross etc" @echo "And finally run make retag before make cross etc"
retag: retag:
git tag -f -s -m "Version $(VERSION)" $(VERSION) git tag -f -s -m "Version $(LAST_TAG)" $(LAST_TAG)
startdev: startdev:
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(VERSION)-DEV\"\n" | gofmt > fs/version.go echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(LAST_TAG)-DEV\"\n" | gofmt > fs/version.go
git commit -m "Start $(VERSION)-DEV development" fs/version.go git commit -m "Start $(LAST_TAG)-DEV development" fs/version.go
winzip: winzip:
zip -9 rclone-$(TAG).zip rclone.exe zip -9 rclone-$(TAG).zip rclone.exe

View File

@@ -1,4 +1,4 @@
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/) [![Logo](https://rclone.org/img/rclone-120x120.png)](https://rclone.org/)
[Website](https://rclone.org) | [Website](https://rclone.org) |
[Documentation](https://rclone.org/docs/) | [Documentation](https://rclone.org/docs/) |
@@ -6,15 +6,13 @@
[Contributing](CONTRIBUTING.md) | [Contributing](CONTRIBUTING.md) |
[Changelog](https://rclone.org/changelog/) | [Changelog](https://rclone.org/changelog/) |
[Installation](https://rclone.org/install/) | [Installation](https://rclone.org/install/) |
[Forum](https://forum.rclone.org/) [Forum](https://forum.rclone.org/) |
[G+](https://google.com/+RcloneOrg)
[![Build Status](https://travis-ci.org/rclone/rclone.svg?branch=master)](https://travis-ci.org/rclone/rclone) [![Build Status](https://travis-ci.org/ncw/rclone.svg?branch=master)](https://travis-ci.org/ncw/rclone)
[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/rclone/rclone?branch=master&passingText=windows%20-%20ok&svg=true)](https://ci.appveyor.com/project/rclone/rclone) [![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/ncw/rclone?branch=master&passingText=windows%20-%20ok&svg=true)](https://ci.appveyor.com/project/ncw/rclone)
[![Build Status](https://dev.azure.com/rclone/rclone/_apis/build/status/rclone.rclone?branchName=master)](https://dev.azure.com/rclone/rclone/_build/latest?definitionId=2&branchName=master) [![CircleCI](https://circleci.com/gh/ncw/rclone/tree/master.svg?style=svg)](https://circleci.com/gh/ncw/rclone/tree/master)
[![CircleCI](https://circleci.com/gh/rclone/rclone/tree/master.svg?style=svg)](https://circleci.com/gh/rclone/rclone/tree/master) [![GoDoc](https://godoc.org/github.com/ncw/rclone?status.svg)](https://godoc.org/github.com/ncw/rclone)
[![Go Report Card](https://goreportcard.com/badge/github.com/rclone/rclone)](https://goreportcard.com/report/github.com/rclone/rclone)
[![GoDoc](https://godoc.org/github.com/rclone/rclone?status.svg)](https://godoc.org/github.com/rclone/rclone)
[![Docker Pulls](https://img.shields.io/docker/pulls/rclone/rclone)](https://hub.docker.com/r/rclone/rclone)
# Rclone # Rclone
@@ -22,27 +20,23 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
## Storage providers ## Storage providers
* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss) * Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status)) * Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/) * Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/) * Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
* Box [:page_facing_up:](https://rclone.org/box/) * Box [:page_facing_up:](https://rclone.org/box/)
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph) * Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces) * DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost) * Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/) * Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* FTP [:page_facing_up:](https://rclone.org/ftp/) * FTP [:page_facing_up:](https://rclone.org/ftp/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/) * Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/) * Google Drive [:page_facing_up:](https://rclone.org/drive/)
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
* HTTP [:page_facing_up:](https://rclone.org/http/) * HTTP [:page_facing_up:](https://rclone.org/http/)
* Hubic [:page_facing_up:](https://rclone.org/hubic/) * Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/) * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3) * IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* Koofr [:page_facing_up:](https://rclone.org/koofr/) * Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/) * Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/) * Mega [:page_facing_up:](https://rclone.org/mega/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/) * Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
@@ -55,8 +49,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/) * Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud) * ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/) * pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/) * put.io [:page_facing_up:](https://rclone.org/webdav/#put-io)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/) * QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/) * Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway) * Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
@@ -77,12 +70,9 @@ Please see [the full list of all storage providers and their features](https://r
* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical * [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality * [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
* Can sync to and from network, e.g. two different cloud accounts * Can sync to and from network, e.g. two different cloud accounts
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
* Optional encryption ([Crypt](https://rclone.org/crypt/)) * Optional encryption ([Crypt](https://rclone.org/crypt/))
* Optional cache ([Cache](https://rclone.org/cache/)) * Optional cache ([Cache](https://rclone.org/cache/))
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/)) * Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
* Multi-threaded downloads to local disk
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna
## Installation & documentation ## Installation & documentation

View File

@@ -1,24 +1,17 @@
# Release Extra required software for making a release
This file describes how to make the various kinds of releases
## Extra required software for making a release
* [github-release](https://github.com/aktau/github-release) for uploading packages * [github-release](https://github.com/aktau/github-release) for uploading packages
* pandoc for making the html and man pages * pandoc for making the html and man pages
## Making a release Making a release
* git status - make sure everything is checked in * git status - make sure everything is checked in
* Check travis & appveyor builds are green * Check travis & appveyor builds are green
* make check * make check
* make test # see integration test server or run locally * make test # see integration test server or run locally
* make tag * make tag
* edit docs/content/changelog.md * edit docs/content/changelog.md
* make tidy
* make doc * make doc
* git status - to check for new man pages - git add them * git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX.0" * git commit -a -v -m "Version v1.XX"
* make retag * make retag
* git push --tags origin master * git push --tags origin master
* # Wait for the appveyor and travis builds to complete then... * # Wait for the appveyor and travis builds to complete then...
@@ -33,7 +26,6 @@ This file describes how to make the various kinds of releases
* # announce with forum post, twitter post, G+ post * # announce with forum post, twitter post, G+ post
Early in the next release cycle update the vendored dependencies Early in the next release cycle update the vendored dependencies
* Review any pinned packages in go.mod and remove if possible * Review any pinned packages in go.mod and remove if possible
* make update * make update
* git status * git status
@@ -55,56 +47,24 @@ Can be fixed with
* GO111MODULE=on go mod vendor * GO111MODULE=on go mod vendor
## Making a point release Making a point release. If rclone needs a point release due to some
horrendous bug, then
If rclone needs a point release due to some horrendous bug: * git branch v1.XX v1.XX-fixes
First make the release branch. If this is a second point release then
this will be done already.
* BASE_TAG=v1.XX # eg v1.49
* NEW_TAG=${BASE_TAG}.Y # eg v1.49.1
* echo $BASE_TAG $NEW_TAG # v1.49 v1.49.1
* git branch ${BASE_TAG} ${BASE_TAG}-fixes
Now
* git co ${BASE_TAG}-fixes
* git cherry-pick any fixes * git cherry-pick any fixes
* Test (see above) * Test (see above)
* make NEXT_VERSION=${NEW_TAG} tag * make NEW_TAG=v1.XX.1 tag
* edit docs/content/changelog.md * edit docs/content/changelog.md
* make TAG=${NEW_TAG} doc * make TAG=v1.43.1 doc
* git commit -a -v -m "Version ${NEW_TAG}" * git commit -a -v -m "Version v1.XX.1"
* git tag -d ${NEW_TAG} * git tag -d -v1.XX.1
* git tag -s -m "Version ${NEW_TAG}" ${NEW_TAG} * git tag -s -m "Version v1.XX.1" v1.XX.1
* git push --tags -u origin ${BASE_TAG}-fixes * git push --tags -u origin v1.XX-fixes
* Wait for builds to complete * make BRANCH_PATH= TAG=v1.43.1 fetch_binaries
* make BRANCH_PATH= TAG=${NEW_TAG} fetch_binaries * make TAG=v1.43.1 tarball
* make TAG=${NEW_TAG} tarball * make TAG=v1.43.1 sign_upload
* make TAG=${NEW_TAG} sign_upload * make TAG=v1.43.1 check_sign
* make TAG=${NEW_TAG} check_sign * make TAG=v1.43.1 upload
* make TAG=${NEW_TAG} upload * make TAG=v1.43.1 upload_website
* make TAG=${NEW_TAG} upload_website * make TAG=v1.43.1 upload_github
* make TAG=${NEW_TAG} upload_github * NB this overwrites the current beta so after the release, rebuild the last travis build
* NB this overwrites the current beta so we need to do this
* git co master
* make VERSION=${NEW_TAG} startdev
* # cherry pick the changes to the changelog and VERSION
* git checkout ${BASE_TAG}-fixes VERSION docs/content/changelog.md
* git commit --amend
* git push
* Announce! * Announce!
## Making a manual build of docker
The rclone docker image should autobuild on docker hub. If it doesn't
or needs to be updated then rebuild like this.
```
docker build -t rclone/rclone:1.49.1 -t rclone/rclone:1.49 -t rclone/rclone:1 -t rclone/rclone:latest .
docker push rclone/rclone:1.49.1
docker push rclone/rclone:1.49
docker push rclone/rclone:1
docker push rclone/rclone:latest
```

View File

@@ -1 +0,0 @@
v1.50.2

View File

@@ -4,17 +4,17 @@ import (
"errors" "errors"
"strings" "strings"
"github.com/rclone/rclone/fs" "github.com/ncw/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap" "github.com/ncw/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct" "github.com/ncw/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath" "github.com/ncw/rclone/fs/fspath"
) )
// Register with Fs // Register with Fs
func init() { func init() {
fsi := &fs.RegInfo{ fsi := &fs.RegInfo{
Name: "alias", Name: "alias",
Description: "Alias for an existing remote", Description: "Alias for a existing remote",
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "remote", Name: "remote",

View File

@@ -1,16 +1,15 @@
package alias package alias
import ( import (
"context"
"fmt" "fmt"
"path" "path"
"path/filepath" "path/filepath"
"sort" "sort"
"testing" "testing"
_ "github.com/rclone/rclone/backend/local" // pull in test backend _ "github.com/ncw/rclone/backend/local" // pull in test backend
"github.com/rclone/rclone/fs" "github.com/ncw/rclone/fs"
"github.com/rclone/rclone/fs/config" "github.com/ncw/rclone/fs/config"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@@ -70,7 +69,7 @@ func TestNewFS(t *testing.T) {
prepare(t, remoteRoot) prepare(t, remoteRoot)
f, err := fs.NewFs(fmt.Sprintf("%s:%s", remoteName, test.fsRoot)) f, err := fs.NewFs(fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
require.NoError(t, err, what) require.NoError(t, err, what)
gotEntries, err := f.List(context.Background(), test.fsList) gotEntries, err := f.List(test.fsList)
require.NoError(t, err, what) require.NoError(t, err, what)
sort.Sort(gotEntries) sort.Sort(gotEntries)

View File

@@ -2,38 +2,31 @@ package all
import ( import (
// Active file systems // Active file systems
_ "github.com/rclone/rclone/backend/alias" _ "github.com/ncw/rclone/backend/alias"
_ "github.com/rclone/rclone/backend/amazonclouddrive" _ "github.com/ncw/rclone/backend/amazonclouddrive"
_ "github.com/rclone/rclone/backend/azureblob" _ "github.com/ncw/rclone/backend/azureblob"
_ "github.com/rclone/rclone/backend/b2" _ "github.com/ncw/rclone/backend/b2"
_ "github.com/rclone/rclone/backend/box" _ "github.com/ncw/rclone/backend/box"
_ "github.com/rclone/rclone/backend/cache" _ "github.com/ncw/rclone/backend/cache"
_ "github.com/rclone/rclone/backend/chunker" _ "github.com/ncw/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/crypt" _ "github.com/ncw/rclone/backend/drive"
_ "github.com/rclone/rclone/backend/drive" _ "github.com/ncw/rclone/backend/dropbox"
_ "github.com/rclone/rclone/backend/dropbox" _ "github.com/ncw/rclone/backend/ftp"
_ "github.com/rclone/rclone/backend/fichier" _ "github.com/ncw/rclone/backend/googlecloudstorage"
_ "github.com/rclone/rclone/backend/ftp" _ "github.com/ncw/rclone/backend/http"
_ "github.com/rclone/rclone/backend/googlecloudstorage" _ "github.com/ncw/rclone/backend/hubic"
_ "github.com/rclone/rclone/backend/googlephotos" _ "github.com/ncw/rclone/backend/jottacloud"
_ "github.com/rclone/rclone/backend/http" _ "github.com/ncw/rclone/backend/koofr"
_ "github.com/rclone/rclone/backend/hubic" _ "github.com/ncw/rclone/backend/local"
_ "github.com/rclone/rclone/backend/jottacloud" _ "github.com/ncw/rclone/backend/mega"
_ "github.com/rclone/rclone/backend/koofr" _ "github.com/ncw/rclone/backend/onedrive"
_ "github.com/rclone/rclone/backend/local" _ "github.com/ncw/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/mailru" _ "github.com/ncw/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/mega" _ "github.com/ncw/rclone/backend/qingstor"
_ "github.com/rclone/rclone/backend/onedrive" _ "github.com/ncw/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/opendrive" _ "github.com/ncw/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/pcloud" _ "github.com/ncw/rclone/backend/swift"
_ "github.com/rclone/rclone/backend/premiumizeme" _ "github.com/ncw/rclone/backend/union"
_ "github.com/rclone/rclone/backend/putio" _ "github.com/ncw/rclone/backend/webdav"
_ "github.com/rclone/rclone/backend/qingstor" _ "github.com/ncw/rclone/backend/yandex"
_ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/swift"
_ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/webdav"
_ "github.com/rclone/rclone/backend/yandex"
) )

View File

@@ -12,7 +12,6 @@ we ignore assets completely!
*/ */
import ( import (
"context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
@@ -23,23 +22,22 @@ import (
"time" "time"
acd "github.com/ncw/go-acd" acd "github.com/ncw/go-acd"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/dircache"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"golang.org/x/oauth2" "golang.org/x/oauth2"
) )
const ( const (
enc = encodings.AmazonCloudDrive
folderKind = "FOLDER" folderKind = "FOLDER"
fileKind = "FILE" fileKind = "FILE"
statusAvailable = "AVAILABLE" statusAvailable = "AVAILABLE"
@@ -249,7 +247,6 @@ func filterRequest(req *http.Request) {
// NewFs constructs an Fs from the path, container:path // NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
// Parse config into Options struct // Parse config into Options struct
opt := new(Options) opt := new(Options)
err := configstruct.Set(m, opt) err := configstruct.Set(m, opt)
@@ -311,7 +308,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f.dirCache = dircache.New(root, f.trueRootID, f) f.dirCache = dircache.New(root, f.trueRootID, f)
// Find the current root // Find the current root
err = f.dirCache.FindRoot(ctx, false) err = f.dirCache.FindRoot(false)
if err != nil { if err != nil {
// Assume it is a file // Assume it is a file
newRoot, remote := dircache.SplitPath(root) newRoot, remote := dircache.SplitPath(root)
@@ -319,12 +316,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
tempF.dirCache = dircache.New(newRoot, f.trueRootID, &tempF) tempF.dirCache = dircache.New(newRoot, f.trueRootID, &tempF)
tempF.root = newRoot tempF.root = newRoot
// Make new Fs which is the parent // Make new Fs which is the parent
err = tempF.dirCache.FindRoot(ctx, false) err = tempF.dirCache.FindRoot(false)
if err != nil { if err != nil {
// No root so return old f // No root so return old f
return f, nil return f, nil
} }
_, err := tempF.newObjectWithInfo(ctx, remote, nil) _, err := tempF.newObjectWithInfo(remote, nil)
if err != nil { if err != nil {
if err == fs.ErrorObjectNotFound { if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f // File doesn't exist so return old f
@@ -334,7 +331,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
} }
// XXX: update the old f here instead of returning tempF, since // XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver. // `features` were already filled with functions having *f as a receiver.
// See https://github.com/rclone/rclone/issues/2182 // See https://github.com/ncw/rclone/issues/2182
f.dirCache = tempF.dirCache f.dirCache = tempF.dirCache
f.root = tempF.root f.root = tempF.root
// return an error with an fs which points to the parent // return an error with an fs which points to the parent
@@ -356,7 +353,7 @@ func (f *Fs) getRootInfo() (rootInfo *acd.Folder, err error) {
// Return an Object from a path // Return an Object from a path
// //
// If it can't be found it returns the error fs.ErrorObjectNotFound. // If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *acd.Node) (fs.Object, error) { func (f *Fs) newObjectWithInfo(remote string, info *acd.Node) (fs.Object, error) {
o := &Object{ o := &Object{
fs: f, fs: f,
remote: remote, remote: remote,
@@ -365,7 +362,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *acd.Nod
// Set info but not meta // Set info but not meta
o.info = info o.info = info
} else { } else {
err := o.readMetaData(ctx) // reads info and meta, returning an error err := o.readMetaData() // reads info and meta, returning an error
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -375,18 +372,18 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *acd.Nod
// NewObject finds the Object at remote. If it can't be found // NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound. // it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil) return f.newObjectWithInfo(remote, nil)
} }
// FindLeaf finds a directory of name leaf in the folder with ID pathID // FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) { func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
//fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf) //fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
folder := acd.FolderFromId(pathID, f.c.Nodes) folder := acd.FolderFromId(pathID, f.c.Nodes)
var resp *http.Response var resp *http.Response
var subFolder *acd.Folder var subFolder *acd.Folder
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
subFolder, resp, err = folder.GetFolder(enc.FromStandardName(leaf)) subFolder, resp, err = folder.GetFolder(leaf)
return f.shouldRetry(resp, err) return f.shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
@@ -407,13 +404,13 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
} }
// CreateDir makes a directory with pathID as parent and name leaf // CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) { func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
//fmt.Printf("CreateDir(%q, %q)\n", pathID, leaf) //fmt.Printf("CreateDir(%q, %q)\n", pathID, leaf)
folder := acd.FolderFromId(pathID, f.c.Nodes) folder := acd.FolderFromId(pathID, f.c.Nodes)
var resp *http.Response var resp *http.Response
var info *acd.Folder var info *acd.Folder
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
info, resp, err = folder.CreateFolder(enc.FromStandardName(leaf)) info, resp, err = folder.CreateFolder(leaf)
return f.shouldRetry(resp, err) return f.shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
@@ -481,7 +478,6 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
if !hasValidParent { if !hasValidParent {
continue continue
} }
*node.Name = enc.ToStandardName(*node.Name)
// Store the nodes up in case we have to retry the listing // Store the nodes up in case we have to retry the listing
out = append(out, node) out = append(out, node)
} }
@@ -506,12 +502,12 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
// //
// This should return ErrDirNotFound if the directory isn't // This should return ErrDirNotFound if the directory isn't
// found. // found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(ctx, false) err = f.dirCache.FindRoot(false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
directoryID, err := f.dirCache.FindDir(ctx, dir, false) directoryID, err := f.dirCache.FindDir(dir, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -529,7 +525,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
d := fs.NewDir(remote, when).SetID(*node.Id) d := fs.NewDir(remote, when).SetID(*node.Id)
entries = append(entries, d) entries = append(entries, d)
case fileKind: case fileKind:
o, err := f.newObjectWithInfo(ctx, remote, node) o, err := f.newObjectWithInfo(remote, node)
if err != nil { if err != nil {
iErr = err iErr = err
return true return true
@@ -573,7 +569,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// At the end of large uploads. The speculation is that the timeout // At the end of large uploads. The speculation is that the timeout
// is waiting for the sha1 hashing to complete and the file may well // is waiting for the sha1 hashing to complete and the file may well
// be properly uploaded. // be properly uploaded.
func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) { func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
// Return if no error - all is well // Return if no error - all is well
if inErr == nil { if inErr == nil {
return false, inInfo, inErr return false, inInfo, inErr
@@ -613,7 +609,7 @@ func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader,
fs.Debugf(src, "Error detected after finished upload - waiting to see if object was uploaded correctly: %v (%q)", inErr, httpStatus) fs.Debugf(src, "Error detected after finished upload - waiting to see if object was uploaded correctly: %v (%q)", inErr, httpStatus)
remote := src.Remote() remote := src.Remote()
for i := 1; i <= retries; i++ { for i := 1; i <= retries; i++ {
o, err := f.NewObject(ctx, remote) o, err := f.NewObject(remote)
if err == fs.ErrorObjectNotFound { if err == fs.ErrorObjectNotFound {
fs.Debugf(src, "Object not found - waiting (%d/%d)", i, retries) fs.Debugf(src, "Object not found - waiting (%d/%d)", i, retries)
} else if err != nil { } else if err != nil {
@@ -639,7 +635,7 @@ func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader,
// Copy the reader in to the new object which is returned // Copy the reader in to the new object which is returned
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
remote := src.Remote() remote := src.Remote()
size := src.Size() size := src.Size()
// Temporary Object under construction // Temporary Object under construction
@@ -648,17 +644,17 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
remote: remote, remote: remote,
} }
// Check if object already exists // Check if object already exists
err := o.readMetaData(ctx) err := o.readMetaData()
switch err { switch err {
case nil: case nil:
return o, o.Update(ctx, in, src, options...) return o, o.Update(in, src, options...)
case fs.ErrorObjectNotFound: case fs.ErrorObjectNotFound:
// Not found so create it // Not found so create it
default: default:
return nil, err return nil, err
} }
// If not create it // If not create it
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true) leaf, directoryID, err := f.dirCache.FindRootAndPath(remote, true)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -671,10 +667,10 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
err = f.pacer.CallNoRetry(func() (bool, error) { err = f.pacer.CallNoRetry(func() (bool, error) {
start := time.Now() start := time.Now()
f.tokenRenewer.Start() f.tokenRenewer.Start()
info, resp, err = folder.Put(in, enc.FromStandardName(leaf)) info, resp, err = folder.Put(in, leaf)
f.tokenRenewer.Stop() f.tokenRenewer.Stop()
var ok bool var ok bool
ok, info, err = f.checkUpload(ctx, resp, in, src, info, err, time.Since(start)) ok, info, err = f.checkUpload(resp, in, src, info, err, time.Since(start))
if ok { if ok {
return false, nil return false, nil
} }
@@ -688,13 +684,13 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
} }
// Mkdir creates the container if it doesn't exist // Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error { func (f *Fs) Mkdir(dir string) error {
err := f.dirCache.FindRoot(ctx, true) err := f.dirCache.FindRoot(true)
if err != nil { if err != nil {
return err return err
} }
if dir != "" { if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true) _, err = f.dirCache.FindDir(dir, true)
} }
return err return err
} }
@@ -708,7 +704,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
// If it isn't possible then return fs.ErrorCantMove // If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
// go test -v -run '^Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|FsMove)$' // go test -v -run '^Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|FsMove)$'
srcObj, ok := src.(*Object) srcObj, ok := src.(*Object)
if !ok { if !ok {
@@ -717,15 +713,15 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
} }
// create the destination directory if necessary // create the destination directory if necessary
err := f.dirCache.FindRoot(ctx, true) err := f.dirCache.FindRoot(true)
if err != nil { if err != nil {
return nil, err return nil, err
} }
srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false) srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(srcObj.remote, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, remote, true) dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(remote, true)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -741,12 +737,12 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
srcErr, dstErr error srcErr, dstErr error
) )
for i := 1; i <= fs.Config.LowLevelRetries; i++ { for i := 1; i <= fs.Config.LowLevelRetries; i++ {
_, srcErr = srcObj.fs.NewObject(ctx, srcObj.remote) // try reading the object _, srcErr = srcObj.fs.NewObject(srcObj.remote) // try reading the object
if srcErr != nil && srcErr != fs.ErrorObjectNotFound { if srcErr != nil && srcErr != fs.ErrorObjectNotFound {
// exit if error on source // exit if error on source
return nil, srcErr return nil, srcErr
} }
dstObj, dstErr = f.NewObject(ctx, remote) dstObj, dstErr = f.NewObject(remote)
if dstErr != nil && dstErr != fs.ErrorObjectNotFound { if dstErr != nil && dstErr != fs.ErrorObjectNotFound {
// exit if error on dst // exit if error on dst
return nil, dstErr return nil, dstErr
@@ -775,7 +771,7 @@ func (f *Fs) DirCacheFlush() {
// If it isn't possible then return fs.ErrorCantDirMove // If it isn't possible then return fs.ErrorCantDirMove
// //
// If destination exists then return fs.ErrorDirExists // If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) { func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
srcFs, ok := src.(*Fs) srcFs, ok := src.(*Fs)
if !ok { if !ok {
fs.Debugf(src, "DirMove error: not same remote type") fs.Debugf(src, "DirMove error: not same remote type")
@@ -791,14 +787,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
} }
// find the root src directory // find the root src directory
err = srcFs.dirCache.FindRoot(ctx, false) err = srcFs.dirCache.FindRoot(false)
if err != nil { if err != nil {
return err return err
} }
// find the root dst directory // find the root dst directory
if dstRemote != "" { if dstRemote != "" {
err = f.dirCache.FindRoot(ctx, true) err = f.dirCache.FindRoot(true)
if err != nil { if err != nil {
return err return err
} }
@@ -813,14 +809,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
if dstRemote == "" { if dstRemote == "" {
findPath = f.root findPath = f.root
} }
dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, findPath, true) dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(findPath, true)
if err != nil { if err != nil {
return err return err
} }
// Check destination does not exist // Check destination does not exist
if dstRemote != "" { if dstRemote != "" {
_, err = f.dirCache.FindDir(ctx, dstRemote, false) _, err = f.dirCache.FindDir(dstRemote, false)
if err == fs.ErrorDirNotFound { if err == fs.ErrorDirNotFound {
// OK // OK
} else if err != nil { } else if err != nil {
@@ -836,7 +832,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
if srcRemote == "" { if srcRemote == "" {
srcDirectoryID, err = srcFs.dirCache.RootParentID() srcDirectoryID, err = srcFs.dirCache.RootParentID()
} else { } else {
_, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, findPath, false) _, srcDirectoryID, err = srcFs.dirCache.FindPath(findPath, false)
} }
if err != nil { if err != nil {
return err return err
@@ -844,7 +840,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
srcLeaf, _ := dircache.SplitPath(srcPath) srcLeaf, _ := dircache.SplitPath(srcPath)
// Find ID of src // Find ID of src
srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false) srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
if err != nil { if err != nil {
return err return err
} }
@@ -877,17 +873,17 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// purgeCheck remotes the root directory, if check is set then it // purgeCheck remotes the root directory, if check is set then it
// refuses to do so if it has anything in // refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { func (f *Fs) purgeCheck(dir string, check bool) error {
root := path.Join(f.root, dir) root := path.Join(f.root, dir)
if root == "" { if root == "" {
return errors.New("can't purge root directory") return errors.New("can't purge root directory")
} }
dc := f.dirCache dc := f.dirCache
err := dc.FindRoot(ctx, false) err := dc.FindRoot(false)
if err != nil { if err != nil {
return err return err
} }
rootID, err := dc.FindDir(ctx, dir, false) rootID, err := dc.FindDir(dir, false)
if err != nil { if err != nil {
return err return err
} }
@@ -936,8 +932,8 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
// Rmdir deletes the root folder // Rmdir deletes the root folder
// //
// Returns an error if it isn't empty // Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error { func (f *Fs) Rmdir(dir string) error {
return f.purgeCheck(ctx, dir, true) return f.purgeCheck(dir, true)
} }
// Precision return the precision of this Fs // Precision return the precision of this Fs
@@ -959,7 +955,7 @@ func (f *Fs) Hashes() hash.Set {
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
// If it isn't possible then return fs.ErrorCantCopy // If it isn't possible then return fs.ErrorCantCopy
//func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { //func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
// srcObj, ok := src.(*Object) // srcObj, ok := src.(*Object)
// if !ok { // if !ok {
// fs.Debugf(src, "Can't copy - not same remote type") // fs.Debugf(src, "Can't copy - not same remote type")
@@ -970,7 +966,7 @@ func (f *Fs) Hashes() hash.Set {
// if err != nil { // if err != nil {
// return nil, err // return nil, err
// } // }
// return f.NewObject(ctx, remote), nil // return f.NewObject(remote), nil
//} //}
// Purge deletes all the files and the container // Purge deletes all the files and the container
@@ -978,8 +974,8 @@ func (f *Fs) Hashes() hash.Set {
// Optional interface: Only implement this if you have a way of // Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the // deleting all the files quicker than just running Remove() on the
// result of List() // result of List()
func (f *Fs) Purge(ctx context.Context) error { func (f *Fs) Purge() error {
return f.purgeCheck(ctx, "", false) return f.purgeCheck("", false)
} }
// ------------------------------------------------------------ // ------------------------------------------------------------
@@ -1003,7 +999,7 @@ func (o *Object) Remote() string {
} }
// Hash returns the Md5sum of an object returning a lowercase hex string // Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.MD5 { if t != hash.MD5 {
return "", hash.ErrUnsupported return "", hash.ErrUnsupported
} }
@@ -1026,11 +1022,11 @@ func (o *Object) Size() int64 {
// it also sets the info // it also sets the info
// //
// If it can't be found it returns the error fs.ErrorObjectNotFound. // If it can't be found it returns the error fs.ErrorObjectNotFound.
func (o *Object) readMetaData(ctx context.Context) (err error) { func (o *Object) readMetaData() (err error) {
if o.info != nil { if o.info != nil {
return nil return nil
} }
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, o.remote, false) leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(o.remote, false)
if err != nil { if err != nil {
if err == fs.ErrorDirNotFound { if err == fs.ErrorDirNotFound {
return fs.ErrorObjectNotFound return fs.ErrorObjectNotFound
@@ -1041,7 +1037,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
var resp *http.Response var resp *http.Response
var info *acd.File var info *acd.File
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
info, resp, err = folder.GetFile(enc.FromStandardName(leaf)) info, resp, err = folder.GetFile(leaf)
return o.fs.shouldRetry(resp, err) return o.fs.shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
@@ -1059,8 +1055,8 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// //
// It attempts to read the objects mtime and if that isn't present the // It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers // LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time { func (o *Object) ModTime() time.Time {
err := o.readMetaData(ctx) err := o.readMetaData()
if err != nil { if err != nil {
fs.Debugf(o, "Failed to read metadata: %v", err) fs.Debugf(o, "Failed to read metadata: %v", err)
return time.Now() return time.Now()
@@ -1074,7 +1070,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
} }
// SetModTime sets the modification time of the local fs object // SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { func (o *Object) SetModTime(modTime time.Time) error {
// FIXME not implemented // FIXME not implemented
return fs.ErrorCantSetModTime return fs.ErrorCantSetModTime
} }
@@ -1085,7 +1081,7 @@ func (o *Object) Storable() bool {
} }
// Open an object for read // Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
bigObject := o.Size() >= int64(o.fs.opt.TempLinkThreshold) bigObject := o.Size() >= int64(o.fs.opt.TempLinkThreshold)
if bigObject { if bigObject {
fs.Debugf(o, "Downloading large object via tempLink") fs.Debugf(o, "Downloading large object via tempLink")
@@ -1097,7 +1093,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if !bigObject { if !bigObject {
in, resp, err = file.OpenHeaders(headers) in, resp, err = file.OpenHeaders(headers)
} else { } else {
in, resp, err = file.OpenTempURLHeaders(o.fs.noAuthClient, headers) in, resp, err = file.OpenTempURLHeaders(rest.ClientWithHeaderReset(o.fs.noAuthClient, headers), headers)
} }
return o.fs.shouldRetry(resp, err) return o.fs.shouldRetry(resp, err)
}) })
@@ -1107,7 +1103,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Update the object with the contents of the io.Reader, modTime and size // Update the object with the contents of the io.Reader, modTime and size
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
file := acd.File{Node: o.info} file := acd.File{Node: o.info}
var info *acd.File var info *acd.File
var resp *http.Response var resp *http.Response
@@ -1118,7 +1114,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
info, resp, err = file.Overwrite(in) info, resp, err = file.Overwrite(in)
o.fs.tokenRenewer.Stop() o.fs.tokenRenewer.Stop()
var ok bool var ok bool
ok, info, err = o.fs.checkUpload(ctx, resp, in, src, info, err, time.Since(start)) ok, info, err = o.fs.checkUpload(resp, in, src, info, err, time.Since(start))
if ok { if ok {
return false, nil return false, nil
} }
@@ -1143,7 +1139,7 @@ func (f *Fs) removeNode(info *acd.Node) error {
} }
// Remove an object // Remove an object
func (o *Object) Remove(ctx context.Context) error { func (o *Object) Remove() error {
return o.fs.removeNode(o.info) return o.fs.removeNode(o.info)
} }
@@ -1161,7 +1157,7 @@ func (f *Fs) restoreNode(info *acd.Node) (newInfo *acd.Node, err error) {
func (f *Fs) renameNode(info *acd.Node, newName string) (newInfo *acd.Node, err error) { func (f *Fs) renameNode(info *acd.Node, newName string) (newInfo *acd.Node, err error) {
var resp *http.Response var resp *http.Response
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
newInfo, resp, err = info.Rename(enc.FromStandardName(newName)) newInfo, resp, err = info.Rename(newName)
return f.shouldRetry(resp, err) return f.shouldRetry(resp, err)
}) })
return newInfo, err return newInfo, err
@@ -1265,7 +1261,7 @@ OnConflict:
} }
// MimeType of an Object if known, "" otherwise // MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string { func (o *Object) MimeType() string {
if o.info.ContentProperties != nil && o.info.ContentProperties.ContentType != nil { if o.info.ContentProperties != nil && o.info.ContentProperties.ContentType != nil {
return *o.info.ContentProperties.ContentType return *o.info.ContentProperties.ContentType
} }
@@ -1278,7 +1274,7 @@ func (o *Object) MimeType(ctx context.Context) string {
// Automatically restarts itself in case of unexpected behaviour of the remote. // Automatically restarts itself in case of unexpected behaviour of the remote.
// //
// Close the returned channel to stop being notified. // Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) { func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
checkpoint := f.opt.Checkpoint checkpoint := f.opt.Checkpoint
go func() { go func() {
@@ -1357,11 +1353,10 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoin
if len(node.Parents) > 0 { if len(node.Parents) > 0 {
if path, ok := f.dirCache.GetInv(node.Parents[0]); ok { if path, ok := f.dirCache.GetInv(node.Parents[0]); ok {
// and append the drive file name to compute the full file name // and append the drive file name to compute the full file name
name := enc.ToStandardName(*node.Name)
if len(path) > 0 { if len(path) > 0 {
path = path + "/" + name path = path + "/" + *node.Name
} else { } else {
path = name path = *node.Name
} }
// this will now clear the actual file too // this will now clear the actual file too
pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject}) pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})

View File

@@ -7,9 +7,9 @@ package amazonclouddrive_test
import ( import (
"testing" "testing"
"github.com/rclone/rclone/backend/amazonclouddrive" "github.com/ncw/rclone/backend/amazonclouddrive"
"github.com/rclone/rclone/fs" "github.com/ncw/rclone/fs"
"github.com/rclone/rclone/fstest/fstests" "github.com/ncw/rclone/fstest/fstests"
) )
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the remote

View File

@@ -1,6 +1,6 @@
// Package azureblob provides an interface to the Microsoft Azure blob object storage system // Package azureblob provides an interface to the Microsoft Azure blob object storage system
// +build !plan9,!solaris // +build !plan9,!solaris,go1.8
package azureblob package azureblob
@@ -16,6 +16,7 @@ import (
"net/http" "net/http"
"net/url" "net/url"
"path" "path"
"regexp"
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
@@ -23,18 +24,16 @@ import (
"github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-pipeline-go/pipeline"
"github.com/Azure/azure-storage-blob-go/azblob" "github.com/Azure/azure-storage-blob-go/azblob"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/pacer"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/pacer"
) )
const ( const (
@@ -54,15 +53,8 @@ const (
maxUploadCutoff = 256 * fs.MebiByte maxUploadCutoff = 256 * fs.MebiByte
defaultAccessTier = azblob.AccessTierNone defaultAccessTier = azblob.AccessTierNone
maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing) maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
// Default storage account, key and blob endpoint for emulator support,
// though it is a base64 key checked in here, it is publicly available secret.
emulatorAccount = "devstoreaccount1"
emulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1"
) )
const enc = encodings.AzureBlob
// Register with Fs // Register with Fs
func init() { func init() {
fs.Register(&fs.RegInfo{ fs.Register(&fs.RegInfo{
@@ -71,17 +63,13 @@ func init() {
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "account", Name: "account",
Help: "Storage Account Name (leave blank to use SAS URL or Emulator)", Help: "Storage Account Name (leave blank to use connection string or SAS URL)",
}, { }, {
Name: "key", Name: "key",
Help: "Storage Account Key (leave blank to use SAS URL or Emulator)", Help: "Storage Account Key (leave blank to use connection string or SAS URL)",
}, { }, {
Name: "sas_url", Name: "sas_url",
Help: "SAS URL for container level access only\n(leave blank if using account/key or Emulator)", Help: "SAS URL for container level access only\n(leave blank if using account/key or connection string)",
}, {
Name: "use_emulator",
Help: "Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint)",
Default: false,
}, { }, {
Name: "endpoint", Name: "endpoint",
Help: "Endpoint for the service\nLeave blank normally.", Help: "Endpoint for the service\nLeave blank normally.",
@@ -141,25 +129,23 @@ type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"` ChunkSize fs.SizeSuffix `config:"chunk_size"`
ListChunkSize uint `config:"list_chunk"` ListChunkSize uint `config:"list_chunk"`
AccessTier string `config:"access_tier"` AccessTier string `config:"access_tier"`
UseEmulator bool `config:"use_emulator"`
} }
// Fs represents a remote azure server // Fs represents a remote azure server
type Fs struct { type Fs struct {
name string // name of this remote name string // name of this remote
root string // the path we are working on if any root string // the path we are working on if any
opt Options // parsed config options opt Options // parsed config options
features *fs.Features // optional features features *fs.Features // optional features
client *http.Client // http client we are using client *http.Client // http client we are using
svcURL *azblob.ServiceURL // reference to serviceURL svcURL *azblob.ServiceURL // reference to serviceURL
cntURLcacheMu sync.Mutex // mutex to protect cntURLcache cntURL *azblob.ContainerURL // reference to containerURL
cntURLcache map[string]*azblob.ContainerURL // reference to containerURL per container container string // the container we are working on
rootContainer string // container part of root (if any) containerOKMu sync.Mutex // mutex to protect container OK
rootDirectory string // directory part of root (if any) containerOK bool // true if we have created the container
isLimited bool // if limited to one container containerDeleted bool // true if we have deleted the container
cache *bucket.Cache // cache for container creation status pacer *fs.Pacer // To pace and retry the API calls
pacer *fs.Pacer // To pace and retry the API calls uploadToken *pacer.TokenDispenser // control concurrency
uploadToken *pacer.TokenDispenser // control concurrency
} }
// Object describes a azure object // Object describes a azure object
@@ -183,18 +169,18 @@ func (f *Fs) Name() string {
// Root of the remote (as passed into NewFs) // Root of the remote (as passed into NewFs)
func (f *Fs) Root() string { func (f *Fs) Root() string {
return f.root if f.root == "" {
return f.container
}
return f.container + "/" + f.root
} }
// String converts this Fs to a string // String converts this Fs to a string
func (f *Fs) String() string { func (f *Fs) String() string {
if f.rootContainer == "" { if f.root == "" {
return fmt.Sprintf("Azure root") return fmt.Sprintf("Azure container %s", f.container)
} }
if f.rootDirectory == "" { return fmt.Sprintf("Azure container %s path %s", f.container, f.root)
return fmt.Sprintf("Azure container %s", f.rootContainer)
}
return fmt.Sprintf("Azure container %s path %s", f.rootContainer, f.rootDirectory)
} }
// Features returns the optional features of this Fs // Features returns the optional features of this Fs
@@ -202,24 +188,21 @@ func (f *Fs) Features() *fs.Features {
return f.features return f.features
} }
// parsePath parses a remote 'url' // Pattern to match a azure path
func parsePath(path string) (root string) { var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
root = strings.Trim(path, "/")
// parseParse parses a azure 'url'
func parsePath(path string) (container, directory string, err error) {
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = errors.Errorf("couldn't find container in azure path %q", path)
} else {
container, directory = parts[1], parts[2]
directory = strings.Trim(directory, "/")
}
return return
} }
// split returns container and containerPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (containerName, containerPath string) {
containerName, containerPath = bucket.Split(path.Join(f.root, rootRelativePath))
return enc.FromStandardName(containerName), enc.FromStandardPath(containerPath)
}
// split returns container and containerPath from the object
func (o *Object) split() (container, containerPath string) {
return o.fs.split(o.remote)
}
// validateAccessTier checks if azureblob supports user supplied tier // validateAccessTier checks if azureblob supports user supplied tier
func validateAccessTier(tier string) bool { func validateAccessTier(tier string) bool {
switch tier { switch tier {
@@ -312,9 +295,6 @@ func httpClientFactory(client *http.Client) pipeline.Factory {
// //
// this code was copied from azblob.NewPipeline // this code was copied from azblob.NewPipeline
func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline.Pipeline { func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline.Pipeline {
// Don't log stuff to syslog/Windows Event log
pipeline.SetForceLogEnabled(false)
// Closest to API goes first; closest to the wire goes last // Closest to API goes first; closest to the wire goes last
factories := []pipeline.Factory{ factories := []pipeline.Factory{
azblob.NewTelemetryPolicyFactory(o.Telemetry), azblob.NewTelemetryPolicyFactory(o.Telemetry),
@@ -327,15 +307,8 @@ func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline
return pipeline.NewPipeline(factories, pipeline.Options{HTTPSender: httpClientFactory(f.client), Log: o.Log}) return pipeline.NewPipeline(factories, pipeline.Options{HTTPSender: httpClientFactory(f.client), Log: o.Log})
} }
// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
f.root = parsePath(root)
f.rootContainer, f.rootDirectory = bucket.Split(f.root)
}
// NewFs constructs an Fs from the path, container:path // NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
// Parse config into Options struct // Parse config into Options struct
opt := new(Options) opt := new(Options)
err := configstruct.Set(m, opt) err := configstruct.Set(m, opt)
@@ -354,6 +327,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if opt.ListChunkSize > maxListChunkSize { if opt.ListChunkSize > maxListChunkSize {
return nil, errors.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize) return nil, errors.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
} }
container, directory, err := parsePath(root)
if err != nil {
return nil, err
}
if opt.Endpoint == "" { if opt.Endpoint == "" {
opt.Endpoint = storageDefaultBaseURL opt.Endpoint = storageDefaultBaseURL
} }
@@ -368,38 +345,26 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f := &Fs{ f := &Fs{
name: name, name: name,
opt: *opt, opt: *opt,
container: container,
root: directory,
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers), uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
client: fshttp.NewClient(fs.Config), client: fshttp.NewClient(fs.Config),
cache: bucket.NewCache(),
cntURLcache: make(map[string]*azblob.ContainerURL, 1),
} }
f.setRoot(root)
f.features = (&fs.Features{ f.features = (&fs.Features{
ReadMimeType: true, ReadMimeType: true,
WriteMimeType: true, WriteMimeType: true,
BucketBased: true, BucketBased: true,
BucketBasedRootOK: true, SetTier: true,
SetTier: true, GetTier: true,
GetTier: true,
}).Fill(f) }).Fill(f)
var ( var (
u *url.URL u *url.URL
serviceURL azblob.ServiceURL serviceURL azblob.ServiceURL
containerURL azblob.ContainerURL
) )
switch { switch {
case opt.UseEmulator:
credential, err := azblob.NewSharedKeyCredential(emulatorAccount, emulatorAccountKey)
if err != nil {
return nil, errors.Wrapf(err, "Failed to parse credentials")
}
u, err = url.Parse(emulatorBlobEndpoint)
if err != nil {
return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
}
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
serviceURL = azblob.NewServiceURL(*u, pipeline)
case opt.Account != "" && opt.Key != "": case opt.Account != "" && opt.Key != "":
credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key) credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
if err != nil { if err != nil {
@@ -412,6 +377,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
} }
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}}) pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
serviceURL = azblob.NewServiceURL(*u, pipeline) serviceURL = azblob.NewServiceURL(*u, pipeline)
containerURL = serviceURL.NewContainerURL(container)
case opt.SASURL != "": case opt.SASURL != "":
u, err = url.Parse(opt.SASURL) u, err = url.Parse(opt.SASURL)
if err != nil { if err != nil {
@@ -422,30 +388,38 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Check if we have container level SAS or account level sas // Check if we have container level SAS or account level sas
parts := azblob.NewBlobURLParts(*u) parts := azblob.NewBlobURLParts(*u)
if parts.ContainerName != "" { if parts.ContainerName != "" {
if f.rootContainer != "" && parts.ContainerName != f.rootContainer { if container != "" && parts.ContainerName != container {
return nil, errors.New("Container name in SAS URL and container provided in command do not match") return nil, errors.New("Container name in SAS URL and container provided in command do not match")
} }
containerURL := azblob.NewContainerURL(*u, pipeline)
f.cntURLcache[parts.ContainerName] = &containerURL f.container = parts.ContainerName
f.isLimited = true containerURL = azblob.NewContainerURL(*u, pipeline)
} else { } else {
serviceURL = azblob.NewServiceURL(*u, pipeline) serviceURL = azblob.NewServiceURL(*u, pipeline)
containerURL = serviceURL.NewContainerURL(container)
} }
default: default:
return nil, errors.New("Need account+key or connectionString or sasURL") return nil, errors.New("Need account+key or connectionString or sasURL")
} }
f.svcURL = &serviceURL f.svcURL = &serviceURL
f.cntURL = &containerURL
if f.rootContainer != "" && f.rootDirectory != "" { if f.root != "" {
f.root += "/"
// Check to see if the (container,directory) is actually an existing file // Check to see if the (container,directory) is actually an existing file
oldRoot := f.root oldRoot := f.root
newRoot, leaf := path.Split(oldRoot) remote := path.Base(directory)
f.setRoot(newRoot) f.root = path.Dir(directory)
_, err := f.NewObject(ctx, leaf) if f.root == "." {
f.root = ""
} else {
f.root += "/"
}
_, err := f.NewObject(remote)
if err != nil { if err != nil {
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile { if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
// File doesn't exist or is a directory so return old f // File doesn't exist or is a directory so return old f
f.setRoot(oldRoot) f.root = oldRoot
return f, nil return f, nil
} }
return nil, err return nil, err
@@ -456,20 +430,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return f, nil return f, nil
} }
// return the container URL for the container passed in
func (f *Fs) cntURL(container string) (containerURL *azblob.ContainerURL) {
f.cntURLcacheMu.Lock()
defer f.cntURLcacheMu.Unlock()
var ok bool
if containerURL, ok = f.cntURLcache[container]; !ok {
cntURL := f.svcURL.NewContainerURL(container)
containerURL = &cntURL
f.cntURLcache[container] = containerURL
}
return containerURL
}
// Return an Object from a path // Return an Object from a path
// //
// If it can't be found it returns the error fs.ErrorObjectNotFound. // If it can't be found it returns the error fs.ErrorObjectNotFound.
@@ -494,13 +454,13 @@ func (f *Fs) newObjectWithInfo(remote string, info *azblob.BlobItem) (fs.Object,
// NewObject finds the Object at remote. If it can't be found // NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound. // it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil) return f.newObjectWithInfo(remote, nil)
} }
// getBlobReference creates an empty blob reference with no metadata // getBlobReference creates an empty blob reference with no metadata
func (f *Fs) getBlobReference(container, containerPath string) azblob.BlobURL { func (f *Fs) getBlobReference(remote string) azblob.BlobURL {
return f.cntURL(container).NewBlobURL(containerPath) return f.cntURL.NewBlobURL(f.root + remote)
} }
// updateMetadataWithModTime adds the modTime passed in to o.meta. // updateMetadataWithModTime adds the modTime passed in to o.meta.
@@ -536,18 +496,16 @@ type listFn func(remote string, object *azblob.BlobItem, isDirectory bool) error
// the container and root supplied // the container and root supplied
// //
// dir is the starting directory, "" for root // dir is the starting directory, "" for root
// func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
// The remote has prefix removed from it and if addContainer is set then f.containerOKMu.Lock()
// it adds the container to the start. deleted := f.containerDeleted
func (f *Fs) list(ctx context.Context, container, directory, prefix string, addContainer bool, recurse bool, maxResults uint, fn listFn) error { f.containerOKMu.Unlock()
if f.cache.IsDeleted(container) { if deleted {
return fs.ErrorDirNotFound return fs.ErrorDirNotFound
} }
if prefix != "" { root := f.root
prefix += "/" if dir != "" {
} root += dir + "/"
if directory != "" {
directory += "/"
} }
delimiter := "" delimiter := ""
if !recurse { if !recurse {
@@ -562,14 +520,16 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
UncommittedBlobs: false, UncommittedBlobs: false,
Deleted: false, Deleted: false,
}, },
Prefix: directory, Prefix: root,
MaxResults: int32(maxResults), MaxResults: int32(maxResults),
} }
ctx := context.Background()
directoryMarkers := map[string]struct{}{}
for marker := (azblob.Marker{}); marker.NotDone(); { for marker := (azblob.Marker{}); marker.NotDone(); {
var response *azblob.ListBlobsHierarchySegmentResponse var response *azblob.ListBlobsHierarchySegmentResponse
err := f.pacer.Call(func() (bool, error) { err := f.pacer.Call(func() (bool, error) {
var err error var err error
response, err = f.cntURL(container).ListBlobsHierarchySegment(ctx, marker, delimiter, options) response, err = f.cntURL.ListBlobsHierarchySegment(ctx, marker, delimiter, options)
return f.shouldRetry(err) return f.shouldRetry(err)
}) })
@@ -582,24 +542,33 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
} }
// Advance marker to next // Advance marker to next
marker = response.NextMarker marker = response.NextMarker
for i := range response.Segment.BlobItems { for i := range response.Segment.BlobItems {
file := &response.Segment.BlobItems[i] file := &response.Segment.BlobItems[i]
// Finish if file name no longer has prefix // Finish if file name no longer has prefix
// if prefix != "" && !strings.HasPrefix(file.Name, prefix) { // if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
// return nil // return nil
// } // }
remote := enc.ToStandardPath(file.Name) if !strings.HasPrefix(file.Name, f.root) {
if !strings.HasPrefix(remote, prefix) { fs.Debugf(f, "Odd name received %q", file.Name)
fs.Debugf(f, "Odd name received %q", remote)
continue continue
} }
remote = remote[len(prefix):] remote := file.Name[len(f.root):]
if isDirectoryMarker(*file.Properties.ContentLength, file.Metadata, remote) { if isDirectoryMarker(*file.Properties.ContentLength, file.Metadata, remote) {
if strings.HasSuffix(remote, "/") {
remote = remote[:len(remote)-1]
}
err = fn(remote, file, true)
if err != nil {
return err
}
// Keep track of directory markers. If recursing then
// there will be no Prefixes so no need to keep track
if !recurse {
directoryMarkers[remote] = struct{}{}
}
continue // skip directory marker continue // skip directory marker
} }
if addContainer {
remote = path.Join(container, remote)
}
// Send object // Send object
err = fn(remote, file, false) err = fn(remote, file, false)
if err != nil { if err != nil {
@@ -609,14 +578,14 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
// Send the subdirectories // Send the subdirectories
for _, remote := range response.Segment.BlobPrefixes { for _, remote := range response.Segment.BlobPrefixes {
remote := strings.TrimRight(remote.Name, "/") remote := strings.TrimRight(remote.Name, "/")
remote = enc.ToStandardPath(remote) if !strings.HasPrefix(remote, f.root) {
if !strings.HasPrefix(remote, prefix) {
fs.Debugf(f, "Odd directory name received %q", remote) fs.Debugf(f, "Odd directory name received %q", remote)
continue continue
} }
remote = remote[len(prefix):] remote = remote[len(f.root):]
if addContainer { // Don't send if already sent as a directory marker
remote = path.Join(container, remote) if _, found := directoryMarkers[remote]; found {
continue
} }
// Send object // Send object
err = fn(remote, nil, true) err = fn(remote, nil, true)
@@ -641,9 +610,19 @@ func (f *Fs) itemToDirEntry(remote string, object *azblob.BlobItem, isDirectory
return o, nil return o, nil
} }
// mark the container as being OK
func (f *Fs) markContainerOK() {
if f.container != "" {
f.containerOKMu.Lock()
f.containerOK = true
f.containerDeleted = false
f.containerOKMu.Unlock()
}
}
// listDir lists a single directory // listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) { func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
err = f.list(ctx, container, directory, prefix, addContainer, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error { err = f.list(dir, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory) entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil { if err != nil {
return err return err
@@ -657,24 +636,17 @@ func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, a
return nil, err return nil, err
} }
// container must be present if listing succeeded // container must be present if listing succeeded
f.cache.MarkOK(container) f.markContainerOK()
return entries, nil return entries, nil
} }
// listContainers returns all the containers to out // listContainers returns all the containers to out
func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err error) { func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
if f.isLimited { if dir != "" {
f.cntURLcacheMu.Lock() return nil, fs.ErrorListBucketRequired
for container := range f.cntURLcache {
d := fs.NewDir(container, time.Time{})
entries = append(entries, d)
}
f.cntURLcacheMu.Unlock()
return entries, nil
} }
err = f.listContainersToFn(func(container *azblob.ContainerItem) error { err = f.listContainersToFn(func(container *azblob.ContainerItem) error {
d := fs.NewDir(enc.ToStandardName(container.Name), container.Properties.LastModified) d := fs.NewDir(container.Name, container.Properties.LastModified)
f.cache.MarkOK(container.Name)
entries = append(entries, d) entries = append(entries, d)
return nil return nil
}) })
@@ -693,15 +665,11 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
// //
// This should return ErrDirNotFound if the directory isn't // This should return ErrDirNotFound if the directory isn't
// found. // found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
container, directory := f.split(dir) if f.container == "" {
if container == "" { return f.listContainers(dir)
if directory != "" {
return nil, fs.ErrorListBucketRequired
}
return f.listContainers(ctx)
} }
return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "") return f.listDir(dir)
} }
// ListR lists the objects and directories of the Fs starting // ListR lists the objects and directories of the Fs starting
@@ -720,44 +688,23 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// //
// Don't implement this unless you have a more efficient way // Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal. // of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
container, directory := f.split(dir) if f.container == "" {
return fs.ErrorListBucketRequired
}
list := walk.NewListRHelper(callback) list := walk.NewListRHelper(callback)
listR := func(container, directory, prefix string, addContainer bool) error { err = f.list(dir, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
return f.list(ctx, container, directory, prefix, addContainer, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error { entry, err := f.itemToDirEntry(remote, object, isDirectory)
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
return list.Add(entry)
})
}
if container == "" {
entries, err := f.listContainers(ctx)
if err != nil { if err != nil {
return err return err
} }
for _, entry := range entries { return list.Add(entry)
err = list.Add(entry) })
if err != nil { if err != nil {
return err return err
}
container := entry.Remote()
err = listR(container, "", f.rootDirectory, true)
if err != nil {
return err
}
// container must be present if listing succeeded
f.cache.MarkOK(container)
}
} else {
err = listR(container, directory, f.rootDirectory, f.rootContainer == "")
if err != nil {
return err
}
// container must be present if listing succeeded
f.cache.MarkOK(container)
} }
// container must be present if listing succeeded
f.markContainerOK()
return list.Flush() return list.Flush()
} }
@@ -798,52 +745,95 @@ func (f *Fs) listContainersToFn(fn listContainerFn) error {
// Copy the reader in to the new object which is returned // Copy the reader in to the new object which is returned
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction // Temporary Object under construction
fs := &Object{ fs := &Object{
fs: f, fs: f,
remote: src.Remote(), remote: src.Remote(),
} }
return fs, fs.Update(ctx, in, src, options...) return fs, fs.Update(in, src, options...)
}
// Check if the container exists
//
// NB this can return incorrect results if called immediately after container deletion
func (f *Fs) dirExists() (bool, error) {
options := azblob.ListBlobsSegmentOptions{
Details: azblob.BlobListingDetails{
Copy: false,
Metadata: false,
Snapshots: false,
UncommittedBlobs: false,
Deleted: false,
},
MaxResults: 1,
}
err := f.pacer.Call(func() (bool, error) {
ctx := context.Background()
_, err := f.cntURL.ListBlobsHierarchySegment(ctx, azblob.Marker{}, "", options)
return f.shouldRetry(err)
})
if err == nil {
return true, nil
}
// Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
return false, nil
}
return false, err
} }
// Mkdir creates the container if it doesn't exist // Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error { func (f *Fs) Mkdir(dir string) error {
container, _ := f.split(dir) f.containerOKMu.Lock()
return f.makeContainer(ctx, container) defer f.containerOKMu.Unlock()
} if f.containerOK {
return nil
}
if !f.containerDeleted {
exists, err := f.dirExists()
if err == nil {
f.containerOK = exists
}
if err != nil || exists {
return err
}
}
// makeContainer creates the container if it doesn't exist // now try to create the container
func (f *Fs) makeContainer(ctx context.Context, container string) error { err := f.pacer.Call(func() (bool, error) {
return f.cache.Create(container, func() error { ctx := context.Background()
// now try to create the container _, err := f.cntURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
return f.pacer.Call(func() (bool, error) { if err != nil {
_, err := f.cntURL(container).Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone) if storageErr, ok := err.(azblob.StorageError); ok {
if err != nil { switch storageErr.ServiceCode() {
if storageErr, ok := err.(azblob.StorageError); ok { case azblob.ServiceCodeContainerAlreadyExists:
switch storageErr.ServiceCode() { f.containerOK = true
case azblob.ServiceCodeContainerAlreadyExists: return false, nil
return false, nil case azblob.ServiceCodeContainerBeingDeleted:
case azblob.ServiceCodeContainerBeingDeleted: // From https://docs.microsoft.com/en-us/rest/api/storageservices/delete-container
// From https://docs.microsoft.com/en-us/rest/api/storageservices/delete-container // When a container is deleted, a container with the same name cannot be created
// When a container is deleted, a container with the same name cannot be created // for at least 30 seconds; the container may not be available for more than 30
// for at least 30 seconds; the container may not be available for more than 30 // seconds if the service is still processing the request.
// seconds if the service is still processing the request. time.Sleep(6 * time.Second) // default 10 retries will be 60 seconds
time.Sleep(6 * time.Second) // default 10 retries will be 60 seconds f.containerDeleted = true
f.cache.MarkDeleted(container) return true, err
return true, err
}
} }
} }
return f.shouldRetry(err) }
}) return f.shouldRetry(err)
}, nil) })
if err == nil {
f.containerOK = true
f.containerDeleted = false
}
return errors.Wrap(err, "failed to make container")
} }
// isEmpty checks to see if a given (container, directory) is empty and returns an error if not // isEmpty checks to see if a given directory is empty and returns an error if not
func (f *Fs) isEmpty(ctx context.Context, container, directory string) (err error) { func (f *Fs) isEmpty(dir string) (err error) {
empty := true empty := true
err = f.list(ctx, container, directory, f.rootDirectory, f.rootContainer == "", true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error { err = f.list(dir, true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
empty = false empty = false
return nil return nil
}) })
@@ -858,42 +848,47 @@ func (f *Fs) isEmpty(ctx context.Context, container, directory string) (err erro
// deleteContainer deletes the container. It can delete a full // deleteContainer deletes the container. It can delete a full
// container so use isEmpty if you don't want that. // container so use isEmpty if you don't want that.
func (f *Fs) deleteContainer(ctx context.Context, container string) error { func (f *Fs) deleteContainer() error {
return f.cache.Remove(container, func() error { f.containerOKMu.Lock()
options := azblob.ContainerAccessConditions{} defer f.containerOKMu.Unlock()
return f.pacer.Call(func() (bool, error) { options := azblob.ContainerAccessConditions{}
_, err := f.cntURL(container).GetProperties(ctx, azblob.LeaseAccessConditions{}) ctx := context.Background()
if err == nil { err := f.pacer.Call(func() (bool, error) {
_, err = f.cntURL(container).Delete(ctx, options) _, err := f.cntURL.GetProperties(ctx, azblob.LeaseAccessConditions{})
} if err == nil {
_, err = f.cntURL.Delete(ctx, options)
}
if err != nil { if err != nil {
// Check http error code along with service code, current SDK doesn't populate service code correctly sometimes // Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) { if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
return false, fs.ErrorDirNotFound return false, fs.ErrorDirNotFound
}
return f.shouldRetry(err)
} }
return f.shouldRetry(err) return f.shouldRetry(err)
}) }
return f.shouldRetry(err)
}) })
if err == nil {
f.containerOK = false
f.containerDeleted = true
}
return errors.Wrap(err, "failed to delete container")
} }
// Rmdir deletes the container if the fs is at the root // Rmdir deletes the container if the fs is at the root
// //
// Returns an error if it isn't empty // Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error { func (f *Fs) Rmdir(dir string) error {
container, directory := f.split(dir) err := f.isEmpty(dir)
if container == "" || directory != "" {
return nil
}
err := f.isEmpty(ctx, container, directory)
if err != nil { if err != nil {
return err return err
} }
return f.deleteContainer(ctx, container) if f.root != "" || dir != "" {
return nil
}
return f.deleteContainer()
} }
// Precision of the remote // Precision of the remote
@@ -907,14 +902,13 @@ func (f *Fs) Hashes() hash.Set {
} }
// Purge deletes all the files and directories including the old versions. // Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge(ctx context.Context) error { func (f *Fs) Purge() error {
dir := "" // forward compat! dir := "" // forward compat!
container, directory := f.split(dir) if f.root != "" || dir != "" {
if container == "" || directory != "" { // Delegate to caller if not root container
// Delegate to caller if not root of a container
return fs.ErrorCantPurge return fs.ErrorCantPurge
} }
return f.deleteContainer(ctx, container) return f.deleteContainer()
} }
// Copy src to this remote using server side copy operations. // Copy src to this remote using server side copy operations.
@@ -926,9 +920,8 @@ func (f *Fs) Purge(ctx context.Context) error {
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
// If it isn't possible then return fs.ErrorCantCopy // If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
dstContainer, dstPath := f.split(remote) err := f.Mkdir("")
err := f.makeContainer(ctx, dstContainer)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -937,7 +930,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't copy - not same remote type") fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy return nil, fs.ErrorCantCopy
} }
dstBlobURL := f.getBlobReference(dstContainer, dstPath) dstBlobURL := f.getBlobReference(remote)
srcBlobURL := srcObj.getBlobReference() srcBlobURL := srcObj.getBlobReference()
source, err := url.Parse(srcBlobURL.String()) source, err := url.Parse(srcBlobURL.String())
@@ -946,6 +939,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
} }
options := azblob.BlobAccessConditions{} options := azblob.BlobAccessConditions{}
ctx := context.Background()
var startCopy *azblob.BlobStartCopyFromURLResponse var startCopy *azblob.BlobStartCopyFromURLResponse
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
@@ -966,7 +960,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
copyStatus = getMetadata.CopyStatus() copyStatus = getMetadata.CopyStatus()
} }
return f.NewObject(ctx, remote) return f.NewObject(remote)
} }
// ------------------------------------------------------------ // ------------------------------------------------------------
@@ -990,7 +984,7 @@ func (o *Object) Remote() string {
} }
// Hash returns the MD5 of an object returning a lowercase hex string // Hash returns the MD5 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.MD5 { if t != hash.MD5 {
return "", hash.ErrUnsupported return "", hash.ErrUnsupported
} }
@@ -1070,8 +1064,7 @@ func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItem) (err error) {
// getBlobReference creates an empty blob reference with no metadata // getBlobReference creates an empty blob reference with no metadata
func (o *Object) getBlobReference() azblob.BlobURL { func (o *Object) getBlobReference() azblob.BlobURL {
container, directory := o.split() return o.fs.getBlobReference(o.remote)
return o.fs.getBlobReference(container, directory)
} }
// clearMetaData clears enough metadata so readMetaData will re-read it // clearMetaData clears enough metadata so readMetaData will re-read it
@@ -1123,7 +1116,7 @@ func (o *Object) parseTimeString(timeString string) (err error) {
fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err) fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
return err return err
} }
o.modTime = time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC() o.modTime = time.Unix(unixMilliseconds/1E3, (unixMilliseconds%1E3)*1E6).UTC()
return nil return nil
} }
@@ -1131,14 +1124,14 @@ func (o *Object) parseTimeString(timeString string) (err error) {
// //
// It attempts to read the objects mtime and if that isn't present the // It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers // LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) (result time.Time) { func (o *Object) ModTime() (result time.Time) {
// The error is logged in readMetaData // The error is logged in readMetaData
_ = o.readMetaData() _ = o.readMetaData()
return o.modTime return o.modTime
} }
// SetModTime sets the modification time of the local fs object // SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { func (o *Object) SetModTime(modTime time.Time) error {
// Make sure o.meta is not nil // Make sure o.meta is not nil
if o.meta == nil { if o.meta == nil {
o.meta = make(map[string]string, 1) o.meta = make(map[string]string, 1)
@@ -1147,6 +1140,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
o.meta[modTimeKey] = modTime.Format(timeFormatOut) o.meta[modTimeKey] = modTime.Format(timeFormatOut)
blob := o.getBlobReference() blob := o.getBlobReference()
ctx := context.Background()
err := o.fs.pacer.Call(func() (bool, error) { err := o.fs.pacer.Call(func() (bool, error) {
_, err := blob.SetMetadata(ctx, o.meta, azblob.BlobAccessConditions{}) _, err := blob.SetMetadata(ctx, o.meta, azblob.BlobAccessConditions{})
return o.fs.shouldRetry(err) return o.fs.shouldRetry(err)
@@ -1164,14 +1158,14 @@ func (o *Object) Storable() bool {
} }
// Open an object for read // Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
// Offset and Count for range download // Offset and Count for range download
var offset int64 var offset int64
var count int64 var count int64
if o.AccessTier() == azblob.AccessTierArchive { if o.AccessTier() == azblob.AccessTierArchive {
return nil, errors.Errorf("Blob in archive tier, you need to set tier to hot or cool first") return nil, errors.Errorf("Blob in archive tier, you need to set tier to hot or cool first")
} }
fs.FixRangeOption(options, o.size)
for _, option := range options { for _, option := range options {
switch x := option.(type) { switch x := option.(type) {
case *fs.RangeOption: case *fs.RangeOption:
@@ -1188,6 +1182,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
} }
} }
blob := o.getBlobReference() blob := o.getBlobReference()
ctx := context.Background()
ac := azblob.BlobAccessConditions{} ac := azblob.BlobAccessConditions{}
var dowloadResponse *azblob.DownloadResponse var dowloadResponse *azblob.DownloadResponse
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
@@ -1376,27 +1371,26 @@ outer:
// Update the object with the contents of the io.Reader, modTime and size // Update the object with the contents of the io.Reader, modTime and size
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
container, _ := o.split() err = o.fs.Mkdir("")
err = o.fs.makeContainer(ctx, container)
if err != nil { if err != nil {
return err return err
} }
size := src.Size() size := src.Size()
// Update Mod time // Update Mod time
o.updateMetadataWithModTime(src.ModTime(ctx)) o.updateMetadataWithModTime(src.ModTime())
if err != nil { if err != nil {
return err return err
} }
blob := o.getBlobReference() blob := o.getBlobReference()
httpHeaders := azblob.BlobHTTPHeaders{} httpHeaders := azblob.BlobHTTPHeaders{}
httpHeaders.ContentType = fs.MimeType(ctx, o) httpHeaders.ContentType = fs.MimeType(o)
// Compute the Content-MD5 of the file, for multiparts uploads it // Compute the Content-MD5 of the file, for multiparts uploads it
// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header // will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
// Note: If multipart, a MD5 checksum will also be computed for each uploaded block // Note: If multipart, a MD5 checksum will also be computed for each uploaded block
// in order to validate its integrity during transport // in order to validate its integrity during transport
if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" { if sourceMD5, _ := src.Hash(hash.MD5); sourceMD5 != "" {
sourceMD5bytes, err := hex.DecodeString(sourceMD5) sourceMD5bytes, err := hex.DecodeString(sourceMD5)
if err == nil { if err == nil {
httpHeaders.ContentMD5 = sourceMD5bytes httpHeaders.ContentMD5 = sourceMD5bytes
@@ -1414,13 +1408,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// FIXME Until https://github.com/Azure/azure-storage-blob-go/pull/75 // FIXME Until https://github.com/Azure/azure-storage-blob-go/pull/75
// is merged the SDK can't upload a single blob of exactly the chunk // is merged the SDK can't upload a single blob of exactly the chunk
// size, so upload with a multpart upload to work around. // size, so upload with a multpart upload to work around.
// See: https://github.com/rclone/rclone/issues/2653 // See: https://github.com/ncw/rclone/issues/2653
multipartUpload := size >= int64(o.fs.opt.UploadCutoff) multipartUpload := size >= int64(o.fs.opt.UploadCutoff)
if size == int64(o.fs.opt.ChunkSize) { if size == int64(o.fs.opt.ChunkSize) {
multipartUpload = true multipartUpload = true
fs.Debugf(o, "Setting multipart upload for file of chunk size (%d) to work around SDK bug", size) fs.Debugf(o, "Setting multipart upload for file of chunk size (%d) to work around SDK bug", size)
} }
ctx := context.Background()
// Don't retry, return a retry error instead // Don't retry, return a retry error instead
err = o.fs.pacer.CallNoRetry(func() (bool, error) { err = o.fs.pacer.CallNoRetry(func() (bool, error) {
if multipartUpload { if multipartUpload {
@@ -1453,10 +1448,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} }
// Remove an object // Remove an object
func (o *Object) Remove(ctx context.Context) error { func (o *Object) Remove() error {
blob := o.getBlobReference() blob := o.getBlobReference()
snapShotOptions := azblob.DeleteSnapshotsOptionNone snapShotOptions := azblob.DeleteSnapshotsOptionNone
ac := azblob.BlobAccessConditions{} ac := azblob.BlobAccessConditions{}
ctx := context.Background()
return o.fs.pacer.Call(func() (bool, error) { return o.fs.pacer.Call(func() (bool, error) {
_, err := blob.Delete(ctx, snapShotOptions, ac) _, err := blob.Delete(ctx, snapShotOptions, ac)
return o.fs.shouldRetry(err) return o.fs.shouldRetry(err)
@@ -1464,7 +1460,7 @@ func (o *Object) Remove(ctx context.Context) error {
} }
// MimeType of an Object if known, "" otherwise // MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string { func (o *Object) MimeType() string {
return o.mimeType return o.mimeType
} }
@@ -1516,6 +1512,4 @@ var (
_ fs.ListRer = &Fs{} _ fs.ListRer = &Fs{}
_ fs.Object = &Object{} _ fs.Object = &Object{}
_ fs.MimeTyper = &Object{} _ fs.MimeTyper = &Object{}
_ fs.GetTierer = &Object{}
_ fs.SetTierer = &Object{}
) )

View File

@@ -1,4 +1,4 @@
// +build !plan9,!solaris // +build !plan9,!solaris,go1.8
package azureblob package azureblob

View File

@@ -1,14 +1,14 @@
// Test AzureBlob filesystem interface // Test AzureBlob filesystem interface
// +build !plan9,!solaris // +build !plan9,!solaris,go1.8
package azureblob package azureblob
import ( import (
"testing" "testing"
"github.com/rclone/rclone/fs" "github.com/ncw/rclone/fs"
"github.com/rclone/rclone/fstest/fstests" "github.com/ncw/rclone/fstest/fstests"
) )
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the remote

View File

@@ -1,6 +1,6 @@
// Build for azureblob for unsupported platforms to stop go complaining // Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files " // about "no buildable Go source files "
// +build plan9 solaris // +build plan9 solaris !go1.8
package azureblob package azureblob

View File

@@ -7,7 +7,7 @@ import (
"strings" "strings"
"time" "time"
"github.com/rclone/rclone/fs/fserrors" "github.com/ncw/rclone/fs/fserrors"
) )
// Error describes a B2 error response // Error describes a B2 error response
@@ -50,7 +50,7 @@ type Timestamp time.Time
// MarshalJSON turns a Timestamp into JSON (in UTC) // MarshalJSON turns a Timestamp into JSON (in UTC)
func (t *Timestamp) MarshalJSON() (out []byte, err error) { func (t *Timestamp) MarshalJSON() (out []byte, err error) {
timestamp := (*time.Time)(t).UTC().UnixNano() timestamp := (*time.Time)(t).UTC().UnixNano()
return []byte(strconv.FormatInt(timestamp/1e6, 10)), nil return []byte(strconv.FormatInt(timestamp/1E6, 10)), nil
} }
// UnmarshalJSON turns JSON into a Timestamp // UnmarshalJSON turns JSON into a Timestamp
@@ -59,7 +59,7 @@ func (t *Timestamp) UnmarshalJSON(data []byte) error {
if err != nil { if err != nil {
return err return err
} }
*t = Timestamp(time.Unix(timestamp/1e3, (timestamp%1e3)*1e6).UTC()) *t = Timestamp(time.Unix(timestamp/1E3, (timestamp%1E3)*1E6).UTC())
return nil return nil
} }
@@ -189,21 +189,6 @@ type GetUploadURLResponse struct {
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_file. AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_file.
} }
// GetDownloadAuthorizationRequest is passed to b2_get_download_authorization
type GetDownloadAuthorizationRequest struct {
BucketID string `json:"bucketId"` // The ID of the bucket that you want to upload to.
FileNamePrefix string `json:"fileNamePrefix"` // The file name prefix of files the download authorization token will allow access to.
ValidDurationInSeconds int64 `json:"validDurationInSeconds"` // The number of seconds before the authorization token will expire. The minimum value is 1 second. The maximum value is 604800 which is one week in seconds.
B2ContentDisposition string `json:"b2ContentDisposition,omitempty"` // optional - If this is present, download requests using the returned authorization must include the same value for b2ContentDisposition.
}
// GetDownloadAuthorizationResponse is received from b2_get_download_authorization
type GetDownloadAuthorizationResponse struct {
BucketID string `json:"bucketId"` // The unique ID of the bucket.
FileNamePrefix string `json:"fileNamePrefix"` // The file name prefix of files the download authorization token will allow access to.
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when downloading files, see b2_download_file_by_name.
}
// FileInfo is received from b2_upload_file, b2_get_file_info and b2_finish_large_file // FileInfo is received from b2_upload_file, b2_get_file_info and b2_finish_large_file
type FileInfo struct { type FileInfo struct {
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version. ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
@@ -326,14 +311,3 @@ type CancelLargeFileResponse struct {
AccountID string `json:"accountId"` // The identifier for the account. AccountID string `json:"accountId"` // The identifier for the account.
BucketID string `json:"bucketId"` // The unique ID of the bucket. BucketID string `json:"bucketId"` // The unique ID of the bucket.
} }
// CopyFileRequest is as passed to b2_copy_file
type CopyFileRequest struct {
SourceID string `json:"sourceFileId"` // The ID of the source file being copied.
Name string `json:"fileName"` // The name of the new file being created.
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
MetadataDirective string `json:"metadataDirective,omitempty"` // The strategy for how to populate metadata for the new file: COPY or REPLACE
ContentType string `json:"contentType,omitempty"` // The MIME type of the content of the file (REPLACE only)
Info map[string]string `json:"fileInfo,omitempty"` // This field stores the metadata that will be stored with the file. (REPLACE only)
DestBucketID string `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used
}

View File

@@ -4,8 +4,8 @@ import (
"testing" "testing"
"time" "time"
"github.com/rclone/rclone/backend/b2/api" "github.com/ncw/rclone/backend/b2/api"
"github.com/rclone/rclone/fstest" "github.com/ncw/rclone/fstest"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )

File diff suppressed because it is too large Load Diff

View File

@@ -4,7 +4,7 @@ import (
"testing" "testing"
"time" "time"
"github.com/rclone/rclone/fstest" "github.com/ncw/rclone/fstest"
) )
// Test b2 string encoding // Test b2 string encoding

View File

@@ -4,8 +4,8 @@ package b2
import ( import (
"testing" "testing"
"github.com/rclone/rclone/fs" "github.com/ncw/rclone/fs"
"github.com/rclone/rclone/fstest/fstests" "github.com/ncw/rclone/fstest/fstests"
) )
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the remote

View File

@@ -6,7 +6,6 @@ package b2
import ( import (
"bytes" "bytes"
"context"
"crypto/sha1" "crypto/sha1"
"encoding/hex" "encoding/hex"
"fmt" "fmt"
@@ -15,12 +14,12 @@ import (
"strings" "strings"
"sync" "sync"
"github.com/ncw/rclone/backend/b2/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/rest"
) )
type hashAppendingReader struct { type hashAppendingReader struct {
@@ -81,7 +80,7 @@ type largeUpload struct {
} }
// newLargeUpload starts an upload of object o from in with metadata in src // newLargeUpload starts an upload of object o from in with metadata in src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) { func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
remote := o.remote remote := o.remote
size := src.Size() size := src.Size()
parts := int64(0) parts := int64(0)
@@ -99,34 +98,33 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
sha1SliceSize = parts sha1SliceSize = parts
} }
modTime := src.ModTime(ctx) modTime := src.ModTime()
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
Path: "/b2_start_large_file", Path: "/b2_start_large_file",
} }
bucket, bucketPath := o.split() bucketID, err := f.getBucketID()
bucketID, err := f.getBucketID(ctx, bucket)
if err != nil { if err != nil {
return nil, err return nil, err
} }
var request = api.StartLargeFileRequest{ var request = api.StartLargeFileRequest{
BucketID: bucketID, BucketID: bucketID,
Name: enc.FromStandardPath(bucketPath), Name: o.fs.root + remote,
ContentType: fs.MimeType(ctx, src), ContentType: fs.MimeType(src),
Info: map[string]string{ Info: map[string]string{
timeKey: timeString(modTime), timeKey: timeString(modTime),
}, },
} }
// Set the SHA1 if known // Set the SHA1 if known
if !o.fs.opt.DisableCheckSum { if !o.fs.opt.DisableCheckSum {
if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" { if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" {
request.Info[sha1Key] = calculatedSha1 request.Info[sha1Key] = calculatedSha1
} }
} }
var response api.StartLargeFileResponse var response api.StartLargeFileResponse
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response) resp, err := f.srv.CallJSON(&opts, &request, &response)
return f.shouldRetry(ctx, resp, err) return f.shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
return nil, err return nil, err
@@ -150,7 +148,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken // getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
// //
// This should be returned with returnUploadURL when finished // This should be returned with returnUploadURL when finished
func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) { func (up *largeUpload) getUploadURL() (upload *api.GetUploadPartURLResponse, err error) {
up.uploadMu.Lock() up.uploadMu.Lock()
defer up.uploadMu.Unlock() defer up.uploadMu.Unlock()
if len(up.uploads) == 0 { if len(up.uploads) == 0 {
@@ -162,8 +160,8 @@ func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadP
ID: up.id, ID: up.id,
} }
err := up.f.pacer.Call(func() (bool, error) { err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload) resp, err := up.f.srv.CallJSON(&opts, &request, &upload)
return up.f.shouldRetry(ctx, resp, err) return up.f.shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed to get upload URL") return nil, errors.Wrap(err, "failed to get upload URL")
@@ -192,12 +190,12 @@ func (up *largeUpload) clearUploadURL() {
} }
// Transfer a chunk // Transfer a chunk
func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error { func (up *largeUpload) transferChunk(part int64, body []byte) error {
err := up.f.pacer.Call(func() (bool, error) { err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body)) fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
// Get upload URL // Get upload URL
upload, err := up.getUploadURL(ctx) upload, err := up.getUploadURL()
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -241,8 +239,8 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
var response api.UploadPartResponse var response api.UploadPartResponse
resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response) resp, err := up.f.srv.CallJSON(&opts, nil, &response)
retry, err := up.f.shouldRetry(ctx, resp, err) retry, err := up.f.shouldRetry(resp, err)
if err != nil { if err != nil {
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err) fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
} }
@@ -264,7 +262,7 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
} }
// finish closes off the large upload // finish closes off the large upload
func (up *largeUpload) finish(ctx context.Context) error { func (up *largeUpload) finish() error {
fs.Debugf(up.o, "Finishing large file upload with %d parts", up.parts) fs.Debugf(up.o, "Finishing large file upload with %d parts", up.parts)
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
@@ -276,8 +274,8 @@ func (up *largeUpload) finish(ctx context.Context) error {
} }
var response api.FileInfo var response api.FileInfo
err := up.f.pacer.Call(func() (bool, error) { err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response) resp, err := up.f.srv.CallJSON(&opts, &request, &response)
return up.f.shouldRetry(ctx, resp, err) return up.f.shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
return err return err
@@ -286,7 +284,7 @@ func (up *largeUpload) finish(ctx context.Context) error {
} }
// cancel aborts the large upload // cancel aborts the large upload
func (up *largeUpload) cancel(ctx context.Context) error { func (up *largeUpload) cancel() error {
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
Path: "/b2_cancel_large_file", Path: "/b2_cancel_large_file",
@@ -296,18 +294,18 @@ func (up *largeUpload) cancel(ctx context.Context) error {
} }
var response api.CancelLargeFileResponse var response api.CancelLargeFileResponse
err := up.f.pacer.Call(func() (bool, error) { err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response) resp, err := up.f.srv.CallJSON(&opts, &request, &response)
return up.f.shouldRetry(ctx, resp, err) return up.f.shouldRetry(resp, err)
}) })
return err return err
} }
func (up *largeUpload) managedTransferChunk(ctx context.Context, wg *sync.WaitGroup, errs chan error, part int64, buf []byte) { func (up *largeUpload) managedTransferChunk(wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
wg.Add(1) wg.Add(1)
go func(part int64, buf []byte) { go func(part int64, buf []byte) {
defer wg.Done() defer wg.Done()
defer up.f.putUploadBlock(buf) defer up.f.putUploadBlock(buf)
err := up.transferChunk(ctx, part, buf) err := up.transferChunk(part, buf)
if err != nil { if err != nil {
select { select {
case errs <- err: case errs <- err:
@@ -317,7 +315,7 @@ func (up *largeUpload) managedTransferChunk(ctx context.Context, wg *sync.WaitGr
}(part, buf) }(part, buf)
} }
func (up *largeUpload) finishOrCancelOnError(ctx context.Context, err error, errs chan error) error { func (up *largeUpload) finishOrCancelOnError(err error, errs chan error) error {
if err == nil { if err == nil {
select { select {
case err = <-errs: case err = <-errs:
@@ -326,19 +324,19 @@ func (up *largeUpload) finishOrCancelOnError(ctx context.Context, err error, err
} }
if err != nil { if err != nil {
fs.Debugf(up.o, "Cancelling large file upload due to error: %v", err) fs.Debugf(up.o, "Cancelling large file upload due to error: %v", err)
cancelErr := up.cancel(ctx) cancelErr := up.cancel()
if cancelErr != nil { if cancelErr != nil {
fs.Errorf(up.o, "Failed to cancel large file upload: %v", cancelErr) fs.Errorf(up.o, "Failed to cancel large file upload: %v", cancelErr)
} }
return err return err
} }
return up.finish(ctx) return up.finish()
} }
// Stream uploads the chunks from the input, starting with a required initial // Stream uploads the chunks from the input, starting with a required initial
// chunk. Assumes the file size is unknown and will upload until the input // chunk. Assumes the file size is unknown and will upload until the input
// reaches EOF. // reaches EOF.
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) { func (up *largeUpload) Stream(initialUploadBlock []byte) (err error) {
fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id) fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
errs := make(chan error, 1) errs := make(chan error, 1)
hasMoreParts := true hasMoreParts := true
@@ -346,7 +344,7 @@ func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (e
// Transfer initial chunk // Transfer initial chunk
up.size = int64(len(initialUploadBlock)) up.size = int64(len(initialUploadBlock))
up.managedTransferChunk(ctx, &wg, errs, 1, initialUploadBlock) up.managedTransferChunk(&wg, errs, 1, initialUploadBlock)
outer: outer:
for part := int64(2); hasMoreParts; part++ { for part := int64(2); hasMoreParts; part++ {
@@ -388,16 +386,16 @@ outer:
} }
// Transfer the chunk // Transfer the chunk
up.managedTransferChunk(ctx, &wg, errs, part, buf) up.managedTransferChunk(&wg, errs, part, buf)
} }
wg.Wait() wg.Wait()
up.sha1s = up.sha1s[:up.parts] up.sha1s = up.sha1s[:up.parts]
return up.finishOrCancelOnError(ctx, err, errs) return up.finishOrCancelOnError(err, errs)
} }
// Upload uploads the chunks from the input // Upload uploads the chunks from the input
func (up *largeUpload) Upload(ctx context.Context) error { func (up *largeUpload) Upload() error {
fs.Debugf(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id) fs.Debugf(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
remaining := up.size remaining := up.size
errs := make(chan error, 1) errs := make(chan error, 1)
@@ -428,10 +426,10 @@ outer:
} }
// Transfer the chunk // Transfer the chunk
up.managedTransferChunk(ctx, &wg, errs, part, buf) up.managedTransferChunk(&wg, errs, part, buf)
remaining -= reqSize remaining -= reqSize
} }
wg.Wait() wg.Wait()
return up.finishOrCancelOnError(ctx, err, errs) return up.finishOrCancelOnError(err, errs)
} }

View File

@@ -202,23 +202,3 @@ type CommitUpload struct {
ContentModifiedAt Time `json:"content_modified_at"` ContentModifiedAt Time `json:"content_modified_at"`
} `json:"attributes"` } `json:"attributes"`
} }
// ConfigJSON defines the shape of a box config.json
type ConfigJSON struct {
BoxAppSettings AppSettings `json:"boxAppSettings"`
EnterpriseID string `json:"enterpriseID"`
}
// AppSettings defines the shape of the boxAppSettings within box config.json
type AppSettings struct {
ClientID string `json:"clientID"`
ClientSecret string `json:"clientSecret"`
AppAuth AppAuth `json:"appAuth"`
}
// AppAuth defines the shape of the appAuth within boxAppSettings in config.json
type AppAuth struct {
PublicKeyID string `json:"publicKeyID"`
PrivateKey string `json:"privateKey"`
Passphrase string `json:"passphrase"`
}

View File

@@ -10,13 +10,8 @@ package box
// FIXME box can copy a directory // FIXME box can copy a directory
import ( import (
"context"
"crypto/rsa"
"encoding/json"
"encoding/pem"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"log" "log"
"net/http" "net/http"
"net/url" "net/url"
@@ -25,31 +20,22 @@ import (
"strings" "strings"
"time" "time"
"github.com/rclone/rclone/lib/jwtutil" "github.com/ncw/rclone/backend/box/api"
"github.com/ncw/rclone/fs"
"github.com/youmark/pkcs8" "github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/dircache"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2" "golang.org/x/oauth2"
"golang.org/x/oauth2/jws"
) )
const enc = encodings.Box
const ( const (
rcloneClientID = "d0374ba6pgmaguie02ge15sv1mllndho" rcloneClientID = "d0374ba6pgmaguie02ge15sv1mllndho"
rcloneEncryptedClientSecret = "sYbJYm99WB8jzeaLPU0OPDMJKIkZvD2qOn3SyEMfiJr03RdtDt3xcZEIudRhbIDL" rcloneEncryptedClientSecret = "sYbJYm99WB8jzeaLPU0OPDMJKIkZvD2qOn3SyEMfiJr03RdtDt3xcZEIudRhbIDL"
@@ -62,7 +48,6 @@ const (
listChunks = 1000 // chunk size to read directory listings listChunks = 1000 // chunk size to read directory listings
minUploadCutoff = 50000000 // upload cutoff can be no lower than this minUploadCutoff = 50000000 // upload cutoff can be no lower than this
defaultUploadCutoff = 50 * 1024 * 1024 defaultUploadCutoff = 50 * 1024 * 1024
tokenURL = "https://api.box.com/oauth2/token"
) )
// Globals // Globals
@@ -87,34 +72,9 @@ func init() {
Description: "Box", Description: "Box",
NewFs: NewFs, NewFs: NewFs,
Config: func(name string, m configmap.Mapper) { Config: func(name string, m configmap.Mapper) {
jsonFile, ok := m.Get("box_config_file") err := oauthutil.Config("box", name, m, oauthConfig)
boxSubType, boxSubTypeOk := m.Get("box_sub_type") if err != nil {
var err error log.Fatalf("Failed to configure token: %v", err)
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
boxConfig, err := getBoxConfig(jsonFile)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
privateKey, err := getDecryptedPrivateKey(boxConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
claims, err := getClaims(boxConfig, boxSubType)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig)
client := fshttp.NewClient(fs.Config)
err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
if err != nil {
log.Fatalf("Failed to configure token with jwt authentication: %v", err)
}
} else {
err = oauthutil.Config("box", name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure token with oauth authentication: %v", err)
}
} }
}, },
Options: []fs.Option{{ Options: []fs.Option{{
@@ -123,19 +83,6 @@ func init() {
}, { }, {
Name: config.ConfigClientSecret, Name: config.ConfigClientSecret,
Help: "Box App Client Secret\nLeave blank normally.", Help: "Box App Client Secret\nLeave blank normally.",
}, {
Name: "box_config_file",
Help: "Box App config.json location\nLeave blank normally.",
}, {
Name: "box_sub_type",
Default: "user",
Examples: []fs.OptionExample{{
Value: "user",
Help: "Rclone should act on behalf of a user",
}, {
Value: "enterprise",
Help: "Rclone should act on behalf of a service account",
}},
}, { }, {
Name: "upload_cutoff", Name: "upload_cutoff",
Help: "Cutoff for switching to multipart upload (>= 50MB).", Help: "Cutoff for switching to multipart upload (>= 50MB).",
@@ -150,74 +97,6 @@ func init() {
}) })
} }
func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
file, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, errors.Wrap(err, "box: failed to read Box config")
}
err = json.Unmarshal(file, &boxConfig)
if err != nil {
return nil, errors.Wrap(err, "box: failed to parse Box config")
}
return boxConfig, nil
}
func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
val, err := jwtutil.RandomHex(20)
if err != nil {
return nil, errors.Wrap(err, "box: failed to generate random string for jti")
}
claims = &jws.ClaimSet{
Iss: boxConfig.BoxAppSettings.ClientID,
Sub: boxConfig.EnterpriseID,
Aud: tokenURL,
Iat: time.Now().Unix(),
Exp: time.Now().Add(time.Second * 45).Unix(),
PrivateClaims: map[string]interface{}{
"box_sub_type": boxSubType,
"aud": tokenURL,
"jti": val,
},
}
return claims, nil
}
func getSigningHeaders(boxConfig *api.ConfigJSON) *jws.Header {
signingHeaders := &jws.Header{
Algorithm: "RS256",
Typ: "JWT",
KeyID: boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
}
return signingHeaders
}
func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
queryParams := map[string]string{
"client_id": boxConfig.BoxAppSettings.ClientID,
"client_secret": boxConfig.BoxAppSettings.ClientSecret,
}
return queryParams
}
func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
if len(rest) > 0 {
return nil, errors.Wrap(err, "box: extra data included in private key")
}
rsaKey, err := pkcs8.ParsePKCS8PrivateKey(block.Bytes, []byte(boxConfig.BoxAppSettings.AppAuth.Passphrase))
if err != nil {
return nil, errors.Wrap(err, "box: failed to decrypt private key")
}
return rsaKey.(*rsa.PrivateKey), nil
}
// Options defines the configuration for this backend // Options defines the configuration for this backend
type Options struct { type Options struct {
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"` UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
@@ -301,10 +180,22 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
} }
// substitute reserved characters for box
func replaceReservedChars(x string) string {
// Backslash for FULLWIDTH REVERSE SOLIDUS
return strings.Replace(x, "\\", "", -1)
}
// restore reserved characters for box
func restoreReservedChars(x string) string {
// FULLWIDTH REVERSE SOLIDUS for Backslash
return strings.Replace(x, "", "\\", -1)
}
// readMetaDataForPath reads the metadata from the path // readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) { func (f *Fs) readMetaDataForPath(path string) (info *api.Item, err error) {
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err) // defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, path, false) leaf, directoryID, err := f.dirCache.FindRootAndPath(path, false)
if err != nil { if err != nil {
if err == fs.ErrorDirNotFound { if err == fs.ErrorDirNotFound {
return nil, fs.ErrorObjectNotFound return nil, fs.ErrorObjectNotFound
@@ -312,7 +203,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
return nil, err return nil, err
} }
found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool { found, err := f.listAll(directoryID, false, true, func(item *api.Item) bool {
if item.Name == leaf { if item.Name == leaf {
info = item info = item
return true return true
@@ -347,7 +238,6 @@ func errorHandler(resp *http.Response) error {
// NewFs constructs an Fs from the path, container:path // NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
// Parse config into Options struct // Parse config into Options struct
opt := new(Options) opt := new(Options)
err := configstruct.Set(m, opt) err := configstruct.Set(m, opt)
@@ -381,7 +271,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Renew the token in the background // Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error { f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath(ctx, "") _, err := f.readMetaDataForPath("")
return err return err
}) })
@@ -389,7 +279,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f.dirCache = dircache.New(root, rootID, f) f.dirCache = dircache.New(root, rootID, f)
// Find the current root // Find the current root
err = f.dirCache.FindRoot(ctx, false) err = f.dirCache.FindRoot(false)
if err != nil { if err != nil {
// Assume it is a file // Assume it is a file
newRoot, remote := dircache.SplitPath(root) newRoot, remote := dircache.SplitPath(root)
@@ -397,12 +287,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
tempF.dirCache = dircache.New(newRoot, rootID, &tempF) tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
tempF.root = newRoot tempF.root = newRoot
// Make new Fs which is the parent // Make new Fs which is the parent
err = tempF.dirCache.FindRoot(ctx, false) err = tempF.dirCache.FindRoot(false)
if err != nil { if err != nil {
// No root so return old f // No root so return old f
return f, nil return f, nil
} }
_, err := tempF.newObjectWithInfo(ctx, remote, nil) _, err := tempF.newObjectWithInfo(remote, nil)
if err != nil { if err != nil {
if err == fs.ErrorObjectNotFound { if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f // File doesn't exist so return old f
@@ -413,7 +303,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f.features.Fill(&tempF) f.features.Fill(&tempF)
// XXX: update the old f here instead of returning tempF, since // XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver. // `features` were already filled with functions having *f as a receiver.
// See https://github.com/rclone/rclone/issues/2182 // See https://github.com/ncw/rclone/issues/2182
f.dirCache = tempF.dirCache f.dirCache = tempF.dirCache
f.root = tempF.root f.root = tempF.root
// return an error with an fs which points to the parent // return an error with an fs which points to the parent
@@ -433,7 +323,7 @@ func (f *Fs) rootSlash() string {
// Return an Object from a path // Return an Object from a path
// //
// If it can't be found it returns the error fs.ErrorObjectNotFound. // If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) { func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error) {
o := &Object{ o := &Object{
fs: f, fs: f,
remote: remote, remote: remote,
@@ -443,7 +333,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Ite
// Set info // Set info
err = o.setMetaData(info) err = o.setMetaData(info)
} else { } else {
err = o.readMetaData(ctx) // reads info and meta, returning an error err = o.readMetaData() // reads info and meta, returning an error
} }
if err != nil { if err != nil {
return nil, err return nil, err
@@ -453,14 +343,14 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Ite
// NewObject finds the Object at remote. If it can't be found // NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound. // it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil) return f.newObjectWithInfo(remote, nil)
} }
// FindLeaf finds a directory of name leaf in the folder with ID pathID // FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) { func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID // Find the leaf in pathID
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool { found, err = f.listAll(pathID, true, false, func(item *api.Item) bool {
if item.Name == leaf { if item.Name == leaf {
pathIDOut = item.ID pathIDOut = item.ID
return true return true
@@ -478,7 +368,7 @@ func fieldsValue() url.Values {
} }
// CreateDir makes a directory with pathID as parent and name leaf // CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) { func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
// fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf) // fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
var resp *http.Response var resp *http.Response
var info *api.Item var info *api.Item
@@ -488,13 +378,13 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
Parameters: fieldsValue(), Parameters: fieldsValue(),
} }
mkdir := api.CreateFolder{ mkdir := api.CreateFolder{
Name: enc.FromStandardName(leaf), Name: replaceReservedChars(leaf),
Parent: api.Parent{ Parent: api.Parent{
ID: pathID, ID: pathID,
}, },
} }
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info) resp, err = f.srv.CallJSON(&opts, &mkdir, &info)
return shouldRetry(resp, err) return shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
@@ -516,7 +406,7 @@ type listAllFn func(*api.Item) bool
// Lists the directory required calling the user function on each item found // Lists the directory required calling the user function on each item found
// //
// If the user fn ever returns true then it early exits with found = true // If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) { func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
opts := rest.Opts{ opts := rest.Opts{
Method: "GET", Method: "GET",
Path: "/folders/" + dirID + "/items", Path: "/folders/" + dirID + "/items",
@@ -531,7 +421,7 @@ OUTER:
var result api.FolderItems var result api.FolderItems
var resp *http.Response var resp *http.Response
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) resp, err = f.srv.CallJSON(&opts, nil, &result)
return shouldRetry(resp, err) return shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
@@ -554,7 +444,7 @@ OUTER:
if item.ItemStatus != api.ItemStatusActive { if item.ItemStatus != api.ItemStatusActive {
continue continue
} }
item.Name = enc.ToStandardName(item.Name) item.Name = restoreReservedChars(item.Name)
if fn(item) { if fn(item) {
found = true found = true
break OUTER break OUTER
@@ -577,17 +467,17 @@ OUTER:
// //
// This should return ErrDirNotFound if the directory isn't // This should return ErrDirNotFound if the directory isn't
// found. // found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(ctx, false) err = f.dirCache.FindRoot(false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
directoryID, err := f.dirCache.FindDir(ctx, dir, false) directoryID, err := f.dirCache.FindDir(dir, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
var iErr error var iErr error
_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool { _, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
remote := path.Join(dir, info.Name) remote := path.Join(dir, info.Name)
if info.Type == api.ItemTypeFolder { if info.Type == api.ItemTypeFolder {
// cache the directory ID for later lookups // cache the directory ID for later lookups
@@ -596,7 +486,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// FIXME more info from dir? // FIXME more info from dir?
entries = append(entries, d) entries = append(entries, d)
} else if info.Type == api.ItemTypeFile { } else if info.Type == api.ItemTypeFile {
o, err := f.newObjectWithInfo(ctx, remote, info) o, err := f.newObjectWithInfo(remote, info)
if err != nil { if err != nil {
iErr = err iErr = err
return true return true
@@ -620,9 +510,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Returns the object, leaf, directoryID and error // Returns the object, leaf, directoryID and error
// //
// Used to create new objects // Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) { func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
// Create the directory for the object if it doesn't exist // Create the directory for the object if it doesn't exist
leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true) leaf, directoryID, err = f.dirCache.FindRootAndPath(remote, true)
if err != nil { if err != nil {
return return
} }
@@ -639,22 +529,22 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
// Copy the reader in to the new object which is returned // Copy the reader in to the new object which is returned
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil) existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
switch err { switch err {
case nil: case nil:
return existingObj, existingObj.Update(ctx, in, src, options...) return existingObj, existingObj.Update(in, src, options...)
case fs.ErrorObjectNotFound: case fs.ErrorObjectNotFound:
// Not found so create it // Not found so create it
return f.PutUnchecked(ctx, in, src) return f.PutUnchecked(in, src)
default: default:
return nil, err return nil, err
} }
} }
// PutStream uploads to the remote path with the modTime given of indeterminate size // PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...) return f.Put(in, src, options...)
} }
// PutUnchecked the object into the container // PutUnchecked the object into the container
@@ -664,56 +554,56 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// Copy the reader in to the new object which is returned // Copy the reader in to the new object which is returned
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
remote := src.Remote() remote := src.Remote()
size := src.Size() size := src.Size()
modTime := src.ModTime(ctx) modTime := src.ModTime()
o, _, _, err := f.createObject(ctx, remote, modTime, size) o, _, _, err := f.createObject(remote, modTime, size)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return o, o.Update(ctx, in, src, options...) return o, o.Update(in, src, options...)
} }
// Mkdir creates the container if it doesn't exist // Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error { func (f *Fs) Mkdir(dir string) error {
err := f.dirCache.FindRoot(ctx, true) err := f.dirCache.FindRoot(true)
if err != nil { if err != nil {
return err return err
} }
if dir != "" { if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true) _, err = f.dirCache.FindDir(dir, true)
} }
return err return err
} }
// deleteObject removes an object by ID // deleteObject removes an object by ID
func (f *Fs) deleteObject(ctx context.Context, id string) error { func (f *Fs) deleteObject(id string) error {
opts := rest.Opts{ opts := rest.Opts{
Method: "DELETE", Method: "DELETE",
Path: "/files/" + id, Path: "/files/" + id,
NoResponse: true, NoResponse: true,
} }
return f.pacer.Call(func() (bool, error) { return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(ctx, &opts) resp, err := f.srv.Call(&opts)
return shouldRetry(resp, err) return shouldRetry(resp, err)
}) })
} }
// purgeCheck removes the root directory, if check is set then it // purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in // refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { func (f *Fs) purgeCheck(dir string, check bool) error {
root := path.Join(f.root, dir) root := path.Join(f.root, dir)
if root == "" { if root == "" {
return errors.New("can't purge root directory") return errors.New("can't purge root directory")
} }
dc := f.dirCache dc := f.dirCache
err := dc.FindRoot(ctx, false) err := dc.FindRoot(false)
if err != nil { if err != nil {
return err return err
} }
rootID, err := dc.FindDir(ctx, dir, false) rootID, err := dc.FindDir(dir, false)
if err != nil { if err != nil {
return err return err
} }
@@ -727,7 +617,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
opts.Parameters.Set("recursive", strconv.FormatBool(!check)) opts.Parameters.Set("recursive", strconv.FormatBool(!check))
var resp *http.Response var resp *http.Response
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts) resp, err = f.srv.Call(&opts)
return shouldRetry(resp, err) return shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
@@ -743,8 +633,8 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
// Rmdir deletes the root folder // Rmdir deletes the root folder
// //
// Returns an error if it isn't empty // Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error { func (f *Fs) Rmdir(dir string) error {
return f.purgeCheck(ctx, dir, true) return f.purgeCheck(dir, true)
} }
// Precision return the precision of this Fs // Precision return the precision of this Fs
@@ -761,13 +651,13 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
// If it isn't possible then return fs.ErrorCantCopy // If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object) srcObj, ok := src.(*Object)
if !ok { if !ok {
fs.Debugf(src, "Can't copy - not same remote type") fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy return nil, fs.ErrorCantCopy
} }
err := srcObj.readMetaData(ctx) err := srcObj.readMetaData()
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -779,7 +669,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
} }
// Create temporary object // Create temporary object
dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -790,8 +680,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
Path: "/files/" + srcObj.id + "/copy", Path: "/files/" + srcObj.id + "/copy",
Parameters: fieldsValue(), Parameters: fieldsValue(),
} }
replacedLeaf := replaceReservedChars(leaf)
copyFile := api.CopyFile{ copyFile := api.CopyFile{
Name: enc.FromStandardName(leaf), Name: replacedLeaf,
Parent: api.Parent{ Parent: api.Parent{
ID: directoryID, ID: directoryID,
}, },
@@ -799,7 +690,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var resp *http.Response var resp *http.Response
var info *api.Item var info *api.Item
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &copyFile, &info) resp, err = f.srv.CallJSON(&opts, &copyFile, &info)
return shouldRetry(resp, err) return shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
@@ -817,12 +708,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Optional interface: Only implement this if you have a way of // Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the // deleting all the files quicker than just running Remove() on the
// result of List() // result of List()
func (f *Fs) Purge(ctx context.Context) error { func (f *Fs) Purge() error {
return f.purgeCheck(ctx, "", false) return f.purgeCheck("", false)
} }
// move a file or folder // move a file or folder
func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (info *api.Item, err error) { func (f *Fs) move(endpoint, id, leaf, directoryID string) (info *api.Item, err error) {
// Move the object // Move the object
opts := rest.Opts{ opts := rest.Opts{
Method: "PUT", Method: "PUT",
@@ -830,14 +721,14 @@ func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (
Parameters: fieldsValue(), Parameters: fieldsValue(),
} }
move := api.UpdateFileMove{ move := api.UpdateFileMove{
Name: enc.FromStandardName(leaf), Name: replaceReservedChars(leaf),
Parent: api.Parent{ Parent: api.Parent{
ID: directoryID, ID: directoryID,
}, },
} }
var resp *http.Response var resp *http.Response
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &move, &info) resp, err = f.srv.CallJSON(&opts, &move, &info)
return shouldRetry(resp, err) return shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
@@ -855,7 +746,7 @@ func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
// If it isn't possible then return fs.ErrorCantMove // If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object) srcObj, ok := src.(*Object)
if !ok { if !ok {
fs.Debugf(src, "Can't move - not same remote type") fs.Debugf(src, "Can't move - not same remote type")
@@ -863,13 +754,13 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
} }
// Create temporary object // Create temporary object
dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// Do the move // Do the move
info, err := f.move(ctx, "/files/", srcObj.id, leaf, directoryID) info, err := f.move("/files/", srcObj.id, leaf, directoryID)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -889,7 +780,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// If it isn't possible then return fs.ErrorCantDirMove // If it isn't possible then return fs.ErrorCantDirMove
// //
// If destination exists then return fs.ErrorDirExists // If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs) srcFs, ok := src.(*Fs)
if !ok { if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type") fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -905,14 +796,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
} }
// find the root src directory // find the root src directory
err := srcFs.dirCache.FindRoot(ctx, false) err := srcFs.dirCache.FindRoot(false)
if err != nil { if err != nil {
return err return err
} }
// find the root dst directory // find the root dst directory
if dstRemote != "" { if dstRemote != "" {
err = f.dirCache.FindRoot(ctx, true) err = f.dirCache.FindRoot(true)
if err != nil { if err != nil {
return err return err
} }
@@ -928,14 +819,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
if dstRemote == "" { if dstRemote == "" {
findPath = f.root findPath = f.root
} }
leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true) leaf, directoryID, err = f.dirCache.FindPath(findPath, true)
if err != nil { if err != nil {
return err return err
} }
// Check destination does not exist // Check destination does not exist
if dstRemote != "" { if dstRemote != "" {
_, err = f.dirCache.FindDir(ctx, dstRemote, false) _, err = f.dirCache.FindDir(dstRemote, false)
if err == fs.ErrorDirNotFound { if err == fs.ErrorDirNotFound {
// OK // OK
} else if err != nil { } else if err != nil {
@@ -946,13 +837,13 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
} }
// Find ID of src // Find ID of src
srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false) srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
if err != nil { if err != nil {
return err return err
} }
// Do the move // Do the move
_, err = f.move(ctx, "/folders/", srcID, leaf, directoryID) _, err = f.move("/folders/", srcID, leaf, directoryID)
if err != nil { if err != nil {
return err return err
} }
@@ -961,8 +852,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
} }
// PublicLink adds a "readable by anyone with link" permission on the given file or folder. // PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) { func (f *Fs) PublicLink(remote string) (string, error) {
id, err := f.dirCache.FindDir(ctx, remote, false) id, err := f.dirCache.FindDir(remote, false)
var opts rest.Opts var opts rest.Opts
if err == nil { if err == nil {
fs.Debugf(f, "attempting to share directory '%s'", remote) fs.Debugf(f, "attempting to share directory '%s'", remote)
@@ -974,7 +865,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
} }
} else { } else {
fs.Debugf(f, "attempting to share single file '%s'", remote) fs.Debugf(f, "attempting to share single file '%s'", remote)
o, err := f.NewObject(ctx, remote) o, err := f.NewObject(remote)
if err != nil { if err != nil {
return "", err return "", err
} }
@@ -994,7 +885,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
var info api.Item var info api.Item
var resp *http.Response var resp *http.Response
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &shareLink, &info) resp, err = f.srv.CallJSON(&opts, &shareLink, &info)
return shouldRetry(resp, err) return shouldRetry(resp, err)
}) })
return info.SharedLink.URL, err return info.SharedLink.URL, err
@@ -1031,8 +922,13 @@ func (o *Object) Remote() string {
return o.remote return o.remote
} }
// srvPath returns a path for use in server
func (o *Object) srvPath() string {
return replaceReservedChars(o.fs.rootSlash() + o.remote)
}
// Hash returns the SHA-1 of an object returning a lowercase hex string // Hash returns the SHA-1 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.SHA1 { if t != hash.SHA1 {
return "", hash.ErrUnsupported return "", hash.ErrUnsupported
} }
@@ -1041,7 +937,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
// Size returns the size of an object in bytes // Size returns the size of an object in bytes
func (o *Object) Size() int64 { func (o *Object) Size() int64 {
err := o.readMetaData(context.TODO()) err := o.readMetaData()
if err != nil { if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err) fs.Logf(o, "Failed to read metadata: %v", err)
return 0 return 0
@@ -1066,11 +962,11 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
// readMetaData gets the metadata if it hasn't already been fetched // readMetaData gets the metadata if it hasn't already been fetched
// //
// it also sets the info // it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) { func (o *Object) readMetaData() (err error) {
if o.hasMetaData { if o.hasMetaData {
return nil return nil
} }
info, err := o.fs.readMetaDataForPath(ctx, o.remote) info, err := o.fs.readMetaDataForPath(o.remote)
if err != nil { if err != nil {
if apiErr, ok := err.(*api.Error); ok { if apiErr, ok := err.(*api.Error); ok {
if apiErr.Code == "not_found" || apiErr.Code == "trashed" { if apiErr.Code == "not_found" || apiErr.Code == "trashed" {
@@ -1087,8 +983,8 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// //
// It attempts to read the objects mtime and if that isn't present the // It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers // LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time { func (o *Object) ModTime() time.Time {
err := o.readMetaData(ctx) err := o.readMetaData()
if err != nil { if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err) fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now() return time.Now()
@@ -1097,7 +993,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
} }
// setModTime sets the modification time of the local fs object // setModTime sets the modification time of the local fs object
func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item, error) { func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
opts := rest.Opts{ opts := rest.Opts{
Method: "PUT", Method: "PUT",
Path: "/files/" + o.id, Path: "/files/" + o.id,
@@ -1108,15 +1004,15 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
} }
var info *api.Item var info *api.Item
err := o.fs.pacer.Call(func() (bool, error) { err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info) resp, err := o.fs.srv.CallJSON(&opts, &update, &info)
return shouldRetry(resp, err) return shouldRetry(resp, err)
}) })
return info, err return info, err
} }
// SetModTime sets the modification time of the local fs object // SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { func (o *Object) SetModTime(modTime time.Time) error {
info, err := o.setModTime(ctx, modTime) info, err := o.setModTime(modTime)
if err != nil { if err != nil {
return err return err
} }
@@ -1129,7 +1025,7 @@ func (o *Object) Storable() bool {
} }
// Open an object for read // Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
if o.id == "" { if o.id == "" {
return nil, errors.New("can't download - no id") return nil, errors.New("can't download - no id")
} }
@@ -1141,7 +1037,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
Options: options, Options: options,
} }
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts) resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err) return shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
@@ -1153,9 +1049,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// upload does a single non-multipart upload // upload does a single non-multipart upload
// //
// This is recommended for less than 50 MB of content // This is recommended for less than 50 MB of content
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time) (err error) { func (o *Object) upload(in io.Reader, leaf, directoryID string, modTime time.Time) (err error) {
upload := api.UploadFile{ upload := api.UploadFile{
Name: enc.FromStandardName(leaf), Name: replaceReservedChars(leaf),
ContentModifiedAt: api.Time(modTime), ContentModifiedAt: api.Time(modTime),
ContentCreatedAt: api.Time(modTime), ContentCreatedAt: api.Time(modTime),
Parent: api.Parent{ Parent: api.Parent{
@@ -1180,7 +1076,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
opts.Path = "/files/content" opts.Path = "/files/content"
} }
err = o.fs.pacer.CallNoRetry(func() (bool, error) { err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &upload, &result) resp, err = o.fs.srv.CallJSON(&opts, &upload, &result)
return shouldRetry(resp, err) return shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
@@ -1197,32 +1093,32 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
// If existing is set then it updates the object rather than creating a new one // If existing is set then it updates the object rather than creating a new one
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
o.fs.tokenRenewer.Start() o.fs.tokenRenewer.Start()
defer o.fs.tokenRenewer.Stop() defer o.fs.tokenRenewer.Stop()
size := src.Size() size := src.Size()
modTime := src.ModTime(ctx) modTime := src.ModTime()
remote := o.Remote() remote := o.Remote()
// Create the directory for the object if it doesn't exist // Create the directory for the object if it doesn't exist
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, remote, true) leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(remote, true)
if err != nil { if err != nil {
return err return err
} }
// Upload with simple or multipart // Upload with simple or multipart
if size <= int64(o.fs.opt.UploadCutoff) { if size <= int64(o.fs.opt.UploadCutoff) {
err = o.upload(ctx, in, leaf, directoryID, modTime) err = o.upload(in, leaf, directoryID, modTime)
} else { } else {
err = o.uploadMultipart(ctx, in, leaf, directoryID, size, modTime) err = o.uploadMultipart(in, leaf, directoryID, size, modTime)
} }
return err return err
} }
// Remove an object // Remove an object
func (o *Object) Remove(ctx context.Context) error { func (o *Object) Remove() error {
return o.fs.deleteObject(ctx, o.id) return o.fs.deleteObject(o.id)
} }
// ID returns the ID of the Object if known, or "" if not // ID returns the ID of the Object if known, or "" if not

View File

@@ -4,8 +4,8 @@ package box_test
import ( import (
"testing" "testing"
"github.com/rclone/rclone/backend/box" "github.com/ncw/rclone/backend/box"
"github.com/rclone/rclone/fstest/fstests" "github.com/ncw/rclone/fstest/fstests"
) )
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the remote

View File

@@ -4,7 +4,6 @@ package box
import ( import (
"bytes" "bytes"
"context"
"crypto/sha1" "crypto/sha1"
"encoding/base64" "encoding/base64"
"encoding/json" "encoding/json"
@@ -15,15 +14,15 @@ import (
"sync" "sync"
"time" "time"
"github.com/ncw/rclone/backend/box/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/rest"
) )
// createUploadSession creates an upload session for the object // createUploadSession creates an upload session for the object
func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) { func (o *Object) createUploadSession(leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) {
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
Path: "/files/upload_sessions", Path: "/files/upload_sessions",
@@ -38,11 +37,11 @@ func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID stri
} else { } else {
opts.Path = "/files/upload_sessions" opts.Path = "/files/upload_sessions"
request.FolderID = directoryID request.FolderID = directoryID
request.FileName = enc.FromStandardName(leaf) request.FileName = replaceReservedChars(leaf)
} }
var resp *http.Response var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, &response) resp, err = o.fs.srv.CallJSON(&opts, &request, &response)
return shouldRetry(resp, err) return shouldRetry(resp, err)
}) })
return return
@@ -54,7 +53,7 @@ func sha1Digest(digest []byte) string {
} }
// uploadPart uploads a part in an upload session // uploadPart uploads a part in an upload session
func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) { func (o *Object) uploadPart(SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
chunkSize := int64(len(chunk)) chunkSize := int64(len(chunk))
sha1sum := sha1.Sum(chunk) sha1sum := sha1.Sum(chunk)
opts := rest.Opts{ opts := rest.Opts{
@@ -71,7 +70,7 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
var resp *http.Response var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
opts.Body = wrap(bytes.NewReader(chunk)) opts.Body = wrap(bytes.NewReader(chunk))
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &response) resp, err = o.fs.srv.CallJSON(&opts, nil, &response)
return shouldRetry(resp, err) return shouldRetry(resp, err)
}) })
if err != nil { if err != nil {
@@ -81,7 +80,7 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
} }
// commitUpload finishes an upload session // commitUpload finishes an upload session
func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) { func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) {
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
Path: "/files/upload_sessions/" + SessionID + "/commit", Path: "/files/upload_sessions/" + SessionID + "/commit",
@@ -98,14 +97,14 @@ func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api
var body []byte var body []byte
var resp *http.Response var resp *http.Response
// For discussion of this value see: // For discussion of this value see:
// https://github.com/rclone/rclone/issues/2054 // https://github.com/ncw/rclone/issues/2054
maxTries := o.fs.opt.CommitRetries maxTries := o.fs.opt.CommitRetries
const defaultDelay = 10 const defaultDelay = 10
var tries int var tries int
outer: outer:
for tries = 0; tries < maxTries; tries++ { for tries = 0; tries < maxTries; tries++ {
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil) resp, err = o.fs.srv.CallJSON(&opts, &request, nil)
if err != nil { if err != nil {
return shouldRetry(resp, err) return shouldRetry(resp, err)
} }
@@ -113,7 +112,7 @@ outer:
return shouldRetry(resp, err) return shouldRetry(resp, err)
}) })
delay := defaultDelay delay := defaultDelay
var why string why := "unknown"
if err != nil { if err != nil {
// Sometimes we get 400 Error with // Sometimes we get 400 Error with
// parts_mismatch immediately after uploading // parts_mismatch immediately after uploading
@@ -155,7 +154,7 @@ outer:
} }
// abortUpload cancels an upload session // abortUpload cancels an upload session
func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error) { func (o *Object) abortUpload(SessionID string) (err error) {
opts := rest.Opts{ opts := rest.Opts{
Method: "DELETE", Method: "DELETE",
Path: "/files/upload_sessions/" + SessionID, Path: "/files/upload_sessions/" + SessionID,
@@ -164,16 +163,16 @@ func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error)
} }
var resp *http.Response var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts) resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err) return shouldRetry(resp, err)
}) })
return err return err
} }
// uploadMultipart uploads a file using multipart upload // uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) { func (o *Object) uploadMultipart(in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
// Create upload session // Create upload session
session, err := o.createUploadSession(ctx, leaf, directoryID, size) session, err := o.createUploadSession(leaf, directoryID, size)
if err != nil { if err != nil {
return errors.Wrap(err, "multipart upload create session failed") return errors.Wrap(err, "multipart upload create session failed")
} }
@@ -184,7 +183,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
defer func() { defer func() {
if err != nil { if err != nil {
fs.Debugf(o, "Cancelling multipart upload: %v", err) fs.Debugf(o, "Cancelling multipart upload: %v", err)
cancelErr := o.abortUpload(ctx, session.ID) cancelErr := o.abortUpload(session.ID)
if cancelErr != nil { if cancelErr != nil {
fs.Logf(o, "Failed to cancel multipart upload: %v", err) fs.Logf(o, "Failed to cancel multipart upload: %v", err)
} }
@@ -236,7 +235,7 @@ outer:
defer wg.Done() defer wg.Done()
defer o.fs.uploadToken.Put() defer o.fs.uploadToken.Put()
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize)) fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap) partResponse, err := o.uploadPart(session.ID, position, size, buf, wrap)
if err != nil { if err != nil {
err = errors.Wrap(err, "multipart upload failed to upload part") err = errors.Wrap(err, "multipart upload failed to upload part")
select { select {
@@ -264,7 +263,7 @@ outer:
} }
// Finalise the upload session // Finalise the upload session
result, err := o.commitUpload(ctx, session.ID, parts, modTime, hash.Sum(nil)) result, err := o.commitUpload(session.ID, parts, modTime, hash.Sum(nil))
if err != nil { if err != nil {
return errors.Wrap(err, "multipart upload failed to finalize") return errors.Wrap(err, "multipart upload failed to finalize")
} }

185
backend/cache/cache.go vendored
View File

@@ -18,19 +18,18 @@ import (
"syscall" "syscall"
"time" "time"
"github.com/ncw/rclone/backend/crypt"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fspath"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/rc"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/atexit"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/atexit"
"golang.org/x/time/rate" "golang.org/x/time/rate"
) )
@@ -482,7 +481,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath) return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
} }
f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath) f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
f.tempFs, err = cache.Get(f.opt.TempWritePath) f.tempFs, err = fs.NewFs(f.opt.TempWritePath)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "failed to create temp fs: %v", err) return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
} }
@@ -509,7 +508,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil { if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
pollInterval := make(chan time.Duration, 1) pollInterval := make(chan time.Duration, 1)
pollInterval <- time.Duration(f.opt.ChunkCleanInterval) pollInterval <- time.Duration(f.opt.ChunkCleanInterval)
doChangeNotify(context.Background(), f.receiveChangeNotify, pollInterval) doChangeNotify(f.receiveChangeNotify, pollInterval)
} }
f.features = (&fs.Features{ f.features = (&fs.Features{
@@ -600,7 +599,7 @@ is used on top of the cache.
return f, fsErr return f, fsErr
} }
func (f *Fs) httpStats(ctx context.Context, in rc.Params) (out rc.Params, err error) { func (f *Fs) httpStats(in rc.Params) (out rc.Params, err error) {
out = make(rc.Params) out = make(rc.Params)
m, err := f.Stats() m, err := f.Stats()
if err != nil { if err != nil {
@@ -627,7 +626,7 @@ func (f *Fs) unwrapRemote(remote string) string {
return remote return remote
} }
func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params, err error) { func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) {
out = make(rc.Params) out = make(rc.Params)
remoteInt, ok := in["remote"] remoteInt, ok := in["remote"]
if !ok { if !ok {
@@ -672,7 +671,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
return out, nil return out, nil
} }
func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) { func (f *Fs) rcFetch(in rc.Params) (rc.Params, error) {
type chunkRange struct { type chunkRange struct {
start, end int64 start, end int64
} }
@@ -777,18 +776,18 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
for _, pair := range files { for _, pair := range files {
file, remote := pair[0], pair[1] file, remote := pair[0], pair[1]
var status fileStatus var status fileStatus
o, err := f.NewObject(ctx, remote) o, err := f.NewObject(remote)
if err != nil { if err != nil {
fetchedChunks[file] = fileStatus{Error: err.Error()} fetchedChunks[file] = fileStatus{Error: err.Error()}
continue continue
} }
co := o.(*Object) co := o.(*Object)
err = co.refreshFromSource(ctx, true) err = co.refreshFromSource(true)
if err != nil { if err != nil {
fetchedChunks[file] = fileStatus{Error: err.Error()} fetchedChunks[file] = fileStatus{Error: err.Error()}
continue continue
} }
handle := NewObjectHandle(ctx, co, f) handle := NewObjectHandle(co, f)
handle.UseMemory = false handle.UseMemory = false
handle.scaleWorkers(1) handle.scaleWorkers(1)
walkChunkRanges(crs, co.Size(), func(chunk int64) { walkChunkRanges(crs, co.Size(), func(chunk int64) {
@@ -874,7 +873,7 @@ func (f *Fs) notifyChangeUpstream(remote string, entryType fs.EntryType) {
// ChangeNotify can subscribe multiple callers // ChangeNotify can subscribe multiple callers
// this is coupled with the wrapped fs ChangeNotify (if it supports it) // this is coupled with the wrapped fs ChangeNotify (if it supports it)
// and also notifies other caches (i.e VFS) to clear out whenever something changes // and also notifies other caches (i.e VFS) to clear out whenever something changes
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) { func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
f.parentsForgetMu.Lock() f.parentsForgetMu.Lock()
defer f.parentsForgetMu.Unlock() defer f.parentsForgetMu.Unlock()
fs.Debugf(f, "subscribing to ChangeNotify") fs.Debugf(f, "subscribing to ChangeNotify")
@@ -921,7 +920,7 @@ func (f *Fs) TempUploadWaitTime() time.Duration {
} }
// NewObject finds the Object at remote. // NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { func (f *Fs) NewObject(remote string) (fs.Object, error) {
var err error var err error
fs.Debugf(f, "new object '%s'", remote) fs.Debugf(f, "new object '%s'", remote)
@@ -940,16 +939,16 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// search for entry in source or temp fs // search for entry in source or temp fs
var obj fs.Object var obj fs.Object
if f.opt.TempWritePath != "" { if f.opt.TempWritePath != "" {
obj, err = f.tempFs.NewObject(ctx, remote) obj, err = f.tempFs.NewObject(remote)
// not found in temp fs // not found in temp fs
if err != nil { if err != nil {
fs.Debugf(remote, "find: not found in local cache fs") fs.Debugf(remote, "find: not found in local cache fs")
obj, err = f.Fs.NewObject(ctx, remote) obj, err = f.Fs.NewObject(remote)
} else { } else {
fs.Debugf(obj, "find: found in local cache fs") fs.Debugf(obj, "find: found in local cache fs")
} }
} else { } else {
obj, err = f.Fs.NewObject(ctx, remote) obj, err = f.Fs.NewObject(remote)
} }
// not found in either fs // not found in either fs
@@ -959,13 +958,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
} }
// cache the new entry // cache the new entry
co = ObjectFromOriginal(ctx, f, obj).persist() co = ObjectFromOriginal(f, obj).persist()
fs.Debugf(co, "find: cached object") fs.Debugf(co, "find: cached object")
return co, nil return co, nil
} }
// List the objects and directories in dir into entries // List the objects and directories in dir into entries
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
fs.Debugf(f, "list '%s'", dir) fs.Debugf(f, "list '%s'", dir)
cd := ShallowDirectory(f, dir) cd := ShallowDirectory(f, dir)
@@ -995,12 +994,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
fs.Debugf(dir, "list: temp fs entries: %v", queuedEntries) fs.Debugf(dir, "list: temp fs entries: %v", queuedEntries)
for _, queuedRemote := range queuedEntries { for _, queuedRemote := range queuedEntries {
queuedEntry, err := f.tempFs.NewObject(ctx, f.cleanRootFromPath(queuedRemote)) queuedEntry, err := f.tempFs.NewObject(f.cleanRootFromPath(queuedRemote))
if err != nil { if err != nil {
fs.Debugf(dir, "list: temp file not found in local fs: %v", err) fs.Debugf(dir, "list: temp file not found in local fs: %v", err)
continue continue
} }
co := ObjectFromOriginal(ctx, f, queuedEntry).persist() co := ObjectFromOriginal(f, queuedEntry).persist()
fs.Debugf(co, "list: cached temp object") fs.Debugf(co, "list: cached temp object")
cachedEntries = append(cachedEntries, co) cachedEntries = append(cachedEntries, co)
} }
@@ -1008,7 +1007,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
} }
// search from the source // search from the source
sourceEntries, err := f.Fs.List(ctx, dir) sourceEntries, err := f.Fs.List(dir)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -1046,11 +1045,11 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if i < tmpCnt && cachedEntries[i].Remote() == oRemote { if i < tmpCnt && cachedEntries[i].Remote() == oRemote {
continue continue
} }
co := ObjectFromOriginal(ctx, f, o).persist() co := ObjectFromOriginal(f, o).persist()
cachedEntries = append(cachedEntries, co) cachedEntries = append(cachedEntries, co)
fs.Debugf(dir, "list: cached object: %v", co) fs.Debugf(dir, "list: cached object: %v", co)
case fs.Directory: case fs.Directory:
cdd := DirectoryFromOriginal(ctx, f, o) cdd := DirectoryFromOriginal(f, o)
// check if the dir isn't expired and add it in cache if it isn't // check if the dir isn't expired and add it in cache if it isn't
if cdd2, err := f.cache.GetDir(cdd.abs()); err != nil || time.Now().Before(cdd2.CacheTs.Add(time.Duration(f.opt.InfoAge))) { if cdd2, err := f.cache.GetDir(cdd.abs()); err != nil || time.Now().Before(cdd2.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
batchDirectories = append(batchDirectories, cdd) batchDirectories = append(batchDirectories, cdd)
@@ -1080,8 +1079,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return cachedEntries, nil return cachedEntries, nil
} }
func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) error { func (f *Fs) recurse(dir string, list *walk.ListRHelper) error {
entries, err := f.List(ctx, dir) entries, err := f.List(dir)
if err != nil { if err != nil {
return err return err
} }
@@ -1089,7 +1088,7 @@ func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) er
for i := 0; i < len(entries); i++ { for i := 0; i < len(entries); i++ {
innerDir, ok := entries[i].(fs.Directory) innerDir, ok := entries[i].(fs.Directory)
if ok { if ok {
err := f.recurse(ctx, innerDir.Remote(), list) err := f.recurse(innerDir.Remote(), list)
if err != nil { if err != nil {
return err return err
} }
@@ -1106,21 +1105,21 @@ func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) er
// ListR lists the objects and directories of the Fs starting // ListR lists the objects and directories of the Fs starting
// from dir recursively into out. // from dir recursively into out.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
fs.Debugf(f, "list recursively from '%s'", dir) fs.Debugf(f, "list recursively from '%s'", dir)
// we check if the source FS supports ListR // we check if the source FS supports ListR
// if it does, we'll use that to get all the entries, cache them and return // if it does, we'll use that to get all the entries, cache them and return
do := f.Fs.Features().ListR do := f.Fs.Features().ListR
if do != nil { if do != nil {
return do(ctx, dir, func(entries fs.DirEntries) error { return do(dir, func(entries fs.DirEntries) error {
// we got called back with a set of entries so let's cache them and call the original callback // we got called back with a set of entries so let's cache them and call the original callback
for _, entry := range entries { for _, entry := range entries {
switch o := entry.(type) { switch o := entry.(type) {
case fs.Object: case fs.Object:
_ = f.cache.AddObject(ObjectFromOriginal(ctx, f, o)) _ = f.cache.AddObject(ObjectFromOriginal(f, o))
case fs.Directory: case fs.Directory:
_ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o)) _ = f.cache.AddDir(DirectoryFromOriginal(f, o))
default: default:
return errors.Errorf("Unknown object type %T", entry) return errors.Errorf("Unknown object type %T", entry)
} }
@@ -1133,7 +1132,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
// if we're here, we're gonna do a standard recursive traversal and cache everything // if we're here, we're gonna do a standard recursive traversal and cache everything
list := walk.NewListRHelper(callback) list := walk.NewListRHelper(callback)
err = f.recurse(ctx, dir, list) err = f.recurse(dir, list)
if err != nil { if err != nil {
return err return err
} }
@@ -1142,9 +1141,9 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
} }
// Mkdir makes the directory (container, bucket) // Mkdir makes the directory (container, bucket)
func (f *Fs) Mkdir(ctx context.Context, dir string) error { func (f *Fs) Mkdir(dir string) error {
fs.Debugf(f, "mkdir '%s'", dir) fs.Debugf(f, "mkdir '%s'", dir)
err := f.Fs.Mkdir(ctx, dir) err := f.Fs.Mkdir(dir)
if err != nil { if err != nil {
return err return err
} }
@@ -1172,7 +1171,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
} }
// Rmdir removes the directory (container, bucket) if empty // Rmdir removes the directory (container, bucket) if empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error { func (f *Fs) Rmdir(dir string) error {
fs.Debugf(f, "rmdir '%s'", dir) fs.Debugf(f, "rmdir '%s'", dir)
if f.opt.TempWritePath != "" { if f.opt.TempWritePath != "" {
@@ -1182,9 +1181,9 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// we check if the source exists on the remote and make the same move on it too if it does // we check if the source exists on the remote and make the same move on it too if it does
// otherwise, we skip this step // otherwise, we skip this step
_, err := f.UnWrap().List(ctx, dir) _, err := f.UnWrap().List(dir)
if err == nil { if err == nil {
err := f.Fs.Rmdir(ctx, dir) err := f.Fs.Rmdir(dir)
if err != nil { if err != nil {
return err return err
} }
@@ -1192,10 +1191,10 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
} }
var queuedEntries []*Object var queuedEntries []*Object
err = walk.ListR(ctx, f.tempFs, dir, true, -1, walk.ListObjects, func(entries fs.DirEntries) error { err = walk.ListR(f.tempFs, dir, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
for _, o := range entries { for _, o := range entries {
if oo, ok := o.(fs.Object); ok { if oo, ok := o.(fs.Object); ok {
co := ObjectFromOriginal(ctx, f, oo) co := ObjectFromOriginal(f, oo)
queuedEntries = append(queuedEntries, co) queuedEntries = append(queuedEntries, co)
} }
} }
@@ -1212,7 +1211,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
} }
} }
} else { } else {
err := f.Fs.Rmdir(ctx, dir) err := f.Fs.Rmdir(dir)
if err != nil { if err != nil {
return err return err
} }
@@ -1243,7 +1242,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// DirMove moves src, srcRemote to this remote at dstRemote // DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations. // using server side move operations.
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
fs.Debugf(f, "move dir '%s'/'%s' -> '%s'/'%s'", src.Root(), srcRemote, f.Root(), dstRemote) fs.Debugf(f, "move dir '%s'/'%s' -> '%s'/'%s'", src.Root(), srcRemote, f.Root(), dstRemote)
do := f.Fs.Features().DirMove do := f.Fs.Features().DirMove
@@ -1265,8 +1264,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
f.backgroundRunner.pause() f.backgroundRunner.pause()
defer f.backgroundRunner.play() defer f.backgroundRunner.play()
_, errInWrap := srcFs.UnWrap().List(ctx, srcRemote) _, errInWrap := srcFs.UnWrap().List(srcRemote)
_, errInTemp := f.tempFs.List(ctx, srcRemote) _, errInTemp := f.tempFs.List(srcRemote)
// not found in either fs // not found in either fs
if errInWrap != nil && errInTemp != nil { if errInWrap != nil && errInTemp != nil {
return fs.ErrorDirNotFound return fs.ErrorDirNotFound
@@ -1275,7 +1274,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// we check if the source exists on the remote and make the same move on it too if it does // we check if the source exists on the remote and make the same move on it too if it does
// otherwise, we skip this step // otherwise, we skip this step
if errInWrap == nil { if errInWrap == nil {
err := do(ctx, srcFs.UnWrap(), srcRemote, dstRemote) err := do(srcFs.UnWrap(), srcRemote, dstRemote)
if err != nil { if err != nil {
return err return err
} }
@@ -1288,10 +1287,10 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
} }
var queuedEntries []*Object var queuedEntries []*Object
err := walk.ListR(ctx, f.tempFs, srcRemote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error { err := walk.ListR(f.tempFs, srcRemote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
for _, o := range entries { for _, o := range entries {
if oo, ok := o.(fs.Object); ok { if oo, ok := o.(fs.Object); ok {
co := ObjectFromOriginal(ctx, f, oo) co := ObjectFromOriginal(f, oo)
queuedEntries = append(queuedEntries, co) queuedEntries = append(queuedEntries, co)
if co.tempFileStartedUpload() { if co.tempFileStartedUpload() {
fs.Errorf(co, "can't move - upload has already started. need to finish that") fs.Errorf(co, "can't move - upload has already started. need to finish that")
@@ -1312,16 +1311,16 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
fs.Errorf(srcRemote, "dirmove: can't move dir in temp fs") fs.Errorf(srcRemote, "dirmove: can't move dir in temp fs")
return fs.ErrorCantDirMove return fs.ErrorCantDirMove
} }
err = do(ctx, f.tempFs, srcRemote, dstRemote) err = do(f.tempFs, srcRemote, dstRemote)
if err != nil { if err != nil {
return err return err
} }
err = f.cache.ReconcileTempUploads(ctx, f) err = f.cache.ReconcileTempUploads(f)
if err != nil { if err != nil {
return err return err
} }
} else { } else {
err := do(ctx, srcFs.UnWrap(), srcRemote, dstRemote) err := do(srcFs.UnWrap(), srcRemote, dstRemote)
if err != nil { if err != nil {
return err return err
} }
@@ -1427,10 +1426,10 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i
} }
} }
type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
// put in to the remote path // put in to the remote path
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) { func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
var err error var err error
var obj fs.Object var obj fs.Object
@@ -1441,7 +1440,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
_ = f.cache.ExpireDir(parentCd) _ = f.cache.ExpireDir(parentCd)
f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory) f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
obj, err = f.tempFs.Put(ctx, in, src, options...) obj, err = f.tempFs.Put(in, src, options...)
if err != nil { if err != nil {
fs.Errorf(obj, "put: failed to upload in temp fs: %v", err) fs.Errorf(obj, "put: failed to upload in temp fs: %v", err)
return nil, err return nil, err
@@ -1456,14 +1455,14 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
// if cache writes is enabled write it first through cache // if cache writes is enabled write it first through cache
} else if f.opt.StoreWrites { } else if f.opt.StoreWrites {
f.cacheReader(in, src, func(inn io.Reader) { f.cacheReader(in, src, func(inn io.Reader) {
obj, err = put(ctx, inn, src, options...) obj, err = put(inn, src, options...)
}) })
if err == nil { if err == nil {
fs.Debugf(obj, "put: uploaded to remote fs and saved in cache") fs.Debugf(obj, "put: uploaded to remote fs and saved in cache")
} }
// last option: save it directly in remote fs // last option: save it directly in remote fs
} else { } else {
obj, err = put(ctx, in, src, options...) obj, err = put(in, src, options...)
if err == nil { if err == nil {
fs.Debugf(obj, "put: uploaded to remote fs") fs.Debugf(obj, "put: uploaded to remote fs")
} }
@@ -1475,7 +1474,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
} }
// cache the new file // cache the new file
cachedObj := ObjectFromOriginal(ctx, f, obj) cachedObj := ObjectFromOriginal(f, obj)
// deleting cached chunks and info to be replaced with new ones // deleting cached chunks and info to be replaced with new ones
_ = f.cache.RemoveObject(cachedObj.abs()) _ = f.cache.RemoveObject(cachedObj.abs())
@@ -1498,33 +1497,33 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
} }
// Put in to the remote path with the modTime given of the given size // Put in to the remote path with the modTime given of the given size
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
fs.Debugf(f, "put data at '%s'", src.Remote()) fs.Debugf(f, "put data at '%s'", src.Remote())
return f.put(ctx, in, src, options, f.Fs.Put) return f.put(in, src, options, f.Fs.Put)
} }
// PutUnchecked uploads the object // PutUnchecked uploads the object
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
do := f.Fs.Features().PutUnchecked do := f.Fs.Features().PutUnchecked
if do == nil { if do == nil {
return nil, errors.New("can't PutUnchecked") return nil, errors.New("can't PutUnchecked")
} }
fs.Debugf(f, "put data unchecked in '%s'", src.Remote()) fs.Debugf(f, "put data unchecked in '%s'", src.Remote())
return f.put(ctx, in, src, options, do) return f.put(in, src, options, do)
} }
// PutStream uploads the object // PutStream uploads the object
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
do := f.Fs.Features().PutStream do := f.Fs.Features().PutStream
if do == nil { if do == nil {
return nil, errors.New("can't PutStream") return nil, errors.New("can't PutStream")
} }
fs.Debugf(f, "put data streaming in '%s'", src.Remote()) fs.Debugf(f, "put data streaming in '%s'", src.Remote())
return f.put(ctx, in, src, options, do) return f.put(in, src, options, do)
} }
// Copy src to this remote using server side copy operations. // Copy src to this remote using server side copy operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
fs.Debugf(f, "copy obj '%s' -> '%s'", src, remote) fs.Debugf(f, "copy obj '%s' -> '%s'", src, remote)
do := f.Fs.Features().Copy do := f.Fs.Features().Copy
@@ -1544,7 +1543,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, fs.ErrorCantCopy return nil, fs.ErrorCantCopy
} }
// refresh from source or abort // refresh from source or abort
if err := srcObj.refreshFromSource(ctx, false); err != nil { if err := srcObj.refreshFromSource(false); err != nil {
fs.Errorf(f, "can't copy %v - %v", src, err) fs.Errorf(f, "can't copy %v - %v", src, err)
return nil, fs.ErrorCantCopy return nil, fs.ErrorCantCopy
} }
@@ -1563,7 +1562,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
} }
} }
obj, err := do(ctx, srcObj.Object, remote) obj, err := do(srcObj.Object, remote)
if err != nil { if err != nil {
fs.Errorf(srcObj, "error moving in cache: %v", err) fs.Errorf(srcObj, "error moving in cache: %v", err)
return nil, err return nil, err
@@ -1571,7 +1570,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(obj, "copy: file copied") fs.Debugf(obj, "copy: file copied")
// persist new // persist new
co := ObjectFromOriginal(ctx, f, obj).persist() co := ObjectFromOriginal(f, obj).persist()
fs.Debugf(co, "copy: added to cache") fs.Debugf(co, "copy: added to cache")
// expire the destination path // expire the destination path
parentCd := NewDirectory(f, cleanPath(path.Dir(co.Remote()))) parentCd := NewDirectory(f, cleanPath(path.Dir(co.Remote())))
@@ -1598,7 +1597,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
} }
// Move src to this remote using server side move operations. // Move src to this remote using server side move operations.
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
fs.Debugf(f, "moving obj '%s' -> %s", src, remote) fs.Debugf(f, "moving obj '%s' -> %s", src, remote)
// if source fs doesn't support move abort // if source fs doesn't support move abort
@@ -1619,7 +1618,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, fs.ErrorCantMove return nil, fs.ErrorCantMove
} }
// refresh from source or abort // refresh from source or abort
if err := srcObj.refreshFromSource(ctx, false); err != nil { if err := srcObj.refreshFromSource(false); err != nil {
fs.Errorf(f, "can't move %v - %v", src, err) fs.Errorf(f, "can't move %v - %v", src, err)
return nil, fs.ErrorCantMove return nil, fs.ErrorCantMove
} }
@@ -1655,7 +1654,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(srcObj, "move: queued file moved to %v", remote) fs.Debugf(srcObj, "move: queued file moved to %v", remote)
} }
obj, err := do(ctx, srcObj.Object, remote) obj, err := do(srcObj.Object, remote)
if err != nil { if err != nil {
fs.Errorf(srcObj, "error moving: %v", err) fs.Errorf(srcObj, "error moving: %v", err)
return nil, err return nil, err
@@ -1680,7 +1679,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// advertise to ChangeNotify if wrapped doesn't do that // advertise to ChangeNotify if wrapped doesn't do that
f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory) f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
// persist new // persist new
cachedObj := ObjectFromOriginal(ctx, f, obj).persist() cachedObj := ObjectFromOriginal(f, obj).persist()
fs.Debugf(cachedObj, "move: added to cache") fs.Debugf(cachedObj, "move: added to cache")
// expire new parent // expire new parent
parentCd = NewDirectory(f, cleanPath(path.Dir(cachedObj.Remote()))) parentCd = NewDirectory(f, cleanPath(path.Dir(cachedObj.Remote())))
@@ -1702,7 +1701,7 @@ func (f *Fs) Hashes() hash.Set {
} }
// Purge all files in the root and the root directory // Purge all files in the root and the root directory
func (f *Fs) Purge(ctx context.Context) error { func (f *Fs) Purge() error {
fs.Infof(f, "purging cache") fs.Infof(f, "purging cache")
f.cache.Purge() f.cache.Purge()
@@ -1711,7 +1710,7 @@ func (f *Fs) Purge(ctx context.Context) error {
return nil return nil
} }
err := do(ctx) err := do()
if err != nil { if err != nil {
return err return err
} }
@@ -1720,7 +1719,7 @@ func (f *Fs) Purge(ctx context.Context) error {
} }
// CleanUp the trash in the Fs // CleanUp the trash in the Fs
func (f *Fs) CleanUp(ctx context.Context) error { func (f *Fs) CleanUp() error {
f.CleanUpCache(false) f.CleanUpCache(false)
do := f.Fs.Features().CleanUp do := f.Fs.Features().CleanUp
@@ -1728,16 +1727,16 @@ func (f *Fs) CleanUp(ctx context.Context) error {
return nil return nil
} }
return do(ctx) return do()
} }
// About gets quota information from the Fs // About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { func (f *Fs) About() (*fs.Usage, error) {
do := f.Fs.Features().About do := f.Fs.Features().About
if do == nil { if do == nil {
return nil, errors.New("About not supported") return nil, errors.New("About not supported")
} }
return do(ctx) return do()
} }
// Stats returns stats about the cache storage // Stats returns stats about the cache storage
@@ -1864,24 +1863,6 @@ func cleanPath(p string) string {
return p return p
} }
// UserInfo returns info about the connected user
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
do := f.Fs.Features().UserInfo
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx)
}
// Disconnect the current user
func (f *Fs) Disconnect(ctx context.Context) error {
do := f.Fs.Features().Disconnect
if do == nil {
return fs.ErrorNotImplemented
}
return do(ctx)
}
// Check the interfaces are satisfied // Check the interfaces are satisfied
var ( var (
_ fs.Fs = (*Fs)(nil) _ fs.Fs = (*Fs)(nil)
@@ -1897,6 +1878,4 @@ var (
_ fs.ListRer = (*Fs)(nil) _ fs.ListRer = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil) _ fs.ChangeNotifier = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil) _ fs.Abouter = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
) )

View File

@@ -4,7 +4,6 @@ package cache_test
import ( import (
"bytes" "bytes"
"context"
"encoding/base64" "encoding/base64"
goflag "flag" goflag "flag"
"fmt" "fmt"
@@ -22,20 +21,19 @@ import (
"testing" "testing"
"time" "time"
"github.com/ncw/rclone/backend/cache"
"github.com/ncw/rclone/backend/crypt"
_ "github.com/ncw/rclone/backend/drive"
"github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/object"
"github.com/ncw/rclone/fs/rc"
"github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/vfs"
"github.com/ncw/rclone/vfs/vfsflags"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/backend/cache"
"github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/drive"
"github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@@ -122,7 +120,7 @@ func TestInternalListRootAndInnerRemotes(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
listRootInner, err := runInstance.list(t, rootFs, innerFolder) listRootInner, err := runInstance.list(t, rootFs, innerFolder)
require.NoError(t, err) require.NoError(t, err)
listInner, err := rootFs2.List(context.Background(), "") listInner, err := rootFs2.List("")
require.NoError(t, err) require.NoError(t, err)
require.Len(t, listRoot, 1) require.Len(t, listRoot, 1)
@@ -140,10 +138,10 @@ func TestInternalVfsCache(t *testing.T) {
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"}) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb) defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "test") err := rootFs.Mkdir("test")
require.NoError(t, err) require.NoError(t, err)
runInstance.writeObjectString(t, rootFs, "test/second", "content") runInstance.writeObjectString(t, rootFs, "test/second", "content")
_, err = rootFs.List(context.Background(), "test") _, err = rootFs.List("test")
require.NoError(t, err) require.NoError(t, err)
testReader := runInstance.randomReader(t, testSize) testReader := runInstance.randomReader(t, testSize)
@@ -268,7 +266,7 @@ func TestInternalObjNotFound(t *testing.T) {
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil) rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb) defer runInstance.cleanupFs(t, rootFs, boltDb)
obj, err := rootFs.NewObject(context.Background(), "404") obj, err := rootFs.NewObject("404")
require.Error(t, err) require.Error(t, err)
require.Nil(t, obj) require.Nil(t, obj)
} }
@@ -356,8 +354,8 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
testData2, err = base64.StdEncoding.DecodeString(cryptedText2Base64) testData2, err = base64.StdEncoding.DecodeString(cryptedText2Base64)
require.NoError(t, err) require.NoError(t, err)
} else { } else {
testData1 = []byte(random.String(100)) testData1 = []byte(fstest.RandomString(100))
testData2 = []byte(random.String(200)) testData2 = []byte(fstest.RandomString(200))
} }
// write the object // write the object
@@ -447,7 +445,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
log.Printf("original size: %v", originalSize) log.Printf("original size: %v", originalSize)
o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin")) o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
require.NoError(t, err) require.NoError(t, err)
expectedSize := int64(len([]byte("test content"))) expectedSize := int64(len([]byte("test content")))
var data2 []byte var data2 []byte
@@ -459,7 +457,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
data2 = []byte("test content") data2 = []byte("test content")
} }
objInfo := object.NewStaticObjectInfo(runInstance.encryptRemoteIfNeeded(t, "data.bin"), time.Now(), int64(len(data2)), true, nil, cfs.UnWrap()) objInfo := object.NewStaticObjectInfo(runInstance.encryptRemoteIfNeeded(t, "data.bin"), time.Now(), int64(len(data2)), true, nil, cfs.UnWrap())
err = o.Update(context.Background(), bytes.NewReader(data2), objInfo) err = o.Update(bytes.NewReader(data2), objInfo)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, int64(len(data2)), o.Size()) require.Equal(t, int64(len(data2)), o.Size())
log.Printf("updated size: %v", len(data2)) log.Printf("updated size: %v", len(data2))
@@ -505,9 +503,9 @@ func TestInternalMoveWithNotify(t *testing.T) {
} else { } else {
testData = []byte("test content") testData = []byte("test content")
} }
_ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test")) _ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test"))
_ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/one")) _ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/one"))
_ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/second")) _ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/second"))
srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData) srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData)
// list in mount // list in mount
@@ -517,7 +515,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
// move file // move file
_, err = cfs.UnWrap().Features().Move(context.Background(), srcObj, dstName) _, err = cfs.UnWrap().Features().Move(srcObj, dstName)
require.NoError(t, err) require.NoError(t, err)
err = runInstance.retryBlock(func() error { err = runInstance.retryBlock(func() error {
@@ -591,9 +589,9 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
} else { } else {
testData = []byte("test content") testData = []byte("test content")
} }
err = rootFs.Mkdir(context.Background(), "test") err = rootFs.Mkdir("test")
require.NoError(t, err) require.NoError(t, err)
err = rootFs.Mkdir(context.Background(), "test/one") err = rootFs.Mkdir("test/one")
require.NoError(t, err) require.NoError(t, err)
srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData) srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData)
@@ -610,7 +608,7 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
require.False(t, found) require.False(t, found)
// move file // move file
_, err = cfs.UnWrap().Features().Move(context.Background(), srcObj, dstName) _, err = cfs.UnWrap().Features().Move(srcObj, dstName)
require.NoError(t, err) require.NoError(t, err)
err = runInstance.retryBlock(func() error { err = runInstance.retryBlock(func() error {
@@ -672,23 +670,23 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData) runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
// update in the wrapped fs // update in the wrapped fs
o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin")) o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
require.NoError(t, err) require.NoError(t, err)
wrappedTime := time.Now().Add(-1 * time.Hour) wrappedTime := time.Now().Add(-1 * time.Hour)
err = o.SetModTime(context.Background(), wrappedTime) err = o.SetModTime(wrappedTime)
require.NoError(t, err) require.NoError(t, err)
// get a new instance from the cache // get a new instance from the cache
co, err := rootFs.NewObject(context.Background(), "data.bin") co, err := rootFs.NewObject("data.bin")
require.NoError(t, err) require.NoError(t, err)
require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String()) require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
cfs.DirCacheFlush() // flush the cache cfs.DirCacheFlush() // flush the cache
// get a new instance from the cache // get a new instance from the cache
co, err = rootFs.NewObject(context.Background(), "data.bin") co, err = rootFs.NewObject("data.bin")
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix()) require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix())
} }
func TestInternalChangeSeenAfterRc(t *testing.T) { func TestInternalChangeSeenAfterRc(t *testing.T) {
@@ -715,19 +713,19 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData) runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
// update in the wrapped fs // update in the wrapped fs
o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin")) o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
require.NoError(t, err) require.NoError(t, err)
wrappedTime := time.Now().Add(-1 * time.Hour) wrappedTime := time.Now().Add(-1 * time.Hour)
err = o.SetModTime(context.Background(), wrappedTime) err = o.SetModTime(wrappedTime)
require.NoError(t, err) require.NoError(t, err)
// get a new instance from the cache // get a new instance from the cache
co, err := rootFs.NewObject(context.Background(), "data.bin") co, err := rootFs.NewObject("data.bin")
require.NoError(t, err) require.NoError(t, err)
require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String()) require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
// Call the rc function // Call the rc function
m, err := cacheExpire.Fn(context.Background(), rc.Params{"remote": "data.bin"}) m, err := cacheExpire.Fn(rc.Params{"remote": "data.bin"})
require.NoError(t, err) require.NoError(t, err)
require.Contains(t, m, "status") require.Contains(t, m, "status")
require.Contains(t, m, "message") require.Contains(t, m, "message")
@@ -735,9 +733,9 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
require.Contains(t, m["message"], "cached file cleared") require.Contains(t, m["message"], "cached file cleared")
// get a new instance from the cache // get a new instance from the cache
co, err = rootFs.NewObject(context.Background(), "data.bin") co, err = rootFs.NewObject("data.bin")
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix()) require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix())
_, err = runInstance.list(t, rootFs, "") _, err = runInstance.list(t, rootFs, "")
require.NoError(t, err) require.NoError(t, err)
@@ -751,7 +749,7 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
require.Len(t, li1, 1) require.Len(t, li1, 1)
// Call the rc function // Call the rc function
m, err = cacheExpire.Fn(context.Background(), rc.Params{"remote": "/"}) m, err = cacheExpire.Fn(rc.Params{"remote": "/"})
require.NoError(t, err) require.NoError(t, err)
require.Contains(t, m, "status") require.Contains(t, m, "status")
require.Contains(t, m, "message") require.Contains(t, m, "message")
@@ -796,7 +794,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
// create some rand test data // create some rand test data
testData := randStringBytes(int(int64(totalChunks-1)*chunkSize + chunkSize/2)) testData := randStringBytes(int(int64(totalChunks-1)*chunkSize + chunkSize/2))
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData) runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
o, err := cfs.NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin")) o, err := cfs.NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
require.NoError(t, err) require.NoError(t, err)
co, ok := o.(*cache.Object) co, ok := o.(*cache.Object)
require.True(t, ok) require.True(t, ok)
@@ -835,7 +833,7 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
require.Len(t, l, 1) require.Len(t, l, 1)
err = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/third")) err = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/third"))
require.NoError(t, err) require.NoError(t, err)
l, err = runInstance.list(t, rootFs, "test") l, err = runInstance.list(t, rootFs, "test")
@@ -870,14 +868,14 @@ func TestInternalBug2117(t *testing.T) {
cfs, err := runInstance.getCacheFs(rootFs) cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err) require.NoError(t, err)
err = cfs.UnWrap().Mkdir(context.Background(), "test") err = cfs.UnWrap().Mkdir("test")
require.NoError(t, err) require.NoError(t, err)
for i := 1; i <= 4; i++ { for i := 1; i <= 4; i++ {
err = cfs.UnWrap().Mkdir(context.Background(), fmt.Sprintf("test/dir%d", i)) err = cfs.UnWrap().Mkdir(fmt.Sprintf("test/dir%d", i))
require.NoError(t, err) require.NoError(t, err)
for j := 1; j <= 4; j++ { for j := 1; j <= 4; j++ {
err = cfs.UnWrap().Mkdir(context.Background(), fmt.Sprintf("test/dir%d/dir%d", i, j)) err = cfs.UnWrap().Mkdir(fmt.Sprintf("test/dir%d/dir%d", i, j))
require.NoError(t, err) require.NoError(t, err)
runInstance.writeObjectString(t, cfs.UnWrap(), fmt.Sprintf("test/dir%d/dir%d/test.txt", i, j), "test") runInstance.writeObjectString(t, cfs.UnWrap(), fmt.Sprintf("test/dir%d/dir%d/test.txt", i, j), "test")
@@ -1082,10 +1080,10 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
} }
if purge { if purge {
_ = f.Features().Purge(context.Background()) _ = f.Features().Purge()
require.NoError(t, err) require.NoError(t, err)
} }
err = f.Mkdir(context.Background(), "") err = f.Mkdir("")
require.NoError(t, err) require.NoError(t, err)
if r.useMount && !r.isMounted { if r.useMount && !r.isMounted {
r.mountFs(t, f) r.mountFs(t, f)
@@ -1099,7 +1097,7 @@ func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
r.unmountFs(t, f) r.unmountFs(t, f)
} }
err := f.Features().Purge(context.Background()) err := f.Features().Purge()
require.NoError(t, err) require.NoError(t, err)
cfs, err := r.getCacheFs(f) cfs, err := r.getCacheFs(f)
require.NoError(t, err) require.NoError(t, err)
@@ -1201,7 +1199,7 @@ func (r *run) writeRemoteReader(t *testing.T, f fs.Fs, remote string, in io.Read
func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object { func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object {
in := bytes.NewReader(data) in := bytes.NewReader(data)
_ = r.writeObjectReader(t, f, remote, in) _ = r.writeObjectReader(t, f, remote, in)
o, err := f.NewObject(context.Background(), remote) o, err := f.NewObject(remote)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, int64(len(data)), o.Size()) require.Equal(t, int64(len(data)), o.Size())
return o return o
@@ -1210,7 +1208,7 @@ func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte
func (r *run) writeObjectReader(t *testing.T, f fs.Fs, remote string, in io.Reader) fs.Object { func (r *run) writeObjectReader(t *testing.T, f fs.Fs, remote string, in io.Reader) fs.Object {
modTime := time.Now() modTime := time.Now()
objInfo := object.NewStaticObjectInfo(remote, modTime, -1, true, nil, f) objInfo := object.NewStaticObjectInfo(remote, modTime, -1, true, nil, f)
obj, err := f.Put(context.Background(), in, objInfo) obj, err := f.Put(in, objInfo)
require.NoError(t, err) require.NoError(t, err)
if r.useMount { if r.useMount {
r.vfs.WaitForWriters(10 * time.Second) r.vfs.WaitForWriters(10 * time.Second)
@@ -1230,18 +1228,18 @@ func (r *run) updateObjectRemote(t *testing.T, f fs.Fs, remote string, data1 []b
err = ioutil.WriteFile(path.Join(r.mntDir, remote), data2, 0600) err = ioutil.WriteFile(path.Join(r.mntDir, remote), data2, 0600)
require.NoError(t, err) require.NoError(t, err)
r.vfs.WaitForWriters(10 * time.Second) r.vfs.WaitForWriters(10 * time.Second)
obj, err = f.NewObject(context.Background(), remote) obj, err = f.NewObject(remote)
} else { } else {
in1 := bytes.NewReader(data1) in1 := bytes.NewReader(data1)
in2 := bytes.NewReader(data2) in2 := bytes.NewReader(data2)
objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f) objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f) objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
obj, err = f.Put(context.Background(), in1, objInfo1) obj, err = f.Put(in1, objInfo1)
require.NoError(t, err) require.NoError(t, err)
obj, err = f.NewObject(context.Background(), remote) obj, err = f.NewObject(remote)
require.NoError(t, err) require.NoError(t, err)
err = obj.Update(context.Background(), in2, objInfo2) err = obj.Update(in2, objInfo2)
} }
require.NoError(t, err) require.NoError(t, err)
@@ -1270,7 +1268,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
return checkSample, err return checkSample, err
} }
} else { } else {
co, err := f.NewObject(context.Background(), remote) co, err := f.NewObject(remote)
if err != nil { if err != nil {
return checkSample, err return checkSample, err
} }
@@ -1285,7 +1283,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
func (r *run) readDataFromObj(t *testing.T, o fs.Object, offset, end int64, noLengthCheck bool) []byte { func (r *run) readDataFromObj(t *testing.T, o fs.Object, offset, end int64, noLengthCheck bool) []byte {
size := end - offset size := end - offset
checkSample := make([]byte, size) checkSample := make([]byte, size)
reader, err := o.Open(context.Background(), &fs.SeekOption{Offset: offset}) reader, err := o.Open(&fs.SeekOption{Offset: offset})
require.NoError(t, err) require.NoError(t, err)
totalRead, err := io.ReadFull(reader, checkSample) totalRead, err := io.ReadFull(reader, checkSample)
if (err == io.EOF || err == io.ErrUnexpectedEOF) && noLengthCheck { if (err == io.EOF || err == io.ErrUnexpectedEOF) && noLengthCheck {
@@ -1302,7 +1300,7 @@ func (r *run) mkdir(t *testing.T, f fs.Fs, remote string) {
if r.useMount { if r.useMount {
err = os.Mkdir(path.Join(r.mntDir, remote), 0700) err = os.Mkdir(path.Join(r.mntDir, remote), 0700)
} else { } else {
err = f.Mkdir(context.Background(), remote) err = f.Mkdir(remote)
} }
require.NoError(t, err) require.NoError(t, err)
} }
@@ -1314,11 +1312,11 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
err = os.Remove(path.Join(r.mntDir, remote)) err = os.Remove(path.Join(r.mntDir, remote))
} else { } else {
var obj fs.Object var obj fs.Object
obj, err = f.NewObject(context.Background(), remote) obj, err = f.NewObject(remote)
if err != nil { if err != nil {
err = f.Rmdir(context.Background(), remote) err = f.Rmdir(remote)
} else { } else {
err = obj.Remove(context.Background()) err = obj.Remove()
} }
} }
@@ -1336,7 +1334,7 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error)
} }
} else { } else {
var list fs.DirEntries var list fs.DirEntries
list, err = f.List(context.Background(), remote) list, err = f.List(remote)
for _, ll := range list { for _, ll := range list {
l = append(l, ll) l = append(l, ll)
} }
@@ -1355,7 +1353,7 @@ func (r *run) listPath(t *testing.T, f fs.Fs, remote string) []string {
} }
} else { } else {
var list fs.DirEntries var list fs.DirEntries
list, err = f.List(context.Background(), remote) list, err = f.List(remote)
for _, ll := range list { for _, ll := range list {
l = append(l, ll.Remote()) l = append(l, ll.Remote())
} }
@@ -1395,7 +1393,7 @@ func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
} }
r.vfs.WaitForWriters(10 * time.Second) r.vfs.WaitForWriters(10 * time.Second)
} else if rootFs.Features().DirMove != nil { } else if rootFs.Features().DirMove != nil {
err = rootFs.Features().DirMove(context.Background(), rootFs, src, dst) err = rootFs.Features().DirMove(rootFs, src, dst)
if err != nil { if err != nil {
return err return err
} }
@@ -1417,11 +1415,11 @@ func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
} }
r.vfs.WaitForWriters(10 * time.Second) r.vfs.WaitForWriters(10 * time.Second)
} else if rootFs.Features().Move != nil { } else if rootFs.Features().Move != nil {
obj1, err := rootFs.NewObject(context.Background(), src) obj1, err := rootFs.NewObject(src)
if err != nil { if err != nil {
return err return err
} }
_, err = rootFs.Features().Move(context.Background(), obj1, dst) _, err = rootFs.Features().Move(obj1, dst)
if err != nil { if err != nil {
return err return err
} }
@@ -1443,11 +1441,11 @@ func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
} }
r.vfs.WaitForWriters(10 * time.Second) r.vfs.WaitForWriters(10 * time.Second)
} else if rootFs.Features().Copy != nil { } else if rootFs.Features().Copy != nil {
obj, err := rootFs.NewObject(context.Background(), src) obj, err := rootFs.NewObject(src)
if err != nil { if err != nil {
return err return err
} }
_, err = rootFs.Features().Copy(context.Background(), obj, dst) _, err = rootFs.Features().Copy(obj, dst)
if err != nil { if err != nil {
return err return err
} }
@@ -1469,11 +1467,11 @@ func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error)
} }
return fi.ModTime(), nil return fi.ModTime(), nil
} }
obj1, err := rootFs.NewObject(context.Background(), src) obj1, err := rootFs.NewObject(src)
if err != nil { if err != nil {
return time.Time{}, err return time.Time{}, err
} }
return obj1.ModTime(context.Background()), nil return obj1.ModTime(), nil
} }
func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) { func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
@@ -1486,7 +1484,7 @@ func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
} }
return fi.Size(), nil return fi.Size(), nil
} }
obj1, err := rootFs.NewObject(context.Background(), src) obj1, err := rootFs.NewObject(src)
if err != nil { if err != nil {
return int64(0), err return int64(0), err
} }
@@ -1509,14 +1507,14 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
_, err = f.WriteString(data + append) _, err = f.WriteString(data + append)
} else { } else {
var obj1 fs.Object var obj1 fs.Object
obj1, err = rootFs.NewObject(context.Background(), src) obj1, err = rootFs.NewObject(src)
if err != nil { if err != nil {
return err return err
} }
data1 := []byte(data + append) data1 := []byte(data + append)
r := bytes.NewReader(data1) r := bytes.NewReader(data1)
objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs) objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
err = obj1.Update(context.Background(), r, objInfo1) err = obj1.Update(r, objInfo1)
} }
return err return err

View File

@@ -9,9 +9,9 @@ import (
"bazil.org/fuse" "bazil.org/fuse"
fusefs "bazil.org/fuse/fs" fusefs "bazil.org/fuse/fs"
"github.com/rclone/rclone/cmd/mount" "github.com/ncw/rclone/cmd/mount"
"github.com/rclone/rclone/cmd/mountlib" "github.com/ncw/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs" "github.com/ncw/rclone/fs"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )

View File

@@ -9,10 +9,10 @@ import (
"time" "time"
"github.com/billziss-gh/cgofuse/fuse" "github.com/billziss-gh/cgofuse/fuse"
"github.com/ncw/rclone/cmd/cmount"
"github.com/ncw/rclone/cmd/mountlib"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/cmd/cmount"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )

View File

@@ -7,18 +7,15 @@ package cache_test
import ( import (
"testing" "testing"
"github.com/rclone/rclone/backend/cache" "github.com/ncw/rclone/backend/cache"
_ "github.com/rclone/rclone/backend/local" _ "github.com/ncw/rclone/backend/local"
"github.com/rclone/rclone/fstest/fstests" "github.com/ncw/rclone/fstest/fstests"
) )
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) { func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
RemoteName: "TestCache:", RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil), NilObject: (*cache.Object)(nil),
UnimplementableFsMethods: []string{"PublicLink", "MergeDirs", "OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
}) })
} }

View File

@@ -3,7 +3,6 @@
package cache_test package cache_test
import ( import (
"context"
"fmt" "fmt"
"math/rand" "math/rand"
"os" "os"
@@ -12,9 +11,9 @@ import (
"testing" "testing"
"time" "time"
"github.com/rclone/rclone/backend/cache" "github.com/ncw/rclone/backend/cache"
_ "github.com/rclone/rclone/backend/drive" _ "github.com/ncw/rclone/backend/drive"
"github.com/rclone/rclone/fs" "github.com/ncw/rclone/fs"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@@ -86,11 +85,11 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"}) map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
defer runInstance.cleanupFs(t, rootFs, boltDb) defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "one") err := rootFs.Mkdir("one")
require.NoError(t, err) require.NoError(t, err)
err = rootFs.Mkdir(context.Background(), "one/test") err = rootFs.Mkdir("one/test")
require.NoError(t, err) require.NoError(t, err)
err = rootFs.Mkdir(context.Background(), "second") err = rootFs.Mkdir("second")
require.NoError(t, err) require.NoError(t, err)
// create some rand test data // create some rand test data
@@ -123,11 +122,11 @@ func TestInternalUploadTempPathCleaned(t *testing.T) {
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"}) map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
defer runInstance.cleanupFs(t, rootFs, boltDb) defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "one") err := rootFs.Mkdir("one")
require.NoError(t, err) require.NoError(t, err)
err = rootFs.Mkdir(context.Background(), "one/test") err = rootFs.Mkdir("one/test")
require.NoError(t, err) require.NoError(t, err)
err = rootFs.Mkdir(context.Background(), "second") err = rootFs.Mkdir("second")
require.NoError(t, err) require.NoError(t, err)
// create some rand test data // create some rand test data
@@ -166,7 +165,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"}) map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
defer runInstance.cleanupFs(t, rootFs, boltDb) defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "test") err := rootFs.Mkdir("test")
require.NoError(t, err) require.NoError(t, err)
minSize := 5242880 minSize := 5242880
maxSize := 10485760 maxSize := 10485760
@@ -234,9 +233,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
err = runInstance.dirMove(t, rootFs, "test", "second") err = runInstance.dirMove(t, rootFs, "test", "second")
if err != errNotSupported { if err != errNotSupported {
require.NoError(t, err) require.NoError(t, err)
_, err = rootFs.NewObject(context.Background(), "test/one") _, err = rootFs.NewObject("test/one")
require.Error(t, err) require.Error(t, err)
_, err = rootFs.NewObject(context.Background(), "second/one") _, err = rootFs.NewObject("second/one")
require.NoError(t, err) require.NoError(t, err)
// validate that it exists in temp fs // validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -257,7 +256,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
err = runInstance.rm(t, rootFs, "test") err = runInstance.rm(t, rootFs, "test")
require.Error(t, err) require.Error(t, err)
require.Contains(t, err.Error(), "directory not empty") require.Contains(t, err.Error(), "directory not empty")
_, err = rootFs.NewObject(context.Background(), "test/one") _, err = rootFs.NewObject("test/one")
require.NoError(t, err) require.NoError(t, err)
// validate that it exists in temp fs // validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -271,9 +270,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
if err != errNotSupported { if err != errNotSupported {
require.NoError(t, err) require.NoError(t, err)
// try to read from it // try to read from it
_, err = rootFs.NewObject(context.Background(), "test/one") _, err = rootFs.NewObject("test/one")
require.Error(t, err) require.Error(t, err)
_, err = rootFs.NewObject(context.Background(), "test/second") _, err = rootFs.NewObject("test/second")
require.NoError(t, err) require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false) data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err) require.NoError(t, err)
@@ -290,9 +289,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third")) err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
if err != errNotSupported { if err != errNotSupported {
require.NoError(t, err) require.NoError(t, err)
_, err = rootFs.NewObject(context.Background(), "test/one") _, err = rootFs.NewObject("test/one")
require.NoError(t, err) require.NoError(t, err)
_, err = rootFs.NewObject(context.Background(), "test/third") _, err = rootFs.NewObject("test/third")
require.NoError(t, err) require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false) data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err) require.NoError(t, err)
@@ -307,7 +306,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
// test Remove -- allowed // test Remove -- allowed
err = runInstance.rm(t, rootFs, "test/one") err = runInstance.rm(t, rootFs, "test/one")
require.NoError(t, err) require.NoError(t, err)
_, err = rootFs.NewObject(context.Background(), "test/one") _, err = rootFs.NewObject("test/one")
require.Error(t, err) require.Error(t, err)
// validate that it doesn't exist in temp fs // validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -319,7 +318,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated") err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
require.NoError(t, err) require.NoError(t, err)
obj2, err := rootFs.NewObject(context.Background(), "test/one") obj2, err := rootFs.NewObject("test/one")
require.NoError(t, err) require.NoError(t, err)
data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false) data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
require.Equal(t, "one content updated", string(data2)) require.Equal(t, "one content updated", string(data2))
@@ -367,7 +366,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
err = runInstance.dirMove(t, rootFs, "test", "second") err = runInstance.dirMove(t, rootFs, "test", "second")
if err != errNotSupported { if err != errNotSupported {
require.Error(t, err) require.Error(t, err)
_, err = rootFs.NewObject(context.Background(), "test/one") _, err = rootFs.NewObject("test/one")
require.NoError(t, err) require.NoError(t, err)
// validate that it exists in temp fs // validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -379,7 +378,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
// test Rmdir // test Rmdir
err = runInstance.rm(t, rootFs, "test") err = runInstance.rm(t, rootFs, "test")
require.Error(t, err) require.Error(t, err)
_, err = rootFs.NewObject(context.Background(), "test/one") _, err = rootFs.NewObject("test/one")
require.NoError(t, err) require.NoError(t, err)
// validate that it doesn't exist in temp fs // validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -390,9 +389,9 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
if err != errNotSupported { if err != errNotSupported {
require.Error(t, err) require.Error(t, err)
// try to read from it // try to read from it
_, err = rootFs.NewObject(context.Background(), "test/one") _, err = rootFs.NewObject("test/one")
require.NoError(t, err) require.NoError(t, err)
_, err = rootFs.NewObject(context.Background(), "test/second") _, err = rootFs.NewObject("test/second")
require.Error(t, err) require.Error(t, err)
// validate that it exists in temp fs // validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -405,9 +404,9 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third")) err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
if err != errNotSupported { if err != errNotSupported {
require.NoError(t, err) require.NoError(t, err)
_, err = rootFs.NewObject(context.Background(), "test/one") _, err = rootFs.NewObject("test/one")
require.NoError(t, err) require.NoError(t, err)
_, err = rootFs.NewObject(context.Background(), "test/third") _, err = rootFs.NewObject("test/third")
require.NoError(t, err) require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false) data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err) require.NoError(t, err)
@@ -422,7 +421,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
// test Remove // test Remove
err = runInstance.rm(t, rootFs, "test/one") err = runInstance.rm(t, rootFs, "test/one")
require.Error(t, err) require.Error(t, err)
_, err = rootFs.NewObject(context.Background(), "test/one") _, err = rootFs.NewObject("test/one")
require.NoError(t, err) require.NoError(t, err)
// validate that it doesn't exist in temp fs // validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one"))) _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))

View File

@@ -3,11 +3,10 @@
package cache package cache
import ( import (
"context"
"path" "path"
"time" "time"
"github.com/rclone/rclone/fs" "github.com/ncw/rclone/fs"
) )
// Directory is a generic dir that stores basic information about it // Directory is a generic dir that stores basic information about it
@@ -56,7 +55,7 @@ func ShallowDirectory(f *Fs, remote string) *Directory {
} }
// DirectoryFromOriginal builds one from a generic fs.Directory // DirectoryFromOriginal builds one from a generic fs.Directory
func DirectoryFromOriginal(ctx context.Context, f *Fs, d fs.Directory) *Directory { func DirectoryFromOriginal(f *Fs, d fs.Directory) *Directory {
var cd *Directory var cd *Directory
fullRemote := path.Join(f.Root(), d.Remote()) fullRemote := path.Join(f.Root(), d.Remote())
@@ -68,7 +67,7 @@ func DirectoryFromOriginal(ctx context.Context, f *Fs, d fs.Directory) *Director
CacheFs: f, CacheFs: f,
Name: name, Name: name,
Dir: dir, Dir: dir,
CacheModTime: d.ModTime(ctx).UnixNano(), CacheModTime: d.ModTime().UnixNano(),
CacheSize: d.Size(), CacheSize: d.Size(),
CacheItems: d.Items(), CacheItems: d.Items(),
CacheType: "Directory", CacheType: "Directory",
@@ -111,7 +110,7 @@ func (d *Directory) parentRemote() string {
} }
// ModTime returns the cached ModTime // ModTime returns the cached ModTime
func (d *Directory) ModTime(ctx context.Context) time.Time { func (d *Directory) ModTime() time.Time {
return time.Unix(0, d.CacheModTime) return time.Unix(0, d.CacheModTime)
} }

View File

@@ -3,7 +3,6 @@
package cache package cache
import ( import (
"context"
"fmt" "fmt"
"io" "io"
"path" "path"
@@ -12,9 +11,9 @@ import (
"sync" "sync"
"time" "time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/operations"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
) )
var uploaderMap = make(map[string]*backgroundWriter) var uploaderMap = make(map[string]*backgroundWriter)
@@ -41,7 +40,6 @@ func initBackgroundUploader(fs *Fs) (*backgroundWriter, error) {
// Handle is managing the read/write/seek operations on an open handle // Handle is managing the read/write/seek operations on an open handle
type Handle struct { type Handle struct {
ctx context.Context
cachedObject *Object cachedObject *Object
cfs *Fs cfs *Fs
memory *Memory memory *Memory
@@ -60,9 +58,8 @@ type Handle struct {
} }
// NewObjectHandle returns a new Handle for an existing Object // NewObjectHandle returns a new Handle for an existing Object
func NewObjectHandle(ctx context.Context, o *Object, cfs *Fs) *Handle { func NewObjectHandle(o *Object, cfs *Fs) *Handle {
r := &Handle{ r := &Handle{
ctx: ctx,
cachedObject: o, cachedObject: o,
cfs: cfs, cfs: cfs,
offset: 0, offset: 0,
@@ -354,7 +351,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
r := w.rc r := w.rc
if w.rc == nil { if w.rc == nil {
r, err = w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) { r, err = w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
return w.r.cachedObject.Object.Open(w.r.ctx, &fs.RangeOption{Start: offset, End: end - 1}) return w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1})
}) })
if err != nil { if err != nil {
return nil, err return nil, err
@@ -364,7 +361,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
if !closeOpen { if !closeOpen {
if do, ok := r.(fs.RangeSeeker); ok { if do, ok := r.(fs.RangeSeeker); ok {
_, err = do.RangeSeek(w.r.ctx, offset, io.SeekStart, end-offset) _, err = do.RangeSeek(offset, io.SeekStart, end-offset)
return r, err return r, err
} else if do, ok := r.(io.Seeker); ok { } else if do, ok := r.(io.Seeker); ok {
_, err = do.Seek(offset, io.SeekStart) _, err = do.Seek(offset, io.SeekStart)
@@ -374,7 +371,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
_ = w.rc.Close() _ = w.rc.Close()
return w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) { return w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
r, err = w.r.cachedObject.Object.Open(w.r.ctx, &fs.RangeOption{Start: offset, End: end - 1}) r, err = w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1})
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -452,7 +449,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
// we seem to be getting only errors so we abort // we seem to be getting only errors so we abort
if err != nil { if err != nil {
fs.Errorf(w, "object open failed %v: %v", chunkStart, err) fs.Errorf(w, "object open failed %v: %v", chunkStart, err)
err = w.r.cachedObject.refreshFromSource(w.r.ctx, true) err = w.r.cachedObject.refreshFromSource(true)
if err != nil { if err != nil {
fs.Errorf(w, "%v", err) fs.Errorf(w, "%v", err)
} }
@@ -465,7 +462,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
sourceRead, err = io.ReadFull(w.rc, data) sourceRead, err = io.ReadFull(w.rc, data)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err) fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err)
err = w.r.cachedObject.refreshFromSource(w.r.ctx, true) err = w.r.cachedObject.refreshFromSource(true)
if err != nil { if err != nil {
fs.Errorf(w, "%v", err) fs.Errorf(w, "%v", err)
} }
@@ -591,7 +588,7 @@ func (b *backgroundWriter) run() {
remote := b.fs.cleanRootFromPath(absPath) remote := b.fs.cleanRootFromPath(absPath)
b.notify(remote, BackgroundUploadStarted, nil) b.notify(remote, BackgroundUploadStarted, nil)
fs.Infof(remote, "background upload: started upload") fs.Infof(remote, "background upload: started upload")
err = operations.MoveFile(context.TODO(), b.fs.UnWrap(), b.fs.tempFs, remote, remote) err = operations.MoveFile(b.fs.UnWrap(), b.fs.tempFs, remote, remote)
if err != nil { if err != nil {
b.notify(remote, BackgroundUploadError, err) b.notify(remote, BackgroundUploadError, err)
_ = b.fs.cache.rollbackPendingUpload(absPath) _ = b.fs.cache.rollbackPendingUpload(absPath)
@@ -601,14 +598,14 @@ func (b *backgroundWriter) run() {
// clean empty dirs up to root // clean empty dirs up to root
thisDir := cleanPath(path.Dir(remote)) thisDir := cleanPath(path.Dir(remote))
for thisDir != "" { for thisDir != "" {
thisList, err := b.fs.tempFs.List(context.TODO(), thisDir) thisList, err := b.fs.tempFs.List(thisDir)
if err != nil { if err != nil {
break break
} }
if len(thisList) > 0 { if len(thisList) > 0 {
break break
} }
err = b.fs.tempFs.Rmdir(context.TODO(), thisDir) err = b.fs.tempFs.Rmdir(thisDir)
fs.Debugf(thisDir, "cleaned from temp path") fs.Debugf(thisDir, "cleaned from temp path")
if err != nil { if err != nil {
break break

View File

@@ -3,16 +3,15 @@
package cache package cache
import ( import (
"context"
"io" "io"
"path" "path"
"sync" "sync"
"time" "time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/readers"
) )
const ( const (
@@ -69,7 +68,7 @@ func NewObject(f *Fs, remote string) *Object {
} }
// ObjectFromOriginal builds one from a generic fs.Object // ObjectFromOriginal builds one from a generic fs.Object
func ObjectFromOriginal(ctx context.Context, f *Fs, o fs.Object) *Object { func ObjectFromOriginal(f *Fs, o fs.Object) *Object {
var co *Object var co *Object
fullRemote := cleanPath(path.Join(f.Root(), o.Remote())) fullRemote := cleanPath(path.Join(f.Root(), o.Remote()))
dir, name := path.Split(fullRemote) dir, name := path.Split(fullRemote)
@@ -93,13 +92,13 @@ func ObjectFromOriginal(ctx context.Context, f *Fs, o fs.Object) *Object {
CacheType: cacheType, CacheType: cacheType,
CacheTs: time.Now(), CacheTs: time.Now(),
} }
co.updateData(ctx, o) co.updateData(o)
return co return co
} }
func (o *Object) updateData(ctx context.Context, source fs.Object) { func (o *Object) updateData(source fs.Object) {
o.Object = source o.Object = source
o.CacheModTime = source.ModTime(ctx).UnixNano() o.CacheModTime = source.ModTime().UnixNano()
o.CacheSize = source.Size() o.CacheSize = source.Size()
o.CacheStorable = source.Storable() o.CacheStorable = source.Storable()
o.CacheTs = time.Now() o.CacheTs = time.Now()
@@ -131,20 +130,20 @@ func (o *Object) abs() string {
} }
// ModTime returns the cached ModTime // ModTime returns the cached ModTime
func (o *Object) ModTime(ctx context.Context) time.Time { func (o *Object) ModTime() time.Time {
_ = o.refresh(ctx) _ = o.refresh()
return time.Unix(0, o.CacheModTime) return time.Unix(0, o.CacheModTime)
} }
// Size returns the cached Size // Size returns the cached Size
func (o *Object) Size() int64 { func (o *Object) Size() int64 {
_ = o.refresh(context.TODO()) _ = o.refresh()
return o.CacheSize return o.CacheSize
} }
// Storable returns the cached Storable // Storable returns the cached Storable
func (o *Object) Storable() bool { func (o *Object) Storable() bool {
_ = o.refresh(context.TODO()) _ = o.refresh()
return o.CacheStorable return o.CacheStorable
} }
@@ -152,18 +151,18 @@ func (o *Object) Storable() bool {
// all these conditions must be true to ignore a refresh // all these conditions must be true to ignore a refresh
// 1. cache ts didn't expire yet // 1. cache ts didn't expire yet
// 2. is not pending a notification from the wrapped fs // 2. is not pending a notification from the wrapped fs
func (o *Object) refresh(ctx context.Context) error { func (o *Object) refresh() error {
isNotified := o.CacheFs.isNotifiedRemote(o.Remote()) isNotified := o.CacheFs.isNotifiedRemote(o.Remote())
isExpired := time.Now().After(o.CacheTs.Add(time.Duration(o.CacheFs.opt.InfoAge))) isExpired := time.Now().After(o.CacheTs.Add(time.Duration(o.CacheFs.opt.InfoAge)))
if !isExpired && !isNotified { if !isExpired && !isNotified {
return nil return nil
} }
return o.refreshFromSource(ctx, true) return o.refreshFromSource(true)
} }
// refreshFromSource requests the original FS for the object in case it comes from a cached entry // refreshFromSource requests the original FS for the object in case it comes from a cached entry
func (o *Object) refreshFromSource(ctx context.Context, force bool) error { func (o *Object) refreshFromSource(force bool) error {
o.refreshMutex.Lock() o.refreshMutex.Lock()
defer o.refreshMutex.Unlock() defer o.refreshMutex.Unlock()
var err error var err error
@@ -173,29 +172,29 @@ func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
return nil return nil
} }
if o.isTempFile() { if o.isTempFile() {
liveObject, err = o.ParentFs.NewObject(ctx, o.Remote()) liveObject, err = o.ParentFs.NewObject(o.Remote())
err = errors.Wrapf(err, "in parent fs %v", o.ParentFs) err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
} else { } else {
liveObject, err = o.CacheFs.Fs.NewObject(ctx, o.Remote()) liveObject, err = o.CacheFs.Fs.NewObject(o.Remote())
err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs) err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
} }
if err != nil { if err != nil {
fs.Errorf(o, "error refreshing object in : %v", err) fs.Errorf(o, "error refreshing object in : %v", err)
return err return err
} }
o.updateData(ctx, liveObject) o.updateData(liveObject)
o.persist() o.persist()
return nil return nil
} }
// SetModTime sets the ModTime of this object // SetModTime sets the ModTime of this object
func (o *Object) SetModTime(ctx context.Context, t time.Time) error { func (o *Object) SetModTime(t time.Time) error {
if err := o.refreshFromSource(ctx, false); err != nil { if err := o.refreshFromSource(false); err != nil {
return err return err
} }
err := o.Object.SetModTime(ctx, t) err := o.Object.SetModTime(t)
if err != nil { if err != nil {
return err return err
} }
@@ -208,19 +207,19 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
} }
// Open is used to request a specific part of the file using fs.RangeOption // Open is used to request a specific part of the file using fs.RangeOption
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) { func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
var err error var err error
if o.Object == nil { if o.Object == nil {
err = o.refreshFromSource(ctx, true) err = o.refreshFromSource(true)
} else { } else {
err = o.refresh(ctx) err = o.refresh()
} }
if err != nil { if err != nil {
return nil, err return nil, err
} }
cacheReader := NewObjectHandle(ctx, o, o.CacheFs) cacheReader := NewObjectHandle(o, o.CacheFs)
var offset, limit int64 = 0, -1 var offset, limit int64 = 0, -1
for _, option := range options { for _, option := range options {
switch x := option.(type) { switch x := option.(type) {
@@ -239,8 +238,8 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
} }
// Update will change the object data // Update will change the object data
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
if err := o.refreshFromSource(ctx, false); err != nil { if err := o.refreshFromSource(false); err != nil {
return err return err
} }
// pause background uploads if active // pause background uploads if active
@@ -255,7 +254,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
fs.Debugf(o, "updating object contents with size %v", src.Size()) fs.Debugf(o, "updating object contents with size %v", src.Size())
// FIXME use reliable upload // FIXME use reliable upload
err := o.Object.Update(ctx, in, src, options...) err := o.Object.Update(in, src, options...)
if err != nil { if err != nil {
fs.Errorf(o, "error updating source: %v", err) fs.Errorf(o, "error updating source: %v", err)
return err return err
@@ -266,7 +265,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// advertise to ChangeNotify if wrapped doesn't do that // advertise to ChangeNotify if wrapped doesn't do that
o.CacheFs.notifyChangeUpstreamIfNeeded(o.Remote(), fs.EntryObject) o.CacheFs.notifyChangeUpstreamIfNeeded(o.Remote(), fs.EntryObject)
o.CacheModTime = src.ModTime(ctx).UnixNano() o.CacheModTime = src.ModTime().UnixNano()
o.CacheSize = src.Size() o.CacheSize = src.Size()
o.CacheHashes = make(map[hash.Type]string) o.CacheHashes = make(map[hash.Type]string)
o.CacheTs = time.Now() o.CacheTs = time.Now()
@@ -276,8 +275,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} }
// Remove deletes the object from both the cache and the source // Remove deletes the object from both the cache and the source
func (o *Object) Remove(ctx context.Context) error { func (o *Object) Remove() error {
if err := o.refreshFromSource(ctx, false); err != nil { if err := o.refreshFromSource(false); err != nil {
return err return err
} }
// pause background uploads if active // pause background uploads if active
@@ -289,7 +288,7 @@ func (o *Object) Remove(ctx context.Context) error {
return errors.Errorf("%v is currently uploading, can't delete", o) return errors.Errorf("%v is currently uploading, can't delete", o)
} }
} }
err := o.Object.Remove(ctx) err := o.Object.Remove()
if err != nil { if err != nil {
return err return err
} }
@@ -307,8 +306,8 @@ func (o *Object) Remove(ctx context.Context) error {
// Hash requests a hash of the object and stores in the cache // Hash requests a hash of the object and stores in the cache
// since it might or might not be called, this is lazy loaded // since it might or might not be called, this is lazy loaded
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) { func (o *Object) Hash(ht hash.Type) (string, error) {
_ = o.refresh(ctx) _ = o.refresh()
if o.CacheHashes == nil { if o.CacheHashes == nil {
o.CacheHashes = make(map[hash.Type]string) o.CacheHashes = make(map[hash.Type]string)
} }
@@ -317,10 +316,10 @@ func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
if found { if found {
return cachedHash, nil return cachedHash, nil
} }
if err := o.refreshFromSource(ctx, false); err != nil { if err := o.refreshFromSource(false); err != nil {
return "", err return "", err
} }
liveHash, err := o.Object.Hash(ctx, ht) liveHash, err := o.Object.Hash(ht)
if err != nil { if err != nil {
return "", err return "", err
} }

View File

@@ -14,8 +14,8 @@ import (
"sync" "sync"
"time" "time"
"github.com/ncw/rclone/fs"
cache "github.com/patrickmn/go-cache" cache "github.com/patrickmn/go-cache"
"github.com/rclone/rclone/fs"
"golang.org/x/net/websocket" "golang.org/x/net/websocket"
) )

View File

@@ -7,9 +7,9 @@ import (
"strings" "strings"
"time" "time"
"github.com/ncw/rclone/fs"
cache "github.com/patrickmn/go-cache" cache "github.com/patrickmn/go-cache"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
) )
// Memory is a wrapper of transient storage for a go-cache store // Memory is a wrapper of transient storage for a go-cache store

View File

@@ -4,7 +4,6 @@ package cache
import ( import (
"bytes" "bytes"
"context"
"encoding/binary" "encoding/binary"
"encoding/json" "encoding/json"
"fmt" "fmt"
@@ -16,10 +15,10 @@ import (
"sync" "sync"
"time" "time"
bolt "github.com/etcd-io/bbolt" bolt "github.com/coreos/bbolt"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/walk"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/walk"
) )
// Constants // Constants
@@ -1015,7 +1014,7 @@ func (b *Persistent) SetPendingUploadToStarted(remote string) error {
} }
// ReconcileTempUploads will recursively look for all the files in the temp directory and add them to the queue // ReconcileTempUploads will recursively look for all the files in the temp directory and add them to the queue
func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) error { func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
return b.db.Update(func(tx *bolt.Tx) error { return b.db.Update(func(tx *bolt.Tx) error {
_ = tx.DeleteBucket([]byte(tempBucket)) _ = tx.DeleteBucket([]byte(tempBucket))
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket)) bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
@@ -1024,7 +1023,7 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) erro
} }
var queuedEntries []fs.Object var queuedEntries []fs.Object
err = walk.ListR(ctx, cacheFs.tempFs, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error { err = walk.ListR(cacheFs.tempFs, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
for _, o := range entries { for _, o := range entries {
if oo, ok := o.(fs.Object); ok { if oo, ok := o.(fs.Object); ok {
queuedEntries = append(queuedEntries, oo) queuedEntries = append(queuedEntries, oo)

File diff suppressed because it is too large Load Diff

View File

@@ -1,691 +0,0 @@
package chunker
import (
"bytes"
"context"
"flag"
"fmt"
"io/ioutil"
"path"
"regexp"
"strings"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Command line flags
var (
UploadKilobytes = flag.Int("upload-kilobytes", 0, "Upload size in Kilobytes, set this to test large uploads")
)
// test that chunking does not break large uploads
func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
t.Run(fmt.Sprintf("PutLarge%dk", kilobytes), func(t *testing.T) {
fstests.TestPutLarge(context.Background(), t, f, &fstest.Item{
ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
Path: fmt.Sprintf("chunker-upload-%dk", kilobytes),
Size: int64(kilobytes) * int64(fs.KibiByte),
})
})
}
// test chunk name parser
func testChunkNameFormat(t *testing.T, f *Fs) {
saveOpt := f.opt
defer func() {
// restore original settings (f is pointer, f.opt is struct)
f.opt = saveOpt
_ = f.setChunkNameFormat(f.opt.NameFormat)
}()
assertFormat := func(pattern, wantDataFormat, wantCtrlFormat, wantNameRegexp string) {
err := f.setChunkNameFormat(pattern)
assert.NoError(t, err)
assert.Equal(t, wantDataFormat, f.dataNameFmt)
assert.Equal(t, wantCtrlFormat, f.ctrlNameFmt)
assert.Equal(t, wantNameRegexp, f.nameRegexp.String())
}
assertFormatValid := func(pattern string) {
err := f.setChunkNameFormat(pattern)
assert.NoError(t, err)
}
assertFormatInvalid := func(pattern string) {
err := f.setChunkNameFormat(pattern)
assert.Error(t, err)
}
assertMakeName := func(wantChunkName, mainName string, chunkNo int, ctrlType, xactID string) {
gotChunkName := ""
assert.NotPanics(t, func() {
gotChunkName = f.makeChunkName(mainName, chunkNo, ctrlType, xactID)
}, "makeChunkName(%q,%d,%q,%q) must not panic", mainName, chunkNo, ctrlType, xactID)
if gotChunkName != "" {
assert.Equal(t, wantChunkName, gotChunkName)
}
}
assertMakeNamePanics := func(mainName string, chunkNo int, ctrlType, xactID string) {
assert.Panics(t, func() {
_ = f.makeChunkName(mainName, chunkNo, ctrlType, xactID)
}, "makeChunkName(%q,%d,%q,%q) should panic", mainName, chunkNo, ctrlType, xactID)
}
assertParseName := func(fileName, wantMainName string, wantChunkNo int, wantCtrlType, wantXactID string) {
gotMainName, gotChunkNo, gotCtrlType, gotXactID := f.parseChunkName(fileName)
assert.Equal(t, wantMainName, gotMainName)
assert.Equal(t, wantChunkNo, gotChunkNo)
assert.Equal(t, wantCtrlType, gotCtrlType)
assert.Equal(t, wantXactID, gotXactID)
}
const newFormatSupported = false // support for patterns not starting with base name (*)
// valid formats
assertFormat(`*.rclone_chunk.###`, `%s.rclone_chunk.%03d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]{3,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*.rclone_chunk.#`, `%s.rclone_chunk.%d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*_chunk_#####`, `%s_chunk_%05d`, `%s_chunk__%s`, `^(.+?)_chunk_(?:([0-9]{5,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*-chunk-#`, `%s-chunk-%d`, `%s-chunk-_%s`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*-chunk-#-%^$()[]{}.+-!?:\`, `%s-chunk-%d-%%^$()[]{}.+-!?:\`, `%s-chunk-_%s-%%^$()[]{}.+-!?:\`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))-%\^\$\(\)\[\]\{\}\.\+-!\?:\\(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
if newFormatSupported {
assertFormat(`_*-chunk-##,`, `_%s-chunk-%02d,`, `_%s-chunk-_%s,`, `^_(.+?)-chunk-(?:([0-9]{2,})|_([a-z][a-z0-9]{2,6})),(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
}
// invalid formats
assertFormatInvalid(`chunk-#`)
assertFormatInvalid(`*-chunk`)
assertFormatInvalid(`*-*-chunk-#`)
assertFormatInvalid(`*-chunk-#-#`)
assertFormatInvalid(`#-chunk-*`)
assertFormatInvalid(`*/#`)
assertFormatValid(`*#`)
assertFormatInvalid(`**#`)
assertFormatInvalid(`#*`)
assertFormatInvalid(``)
assertFormatInvalid(`-`)
// quick tests
if newFormatSupported {
assertFormat(`part_*_#`, `part_%s_%d`, `part_%s__%s`, `^part_(.+?)_(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9][0-9a-z]{3,8})\.\.tmp_([0-9]{10,13}))?$`)
f.opt.StartFrom = 1
assertMakeName(`part_fish_1`, "fish", 0, "", "")
assertParseName(`part_fish_43`, "fish", 42, "", "")
assertMakeName(`part_fish__locks`, "fish", -2, "locks", "")
assertParseName(`part_fish__locks`, "fish", -1, "locks", "")
assertMakeName(`part_fish__x2y`, "fish", -2, "x2y", "")
assertParseName(`part_fish__x2y`, "fish", -1, "x2y", "")
assertMakeName(`part_fish_3_0004`, "fish", 2, "", "4")
assertParseName(`part_fish_4_0005`, "fish", 3, "", "0005")
assertMakeName(`part_fish__blkinfo_jj5fvo3wr`, "fish", -3, "blkinfo", "jj5fvo3wr")
assertParseName(`part_fish__blkinfo_zz9fvo3wr`, "fish", -1, "blkinfo", "zz9fvo3wr")
// old-style temporary suffix (parse only)
assertParseName(`part_fish_4..tmp_0000000011`, "fish", 3, "", "000b")
assertParseName(`part_fish__blkinfo_jj5fvo3wr`, "fish", -1, "blkinfo", "jj5fvo3wr")
}
// prepare format for long tests
assertFormat(`*.chunk.###`, `%s.chunk.%03d`, `%s.chunk._%s`, `^(.+?)\.chunk\.(?:([0-9]{3,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
f.opt.StartFrom = 2
// valid data chunks
assertMakeName(`fish.chunk.003`, "fish", 1, "", "")
assertParseName(`fish.chunk.003`, "fish", 1, "", "")
assertMakeName(`fish.chunk.021`, "fish", 19, "", "")
assertParseName(`fish.chunk.021`, "fish", 19, "", "")
// valid temporary data chunks
assertMakeName(`fish.chunk.011_4321`, "fish", 9, "", "4321")
assertParseName(`fish.chunk.011_4321`, "fish", 9, "", "4321")
assertMakeName(`fish.chunk.011_00bc`, "fish", 9, "", "00bc")
assertParseName(`fish.chunk.011_00bc`, "fish", 9, "", "00bc")
assertMakeName(`fish.chunk.1916_5jjfvo3wr`, "fish", 1914, "", "5jjfvo3wr")
assertParseName(`fish.chunk.1916_5jjfvo3wr`, "fish", 1914, "", "5jjfvo3wr")
assertMakeName(`fish.chunk.1917_zz9fvo3wr`, "fish", 1915, "", "zz9fvo3wr")
assertParseName(`fish.chunk.1917_zz9fvo3wr`, "fish", 1915, "", "zz9fvo3wr")
// valid temporary data chunks (old temporary suffix, only parse)
assertParseName(`fish.chunk.004..tmp_0000000047`, "fish", 2, "", "001b")
assertParseName(`fish.chunk.323..tmp_9994567890123`, "fish", 321, "", "3jjfvo3wr")
// parsing invalid data chunk names
assertParseName(`fish.chunk.3`, "", -1, "", "")
assertParseName(`fish.chunk.001`, "", -1, "", "")
assertParseName(`fish.chunk.21`, "", -1, "", "")
assertParseName(`fish.chunk.-21`, "", -1, "", "")
assertParseName(`fish.chunk.004abcd`, "", -1, "", "") // missing underscore delimiter
assertParseName(`fish.chunk.004__1234`, "", -1, "", "") // extra underscore delimiter
assertParseName(`fish.chunk.004_123`, "", -1, "", "") // too short temporary suffix
assertParseName(`fish.chunk.004_1234567890`, "", -1, "", "") // too long temporary suffix
assertParseName(`fish.chunk.004_-1234`, "", -1, "", "") // temporary suffix must be positive
assertParseName(`fish.chunk.004_123E`, "", -1, "", "") // uppercase not allowed
assertParseName(`fish.chunk.004_12.3`, "", -1, "", "") // punctuation not allowed
// parsing invalid data chunk names (old temporary suffix)
assertParseName(`fish.chunk.004.tmp_0000000021`, "", -1, "", "")
assertParseName(`fish.chunk.003..tmp_123456789`, "", -1, "", "")
assertParseName(`fish.chunk.003..tmp_012345678901234567890123456789`, "", -1, "", "")
assertParseName(`fish.chunk.323..tmp_12345678901234`, "", -1, "", "")
assertParseName(`fish.chunk.003..tmp_-1`, "", -1, "", "")
// valid control chunks
assertMakeName(`fish.chunk._info`, "fish", -1, "info", "")
assertMakeName(`fish.chunk._locks`, "fish", -2, "locks", "")
assertMakeName(`fish.chunk._blkinfo`, "fish", -3, "blkinfo", "")
assertMakeName(`fish.chunk._x2y`, "fish", -4, "x2y", "")
assertParseName(`fish.chunk._info`, "fish", -1, "info", "")
assertParseName(`fish.chunk._locks`, "fish", -1, "locks", "")
assertParseName(`fish.chunk._blkinfo`, "fish", -1, "blkinfo", "")
assertParseName(`fish.chunk._x2y`, "fish", -1, "x2y", "")
// valid temporary control chunks
assertMakeName(`fish.chunk._info_0001`, "fish", -1, "info", "1")
assertMakeName(`fish.chunk._locks_4321`, "fish", -2, "locks", "4321")
assertMakeName(`fish.chunk._uploads_abcd`, "fish", -3, "uploads", "abcd")
assertMakeName(`fish.chunk._blkinfo_xyzabcdef`, "fish", -4, "blkinfo", "xyzabcdef")
assertMakeName(`fish.chunk._x2y_1aaa`, "fish", -5, "x2y", "1aaa")
assertParseName(`fish.chunk._info_0001`, "fish", -1, "info", "0001")
assertParseName(`fish.chunk._locks_4321`, "fish", -1, "locks", "4321")
assertParseName(`fish.chunk._uploads_9abc`, "fish", -1, "uploads", "9abc")
assertParseName(`fish.chunk._blkinfo_xyzabcdef`, "fish", -1, "blkinfo", "xyzabcdef")
assertParseName(`fish.chunk._x2y_1aaa`, "fish", -1, "x2y", "1aaa")
// valid temporary control chunks (old temporary suffix, parse only)
assertParseName(`fish.chunk._info..tmp_0000000047`, "fish", -1, "info", "001b")
assertParseName(`fish.chunk._locks..tmp_0000054321`, "fish", -1, "locks", "15wx")
assertParseName(`fish.chunk._uploads..tmp_0000000000`, "fish", -1, "uploads", "0000")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123`, "fish", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._x2y..tmp_0000000000`, "fish", -1, "x2y", "0000")
// parsing invalid control chunk names
assertParseName(`fish.chunk.metadata`, "", -1, "", "") // must be prepended by underscore
assertParseName(`fish.chunk.info`, "", -1, "", "")
assertParseName(`fish.chunk.locks`, "", -1, "", "")
assertParseName(`fish.chunk.uploads`, "", -1, "", "")
assertParseName(`fish.chunk._os`, "", -1, "", "") // too short
assertParseName(`fish.chunk._metadata`, "", -1, "", "") // too long
assertParseName(`fish.chunk._blockinfo`, "", -1, "", "") // way too long
assertParseName(`fish.chunk._4me`, "", -1, "", "") // cannot start with digit
assertParseName(`fish.chunk._567`, "", -1, "", "") // cannot be all digits
assertParseName(`fish.chunk._me_ta`, "", -1, "", "") // punctuation not allowed
assertParseName(`fish.chunk._in-fo`, "", -1, "", "")
assertParseName(`fish.chunk._.bin`, "", -1, "", "")
assertParseName(`fish.chunk._.2xy`, "", -1, "", "")
// parsing invalid temporary control chunks
assertParseName(`fish.chunk._blkinfo1234`, "", -1, "", "") // missing underscore delimiter
assertParseName(`fish.chunk._info__1234`, "", -1, "", "") // extra underscore delimiter
assertParseName(`fish.chunk._info_123`, "", -1, "", "") // too short temporary suffix
assertParseName(`fish.chunk._info_1234567890`, "", -1, "", "") // too long temporary suffix
assertParseName(`fish.chunk._info_-1234`, "", -1, "", "") // temporary suffix must be positive
assertParseName(`fish.chunk._info_123E`, "", -1, "", "") // uppercase not allowed
assertParseName(`fish.chunk._info_12.3`, "", -1, "", "") // punctuation not allowed
assertParseName(`fish.chunk._locks..tmp_123456789`, "", -1, "", "")
assertParseName(`fish.chunk._meta..tmp_-1`, "", -1, "", "")
assertParseName(`fish.chunk._blockinfo..tmp_012345678901234567890123456789`, "", -1, "", "")
// short control chunk names: 3 letters ok, 1-2 letters not allowed
assertMakeName(`fish.chunk._ext`, "fish", -1, "ext", "")
assertParseName(`fish.chunk._int`, "fish", -1, "int", "")
assertMakeNamePanics("fish", -1, "in", "")
assertMakeNamePanics("fish", -1, "up", "4")
assertMakeNamePanics("fish", -1, "x", "")
assertMakeNamePanics("fish", -1, "c", "1z")
assertMakeName(`fish.chunk._ext_0000`, "fish", -1, "ext", "0")
assertMakeName(`fish.chunk._ext_0026`, "fish", -1, "ext", "26")
assertMakeName(`fish.chunk._int_0abc`, "fish", -1, "int", "abc")
assertMakeName(`fish.chunk._int_9xyz`, "fish", -1, "int", "9xyz")
assertMakeName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
assertMakeName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
assertParseName(`fish.chunk._ext_0000`, "fish", -1, "ext", "0000")
assertParseName(`fish.chunk._ext_0026`, "fish", -1, "ext", "0026")
assertParseName(`fish.chunk._int_0abc`, "fish", -1, "int", "0abc")
assertParseName(`fish.chunk._int_9xyz`, "fish", -1, "int", "9xyz")
assertParseName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
assertParseName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
// base file name can sometimes look like a valid chunk name
assertParseName(`fish.chunk.003.chunk.004`, "fish.chunk.003", 2, "", "")
assertParseName(`fish.chunk.003.chunk._info`, "fish.chunk.003", -1, "info", "")
assertParseName(`fish.chunk.003.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk._info.chunk.004`, "fish.chunk._info", 2, "", "")
assertParseName(`fish.chunk._info.chunk._info`, "fish.chunk._info", -1, "info", "")
assertParseName(`fish.chunk._info.chunk._info.chunk._Meta`, "", -1, "", "")
// base file name looking like a valid chunk name (old temporary suffix)
assertParseName(`fish.chunk.003.chunk.005..tmp_0000000022`, "fish.chunk.003", 3, "", "000m")
assertParseName(`fish.chunk.003.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._info.chunk.005..tmp_0000000023`, "fish.chunk._info", 3, "", "000n")
assertParseName(`fish.chunk._info.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk.003.chunk._blkinfo..tmp_9994567890123`, "fish.chunk.003", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._info.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._info", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.004`, "fish.chunk.004..tmp_0000000021", 2, "", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.005..tmp_0000000025`, "fish.chunk.004..tmp_0000000021", 3, "", "000p")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._info`, "fish.chunk.004..tmp_0000000021", -1, "info", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._blkinfo..tmp_9994567890123`, "fish.chunk.004..tmp_0000000021", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk.004`, "fish.chunk._blkinfo..tmp_9994567890123", 2, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk.005..tmp_0000000026`, "fish.chunk._blkinfo..tmp_9994567890123", 3, "", "000q")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info`, "fish.chunk._blkinfo..tmp_9994567890123", -1, "info", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._blkinfo..tmp_9994567890123", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk.004`, "fish.chunk._blkinfo..tmp_1234567890123456789", 2, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk.005..tmp_0000000022`, "fish.chunk._blkinfo..tmp_1234567890123456789", 3, "", "000m")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info`, "fish.chunk._blkinfo..tmp_1234567890123456789", -1, "info", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._blkinfo..tmp_1234567890123456789", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")
// attempts to make invalid chunk names
assertMakeNamePanics("fish", -1, "", "") // neither data nor control
assertMakeNamePanics("fish", 0, "info", "") // both data and control
assertMakeNamePanics("fish", -1, "metadata", "") // control type too long
assertMakeNamePanics("fish", -1, "blockinfo", "") // control type way too long
assertMakeNamePanics("fish", -1, "2xy", "") // first digit not allowed
assertMakeNamePanics("fish", -1, "123", "") // all digits not allowed
assertMakeNamePanics("fish", -1, "Meta", "") // only lower case letters allowed
assertMakeNamePanics("fish", -1, "in-fo", "") // punctuation not allowed
assertMakeNamePanics("fish", -1, "_info", "")
assertMakeNamePanics("fish", -1, "info_", "")
assertMakeNamePanics("fish", -2, ".bind", "")
assertMakeNamePanics("fish", -2, "bind.", "")
assertMakeNamePanics("fish", -1, "", "1") // neither data nor control
assertMakeNamePanics("fish", 0, "info", "23") // both data and control
assertMakeNamePanics("fish", -1, "metadata", "45") // control type too long
assertMakeNamePanics("fish", -1, "blockinfo", "7") // control type way too long
assertMakeNamePanics("fish", -1, "2xy", "abc") // first digit not allowed
assertMakeNamePanics("fish", -1, "123", "def") // all digits not allowed
assertMakeNamePanics("fish", -1, "Meta", "mnk") // only lower case letters allowed
assertMakeNamePanics("fish", -1, "in-fo", "xyz") // punctuation not allowed
assertMakeNamePanics("fish", -1, "_info", "5678")
assertMakeNamePanics("fish", -1, "info_", "999")
assertMakeNamePanics("fish", -2, ".bind", "0")
assertMakeNamePanics("fish", -2, "bind.", "0")
assertMakeNamePanics("fish", 0, "", "1234567890") // temporary suffix too long
assertMakeNamePanics("fish", 0, "", "123F4") // uppercase not allowed
assertMakeNamePanics("fish", 0, "", "123.") // punctuation not allowed
assertMakeNamePanics("fish", 0, "", "_123")
}
// testSmallFileInternals verifies the internal structure of objects
// created from small (non-chunked) files, including the empty file,
// for each metadata mode: "none", forced by consistent hashing, and
// the default meta object.
func testSmallFileInternals(t *testing.T, f *Fs) {
	const dir = "small"
	ctx := context.Background()
	saveOpt := f.opt
	defer func() {
		// restore options and clean up even if the test fails
		f.opt.FailHard = false
		_ = operations.Purge(ctx, f.base, dir)
		f.opt = saveOpt
	}()
	f.opt.FailHard = false
	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")

	// checkSmallFileInternals asserts that the object's internal
	// fields (main and chunks) match the configured meta/hash mode.
	checkSmallFileInternals := func(obj fs.Object) {
		assert.NotNil(t, obj)
		o, ok := obj.(*Object)
		assert.True(t, ok)
		assert.NotNil(t, o)
		if o == nil {
			return
		}
		switch {
		case !f.useMeta:
			// If meta format is "none", non-chunked file (even empty)
			// internally is a single chunk without meta object.
			assert.Nil(t, o.main)
			assert.True(t, o.isComposite()) // sorry, sometimes a name is misleading
			assert.Equal(t, 1, len(o.chunks))
		case f.hashAll:
			// Consistent hashing forces meta object on small files too
			assert.NotNil(t, o.main)
			assert.True(t, o.isComposite())
			assert.Equal(t, 1, len(o.chunks))
		default:
			// normally non-chunked file is kept in the Object's main field
			assert.NotNil(t, o.main)
			assert.False(t, o.isComposite())
			assert.Equal(t, 0, len(o.chunks))
		}
	}

	// checkContents reads the object back and compares with the
	// expected contents.
	checkContents := func(obj fs.Object, contents string) {
		assert.NotNil(t, obj)
		assert.Equal(t, int64(len(contents)), obj.Size())

		r, err := obj.Open(ctx)
		assert.NoError(t, err)
		assert.NotNil(t, r)
		if r == nil {
			return
		}
		data, err := ioutil.ReadAll(r)
		assert.NoError(t, err)
		assert.Equal(t, contents, string(data))
		_ = r.Close()
	}

	// checkHashsum asserts that a hashsum is available when the
	// chunker runs in consistent hashing mode (hashAll).
	checkHashsum := func(obj fs.Object) {
		var ht hash.Type
		switch {
		case !f.hashAll:
			return
		case f.useMD5:
			ht = hash.MD5
		case f.useSHA1:
			ht = hash.SHA1
		default:
			return
		}
		// even empty files must have hashsum in consistent mode
		sum, err := obj.Hash(ctx, ht)
		assert.NoError(t, err)
		assert.NotEqual(t, sum, "")
	}

	// checkSmallFile uploads a small file and verifies internals,
	// contents and hashsum for both the object returned by Put and
	// the one returned by a fresh NewObject lookup.
	checkSmallFile := func(name, contents string) {
		filename := path.Join(dir, name)
		item := fstest.Item{Path: filename, ModTime: modTime}
		_, put := fstests.PutTestContents(ctx, t, f, &item, contents, false)
		assert.NotNil(t, put)
		checkSmallFileInternals(put)
		checkContents(put, contents)
		checkHashsum(put)

		// objects returned by Put and NewObject must have similar structure
		obj, err := f.NewObject(ctx, filename)
		assert.NoError(t, err)
		assert.NotNil(t, obj)
		checkSmallFileInternals(obj)
		checkContents(obj, contents)
		checkHashsum(obj)

		_ = obj.Remove(ctx)
		_ = put.Remove(ctx) // for good
	}

	checkSmallFile("emptyfile", "")
	checkSmallFile("smallfile", "Ok")
}
// testPreventCorruption verifies that in strict (FailHard) mode the
// chunker refuses operations that would collide with the chunk names
// of an existing composite file and thus corrupt it.
func testPreventCorruption(t *testing.T, f *Fs) {
	if f.opt.ChunkSize > 50 {
		t.Skip("this test requires small chunks")
	}
	const dir = "corrupted"
	ctx := context.Background()
	saveOpt := f.opt
	defer func() {
		// restore options and clean up even if the test fails
		f.opt.FailHard = false
		_ = operations.Purge(ctx, f.base, dir)
		f.opt = saveOpt
	}()
	f.opt.FailHard = true
	contents := random.String(250)
	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")

	const overlapMessage = "chunk overlap"
	// assertOverlapError asserts that err reports a chunk overlap.
	assertOverlapError := func(err error) {
		assert.Error(t, err)
		if err != nil {
			assert.Contains(t, err.Error(), overlapMessage)
		}
	}
	// newFile uploads a test file under dir and returns its object.
	newFile := func(name string) fs.Object {
		item := fstest.Item{Path: path.Join(dir, name), ModTime: modTime}
		_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
		require.NotNil(t, obj)
		return obj
	}
	billyObj := newFile("billy")

	// billyChunkName returns the name of a data chunk of billy.
	billyChunkName := func(chunkNo int) string {
		return f.makeChunkName(billyObj.Remote(), chunkNo, "", "")
	}

	// operations targeting chunk names of an existing file must fail
	err := f.Mkdir(ctx, billyChunkName(1))
	assertOverlapError(err)

	_, err = f.Move(ctx, newFile("silly1"), billyChunkName(2))
	assert.Error(t, err)
	assert.True(t, err == fs.ErrorCantMove || (err != nil && strings.Contains(err.Error(), overlapMessage)))

	_, err = f.Copy(ctx, newFile("silly2"), billyChunkName(3))
	assert.Error(t, err)
	assert.True(t, err == fs.ErrorCantCopy || (err != nil && strings.Contains(err.Error(), overlapMessage)))

	// accessing chunks in strict mode is prohibited
	f.opt.FailHard = true
	billyChunk4Name := billyChunkName(4)
	billyChunk4, err := f.NewObject(ctx, billyChunk4Name)
	assertOverlapError(err)

	// ... but allowed when FailHard is off
	f.opt.FailHard = false
	billyChunk4, err = f.NewObject(ctx, billyChunk4Name)
	assert.NoError(t, err)
	require.NotNil(t, billyChunk4)

	f.opt.FailHard = true
	_, err = f.Put(ctx, bytes.NewBufferString(contents), billyChunk4)
	assertOverlapError(err)

	// you can freely read chunks (if you have an object)
	r, err := billyChunk4.Open(ctx)
	assert.NoError(t, err)
	var chunkContents []byte
	assert.NotPanics(t, func() {
		chunkContents, err = ioutil.ReadAll(r)
		_ = r.Close()
	})
	assert.NoError(t, err)
	assert.NotEqual(t, contents, string(chunkContents))

	// but you can't change them
	err = billyChunk4.Update(ctx, bytes.NewBufferString(contents), newFile("silly3"))
	assertOverlapError(err)

	// Remove isn't special, you can't corrupt files even if you have an object
	err = billyChunk4.Remove(ctx)
	assertOverlapError(err)

	// recreate billy in case it was anyhow corrupted
	willyObj := newFile("willy")
	willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", "")
	f.opt.FailHard = false
	willyChunk, err := f.NewObject(ctx, willyChunkName)
	f.opt.FailHard = true
	assert.NoError(t, err)
	require.NotNil(t, willyChunk)

	_, err = operations.Copy(ctx, f, willyChunk, willyChunkName, newFile("silly4"))
	assertOverlapError(err)

	// operations.Move will return error when chunker's Move refused
	// to corrupt target file, but reverts to copy/delete method
	// still trying to delete target chunk. Chunker must come to rescue.
	_, err = operations.Move(ctx, f, willyChunk, willyChunkName, newFile("silly5"))
	assertOverlapError(err)

	// the chunk must still be readable after the failed attacks
	r, err = willyChunk.Open(ctx)
	assert.NoError(t, err)
	assert.NotPanics(t, func() {
		_, err = ioutil.ReadAll(r)
		_ = r.Close()
	})
	assert.NoError(t, err)
}
// testChunkNumberOverflow verifies behaviour when a stray chunk with
// a huge chunk number sits next to a normal composite file: object
// lookup must fail, and in strict mode listing must fail too instead
// of silently producing a corrupted view.
func testChunkNumberOverflow(t *testing.T, f *Fs) {
	if f.opt.ChunkSize > 50 {
		t.Skip("this test requires small chunks")
	}
	const dir = "wreaked"
	const wreakNumber = 10200300
	ctx := context.Background()
	saveOpt := f.opt
	defer func() {
		// restore options and clean up even if the test fails
		f.opt.FailHard = false
		_ = operations.Purge(ctx, f.base, dir)
		f.opt = saveOpt
	}()
	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
	contents := random.String(100)

	// newFile uploads a small file via the given Fs (either the
	// chunker f or its base) and returns the object and its path.
	newFile := func(f fs.Fs, name string) (fs.Object, string) {
		filename := path.Join(dir, name)
		item := fstest.Item{Path: filename, ModTime: modTime}
		_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
		require.NotNil(t, obj)
		return obj, filename
	}

	f.opt.FailHard = false
	file, fileName := newFile(f, "wreaker")
	// plant a chunk with an overflowing number directly on the base remote
	wreak, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", ""))

	f.opt.FailHard = false
	fstest.CheckListingWithRoot(t, f, dir, nil, nil, f.Precision())
	_, err := f.NewObject(ctx, fileName)
	assert.Error(t, err)

	f.opt.FailHard = true
	_, err = f.List(ctx, dir)
	assert.Error(t, err)
	_, err = f.NewObject(ctx, fileName)
	assert.Error(t, err)

	f.opt.FailHard = false
	_ = wreak.Remove(ctx)
	_ = file.Remove(ctx)
}
// testMetadataInput checks how the chunker handles crafted metadata
// objects: today's format, a past (version 1) format and a possible
// future format with unknown fields. In every case the file must be
// seen as a forced composite object with correct size and contents.
func testMetadataInput(t *testing.T, f *Fs) {
	const minChunkForTest = 50
	if f.opt.ChunkSize < minChunkForTest {
		t.Skip("this test requires chunks that fit metadata")
	}
	const dir = "usermeta"
	ctx := context.Background()
	saveOpt := f.opt
	defer func() {
		// restore options and clean up even if the test fails
		f.opt.FailHard = false
		_ = operations.Purge(ctx, f.base, dir)
		f.opt = saveOpt
	}()
	f.opt.FailHard = false
	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")

	// putFile uploads contents under name via the given Fs and
	// asserts the upload succeeded.
	putFile := func(f fs.Fs, name, contents, message string, check bool) fs.Object {
		item := fstest.Item{Path: name, ModTime: modTime}
		_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
		assert.NotNil(t, obj, message)
		return obj
	}

	// runSubtest plants a data chunk on the base remote, uploads the
	// given metadata as the file body and verifies the resulting
	// composite object.
	runSubtest := func(contents, name string) {
		description := fmt.Sprintf("file with %s metadata", name)
		filename := path.Join(dir, name)
		require.True(t, len(contents) > 2 && len(contents) < minChunkForTest, description+" test data is correct")

		part := putFile(f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
		_ = putFile(f, filename, contents, "upload "+description, false)

		obj, err := f.NewObject(ctx, filename)
		assert.NoError(t, err, "access "+description)
		assert.NotNil(t, obj)
		assert.Equal(t, int64(len(contents)), obj.Size(), "size "+description)

		o, ok := obj.(*Object)
		// Fixed: the original used assert.NotNil(t, ok), which always
		// passes because a bool is never nil; assert.True is intended.
		assert.True(t, ok)
		if o != nil {
			assert.True(t, o.isComposite() && len(o.chunks) == 1, description+" is forced composite")
		}

		defer func() {
			_ = obj.Remove(ctx)
			_ = part.Remove(ctx)
		}()

		r, err := obj.Open(ctx)
		assert.NoError(t, err, "open "+description)
		assert.NotNil(t, r, "open stream of "+description)
		if err == nil && r != nil {
			data, err := ioutil.ReadAll(r)
			assert.NoError(t, err, "read all of "+description)
			assert.Equal(t, contents, string(data), description+" contents is ok")
			_ = r.Close()
		}
	}

	metaData, err := marshalSimpleJSON(ctx, 3, 1, "", "")
	require.NoError(t, err)
	todaysMeta := string(metaData)
	runSubtest(todaysMeta, "today")

	// rewrite today's metadata to look like a version 1 record
	pastMeta := regexp.MustCompile(`"ver":[0-9]+`).ReplaceAllLiteralString(todaysMeta, `"ver":1`)
	pastMeta = regexp.MustCompile(`"size":[0-9]+`).ReplaceAllLiteralString(pastMeta, `"size":0`)
	runSubtest(pastMeta, "past")

	// rewrite today's metadata to simulate a future format with
	// a higher version and an unknown extra field
	futureMeta := regexp.MustCompile(`"ver":[0-9]+`).ReplaceAllLiteralString(todaysMeta, `"ver":999`)
	futureMeta = regexp.MustCompile(`"nchunks":[0-9]+`).ReplaceAllLiteralString(futureMeta, `"nchunks":0,"x":"y"`)
	runSubtest(futureMeta, "future")
}
// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
	// PutLarge is special: it only runs when -upload-kilobytes is set.
	t.Run("PutLarge", func(t *testing.T) {
		if *UploadKilobytes <= 0 {
			t.Skip("-upload-kilobytes is not set")
		}
		testPutLarge(t, f, *UploadKilobytes)
	})
	// The remaining subtests share the signature func(*testing.T, *Fs)
	// and always run, so dispatch them from a table in order.
	for _, sub := range []struct {
		name string
		fn   func(*testing.T, *Fs)
	}{
		{"ChunkNameFormat", testChunkNameFormat},
		{"SmallFileInternals", testSmallFileInternals},
		{"PreventCorruption", testPreventCorruption},
		{"ChunkNumberOverflow", testChunkNumberOverflow},
		{"MetadataInput", testMetadataInput},
	} {
		sub := sub // redundant since Go 1.22, kept for older toolchains
		t.Run(sub.name, func(t *testing.T) {
			sub.fn(t, f)
		})
	}
}
// Verify at compile time that Fs implements fstests.InternalTester.
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -1,58 +0,0 @@
// Test the Chunker filesystem interface
package chunker_test
import (
"flag"
"os"
"path/filepath"
"testing"
_ "github.com/rclone/rclone/backend/all" // for integration tests
"github.com/rclone/rclone/backend/chunker"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
// Command line flags
var (
	// UseBadChars enables testing of invalid characters in file names.
	// Invalid characters are not supported by some remotes, e.g. Mailru.
	// We enable testing with invalid characters when -remote is not set, so
	// chunker overlays a local directory, but invalid characters are disabled
	// by default when -remote is set, e.g. when test_all runs backend tests.
	// You can still test with invalid characters using the below flag.
	UseBadChars = flag.Bool("bad-chars", false, "Set to test bad characters in file names when -remote is set")
)
// TestIntegration runs integration tests against a concrete remote
// set by the -remote flag. If the flag is not set, it creates a
// dynamic chunker overlay wrapping a local temporary directory.
func TestIntegration(t *testing.T) {
	opt := fstests.Opt{
		RemoteName:               *fstest.RemoteName,
		NilObject:                (*chunker.Object)(nil),
		SkipBadWindowsCharacters: !*UseBadChars,
		UnimplementableObjectMethods: []string{
			"MimeType",
			"GetTier",
			"SetTier",
		},
		UnimplementableFsMethods: []string{
			"PublicLink",
			"OpenWriterAt",
			"MergeDirs",
			"DirCacheFlush",
			"UserInfo",
			"Disconnect",
		},
	}
	if opt.RemoteName == "" {
		// No -remote given: overlay a chunker on a local temp directory.
		const name = "TestChunker"
		opt.RemoteName = name + ":"
		tempDir := filepath.Join(os.TempDir(), "rclone-chunker-test-standard")
		opt.ExtraConfig = []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "chunker"},
			{Name: name, Key: "remote", Value: tempDir},
		}
	}
	fstests.Run(t, &opt)
}

View File

@@ -2,7 +2,6 @@ package crypt
import ( import (
"bytes" "bytes"
"context"
"crypto/aes" "crypto/aes"
gocipher "crypto/cipher" gocipher "crypto/cipher"
"crypto/rand" "crypto/rand"
@@ -14,10 +13,10 @@ import (
"sync" "sync"
"unicode/utf8" "unicode/utf8"
"github.com/ncw/rclone/backend/crypt/pkcs7"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rfjakob/eme" "github.com/rfjakob/eme"
"golang.org/x/crypto/nacl/secretbox" "golang.org/x/crypto/nacl/secretbox"
"golang.org/x/crypto/scrypt" "golang.org/x/crypto/scrypt"
@@ -69,7 +68,7 @@ type ReadSeekCloser interface {
} }
// OpenRangeSeek opens the file handle at the offset with the limit given // OpenRangeSeek opens the file handle at the offset with the limit given
type OpenRangeSeek func(ctx context.Context, offset, limit int64) (io.ReadCloser, error) type OpenRangeSeek func(offset, limit int64) (io.ReadCloser, error)
// Cipher is used to swap out the encryption implementations // Cipher is used to swap out the encryption implementations
type Cipher interface { type Cipher interface {
@@ -86,7 +85,7 @@ type Cipher interface {
// DecryptData // DecryptData
DecryptData(io.ReadCloser) (io.ReadCloser, error) DecryptData(io.ReadCloser) (io.ReadCloser, error)
// DecryptDataSeek decrypt at a given position // DecryptDataSeek decrypt at a given position
DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) DecryptDataSeek(open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error)
// EncryptedSize calculates the size of the data when encrypted // EncryptedSize calculates the size of the data when encrypted
EncryptedSize(int64) int64 EncryptedSize(int64) int64
// DecryptedSize calculates the size of the data when decrypted // DecryptedSize calculates the size of the data when decrypted
@@ -208,6 +207,21 @@ func (c *cipher) putBlock(buf []byte) {
c.buffers.Put(buf) c.buffers.Put(buf)
} }
// checkValidString returns an error if the byte string contains an
// ASCII control character (0x00 to 0x1F, or DEL 0x7F) or is not a
// valid UTF-8 string, and nil otherwise.
func checkValidString(buf []byte) error {
	for i := range buf {
		c := buf[i]
		// Fixed: the original tested `c >= 0x00 && c < 0x20` — the
		// first comparison is always true for an unsigned byte, so
		// only the upper bound and DEL need checking.
		if c < 0x20 || c == 0x7F {
			return ErrorBadDecryptControlChar
		}
	}
	if !utf8.Valid(buf) {
		return ErrorBadDecryptUTF8
	}
	return nil
}
// encodeFileName encodes a filename using a modified version of // encodeFileName encodes a filename using a modified version of
// standard base32 as described in RFC4648 // standard base32 as described in RFC4648
// //
@@ -279,6 +293,10 @@ func (c *cipher) decryptSegment(ciphertext string) (string, error) {
if err != nil { if err != nil {
return "", err return "", err
} }
err = checkValidString(plaintext)
if err != nil {
return "", err
}
return string(plaintext), err return string(plaintext), err
} }
@@ -737,22 +755,22 @@ func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
} }
// newDecrypterSeek creates a new file handle decrypting on the fly // newDecrypterSeek creates a new file handle decrypting on the fly
func (c *cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) { func (c *cipher) newDecrypterSeek(open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
var rc io.ReadCloser var rc io.ReadCloser
doRangeSeek := false doRangeSeek := false
setLimit := false setLimit := false
// Open initially with no seek // Open initially with no seek
if offset == 0 && limit < 0 { if offset == 0 && limit < 0 {
// If no offset or limit then open whole file // If no offset or limit then open whole file
rc, err = open(ctx, 0, -1) rc, err = open(0, -1)
} else if offset == 0 { } else if offset == 0 {
// If no offset open the header + limit worth of the file // If no offset open the header + limit worth of the file
_, underlyingLimit, _, _ := calculateUnderlying(offset, limit) _, underlyingLimit, _, _ := calculateUnderlying(offset, limit)
rc, err = open(ctx, 0, int64(fileHeaderSize)+underlyingLimit) rc, err = open(0, int64(fileHeaderSize)+underlyingLimit)
setLimit = true setLimit = true
} else { } else {
// Otherwise just read the header to start with // Otherwise just read the header to start with
rc, err = open(ctx, 0, int64(fileHeaderSize)) rc, err = open(0, int64(fileHeaderSize))
doRangeSeek = true doRangeSeek = true
} }
if err != nil { if err != nil {
@@ -765,7 +783,7 @@ func (c *cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offse
} }
fh.open = open // will be called by fh.RangeSeek fh.open = open // will be called by fh.RangeSeek
if doRangeSeek { if doRangeSeek {
_, err = fh.RangeSeek(ctx, offset, io.SeekStart, limit) _, err = fh.RangeSeek(offset, io.SeekStart, limit)
if err != nil { if err != nil {
_ = fh.Close() _ = fh.Close()
return nil, err return nil, err
@@ -885,7 +903,7 @@ func calculateUnderlying(offset, limit int64) (underlyingOffset, underlyingLimit
// limiting the total length to limit. // limiting the total length to limit.
// //
// RangeSeek with a limit of < 0 is equivalent to a regular Seek. // RangeSeek with a limit of < 0 is equivalent to a regular Seek.
func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, limit int64) (int64, error) { func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, error) {
fh.mu.Lock() fh.mu.Lock()
defer fh.mu.Unlock() defer fh.mu.Unlock()
@@ -912,7 +930,7 @@ func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, li
// Can we seek underlying stream directly? // Can we seek underlying stream directly?
if do, ok := fh.rc.(fs.RangeSeeker); ok { if do, ok := fh.rc.(fs.RangeSeeker); ok {
// Seek underlying stream directly // Seek underlying stream directly
_, err := do.RangeSeek(ctx, underlyingOffset, 0, underlyingLimit) _, err := do.RangeSeek(underlyingOffset, 0, underlyingLimit)
if err != nil { if err != nil {
return 0, fh.finish(err) return 0, fh.finish(err)
} }
@@ -922,7 +940,7 @@ func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, li
fh.rc = nil fh.rc = nil
// Re-open the underlying object with the offset given // Re-open the underlying object with the offset given
rc, err := fh.open(ctx, underlyingOffset, underlyingLimit) rc, err := fh.open(underlyingOffset, underlyingLimit)
if err != nil { if err != nil {
return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit")) return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit"))
} }
@@ -951,7 +969,7 @@ func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, li
// Seek implements the io.Seeker interface // Seek implements the io.Seeker interface
func (fh *decrypter) Seek(offset int64, whence int) (int64, error) { func (fh *decrypter) Seek(offset int64, whence int) (int64, error) {
return fh.RangeSeek(context.TODO(), offset, whence, -1) return fh.RangeSeek(offset, whence, -1)
} }
// finish sets the final error and tidies up // finish sets the final error and tidies up
@@ -1025,8 +1043,8 @@ func (c *cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
// The open function must return a ReadCloser opened to the offset supplied // The open function must return a ReadCloser opened to the offset supplied
// //
// You must use this form of DecryptData if you might want to Seek the file handle // You must use this form of DecryptData if you might want to Seek the file handle
func (c *cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) { func (c *cipher) DecryptDataSeek(open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
out, err := c.newDecrypterSeek(ctx, open, offset, limit) out, err := c.newDecrypterSeek(open, offset, limit)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@@ -2,7 +2,6 @@ package crypt
import ( import (
"bytes" "bytes"
"context"
"encoding/base32" "encoding/base32"
"fmt" "fmt"
"io" "io"
@@ -10,8 +9,8 @@ import (
"strings" "strings"
"testing" "testing"
"github.com/ncw/rclone/backend/crypt/pkcs7"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@@ -44,6 +43,69 @@ func TestNewNameEncryptionModeString(t *testing.T) {
assert.Equal(t, NameEncryptionMode(3).String(), "Unknown mode #3") assert.Equal(t, NameEncryptionMode(3).String(), "Unknown mode #3")
} }
// TestValidString checks that checkValidString accepts printable
// UTF-8 strings and rejects ASCII control characters and invalid
// UTF-8 sequences.
func TestValidString(t *testing.T) {
	for _, test := range []struct {
		in       string
		expected error
	}{
		{"", nil},
		{"\x01", ErrorBadDecryptControlChar},
		{"a\x02", ErrorBadDecryptControlChar},
		{"abc\x03", ErrorBadDecryptControlChar},
		{"abc\x04def", ErrorBadDecryptControlChar},
		{"\x05d", ErrorBadDecryptControlChar},
		{"\x06def", ErrorBadDecryptControlChar},
		{"\x07", ErrorBadDecryptControlChar},
		{"\x08", ErrorBadDecryptControlChar},
		{"\x09", ErrorBadDecryptControlChar},
		{"\x0A", ErrorBadDecryptControlChar},
		{"\x0B", ErrorBadDecryptControlChar},
		{"\x0C", ErrorBadDecryptControlChar},
		{"\x0D", ErrorBadDecryptControlChar},
		{"\x0E", ErrorBadDecryptControlChar},
		{"\x0F", ErrorBadDecryptControlChar},
		{"\x10", ErrorBadDecryptControlChar},
		{"\x11", ErrorBadDecryptControlChar},
		{"\x12", ErrorBadDecryptControlChar},
		{"\x13", ErrorBadDecryptControlChar},
		{"\x14", ErrorBadDecryptControlChar},
		{"\x15", ErrorBadDecryptControlChar},
		{"\x16", ErrorBadDecryptControlChar},
		{"\x17", ErrorBadDecryptControlChar},
		{"\x18", ErrorBadDecryptControlChar},
		{"\x19", ErrorBadDecryptControlChar},
		{"\x1A", ErrorBadDecryptControlChar},
		{"\x1B", ErrorBadDecryptControlChar},
		{"\x1C", ErrorBadDecryptControlChar},
		{"\x1D", ErrorBadDecryptControlChar},
		{"\x1E", ErrorBadDecryptControlChar},
		{"\x1F", ErrorBadDecryptControlChar},
		{"\x20", nil},
		{"\x7E", nil},
		{"\x7F", ErrorBadDecryptControlChar},
		{"£100", nil},
		{`hello? sausage/êé/Hello, 世界/ " ' @ < > & ?/z.txt`, nil},
		{"£100", nil},
		// Following tests from https://secure.php.net/manual/en/reference.pcre.pattern.modifiers.php#54805
		{"a", nil},                        // Valid ASCII
		{"\xc3\xb1", nil},                 // Valid 2 Octet Sequence
		{"\xc3\x28", ErrorBadDecryptUTF8}, // Invalid 2 Octet Sequence
		{"\xa0\xa1", ErrorBadDecryptUTF8}, // Invalid Sequence Identifier
		{"\xe2\x82\xa1", nil},             // Valid 3 Octet Sequence
		{"\xe2\x28\xa1", ErrorBadDecryptUTF8},     // Invalid 3 Octet Sequence (in 2nd Octet)
		{"\xe2\x82\x28", ErrorBadDecryptUTF8},     // Invalid 3 Octet Sequence (in 3rd Octet)
		{"\xf0\x90\x8c\xbc", nil},                 // Valid 4 Octet Sequence
		{"\xf0\x28\x8c\xbc", ErrorBadDecryptUTF8}, // Invalid 4 Octet Sequence (in 2nd Octet)
		{"\xf0\x90\x28\xbc", ErrorBadDecryptUTF8}, // Invalid 4 Octet Sequence (in 3rd Octet)
		{"\xf0\x28\x8c\x28", ErrorBadDecryptUTF8}, // Invalid 4 Octet Sequence (in 4th Octet)
		{"\xf8\xa1\xa1\xa1\xa1", ErrorBadDecryptUTF8},     // Valid 5 Octet Sequence (but not Unicode!)
		{"\xfc\xa1\xa1\xa1\xa1\xa1", ErrorBadDecryptUTF8}, // Valid 6 Octet Sequence (but not Unicode!)
	} {
		actual := checkValidString([]byte(test.in))
		// Fixed: testify takes (t, expected, actual, ...) — the
		// original passed actual first, inverting failure messages.
		assert.Equal(t, test.expected, actual, fmt.Sprintf("in=%q", test.in))
	}
}
func TestEncodeFileName(t *testing.T) { func TestEncodeFileName(t *testing.T) {
for _, test := range []struct { for _, test := range []struct {
in string in string
@@ -147,6 +209,8 @@ func TestDecryptSegment(t *testing.T) {
{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize}, {encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize}, {encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong}, {encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
{c.encryptSegment("\x01"), ErrorBadDecryptControlChar},
{c.encryptSegment("\xc3\x28"), ErrorBadDecryptUTF8},
} { } {
actual, actualErr := c.decryptSegment(test.in) actual, actualErr := c.decryptSegment(test.in)
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr)) assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
@@ -640,16 +704,16 @@ var (
// Test test infrastructure first! // Test test infrastructure first!
func TestRandomSource(t *testing.T) { func TestRandomSource(t *testing.T) {
source := newRandomSource(1e8) source := newRandomSource(1E8)
sink := newRandomSource(1e8) sink := newRandomSource(1E8)
n, err := io.Copy(sink, source) n, err := io.Copy(sink, source)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, int64(1e8), n) assert.Equal(t, int64(1E8), n)
source = newRandomSource(1e8) source = newRandomSource(1E8)
buf := make([]byte, 16) buf := make([]byte, 16)
_, _ = source.Read(buf) _, _ = source.Read(buf)
sink = newRandomSource(1e8) sink = newRandomSource(1E8)
_, err = io.Copy(sink, source) _, err = io.Copy(sink, source)
assert.Error(t, err, "Error in stream") assert.Error(t, err, "Error in stream")
} }
@@ -689,23 +753,23 @@ func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
} }
func TestEncryptDecrypt1(t *testing.T) { func TestEncryptDecrypt1(t *testing.T) {
testEncryptDecrypt(t, 1, 1e7) testEncryptDecrypt(t, 1, 1E7)
} }
func TestEncryptDecrypt32(t *testing.T) { func TestEncryptDecrypt32(t *testing.T) {
testEncryptDecrypt(t, 32, 1e8) testEncryptDecrypt(t, 32, 1E8)
} }
func TestEncryptDecrypt4096(t *testing.T) { func TestEncryptDecrypt4096(t *testing.T) {
testEncryptDecrypt(t, 4096, 1e8) testEncryptDecrypt(t, 4096, 1E8)
} }
func TestEncryptDecrypt65536(t *testing.T) { func TestEncryptDecrypt65536(t *testing.T) {
testEncryptDecrypt(t, 65536, 1e8) testEncryptDecrypt(t, 65536, 1E8)
} }
func TestEncryptDecrypt65537(t *testing.T) { func TestEncryptDecrypt65537(t *testing.T) {
testEncryptDecrypt(t, 65537, 1e8) testEncryptDecrypt(t, 65537, 1E8)
} }
var ( var (
@@ -738,7 +802,7 @@ func TestEncryptData(t *testing.T) {
} { } {
c, err := newCipher(NameEncryptionStandard, "", "", true) c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err) assert.NoError(t, err)
c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator c.cryptoRand = newRandomSource(1E8) // nodge the crypto rand generator
// Check encode works // Check encode works
buf := bytes.NewBuffer(test.in) buf := bytes.NewBuffer(test.in)
@@ -761,7 +825,7 @@ func TestEncryptData(t *testing.T) {
func TestNewEncrypter(t *testing.T) { func TestNewEncrypter(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true) c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err) assert.NoError(t, err)
c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator c.cryptoRand = newRandomSource(1E8) // nodge the crypto rand generator
z := &zeroes{} z := &zeroes{}
@@ -788,7 +852,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
fh, err := c.newEncrypter(in, nil) fh, err := c.newEncrypter(in, nil)
assert.NoError(t, err) assert.NoError(t, err)
n, err := io.CopyN(ioutil.Discard, fh, 1e6) n, err := io.CopyN(ioutil.Discard, fh, 1E6)
assert.Equal(t, io.ErrUnexpectedEOF, err) assert.Equal(t, io.ErrUnexpectedEOF, err)
assert.Equal(t, int64(32), n) assert.Equal(t, int64(32), n)
} }
@@ -820,7 +884,7 @@ func (c *closeDetector) Close() error {
func TestNewDecrypter(t *testing.T) { func TestNewDecrypter(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true) c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err) assert.NoError(t, err)
c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator c.cryptoRand = newRandomSource(1E8) // nodge the crypto rand generator
cd := newCloseDetector(bytes.NewBuffer(file0)) cd := newCloseDetector(bytes.NewBuffer(file0))
fh, err := c.newDecrypter(cd) fh, err := c.newDecrypter(cd)
@@ -871,7 +935,7 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
fh, err := c.newDecrypter(in) fh, err := c.newDecrypter(in)
assert.NoError(t, err) assert.NoError(t, err)
n, err := io.CopyN(ioutil.Discard, fh, 1e6) n, err := io.CopyN(ioutil.Discard, fh, 1E6)
assert.Equal(t, io.ErrUnexpectedEOF, err) assert.Equal(t, io.ErrUnexpectedEOF, err)
assert.Equal(t, int64(16), n) assert.Equal(t, int64(16), n)
} }
@@ -901,7 +965,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
// Open stream with a seek of underlyingOffset // Open stream with a seek of underlyingOffset
var reader io.ReadCloser var reader io.ReadCloser
open := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) { open := func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
end := len(ciphertext) end := len(ciphertext)
if underlyingLimit >= 0 { if underlyingLimit >= 0 {
end = int(underlyingOffset + underlyingLimit) end = int(underlyingOffset + underlyingLimit)
@@ -942,7 +1006,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
if offset+limit > len(plaintext) { if offset+limit > len(plaintext) {
continue continue
} }
rc, err := c.DecryptDataSeek(context.Background(), open, int64(offset), int64(limit)) rc, err := c.DecryptDataSeek(open, int64(offset), int64(limit))
assert.NoError(t, err) assert.NoError(t, err)
check(rc, offset, limit) check(rc, offset, limit)
@@ -950,14 +1014,14 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
} }
// Try decoding it with a single open and lots of seeks // Try decoding it with a single open and lots of seeks
fh, err := c.DecryptDataSeek(context.Background(), open, 0, -1) fh, err := c.DecryptDataSeek(open, 0, -1)
assert.NoError(t, err) assert.NoError(t, err)
for _, offset := range trials { for _, offset := range trials {
for _, limit := range limits { for _, limit := range limits {
if offset+limit > len(plaintext) { if offset+limit > len(plaintext) {
continue continue
} }
_, err := fh.RangeSeek(context.Background(), int64(offset), io.SeekStart, int64(limit)) _, err := fh.RangeSeek(int64(offset), io.SeekStart, int64(limit))
assert.NoError(t, err) assert.NoError(t, err)
check(fh, offset, limit) check(fh, offset, limit)
@@ -1008,7 +1072,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
} { } {
what := fmt.Sprintf("offset = %d, limit = %d", test.offset, test.limit) what := fmt.Sprintf("offset = %d, limit = %d", test.offset, test.limit)
callCount := 0 callCount := 0
testOpen := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) { testOpen := func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
switch callCount { switch callCount {
case 0: case 0:
assert.Equal(t, int64(0), underlyingOffset, what) assert.Equal(t, int64(0), underlyingOffset, what)
@@ -1020,11 +1084,11 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
t.Errorf("Too many calls %d for %s", callCount+1, what) t.Errorf("Too many calls %d for %s", callCount+1, what)
} }
callCount++ callCount++
return open(ctx, underlyingOffset, underlyingLimit) return open(underlyingOffset, underlyingLimit)
} }
fh, err := c.DecryptDataSeek(context.Background(), testOpen, 0, -1) fh, err := c.DecryptDataSeek(testOpen, 0, -1)
assert.NoError(t, err) assert.NoError(t, err)
gotOffset, err := fh.RangeSeek(context.Background(), test.offset, io.SeekStart, test.limit) gotOffset, err := fh.RangeSeek(test.offset, io.SeekStart, test.limit)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, gotOffset, test.offset) assert.Equal(t, gotOffset, test.offset)
} }

View File

@@ -2,20 +2,19 @@
package crypt package crypt
import ( import (
"context"
"fmt" "fmt"
"io" "io"
"strings" "strings"
"time" "time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fspath"
"github.com/ncw/rclone/fs/hash"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
) )
// Globals // Globals
@@ -63,7 +62,6 @@ func init() {
Name: "password", Name: "password",
Help: "Password or pass phrase for encryption.", Help: "Password or pass phrase for encryption.",
IsPassword: true, IsPassword: true,
Required: true,
}, { }, {
Name: "password2", Name: "password2",
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.", Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
@@ -171,10 +169,23 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
WriteMimeType: false, WriteMimeType: false,
BucketBased: true, BucketBased: true,
CanHaveEmptyDirectories: true, CanHaveEmptyDirectories: true,
SetTier: true,
GetTier: true,
}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs) }).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
doChangeNotify := wrappedFs.Features().ChangeNotify
if doChangeNotify != nil {
f.features.ChangeNotify = func(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
decrypted, err := f.DecryptFileName(path)
if err != nil {
fs.Logf(f, "ChangeNotify was unable to decrypt %q: %s", path, err)
return
}
notifyFunc(decrypted, entryType)
}
doChangeNotify(wrappedNotifyFunc, pollInterval)
}
}
return f, err return f, err
} }
@@ -191,7 +202,6 @@ type Options struct {
// Fs represents a wrapped fs.Fs // Fs represents a wrapped fs.Fs
type Fs struct { type Fs struct {
fs.Fs fs.Fs
wrapper fs.Fs
name string name string
root string root string
opt Options opt Options
@@ -234,7 +244,7 @@ func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
} }
// Encrypt an directory file name to entries. // Encrypt an directory file name to entries.
func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) { func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) {
remote := dir.Remote() remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote) decryptedRemote, err := f.cipher.DecryptDirName(remote)
if err != nil { if err != nil {
@@ -244,18 +254,18 @@ func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Director
if f.opt.ShowMapping { if f.opt.ShowMapping {
fs.Logf(decryptedRemote, "Encrypts to %q", remote) fs.Logf(decryptedRemote, "Encrypts to %q", remote)
} }
*entries = append(*entries, f.newDir(ctx, dir)) *entries = append(*entries, f.newDir(dir))
} }
// Encrypt some directory entries. This alters entries returning it as newEntries. // Encrypt some directory entries. This alters entries returning it as newEntries.
func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) { func (f *Fs) encryptEntries(entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
newEntries = entries[:0] // in place filter newEntries = entries[:0] // in place filter
for _, entry := range entries { for _, entry := range entries {
switch x := entry.(type) { switch x := entry.(type) {
case fs.Object: case fs.Object:
f.add(&newEntries, x) f.add(&newEntries, x)
case fs.Directory: case fs.Directory:
f.addDir(ctx, &newEntries, x) f.addDir(&newEntries, x)
default: default:
return nil, errors.Errorf("Unknown object type %T", entry) return nil, errors.Errorf("Unknown object type %T", entry)
} }
@@ -272,12 +282,12 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
// //
// This should return ErrDirNotFound if the directory isn't // This should return ErrDirNotFound if the directory isn't
// found. // found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
entries, err = f.Fs.List(ctx, f.cipher.EncryptDirName(dir)) entries, err = f.Fs.List(f.cipher.EncryptDirName(dir))
if err != nil { if err != nil {
return nil, err return nil, err
} }
return f.encryptEntries(ctx, entries) return f.encryptEntries(entries)
} }
// ListR lists the objects and directories of the Fs starting // ListR lists the objects and directories of the Fs starting
@@ -296,9 +306,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// //
// Don't implement this unless you have a more efficient way // Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal. // of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
return f.Fs.Features().ListR(ctx, f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error { return f.Fs.Features().ListR(f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error {
newEntries, err := f.encryptEntries(ctx, entries) newEntries, err := f.encryptEntries(entries)
if err != nil { if err != nil {
return err return err
} }
@@ -307,18 +317,18 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
} }
// NewObject finds the Object at remote. // NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { func (f *Fs) NewObject(remote string) (fs.Object, error) {
o, err := f.Fs.NewObject(ctx, f.cipher.EncryptFileName(remote)) o, err := f.Fs.NewObject(f.cipher.EncryptFileName(remote))
if err != nil { if err != nil {
return nil, err return nil, err
} }
return f.newObject(o), nil return f.newObject(o), nil
} }
type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
// put implements Put or PutStream // put implements Put or PutStream
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) { func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
// Encrypt the data into wrappedIn // Encrypt the data into wrappedIn
wrappedIn, err := f.cipher.EncryptData(in) wrappedIn, err := f.cipher.EncryptData(in)
if err != nil { if err != nil {
@@ -344,7 +354,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
} }
// Transfer the data // Transfer the data
o, err := put(ctx, wrappedIn, f.newObjectInfo(src), options...) o, err := put(wrappedIn, f.newObjectInfo(src), options...)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -353,13 +363,13 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
if ht != hash.None && hasher != nil { if ht != hash.None && hasher != nil {
srcHash := hasher.Sums()[ht] srcHash := hasher.Sums()[ht]
var dstHash string var dstHash string
dstHash, err = o.Hash(ctx, ht) dstHash, err = o.Hash(ht)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed to read destination hash") return nil, errors.Wrap(err, "failed to read destination hash")
} }
if srcHash != "" && dstHash != "" && srcHash != dstHash { if srcHash != "" && dstHash != "" && srcHash != dstHash {
// remove object // remove object
err = o.Remove(ctx) err = o.Remove()
if err != nil { if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err) fs.Errorf(o, "Failed to remove corrupted object: %v", err)
} }
@@ -375,13 +385,13 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
// May create the object even if it returns an error - if so // May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return // will return the object and the error, otherwise will return
// nil and the error // nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.put(ctx, in, src, options, f.Fs.Put) return f.put(in, src, options, f.Fs.Put)
} }
// PutStream uploads to the remote path with the modTime given of indeterminate size // PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.put(ctx, in, src, options, f.Fs.Features().PutStream) return f.put(in, src, options, f.Fs.Features().PutStream)
} }
// Hashes returns the supported hash sets. // Hashes returns the supported hash sets.
@@ -392,15 +402,15 @@ func (f *Fs) Hashes() hash.Set {
// Mkdir makes the directory (container, bucket) // Mkdir makes the directory (container, bucket)
// //
// Shouldn't return an error if it already exists // Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error { func (f *Fs) Mkdir(dir string) error {
return f.Fs.Mkdir(ctx, f.cipher.EncryptDirName(dir)) return f.Fs.Mkdir(f.cipher.EncryptDirName(dir))
} }
// Rmdir removes the directory (container, bucket) if empty // Rmdir removes the directory (container, bucket) if empty
// //
// Return an error if it doesn't exist or isn't empty // Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error { func (f *Fs) Rmdir(dir string) error {
return f.Fs.Rmdir(ctx, f.cipher.EncryptDirName(dir)) return f.Fs.Rmdir(f.cipher.EncryptDirName(dir))
} }
// Purge all files in the root and the root directory // Purge all files in the root and the root directory
@@ -409,12 +419,12 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// quicker than just running Remove() on the result of List() // quicker than just running Remove() on the result of List()
// //
// Return an error if it doesn't exist // Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context) error { func (f *Fs) Purge() error {
do := f.Fs.Features().Purge do := f.Fs.Features().Purge
if do == nil { if do == nil {
return fs.ErrorCantPurge return fs.ErrorCantPurge
} }
return do(ctx) return do()
} }
// Copy src to this remote using server side copy operations. // Copy src to this remote using server side copy operations.
@@ -426,7 +436,7 @@ func (f *Fs) Purge(ctx context.Context) error {
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
// If it isn't possible then return fs.ErrorCantCopy // If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
do := f.Fs.Features().Copy do := f.Fs.Features().Copy
if do == nil { if do == nil {
return nil, fs.ErrorCantCopy return nil, fs.ErrorCantCopy
@@ -435,7 +445,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
if !ok { if !ok {
return nil, fs.ErrorCantCopy return nil, fs.ErrorCantCopy
} }
oResult, err := do(ctx, o.Object, f.cipher.EncryptFileName(remote)) oResult, err := do(o.Object, f.cipher.EncryptFileName(remote))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -451,7 +461,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
// If it isn't possible then return fs.ErrorCantMove // If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
do := f.Fs.Features().Move do := f.Fs.Features().Move
if do == nil { if do == nil {
return nil, fs.ErrorCantMove return nil, fs.ErrorCantMove
@@ -460,7 +470,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if !ok { if !ok {
return nil, fs.ErrorCantMove return nil, fs.ErrorCantMove
} }
oResult, err := do(ctx, o.Object, f.cipher.EncryptFileName(remote)) oResult, err := do(o.Object, f.cipher.EncryptFileName(remote))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -475,7 +485,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// If it isn't possible then return fs.ErrorCantDirMove // If it isn't possible then return fs.ErrorCantDirMove
// //
// If destination exists then return fs.ErrorDirExists // If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
do := f.Fs.Features().DirMove do := f.Fs.Features().DirMove
if do == nil { if do == nil {
return fs.ErrorCantDirMove return fs.ErrorCantDirMove
@@ -485,14 +495,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
fs.Debugf(srcFs, "Can't move directory - not same remote type") fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove return fs.ErrorCantDirMove
} }
return do(ctx, srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote)) return do(srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote))
} }
// PutUnchecked uploads the object // PutUnchecked uploads the object
// //
// This will create a duplicate if we upload a new file without // This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that. // checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
do := f.Fs.Features().PutUnchecked do := f.Fs.Features().PutUnchecked
if do == nil { if do == nil {
return nil, errors.New("can't PutUnchecked") return nil, errors.New("can't PutUnchecked")
@@ -501,7 +511,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
if err != nil { if err != nil {
return nil, err return nil, err
} }
o, err := do(ctx, wrappedIn, f.newObjectInfo(src)) o, err := do(wrappedIn, f.newObjectInfo(src))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -512,21 +522,21 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
// //
// Implement this if you have a way of emptying the trash or // Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files. // otherwise cleaning up old versions of files.
func (f *Fs) CleanUp(ctx context.Context) error { func (f *Fs) CleanUp() error {
do := f.Fs.Features().CleanUp do := f.Fs.Features().CleanUp
if do == nil { if do == nil {
return errors.New("can't CleanUp") return errors.New("can't CleanUp")
} }
return do(ctx) return do()
} }
// About gets quota information from the Fs // About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { func (f *Fs) About() (*fs.Usage, error) {
do := f.Fs.Features().About do := f.Fs.Features().About
if do == nil { if do == nil {
return nil, errors.New("About not supported") return nil, errors.New("About not supported")
} }
return do(ctx) return do()
} }
// UnWrap returns the Fs that this Fs is wrapping // UnWrap returns the Fs that this Fs is wrapping
@@ -534,16 +544,6 @@ func (f *Fs) UnWrap() fs.Fs {
return f.Fs return f.Fs
} }
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
return f.wrapper
}
// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
f.wrapper = wrapper
}
// EncryptFileName returns an encrypted file name // EncryptFileName returns an encrypted file name
func (f *Fs) EncryptFileName(fileName string) string { func (f *Fs) EncryptFileName(fileName string) string {
return f.cipher.EncryptFileName(fileName) return f.cipher.EncryptFileName(fileName)
@@ -558,10 +558,10 @@ func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
// src with it, and calculates the hash given by HashType on the fly // src with it, and calculates the hash given by HashType on the fly
// //
// Note that we break lots of encapsulation in this function. // Note that we break lots of encapsulation in this function.
func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) { func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
// Read the nonce - opening the file is sufficient to read the nonce in // Read the nonce - opening the file is sufficient to read the nonce in
// use a limited read so we only read the header // use a limited read so we only read the header
in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1}) in, err := o.Object.Open(&fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
if err != nil { if err != nil {
return "", errors.Wrap(err, "failed to open object to read nonce") return "", errors.Wrap(err, "failed to open object to read nonce")
} }
@@ -591,7 +591,7 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
} }
// Open the src for input // Open the src for input
in, err = src.Open(ctx) in, err = src.Open()
if err != nil { if err != nil {
return "", errors.Wrap(err, "failed to open src") return "", errors.Wrap(err, "failed to open src")
} }
@@ -616,75 +616,6 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
return m.Sums()[hashType], nil return m.Sums()[hashType], nil
} }
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
do := f.Fs.Features().MergeDirs
if do == nil {
return errors.New("MergeDirs not supported")
}
out := make([]fs.Directory, len(dirs))
for i, dir := range dirs {
out[i] = fs.NewDirCopy(ctx, dir).SetRemote(f.cipher.EncryptDirName(dir.Remote()))
}
return do(ctx, out)
}
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
do := f.Fs.Features().DirCacheFlush
if do != nil {
do()
}
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
do := f.Fs.Features().PublicLink
if do == nil {
return "", errors.New("PublicLink not supported")
}
o, err := f.NewObject(ctx, remote)
if err != nil {
// assume it is a directory
return do(ctx, f.cipher.EncryptDirName(remote))
}
return do(ctx, o.(*Object).Object.Remote())
}
// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
do := f.Fs.Features().ChangeNotify
if do == nil {
return
}
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
// fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
var (
err error
decrypted string
)
switch entryType {
case fs.EntryDirectory:
decrypted, err = f.cipher.DecryptDirName(path)
case fs.EntryObject:
decrypted, err = f.cipher.DecryptFileName(path)
default:
fs.Errorf(path, "crypt ChangeNotify: ignoring unknown EntryType %d", entryType)
return
}
if err != nil {
fs.Logf(f, "ChangeNotify was unable to decrypt %q: %s", path, err)
return
}
notifyFunc(decrypted, entryType)
}
do(ctx, wrappedNotifyFunc, pollIntervalChan)
}
// Object describes a wrapped for being read from the Fs // Object describes a wrapped for being read from the Fs
// //
// This decrypts the remote name and decrypts the data // This decrypts the remote name and decrypts the data
@@ -735,7 +666,7 @@ func (o *Object) Size() int64 {
// Hash returns the selected checksum of the file // Hash returns the selected checksum of the file
// If no checksum is available it returns "" // If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) { func (o *Object) Hash(ht hash.Type) (string, error) {
return "", hash.ErrUnsupported return "", hash.ErrUnsupported
} }
@@ -745,7 +676,7 @@ func (o *Object) UnWrap() fs.Object {
} }
// Open opens the file for read. Call Close() on the returned io.ReadCloser // Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) { func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
var openOptions []fs.OpenOption var openOptions []fs.OpenOption
var offset, limit int64 = 0, -1 var offset, limit int64 = 0, -1
for _, option := range options { for _, option := range options {
@@ -759,10 +690,10 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
openOptions = append(openOptions, option) openOptions = append(openOptions, option)
} }
} }
rc, err = o.f.cipher.DecryptDataSeek(ctx, func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) { rc, err = o.f.cipher.DecryptDataSeek(func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
if underlyingOffset == 0 && underlyingLimit < 0 { if underlyingOffset == 0 && underlyingLimit < 0 {
// Open with no seek // Open with no seek
return o.Object.Open(ctx, openOptions...) return o.Object.Open(openOptions...)
} }
// Open stream with a range of underlyingOffset, underlyingLimit // Open stream with a range of underlyingOffset, underlyingLimit
end := int64(-1) end := int64(-1)
@@ -773,7 +704,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
} }
} }
newOpenOptions := append(openOptions, &fs.RangeOption{Start: underlyingOffset, End: end}) newOpenOptions := append(openOptions, &fs.RangeOption{Start: underlyingOffset, End: end})
return o.Object.Open(ctx, newOpenOptions...) return o.Object.Open(newOpenOptions...)
}, offset, limit) }, offset, limit)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -782,17 +713,17 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
} }
// Update in to the object with the modTime given of the given size // Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
update := func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { update := func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return o.Object, o.Object.Update(ctx, in, src, options...) return o.Object, o.Object.Update(in, src, options...)
} }
_, err := o.f.put(ctx, in, src, options, update) _, err := o.f.put(in, src, options, update)
return err return err
} }
// newDir returns a dir with the Name decrypted // newDir returns a dir with the Name decrypted
func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory { func (f *Fs) newDir(dir fs.Directory) fs.Directory {
newDir := fs.NewDirCopy(ctx, dir) newDir := fs.NewDirCopy(dir)
remote := dir.Remote() remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote) decryptedRemote, err := f.cipher.DecryptDirName(remote)
if err != nil { if err != nil {
@@ -803,24 +734,6 @@ func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {
return newDir return newDir
} }
// UserInfo returns info about the connected user
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
do := f.Fs.Features().UserInfo
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx)
}
// Disconnect the current user
func (f *Fs) Disconnect(ctx context.Context) error {
do := f.Fs.Features().Disconnect
if do == nil {
return fs.ErrorNotImplemented
}
return do(ctx)
}
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source // ObjectInfo describes a wrapped fs.ObjectInfo for being the source
// //
// This encrypts the remote name and adjusts the size // This encrypts the remote name and adjusts the size
@@ -857,38 +770,10 @@ func (o *ObjectInfo) Size() int64 {
// Hash returns the selected checksum of the file // Hash returns the selected checksum of the file
// If no checksum is available it returns "" // If no checksum is available it returns ""
func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) { func (o *ObjectInfo) Hash(hash hash.Type) (string, error) {
return "", nil return "", nil
} }
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
do, ok := o.Object.(fs.IDer)
if !ok {
return ""
}
return do.ID()
}
// SetTier performs changing storage tier of the Object if
// multiple storage classes supported
func (o *Object) SetTier(tier string) error {
do, ok := o.Object.(fs.SetTierer)
if !ok {
return errors.New("crypt: underlying remote does not support SetTier")
}
return do.SetTier(tier)
}
// GetTier returns storage tier or class of the Object
func (o *Object) GetTier() string {
do, ok := o.Object.(fs.GetTierer)
if !ok {
return ""
}
return do.GetTier()
}
// Check the interfaces are satisfied // Check the interfaces are satisfied
var ( var (
_ fs.Fs = (*Fs)(nil) _ fs.Fs = (*Fs)(nil)
@@ -902,17 +787,7 @@ var (
_ fs.UnWrapper = (*Fs)(nil) _ fs.UnWrapper = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil) _ fs.ListRer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil) _ fs.Abouter = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
_ fs.ObjectInfo = (*ObjectInfo)(nil) _ fs.ObjectInfo = (*ObjectInfo)(nil)
_ fs.Object = (*Object)(nil) _ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil) _ fs.ObjectUnWrapper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
_ fs.SetTierer = (*Object)(nil)
_ fs.GetTierer = (*Object)(nil)
) )

View File

@@ -6,13 +6,13 @@ import (
"path/filepath" "path/filepath"
"testing" "testing"
"github.com/rclone/rclone/backend/crypt" "github.com/ncw/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/drive" // for integration tests _ "github.com/ncw/rclone/backend/drive" // for integration tests
_ "github.com/rclone/rclone/backend/local" _ "github.com/ncw/rclone/backend/local"
_ "github.com/rclone/rclone/backend/swift" // for integration tests _ "github.com/ncw/rclone/backend/swift" // for integration tests
"github.com/rclone/rclone/fs/config/obscure" "github.com/ncw/rclone/fs/config/obscure"
"github.com/rclone/rclone/fstest" "github.com/ncw/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests" "github.com/ncw/rclone/fstest/fstests"
) )
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the remote
@@ -21,10 +21,8 @@ func TestIntegration(t *testing.T) {
t.Skip("Skipping as -remote not set") t.Skip("Skipping as -remote not set")
} }
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName, RemoteName: *fstest.RemoteName,
NilObject: (*crypt.Object)(nil), NilObject: (*crypt.Object)(nil),
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
}) })
} }
@@ -44,8 +42,6 @@ func TestStandard(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato")}, {Name: name, Key: "password", Value: obscure.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"}, {Name: name, Key: "filename_encryption", Value: "standard"},
}, },
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
}) })
} }
@@ -65,8 +61,6 @@ func TestOff(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")}, {Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
{Name: name, Key: "filename_encryption", Value: "off"}, {Name: name, Key: "filename_encryption", Value: "off"},
}, },
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
}) })
} }
@@ -86,8 +80,6 @@ func TestObfuscate(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")}, {Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
{Name: name, Key: "filename_encryption", Value: "obfuscate"}, {Name: name, Key: "filename_encryption", Value: "obfuscate"},
}, },
SkipBadWindowsCharacters: true, SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
}) })
} }

File diff suppressed because it is too large Load Diff

View File

@@ -1,8 +1,9 @@
// +build go1.9
package drive package drive
import ( import (
"bytes" "bytes"
"context"
"encoding/json" "encoding/json"
"io" "io"
"io/ioutil" "io/ioutil"
@@ -11,11 +12,11 @@ import (
"strings" "strings"
"testing" "testing"
_ "github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/operations"
"github.com/ncw/rclone/fstest/fstests"
"github.com/pkg/errors" "github.com/pkg/errors"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"google.golang.org/api/drive/v3" "google.golang.org/api/drive/v3"
@@ -196,7 +197,7 @@ func (f *Fs) InternalTestDocumentImport(t *testing.T) {
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc") _, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
require.NoError(t, err) require.NoError(t, err)
err = operations.CopyFile(context.Background(), f, testFilesFs, "example2.doc", "example2.doc") err = operations.CopyFile(f, testFilesFs, "example2.doc", "example2.doc")
require.NoError(t, err) require.NoError(t, err)
} }
@@ -210,7 +211,7 @@ func (f *Fs) InternalTestDocumentUpdate(t *testing.T) {
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc") _, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
require.NoError(t, err) require.NoError(t, err)
err = operations.CopyFile(context.Background(), f, testFilesFs, "example2.xlsx", "example1.ods") err = operations.CopyFile(f, testFilesFs, "example2.xlsx", "example1.ods")
require.NoError(t, err) require.NoError(t, err)
} }
@@ -221,10 +222,10 @@ func (f *Fs) InternalTestDocumentExport(t *testing.T) {
f.exportExtensions, _, err = parseExtensions("txt") f.exportExtensions, _, err = parseExtensions("txt")
require.NoError(t, err) require.NoError(t, err)
obj, err := f.NewObject(context.Background(), "example2.txt") obj, err := f.NewObject("example2.txt")
require.NoError(t, err) require.NoError(t, err)
rc, err := obj.Open(context.Background()) rc, err := obj.Open()
require.NoError(t, err) require.NoError(t, err)
defer func() { require.NoError(t, rc.Close()) }() defer func() { require.NoError(t, rc.Close()) }()
@@ -247,10 +248,10 @@ func (f *Fs) InternalTestDocumentLink(t *testing.T) {
f.exportExtensions, _, err = parseExtensions("link.html") f.exportExtensions, _, err = parseExtensions("link.html")
require.NoError(t, err) require.NoError(t, err)
obj, err := f.NewObject(context.Background(), "example2.link.html") obj, err := f.NewObject("example2.link.html")
require.NoError(t, err) require.NoError(t, err)
rc, err := obj.Open(context.Background()) rc, err := obj.Open()
require.NoError(t, err) require.NoError(t, err)
defer func() { require.NoError(t, rc.Close()) }() defer func() { require.NoError(t, rc.Close()) }()

View File

@@ -1,12 +1,14 @@
// Test Drive filesystem interface // Test Drive filesystem interface
// +build go1.9
package drive package drive
import ( import (
"testing" "testing"
"github.com/rclone/rclone/fs" "github.com/ncw/rclone/fs"
"github.com/rclone/rclone/fstest/fstests" "github.com/ncw/rclone/fstest/fstests"
) )
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the remote

View File

@@ -0,0 +1,6 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build !go1.9
package drive

View File

@@ -8,10 +8,11 @@
// //
// This contains code adapted from google.golang.org/api (C) the GO AUTHORS // This contains code adapted from google.golang.org/api (C) the GO AUTHORS
// +build go1.9
package drive package drive
import ( import (
"context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
@@ -20,10 +21,10 @@ import (
"regexp" "regexp"
"strconv" "strconv"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/readers"
"google.golang.org/api/drive/v3" "google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi" "google.golang.org/api/googleapi"
) )
@@ -51,13 +52,15 @@ type resumableUpload struct {
} }
// Upload the io.Reader in of size bytes with contentType and info // Upload the io.Reader in of size bytes with contentType and info
func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) { func (f *Fs) Upload(in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) {
params := url.Values{ params := url.Values{
"alt": {"json"}, "alt": {"json"},
"uploadType": {"resumable"}, "uploadType": {"resumable"},
"fields": {partialFields}, "fields": {partialFields},
} }
params.Set("supportsAllDrives", "true") if f.isTeamDrive {
params.Set("supportsTeamDrives", "true")
}
if f.opt.KeepRevisionForever { if f.opt.KeepRevisionForever {
params.Set("keepRevisionForever", "true") params.Set("keepRevisionForever", "true")
} }
@@ -82,7 +85,6 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
if err != nil { if err != nil {
return false, err return false, err
} }
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
googleapi.Expand(req.URL, map[string]string{ googleapi.Expand(req.URL, map[string]string{
"fileId": fileID, "fileId": fileID,
}) })
@@ -108,13 +110,12 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
MediaType: contentType, MediaType: contentType,
ContentLength: size, ContentLength: size,
} }
return rx.Upload(ctx) return rx.Upload()
} }
// Make an http.Request for the range passed in // Make an http.Request for the range passed in
func (rx *resumableUpload) makeRequest(ctx context.Context, start int64, body io.ReadSeeker, reqSize int64) *http.Request { func (rx *resumableUpload) makeRequest(start int64, body io.ReadSeeker, reqSize int64) *http.Request {
req, _ := http.NewRequest("POST", rx.URI, body) req, _ := http.NewRequest("POST", rx.URI, body)
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.ContentLength = reqSize req.ContentLength = reqSize
if reqSize != 0 { if reqSize != 0 {
req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength)) req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
@@ -132,8 +133,8 @@ var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)
// Query drive for the amount transferred so far // Query drive for the amount transferred so far
// //
// If error is nil, then start should be valid // If error is nil, then start should be valid
func (rx *resumableUpload) transferStatus(ctx context.Context) (start int64, err error) { func (rx *resumableUpload) transferStatus() (start int64, err error) {
req := rx.makeRequest(ctx, 0, nil, 0) req := rx.makeRequest(0, nil, 0)
res, err := rx.f.client.Do(req) res, err := rx.f.client.Do(req)
if err != nil { if err != nil {
return 0, err return 0, err
@@ -160,9 +161,9 @@ func (rx *resumableUpload) transferStatus(ctx context.Context) (start int64, err
} }
// Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil // Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
func (rx *resumableUpload) transferChunk(ctx context.Context, start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) { func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
_, _ = chunk.Seek(0, io.SeekStart) _, _ = chunk.Seek(0, io.SeekStart)
req := rx.makeRequest(ctx, start, chunk, chunkSize) req := rx.makeRequest(start, chunk, chunkSize)
res, err := rx.f.client.Do(req) res, err := rx.f.client.Do(req)
if err != nil { if err != nil {
return 599, err return 599, err
@@ -195,7 +196,7 @@ func (rx *resumableUpload) transferChunk(ctx context.Context, start int64, chunk
// Upload uploads the chunks from the input // Upload uploads the chunks from the input
// It retries each chunk using the pacer and --low-level-retries // It retries each chunk using the pacer and --low-level-retries
func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) { func (rx *resumableUpload) Upload() (*drive.File, error) {
start := int64(0) start := int64(0)
var StatusCode int var StatusCode int
var err error var err error
@@ -210,7 +211,7 @@ func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
// Transfer the chunk // Transfer the chunk
err = rx.f.pacer.Call(func() (bool, error) { err = rx.f.pacer.Call(func() (bool, error) {
fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize) fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
StatusCode, err = rx.transferChunk(ctx, start, chunk, reqSize) StatusCode, err = rx.transferChunk(start, chunk, reqSize)
again, err := shouldRetry(err) again, err := shouldRetry(err)
if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK { if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
again = false again = false

View File

@@ -5,7 +5,7 @@ import (
"fmt" "fmt"
"testing" "testing"
"github.com/rclone/rclone/backend/dropbox/dbhash" "github.com/ncw/rclone/backend/dropbox/dbhash"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )

View File

@@ -22,7 +22,6 @@ of path_display and all will be well.
*/ */
import ( import (
"context"
"fmt" "fmt"
"io" "io"
"log" "log"
@@ -38,24 +37,20 @@ import (
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing" "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team" "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users" "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/backend/dropbox/dbhash"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
"golang.org/x/oauth2" "golang.org/x/oauth2"
) )
const enc = encodings.Dropbox
// Constants // Constants
const ( const (
rcloneClientID = "5jcck7diasz0rqy" rcloneClientID = "5jcck7diasz0rqy"
@@ -106,14 +101,10 @@ var (
// A regexp matching path names for files Dropbox ignores // A regexp matching path names for files Dropbox ignores
// See https://www.dropbox.com/en/help/145 - Ignored files // See https://www.dropbox.com/en/help/145 - Ignored files
ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`) ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)
// DbHashType is the hash.Type for Dropbox
DbHashType hash.Type
) )
// Register with Fs // Register with Fs
func init() { func init() {
DbHashType = hash.RegisterHash("DropboxHash", 64, dbhash.New)
fs.Register(&fs.RegInfo{ fs.Register(&fs.RegInfo{
Name: "dropbox", Name: "dropbox",
Description: "Dropbox", Description: "Dropbox",
@@ -380,15 +371,14 @@ func (f *Fs) setRoot(root string) {
// getMetadata gets the metadata for a file or directory // getMetadata gets the metadata for a file or directory
func (f *Fs) getMetadata(objPath string) (entry files.IsMetadata, notFound bool, err error) { func (f *Fs) getMetadata(objPath string) (entry files.IsMetadata, notFound bool, err error) {
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
entry, err = f.srv.GetMetadata(&files.GetMetadataArg{ entry, err = f.srv.GetMetadata(&files.GetMetadataArg{Path: objPath})
Path: enc.FromStandardPath(objPath),
})
return shouldRetry(err) return shouldRetry(err)
}) })
if err != nil { if err != nil {
switch e := err.(type) { switch e := err.(type) {
case files.GetMetadataAPIError: case files.GetMetadataAPIError:
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound { switch e.EndpointError.Path.Tag {
case files.LookupErrorNotFound:
notFound = true notFound = true
err = nil err = nil
} }
@@ -451,7 +441,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *files.FileMetadata) (fs.Obje
// NewObject finds the Object at remote. If it can't be found // NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound. // it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil) return f.newObjectWithInfo(remote, nil)
} }
@@ -464,7 +454,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// //
// This should return ErrDirNotFound if the directory isn't // This should return ErrDirNotFound if the directory isn't
// found. // found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
root := f.slashRoot root := f.slashRoot
if dir != "" { if dir != "" {
root += "/" + dir root += "/" + dir
@@ -475,7 +465,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
for { for {
if !started { if !started {
arg := files.ListFolderArg{ arg := files.ListFolderArg{
Path: enc.FromStandardPath(root), Path: root,
Recursive: false, Recursive: false,
} }
if root == "/" { if root == "/" {
@@ -488,7 +478,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if err != nil { if err != nil {
switch e := err.(type) { switch e := err.(type) {
case files.ListFolderAPIError: case files.ListFolderAPIError:
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound { switch e.EndpointError.Path.Tag {
case files.LookupErrorNotFound:
err = fs.ErrorDirNotFound err = fs.ErrorDirNotFound
} }
} }
@@ -525,7 +516,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Only the last element is reliably cased in PathDisplay // Only the last element is reliably cased in PathDisplay
entryPath := metadata.PathDisplay entryPath := metadata.PathDisplay
leaf := enc.ToStandardName(path.Base(entryPath)) leaf := path.Base(entryPath)
remote := path.Join(dir, leaf) remote := path.Join(dir, leaf)
if folderInfo != nil { if folderInfo != nil {
d := fs.NewDir(remote, time.Now()) d := fs.NewDir(remote, time.Now())
@@ -550,22 +541,22 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Copy the reader in to the new object which is returned // Copy the reader in to the new object which is returned
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction // Temporary Object under construction
o := &Object{ o := &Object{
fs: f, fs: f,
remote: src.Remote(), remote: src.Remote(),
} }
return o, o.Update(ctx, in, src, options...) return o, o.Update(in, src, options...)
} }
// PutStream uploads to the remote path with the modTime given of indeterminate size // PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...) return f.Put(in, src, options...)
} }
// Mkdir creates the container if it doesn't exist // Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error { func (f *Fs) Mkdir(dir string) error {
root := path.Join(f.slashRoot, dir) root := path.Join(f.slashRoot, dir)
// can't create or run metadata on root // can't create or run metadata on root
@@ -583,7 +574,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// create it // create it
arg2 := files.CreateFolderArg{ arg2 := files.CreateFolderArg{
Path: enc.FromStandardPath(root), Path: root,
} }
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.CreateFolderV2(&arg2) _, err = f.srv.CreateFolderV2(&arg2)
@@ -595,7 +586,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// Rmdir deletes the container // Rmdir deletes the container
// //
// Returns an error if it isn't empty // Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error { func (f *Fs) Rmdir(dir string) error {
root := path.Join(f.slashRoot, dir) root := path.Join(f.slashRoot, dir)
// can't remove root // can't remove root
@@ -609,7 +600,6 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return errors.Wrap(err, "Rmdir") return errors.Wrap(err, "Rmdir")
} }
root = enc.FromStandardPath(root)
// check directory empty // check directory empty
arg := files.ListFolderArg{ arg := files.ListFolderArg{
Path: root, Path: root,
@@ -652,7 +642,7 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
// If it isn't possible then return fs.ErrorCantCopy // If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object) srcObj, ok := src.(*Object)
if !ok { if !ok {
fs.Debugf(src, "Can't copy - not same remote type") fs.Debugf(src, "Can't copy - not same remote type")
@@ -666,12 +656,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
} }
// Copy // Copy
arg := files.RelocationArg{ arg := files.RelocationArg{}
RelocationPath: files.RelocationPath{ arg.FromPath = srcObj.remotePath()
FromPath: enc.FromStandardPath(srcObj.remotePath()), arg.ToPath = dstObj.remotePath()
ToPath: enc.FromStandardPath(dstObj.remotePath()),
},
}
var err error var err error
var result *files.RelocationResult var result *files.RelocationResult
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
@@ -700,12 +687,10 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Optional interface: Only implement this if you have a way of // Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the // deleting all the files quicker than just running Remove() on the
// result of List() // result of List()
func (f *Fs) Purge(ctx context.Context) (err error) { func (f *Fs) Purge() (err error) {
// Let dropbox delete the filesystem tree // Let dropbox delete the filesystem tree
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.DeleteV2(&files.DeleteArg{ _, err = f.srv.DeleteV2(&files.DeleteArg{Path: f.slashRoot})
Path: enc.FromStandardPath(f.slashRoot),
})
return shouldRetry(err) return shouldRetry(err)
}) })
return err return err
@@ -720,7 +705,7 @@ func (f *Fs) Purge(ctx context.Context) (err error) {
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
// If it isn't possible then return fs.ErrorCantMove // If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object) srcObj, ok := src.(*Object)
if !ok { if !ok {
fs.Debugf(src, "Can't move - not same remote type") fs.Debugf(src, "Can't move - not same remote type")
@@ -734,12 +719,9 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
} }
// Do the move // Do the move
arg := files.RelocationArg{ arg := files.RelocationArg{}
RelocationPath: files.RelocationPath{ arg.FromPath = srcObj.remotePath()
FromPath: enc.FromStandardPath(srcObj.remotePath()), arg.ToPath = dstObj.remotePath()
ToPath: enc.FromStandardPath(dstObj.remotePath()),
},
}
var err error var err error
var result *files.RelocationResult var result *files.RelocationResult
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
@@ -763,8 +745,8 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
} }
// PublicLink adds a "readable by anyone with link" permission on the given file or folder. // PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) { func (f *Fs) PublicLink(remote string) (link string, err error) {
absPath := enc.FromStandardPath(path.Join(f.slashRoot, remote)) absPath := "/" + path.Join(f.Root(), remote)
fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath) fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
createArg := sharing.CreateSharedLinkWithSettingsArg{ createArg := sharing.CreateSharedLinkWithSettingsArg{
Path: absPath, Path: absPath,
@@ -775,8 +757,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
return shouldRetry(err) return shouldRetry(err)
}) })
if err != nil && strings.Contains(err.Error(), if err != nil && strings.Contains(err.Error(), sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
fs.Debugf(absPath, "has a public link already, attempting to retrieve it") fs.Debugf(absPath, "has a public link already, attempting to retrieve it")
listArg := sharing.ListSharedLinksArg{ listArg := sharing.ListSharedLinksArg{
Path: absPath, Path: absPath,
@@ -817,7 +798,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
// If it isn't possible then return fs.ErrorCantDirMove // If it isn't possible then return fs.ErrorCantDirMove
// //
// If destination exists then return fs.ErrorDirExists // If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs) srcFs, ok := src.(*Fs)
if !ok { if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type") fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -838,12 +819,9 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// ...apparently not necessary // ...apparently not necessary
// Do the move // Do the move
arg := files.RelocationArg{ arg := files.RelocationArg{}
RelocationPath: files.RelocationPath{ arg.FromPath = srcPath
FromPath: enc.FromStandardPath(srcPath), arg.ToPath = dstPath
ToPath: enc.FromStandardPath(dstPath),
},
}
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.MoveV2(&arg) _, err = f.srv.MoveV2(&arg)
return shouldRetry(err) return shouldRetry(err)
@@ -856,7 +834,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
} }
// About gets quota information // About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) { func (f *Fs) About() (usage *fs.Usage, err error) {
var q *users.SpaceUsage var q *users.SpaceUsage
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
q, err = f.users.GetSpaceUsage() q, err = f.users.GetSpaceUsage()
@@ -884,7 +862,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
// Hashes returns the supported hash sets. // Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set { func (f *Fs) Hashes() hash.Set {
return hash.Set(DbHashType) return hash.Set(hash.Dropbox)
} }
// ------------------------------------------------------------ // ------------------------------------------------------------
@@ -908,8 +886,8 @@ func (o *Object) Remote() string {
} }
// Hash returns the dropbox special hash // Hash returns the dropbox special hash
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { func (o *Object) Hash(t hash.Type) (string, error) {
if t != DbHashType { if t != hash.Dropbox {
return "", hash.ErrUnsupported return "", hash.ErrUnsupported
} }
err := o.readMetaData() err := o.readMetaData()
@@ -970,7 +948,7 @@ func (o *Object) readMetaData() (err error) {
// //
// It attempts to read the objects mtime and if that isn't present the // It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers // LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time { func (o *Object) ModTime() time.Time {
err := o.readMetaData() err := o.readMetaData()
if err != nil { if err != nil {
fs.Debugf(o, "Failed to read metadata: %v", err) fs.Debugf(o, "Failed to read metadata: %v", err)
@@ -982,7 +960,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// SetModTime sets the modification time of the local fs object // SetModTime sets the modification time of the local fs object
// //
// Commits the datastore // Commits the datastore
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { func (o *Object) SetModTime(modTime time.Time) error {
// Dropbox doesn't have a way of doing this so returning this // Dropbox doesn't have a way of doing this so returning this
// error will cause the file to be deleted first then // error will cause the file to be deleted first then
// re-uploaded to set the time. // re-uploaded to set the time.
@@ -995,13 +973,9 @@ func (o *Object) Storable() bool {
} }
// Open an object for read // Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
fs.FixRangeOption(options, o.bytes)
headers := fs.OpenOptionHeaders(options) headers := fs.OpenOptionHeaders(options)
arg := files.DownloadArg{ arg := files.DownloadArg{Path: o.remotePath(), ExtraHeaders: headers}
Path: enc.FromStandardPath(o.remotePath()),
ExtraHeaders: headers,
}
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
_, in, err = o.fs.srv.Download(&arg) _, in, err = o.fs.srv.Download(&arg)
return shouldRetry(err) return shouldRetry(err)
@@ -1010,7 +984,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
switch e := err.(type) { switch e := err.(type) {
case files.DownloadAPIError: case files.DownloadAPIError:
// Don't attempt to retry copyright violation errors // Don't attempt to retry copyright violation errors
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorRestrictedContent { if e.EndpointError.Path.Tag == files.LookupErrorRestrictedContent {
return nil, fserrors.NoRetryError(err) return nil, fserrors.NoRetryError(err)
} }
} }
@@ -1125,15 +1099,16 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
// Copy the reader into the object updating modTime and size // Copy the reader into the object updating modTime and size
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
remote := o.remotePath() remote := o.remotePath()
if ignoredFiles.MatchString(remote) { if ignoredFiles.MatchString(remote) {
return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote))) fs.Logf(o, "File name disallowed - not uploading")
return nil
} }
commitInfo := files.NewCommitInfo(enc.FromStandardPath(o.remotePath())) commitInfo := files.NewCommitInfo(o.remotePath())
commitInfo.Mode.Tag = "overwrite" commitInfo.Mode.Tag = "overwrite"
// The Dropbox API only accepts timestamps in UTC with second precision. // The Dropbox API only accepts timestamps in UTC with second precision.
commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second) commitInfo.ClientModified = src.ModTime().UTC().Round(time.Second)
size := src.Size() size := src.Size()
var err error var err error
@@ -1153,11 +1128,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} }
// Remove an object // Remove an object
func (o *Object) Remove(ctx context.Context) (err error) { func (o *Object) Remove() (err error) {
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
_, err = o.fs.srv.DeleteV2(&files.DeleteArg{ _, err = o.fs.srv.DeleteV2(&files.DeleteArg{Path: o.remotePath()})
Path: enc.FromStandardPath(o.remotePath()),
})
return shouldRetry(err) return shouldRetry(err)
}) })
return err return err

View File

@@ -4,8 +4,8 @@ package dropbox
import ( import (
"testing" "testing"
"github.com/rclone/rclone/fs" "github.com/ncw/rclone/fs"
"github.com/rclone/rclone/fstest/fstests" "github.com/ncw/rclone/fstest/fstests"
) )
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the remote

View File

@@ -1,396 +0,0 @@
package fichier
import (
"context"
"io"
"net/http"
"regexp"
"strconv"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/rest"
)
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	429, // Too Many Requests.
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
// so it can be used directly as the pacer callback's return value.
func shouldRetry(resp *http.Response, err error) (bool, error) {
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// isAlphaNumeric reports whether its argument consists only of ASCII
// letters and digits - used to validate upload IDs before they are
// interpolated into request URLs.
var isAlphaNumeric = regexp.MustCompile(`^[a-zA-Z0-9]+$`).MatchString
// getDownloadToken requests a single-use download token for the file
// at the given 1Fichier URL. The token must be obtained before the
// file content can be downloaded.
func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
	request := DownloadRequest{
		URL:    url,
		Single: 1,
	}
	opts := rest.Opts{
		Method: "POST",
		Path:   "/download/get_token.cgi",
	}

	var token GetTokenResponse
	err := f.pacer.Call(func() (bool, error) {
		resp, err := f.rest.CallJSON(ctx, &opts, &request, &token)
		return shouldRetry(resp, err)
	})
	if err != nil {
		// Previously this wrapped with "couldn't list files" - a
		// copy/paste error which misattributed the failing call.
		return nil, errors.Wrap(err, "couldn't get download token")
	}

	return &token, nil
}
// fileFromSharedFile converts a SharedFile entry (as returned by a
// shared-folder listing) into the generic File representation used by
// the rest of the backend.
func fileFromSharedFile(file *SharedFile) File {
	var converted File
	converted.URL = file.Link
	converted.Filename = file.Filename
	converted.Size = file.Size
	return converted
}
// listSharedFiles lists the contents of the shared folder identified
// by id. Shared folders are listed via the public web endpoint rather
// than the authenticated API.
func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntries, err error) {
	opts := rest.Opts{
		Method:     "GET",
		RootURL:    "https://1fichier.com/dir/",
		Path:       id,
		Parameters: map[string][]string{"json": {"1"}},
	}

	var listing SharedFolderResponse
	err = f.pacer.Call(func() (bool, error) {
		resp, callErr := f.rest.CallJSON(ctx, &opts, nil, &listing)
		return shouldRetry(resp, callErr)
	})
	if err != nil {
		return nil, errors.Wrap(err, "couldn't list files")
	}

	entries = make([]fs.DirEntry, len(listing))
	for i := range listing {
		// Shared files always live at the root, hence the empty dir.
		entries[i] = f.newObjectFromFile(ctx, "", fileFromSharedFile(&listing[i]))
	}
	return entries, nil
}
// listFiles fetches the files contained in the folder with the given
// numeric ID.
//
// File names are converted from 1Fichier's encoding back to rclone's
// standard encoding before being returned.
func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesList, err error) {
	// fs.Debugf(f, "Requesting files for dir `%s`", directoryID)
	request := ListFilesRequest{
		FolderID: directoryID,
	}

	opts := rest.Opts{
		Method: "POST",
		Path:   "/file/ls.cgi",
	}

	filesList = &FilesList{}
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.rest.CallJSON(ctx, &opts, &request, filesList)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "couldn't list files")
	}
	// Decode names in place via a pointer so the conversion sticks.
	for i := range filesList.Items {
		item := &filesList.Items[i]
		item.Filename = enc.ToStandardName(item.Filename)
	}

	return filesList, nil
}
// listFolders fetches the sub folders of the folder with the given
// numeric ID.
//
// Folder names are converted from 1Fichier's encoding back to rclone's
// standard encoding before being returned.
func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *FoldersList, err error) {
	// fs.Debugf(f, "Requesting folders for id `%s`", directoryID)
	request := ListFolderRequest{FolderID: directoryID}

	opts := rest.Opts{
		Method: "POST",
		Path:   "/folder/ls.cgi",
	}

	foldersList = new(FoldersList)
	err = f.pacer.Call(func() (bool, error) {
		resp, callErr := f.rest.CallJSON(ctx, &opts, &request, foldersList)
		return shouldRetry(resp, callErr)
	})
	if err != nil {
		return nil, errors.Wrap(err, "couldn't list folders")
	}

	// Decode names in place.
	foldersList.Name = enc.ToStandardName(foldersList.Name)
	for i := range foldersList.SubFolders {
		sub := &foldersList.SubFolders[i]
		sub.Name = enc.ToStandardName(sub.Name)
	}
	// fs.Debugf(f, "Got FoldersList for id `%s`", directoryID)

	return foldersList, nil
}
// listDir lists the directory dir, returning both files and sub
// directories as fs.DirEntries.
//
// As a side effect each sub folder's ID is stored in the directory
// cache so that later lookups do not require extra API calls.
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		return nil, err
	}

	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return nil, err
	}

	// The dircache stores IDs as strings but the API wants ints.
	folderID, err := strconv.Atoi(directoryID)
	if err != nil {
		return nil, err
	}

	files, err := f.listFiles(ctx, folderID)
	if err != nil {
		return nil, err
	}

	folders, err := f.listFolders(ctx, folderID)
	if err != nil {
		return nil, err
	}

	// Files occupy the front of the slice, folders the back.
	entries = make([]fs.DirEntry, len(files.Items)+len(folders.SubFolders))

	for i, item := range files.Items {
		entries[i] = f.newObjectFromFile(ctx, dir, item)
	}

	for i, folder := range folders.SubFolders {
		createDate, err := time.Parse("2006-01-02 15:04:05", folder.CreateDate)
		if err != nil {
			return nil, err
		}

		fullPath := getRemote(dir, folder.Name)
		folderID := strconv.Itoa(folder.ID)

		entries[len(files.Items)+i] = fs.NewDir(fullPath, createDate).SetID(folderID)

		// fs.Debugf(f, "Put Path `%s` for id `%d` into dircache", fullPath, folder.ID)
		f.dirCache.Put(fullPath, folderID)
	}

	return entries, nil
}
// newObjectFromFile wraps a File returned by the API in an Object
// rooted at dir.
func (f *Fs) newObjectFromFile(ctx context.Context, dir string, item File) *Object {
	obj := &Object{
		fs:     f,
		remote: getRemote(dir, item.Filename),
		file:   item,
	}
	return obj
}
// getRemote joins dir and fileName with a "/", returning just fileName
// when dir is empty (i.e. the file lives at the root).
func getRemote(dir, fileName string) string {
	if dir != "" {
		return dir + "/" + fileName
	}
	return fileName
}
// makeFolder creates a folder named leaf inside the folder with the
// given numeric parent ID and returns the API's response (including
// the new folder's ID).
func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (response *MakeFolderResponse, err error) {
	// Folder names must be in 1Fichier's encoding on the wire.
	name := enc.FromStandardName(leaf)
	// fs.Debugf(f, "Creating folder `%s` in id `%s`", name, directoryID)

	request := MakeFolderRequest{
		FolderID: folderID,
		Name:     name,
	}

	opts := rest.Opts{
		Method: "POST",
		Path:   "/folder/mkdir.cgi",
	}

	response = &MakeFolderResponse{}
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.rest.CallJSON(ctx, &opts, &request, response)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "couldn't create folder")
	}

	// fs.Debugf(f, "Created Folder `%s` in id `%s`", name, directoryID)

	return response, err
}
// removeFolder deletes the folder with the given numeric ID.
//
// A non-"OK" status from the API is turned into an error; in practice
// this happens when the folder is not empty.
func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (response *GenericOKResponse, err error) {
	// fs.Debugf(f, "Removing folder with id `%s`", directoryID)

	request := &RemoveFolderRequest{
		FolderID: folderID,
	}

	opts := rest.Opts{
		Method: "POST",
		Path:   "/folder/rm.cgi",
	}

	response = &GenericOKResponse{}
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.rest.CallJSON(ctx, &opts, request, response)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "couldn't remove folder")
	}
	if response.Status != "OK" {
		return nil, errors.New("Can't remove non-empty dir")
	}

	// fs.Debugf(f, "Removed Folder with id `%s`", directoryID)

	return response, nil
}

// deleteFile removes the file identified by its download URL (which
// doubles as the file's unique ID in the 1Fichier API).
func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKResponse, err error) {
	request := &RemoveFileRequest{
		Files: []RmFile{
			{url},
		},
	}

	opts := rest.Opts{
		Method: "POST",
		Path:   "/file/rm.cgi",
	}

	response = &GenericOKResponse{}
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.rest.CallJSON(ctx, &opts, request, response)
		return shouldRetry(resp, err)
	})

	if err != nil {
		return nil, errors.Wrap(err, "couldn't remove file")
	}

	// fs.Debugf(f, "Removed file with url `%s`", url)

	return response, nil
}
// getUploadNode asks the API which upload server to use and returns
// the node's host name together with a fresh upload ID.
func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse, err error) {
	// fs.Debugf(f, "Requesting Upload node")

	opts := rest.Opts{
		Method:      "GET",
		ContentType: "application/json", // 1Fichier API is bad
		Path:        "/upload/get_upload_server.cgi",
	}

	response = &GetUploadNodeResponse{}
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
		return shouldRetry(resp, err)
	})
	if err != nil {
		// Message fixed - was the ungrammatical "didnt got an upload node".
		return nil, errors.Wrap(err, "didn't get an upload node")
	}

	// fs.Debugf(f, "Got Upload node")

	return response, err
}
// uploadFile uploads size bytes from in as fileName into the folder
// folderID, using the upload ID and node previously obtained from
// getUploadNode.
//
// It returns the *http.Response of the upload call. Note the request
// is made with NoResponse set, so the body is already drained.
func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
	// fs.Debugf(f, "Uploading File `%s`", fileName)

	fileName = enc.FromStandardName(fileName)

	// The upload ID is interpolated into the URL, so validate it strictly.
	if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
		return nil, errors.New("Invalid UploadID")
	}

	opts := rest.Opts{
		Method: "POST",
		Path:   "/upload.cgi",
		Parameters: map[string][]string{
			"id": {uploadID},
		},
		NoResponse:           true,
		Body:                 in,
		ContentLength:        &size,
		MultipartContentName: "file[]",
		MultipartFileName:    fileName,
		MultipartParams: map[string][]string{
			"did": {folderID},
		},
	}

	if node != "" {
		opts.RootURL = "https://" + node
	}

	err = f.pacer.CallNoRetry(func() (bool, error) {
		// Capture into the named return value. Previously the result
		// was assigned to a shadowing local and discarded, so the
		// function unconditionally returned a nil response.
		resp, err := f.rest.CallJSON(ctx, &opts, nil, nil)
		response = resp
		return shouldRetry(resp, err)
	})

	if err != nil {
		return nil, errors.Wrap(err, "couldn't upload file")
	}

	// fs.Debugf(f, "Uploaded File `%s`", fileName)

	return response, err
}
// endUpload finalises the upload identified by uploadID on the given
// upload node, returning the metadata (links, sizes, checksums) of the
// uploaded file(s).
func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (response *EndFileUploadResponse, err error) {
	// fs.Debugf(f, "Ending File Upload `%s`", uploadID)

	// The upload ID is interpolated into the URL, so validate it strictly.
	if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
		return nil, errors.New("Invalid UploadID")
	}

	opts := rest.Opts{
		Method:  "GET",
		Path:    "/end.pl",
		RootURL: "https://" + nodeurl,
		Parameters: map[string][]string{
			"xid": {uploadID},
		},
		ExtraHeaders: map[string]string{
			// Asks the node to respond in JSON rather than HTML.
			"JSON": "1",
		},
	}

	response = &EndFileUploadResponse{}
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
		return shouldRetry(resp, err)
	})

	if err != nil {
		return nil, errors.Wrap(err, "couldn't finish file upload")
	}

	return response, err
}

View File

@@ -1,413 +0,0 @@
package fichier
import (
"context"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
const (
	rootID        = "0"                            // ID of the root folder in the 1Fichier API
	apiBaseURL    = "https://api.1fichier.com/v1"  // base URL for all API calls
	minSleep      = 334 * time.Millisecond         // 3 API calls per second is recommended
	maxSleep      = 5 * time.Second                // upper bound for pacer back-off
	decayConstant = 2                              // bigger for slower decay, exponential
)

// enc is the name encoding used to translate between rclone's standard
// file names and what the 1Fichier API accepts.
const enc = encodings.Fichier
// init registers the 1Fichier backend and its options with rclone's
// filesystem registry.
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "fichier",
		Description: "1Fichier",
		// No interactive configuration is needed for this backend.
		Config: func(name string, config configmap.Mapper) {
		},
		NewFs: NewFs,
		Options: []fs.Option{
			{
				Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
				Name: "api_key",
			},
			{
				Help:     "If you want to download a shared folder, add this parameter",
				Name:     "shared_folder",
				Required: false,
				Advanced: true,
			},
		},
	})
}
// Options defines the configuration for this backend
type Options struct {
	APIKey       string `config:"api_key"`       // API key from https://1fichier.com/console/params.pl
	SharedFolder string `config:"shared_folder"` // optional shared folder ID to download from
}

// Fs is the interface a cloud storage system must provide
type Fs struct {
	root       string             // the path we are working on
	name       string             // name of this remote as passed to NewFs
	features   *fs.Features       // optional features
	dirCache   *dircache.DirCache // cache of directory path -> folder ID
	baseClient *http.Client       // plain http client
	options    *Options           // parsed configuration
	pacer      *fs.Pacer          // rate limiter for API calls
	rest       *rest.Client       // REST client pointed at apiBaseURL
}
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
	parentID, convErr := strconv.Atoi(pathID)
	if convErr != nil {
		return "", false, convErr
	}

	listing, listErr := f.listFolders(ctx, parentID)
	if listErr != nil {
		return "", false, listErr
	}

	// Scan the sub folders for one matching the requested name.
	for i := range listing.SubFolders {
		if listing.SubFolders[i].Name == leaf {
			return strconv.Itoa(listing.SubFolders[i].ID), true, nil
		}
	}
	return "", false, nil
}
// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
	parentID, convErr := strconv.Atoi(pathID)
	if convErr != nil {
		return "", convErr
	}

	made, mkErr := f.makeFolder(ctx, leaf, parentID)
	if mkErr != nil {
		return "", mkErr
	}

	// The dircache wants string IDs.
	return strconv.Itoa(made.FolderID), nil
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String returns a description of the FS
func (f *Fs) String() string {
	return fmt.Sprintf("1Fichier root '%s'", f.root)
}

// Precision of the ModTimes in this Fs
//
// Modification times cannot be set on 1Fichier, hence ModTimeNotSupported.
func (f *Fs) Precision() time.Duration {
	return fs.ModTimeNotSupported
}

// Hashes returns the supported hash types of the filesystem
//
// 1Fichier supplies Whirlpool checksums only.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.Whirlpool)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}
// NewFs makes a new Fs object from the path
//
// The path is of the form remote:path
//
// Remotes are looked up in the config file. If the remote isn't
// found then NotFoundInConfigFile will be returned.
//
// On Windows avoid single character remote names as they can be mixed
// up with drive letters.
func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
	opt := new(Options)
	err := configstruct.Set(config, opt)
	if err != nil {
		return nil, err
	}

	// If using a Shared Folder override root
	if opt.SharedFolder != "" {
		root = ""
	}

	//workaround for wonky parser
	root = strings.Trim(root, "/")

	f := &Fs{
		name:       name,
		root:       root,
		options:    opt,
		pacer:      fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
		baseClient: &http.Client{},
	}

	f.features = (&fs.Features{
		DuplicateFiles:          true,
		CanHaveEmptyDirectories: true,
	}).Fill(f)

	client := fshttp.NewClient(fs.Config)

	f.rest = rest.NewClient(client).SetRoot(apiBaseURL)

	// All API calls are authenticated with the bearer API key.
	f.rest.SetHeader("Authorization", "Bearer "+f.options.APIKey)

	f.dirCache = dircache.New(root, rootID, f)

	ctx := context.Background()

	// Find the current root
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		// Assume it is a file: retry with the parent directory as root
		// and the last path segment as the file name.
		newRoot, remote := dircache.SplitPath(root)
		tempF := *f
		tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
		tempF.root = newRoot
		// Make new Fs which is the parent
		err = tempF.dirCache.FindRoot(ctx, false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		_, err := tempF.NewObject(ctx, remote)
		if err != nil {
			if err == fs.ErrorObjectNotFound {
				// File doesn't exist so return old f
				return f, nil
			}
			return nil, err
		}
		f.features.Fill(&tempF)
		// XXX: update the old f here instead of returning tempF, since
		// `features` were already filled with functions having *f as a receiver.
		// See https://github.com/rclone/rclone/issues/2182
		f.dirCache = tempF.dirCache
		f.root = tempF.root
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	// A configured shared folder bypasses the directory cache entirely.
	if f.options.SharedFolder != "" {
		return f.listSharedFiles(ctx, f.options.SharedFolder)
	}
	return f.listDir(ctx, dir)
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, false)
	if err != nil {
		if err == fs.ErrorDirNotFound {
			// A missing parent directory means the object cannot exist.
			return nil, fs.ErrorObjectNotFound
		}
		return nil, err
	}

	folderID, err := strconv.Atoi(directoryID)
	if err != nil {
		return nil, err
	}
	// List the parent folder and look for the leaf by name.
	files, err := f.listFiles(ctx, folderID)
	if err != nil {
		return nil, err
	}

	for _, file := range files.Items {
		if file.Filename == leaf {
			path, ok := f.dirCache.GetInv(directoryID)

			if !ok {
				return nil, errors.New("Cannot find dir in dircache")
			}

			return f.newObjectFromFile(ctx, path, file), nil
		}
	}

	return nil, fs.ErrorObjectNotFound
}
// Put in to the remote path with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
// return an error or upload it properly (rather than e.g. calling panic).
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	existing, err := f.NewObject(ctx, src.Remote())
	if err == nil {
		// The object already exists - overwrite it in place.
		return existing, existing.Update(ctx, in, src, options...)
	}
	if err == fs.ErrorObjectNotFound {
		// Not found so create it
		return f.PutUnchecked(ctx, in, src, options...)
	}
	return nil, err
}
// putUnchecked uploads the object with the given name and size
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
	// 1Fichier rejects files over 100 GB and empty files.
	if size > int64(100e9) {
		return nil, errors.New("File too big, cant upload")
	} else if size == 0 {
		return nil, fs.ErrorCantUploadEmptyFiles
	}

	// Upload is a three step dance: get a node, send the bytes, end the
	// upload to collect the resulting metadata.
	nodeResponse, err := f.getUploadNode(ctx)
	if err != nil {
		return nil, err
	}

	leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
	if err != nil {
		return nil, err
	}

	_, err = f.uploadFile(ctx, in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL)
	if err != nil {
		return nil, err
	}

	fileUploadResponse, err := f.endUpload(ctx, nodeResponse.ID, nodeResponse.URL)
	if err != nil {
		return nil, err
	}

	// Exactly one link is expected since exactly one file was uploaded.
	if len(fileUploadResponse.Links) != 1 {
		return nil, errors.New("unexpected amount of files")
	}

	link := fileUploadResponse.Links[0]
	fileSize, err := strconv.ParseInt(link.Size, 10, 64)
	if err != nil {
		return nil, err
	}

	return &Object{
		fs:     f,
		remote: remote,
		file: File{
			ACL:         0,
			CDN:         0,
			Checksum:    link.Whirlpool,
			ContentType: "",
			Date:        time.Now().Format("2006-01-02 15:04:05"),
			Filename:    link.Filename,
			Pass:        0,
			Size:        fileSize,
			URL:         link.Download,
		},
	}, nil
}
// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Thin wrapper that unpacks the ObjectInfo for putUnchecked.
	return f.putUnchecked(ctx, in, src.Remote(), src.Size(), options...)
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	if err := f.dirCache.FindRoot(ctx, true); err != nil {
		return err
	}
	if dir == "" {
		// The root itself was all that was asked for.
		return nil
	}
	_, err := f.dirCache.FindDir(ctx, dir, true)
	return err
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	err := f.dirCache.FindRoot(ctx, false)
	if err != nil {
		return err
	}

	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return err
	}

	// The dircache stores IDs as strings but the API wants ints.
	folderID, err := strconv.Atoi(directoryID)
	if err != nil {
		return err
	}

	// removeFolder errors if the folder is not empty.
	_, err = f.removeFolder(ctx, dir, folderID)
	if err != nil {
		return err
	}

	// Keep the directory cache consistent with the remote.
	f.dirCache.FlushDir(dir)

	return nil
}
// Check the interfaces are satisfied at compile time.
var (
	_ fs.Fs              = (*Fs)(nil)
	_ fs.PutUncheckeder  = (*Fs)(nil)
	_ dircache.DirCacher = (*Fs)(nil)
)

View File

@@ -1,17 +0,0 @@
// Test 1Fichier filesystem interface
package fichier

import (
	"testing"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	// Enable debug logging so failures in the integration suite carry
	// full API traces.
	fs.Config.LogLevel = fs.LogLevelDebug
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestFichier:",
	})
}

View File

@@ -1,158 +0,0 @@
package fichier
import (
"context"
"io"
"net/http"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/rest"
)
// Object is a filesystem like object provided by an Fs
type Object struct {
	fs     *Fs    // what this object is part of
	remote string // the remote path of the object
	file   File   // metadata as returned by the 1Fichier API
}
// String returns a description of the Object
func (o *Object) String() string {
	return o.file.Filename
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (o *Object) ModTime(ctx context.Context) time.Time {
	modTime, err := time.Parse("2006-01-02 15:04:05", o.file.Date)

	if err != nil {
		// Fall back to "now" when the server supplied date doesn't parse.
		return time.Now()
	}

	return modTime
}

// Size returns the size of the file
func (o *Object) Size() int64 {
	return o.file.Size
}

// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	// 1Fichier only supplies Whirlpool checksums.
	if t != hash.Whirlpool {
		return "", hash.ErrUnsupported
	}

	return o.file.Checksum, nil
}

// Storable says whether this object can be stored
func (o *Object) Storable() bool {
	return true
}

// SetModTime sets the metadata on the object to set the modification date
func (o *Object) SetModTime(context.Context, time.Time) error {
	// 1Fichier has no way to set modification times; returning this
	// error makes rclone delete and re-upload instead.
	return fs.ErrorCantSetModTime
	//return errors.New("setting modtime is not supported for 1fichier remotes")
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
	fs.FixRangeOption(options, o.file.Size)
	// Every download needs a fresh single-use token.
	downloadToken, err := o.fs.getDownloadToken(ctx, o.file.URL)

	if err != nil {
		return nil, err
	}

	var resp *http.Response
	opts := rest.Opts{
		Method:  "GET",
		RootURL: downloadToken.URL,
		Options: options,
	}

	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.rest.Call(ctx, &opts)
		return shouldRetry(resp, err)
	})

	if err != nil {
		return nil, err
	}
	// The caller is responsible for closing the body.
	return resp.Body, err
}
// Update in to the object with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	if src.Size() < 0 {
		return errors.New("refusing to update with unknown size")
	}

	// upload with new size but old name - this creates a duplicate
	// alongside the existing file
	info, err := o.fs.putUnchecked(ctx, in, o.Remote(), src.Size(), options...)
	if err != nil {
		return err
	}

	// Delete duplicate after successful upload
	err = o.Remove(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to remove old version")
	}

	// Replace guts of old object with new one
	*o = *info.(*Object)

	return nil
}
// Remove removes this object
func (o *Object) Remove(ctx context.Context) error {
	// fs.Debugf(f, "Removing file `%s` with url `%s`", o.file.Filename, o.file.URL)

	_, err := o.fs.deleteFile(ctx, o.file.URL)
	return err
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
	return o.file.ContentType
}

// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
	// The download URL doubles as the file's unique identifier.
	return o.file.URL
}

// Check the interfaces are satisfied at compile time.
var (
	_ fs.Object    = (*Object)(nil)
	_ fs.MimeTyper = (*Object)(nil)
	_ fs.IDer      = (*Object)(nil)
)

View File

@@ -1,120 +0,0 @@
package fichier
// ListFolderRequest is the request structure of the corresponding request
type ListFolderRequest struct {
	FolderID int `json:"folder_id"`
}

// ListFilesRequest is the request structure of the corresponding request
type ListFilesRequest struct {
	FolderID int `json:"folder_id"`
}

// DownloadRequest is the request structure of the corresponding request
type DownloadRequest struct {
	URL    string `json:"url"`
	Single int    `json:"single"` // set to 1 to request a single-use token
}

// RemoveFolderRequest is the request structure of the corresponding request
type RemoveFolderRequest struct {
	FolderID int `json:"folder_id"`
}

// RemoveFileRequest is the request structure of the corresponding request
type RemoveFileRequest struct {
	Files []RmFile `json:"files"`
}

// RmFile is the request structure of the corresponding request
type RmFile struct {
	URL string `json:"url"`
}

// GenericOKResponse is the response structure of the corresponding request
type GenericOKResponse struct {
	Status  string `json:"status"` // "OK" on success
	Message string `json:"message"`
}

// MakeFolderRequest is the request structure of the corresponding request
type MakeFolderRequest struct {
	Name     string `json:"name"`
	FolderID int    `json:"folder_id"` // parent folder ID
}

// MakeFolderResponse is the response structure of the corresponding request
type MakeFolderResponse struct {
	Name     string `json:"name"`
	FolderID int    `json:"folder_id"` // ID of the newly created folder
}

// GetUploadNodeResponse is the response structure of the corresponding request
type GetUploadNodeResponse struct {
	ID  string `json:"id"`  // upload ID to use with the node
	URL string `json:"url"` // host name of the upload node
}

// GetTokenResponse is the response structure of the corresponding request
//
// NOTE(review): the API capitalises Status/Message here, unlike
// GenericOKResponse - the tags mirror the wire format.
type GetTokenResponse struct {
	URL     string `json:"url"` // tokenised download URL
	Status  string `json:"Status"`
	Message string `json:"Message"`
}

// SharedFolderResponse is the response structure of the corresponding request
type SharedFolderResponse []SharedFile

// SharedFile is the structure how 1Fichier returns a shared File
type SharedFile struct {
	Filename string `json:"filename"`
	Link     string `json:"link"` // download URL of the shared file
	Size     int64  `json:"size"`
}

// EndFileUploadResponse is the response structure of the corresponding request
type EndFileUploadResponse struct {
	Incoming int `json:"incoming"`
	Links    []struct {
		Download  string `json:"download"`
		Filename  string `json:"filename"`
		Remove    string `json:"remove"`
		Size      string `json:"size"` // size as a decimal string
		Whirlpool string `json:"whirlpool"`
	} `json:"links"`
}

// File is the structure how 1Fichier returns a File
type File struct {
	ACL         int    `json:"acl"`
	CDN         int    `json:"cdn"`
	Checksum    string `json:"checksum"` // Whirlpool checksum
	ContentType string `json:"content-type"`
	Date        string `json:"date"` // "2006-01-02 15:04:05" layout
	Filename    string `json:"filename"`
	Pass        int    `json:"pass"`
	Size        int64  `json:"size"`
	URL         string `json:"url"` // download URL, also used as the file's ID
}

// FilesList is the structure how 1Fichier returns a list of files
type FilesList struct {
	Items  []File `json:"items"`
	Status string `json:"Status"`
}

// Folder is the structure how 1Fichier returns a Folder
type Folder struct {
	CreateDate string `json:"create_date"` // "2006-01-02 15:04:05" layout
	ID         int    `json:"id"`
	Name       string `json:"name"`
	Pass       int    `json:"pass"`
}

// FoldersList is the structure how 1Fichier returns a list of Folders
type FoldersList struct {
	FolderID   int      `json:"folder_id"`
	Name       string   `json:"name"`
	Status     string   `json:"Status"`
	SubFolders []Folder `json:"sub_folders"`
}

View File

@@ -2,8 +2,6 @@
package ftp package ftp
import ( import (
"context"
"crypto/tls"
"io" "io"
"net/textproto" "net/textproto"
"os" "os"
@@ -12,19 +10,16 @@ import (
"time" "time"
"github.com/jlaffaye/ftp" "github.com/jlaffaye/ftp"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
) )
const enc = encodings.FTP
// Register with Fs // Register with Fs
func init() { func init() {
fs.Register(&fs.RegInfo{ fs.Register(&fs.RegInfo{
@@ -51,25 +46,11 @@ func init() {
Help: "FTP password", Help: "FTP password",
IsPassword: true, IsPassword: true,
Required: true, Required: true,
}, {
Name: "tls",
Help: "Use FTP over TLS (Implicit)",
Default: false,
}, { }, {
Name: "concurrency", Name: "concurrency",
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited", Help: "Maximum number of FTP simultaneous connections, 0 for unlimited",
Default: 0, Default: 0,
Advanced: true, Advanced: true,
}, {
Name: "no_check_certificate",
Help: "Do not verify the TLS certificate of the server",
Default: false,
Advanced: true,
}, {
Name: "disable_epsv",
Help: "Disable using EPSV even if server advertises support",
Default: false,
Advanced: true,
}, },
}, },
}) })
@@ -77,14 +58,11 @@ func init() {
// Options defines the configuration for this backend // Options defines the configuration for this backend
type Options struct { type Options struct {
Host string `config:"host"` Host string `config:"host"`
User string `config:"user"` User string `config:"user"`
Pass string `config:"pass"` Pass string `config:"pass"`
Port string `config:"port"` Port string `config:"port"`
TLS bool `config:"tls"` Concurrency int `config:"concurrency"`
Concurrency int `config:"concurrency"`
SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"`
} }
// Fs represents a remote FTP server // Fs represents a remote FTP server
@@ -142,18 +120,7 @@ func (f *Fs) Features() *fs.Features {
// Open a new connection to the FTP server. // Open a new connection to the FTP server.
func (f *Fs) ftpConnection() (*ftp.ServerConn, error) { func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
fs.Debugf(f, "Connecting to FTP server") fs.Debugf(f, "Connecting to FTP server")
ftpConfig := []ftp.DialOption{ftp.DialWithTimeout(fs.Config.ConnectTimeout)} c, err := ftp.DialTimeout(f.dialAddr, fs.Config.ConnectTimeout)
if f.opt.TLS {
tlsConfig := &tls.Config{
ServerName: f.opt.Host,
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
}
ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
}
if f.opt.DisableEPSV {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
}
c, err := ftp.Dial(f.dialAddr, ftpConfig...)
if err != nil { if err != nil {
fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err) fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
return nil, errors.Wrap(err, "ftpConnection Dial") return nil, errors.Wrap(err, "ftpConnection Dial")
@@ -215,7 +182,6 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
// NewFs constructs an Fs from the path, container:path // NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) { func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
ctx := context.Background()
// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err) // defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
// Parse config into Options struct // Parse config into Options struct
opt := new(Options) opt := new(Options)
@@ -237,11 +203,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
} }
dialAddr := opt.Host + ":" + port dialAddr := opt.Host + ":" + port
protocol := "ftp://" u := "ftp://" + path.Join(dialAddr+"/", root)
if opt.TLS {
protocol = "ftps://"
}
u := protocol + path.Join(dialAddr+"/", root)
f := &Fs{ f := &Fs{
name: name, name: name,
root: root, root: root,
@@ -268,7 +230,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
if f.root == "." { if f.root == "." {
f.root = "" f.root = ""
} }
_, err := f.NewObject(ctx, remote) _, err := f.NewObject(remote)
if err != nil { if err != nil {
if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile { if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
// File doesn't exist so return old f // File doesn't exist so return old f
@@ -307,37 +269,10 @@ func translateErrorDir(err error) error {
return err return err
} }
// entryToStandard converts an incoming ftp.Entry to Standard encoding
func entryToStandard(entry *ftp.Entry) {
// Skip . and .. as we don't want these encoded
if entry.Name == "." || entry.Name == ".." {
return
}
entry.Name = enc.ToStandardName(entry.Name)
entry.Target = enc.ToStandardPath(entry.Target)
}
// dirFromStandardPath returns dir in encoded form.
func dirFromStandardPath(dir string) string {
// Skip . and .. as we don't want these encoded
if dir == "." || dir == ".." {
return dir
}
return enc.FromStandardPath(dir)
}
// findItem finds a directory entry for the name in its parent directory // findItem finds a directory entry for the name in its parent directory
func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) { func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err) // defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
fullPath := path.Join(f.root, remote) fullPath := path.Join(f.root, remote)
if fullPath == "" || fullPath == "." || fullPath == "/" {
// if root, assume exists and synthesize an entry
return &ftp.Entry{
Name: "",
Type: ftp.EntryTypeFolder,
Time: time.Now(),
}, nil
}
dir := path.Dir(fullPath) dir := path.Dir(fullPath)
base := path.Base(fullPath) base := path.Base(fullPath)
@@ -345,13 +280,12 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
if err != nil { if err != nil {
return nil, errors.Wrap(err, "findItem") return nil, errors.Wrap(err, "findItem")
} }
files, err := c.List(dirFromStandardPath(dir)) files, err := c.List(dir)
f.putFtpConnection(&c, err) f.putFtpConnection(&c, err)
if err != nil { if err != nil {
return nil, translateErrorFile(err) return nil, translateErrorFile(err)
} }
for _, file := range files { for _, file := range files {
entryToStandard(file)
if file.Name == base { if file.Name == base {
return file, nil return file, nil
} }
@@ -361,7 +295,7 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
// NewObject finds the Object at remote. If it can't be found // NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound. // it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) { func (f *Fs) NewObject(remote string) (o fs.Object, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err) // defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
entry, err := f.findItem(remote) entry, err := f.findItem(remote)
if err != nil { if err != nil {
@@ -405,42 +339,17 @@ func (f *Fs) dirExists(remote string) (exists bool, err error) {
// //
// This should return ErrDirNotFound if the directory isn't // This should return ErrDirNotFound if the directory isn't
// found. // found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
// defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err) // defer fs.Trace(dir, "curlevel=%d", curlevel)("")
c, err := f.getFtpConnection() c, err := f.getFtpConnection()
if err != nil { if err != nil {
return nil, errors.Wrap(err, "list") return nil, errors.Wrap(err, "list")
} }
files, err := c.List(path.Join(f.root, dir))
var listErr error f.putFtpConnection(&c, err)
var files []*ftp.Entry if err != nil {
return nil, translateErrorDir(err)
resultchan := make(chan []*ftp.Entry, 1)
errchan := make(chan error, 1)
go func() {
result, err := c.List(dirFromStandardPath(path.Join(f.root, dir)))
f.putFtpConnection(&c, err)
if err != nil {
errchan <- err
return
}
resultchan <- result
}()
// Wait for List for up to Timeout seconds
timer := time.NewTimer(fs.Config.Timeout)
select {
case listErr = <-errchan:
timer.Stop()
return nil, translateErrorDir(listErr)
case files = <-resultchan:
timer.Stop()
case <-timer.C:
// if timer fired assume no error but connection dead
fs.Errorf(f, "Timeout when waiting for List")
return nil, errors.New("Timeout when waiting for List")
} }
// Annoyingly FTP returns success for a directory which // Annoyingly FTP returns success for a directory which
// doesn't exist, so check it really doesn't exist if no // doesn't exist, so check it really doesn't exist if no
// entries found. // entries found.
@@ -455,7 +364,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
} }
for i := range files { for i := range files {
object := files[i] object := files[i]
entryToStandard(object)
newremote := path.Join(dir, object.Name) newremote := path.Join(dir, object.Name)
switch object.Type { switch object.Type {
case ftp.EntryTypeFolder: case ftp.EntryTypeFolder:
@@ -496,7 +404,7 @@ func (f *Fs) Precision() time.Duration {
// May create the object even if it returns an error - if so // May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return // will return the object and the error, otherwise will return
// nil and the error // nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// fs.Debugf(f, "Trying to put file %s", src.Remote()) // fs.Debugf(f, "Trying to put file %s", src.Remote())
err := f.mkParentDir(src.Remote()) err := f.mkParentDir(src.Remote())
if err != nil { if err != nil {
@@ -506,13 +414,13 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
fs: f, fs: f,
remote: src.Remote(), remote: src.Remote(),
} }
err = o.Update(ctx, in, src, options...) err = o.Update(in, src, options...)
return o, err return o, err
} }
// PutStream uploads to the remote path with the modTime given of indeterminate size // PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...) return f.Put(in, src, options...)
} }
// getInfo reads the FileInfo for a path // getInfo reads the FileInfo for a path
@@ -525,21 +433,19 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
if err != nil { if err != nil {
return nil, errors.Wrap(err, "getInfo") return nil, errors.Wrap(err, "getInfo")
} }
files, err := c.List(dirFromStandardPath(dir)) files, err := c.List(dir)
f.putFtpConnection(&c, err) f.putFtpConnection(&c, err)
if err != nil { if err != nil {
return nil, translateErrorFile(err) return nil, translateErrorFile(err)
} }
for i := range files { for i := range files {
file := files[i] if files[i].Name == base {
entryToStandard(file)
if file.Name == base {
info := &FileInfo{ info := &FileInfo{
Name: remote, Name: remote,
Size: file.Size, Size: files[i].Size,
ModTime: file.Time, ModTime: files[i].Time,
IsDir: file.Type == ftp.EntryTypeFolder, IsDir: files[i].Type == ftp.EntryTypeFolder,
} }
return info, nil return info, nil
} }
@@ -549,7 +455,6 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
// mkdir makes the directory and parents using unrooted paths // mkdir makes the directory and parents using unrooted paths
func (f *Fs) mkdir(abspath string) error { func (f *Fs) mkdir(abspath string) error {
abspath = path.Clean(abspath)
if abspath == "." || abspath == "/" { if abspath == "." || abspath == "/" {
return nil return nil
} }
@@ -571,7 +476,7 @@ func (f *Fs) mkdir(abspath string) error {
if connErr != nil { if connErr != nil {
return errors.Wrap(connErr, "mkdir") return errors.Wrap(connErr, "mkdir")
} }
err = c.MakeDir(dirFromStandardPath(abspath)) err = c.MakeDir(abspath)
f.putFtpConnection(&c, err) f.putFtpConnection(&c, err)
switch errX := err.(type) { switch errX := err.(type) {
case *textproto.Error: case *textproto.Error:
@@ -593,7 +498,7 @@ func (f *Fs) mkParentDir(remote string) error {
} }
// Mkdir creates the directory if it doesn't exist // Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) { func (f *Fs) Mkdir(dir string) (err error) {
// defer fs.Trace(dir, "")("err=%v", &err) // defer fs.Trace(dir, "")("err=%v", &err)
root := path.Join(f.root, dir) root := path.Join(f.root, dir)
return f.mkdir(root) return f.mkdir(root)
@@ -602,18 +507,18 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
// Rmdir removes the directory (container, bucket) if empty // Rmdir removes the directory (container, bucket) if empty
// //
// Return an error if it doesn't exist or isn't empty // Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error { func (f *Fs) Rmdir(dir string) error {
c, err := f.getFtpConnection() c, err := f.getFtpConnection()
if err != nil { if err != nil {
return errors.Wrap(translateErrorFile(err), "Rmdir") return errors.Wrap(translateErrorFile(err), "Rmdir")
} }
err = c.RemoveDir(dirFromStandardPath(path.Join(f.root, dir))) err = c.RemoveDir(path.Join(f.root, dir))
f.putFtpConnection(&c, err) f.putFtpConnection(&c, err)
return translateErrorDir(err) return translateErrorDir(err)
} }
// Move renames a remote file object // Move renames a remote file object
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object) srcObj, ok := src.(*Object)
if !ok { if !ok {
fs.Debugf(src, "Can't move - not same remote type") fs.Debugf(src, "Can't move - not same remote type")
@@ -628,14 +533,14 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, errors.Wrap(err, "Move") return nil, errors.Wrap(err, "Move")
} }
err = c.Rename( err = c.Rename(
enc.FromStandardPath(path.Join(srcObj.fs.root, srcObj.remote)), path.Join(srcObj.fs.root, srcObj.remote),
enc.FromStandardPath(path.Join(f.root, remote)), path.Join(f.root, remote),
) )
f.putFtpConnection(&c, err) f.putFtpConnection(&c, err)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "Move Rename failed") return nil, errors.Wrap(err, "Move Rename failed")
} }
dstObj, err := f.NewObject(ctx, remote) dstObj, err := f.NewObject(remote)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "Move NewObject failed") return nil, errors.Wrap(err, "Move NewObject failed")
} }
@@ -650,7 +555,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// If it isn't possible then return fs.ErrorCantDirMove // If it isn't possible then return fs.ErrorCantDirMove
// //
// If destination exists then return fs.ErrorDirExists // If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs) srcFs, ok := src.(*Fs)
if !ok { if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type") fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -682,8 +587,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return errors.Wrap(err, "DirMove") return errors.Wrap(err, "DirMove")
} }
err = c.Rename( err = c.Rename(
dirFromStandardPath(srcPath), srcPath,
dirFromStandardPath(dstPath), dstPath,
) )
f.putFtpConnection(&c, err) f.putFtpConnection(&c, err)
if err != nil { if err != nil {
@@ -713,7 +618,7 @@ func (o *Object) Remote() string {
} }
// Hash returns the hash of an object returning a lowercase hex string // Hash returns the hash of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { func (o *Object) Hash(t hash.Type) (string, error) {
return "", hash.ErrUnsupported return "", hash.ErrUnsupported
} }
@@ -723,12 +628,12 @@ func (o *Object) Size() int64 {
} }
// ModTime returns the modification time of the object // ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time { func (o *Object) ModTime() time.Time {
return o.info.ModTime return o.info.ModTime
} }
// SetModTime sets the modification time of the object // SetModTime sets the modification time of the object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { func (o *Object) SetModTime(modTime time.Time) error {
return nil return nil
} }
@@ -789,7 +694,7 @@ func (f *ftpReadCloser) Close() error {
} }
// Open an object for read // Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) { func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
// defer fs.Trace(o, "")("rc=%v, err=%v", &rc, &err) // defer fs.Trace(o, "")("rc=%v, err=%v", &rc, &err)
path := path.Join(o.fs.root, o.remote) path := path.Join(o.fs.root, o.remote)
var offset, limit int64 = 0, -1 var offset, limit int64 = 0, -1
@@ -809,7 +714,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
if err != nil { if err != nil {
return nil, errors.Wrap(err, "open") return nil, errors.Wrap(err, "open")
} }
fd, err := c.RetrFrom(enc.FromStandardPath(path), uint64(offset)) fd, err := c.RetrFrom(path, uint64(offset))
if err != nil { if err != nil {
o.fs.putFtpConnection(&c, err) o.fs.putFtpConnection(&c, err)
return nil, errors.Wrap(err, "open") return nil, errors.Wrap(err, "open")
@@ -823,7 +728,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
// Copy the reader into the object updating modTime and size // Copy the reader into the object updating modTime and size
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
// defer fs.Trace(o, "src=%v", src)("err=%v", &err) // defer fs.Trace(o, "src=%v", src)("err=%v", &err)
path := path.Join(o.fs.root, o.remote) path := path.Join(o.fs.root, o.remote)
// remove the file if upload failed // remove the file if upload failed
@@ -833,7 +738,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// may still be dealing with it for a moment. A sleep isn't ideal but I haven't been // may still be dealing with it for a moment. A sleep isn't ideal but I haven't been
// able to think of a better method to find out if the server has finished - ncw // able to think of a better method to find out if the server has finished - ncw
time.Sleep(1 * time.Second) time.Sleep(1 * time.Second)
removeErr := o.Remove(ctx) removeErr := o.Remove()
if removeErr != nil { if removeErr != nil {
fs.Debugf(o, "Failed to remove: %v", removeErr) fs.Debugf(o, "Failed to remove: %v", removeErr)
} else { } else {
@@ -844,7 +749,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil { if err != nil {
return errors.Wrap(err, "Update") return errors.Wrap(err, "Update")
} }
err = c.Stor(enc.FromStandardPath(path), in) err = c.Stor(path, in)
if err != nil { if err != nil {
_ = c.Quit() // toss this connection to avoid sync errors _ = c.Quit() // toss this connection to avoid sync errors
remove() remove()
@@ -859,7 +764,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} }
// Remove an object // Remove an object
func (o *Object) Remove(ctx context.Context) (err error) { func (o *Object) Remove() (err error) {
// defer fs.Trace(o, "")("err=%v", &err) // defer fs.Trace(o, "")("err=%v", &err)
path := path.Join(o.fs.root, o.remote) path := path.Join(o.fs.root, o.remote)
// Check if it's a directory or a file // Check if it's a directory or a file
@@ -868,13 +773,13 @@ func (o *Object) Remove(ctx context.Context) (err error) {
return err return err
} }
if info.IsDir { if info.IsDir {
err = o.fs.Rmdir(ctx, o.remote) err = o.fs.Rmdir(o.remote)
} else { } else {
c, err := o.fs.getFtpConnection() c, err := o.fs.getFtpConnection()
if err != nil { if err != nil {
return errors.Wrap(err, "Remove") return errors.Wrap(err, "Remove")
} }
err = c.Delete(enc.FromStandardPath(path)) err = c.Delete(path)
o.fs.putFtpConnection(&c, err) o.fs.putFtpConnection(&c, err)
} }
return err return err

View File

@@ -4,8 +4,8 @@ package ftp_test
import ( import (
"testing" "testing"
"github.com/rclone/rclone/backend/ftp" "github.com/ncw/rclone/backend/ftp"
"github.com/rclone/rclone/fstest/fstests" "github.com/ncw/rclone/fstest/fstests"
) )
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the remote

View File

@@ -1,4 +1,7 @@
// Package googlecloudstorage provides an interface to Google Cloud Storage // Package googlecloudstorage provides an interface to Google Cloud Storage
// +build go1.9
package googlecloudstorage package googlecloudstorage
/* /*
@@ -23,23 +26,23 @@ import (
"net/http" "net/http"
"os" "os"
"path" "path"
"regexp"
"strings" "strings"
"sync"
"time" "time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"golang.org/x/oauth2" "golang.org/x/oauth2"
"golang.org/x/oauth2/google" "golang.org/x/oauth2/google"
"google.golang.org/api/googleapi" "google.golang.org/api/googleapi"
@@ -61,7 +64,7 @@ const (
var ( var (
// Description of how to auth for this app // Description of how to auth for this app
storageConfig = &oauth2.Config{ storageConfig = &oauth2.Config{
Scopes: []string{storage.DevstorageReadWriteScope}, Scopes: []string{storage.DevstorageFullControlScope},
Endpoint: google.Endpoint, Endpoint: google.Endpoint,
ClientID: rcloneClientID, ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
@@ -69,8 +72,6 @@ var (
} }
) )
const enc = encodings.GoogleCloudStorage
// Register with Fs // Register with Fs
func init() { func init() {
fs.Register(&fs.RegInfo{ fs.Register(&fs.RegInfo{
@@ -266,16 +267,16 @@ type Options struct {
// Fs represents a remote storage server // Fs represents a remote storage server
type Fs struct { type Fs struct {
name string // name of this remote name string // name of this remote
root string // the path we are working on if any root string // the path we are working on if any
opt Options // parsed options opt Options // parsed options
features *fs.Features // optional features features *fs.Features // optional features
svc *storage.Service // the connection to the storage server svc *storage.Service // the connection to the storage server
client *http.Client // authorized client client *http.Client // authorized client
rootBucket string // bucket part of root (if any) bucket string // the bucket we are working on
rootDirectory string // directory part of root (if any) bucketOKMu sync.Mutex // mutex to protect bucket OK
cache *bucket.Cache // cache of bucket status bucketOK bool // true if we have created the bucket
pacer *fs.Pacer // To pace the API calls pacer *fs.Pacer // To pace the API calls
} }
// Object describes a storage object // Object describes a storage object
@@ -300,18 +301,18 @@ func (f *Fs) Name() string {
// Root of the remote (as passed into NewFs) // Root of the remote (as passed into NewFs)
func (f *Fs) Root() string { func (f *Fs) Root() string {
return f.root if f.root == "" {
return f.bucket
}
return f.bucket + "/" + f.root
} }
// String converts this Fs to a string // String converts this Fs to a string
func (f *Fs) String() string { func (f *Fs) String() string {
if f.rootBucket == "" { if f.root == "" {
return fmt.Sprintf("GCS root") return fmt.Sprintf("Storage bucket %s", f.bucket)
} }
if f.rootDirectory == "" { return fmt.Sprintf("Storage bucket %s path %s", f.bucket, f.root)
return fmt.Sprintf("GCS bucket %s", f.rootBucket)
}
return fmt.Sprintf("GCS bucket %s path %s", f.rootBucket, f.rootDirectory)
} }
// Features returns the optional features of this Fs // Features returns the optional features of this Fs
@@ -343,24 +344,21 @@ func shouldRetry(err error) (again bool, errOut error) {
return again, err return again, err
} }
// parsePath parses a remote 'url' // Pattern to match a storage path
func parsePath(path string) (root string) { var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
root = strings.Trim(path, "/")
// parseParse parses a storage 'url'
func parsePath(path string) (bucket, directory string, err error) {
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = errors.Errorf("couldn't find bucket in storage path %q", path)
} else {
bucket, directory = parts[1], parts[2]
directory = strings.Trim(directory, "/")
}
return return
} }
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
return enc.FromStandardName(bucketName), enc.FromStandardPath(bucketPath)
}
// split returns bucket and bucketPath from the object
func (o *Object) split() (bucket, bucketPath string) {
return o.fs.split(o.remote)
}
func getServiceAccountClient(credentialsData []byte) (*http.Client, error) { func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...) conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
if err != nil { if err != nil {
@@ -370,15 +368,8 @@ func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
} }
// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
f.root = parsePath(root)
f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}
// NewFs constructs an Fs from the path, bucket:path // NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
var oAuthClient *http.Client var oAuthClient *http.Client
// Parse config into Options struct // Parse config into Options struct
@@ -418,19 +409,22 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
} }
} }
f := &Fs{ bucket, directory, err := parsePath(root)
name: name, if err != nil {
root: root, return nil, err
opt: *opt, }
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
cache: bucket.NewCache(), f := &Fs{
name: name,
bucket: bucket,
root: directory,
opt: *opt,
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
} }
f.setRoot(root)
f.features = (&fs.Features{ f.features = (&fs.Features{
ReadMimeType: true, ReadMimeType: true,
WriteMimeType: true, WriteMimeType: true,
BucketBased: true, BucketBased: true,
BucketBasedRootOK: true,
}).Fill(f) }).Fill(f)
// Create a new authorized Drive client. // Create a new authorized Drive client.
@@ -440,19 +434,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client") return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
} }
if f.rootBucket != "" && f.rootDirectory != "" { if f.root != "" {
f.root += "/"
// Check to see if the object exists // Check to see if the object exists
encodedDirectory := enc.FromStandardPath(f.rootDirectory)
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do() _, err = f.svc.Objects.Get(bucket, directory).Do()
return shouldRetry(err) return shouldRetry(err)
}) })
if err == nil { if err == nil {
newRoot := path.Dir(f.root) f.root = path.Dir(directory)
if newRoot == "." { if f.root == "." {
newRoot = "" f.root = ""
} else {
f.root += "/"
} }
f.setRoot(newRoot)
// return an error with an fs which points to the parent // return an error with an fs which points to the parent
return f, fs.ErrorIsFile return f, fs.ErrorIsFile
} }
@@ -463,7 +458,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Return an Object from a path // Return an Object from a path
// //
// If it can't be found it returns the error fs.ErrorObjectNotFound. // If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *storage.Object) (fs.Object, error) { func (f *Fs) newObjectWithInfo(remote string, info *storage.Object) (fs.Object, error) {
o := &Object{ o := &Object{
fs: f, fs: f,
remote: remote, remote: remote,
@@ -471,7 +466,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *storage
if info != nil { if info != nil {
o.setMetaData(info) o.setMetaData(info)
} else { } else {
err := o.readMetaData(ctx) // reads info and meta, returning an error err := o.readMetaData() // reads info and meta, returning an error
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -481,8 +476,8 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *storage
// NewObject finds the Object at remote. If it can't be found // NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound. // it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil) return f.newObjectWithInfo(remote, nil)
} }
// listFn is called from list to handle an object. // listFn is called from list to handle an object.
@@ -493,24 +488,20 @@ type listFn func(remote string, object *storage.Object, isDirectory bool) error
// dir is the starting directory, "" for root // dir is the starting directory, "" for root
// //
// Set recurse to read sub directories // Set recurse to read sub directories
// func (f *Fs) list(dir string, recurse bool, fn listFn) (err error) {
// The remote has prefix removed from it and if addBucket is set root := f.root
// then it adds the bucket to the start. rootLength := len(root)
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) (err error) { if dir != "" {
if prefix != "" { root += dir + "/"
prefix += "/"
} }
if directory != "" { list := f.svc.Objects.List(f.bucket).Prefix(root).MaxResults(listChunks)
directory += "/"
}
list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks)
if !recurse { if !recurse {
list = list.Delimiter("/") list = list.Delimiter("/")
} }
for { for {
var objects *storage.Objects var objects *storage.Objects
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
objects, err = list.Context(ctx).Do() objects, err = list.Do()
return shouldRetry(err) return shouldRetry(err)
}) })
if err != nil { if err != nil {
@@ -523,38 +514,31 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
} }
if !recurse { if !recurse {
var object storage.Object var object storage.Object
for _, remote := range objects.Prefixes { for _, prefix := range objects.Prefixes {
if !strings.HasSuffix(remote, "/") { if !strings.HasSuffix(prefix, "/") {
continue continue
} }
remote = enc.ToStandardPath(remote) err = fn(prefix[rootLength:len(prefix)-1], &object, true)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", remote)
continue
}
remote = remote[len(prefix) : len(remote)-1]
if addBucket {
remote = path.Join(bucket, remote)
}
err = fn(remote, &object, true)
if err != nil { if err != nil {
return err return err
} }
} }
} }
for _, object := range objects.Items { for _, object := range objects.Items {
remote := enc.ToStandardPath(object.Name) if !strings.HasPrefix(object.Name, root) {
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", object.Name) fs.Logf(f, "Odd name received %q", object.Name)
continue continue
} }
remote = remote[len(prefix):] remote := object.Name[rootLength:]
isDirectory := strings.HasSuffix(remote, "/")
if addBucket {
remote = path.Join(bucket, remote)
}
// is this a directory marker? // is this a directory marker?
if isDirectory && object.Size == 0 { if (strings.HasSuffix(remote, "/") || remote == "") && object.Size == 0 {
if recurse && remote != "" {
// add a directory in if --fast-list since will have no prefixes
err = fn(remote[:len(remote)-1], object, true)
if err != nil {
return err
}
}
continue // skip directory marker continue // skip directory marker
} }
err = fn(remote, object, false) err = fn(remote, object, false)
@@ -571,23 +555,32 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
} }
// Convert a list item into a DirEntry // Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) { func (f *Fs) itemToDirEntry(remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) {
if isDirectory { if isDirectory {
d := fs.NewDir(remote, time.Time{}).SetSize(int64(object.Size)) d := fs.NewDir(remote, time.Time{}).SetSize(int64(object.Size))
return d, nil return d, nil
} }
o, err := f.newObjectWithInfo(ctx, remote, object) o, err := f.newObjectWithInfo(remote, object)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return o, nil return o, nil
} }
// mark the bucket as being OK
func (f *Fs) markBucketOK() {
if f.bucket != "" {
f.bucketOKMu.Lock()
f.bucketOK = true
f.bucketOKMu.Unlock()
}
}
// listDir lists a single directory // listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) { func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
// List the objects // List the objects
err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *storage.Object, isDirectory bool) error { err = f.list(dir, false, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory) entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil { if err != nil {
return err return err
} }
@@ -600,12 +593,15 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
return nil, err return nil, err
} }
// bucket must be present if listing succeeded // bucket must be present if listing succeeded
f.cache.MarkOK(bucket) f.markBucketOK()
return entries, err return entries, err
} }
// listBuckets lists the buckets // listBuckets lists the buckets
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) { func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
if dir != "" {
return nil, fs.ErrorListBucketRequired
}
if f.opt.ProjectNumber == "" { if f.opt.ProjectNumber == "" {
return nil, errors.New("can't list buckets without project number") return nil, errors.New("can't list buckets without project number")
} }
@@ -613,14 +609,14 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
for { for {
var buckets *storage.Buckets var buckets *storage.Buckets
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
buckets, err = listBuckets.Context(ctx).Do() buckets, err = listBuckets.Do()
return shouldRetry(err) return shouldRetry(err)
}) })
if err != nil { if err != nil {
return nil, err return nil, err
} }
for _, bucket := range buckets.Items { for _, bucket := range buckets.Items {
d := fs.NewDir(enc.ToStandardName(bucket.Name), time.Time{}) d := fs.NewDir(bucket.Name, time.Time{})
entries = append(entries, d) entries = append(entries, d)
} }
if buckets.NextPageToken == "" { if buckets.NextPageToken == "" {
@@ -640,15 +636,11 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
// //
// This should return ErrDirNotFound if the directory isn't // This should return ErrDirNotFound if the directory isn't
// found. // found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
bucket, directory := f.split(dir) if f.bucket == "" {
if bucket == "" { return f.listBuckets(dir)
if directory != "" {
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
} }
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "") return f.listDir(dir)
} }
// ListR lists the objects and directories of the Fs starting // ListR lists the objects and directories of the Fs starting
@@ -667,44 +659,23 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// //
// Don't implement this unless you have a more efficient way // Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal. // of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir) if f.bucket == "" {
return fs.ErrorListBucketRequired
}
list := walk.NewListRHelper(callback) list := walk.NewListRHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error { err = f.list(dir, true, func(remote string, object *storage.Object, isDirectory bool) error {
return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *storage.Object, isDirectory bool) error { entry, err := f.itemToDirEntry(remote, object, isDirectory)
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
if err != nil {
return err
}
return list.Add(entry)
})
}
if bucket == "" {
entries, err := f.listBuckets(ctx)
if err != nil { if err != nil {
return err return err
} }
for _, entry := range entries { return list.Add(entry)
err = list.Add(entry) })
if err != nil { if err != nil {
return err return err
}
bucket := entry.Remote()
err = listR(bucket, "", f.rootDirectory, true)
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
}
} else {
err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
} }
// bucket must be present if listing succeeded
f.markBucketOK()
return list.Flush() return list.Flush()
} }
@@ -713,88 +684,94 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
// Copy the reader in to the new object which is returned // Copy the reader in to the new object which is returned
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction // Temporary Object under construction
o := &Object{ o := &Object{
fs: f, fs: f,
remote: src.Remote(), remote: src.Remote(),
} }
return o, o.Update(ctx, in, src, options...) return o, o.Update(in, src, options...)
} }
// PutStream uploads to the remote path with the modTime given of indeterminate size // PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...) return f.Put(in, src, options...)
} }
// Mkdir creates the bucket if it doesn't exist // Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) { func (f *Fs) Mkdir(dir string) (err error) {
bucket, _ := f.split(dir) f.bucketOKMu.Lock()
return f.makeBucket(ctx, bucket) defer f.bucketOKMu.Unlock()
} if f.bucketOK {
return nil
}
// List something from the bucket to see if it exists. Doing it like this enables the use of a
// service account that only has the "Storage Object Admin" role. See #2193 for details.
// makeBucket creates the bucket if it doesn't exist err = f.pacer.Call(func() (bool, error) {
func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) { _, err = f.svc.Objects.List(f.bucket).MaxResults(1).Do()
return f.cache.Create(bucket, func() error { return shouldRetry(err)
// List something from the bucket to see if it exists. Doing it like this enables the use of a })
// service account that only has the "Storage Object Admin" role. See #2193 for details. if err == nil {
err = f.pacer.Call(func() (bool, error) { // Bucket already exists
_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do() f.bucketOK = true
return shouldRetry(err) return nil
}) } else if gErr, ok := err.(*googleapi.Error); ok {
if err == nil { if gErr.Code != http.StatusNotFound {
// Bucket already exists
return nil
} else if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code != http.StatusNotFound {
return errors.Wrap(err, "failed to get bucket")
}
} else {
return errors.Wrap(err, "failed to get bucket") return errors.Wrap(err, "failed to get bucket")
} }
} else {
return errors.Wrap(err, "failed to get bucket")
}
if f.opt.ProjectNumber == "" { if f.opt.ProjectNumber == "" {
return errors.New("can't make bucket without project number") return errors.New("can't make bucket without project number")
} }
bucket := storage.Bucket{ bucket := storage.Bucket{
Name: bucket, Name: f.bucket,
Location: f.opt.Location, Location: f.opt.Location,
StorageClass: f.opt.StorageClass, StorageClass: f.opt.StorageClass,
}
if f.opt.BucketPolicyOnly {
bucket.IamConfiguration = &storage.BucketIamConfiguration{
BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
Enabled: true,
},
} }
if f.opt.BucketPolicyOnly { }
bucket.IamConfiguration = &storage.BucketIamConfiguration{ err = f.pacer.Call(func() (bool, error) {
BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{ insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket)
Enabled: true, if !f.opt.BucketPolicyOnly {
}, insertBucket.PredefinedAcl(f.opt.BucketACL)
}
} }
return f.pacer.Call(func() (bool, error) { _, err = insertBucket.Do()
insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket) return shouldRetry(err)
if !f.opt.BucketPolicyOnly { })
insertBucket.PredefinedAcl(f.opt.BucketACL) if err == nil {
} f.bucketOK = true
_, err = insertBucket.Context(ctx).Do() }
return shouldRetry(err) return err
})
}, nil)
} }
// Rmdir deletes the bucket if the fs is at the root // Rmdir deletes the bucket if the fs is at the root
// //
// Returns an error if it isn't empty: Error 409: The bucket you tried // Returns an error if it isn't empty: Error 409: The bucket you tried
// to delete was not empty. // to delete was not empty.
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) { func (f *Fs) Rmdir(dir string) (err error) {
bucket, directory := f.split(dir) f.bucketOKMu.Lock()
if bucket == "" || directory != "" { defer f.bucketOKMu.Unlock()
if f.root != "" || dir != "" {
return nil return nil
} }
return f.cache.Remove(bucket, func() error { err = f.pacer.Call(func() (bool, error) {
return f.pacer.Call(func() (bool, error) { err = f.svc.Buckets.Delete(f.bucket).Do()
err = f.svc.Buckets.Delete(bucket).Context(ctx).Do() return shouldRetry(err)
return shouldRetry(err)
})
}) })
if err == nil {
f.bucketOK = false
}
return err
} }
// Precision returns the precision // Precision returns the precision
@@ -811,9 +788,8 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
// If it isn't possible then return fs.ErrorCantCopy // If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
dstBucket, dstPath := f.split(remote) err := f.Mkdir("")
err := f.makeBucket(ctx, dstBucket)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -822,7 +798,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't copy - not same remote type") fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy return nil, fs.ErrorCantCopy
} }
srcBucket, srcPath := srcObj.split()
// Temporary Object under construction // Temporary Object under construction
dstObj := &Object{ dstObj := &Object{
@@ -830,13 +805,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
remote: remote, remote: remote,
} }
srcBucket := srcObj.fs.bucket
srcObject := srcObj.fs.root + srcObj.remote
dstBucket := f.bucket
dstObject := f.root + remote
var newObject *storage.Object var newObject *storage.Object
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
copyObject := f.svc.Objects.Copy(srcBucket, srcPath, dstBucket, dstPath, nil) newObject, err = f.svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, nil).Do()
if !f.opt.BucketPolicyOnly {
copyObject.DestinationPredefinedAcl(f.opt.ObjectACL)
}
newObject, err = copyObject.Context(ctx).Do()
return shouldRetry(err) return shouldRetry(err)
}) })
if err != nil { if err != nil {
@@ -873,7 +848,7 @@ func (o *Object) Remote() string {
} }
// Hash returns the Md5sum of an object returning a lowercase hex string // Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.MD5 { if t != hash.MD5 {
return "", hash.ErrUnsupported return "", hash.ErrUnsupported
} }
@@ -919,33 +894,24 @@ func (o *Object) setMetaData(info *storage.Object) {
} }
} }
// readObjectInfo reads the definition for an object // readMetaData gets the metadata if it hasn't already been fetched
func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) { //
bucket, bucketPath := o.split() // it also sets the info
func (o *Object) readMetaData() (err error) {
if !o.modTime.IsZero() {
return nil
}
var object *storage.Object
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do() object, err = o.fs.svc.Objects.Get(o.fs.bucket, o.fs.root+o.remote).Do()
return shouldRetry(err) return shouldRetry(err)
}) })
if err != nil { if err != nil {
if gErr, ok := err.(*googleapi.Error); ok { if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code == http.StatusNotFound { if gErr.Code == http.StatusNotFound {
return nil, fs.ErrorObjectNotFound return fs.ErrorObjectNotFound
} }
} }
return nil, err
}
return object, nil
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
if !o.modTime.IsZero() {
return nil
}
object, err := o.readObjectInfo(ctx)
if err != nil {
return err return err
} }
o.setMetaData(object) o.setMetaData(object)
@@ -956,8 +922,8 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// //
// It attempts to read the objects mtime and if that isn't present the // It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers // LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time { func (o *Object) ModTime() time.Time {
err := o.readMetaData(ctx) err := o.readMetaData()
if err != nil { if err != nil {
// fs.Logf(o, "Failed to read metadata: %v", err) // fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now() return time.Now()
@@ -973,28 +939,16 @@ func metadataFromModTime(modTime time.Time) map[string]string {
} }
// SetModTime sets the modification time of the local fs object // SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) { func (o *Object) SetModTime(modTime time.Time) (err error) {
// read the complete existing object first // This only adds metadata so will perserve other metadata
object, err := o.readObjectInfo(ctx) object := storage.Object{
if err != nil { Bucket: o.fs.bucket,
return err Name: o.fs.root + o.remote,
Metadata: metadataFromModTime(modTime),
} }
// Add the mtime to the existing metadata
mtime := modTime.Format(timeFormatOut)
if object.Metadata == nil {
object.Metadata = make(map[string]string, 1)
}
object.Metadata[metaMtime] = mtime
// Copy the object to itself to update the metadata
// Using PATCH requires too many permissions
bucket, bucketPath := o.split()
var newObject *storage.Object var newObject *storage.Object
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
copyObject := o.fs.svc.Objects.Copy(bucket, bucketPath, bucket, bucketPath, object) newObject, err = o.fs.svc.Objects.Patch(o.fs.bucket, o.fs.root+o.remote, &object).Do()
if !o.fs.opt.BucketPolicyOnly {
copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = copyObject.Context(ctx).Do()
return shouldRetry(err) return shouldRetry(err)
}) })
if err != nil { if err != nil {
@@ -1010,13 +964,11 @@ func (o *Object) Storable() bool {
} }
// Open an object for read // Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
req, err := http.NewRequest("GET", o.url, nil) req, err := http.NewRequest("GET", o.url, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
fs.FixRangeOption(options, o.bytes)
fs.OpenOptionAddHTTPHeaders(req.Header, options) fs.OpenOptionAddHTTPHeaders(req.Header, options)
var res *http.Response var res *http.Response
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
@@ -1043,27 +995,27 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Update the object with the contents of the io.Reader, modTime and size // Update the object with the contents of the io.Reader, modTime and size
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
bucket, bucketPath := o.split() err := o.fs.Mkdir("")
err := o.fs.makeBucket(ctx, bucket)
if err != nil { if err != nil {
return err return err
} }
modTime := src.ModTime(ctx) modTime := src.ModTime()
object := storage.Object{ object := storage.Object{
Bucket: bucket, Bucket: o.fs.bucket,
Name: bucketPath, Name: o.fs.root + o.remote,
ContentType: fs.MimeType(ctx, src), ContentType: fs.MimeType(src),
Updated: modTime.Format(timeFormatOut), // Doesn't get set
Metadata: metadataFromModTime(modTime), Metadata: metadataFromModTime(modTime),
} }
var newObject *storage.Object var newObject *storage.Object
err = o.fs.pacer.CallNoRetry(func() (bool, error) { err = o.fs.pacer.CallNoRetry(func() (bool, error) {
insertObject := o.fs.svc.Objects.Insert(bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name) insertObject := o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
if !o.fs.opt.BucketPolicyOnly { if !o.fs.opt.BucketPolicyOnly {
insertObject.PredefinedAcl(o.fs.opt.ObjectACL) insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
} }
newObject, err = insertObject.Context(ctx).Do() newObject, err = insertObject.Do()
return shouldRetry(err) return shouldRetry(err)
}) })
if err != nil { if err != nil {
@@ -1075,17 +1027,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} }
// Remove an object // Remove an object
func (o *Object) Remove(ctx context.Context) (err error) { func (o *Object) Remove() (err error) {
bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do() err = o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do()
return shouldRetry(err) return shouldRetry(err)
}) })
return err return err
} }
// MimeType of an Object if known, "" otherwise // MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string { func (o *Object) MimeType() string {
return o.mimeType return o.mimeType
} }

View File

@@ -1,12 +1,14 @@
// Test GoogleCloudStorage filesystem interface // Test GoogleCloudStorage filesystem interface
// +build go1.9
package googlecloudstorage_test package googlecloudstorage_test
import ( import (
"testing" "testing"
"github.com/rclone/rclone/backend/googlecloudstorage" "github.com/ncw/rclone/backend/googlecloudstorage"
"github.com/rclone/rclone/fstest/fstests" "github.com/ncw/rclone/fstest/fstests"
) )
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the remote

View File

@@ -0,0 +1,6 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build !go1.9
package googlecloudstorage

View File

@@ -1,148 +0,0 @@
// This file contains the albums abstraction
package googlephotos
import (
"path"
"strings"
"sync"
"github.com/rclone/rclone/backend/googlephotos/api"
)
// albums is an index of all the albums known to the backend.
//
// All maps are guarded by mu.  Album titles are cleaned with
// path.Clean before insertion (see add) and duplicated titles are
// disambiguated with a " {ID}" suffix (see _add and the expectations
// in TestAlbumsAdd).
type albums struct {
	mu      sync.Mutex
	dupes   map[string][]*api.Album // all albums sharing the same cleaned title (never shrunk - see _del)
	byID    map[string]*api.Album   // ..indexed by album ID
	byTitle map[string]*api.Album   // ..indexed by (possibly deduplicated) Title
	path    map[string][]string     // directory -> immediate child names, e.g. path["a/b"] = ["c"] for title "a/b/c"
}
// newAlbums creates an empty album index with every map initialised,
// ready for use.
func newAlbums() *albums {
	a := new(albums)
	a.dupes = map[string][]*api.Album{}
	a.byID = map[string]*api.Album{}
	a.byTitle = map[string]*api.Album{}
	a.path = map[string][]string{}
	return a
}
// add normalises the album's title into a sensible path name and then
// indexes the album, taking the lock.
//
// A title that cleans down to "." or "/" is replaced by addID("", ID)
// so every album ends up with a usable, non-empty path.
func (as *albums) add(album *api.Album) {
	// Munge the name of the album into a sensible path name
	title := path.Clean(album.Title)
	if title == "." || title == "/" {
		title = addID("", album.ID)
	}
	album.Title = title
	as.mu.Lock()
	defer as.mu.Unlock()
	as._add(album)
}
// _add an album - call with lock held
//
// Inserts the album into every index (dupes, byID, byTitle, path).
// If the title collides with an existing album the title is rewritten
// to include the ID via addID (tests show e.g. "two" + ID "2" becomes
// "two {2}").
func (as *albums) _add(album *api.Album) {
	// update dupes by title
	dupes := as.dupes[album.Title]
	dupes = append(dupes, album)
	as.dupes[album.Title] = dupes
	// Dedupe the album name if necessary
	if len(dupes) >= 2 {
		// If this is the first dupe, then need to adjust the first one
		// too: remove and re-add it so its title also gains the ID suffix.
		if len(dupes) == 2 {
			firstAlbum := dupes[0]
			as._del(firstAlbum)
			as._add(firstAlbum)
			// undo add of firstAlbum to dupes
			// (the recursive _add appended it a second time; restore the
			// snapshot taken above so the list stays [first, second])
			as.dupes[album.Title] = dupes
		}
		album.Title = addID(album.Title, album.ID)
	}
	// Store the new album
	as.byID[album.ID] = album
	as.byTitle[album.Title] = album
	// Store the partial paths
	// For a title "a/b/c" this records "c" under "a/b", "b" under "a"
	// and "a" under "", skipping entries that are already present.
	dir, leaf := album.Title, ""
	for dir != "" {
		i := strings.LastIndex(dir, "/")
		if i >= 0 {
			dir, leaf = dir[:i], dir[i+1:]
		} else {
			dir, leaf = "", dir
		}
		dirs := as.path[dir]
		found := false
		for _, dir := range dirs {
			if dir == leaf {
				found = true
			}
		}
		if !found {
			as.path[dir] = append(as.path[dir], leaf)
		}
	}
}
// del removes an album from the index, taking the lock.
func (as *albums) del(album *api.Album) {
	as.mu.Lock()
	defer as.mu.Unlock()
	as._del(album)
}
// _del removes an album from the indexes - call with lock held.
//
// The entry is left in dupes on purpose so that removing an album does
// not cause the remaining duplicates to be renamed.  The path index is
// pruned bottom-up, stopping as soon as a parent directory is still in
// use elsewhere.
func (as *albums) _del(album *api.Album) {
	// We leave in dupes so it doesn't cause albums to get renamed
	// Remove from byID and byTitle
	delete(as.byID, album.ID)
	delete(as.byTitle, album.Title)
	// Remove from paths, walking up one component at a time
	dir, leaf := album.Title, ""
	for dir != "" {
		// Can't delete if this dir exists anywhere in the path structure
		if _, exists := as.path[dir]; exists {
			break
		}
		// Split off the last path component (leaf must be taken from the
		// old value of dir before dir is shortened)
		if slash := strings.LastIndex(dir, "/"); slash >= 0 {
			leaf = dir[slash+1:]
			dir = dir[:slash]
		} else {
			leaf = dir
			dir = ""
		}
		entries := as.path[dir]
		for i, entry := range entries {
			if entry == leaf {
				entries = append(entries[:i], entries[i+1:]...)
				break
			}
		}
		if len(entries) == 0 {
			delete(as.path, dir)
		} else {
			as.path[dir] = entries
		}
	}
}
// get looks up an album by its (possibly deduplicated) title,
// returning ok=false when there is no such album.
func (as *albums) get(title string) (album *api.Album, ok bool) {
	as.mu.Lock()
	album, ok = as.byTitle[title]
	as.mu.Unlock()
	return album, ok
}
// getDirs returns the directory entries immediately below albumPath,
// returning ok=false when the path is unknown.
func (as *albums) getDirs(albumPath string) (dirs []string, ok bool) {
	as.mu.Lock()
	dirs, ok = as.path[albumPath]
	as.mu.Unlock()
	return dirs, ok
}

View File

@@ -1,311 +0,0 @@
package googlephotos
import (
"testing"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/stretchr/testify/assert"
)
// TestNewAlbums checks that the constructor initialises all four maps.
func TestNewAlbums(t *testing.T) {
	as := newAlbums()
	assert.NotNil(t, as.dupes)
	assert.NotNil(t, as.byID)
	assert.NotNil(t, as.byTitle)
	assert.NotNil(t, as.path)
}
// TestAlbumsAdd exercises add, checking the index state after each
// insertion: a plain album, a second album, a duplicated title (which
// gets " {ID}" suffixes), a sub-directory title, and a degenerate path
// that cleans to "/" (which becomes "{ID}").
func TestAlbumsAdd(t *testing.T) {
	albums := newAlbums()
	// A fresh index is empty
	assert.Equal(t, map[string][]*api.Album{}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{}, albums.byID)
	assert.Equal(t, map[string]*api.Album{}, albums.byTitle)
	assert.Equal(t, map[string][]string{}, albums.path)
	a1 := &api.Album{
		Title: "one",
		ID:    "1",
	}
	albums.add(a1)
	assert.Equal(t, map[string][]*api.Album{
		"one": []*api.Album{a1},
	}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{
		"1": a1,
	}, albums.byID)
	assert.Equal(t, map[string]*api.Album{
		"one": a1,
	}, albums.byTitle)
	assert.Equal(t, map[string][]string{
		"": []string{"one"},
	}, albums.path)
	a2 := &api.Album{
		Title: "two",
		ID:    "2",
	}
	albums.add(a2)
	assert.Equal(t, map[string][]*api.Album{
		"one": []*api.Album{a1},
		"two": []*api.Album{a2},
	}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{
		"1": a1,
		"2": a2,
	}, albums.byID)
	assert.Equal(t, map[string]*api.Album{
		"one": a1,
		"two": a2,
	}, albums.byTitle)
	assert.Equal(t, map[string][]string{
		"": []string{"one", "two"},
	}, albums.path)
	// Add a duplicate - both "two" albums get renamed "two {ID}"
	a2a := &api.Album{
		Title: "two",
		ID:    "2a",
	}
	albums.add(a2a)
	assert.Equal(t, map[string][]*api.Album{
		"one": []*api.Album{a1},
		"two": []*api.Album{a2, a2a},
	}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{
		"1":  a1,
		"2":  a2,
		"2a": a2a,
	}, albums.byID)
	assert.Equal(t, map[string]*api.Album{
		"one":      a1,
		"two {2}":  a2,
		"two {2a}": a2a,
	}, albums.byTitle)
	assert.Equal(t, map[string][]string{
		"": []string{"one", "two {2}", "two {2a}"},
	}, albums.path)
	// Add a sub directory - "one" gains a child entry in path
	a1sub := &api.Album{
		Title: "one/sub",
		ID:    "1sub",
	}
	albums.add(a1sub)
	assert.Equal(t, map[string][]*api.Album{
		"one":     []*api.Album{a1},
		"two":     []*api.Album{a2, a2a},
		"one/sub": []*api.Album{a1sub},
	}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{
		"1":    a1,
		"2":    a2,
		"2a":   a2a,
		"1sub": a1sub,
	}, albums.byID)
	assert.Equal(t, map[string]*api.Album{
		"one":      a1,
		"one/sub":  a1sub,
		"two {2}":  a2,
		"two {2a}": a2a,
	}, albums.byTitle)
	assert.Equal(t, map[string][]string{
		"":    []string{"one", "two {2}", "two {2a}"},
		"one": []string{"sub"},
	}, albums.path)
	// Add a weird path - path.Clean reduces it to "/" so add falls back
	// to addID("", ID) giving the title "{0}"
	a0 := &api.Album{
		Title: "/../././..////.",
		ID:    "0",
	}
	albums.add(a0)
	assert.Equal(t, map[string][]*api.Album{
		"{0}":     []*api.Album{a0},
		"one":     []*api.Album{a1},
		"two":     []*api.Album{a2, a2a},
		"one/sub": []*api.Album{a1sub},
	}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{
		"0":    a0,
		"1":    a1,
		"2":    a2,
		"2a":   a2a,
		"1sub": a1sub,
	}, albums.byID)
	assert.Equal(t, map[string]*api.Album{
		"{0}":      a0,
		"one":      a1,
		"one/sub":  a1sub,
		"two {2}":  a2,
		"two {2a}": a2a,
	}, albums.byTitle)
	assert.Equal(t, map[string][]string{
		"":    []string{"one", "two {2}", "two {2a}", "{0}"},
		"one": []string{"sub"},
	}, albums.path)
}
// TestAlbumsDel builds an index of four albums (including a duplicated
// title and a sub-directory) and removes them one by one, checking the
// index after each deletion.  Note that dupes is deliberately never
// shrunk by del - it keeps its full contents throughout.
func TestAlbumsDel(t *testing.T) {
	albums := newAlbums()
	a1 := &api.Album{
		Title: "one",
		ID:    "1",
	}
	albums.add(a1)
	a2 := &api.Album{
		Title: "two",
		ID:    "2",
	}
	albums.add(a2)
	// Add a duplicate
	a2a := &api.Album{
		Title: "two",
		ID:    "2a",
	}
	albums.add(a2a)
	// Add a sub directory
	a1sub := &api.Album{
		Title: "one/sub",
		ID:    "1sub",
	}
	albums.add(a1sub)
	// Initial state with all four albums present
	assert.Equal(t, map[string][]*api.Album{
		"one":     []*api.Album{a1},
		"two":     []*api.Album{a2, a2a},
		"one/sub": []*api.Album{a1sub},
	}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{
		"1":    a1,
		"2":    a2,
		"2a":   a2a,
		"1sub": a1sub,
	}, albums.byID)
	assert.Equal(t, map[string]*api.Album{
		"one":      a1,
		"one/sub":  a1sub,
		"two {2}":  a2,
		"two {2a}": a2a,
	}, albums.byTitle)
	assert.Equal(t, map[string][]string{
		"":    []string{"one", "two {2}", "two {2a}"},
		"one": []string{"sub"},
	}, albums.path)
	// Deleting "one" keeps its path entry alive because "one/sub" still
	// exists below it
	albums.del(a1)
	assert.Equal(t, map[string][]*api.Album{
		"one":     []*api.Album{a1},
		"two":     []*api.Album{a2, a2a},
		"one/sub": []*api.Album{a1sub},
	}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{
		"2":    a2,
		"2a":   a2a,
		"1sub": a1sub,
	}, albums.byID)
	assert.Equal(t, map[string]*api.Album{
		"one/sub":  a1sub,
		"two {2}":  a2,
		"two {2a}": a2a,
	}, albums.byTitle)
	assert.Equal(t, map[string][]string{
		"":    []string{"one", "two {2}", "two {2a}"},
		"one": []string{"sub"},
	}, albums.path)
	// Deleting "two {2}" removes it from the root path listing
	albums.del(a2)
	assert.Equal(t, map[string][]*api.Album{
		"one":     []*api.Album{a1},
		"two":     []*api.Album{a2, a2a},
		"one/sub": []*api.Album{a1sub},
	}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{
		"2a":   a2a,
		"1sub": a1sub,
	}, albums.byID)
	assert.Equal(t, map[string]*api.Album{
		"one/sub":  a1sub,
		"two {2a}": a2a,
	}, albums.byTitle)
	assert.Equal(t, map[string][]string{
		"":    []string{"one", "two {2a}"},
		"one": []string{"sub"},
	}, albums.path)
	albums.del(a2a)
	assert.Equal(t, map[string][]*api.Album{
		"one":     []*api.Album{a1},
		"two":     []*api.Album{a2, a2a},
		"one/sub": []*api.Album{a1sub},
	}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{
		"1sub": a1sub,
	}, albums.byID)
	assert.Equal(t, map[string]*api.Album{
		"one/sub": a1sub,
	}, albums.byTitle)
	assert.Equal(t, map[string][]string{
		"":    []string{"one"},
		"one": []string{"sub"},
	}, albums.path)
	// Deleting the last album empties byID, byTitle and path (but not dupes)
	albums.del(a1sub)
	assert.Equal(t, map[string][]*api.Album{
		"one":     []*api.Album{a1},
		"two":     []*api.Album{a2, a2a},
		"one/sub": []*api.Album{a1sub},
	}, albums.dupes)
	assert.Equal(t, map[string]*api.Album{}, albums.byID)
	assert.Equal(t, map[string]*api.Album{}, albums.byTitle)
	assert.Equal(t, map[string][]string{}, albums.path)
}
// TestAlbumsGet checks lookup by title for both a present and a
// missing album.
func TestAlbumsGet(t *testing.T) {
	as := newAlbums()
	one := &api.Album{Title: "one", ID: "1"}
	as.add(one)

	// A known title returns the album and ok=true
	got, ok := as.get("one")
	assert.Equal(t, true, ok)
	assert.Equal(t, one, got)

	// An unknown title returns nil and ok=false
	got, ok = as.get("notfound")
	assert.Equal(t, false, ok)
	assert.Nil(t, got)
}
// TestAlbumsGetDirs checks listing the directory entries below an
// album path for both a present and a missing path.
func TestAlbumsGetDirs(t *testing.T) {
	as := newAlbums()
	one := &api.Album{Title: "one", ID: "1"}
	as.add(one)

	// The root ("") lists the album we just added
	entries, ok := as.getDirs("")
	assert.Equal(t, true, ok)
	assert.Equal(t, []string{"one"}, entries)

	// An unknown path returns nil and ok=false
	entries, ok = as.getDirs("notfound")
	assert.Equal(t, false, ok)
	assert.Nil(t, entries)
}

View File

@@ -1,190 +0,0 @@
package api
import (
"fmt"
"time"
)
// ErrorDetails is the inner payload of the Error type as decoded from
// the JSON error response.
type ErrorDetails struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
	Status  string `json:"status"`
}

// Error is returned on errors
type Error struct {
	Details ErrorDetails `json:"error"`
}

// Error satisfies the error interface, formatting the details as
// "Message (Code Status)".
func (e *Error) Error() string {
	d := e.Details
	return fmt.Sprintf("%s (%d %s)", d.Message, d.Code, d.Status)
}
// Album of photos
//
// Mirrors the JSON albums resource of the Google Photos API.  Note
// that MediaItemsCount is carried as a string on the wire, not an
// integer.
type Album struct {
	ID                    string `json:"id,omitempty"`
	Title                 string `json:"title"`
	ProductURL            string `json:"productUrl,omitempty"`
	MediaItemsCount       string `json:"mediaItemsCount,omitempty"`
	CoverPhotoBaseURL     string `json:"coverPhotoBaseUrl,omitempty"`
	CoverPhotoMediaItemID string `json:"coverPhotoMediaItemId,omitempty"`
	IsWriteable           bool   `json:"isWriteable,omitempty"`
}
// ListAlbums is returned from albums.list and sharedAlbums.list
//
// Only one of Albums or SharedAlbums is expected to be populated
// depending on which endpoint was called.  NOTE(review): a non-empty
// NextPageToken presumably requests the next page - confirm at the
// call site.
type ListAlbums struct {
	Albums        []Album `json:"albums"`
	SharedAlbums  []Album `json:"sharedAlbums"`
	NextPageToken string  `json:"nextPageToken"`
}
// CreateAlbum creates an Album
//
// Wrapper holding the album to create - presumably the request body
// for the albums.create endpoint; confirm at the call site.
type CreateAlbum struct {
	Album *Album `json:"album"`
}
// MediaItem is a photo or video
//
// Note that Width and Height inside MediaMetadata are carried as
// strings on the wire, not integers.
type MediaItem struct {
	ID            string `json:"id"`
	ProductURL    string `json:"productUrl"`
	BaseURL       string `json:"baseUrl"`
	MimeType      string `json:"mimeType"`
	MediaMetadata struct {
		CreationTime time.Time `json:"creationTime"`
		Width        string    `json:"width"`
		Height       string    `json:"height"`
		Photo        struct {
		} `json:"photo"`
	} `json:"mediaMetadata"`
	Filename string `json:"filename"`
}
// MediaItems is returned from mediaitems.list, mediaitems.search
type MediaItems struct {
MediaItems []MediaItem `json:"mediaItems"`
NextPageToken string `json:"nextPageToken"`
}
// Content categories
// NONE Default content category. This category is ignored when any other category is used in the filter.
// LANDSCAPES Media items containing landscapes.
// RECEIPTS Media items containing receipts.
// CITYSCAPES Media items containing cityscapes.
// LANDMARKS Media items containing landmarks.
// SELFIES Media items that are selfies.
// PEOPLE Media items containing people.
// PETS Media items containing pets.
// WEDDINGS Media items from weddings.
// BIRTHDAYS Media items from birthdays.
// DOCUMENTS Media items containing documents.
// TRAVEL Media items taken during travel.
// ANIMALS Media items containing animals.
// FOOD Media items containing food.
// SPORT Media items from sporting events.
// NIGHT Media items taken at night.
// PERFORMANCES Media items from performances.
// WHITEBOARDS Media items containing whiteboards.
// SCREENSHOTS Media items that are screenshots.
// UTILITY Media items that are considered to be utility. These include, but aren't limited to documents, screenshots, whiteboards etc.
// ARTS Media items containing art.
// CRAFTS Media items containing crafts.
// FASHION Media items related to fashion.
// HOUSES Media items containing houses.
// GARDENS Media items containing gardens.
// FLOWERS Media items containing flowers.
// HOLIDAYS Media items taken of holidays.
// MediaTypes
// ALL_MEDIA Treated as if no filters are applied. All media types are included.
// VIDEO All media items that are considered videos. This also includes movies the user has created using the Google Photos app.
// PHOTO All media items that are considered photos. This includes .bmp, .gif, .ico, .jpg (and other spellings), .tiff, .webp and special photo types such as iOS live photos, Android motion photos, panoramas, photospheres.
// Features
// NONE Treated as if no filters are applied. All features are included.
// FAVORITES Media items that the user has marked as favorites in the Google Photos app.
// Date is used as part of SearchFilter
type Date struct {
	Year  int `json:"year,omitempty"`
	Month int `json:"month,omitempty"`
	Day   int `json:"day,omitempty"`
}

// DateFilter is used to add date ranges to media item queries
type DateFilter struct {
	Dates  []Date `json:"dates,omitempty"`
	Ranges []struct {
		StartDate Date `json:"startDate,omitempty"`
		EndDate   Date `json:"endDate,omitempty"`
	} `json:"ranges,omitempty"`
}

// ContentFilter is used to add content categories to media item queries
type ContentFilter struct {
	IncludedContentCategories []string `json:"includedContentCategories,omitempty"`
	ExcludedContentCategories []string `json:"excludedContentCategories,omitempty"`
}

// MediaTypeFilter is used to add media types to media item queries
type MediaTypeFilter struct {
	MediaTypes []string `json:"mediaTypes,omitempty"`
}

// FeatureFilter is used to add features to media item queries
type FeatureFilter struct {
	IncludedFeatures []string `json:"includedFeatures,omitempty"`
}

// Filters combines all the filter types for media item queries
type Filters struct {
	DateFilter               *DateFilter      `json:"dateFilter,omitempty"`
	ContentFilter            *ContentFilter   `json:"contentFilter,omitempty"`
	MediaTypeFilter          *MediaTypeFilter `json:"mediaTypeFilter,omitempty"`
	FeatureFilter            *FeatureFilter   `json:"featureFilter,omitempty"`
	IncludeArchivedMedia     *bool            `json:"includeArchivedMedia,omitempty"`
	ExcludeNonAppCreatedData *bool            `json:"excludeNonAppCreatedData,omitempty"`
}

// SearchFilter is used with mediaItems.search
type SearchFilter struct {
	AlbumID   string   `json:"albumId,omitempty"`
	PageSize  int      `json:"pageSize"`
	PageToken string   `json:"pageToken,omitempty"`
	Filters   *Filters `json:"filters,omitempty"`
}
// SimpleMediaItem is part of NewMediaItem
type SimpleMediaItem struct {
	UploadToken string `json:"uploadToken"`
}

// NewMediaItem is a single media item for upload
type NewMediaItem struct {
	Description     string          `json:"description"`
	SimpleMediaItem SimpleMediaItem `json:"simpleMediaItem"`
}

// BatchCreateRequest creates media items from upload tokens
type BatchCreateRequest struct {
	AlbumID       string         `json:"albumId,omitempty"` // optional album to add the new items to
	NewMediaItems []NewMediaItem `json:"newMediaItems"`
}

// BatchCreateResponse is returned from BatchCreateRequest
//
// Each result carries a per-item status as well as the created media item.
type BatchCreateResponse struct {
	NewMediaItemResults []struct {
		UploadToken string `json:"uploadToken"`
		Status      struct {
			Message string `json:"message"`
			Code    int    `json:"code"`
		} `json:"status"`
		MediaItem MediaItem `json:"mediaItem"`
	} `json:"newMediaItemResults"`
}

// BatchRemoveItems is for removing items from an album
type BatchRemoveItems struct {
	MediaItemIds []string `json:"mediaItemIds"`
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,307 +0,0 @@
package googlephotos
import (
"context"
"fmt"
"io/ioutil"
"net/http"
"path"
"testing"
"time"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
	// We have two different files here as Google Photos will uniq
	// them otherwise which confuses the tests as the filename is
	// unexpected.
	fileNameAlbum  = "rclone-test-image1.jpg" // used by the album tests
	fileNameUpload = "rclone-test-image2.jpg" // used by the upload tests
)
// Wrapper to override the remote for an object
//
// This lets a local source object be presented to Put under a
// different remote path without copying the underlying data.
type overrideRemoteObject struct {
	fs.Object        // embedded source object - all other methods pass through
	remote    string // the remote name to report instead
}

// Remote returns the overridden remote name
func (o *overrideRemoteObject) Remote() string {
	return o.remote
}
// TestIntegration exercises the google photos backend end to end
// against the remote configured as TestGooglePhotos: (or the one given
// with -remote). It is skipped when no such remote is configured.
func TestIntegration(t *testing.T) {
	ctx := context.Background()
	fstest.Initialise()

	// Create Fs
	if *fstest.RemoteName == "" {
		*fstest.RemoteName = "TestGooglePhotos:"
	}
	f, err := fs.NewFs(*fstest.RemoteName)
	if err == fs.ErrorNotFoundInConfigFile {
		t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err))
	}
	require.NoError(t, err)

	// Create local Fs pointing at testfiles
	localFs, err := fs.NewFs("testfiles")
	require.NoError(t, err)

	t.Run("CreateAlbum", func(t *testing.T) {
		// Use a random album name so concurrent runs don't collide
		albumName := "album/rclone-test-" + random.String(24)
		err = f.Mkdir(ctx, albumName)
		require.NoError(t, err)
		remote := albumName + "/" + fileNameAlbum

		t.Run("PutFile", func(t *testing.T) {
			srcObj, err := localFs.NewObject(ctx, fileNameAlbum)
			require.NoError(t, err)
			in, err := srcObj.Open(ctx)
			require.NoError(t, err)
			// Upload the local file into the album under remote
			dstObj, err := f.Put(ctx, in, &overrideRemoteObject{srcObj, remote})
			require.NoError(t, err)
			assert.Equal(t, remote, dstObj.Remote())
			_ = in.Close()
			remoteWithID := addFileID(remote, dstObj.(*Object).id)

			t.Run("ObjectFs", func(t *testing.T) {
				assert.Equal(t, f, dstObj.Fs())
			})
			t.Run("ObjectString", func(t *testing.T) {
				assert.Equal(t, remote, dstObj.String())
				assert.Equal(t, "<nil>", (*Object)(nil).String())
			})
			t.Run("ObjectHash", func(t *testing.T) {
				// Hashes are not supported by this backend
				h, err := dstObj.Hash(ctx, hash.MD5)
				assert.Equal(t, "", h)
				assert.Equal(t, hash.ErrUnsupported, err)
			})
			t.Run("ObjectSize", func(t *testing.T) {
				// Size is unknown (-1) unless ReadSize is set
				assert.Equal(t, int64(-1), dstObj.Size())
				f.(*Fs).opt.ReadSize = true
				defer func() {
					f.(*Fs).opt.ReadSize = false
				}()
				size := dstObj.Size()
				assert.True(t, size > 1000, fmt.Sprintf("Size too small %d", size))
			})
			t.Run("ObjectSetModTime", func(t *testing.T) {
				err := dstObj.SetModTime(ctx, time.Now())
				assert.Equal(t, fs.ErrorCantSetModTime, err)
			})
			t.Run("ObjectStorable", func(t *testing.T) {
				assert.True(t, dstObj.Storable())
			})
			t.Run("ObjectOpen", func(t *testing.T) {
				// Download the file back and sniff its content type
				in, err := dstObj.Open(ctx)
				require.NoError(t, err)
				buf, err := ioutil.ReadAll(in)
				require.NoError(t, err)
				require.NoError(t, in.Close())
				assert.True(t, len(buf) > 1000)
				contentType := http.DetectContentType(buf[:512])
				assert.Equal(t, "image/jpeg", contentType)
			})
			t.Run("CheckFileInAlbum", func(t *testing.T) {
				entries, err := f.List(ctx, albumName)
				require.NoError(t, err)
				assert.Equal(t, 1, len(entries))
				assert.Equal(t, remote, entries[0].Remote())
				assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
			})
			// Check it is there in the date/month/year hierarchy
			// 2013-07-13 is the creation date of the folder
			// NOTE(review): the paths checked below use 2013-07-26 - confirm
			checkPresent := func(t *testing.T, objPath string) {
				entries, err := f.List(ctx, objPath)
				require.NoError(t, err)
				found := false
				for _, entry := range entries {
					leaf := path.Base(entry.Remote())
					if leaf == fileNameAlbum || leaf == remoteWithID {
						found = true
					}
				}
				assert.True(t, found, fmt.Sprintf("didn't find %q in %q", fileNameAlbum, objPath))
			}
			t.Run("CheckInByYear", func(t *testing.T) {
				checkPresent(t, "media/by-year/2013")
			})
			t.Run("CheckInByMonth", func(t *testing.T) {
				checkPresent(t, "media/by-month/2013/2013-07")
			})
			t.Run("CheckInByDay", func(t *testing.T) {
				checkPresent(t, "media/by-day/2013/2013-07-26")
			})
			t.Run("NewObject", func(t *testing.T) {
				o, err := f.NewObject(ctx, remote)
				require.NoError(t, err)
				require.Equal(t, remote, o.Remote())
			})
			t.Run("NewObjectWithID", func(t *testing.T) {
				o, err := f.NewObject(ctx, remoteWithID)
				require.NoError(t, err)
				require.Equal(t, remoteWithID, o.Remote())
			})
			t.Run("NewFsIsFile", func(t *testing.T) {
				// Pointing a new Fs at a file should return ErrorIsFile
				// and an Fs rooted at the parent directory
				fNew, err := fs.NewFs(*fstest.RemoteName + remote)
				assert.Equal(t, fs.ErrorIsFile, err)
				leaf := path.Base(remote)
				o, err := fNew.NewObject(ctx, leaf)
				require.NoError(t, err)
				require.Equal(t, leaf, o.Remote())
			})
			t.Run("RemoveFileFromAlbum", func(t *testing.T) {
				err = dstObj.Remove(ctx)
				require.NoError(t, err)

				// allow the removal to propagate
				time.Sleep(time.Second)

				// Check album empty
				entries, err := f.List(ctx, albumName)
				require.NoError(t, err)
				assert.Equal(t, 0, len(entries))
			})
		})

		// remove the album
		err = f.Rmdir(ctx, albumName)
		require.Error(t, err) // FIXME doesn't work yet
	})

	t.Run("UploadMkdir", func(t *testing.T) {
		assert.NoError(t, f.Mkdir(ctx, "upload/dir"))
		assert.NoError(t, f.Mkdir(ctx, "upload/dir/subdir"))

		t.Run("List", func(t *testing.T) {
			entries, err := f.List(ctx, "upload")
			require.NoError(t, err)
			assert.Equal(t, 1, len(entries))
			assert.Equal(t, "upload/dir", entries[0].Remote())

			entries, err = f.List(ctx, "upload/dir")
			require.NoError(t, err)
			assert.Equal(t, 1, len(entries))
			assert.Equal(t, "upload/dir/subdir", entries[0].Remote())
		})

		t.Run("Rmdir", func(t *testing.T) {
			assert.NoError(t, f.Rmdir(ctx, "upload/dir/subdir"))
			assert.NoError(t, f.Rmdir(ctx, "upload/dir"))
		})

		t.Run("ListEmpty", func(t *testing.T) {
			entries, err := f.List(ctx, "upload")
			require.NoError(t, err)
			assert.Equal(t, 0, len(entries))

			_, err = f.List(ctx, "upload/dir")
			assert.Equal(t, fs.ErrorDirNotFound, err)
		})
	})

	t.Run("Upload", func(t *testing.T) {
		uploadDir := "upload/dir/subdir"
		remote := path.Join(uploadDir, fileNameUpload)

		srcObj, err := localFs.NewObject(ctx, fileNameUpload)
		require.NoError(t, err)
		in, err := srcObj.Open(ctx)
		require.NoError(t, err)
		dstObj, err := f.Put(ctx, in, &overrideRemoteObject{srcObj, remote})
		require.NoError(t, err)
		assert.Equal(t, remote, dstObj.Remote())
		_ = in.Close()
		remoteWithID := addFileID(remote, dstObj.(*Object).id)

		t.Run("List", func(t *testing.T) {
			entries, err := f.List(ctx, uploadDir)
			require.NoError(t, err)
			require.Equal(t, 1, len(entries))
			assert.Equal(t, remote, entries[0].Remote())
			assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
		})
		t.Run("NewObject", func(t *testing.T) {
			o, err := f.NewObject(ctx, remote)
			require.NoError(t, err)
			require.Equal(t, remote, o.Remote())
		})
		t.Run("NewObjectWithID", func(t *testing.T) {
			o, err := f.NewObject(ctx, remoteWithID)
			require.NoError(t, err)
			require.Equal(t, remoteWithID, o.Remote())
		})
	})

	t.Run("Name", func(t *testing.T) {
		assert.Equal(t, (*fstest.RemoteName)[:len(*fstest.RemoteName)-1], f.Name())
	})
	t.Run("Root", func(t *testing.T) {
		assert.Equal(t, "", f.Root())
	})
	t.Run("String", func(t *testing.T) {
		assert.Equal(t, `Google Photos path ""`, f.String())
	})
	t.Run("Features", func(t *testing.T) {
		features := f.Features()
		assert.False(t, features.CaseInsensitive)
		assert.True(t, features.ReadMimeType)
	})
	t.Run("Precision", func(t *testing.T) {
		assert.Equal(t, fs.ModTimeNotSupported, f.Precision())
	})
	t.Run("Hashes", func(t *testing.T) {
		assert.Equal(t, hash.Set(hash.None), f.Hashes())
	})
}
// TestAddID checks that addID appends " {id}" to a name, with no
// leading space when the name is empty.
func TestAddID(t *testing.T) {
	for _, test := range []struct {
		in, id, want string
	}{
		{"potato", "123", "potato {123}"},
		{"", "123", "{123}"},
	} {
		assert.Equal(t, test.want, addID(test.in, test.id))
	}
}
// TestFileAddID checks that addFileID inserts " {id}" before the file
// extension, or appends it when there is no extension.
func TestFileAddID(t *testing.T) {
	for _, test := range []struct {
		in, id, want string
	}{
		{"potato.txt", "123", "potato {123}.txt"},
		{"potato", "123", "potato {123}"},
		{"", "123", "{123}"},
	} {
		assert.Equal(t, test.want, addFileID(test.in, test.id))
	}
}
// TestFindID checks that findID extracts a full-length ID in braces
// from a file name and ignores anything shorter.
func TestFindID(t *testing.T) {
	const id = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
	// Plain names have no embedded ID.
	assert.Equal(t, "", findID("potato"))
	// A full-length ID in braces is extracted.
	assert.Equal(t, id, findID("potato {"+id+"}.txt"))
	// An ID one character too short is not recognised.
	assert.Equal(t, "", findID("potato {"+id[1:]+"}.txt"))
}

View File

@@ -1,335 +0,0 @@
// Store the parsing of file patterns
package googlephotos
import (
"context"
"fmt"
"path"
"regexp"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/rclone/rclone/fs"
)
// lister describes the subset of the interfaces on Fs needed for the
// file pattern parsing
type lister interface {
	// listDir lists the media items selected by filter, prefixing names with prefix
	listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error)
	// listAlbums lists the user's albums, or the shared albums if shared is true
	listAlbums(ctx context.Context, shared bool) (all *albums, err error)
	// listUploads lists the entries in the upload directory dir
	listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
	// dirTime returns the time to use for synthetic directories
	dirTime() time.Time
}
// dirPattern describes a single directory pattern
type dirPattern struct {
	re        string         // match for the path
	match     *regexp.Regexp // compiled match - filled in by mustCompile
	canUpload bool           // true if can upload here
	canMkdir  bool           // true if can make a directory here
	isFile    bool           // true if this is a file
	isUpload  bool           // true if this is the upload directory
	// function to turn a match into DirEntries
	toEntries func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error)
}

// dirPatterns is a slice of all the directory patterns
type dirPatterns []dirPattern
// patterns describes the layout of the google photos backend file system.
//
// NB no trailing / on paths
//
// The order matters: match takes the first pattern whose regexp
// matches, and the tests refer to entries here by index.
var patterns = dirPatterns{
	// 0: root directory - the four top level directories
	{
		re: `^$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			return fs.DirEntries{
				fs.NewDir(prefix+"media", f.dirTime()),
				fs.NewDir(prefix+"album", f.dirTime()),
				fs.NewDir(prefix+"shared-album", f.dirTime()),
				fs.NewDir(prefix+"upload", f.dirTime()),
			}, nil
		},
	},
	// 1: upload directory (and subdirectories)
	{
		re: `^upload(?:/(.*))?$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			return f.listUploads(ctx, match[0])
		},
		canUpload: true,
		canMkdir:  true,
		isUpload:  true,
	},
	// 2: a file in the upload directory
	{
		re:        `^upload/(.*)$`,
		isFile:    true,
		canUpload: true,
		isUpload:  true,
	},
	// 3: the media directory with its four views
	{
		re: `^media$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			return fs.DirEntries{
				fs.NewDir(prefix+"all", f.dirTime()),
				fs.NewDir(prefix+"by-year", f.dirTime()),
				fs.NewDir(prefix+"by-month", f.dirTime()),
				fs.NewDir(prefix+"by-day", f.dirTime()),
			}, nil
		},
	},
	// 4: all media, unfiltered
	{
		re: `^media/all$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			return f.listDir(ctx, prefix, api.SearchFilter{})
		},
	},
	// 5: a file in media/all
	{
		re:     `^media/all/([^/]+)$`,
		isFile: true,
	},
	// media/by-year hierarchy
	{
		re:        `^media/by-year$`,
		toEntries: years,
	},
	{
		re: `^media/by-year/(\d{4})$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			filter, err := yearMonthDayFilter(ctx, f, match)
			if err != nil {
				return nil, err
			}
			return f.listDir(ctx, prefix, filter)
		},
	},
	{
		re:     `^media/by-year/(\d{4})/([^/]+)$`,
		isFile: true,
	},
	// media/by-month hierarchy
	{
		re:        `^media/by-month$`,
		toEntries: years,
	},
	{
		re:        `^media/by-month/(\d{4})$`,
		toEntries: months,
	},
	{
		re: `^media/by-month/\d{4}/(\d{4})-(\d{2})$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			filter, err := yearMonthDayFilter(ctx, f, match)
			if err != nil {
				return nil, err
			}
			return f.listDir(ctx, prefix, filter)
		},
	},
	{
		re:     `^media/by-month/\d{4}/(\d{4})-(\d{2})/([^/]+)$`,
		isFile: true,
	},
	// media/by-day hierarchy
	{
		re:        `^media/by-day$`,
		toEntries: years,
	},
	{
		re:        `^media/by-day/(\d{4})$`,
		toEntries: days,
	},
	{
		re: `^media/by-day/\d{4}/(\d{4})-(\d{2})-(\d{2})$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			filter, err := yearMonthDayFilter(ctx, f, match)
			if err != nil {
				return nil, err
			}
			return f.listDir(ctx, prefix, filter)
		},
	},
	{
		re:     `^media/by-day/\d{4}/(\d{4})-(\d{2})-(\d{2})/([^/]+)$`,
		isFile: true,
	},
	// albums
	{
		re: `^album$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
			return albumsToEntries(ctx, f, false, prefix, "")
		},
	},
	{
		re:       `^album/(.+)$`,
		canMkdir: true,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
			return albumsToEntries(ctx, f, false, prefix, match[1])
		},
	},
	{
		re:        `^album/(.+?)/([^/]+)$`,
		canUpload: true,
		isFile:    true,
	},
	// shared albums - read only
	{
		re: `^shared-album$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
			return albumsToEntries(ctx, f, true, prefix, "")
		},
	},
	{
		re: `^shared-album/(.+)$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
			return albumsToEntries(ctx, f, true, prefix, match[1])
		},
	},
	{
		re:     `^shared-album/(.+?)/([^/]+)$`,
		isFile: true,
	},
}.mustCompile()
// mustCompile compiles the regexp in every pattern, panicking on an
// invalid expression, and returns the slice so it can be chained onto
// the composite literal.
func (ds dirPatterns) mustCompile() dirPatterns {
	for i := range ds {
		ds[i].match = regexp.MustCompile(ds[i].re)
	}
	return ds
}
// match finds the path passed in in the matching structure and
// returns the parameters and a pointer to the match, or nil.
//
// root is the Fs root and itemPath is relative to it; isFile selects
// between the file patterns and the directory patterns. prefix is
// itemPath with a trailing "/" (or "" at the root), suitable for
// prefixing onto the names of the entries found. The first pattern
// whose regexp matches wins, so the order of patterns matters.
func (ds dirPatterns) match(root string, itemPath string, isFile bool) (match []string, prefix string, pattern *dirPattern) {
	itemPath = strings.Trim(itemPath, "/")
	absPath := path.Join(root, itemPath)
	// prefix is the part of absPath below root, with a trailing /
	prefix = strings.Trim(absPath[len(root):], "/")
	if prefix != "" {
		prefix += "/"
	}
	for i := range ds {
		pattern = &ds[i]
		// only consider patterns of the requested kind (file vs directory)
		if pattern.isFile != isFile {
			continue
		}
		match = pattern.match.FindStringSubmatch(absPath)
		if match != nil {
			return
		}
	}
	return nil, "", nil
}
// Return the years from 2000 up to and including the current year as
// directory entries.
// FIXME make configurable?
func years(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
	lastYear := f.dirTime().Year()
	for year := 2000; year <= lastYear; year++ {
		entries = append(entries, fs.NewDir(prefix+strconv.Itoa(year), f.dirTime()))
	}
	return entries, nil
}
// Return the twelve months of the year given in match[1] as "YYYY-MM"
// directory entries.
func months(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
	year := match[1]
	entries = make(fs.DirEntries, 0, 12)
	for month := 1; month <= 12; month++ {
		name := fmt.Sprintf("%s%s-%02d", prefix, year, month)
		entries = append(entries, fs.NewDir(name, f.dirTime()))
	}
	return entries, nil
}
// Return the days in a given year
//
// match[1] is the 4 digit year. One "YYYY-MM-DD" entry is generated
// per day, so leap years produce 366 entries.
func days(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
	year := match[1]
	current, err := time.Parse("2006", year)
	if err != nil {
		return nil, errors.Errorf("bad year %q", match[1])
	}
	currentYear := current.Year()
	// step one day at a time until we roll over into the next year
	for current.Year() == currentYear {
		entries = append(entries, fs.NewDir(prefix+current.Format("2006-01-02"), f.dirTime()))
		current = current.AddDate(0, 0, 1)
	}
	return entries, nil
}
// This creates a search filter on year/month/day as provided
//
// match[1] is the year, match[2] (if present) the month and match[3]
// (if present) the day. Each part is range checked; note that the day
// check does not take the month into account (e.g. day 31 is accepted
// for every month).
func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter, err error) {
	year, err := strconv.Atoi(match[1])
	if err != nil || year < 1000 || year > 3000 {
		return sf, errors.Errorf("bad year %q", match[1])
	}
	sf = api.SearchFilter{
		Filters: &api.Filters{
			DateFilter: &api.DateFilter{
				Dates: []api.Date{
					{
						Year: year,
					},
				},
			},
		},
	}
	if len(match) >= 3 {
		month, err := strconv.Atoi(match[2])
		if err != nil || month < 1 || month > 12 {
			return sf, errors.Errorf("bad month %q", match[2])
		}
		sf.Filters.DateFilter.Dates[0].Month = month
	}
	if len(match) >= 4 {
		day, err := strconv.Atoi(match[3])
		if err != nil || day < 1 || day > 31 {
			return sf, errors.Errorf("bad day %q", match[3])
		}
		sf.Filters.DateFilter.Dates[0].Day = day
	}
	return sf, nil
}
// Turns an albumPath into entries
//
// These can either be synthetic directory entries if the album path
// is a prefix of another album, or actual files, or a combination of
// the two. Returns fs.ErrorDirNotFound if albumPath is non empty and
// matches neither an album nor a prefix of one.
func albumsToEntries(ctx context.Context, f lister, shared bool, prefix string, albumPath string) (entries fs.DirEntries, err error) {
	albums, err := f.listAlbums(ctx, shared)
	if err != nil {
		return nil, err
	}
	// Put in the directories
	dirs, foundAlbumPath := albums.getDirs(albumPath)
	if foundAlbumPath {
		for _, dir := range dirs {
			d := fs.NewDir(prefix+dir, f.dirTime())
			dirPath := path.Join(albumPath, dir)
			// if this dir is an album add more special stuff
			album, ok := albums.get(dirPath)
			if ok {
				// MediaItemsCount is a decimal string from the API;
				// a parse failure leaves count at 0
				count, err := strconv.ParseInt(album.MediaItemsCount, 10, 64)
				if err != nil {
					fs.Debugf(f, "Error reading media count: %v", err)
				}
				d.SetID(album.ID).SetItems(count)
			}
			entries = append(entries, d)
		}
	}
	// if this is an album then return a filter to list it
	album, foundAlbum := albums.get(albumPath)
	if foundAlbum {
		filter := api.SearchFilter{AlbumID: album.ID}
		newEntries, err := f.listDir(ctx, prefix, filter)
		if err != nil {
			return nil, err
		}
		entries = append(entries, newEntries...)
	}
	if !foundAlbumPath && !foundAlbum && albumPath != "" {
		return nil, fs.ErrorDirNotFound
	}
	return entries, nil
}

View File

@@ -1,495 +0,0 @@
package googlephotos
import (
"context"
"fmt"
"testing"
"time"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/dirtree"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/mockobject"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// time for directories
var startTime = fstest.Time("2019-06-24T15:53:05.999999999Z")

// mock Fs for testing patterns
type testLister struct {
	t        *testing.T
	albums   *albums         // canned albums returned by listAlbums
	names    []string        // names returned by listDir
	uploaded dirtree.DirTree // entries returned by listUploads
}
// newTestLister builds a testLister mock with empty album and upload
// state ready for the tests to fill in.
func newTestLister(t *testing.T) *testLister {
	l := new(testLister)
	l.t = t
	l.albums = newAlbums()
	l.uploaded = dirtree.New()
	return l
}
// mock listDir for testing - returns one mock object per name in
// f.names with prefix prepended, ignoring the filter
func (f *testLister) listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) {
	for _, name := range f.names {
		entries = append(entries, mockobject.New(prefix+name))
	}
	return entries, nil
}

// mock listAlbums for testing - returns the canned albums regardless
// of the shared flag
func (f *testLister) listAlbums(ctx context.Context, shared bool) (all *albums, err error) {
	return f.albums, nil
}
// listUploads is the mock of lister.listUploads for testing - it
// returns the entries previously added to f.uploaded under dir (nil
// for an unknown dir, which matches the map's zero value).
func (f *testLister) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	// A plain map index is enough: the original's `entries, _ =
	// f.uploaded[dir]` form is redundant (staticcheck S1005).
	return f.uploaded[dir], nil
}
// mock dirTime for testing - always returns the fixed startTime
func (f *testLister) dirTime() time.Time {
	return startTime
}
// TestPatternMatch checks dirPatterns.match against a table of
// root/itemPath/isFile combinations, comparing the submatches, the
// computed prefix and which pattern (by identity) was selected.
func TestPatternMatch(t *testing.T) {
	for testNumber, test := range []struct {
		// input
		root     string
		itemPath string
		isFile   bool
		// expected output
		wantMatch   []string
		wantPrefix  string
		wantPattern *dirPattern
	}{
		{
			root:        "",
			itemPath:    "",
			isFile:      false,
			wantMatch:   []string{""},
			wantPrefix:  "",
			wantPattern: &patterns[0],
		},
		{
			// a file can't match the root pattern
			root:        "",
			itemPath:    "",
			isFile:      true,
			wantMatch:   nil,
			wantPrefix:  "",
			wantPattern: nil,
		},
		{
			root:        "upload",
			itemPath:    "",
			isFile:      false,
			wantMatch:   []string{"upload", ""},
			wantPrefix:  "",
			wantPattern: &patterns[1],
		},
		{
			root:        "upload/dir",
			itemPath:    "",
			isFile:      false,
			wantMatch:   []string{"upload/dir", "dir"},
			wantPrefix:  "",
			wantPattern: &patterns[1],
		},
		{
			root:        "upload/file.jpg",
			itemPath:    "",
			isFile:      true,
			wantMatch:   []string{"upload/file.jpg", "file.jpg"},
			wantPrefix:  "",
			wantPattern: &patterns[2],
		},
		{
			root:        "media",
			itemPath:    "",
			isFile:      false,
			wantMatch:   []string{"media"},
			wantPrefix:  "",
			wantPattern: &patterns[3],
		},
		{
			root:        "",
			itemPath:    "media",
			isFile:      false,
			wantMatch:   []string{"media"},
			wantPrefix:  "media/",
			wantPattern: &patterns[3],
		},
		{
			root:        "media/all",
			itemPath:    "",
			isFile:      false,
			wantMatch:   []string{"media/all"},
			wantPrefix:  "",
			wantPattern: &patterns[4],
		},
		{
			root:        "media",
			itemPath:    "all",
			isFile:      false,
			wantMatch:   []string{"media/all"},
			wantPrefix:  "all/",
			wantPattern: &patterns[4],
		},
		{
			root:        "media/all",
			itemPath:    "file.jpg",
			isFile:      true,
			wantMatch:   []string{"media/all/file.jpg", "file.jpg"},
			wantPrefix:  "file.jpg/",
			wantPattern: &patterns[5],
		},
	} {
		t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q,isFile=%v", testNumber, test.root, test.itemPath, test.isFile), func(t *testing.T) {
			gotMatch, gotPrefix, gotPattern := patterns.match(test.root, test.itemPath, test.isFile)
			assert.Equal(t, test.wantMatch, gotMatch)
			assert.Equal(t, test.wantPrefix, gotPrefix)
			assert.Equal(t, test.wantPattern, gotPattern)
		})
	}
}
// TestPatternMatchToEntries drives each directory pattern's toEntries
// function through the mock lister and checks the (first four) entries
// produced - directories are marked with a trailing "/".
func TestPatternMatchToEntries(t *testing.T) {
	ctx := context.Background()
	f := newTestLister(t)
	f.names = []string{"file.jpg"}
	f.albums.add(&api.Album{
		ID:    "1",
		Title: "sub/one",
	})
	f.albums.add(&api.Album{
		ID:    "2",
		Title: "sub",
	})
	f.uploaded.AddEntry(mockobject.New("upload/file1.jpg"))
	f.uploaded.AddEntry(mockobject.New("upload/dir/file2.jpg"))

	for testNumber, test := range []struct {
		// input
		root     string
		itemPath string
		// expected output
		wantMatch  []string
		wantPrefix string
		remotes    []string
	}{
		{
			root:       "",
			itemPath:   "",
			wantMatch:  []string{""},
			wantPrefix: "",
			remotes:    []string{"media/", "album/", "shared-album/", "upload/"},
		},
		{
			root:       "upload",
			itemPath:   "",
			wantMatch:  []string{"upload", ""},
			wantPrefix: "",
			remotes:    []string{"upload/file1.jpg", "upload/dir/"},
		},
		{
			root:       "upload",
			itemPath:   "dir",
			wantMatch:  []string{"upload/dir", "dir"},
			wantPrefix: "dir/",
			remotes:    []string{"upload/dir/file2.jpg"},
		},
		{
			root:       "media",
			itemPath:   "",
			wantMatch:  []string{"media"},
			wantPrefix: "",
			remotes:    []string{"all/", "by-year/", "by-month/", "by-day/"},
		},
		{
			root:       "media/all",
			itemPath:   "",
			wantMatch:  []string{"media/all"},
			wantPrefix: "",
			remotes:    []string{"file.jpg"},
		},
		{
			root:       "media",
			itemPath:   "all",
			wantMatch:  []string{"media/all"},
			wantPrefix: "all/",
			remotes:    []string{"all/file.jpg"},
		},
		{
			root:       "media/by-year",
			itemPath:   "",
			wantMatch:  []string{"media/by-year"},
			wantPrefix: "",
			remotes:    []string{"2000/", "2001/", "2002/", "2003/"},
		},
		{
			root:       "media/by-year/2000",
			itemPath:   "",
			wantMatch:  []string{"media/by-year/2000", "2000"},
			wantPrefix: "",
			remotes:    []string{"file.jpg"},
		},
		{
			root:       "media/by-month",
			itemPath:   "",
			wantMatch:  []string{"media/by-month"},
			wantPrefix: "",
			remotes:    []string{"2000/", "2001/", "2002/", "2003/"},
		},
		{
			root:       "media/by-month/2001",
			itemPath:   "",
			wantMatch:  []string{"media/by-month/2001", "2001"},
			wantPrefix: "",
			remotes:    []string{"2001-01/", "2001-02/", "2001-03/", "2001-04/"},
		},
		{
			root:       "media/by-month/2001/2001-01",
			itemPath:   "",
			wantMatch:  []string{"media/by-month/2001/2001-01", "2001", "01"},
			wantPrefix: "",
			remotes:    []string{"file.jpg"},
		},
		{
			root:       "media/by-day",
			itemPath:   "",
			wantMatch:  []string{"media/by-day"},
			wantPrefix: "",
			remotes:    []string{"2000/", "2001/", "2002/", "2003/"},
		},
		{
			root:       "media/by-day/2001",
			itemPath:   "",
			wantMatch:  []string{"media/by-day/2001", "2001"},
			wantPrefix: "",
			remotes:    []string{"2001-01-01/", "2001-01-02/", "2001-01-03/", "2001-01-04/"},
		},
		{
			root:       "media/by-day/2001/2001-01-02",
			itemPath:   "",
			wantMatch:  []string{"media/by-day/2001/2001-01-02", "2001", "01", "02"},
			wantPrefix: "",
			remotes:    []string{"file.jpg"},
		},
		{
			root:       "album",
			itemPath:   "",
			wantMatch:  []string{"album"},
			wantPrefix: "",
			remotes:    []string{"sub/"},
		},
		{
			root:       "album/sub",
			itemPath:   "",
			wantMatch:  []string{"album/sub", "sub"},
			wantPrefix: "",
			remotes:    []string{"one/", "file.jpg"},
		},
		{
			root:       "album/sub/one",
			itemPath:   "",
			wantMatch:  []string{"album/sub/one", "sub/one"},
			wantPrefix: "",
			remotes:    []string{"file.jpg"},
		},
		{
			root:       "shared-album",
			itemPath:   "",
			wantMatch:  []string{"shared-album"},
			wantPrefix: "",
			remotes:    []string{"sub/"},
		},
		{
			root:       "shared-album/sub",
			itemPath:   "",
			wantMatch:  []string{"shared-album/sub", "sub"},
			wantPrefix: "",
			remotes:    []string{"one/", "file.jpg"},
		},
		{
			root:       "shared-album/sub/one",
			itemPath:   "",
			wantMatch:  []string{"shared-album/sub/one", "sub/one"},
			wantPrefix: "",
			remotes:    []string{"file.jpg"},
		},
	} {
		t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q", testNumber, test.root, test.itemPath), func(t *testing.T) {
			match, prefix, pattern := patterns.match(test.root, test.itemPath, false)
			assert.Equal(t, test.wantMatch, match)
			assert.Equal(t, test.wantPrefix, prefix)
			assert.NotNil(t, pattern)
			assert.NotNil(t, pattern.toEntries)

			entries, err := pattern.toEntries(ctx, f, prefix, match)
			assert.NoError(t, err)
			var remotes = []string{}
			for _, entry := range entries {
				remote := entry.Remote()
				if _, isDir := entry.(fs.Directory); isDir {
					remote += "/"
				}
				remotes = append(remotes, remote)
				if len(remotes) >= 4 {
					break // only test first 4 entries
				}
			}
			assert.Equal(t, test.remotes, remotes)
		})
	}
}
// TestPatternYears checks that years generates consecutive year
// directories starting at 2000 under the given prefix.
func TestPatternYears(t *testing.T) {
	f := newTestLister(t)
	entries, err := years(context.Background(), f, "potato/", nil)
	require.NoError(t, err)
	for i, entry := range entries {
		assert.Equal(t, fmt.Sprintf("potato/%d", 2000+i), entry.Remote())
	}
}
// TestPatternMonths checks that months generates the twelve "YYYY-MM"
// directories for the requested year.
func TestPatternMonths(t *testing.T) {
	f := newTestLister(t)
	entries, err := months(context.Background(), f, "potato/", []string{"", "2020"})
	require.NoError(t, err)
	assert.Len(t, entries, 12)
	for month := 1; month <= 12; month++ {
		assert.Equal(t, fmt.Sprintf("potato/2020-%02d", month), entries[month-1].Remote())
	}
}
// TestPatternDays checks that days generates one "YYYY-MM-DD"
// directory per day of the requested year.
func TestPatternDays(t *testing.T) {
	f := newTestLister(t)
	entries, err := days(context.Background(), f, "potato/", []string{"", "2020"})
	require.NoError(t, err)
	// 2020 is a leap year
	assert.Len(t, entries, 366)
	assert.Equal(t, "potato/2020-01-01", entries[0].Remote())
	assert.Equal(t, "potato/2020-12-31", entries[len(entries)-1].Remote())
}
// TestPatternYearMonthDayFilter checks that yearMonthDayFilter builds
// the expected api.SearchFilter from year, year/month and
// year/month/day matches, and rejects out of range parts.
func TestPatternYearMonthDayFilter(t *testing.T) {
	ctx := context.Background()
	f := newTestLister(t)

	// Years
	sf, err := yearMonthDayFilter(ctx, f, []string{"", "2000"})
	require.NoError(t, err)
	assert.Equal(t, api.SearchFilter{
		Filters: &api.Filters{
			DateFilter: &api.DateFilter{
				Dates: []api.Date{
					{
						Year: 2000,
					},
				},
			},
		},
	}, sf)

	// years outside [1000,3000] or non-numeric are rejected
	_, err = yearMonthDayFilter(ctx, f, []string{"", "potato"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "999"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "4000"})
	require.Error(t, err)

	// Months
	sf, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01"})
	require.NoError(t, err)
	assert.Equal(t, api.SearchFilter{
		Filters: &api.Filters{
			DateFilter: &api.DateFilter{
				Dates: []api.Date{
					{
						Month: 1,
						Year:  2000,
					},
				},
			},
		},
	}, sf)

	// months outside [1,12] or non-numeric are rejected
	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "potato"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "0"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "13"})
	require.Error(t, err)

	// Days
	sf, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "02"})
	require.NoError(t, err)
	assert.Equal(t, api.SearchFilter{
		Filters: &api.Filters{
			DateFilter: &api.DateFilter{
				Dates: []api.Date{
					{
						Day:   2,
						Month: 1,
						Year:  2000,
					},
				},
			},
		},
	}, sf)

	// days outside [1,31] or non-numeric are rejected
	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "potato"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "0"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "32"})
	require.Error(t, err)
}
// TestPatternAlbumsToEntries checks that albumsToEntries maps albums below
// a given path into directory entries, and that an exact album match also
// lists the files inside the album.
func TestPatternAlbumsToEntries(t *testing.T) {
	f := newTestLister(t)
	ctx := context.Background()

	// With no albums the directory should be reported as not found.
	_, err := albumsToEntries(ctx, f, false, "potato/", "sub")
	assert.Equal(t, fs.ErrorDirNotFound, err)

	// An album nested below "sub" appears as a single directory entry.
	f.albums.add(&api.Album{
		ID:    "1",
		Title: "sub/one",
	})
	entries, err := albumsToEntries(ctx, f, false, "potato/", "sub")
	assert.NoError(t, err)
	// require.Len aborts on mismatch so the index accesses below can't
	// panic with out-of-range on a failing run.
	require.Len(t, entries, 1)
	assert.Equal(t, "potato/one", entries[0].Remote())
	_, ok := entries[0].(fs.Directory)
	assert.True(t, ok)

	// An album exactly matching "sub" additionally lists its files.
	f.albums.add(&api.Album{
		ID:    "1",
		Title: "sub",
	})
	f.names = []string{"file.jpg"}
	entries, err = albumsToEntries(ctx, f, false, "potato/", "sub")
	assert.NoError(t, err)
	require.Len(t, entries, 2)
	assert.Equal(t, "potato/one", entries[0].Remote())
	_, ok = entries[0].(fs.Directory)
	assert.True(t, ok)
	assert.Equal(t, "potato/file.jpg", entries[1].Remote())
	_, ok = entries[1].(fs.Object)
	assert.True(t, ok)
}

Binary file not shown.

Before

Width:  |  Height:  |  Size: 16 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 16 KiB

View File

@@ -5,7 +5,6 @@
package http package http
import ( import (
"context"
"io" "io"
"mime" "mime"
"net/http" "net/http"
@@ -13,16 +12,15 @@ import (
"path" "path"
"strconv" "strconv"
"strings" "strings"
"sync"
"time" "time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/net/html" "golang.org/x/net/html"
) )
@@ -47,21 +45,6 @@ func init() {
Value: "https://user:pass@example.com", Value: "https://user:pass@example.com",
Help: "Connect to example.com using a username and password", Help: "Connect to example.com using a username and password",
}}, }},
}, {
Name: "headers",
Help: `Set HTTP headers for all transactions
Use this to set additional HTTP headers for all transactions
The input format is comma separated list of key,value pairs. Standard
[CSV encoding](https://godoc.org/encoding/csv) may be used.
For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
You can set multiple headers, eg '"Cookie","name=value","Authorization","xxx"'.
`,
Default: fs.CommaSepList{},
Advanced: true,
}, { }, {
Name: "no_slash", Name: "no_slash",
Help: `Set this if the site doesn't end directories with / Help: `Set this if the site doesn't end directories with /
@@ -78,26 +61,6 @@ Note that this may cause rclone to confuse genuine HTML files with
directories.`, directories.`,
Default: false, Default: false,
Advanced: true, Advanced: true,
}, {
Name: "no_head",
Help: `Don't use HEAD requests to find file sizes in dir listing
If your site is being very slow to load then you can try this option.
Normally rclone does a HEAD request for each potential file in a
directory listing to:
- find its size
- check it really exists
- check to see if it is a directory
If you set this option, rclone will not do the HEAD request. This will mean
- directory listings are much quicker
- rclone won't have the times or sizes of any files
- some files that don't exist may be in the listing
`,
Default: false,
Advanced: true,
}}, }},
} }
fs.Register(fsi) fs.Register(fsi)
@@ -105,10 +68,8 @@ If you set this option, rclone will not do the HEAD request. This will mean
// Options defines the configuration for this backend // Options defines the configuration for this backend
type Options struct { type Options struct {
Endpoint string `config:"url"` Endpoint string `config:"url"`
NoSlash bool `config:"no_slash"` NoSlash bool `config:"no_slash"`
NoHead bool `config:"no_head"`
Headers fs.CommaSepList `config:"headers"`
} }
// Fs stores the interface to the remote HTTP files // Fs stores the interface to the remote HTTP files
@@ -146,7 +107,6 @@ func statusError(res *http.Response, err error) error {
// NewFs creates a new Fs object from the name and root. It connects to // NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file. // the host specified in the config file.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
// Parse config into Options struct // Parse config into Options struct
opt := new(Options) opt := new(Options)
err := configstruct.Set(m, opt) err := configstruct.Set(m, opt)
@@ -154,10 +114,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err return nil, err
} }
if len(opt.Headers)%2 != 0 {
return nil, errors.New("odd number of headers supplied")
}
if !strings.HasSuffix(opt.Endpoint, "/") { if !strings.HasSuffix(opt.Endpoint, "/") {
opt.Endpoint += "/" opt.Endpoint += "/"
} }
@@ -183,15 +139,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return http.ErrUseLastResponse return http.ErrUseLastResponse
} }
// check to see if points to a file // check to see if points to a file
req, err := http.NewRequest("HEAD", u.String(), nil) res, err := noRedir.Head(u.String())
err = statusError(res, err)
if err == nil { if err == nil {
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext isFile = true
addHeaders(req, opt)
res, err := noRedir.Do(req)
err = statusError(res, err)
if err == nil {
isFile = true
}
} }
} }
@@ -256,12 +207,12 @@ func (f *Fs) Precision() time.Duration {
} }
// NewObject creates a new remote http file object // NewObject creates a new remote http file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { func (f *Fs) NewObject(remote string) (fs.Object, error) {
o := &Object{ o := &Object{
fs: f, fs: f,
remote: remote, remote: remote,
} }
err := o.stat(ctx) err := o.stat()
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -364,22 +315,8 @@ func parse(base *url.URL, in io.Reader) (names []string, err error) {
return names, nil return names, nil
} }
// Adds the configured headers to the request if any
func addHeaders(req *http.Request, opt *Options) {
for i := 0; i < len(opt.Headers); i += 2 {
key := opt.Headers[i]
value := opt.Headers[i+1]
req.Header.Add(key, value)
}
}
// Adds the configured headers to the request if any
func (f *Fs) addHeaders(req *http.Request) {
addHeaders(req, &f.opt)
}
// Read the directory passed in // Read the directory passed in
func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error) { func (f *Fs) readDir(dir string) (names []string, err error) {
URL := f.url(dir) URL := f.url(dir)
u, err := url.Parse(URL) u, err := url.Parse(URL)
if err != nil { if err != nil {
@@ -388,14 +325,7 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
if !strings.HasSuffix(URL, "/") { if !strings.HasSuffix(URL, "/") {
return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL) return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
} }
// Do the request res, err := f.httpClient.Get(URL)
req, err := http.NewRequest("GET", URL, nil)
if err != nil {
return nil, errors.Wrap(err, "readDir failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
f.addHeaders(req)
res, err := f.httpClient.Do(req)
if err == nil { if err == nil {
defer fs.CheckClose(res.Body, &err) defer fs.CheckClose(res.Body, &err)
if res.StatusCode == http.StatusNotFound { if res.StatusCode == http.StatusNotFound {
@@ -429,57 +359,38 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
// //
// This should return ErrDirNotFound if the directory isn't // This should return ErrDirNotFound if the directory isn't
// found. // found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
if !strings.HasSuffix(dir, "/") && dir != "" { if !strings.HasSuffix(dir, "/") && dir != "" {
dir += "/" dir += "/"
} }
names, err := f.readDir(ctx, dir) names, err := f.readDir(dir)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "error listing %q", dir) return nil, errors.Wrapf(err, "error listing %q", dir)
} }
var (
entriesMu sync.Mutex // to protect entries
wg sync.WaitGroup
in = make(chan string, fs.Config.Checkers)
)
add := func(entry fs.DirEntry) {
entriesMu.Lock()
entries = append(entries, entry)
entriesMu.Unlock()
}
for i := 0; i < fs.Config.Checkers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for remote := range in {
file := &Object{
fs: f,
remote: remote,
}
switch err := file.stat(ctx); err {
case nil:
add(file)
case fs.ErrorNotAFile:
// ...found a directory not a file
add(fs.NewDir(remote, timeUnset))
default:
fs.Debugf(remote, "skipping because of error: %v", err)
}
}
}()
}
for _, name := range names { for _, name := range names {
isDir := name[len(name)-1] == '/' isDir := name[len(name)-1] == '/'
name = strings.TrimRight(name, "/") name = strings.TrimRight(name, "/")
remote := path.Join(dir, name) remote := path.Join(dir, name)
if isDir { if isDir {
add(fs.NewDir(remote, timeUnset)) dir := fs.NewDir(remote, timeUnset)
entries = append(entries, dir)
} else { } else {
in <- remote file := &Object{
fs: f,
remote: remote,
}
switch err = file.stat(); err {
case nil:
entries = append(entries, file)
case fs.ErrorNotAFile:
// ...found a directory not a file
dir := fs.NewDir(remote, timeUnset)
entries = append(entries, dir)
default:
fs.Debugf(remote, "skipping because of error: %v", err)
}
} }
} }
close(in)
wg.Wait()
return entries, nil return entries, nil
} }
@@ -488,12 +399,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// May create the object even if it returns an error - if so // May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return // will return the object and the error, otherwise will return
// nil and the error // nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return nil, errorReadOnly return nil, errorReadOnly
} }
// PutStream uploads to the remote path with the modTime given of indeterminate size // PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return nil, errorReadOnly return nil, errorReadOnly
} }
@@ -516,7 +427,7 @@ func (o *Object) Remote() string {
} }
// Hash returns "" since HTTP (in Go or OpenSSH) doesn't support remote calculation of hashes // Hash returns "" since HTTP (in Go or OpenSSH) doesn't support remote calculation of hashes
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) { func (o *Object) Hash(r hash.Type) (string, error) {
return "", hash.ErrUnsupported return "", hash.ErrUnsupported
} }
@@ -526,7 +437,7 @@ func (o *Object) Size() int64 {
} }
// ModTime returns the modification time of the remote http file // ModTime returns the modification time of the remote http file
func (o *Object) ModTime(ctx context.Context) time.Time { func (o *Object) ModTime() time.Time {
return o.modTime return o.modTime
} }
@@ -536,21 +447,9 @@ func (o *Object) url() string {
} }
// stat updates the info field in the Object // stat updates the info field in the Object
func (o *Object) stat(ctx context.Context) error { func (o *Object) stat() error {
if o.fs.opt.NoHead {
o.size = -1
o.modTime = timeUnset
o.contentType = fs.MimeType(ctx, o)
return nil
}
url := o.url() url := o.url()
req, err := http.NewRequest("HEAD", url, nil) res, err := o.fs.httpClient.Head(url)
if err != nil {
return errors.Wrap(err, "stat failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
o.fs.addHeaders(req)
res, err := o.fs.httpClient.Do(req)
if err == nil && res.StatusCode == http.StatusNotFound { if err == nil && res.StatusCode == http.StatusNotFound {
return fs.ErrorObjectNotFound return fs.ErrorObjectNotFound
} }
@@ -581,7 +480,7 @@ func (o *Object) stat(ctx context.Context) error {
// SetModTime sets the modification and access time to the specified time // SetModTime sets the modification and access time to the specified time
// //
// it also updates the info field // it also updates the info field
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { func (o *Object) SetModTime(modTime time.Time) error {
return errorReadOnly return errorReadOnly
} }
@@ -591,19 +490,17 @@ func (o *Object) Storable() bool {
} }
// Open a remote http file object for reading. Seek is supported // Open a remote http file object for reading. Seek is supported
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
url := o.url() url := o.url()
req, err := http.NewRequest("GET", url, nil) req, err := http.NewRequest("GET", url, nil)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "Open failed") return nil, errors.Wrap(err, "Open failed")
} }
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
// Add optional headers // Add optional headers
for k, v := range fs.OpenOptionHeaders(options) { for k, v := range fs.OpenOptionHeaders(options) {
req.Header.Add(k, v) req.Header.Add(k, v)
} }
o.fs.addHeaders(req)
// Do the request // Do the request
res, err := o.fs.httpClient.Do(req) res, err := o.fs.httpClient.Do(req)
@@ -620,27 +517,27 @@ func (f *Fs) Hashes() hash.Set {
} }
// Mkdir makes the root directory of the Fs object // Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error { func (f *Fs) Mkdir(dir string) error {
return errorReadOnly return errorReadOnly
} }
// Remove a remote http file object // Remove a remote http file object
func (o *Object) Remove(ctx context.Context) error { func (o *Object) Remove() error {
return errorReadOnly return errorReadOnly
} }
// Rmdir removes the root directory of the Fs object // Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error { func (f *Fs) Rmdir(dir string) error {
return errorReadOnly return errorReadOnly
} }
// Update in to the object with the modTime given of the given size // Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return errorReadOnly return errorReadOnly
} }
// MimeType of an Object if known, "" otherwise // MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string { func (o *Object) MimeType() string {
return o.contentType return o.contentType
} }

View File

@@ -1,7 +1,8 @@
// +build go1.8
package http package http
import ( import (
"context"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
@@ -10,15 +11,14 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"sort" "sort"
"strings"
"testing" "testing"
"time" "time"
"github.com/rclone/rclone/fs" "github.com/ncw/rclone/fs"
"github.com/rclone/rclone/fs/config" "github.com/ncw/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap" "github.com/ncw/rclone/fs/config/configmap"
"github.com/rclone/rclone/fstest" "github.com/ncw/rclone/fstest"
"github.com/rclone/rclone/lib/rest" "github.com/ncw/rclone/lib/rest"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@@ -27,7 +27,6 @@ var (
remoteName = "TestHTTP" remoteName = "TestHTTP"
testPath = "test" testPath = "test"
filesPath = filepath.Join(testPath, "files") filesPath = filepath.Join(testPath, "files")
headers = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
) )
// prepareServer the test server and return a function to tidy it up afterwards // prepareServer the test server and return a function to tidy it up afterwards
@@ -35,16 +34,8 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
// file server for test/files // file server for test/files
fileServer := http.FileServer(http.Dir(filesPath)) fileServer := http.FileServer(http.Dir(filesPath))
// test the headers are there then pass on to fileServer
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
assert.Equal(t, headers[1], r.Header.Get(headers[0]), what+headers[0])
assert.Equal(t, headers[3], r.Header.Get(headers[2]), what+headers[2])
fileServer.ServeHTTP(w, r)
})
// Make the test server // Make the test server
ts := httptest.NewServer(handler) ts := httptest.NewServer(fileServer)
// Configure the remote // Configure the remote
config.LoadConfig() config.LoadConfig()
@@ -55,9 +46,8 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
// config.FileSet(remoteName, "url", ts.URL) // config.FileSet(remoteName, "url", ts.URL)
m := configmap.Simple{ m := configmap.Simple{
"type": "http", "type": "http",
"url": ts.URL, "url": ts.URL,
"headers": strings.Join(headers, ","),
} }
// return a function to tidy up // return a function to tidy up
@@ -76,7 +66,7 @@ func prepare(t *testing.T) (fs.Fs, func()) {
} }
func testListRoot(t *testing.T, f fs.Fs, noSlash bool) { func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
entries, err := f.List(context.Background(), "") entries, err := f.List("")
require.NoError(t, err) require.NoError(t, err)
sort.Sort(entries) sort.Sort(entries)
@@ -132,7 +122,7 @@ func TestListSubDir(t *testing.T) {
f, tidy := prepare(t) f, tidy := prepare(t)
defer tidy() defer tidy()
entries, err := f.List(context.Background(), "three") entries, err := f.List("three")
require.NoError(t, err) require.NoError(t, err)
sort.Sort(entries) sort.Sort(entries)
@@ -150,7 +140,7 @@ func TestNewObject(t *testing.T) {
f, tidy := prepare(t) f, tidy := prepare(t)
defer tidy() defer tidy()
o, err := f.NewObject(context.Background(), "four/under four.txt") o, err := f.NewObject("four/under four.txt")
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, "four/under four.txt", o.Remote()) assert.Equal(t, "four/under four.txt", o.Remote())
@@ -160,7 +150,7 @@ func TestNewObject(t *testing.T) {
// Test the time is correct on the object // Test the time is correct on the object
tObj := o.ModTime(context.Background()) tObj := o.ModTime()
fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt")) fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
require.NoError(t, err) require.NoError(t, err)
@@ -170,7 +160,7 @@ func TestNewObject(t *testing.T) {
assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second)) assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))
// check object not found // check object not found
o, err = f.NewObject(context.Background(), "not found.txt") o, err = f.NewObject("not found.txt")
assert.Nil(t, o) assert.Nil(t, o)
assert.Equal(t, fs.ErrorObjectNotFound, err) assert.Equal(t, fs.ErrorObjectNotFound, err)
} }
@@ -179,11 +169,11 @@ func TestOpen(t *testing.T) {
f, tidy := prepare(t) f, tidy := prepare(t)
defer tidy() defer tidy()
o, err := f.NewObject(context.Background(), "four/under four.txt") o, err := f.NewObject("four/under four.txt")
require.NoError(t, err) require.NoError(t, err)
// Test normal read // Test normal read
fd, err := o.Open(context.Background()) fd, err := o.Open()
require.NoError(t, err) require.NoError(t, err)
data, err := ioutil.ReadAll(fd) data, err := ioutil.ReadAll(fd)
require.NoError(t, err) require.NoError(t, err)
@@ -191,7 +181,7 @@ func TestOpen(t *testing.T) {
assert.Equal(t, "beetroot\n", string(data)) assert.Equal(t, "beetroot\n", string(data))
// Test with range request // Test with range request
fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5}) fd, err = o.Open(&fs.RangeOption{Start: 1, End: 5})
require.NoError(t, err) require.NoError(t, err)
data, err = ioutil.ReadAll(fd) data, err = ioutil.ReadAll(fd)
require.NoError(t, err) require.NoError(t, err)
@@ -203,12 +193,12 @@ func TestMimeType(t *testing.T) {
f, tidy := prepare(t) f, tidy := prepare(t)
defer tidy() defer tidy()
o, err := f.NewObject(context.Background(), "four/under four.txt") o, err := f.NewObject("four/under four.txt")
require.NoError(t, err) require.NoError(t, err)
do, ok := o.(fs.MimeTyper) do, ok := o.(fs.MimeTyper)
require.True(t, ok) require.True(t, ok)
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background())) assert.Equal(t, "text/plain; charset=utf-8", do.MimeType())
} }
func TestIsAFileRoot(t *testing.T) { func TestIsAFileRoot(t *testing.T) {
@@ -228,7 +218,7 @@ func TestIsAFileSubDir(t *testing.T) {
f, err := NewFs(remoteName, "three/underthree.txt", m) f, err := NewFs(remoteName, "three/underthree.txt", m)
assert.Equal(t, err, fs.ErrorIsFile) assert.Equal(t, err, fs.ErrorIsFile)
entries, err := f.List(context.Background(), "") entries, err := f.List("")
require.NoError(t, err) require.NoError(t, err)
sort.Sort(entries) sort.Sort(entries)

View File

@@ -24,7 +24,7 @@
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="timer-test">timer-test</a></td><td align="right">09-May-2017 17:05 </td><td align="right">1.5M</td><td>&nbsp;</td></tr> <tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="timer-test">timer-test</a></td><td align="right">09-May-2017 17:05 </td><td align="right">1.5M</td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/text.gif" alt="[TXT]"></td><td><a href="words-to-regexp.pl">words-to-regexp.pl</a></td><td align="right">01-Mar-2005 20:43 </td><td align="right">6.0K</td><td>&nbsp;</td></tr> <tr><td valign="top"><img src="/icons/text.gif" alt="[TXT]"></td><td><a href="words-to-regexp.pl">words-to-regexp.pl</a></td><td align="right">01-Mar-2005 20:43 </td><td align="right">6.0K</td><td>&nbsp;</td></tr>
<tr><th colspan="5"><hr></th></tr> <tr><th colspan="5"><hr></th></tr>
<!-- some extras from https://github.com/rclone/rclone/issues/1573 --> <!-- some extras from https://github.com/ncw/rclone/issues/1573 -->
<tr><td valign="top"><img src="/icons/sound2.gif" alt="[SND]"></td><td><a href="Now%20100%25%20better.mp3">Now 100% better.mp3</a></td><td align="right">2017-08-01 11:41 </td><td align="right"> 0 </td><td>&nbsp;</td></tr> <tr><td valign="top"><img src="/icons/sound2.gif" alt="[SND]"></td><td><a href="Now%20100%25%20better.mp3">Now 100% better.mp3</a></td><td align="right">2017-08-01 11:41 </td><td align="right"> 0 </td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/sound2.gif" alt="[SND]"></td><td><a href="Now%20better.mp3">Now better.mp3</a></td><td align="right">2017-08-01 11:41 </td><td align="right"> 0 </td><td>&nbsp;</td></tr> <tr><td valign="top"><img src="/icons/sound2.gif" alt="[SND]"></td><td><a href="Now%20better.mp3">Now better.mp3</a></td><td align="right">2017-08-01 11:41 </td><td align="right"> 0 </td><td>&nbsp;</td></tr>

View File

@@ -1,12 +1,11 @@
package hubic package hubic
import ( import (
"context"
"net/http" "net/http"
"time" "time"
"github.com/ncw/rclone/fs"
"github.com/ncw/swift" "github.com/ncw/swift"
"github.com/rclone/rclone/fs"
) )
// auth is an authenticator for swift // auth is an authenticator for swift
@@ -27,7 +26,7 @@ func newAuth(f *Fs) *auth {
func (a *auth) Request(*swift.Connection) (r *http.Request, err error) { func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
const retries = 10 const retries = 10
for try := 1; try <= retries; try++ { for try := 1; try <= retries; try++ {
err = a.f.getCredentials(context.TODO()) err = a.f.getCredentials()
if err == nil { if err == nil {
break break
} }

View File

@@ -7,7 +7,6 @@ package hubic
// to be revisted after some actual experience. // to be revisted after some actual experience.
import ( import (
"context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
@@ -16,16 +15,16 @@ import (
"strings" "strings"
"time" "time"
"github.com/ncw/rclone/backend/swift"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/lib/oauthutil"
swiftLib "github.com/ncw/swift" swiftLib "github.com/ncw/swift"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/backend/swift"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/lib/oauthutil"
"golang.org/x/oauth2" "golang.org/x/oauth2"
) )
@@ -116,12 +115,11 @@ func (f *Fs) String() string {
// getCredentials reads the OpenStack Credentials using the Hubic API // getCredentials reads the OpenStack Credentials using the Hubic API
// //
// The credentials are read into the Fs // The credentials are read into the Fs
func (f *Fs) getCredentials(ctx context.Context) (err error) { func (f *Fs) getCredentials() (err error) {
req, err := http.NewRequest("GET", "https://api.hubic.com/1.0/account/credentials", nil) req, err := http.NewRequest("GET", "https://api.hubic.com/1.0/account/credentials", nil)
if err != nil { if err != nil {
return err return err
} }
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
resp, err := f.client.Do(req) resp, err := f.client.Do(req)
if err != nil { if err != nil {
return err return err

View File

@@ -4,16 +4,14 @@ package hubic_test
import ( import (
"testing" "testing"
"github.com/rclone/rclone/backend/hubic" "github.com/ncw/rclone/backend/hubic"
"github.com/rclone/rclone/fstest/fstests" "github.com/ncw/rclone/fstest/fstests"
) )
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) { func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
RemoteName: "TestHubic:", RemoteName: "TestHubic:",
NilObject: (*hubic.Object)(nil), NilObject: (*hubic.Object)(nil),
SkipFsCheckWrap: true,
SkipObjectCheckWrap: true,
}) })
} }

View File

@@ -46,95 +46,6 @@ func (t Time) String() string { return time.Time(t).Format(timeFormat) }
// APIString returns Time string in Jottacloud API format // APIString returns Time string in Jottacloud API format
func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) } func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }
// LoginToken is struct representing the login token generated in the WebUI
type LoginToken struct {
Username string `json:"username"`
Realm string `json:"realm"`
WellKnownLink string `json:"well_known_link"`
AuthToken string `json:"auth_token"`
}
// TokenJSON is the struct representing the HTTP response from OAuth2
// providers returning a token in JSON form.
type TokenJSON struct {
AccessToken string `json:"access_token"`
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
RefreshExpiresIn int32 `json:"refresh_expires_in"`
RefreshToken string `json:"refresh_token"`
TokenType string `json:"token_type"`
IDToken string `json:"id_token"`
NotBeforePolicy int32 `json:"not-before-policy"`
SessionState string `json:"session_state"`
Scope string `json:"scope"`
}
// JSON structures returned by new API
// AllocateFileRequest to prepare an upload to Jottacloud
type AllocateFileRequest struct {
Bytes int64 `json:"bytes"`
Created string `json:"created"`
Md5 string `json:"md5"`
Modified string `json:"modified"`
Path string `json:"path"`
}
// AllocateFileResponse for upload requests
type AllocateFileResponse struct {
Name string `json:"name"`
Path string `json:"path"`
State string `json:"state"`
UploadID string `json:"upload_id"`
UploadURL string `json:"upload_url"`
Bytes int64 `json:"bytes"`
ResumePos int64 `json:"resume_pos"`
}
// UploadResponse after an upload
type UploadResponse struct {
Name string `json:"name"`
Path string `json:"path"`
Kind string `json:"kind"`
ContentID string `json:"content_id"`
Bytes int64 `json:"bytes"`
Md5 string `json:"md5"`
Created int64 `json:"created"`
Modified int64 `json:"modified"`
Deleted interface{} `json:"deleted"`
Mime string `json:"mime"`
}
// DeviceRegistrationResponse is the response to registering a device
type DeviceRegistrationResponse struct {
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
}
// CustomerInfo provides general information about the account. Required for finding the correct internal username.
type CustomerInfo struct {
Username string `json:"username"`
Email string `json:"email"`
Name string `json:"name"`
CountryCode string `json:"country_code"`
LanguageCode string `json:"language_code"`
CustomerGroupCode string `json:"customer_group_code"`
BrandCode string `json:"brand_code"`
AccountType string `json:"account_type"`
SubscriptionType string `json:"subscription_type"`
Usage int64 `json:"usage"`
Qouta int64 `json:"quota"`
BusinessUsage int64 `json:"business_usage"`
BusinessQouta int64 `json:"business_quota"`
WriteLocked bool `json:"write_locked"`
ReadLocked bool `json:"read_locked"`
LockedCause interface{} `json:"locked_cause"`
WebHash string `json:"web_hash"`
AndroidHash string `json:"android_hash"`
IOSHash string `json:"ios_hash"`
}
// XML structures returned by the old API
// Flag is a hacky type for checking if an attribute is present // Flag is a hacky type for checking if an attribute is present
type Flag bool type Flag bool
@@ -153,6 +64,15 @@ func (f *Flag) MarshalXMLAttr(name xml.Name) (xml.Attr, error) {
return attr, errors.New("unimplemented") return attr, errors.New("unimplemented")
} }
// TokenJSON is the struct representing the HTTP response from OAuth2
// providers returning a token in JSON form.
type TokenJSON struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
RefreshToken string `json:"refresh_token"`
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
}
/* /*
GET http://www.jottacloud.com/JFS/<account> GET http://www.jottacloud.com/JFS/<account>
@@ -182,8 +102,8 @@ GET http://www.jottacloud.com/JFS/<account>
</user> </user>
*/ */
// DriveInfo represents a Jottacloud account // AccountInfo represents a Jottacloud account
type DriveInfo struct { type AccountInfo struct {
Username string `xml:"username"` Username string `xml:"username"`
AccountType string `xml:"account-type"` AccountType string `xml:"account-type"`
Locked bool `xml:"locked"` Locked bool `xml:"locked"`
@@ -360,3 +280,37 @@ func (e *Error) Error() string {
} }
return out return out
} }
// AllocateFileRequest to prepare an upload to Jottacloud
type AllocateFileRequest struct {
Bytes int64 `json:"bytes"`
Created string `json:"created"`
Md5 string `json:"md5"`
Modified string `json:"modified"`
Path string `json:"path"`
}
// AllocateFileResponse for upload requests
type AllocateFileResponse struct {
	Name      string `json:"name"`       // name of the allocated file
	Path      string `json:"path"`       // remote path of the allocated file
	State     string `json:"state"`      // state of the allocation as reported by the server
	UploadID  string `json:"upload_id"`  // identifier for this upload session
	UploadURL string `json:"upload_url"` // URL the file data should be sent to
	Bytes     int64  `json:"bytes"`      // total size of the file in bytes
	ResumePos int64  `json:"resume_pos"` // byte offset to resume the upload from
}
// UploadResponse after an upload
type UploadResponse struct {
	Name      string      `json:"name"`       // name of the uploaded file
	Path      string      `json:"path"`       // remote path of the uploaded file
	Kind      string      `json:"kind"`       // kind of entry as reported by the server
	ContentID string      `json:"content_id"` // server identifier for the file content
	Bytes     int64       `json:"bytes"`      // size of the uploaded file in bytes
	Md5       string      `json:"md5"`        // MD5 checksum of the uploaded content
	Created   int64       `json:"created"`    // creation time as an integer — presumably epoch millis, confirm against API
	Modified  int64       `json:"modified"`   // modification time as an integer — presumably epoch millis, confirm against API
	Deleted   interface{} `json:"deleted"`    // deletion marker — type varies, so left untyped
	Mime      string      `json:"mime"`       // MIME type of the file
}

File diff suppressed because it is too large Load Diff

View File

@@ -6,7 +6,7 @@ import (
"io" "io"
"testing" "testing"
"github.com/rclone/rclone/lib/readers" "github.com/ncw/rclone/lib/readers"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )

View File

@@ -4,8 +4,8 @@ package jottacloud_test
import ( import (
"testing" "testing"
"github.com/rclone/rclone/backend/jottacloud" "github.com/ncw/rclone/backend/jottacloud"
"github.com/rclone/rclone/fstest/fstests" "github.com/ncw/rclone/fstest/fstests"
) )
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the remote

View File

@@ -0,0 +1,77 @@
/*
Translate file names for JottaCloud adapted from OneDrive
The following characters are JottaCloud reserved characters, and can't
be used in JottaCloud folder and file names.
jottacloud = "/" / "\" / "*" / "<" / ">" / "?" / "!" / "&" / ":" / ";" / "|" / "#" / "%" / """ / "'" / "." / "~"
*/
package jottacloud
import (
"regexp"
"strings"
)
// charMap holds replacements for characters
//
// Jottacloud has a restricted set of characters compared to other cloud
// storage systems, so we map these to the FULLWIDTH unicode
// equivalents (mapping adapted from the OneDrive backend)
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
	charMap = map[rune]rune{
		'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
		'*':  '＊', // FULLWIDTH ASTERISK
		'<':  '＜', // FULLWIDTH LESS-THAN SIGN
		'>':  '＞', // FULLWIDTH GREATER-THAN SIGN
		'?':  '？', // FULLWIDTH QUESTION MARK
		':':  '：', // FULLWIDTH COLON
		';':  '；', // FULLWIDTH SEMICOLON
		'|':  '｜', // FULLWIDTH VERTICAL LINE
		'"':  '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
		' ':  '␠', // SYMBOL FOR SPACE
	}
	// invCharMap is the inverse of charMap, populated in init
	invCharMap map[rune]rune
	// fixStartingWithSpace matches a space at the start of the path or
	// directly after a "/"
	fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
	// fixEndingWithSpace matches a space at the end of the path or
	// directly before a "/"
	fixEndingWithSpace = regexp.MustCompile(` (/|$)`)
)
// init builds invCharMap as the inverse of charMap so that
// restoreReservedChars can map substituted runes back to the originals.
func init() {
	// Create inverse charMap
	invCharMap = make(map[rune]rune, len(charMap))
	for k, v := range charMap {
		invCharMap[v] = k
	}
}
// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
// Filenames can't start with space
in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
// Filenames can't end with space
in = fixEndingWithSpace.ReplaceAllString(in, string(charMap[' '])+"$1")
return strings.Map(func(c rune) rune {
if replacement, ok := charMap[c]; ok && c != ' ' {
return replacement
}
return c
}, in)
}
// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
	// Map each substitution rune back to the original reserved
	// character; anything not in invCharMap passes through unchanged.
	unmap := func(r rune) rune {
		original, found := invCharMap[r]
		if !found {
			return r
		}
		return original
	}
	return strings.Map(unmap, in)
}

View File

@@ -0,0 +1,28 @@
package jottacloud
import "testing"
// TestReplace checks that replaceReservedChars substitutes reserved
// characters with their FULLWIDTH equivalents and that
// restoreReservedChars is its exact inverse.
//
// NOTE(review): the expected strings for the reserved-character cases
// were stripped to empty literals in the extracted copy; they are
// restored here from the charMap FULLWIDTH substitutions - confirm
// against the backend's charMap.
func TestReplace(t *testing.T) {
	for _, test := range []struct {
		in  string
		out string
	}{
		{"", ""},
		{"abc 123", "abc 123"},
		{`\*<>?:;|"`, `＼＊＜＞？：；｜＂`},
		{`\*<>?:;|"\*<>?:;|"`, `＼＊＜＞？：；｜＂＼＊＜＞？：；｜＂`},
		{" leading space", "␠leading space"},
		{"trailing space ", "trailing space␠"},
		{" leading space/ leading space/ leading space", "␠leading space/␠leading space/␠leading space"},
		{"trailing space /trailing space /trailing space ", "trailing space␠/trailing space␠/trailing space␠"},
	} {
		got := replaceReservedChars(test.in)
		if got != test.out {
			t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
		}
		got2 := restoreReservedChars(got)
		if got2 != test.in {
			t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
		}
	}
}

View File

@@ -1,7 +1,6 @@
package koofr package koofr
import ( import (
"context"
"encoding/base64" "encoding/base64"
"errors" "errors"
"fmt" "fmt"
@@ -11,20 +10,16 @@ import (
"strings" "strings"
"time" "time"
"github.com/rclone/rclone/fs" "github.com/ncw/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap" "github.com/ncw/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct" "github.com/ncw/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure" "github.com/ncw/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings" "github.com/ncw/rclone/fs/hash"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
httpclient "github.com/koofr/go-httpclient" httpclient "github.com/koofr/go-httpclient"
koofrclient "github.com/koofr/go-koofrclient" koofrclient "github.com/koofr/go-koofrclient"
) )
const enc = encodings.Koofr
// Register Fs with rclone // Register Fs with rclone
func init() { func init() {
fs.Register(&fs.RegInfo{ fs.Register(&fs.RegInfo{
@@ -44,12 +39,6 @@ func init() {
Required: false, Required: false,
Default: "", Default: "",
Advanced: true, Advanced: true,
}, {
Name: "setmtime",
Help: "Does the backend support setting modification time. Set this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
Default: true,
Required: true,
Advanced: true,
}, { }, {
Name: "user", Name: "user",
Help: "Your Koofr user name", Help: "Your Koofr user name",
@@ -70,7 +59,6 @@ type Options struct {
MountID string `config:"mountid"` MountID string `config:"mountid"`
User string `config:"user"` User string `config:"user"`
Password string `config:"password"` Password string `config:"password"`
SetMTime bool `config:"setmtime"`
} }
// A Fs is a representation of a remote Koofr Fs // A Fs is a representation of a remote Koofr Fs
@@ -117,7 +105,7 @@ func (o *Object) Remote() string {
} }
// ModTime returns the modification time of the Object // ModTime returns the modification time of the Object
func (o *Object) ModTime(ctx context.Context) time.Time { func (o *Object) ModTime() time.Time {
return time.Unix(o.info.Modified/1000, (o.info.Modified%1000)*1000*1000) return time.Unix(o.info.Modified/1000, (o.info.Modified%1000)*1000*1000)
} }
@@ -132,7 +120,7 @@ func (o *Object) Fs() fs.Info {
} }
// Hash returns an MD5 hash of the Object // Hash returns an MD5 hash of the Object
func (o *Object) Hash(ctx context.Context, typ hash.Type) (string, error) { func (o *Object) Hash(typ hash.Type) (string, error) {
if typ == hash.MD5 { if typ == hash.MD5 {
return o.info.Hash, nil return o.info.Hash, nil
} }
@@ -150,15 +138,14 @@ func (o *Object) Storable() bool {
} }
// SetModTime is not supported // SetModTime is not supported
func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error { func (o *Object) SetModTime(mtime time.Time) error {
return fs.ErrorCantSetModTimeWithoutDelete return nil
} }
// Open opens the Object for reading // Open opens the Object for reading
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) { func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
var sOff, eOff int64 = 0, -1 var sOff, eOff int64 = 0, -1
fs.FixRangeOption(options, o.Size())
for _, option := range options { for _, option := range options {
switch x := option.(type) { switch x := option.(type) {
case *fs.SeekOption: case *fs.SeekOption:
@@ -175,6 +162,13 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
if sOff == 0 && eOff < 0 { if sOff == 0 && eOff < 0 {
return o.fs.client.FilesGet(o.fs.mountID, o.fullPath()) return o.fs.client.FilesGet(o.fs.mountID, o.fullPath())
} }
if sOff < 0 {
sOff = o.Size() - eOff
eOff = o.Size()
}
if eOff > o.Size() {
eOff = o.Size()
}
span := &koofrclient.FileSpan{ span := &koofrclient.FileSpan{
Start: sOff, Start: sOff,
End: eOff, End: eOff,
@@ -183,13 +177,11 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
} }
// Update updates the Object contents // Update updates the Object contents
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
mtime := src.ModTime(ctx).UnixNano() / 1000 / 1000 putopts := &koofrclient.PutFilter{
putopts := &koofrclient.PutOptions{ ForceOverwrite: true,
ForceOverwrite: true, NoRename: true,
NoRename: true, IgnoreNonExisting: true,
OverwriteIgnoreNonExisting: true,
SetModified: &mtime,
} }
fullPath := o.fullPath() fullPath := o.fullPath()
dirPath := dir(fullPath) dirPath := dir(fullPath)
@@ -198,7 +190,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil { if err != nil {
return err return err
} }
info, err := o.fs.client.FilesPutWithOptions(o.fs.mountID, dirPath, name, in, putopts) info, err := o.fs.client.FilesPutOptions(o.fs.mountID, dirPath, name, in, putopts)
if err != nil { if err != nil {
return err return err
} }
@@ -207,7 +199,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} }
// Remove deletes the remote Object // Remove deletes the remote Object
func (o *Object) Remove(ctx context.Context) error { func (o *Object) Remove() error {
return o.fs.client.FilesDelete(o.fs.mountID, o.fullPath()) return o.fs.client.FilesDelete(o.fs.mountID, o.fullPath())
} }
@@ -233,10 +225,7 @@ func (f *Fs) Features() *fs.Features {
// Precision denotes that setting modification times is not supported // Precision denotes that setting modification times is not supported
func (f *Fs) Precision() time.Duration { func (f *Fs) Precision() time.Duration {
if !f.opt.SetMTime { return fs.ModTimeNotSupported
return fs.ModTimeNotSupported
}
return time.Millisecond
} }
// Hashes returns a set of hashes are Provided by the Fs // Hashes returns a set of hashes are Provided by the Fs
@@ -246,7 +235,7 @@ func (f *Fs) Hashes() hash.Set {
// fullPath constructs a full, absolute path from a Fs root relative path, // fullPath constructs a full, absolute path from a Fs root relative path,
func (f *Fs) fullPath(part string) string { func (f *Fs) fullPath(part string) string {
return enc.FromStandardPath(path.Join("/", f.root, part)) return path.Join("/", f.root, part)
} }
// NewFs constructs a new filesystem given a root path and configuration options // NewFs constructs a new filesystem given a root path and configuration options
@@ -260,9 +249,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
httpClient := httpclient.New() client := koofrclient.NewKoofrClient(opt.Endpoint, false)
httpClient.Client = fshttp.NewClient(fs.Config)
client := koofrclient.NewKoofrClientWithHTTPClient(opt.Endpoint, httpClient)
basicAuth := fmt.Sprintf("Basic %s", basicAuth := fmt.Sprintf("Basic %s",
base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass))) base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass)))
client.HTTPClient.Headers.Set("Authorization", basicAuth) client.HTTPClient.Headers.Set("Authorization", basicAuth)
@@ -299,7 +286,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
} }
return nil, errors.New("Failed to find mount " + opt.MountID) return nil, errors.New("Failed to find mount " + opt.MountID)
} }
rootFile, err := f.client.FilesInfo(f.mountID, enc.FromStandardPath("/"+f.root)) rootFile, err := f.client.FilesInfo(f.mountID, "/"+f.root)
if err == nil && rootFile.Type != "dir" { if err == nil && rootFile.Type != "dir" {
f.root = dir(f.root) f.root = dir(f.root)
err = fs.ErrorIsFile err = fs.ErrorIsFile
@@ -310,21 +297,20 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
} }
// List returns a list of items in a directory // List returns a list of items in a directory
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
files, err := f.client.FilesList(f.mountID, f.fullPath(dir)) files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
if err != nil { if err != nil {
return nil, translateErrorsDir(err) return nil, translateErrorsDir(err)
} }
entries = make([]fs.DirEntry, len(files)) entries = make([]fs.DirEntry, len(files))
for i, file := range files { for i, file := range files {
remote := path.Join(dir, enc.ToStandardName(file.Name))
if file.Type == "dir" { if file.Type == "dir" {
entries[i] = fs.NewDir(remote, time.Unix(0, 0)) entries[i] = fs.NewDir(path.Join(dir, file.Name), time.Unix(0, 0))
} else { } else {
entries[i] = &Object{ entries[i] = &Object{
fs: f, fs: f,
info: file, info: file,
remote: remote, remote: path.Join(dir, file.Name),
} }
} }
} }
@@ -332,7 +318,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
} }
// NewObject creates a new remote Object for a given remote path // NewObject creates a new remote Object for a given remote path
func (f *Fs) NewObject(ctx context.Context, remote string) (obj fs.Object, err error) { func (f *Fs) NewObject(remote string) (obj fs.Object, err error) {
info, err := f.client.FilesInfo(f.mountID, f.fullPath(remote)) info, err := f.client.FilesInfo(f.mountID, f.fullPath(remote))
if err != nil { if err != nil {
return nil, translateErrorsObject(err) return nil, translateErrorsObject(err)
@@ -348,13 +334,11 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (obj fs.Object, err e
} }
// Put updates a remote Object // Put updates a remote Object
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (obj fs.Object, err error) { func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (obj fs.Object, err error) {
mtime := src.ModTime(ctx).UnixNano() / 1000 / 1000 putopts := &koofrclient.PutFilter{
putopts := &koofrclient.PutOptions{ ForceOverwrite: true,
ForceOverwrite: true, NoRename: true,
NoRename: true, IgnoreNonExisting: true,
OverwriteIgnoreNonExisting: true,
SetModified: &mtime,
} }
fullPath := f.fullPath(src.Remote()) fullPath := f.fullPath(src.Remote())
dirPath := dir(fullPath) dirPath := dir(fullPath)
@@ -363,7 +347,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
if err != nil { if err != nil {
return nil, err return nil, err
} }
info, err := f.client.FilesPutWithOptions(f.mountID, dirPath, name, in, putopts) info, err := f.client.FilesPutOptions(f.mountID, dirPath, name, in, putopts)
if err != nil { if err != nil {
return nil, translateErrorsObject(err) return nil, translateErrorsObject(err)
} }
@@ -375,8 +359,8 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
} }
// PutStream updates a remote Object with a stream of unknown size // PutStream updates a remote Object with a stream of unknown size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...) return f.Put(in, src, options...)
} }
// isBadRequest is a predicate which holds true iff the error returned was // isBadRequest is a predicate which holds true iff the error returned was
@@ -452,13 +436,13 @@ func (f *Fs) mkdir(fullPath string) error {
// Mkdir creates a directory at the given remote path. Creates ancestors if // Mkdir creates a directory at the given remote path. Creates ancestors if
// necessary // necessary
func (f *Fs) Mkdir(ctx context.Context, dir string) error { func (f *Fs) Mkdir(dir string) error {
fullPath := f.fullPath(dir) fullPath := f.fullPath(dir)
return f.mkdir(fullPath) return f.mkdir(fullPath)
} }
// Rmdir removes an (empty) directory at the given remote path // Rmdir removes an (empty) directory at the given remote path
func (f *Fs) Rmdir(ctx context.Context, dir string) error { func (f *Fs) Rmdir(dir string) error {
files, err := f.client.FilesList(f.mountID, f.fullPath(dir)) files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
if err != nil { if err != nil {
return translateErrorsDir(err) return translateErrorsDir(err)
@@ -474,25 +458,24 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
} }
// Copy copies a remote Object to the given path // Copy copies a remote Object to the given path
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
dstFullPath := f.fullPath(remote) dstFullPath := f.fullPath(remote)
dstDir := dir(dstFullPath) dstDir := dir(dstFullPath)
err := f.mkdir(dstDir) err := f.mkdir(dstDir)
if err != nil { if err != nil {
return nil, fs.ErrorCantCopy return nil, fs.ErrorCantCopy
} }
mtime := src.ModTime(ctx).UnixNano() / 1000 / 1000
err = f.client.FilesCopy((src.(*Object)).fs.mountID, err = f.client.FilesCopy((src.(*Object)).fs.mountID,
(src.(*Object)).fs.fullPath((src.(*Object)).remote), (src.(*Object)).fs.fullPath((src.(*Object)).remote),
f.mountID, dstFullPath, koofrclient.CopyOptions{SetModified: &mtime}) f.mountID, dstFullPath)
if err != nil { if err != nil {
return nil, fs.ErrorCantCopy return nil, fs.ErrorCantCopy
} }
return f.NewObject(ctx, remote) return f.NewObject(remote)
} }
// Move moves a remote Object to the given path // Move moves a remote Object to the given path
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj := src.(*Object) srcObj := src.(*Object)
dstFullPath := f.fullPath(remote) dstFullPath := f.fullPath(remote)
dstDir := dir(dstFullPath) dstDir := dir(dstFullPath)
@@ -505,11 +488,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if err != nil { if err != nil {
return nil, fs.ErrorCantMove return nil, fs.ErrorCantMove
} }
return f.NewObject(ctx, remote) return f.NewObject(remote)
} }
// DirMove moves a remote directory to the given path // DirMove moves a remote directory to the given path
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs := src.(*Fs) srcFs := src.(*Fs)
srcFullPath := srcFs.fullPath(srcRemote) srcFullPath := srcFs.fullPath(srcRemote)
dstFullPath := f.fullPath(dstRemote) dstFullPath := f.fullPath(dstRemote)
@@ -529,7 +512,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
} }
// About reports space usage (with a MB precision) // About reports space usage (with a MB precision)
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { func (f *Fs) About() (*fs.Usage, error) {
mount, err := f.client.MountsDetails(f.mountID) mount, err := f.client.MountsDetails(f.mountID)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -545,7 +528,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
} }
// Purge purges the complete Fs // Purge purges the complete Fs
func (f *Fs) Purge(ctx context.Context) error { func (f *Fs) Purge() error {
err := translateErrorsDir(f.client.FilesDelete(f.mountID, f.fullPath(""))) err := translateErrorsDir(f.client.FilesDelete(f.mountID, f.fullPath("")))
return err return err
} }
@@ -597,7 +580,7 @@ func createLink(c *koofrclient.KoofrClient, mountID string, path string) (*link,
} }
// PublicLink creates a public link to the remote path // PublicLink creates a public link to the remote path
func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) { func (f *Fs) PublicLink(remote string) (string, error) {
linkData, err := createLink(f.client, f.mountID, f.fullPath(remote)) linkData, err := createLink(f.client, f.mountID, f.fullPath(remote))
if err != nil { if err != nil {
return "", translateErrorsDir(err) return "", translateErrorsDir(err)

View File

@@ -3,7 +3,7 @@ package koofr_test
import ( import (
"testing" "testing"
"github.com/rclone/rclone/fstest/fstests" "github.com/ncw/rclone/fstest/fstests"
) )
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the remote

View File

Some files were not shown because too many files have changed in this diff Show More