
Compare commits


1 Commit

Author | SHA1 | Message | Date
--- | --- | --- | ---
Nick Craig-Wood | ad7078c365 | drive: add experimental --drive-fast-list-grouping flag #3114 | 2019-04-16 12:08:01 +01:00
1474 changed files with 76709 additions and 207452 deletions

.appveyor.yml Normal file

@@ -0,0 +1,49 @@
version: "{build}"
os: Windows Server 2012 R2
clone_folder: c:\gopath\src\github.com\ncw\rclone
cache:
- '%LocalAppData%\go-build'
environment:
GOPATH: C:\gopath
CPATH: C:\Program Files (x86)\WinFsp\inc\fuse
ORIGPATH: '%PATH%'
NOCCPATH: C:\MinGW\bin;%GOPATH%\bin;%PATH%
PATHCC64: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin;%NOCCPATH%
PATHCC32: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%NOCCPATH%
PATH: '%PATHCC64%'
RCLONE_CONFIG_PASS:
secure: HbzxSy9zQ8NYWN9NNPf6ALQO9Q0mwRNqwehsLcOEHy0=
install:
- choco install winfsp -y
- choco install zip -y
- copy c:\MinGW\bin\mingw32-make.exe c:\MinGW\bin\make.exe
build_script:
- echo %PATH%
- echo %GOPATH%
- go version
- go env
- go install
- go build
- make log_since_last_release > %TEMP%\git-log.txt
- make version > %TEMP%\version
- set /p RCLONE_VERSION=<%TEMP%\version
- set PATH=%PATHCC32%
- go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/386" -cgo -tags cmount %RCLONE_VERSION%
- set PATH=%PATHCC64%
- go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/amd64" -cgo -no-clean -tags cmount %RCLONE_VERSION%
test_script:
- make GOTAGS=cmount quicktest
artifacts:
- path: rclone.exe
- path: build/*-v*.zip
deploy_script:
- IF "%APPVEYOR_REPO_NAME%" == "ncw/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload

.circleci/config.yml Normal file

@@ -0,0 +1,50 @@
---
version: 2
jobs:
build:
machine: true
working_directory: ~/.go_workspace/src/github.com/ncw/rclone
steps:
- checkout
- run:
name: Cross-compile rclone
command: |
docker pull rclone/xgo-cgofuse
go get -v github.com/karalabe/xgo
xgo \
--image=rclone/xgo-cgofuse \
--targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
-tags cmount \
.
xgo \
--targets=android/*,ios/* \
.
- run:
name: Prepare artifacts
command: |
mkdir -p /tmp/rclone.dist
cp -R rclone-* /tmp/rclone.dist
mkdir build
cp -R rclone-* build/
- run:
name: Build rclone
command: |
go version
go build
- run:
name: Upload artifacts
command: |
if [[ $CIRCLE_PULL_REQUEST != "" ]]; then
make circleci_upload
fi
- store_artifacts:
path: /tmp/rclone.dist

.gitattributes vendored

@@ -1,7 +0,0 @@
# Ignore generated files in GitHub language statistics and diffs
/MANUAL.* linguist-generated=true
/rclone.1 linguist-generated=true
# Don't fiddle with the line endings of test data
**/testdata/** -text
**/test/** -text

.github/ISSUE_TEMPLATE.md

@@ -10,7 +10,7 @@ instead of filing an issue for a quick response.
If you are reporting a bug or asking for a new feature then please use one of the templates here:
https://github.com/rclone/rclone/issues/new
https://github.com/ncw/rclone/issues/new
otherwise fill in the form below.

.github/PULL_REQUEST_TEMPLATE.md

@@ -22,8 +22,8 @@ Link issues and relevant forum posts here.
#### Checklist
- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
- [ ] I have read the [contribution guidelines](https://github.com/ncw/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
- [ ] I have added tests for all changes in this PR if appropriate.
- [ ] I have added documentation for the changes if appropriate.
- [ ] All commit messages are in [house style](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#commit-messages).
- [ ] All commit messages are in [house style](https://github.com/ncw/rclone/blob/master/CONTRIBUTING.md#commit-messages).
- [ ] I'm done, this Pull Request is ready for review :-)

.github/workflows/build.yml

@@ -1,250 +0,0 @@
---
# Github Actions build for rclone
# -*- compile-command: "yamllint -f parsable build.yml" -*-
name: build
# Trigger the workflow on push or pull request
on:
push:
branches:
- '*'
tags:
- '*'
pull_request:
jobs:
build:
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'modules_race', 'go1.10', 'go1.11', 'go1.12']
include:
- job_name: linux
os: ubuntu-latest
go: '1.13.x'
modules: 'off'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
quicktest: true
deploy: true
- job_name: mac
os: macOS-latest
go: '1.13.x'
modules: 'off'
gotags: '' # cmount doesn't work on osx travis for some reason
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
racequicktest: true
deploy: true
- job_name: windows_amd64
os: windows-latest
go: '1.13.x'
modules: 'off'
gotags: cmount
build_flags: '-include "^windows/amd64" -cgo'
quicktest: true
racequicktest: true
deploy: true
- job_name: windows_386
os: windows-latest
go: '1.13.x'
modules: 'off'
gotags: cmount
goarch: '386'
cgo: '1'
build_flags: '-include "^windows/386" -cgo'
quicktest: true
deploy: true
- job_name: other_os
os: ubuntu-latest
go: '1.13.x'
modules: 'off'
build_flags: '-exclude "^(windows/|darwin/amd64|linux/)"'
compile_all: true
deploy: true
- job_name: modules_race
os: ubuntu-latest
go: '1.13.x'
modules: 'on'
quicktest: true
racequicktest: true
- job_name: go1.10
os: ubuntu-latest
go: '1.10.x'
modules: 'off'
quicktest: true
- job_name: go1.11
os: ubuntu-latest
go: '1.11.x'
modules: 'off'
quicktest: true
- job_name: go1.12
os: ubuntu-latest
go: '1.12.x'
modules: 'off'
quicktest: true
name: ${{ matrix.job_name }}
runs-on: ${{ matrix.os }}
steps:
- name: Checkout
uses: actions/checkout@master
with:
path: ./src/github.com/${{ github.repository }}
- name: Install Go
uses: actions/setup-go@v1
with:
go-version: ${{ matrix.go }}
- name: Set environment variables
shell: bash
run: |
echo '::set-env name=GOPATH::${{ runner.workspace }}'
echo '::add-path::${{ runner.workspace }}/bin'
echo '::set-env name=GO111MODULE::${{ matrix.modules }}'
echo '::set-env name=GOTAGS::${{ matrix.gotags }}'
echo '::set-env name=BUILD_FLAGS::${{ matrix.build_flags }}'
if [[ "${{ matrix.goarch }}" != "" ]]; then echo '::set-env name=GOARCH::${{ matrix.goarch }}' ; fi
if [[ "${{ matrix.cgo }}" != "" ]]; then echo '::set-env name=CGO_ENABLED::${{ matrix.cgo }}' ; fi
- name: Install Libraries on Linux
shell: bash
run: |
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
sudo apt-get install fuse libfuse-dev rpm pkg-config
if: matrix.os == 'ubuntu-latest'
- name: Install Libraries on macOS
shell: bash
run: |
brew update
brew cask install osxfuse
if: matrix.os == 'macOS-latest'
- name: Install Libraries on Windows
shell: powershell
run: |
$ProgressPreference = 'SilentlyContinue'
choco install -y winfsp zip
Write-Host "::set-env name=CPATH::C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
if ($env:GOARCH -eq "386") {
choco install -y mingw --forcex86 --force
Write-Host "::add-path::C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
}
# Copy mingw32-make.exe to make.exe so the same command line
# can be used on Windows as on macOS and Linux
$path = (get-command mingw32-make.exe).Path
Copy-Item -Path $path -Destination (Join-Path (Split-Path -Path $path) 'make.exe')
if: matrix.os == 'windows-latest'
- name: Print Go version and environment
shell: bash
run: |
printf "Using go at: $(which go)\n"
printf "Go version: $(go version)\n"
printf "\n\nGo environment:\n\n"
go env
printf "\n\nRclone environment:\n\n"
make vars
printf "\n\nSystem environment:\n\n"
env
- name: Run tests
shell: bash
run: |
make
make quicktest
if: matrix.quicktest
- name: Race test
shell: bash
run: |
make racequicktest
if: matrix.racequicktest
- name: Code quality test
shell: bash
run: |
make build_dep
make check
if: matrix.check
- name: Compile all architectures test
shell: bash
run: |
make
make compile_all
if: matrix.compile_all
- name: Deploy built binaries
shell: bash
run: |
if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep ; fi
make travis_beta
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# working-directory: '$(modulePath)'
if: matrix.deploy && github.head_ref == ''
xgo:
timeout-minutes: 60
name: "xgo cross compile"
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@master
with:
path: ./src/github.com/${{ github.repository }}
- name: Set environment variables
shell: bash
run: |
echo '::set-env name=GOPATH::${{ runner.workspace }}'
echo '::add-path::${{ runner.workspace }}/bin'
- name: Cross-compile rclone
run: |
docker pull billziss/xgo-cgofuse
go get -v github.com/karalabe/xgo
xgo \
-image=billziss/xgo-cgofuse \
-targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
-tags cmount \
-dest build \
.
xgo \
-image=billziss/xgo-cgofuse \
-targets=android/*,ios/* \
-dest build \
.
- name: Build rclone
run: |
docker pull golang
docker run --rm -v "$PWD":/usr/src/rclone -w /usr/src/rclone golang go build -mod=vendor -v
- name: Upload artifacts
run: |
make circleci_upload
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
if: github.head_ref == ''

.gitignore vendored

@@ -5,6 +5,3 @@ build
docs/public
rclone.iml
.idea
.history
*.test
*.log

.pkgr.yml Normal file

@@ -0,0 +1,2 @@
default_dependencies: false
cli: rclone

.travis.yml Normal file

@@ -0,0 +1,106 @@
---
language: go
sudo: required
dist: xenial
os:
- linux
go_import_path: github.com/ncw/rclone
before_install:
- git fetch --unshallow --tags
- |
if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
fi
if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
brew update
brew tap caskroom/cask
brew cask install osxfuse
fi
if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then
choco install -y winfsp zip make
cd ../.. # fix crlf in git checkout
mv $TRAVIS_REPO_SLUG _old
git config --global core.autocrlf false
git clone _old $TRAVIS_REPO_SLUG
cd $TRAVIS_REPO_SLUG
fi
install:
- make vars
env:
global:
- GOTAGS=cmount
- GO111MODULE=off
- secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
- secure: AMjrMAksDy3QwqGqnvtUg8FL/GNVgNqTqhntLF9HSU0njHhX6YurGGnfKdD9vNHlajPQOewvmBjwNLcDWGn2WObdvmh9Ohep0EmOjZ63kliaRaSSQueSd8y0idfqMQAxep0SObOYbEDVmQh0RCAE9wOVKRaPgw98XvgqWGDq5Tw=
- secure: Uaiveq+/rvQjO03GzvQZV2J6pZfedoFuhdXrLVhhHSeP4ZBca0olw7xaqkabUyP3LkVYXMDSX8EbyeuQT1jfEe5wp5sBdfaDtuYW6heFyjiHIIIbVyBfGXon6db4ETBjOaX/Xt8uktrgNge6qFlj+kpnmpFGxf0jmDLw1zgg7tk=
addons:
apt:
packages:
- fuse
- libfuse-dev
- rpm
- pkg-config
cache:
directories:
- $HOME/.cache/go-build
matrix:
allow_failures:
- go: tip
include:
- go: 1.9.x
script:
- make quicktest
- go: 1.10.x
script:
- make quicktest
- go: 1.11.x
script:
- make quicktest
- go: 1.12.x
env:
- GOTAGS=cmount
script:
- make build_dep
- make check
- make quicktest
- make racequicktest
- make compile_all
- os: osx
go: 1.12.x
env:
- GOTAGS= # cmount doesn't work on osx travis for some reason
cache:
directories:
- $HOME/Library/Caches/go-build
script:
- make
- make quicktest
- make racequicktest
# - os: windows
# go: 1.12.x
# env:
# - GOTAGS=cmount
# - CPATH='C:\Program Files (x86)\WinFsp\inc\fuse'
# #filter_secrets: false # works around a problem with secrets under windows
# cache:
# directories:
# - ${LocalAppData}/go-build
# script:
# - make
# - make quicktest
# - make racequicktest
- go: tip
script:
- make quicktest
deploy:
provider: script
script: make travis_beta
skip_cleanup: true
on:
repo: ncw/rclone
all_branches: true
go: 1.12.x
condition: $TRAVIS_PULL_REQUEST == false && $TRAVIS_OS_NAME != "windows"

CONTRIBUTING.md

@@ -29,12 +29,12 @@ You'll need a Go environment set up with GOPATH set. See [the Go
getting started docs](https://golang.org/doc/install) for more info.
First in your web browser press the fork button on [rclone's GitHub
page](https://github.com/rclone/rclone).
page](https://github.com/ncw/rclone).
Now in your terminal
go get -u github.com/rclone/rclone
cd $GOPATH/src/github.com/rclone/rclone
go get -u github.com/ncw/rclone
cd $GOPATH/src/github.com/ncw/rclone
git remote rename origin upstream
git remote add origin git@github.com:YOURUSER/rclone.git
@@ -118,7 +118,7 @@ but they can be run against any of the remotes.
cd fs/sync
go test -v -remote TestDrive:
go test -v -remote TestDrive: -fast-list
go test -v -remote TestDrive: -subdir
cd fs/operations
go test -v -remote TestDrive:
@@ -127,7 +127,7 @@ If you want to use the integration test framework to run these tests
all together with an HTML report and test retries then from the
project root:
go install github.com/rclone/rclone/fstest/test_all
go install github.com/ncw/rclone/fstest/test_all
test_all -backend drive
If you want to run all the integration tests against all the remotes,
@@ -341,12 +341,6 @@ Getting going
* Add your remote to the imports in `backend/all/all.go`
* HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
* Try to implement as many optional methods as possible as it makes the remote more usable.
* Use fs/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
* `go install -tags noencode`
* `rclone purge -v TestRemote:rclone-info`
* `rclone info -vv --write-json remote.json TestRemote:rclone-info`
* `go run cmd/info/internal/build_csv/main.go -o remote.csv remote.json`
* open `remote.csv` in a spreadsheet and examine
Unit tests
@@ -368,59 +362,19 @@ Or if you want to run the integration tests manually:
* `go test -v -remote TestRemote:`
* `cd fs/sync`
* `go test -v -remote TestRemote:`
* If your remote defines `ListR` check with this also
* If you are making a bucket based remote, then check with this also
* `go test -v -remote TestRemote: -subdir`
* And if your remote defines `ListR` this also
* `go test -v -remote TestRemote: -fast-list`
See the [testing](#testing) section for more information on integration tests.
Add your fs to the docs - you'll need to pick an icon for it from
[fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
alphabetical order of full name of remote (eg `drive` is ordered as
`Google Drive`) but with the local file system last.
Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in alphabetical order but with the local file system last.
* `README.md` - main GitHub page
* `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
* make sure this has the `autogenerated options` comments in (see your reference backend docs)
* update them with `make backenddocs` - revert any changes in other backends
* `docs/content/overview.md` - overview docs
* `docs/content/docs.md` - list of remotes in config section
* `docs/content/about.md` - front page of rclone.org
* `docs/layouts/chrome/navbar.html` - add it to the website navigation
* `bin/make_manual.py` - add the page to the `docs` constant
Once you've written the docs, run `make serve` and check they look OK
in the web browser and the links (internal and external) all work.
## Writing a plugin ##
New features (backends, commands) can also be added "out-of-tree", through Go plugins.
Changes will be kept in a dynamically loaded file instead of being compiled into the main binary.
This is useful if you can't merge your changes upstream or don't want to maintain a fork of rclone.
Usage
- Naming
- Plugins names must have the pattern `librcloneplugin_KIND_NAME.so`.
- `KIND` should be one of `backend`, `command` or `bundle`.
- Example: A plugin with backend support for PiFS would be called
`librcloneplugin_backend_pifs.so`.
- Loading
- Supported on macOS & Linux as of now. ([Go issue for Windows support](https://github.com/golang/go/issues/19282))
- Supported on rclone v1.50 or greater.
- All plugins in the folder specified by variable `$RCLONE_PLUGIN_PATH` are loaded.
- If this variable doesn't exist, plugin support is disabled.
- Plugins must be compiled against the exact version of rclone to work.
(The rclone used during building the plugin must be the same as the source of rclone)
Building
To turn your existing additions into a Go plugin, move them to an external repository
and change the top-level package name to `main`.
Check `rclone --version` and make sure that the plugin's rclone dependency and host Go version match.
Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.
[Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)
[Minimal example](https://gist.github.com/terorie/21b517ee347828e899e1913efc1d684f)
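
As a concrete illustration, here is a minimal sketch of such a plugin, reusing the PiFS naming example above; the stub `NewFs` body is hypothetical and only shows the registration shape:

```go
// Minimal sketch of an out-of-tree backend plugin ("pifs" from the
// naming example above). The NewFs stub is illustrative only.
package main

import (
	"errors"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
)

func init() {
	// Backends register themselves in init(), so merely loading the
	// plugin is enough to make the backend visible to rclone.
	fs.Register(&fs.RegInfo{
		Name:        "pifs",
		Description: "Example backend loaded as a plugin (sketch only)",
		NewFs: func(name, root string, m configmap.Mapper) (fs.Fs, error) {
			return nil, errors.New("pifs: NewFs not implemented in this sketch")
		},
	})
}

// main is ignored when built with -buildmode=plugin.
func main() {}
```

Built with `go build -buildmode=plugin -o librcloneplugin_backend_pifs.so .` and placed in `$RCLONE_PLUGIN_PATH`, the `init()` registration above is what makes the backend available.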

Dockerfile

@@ -1,22 +0,0 @@
FROM golang AS builder
COPY . /go/src/github.com/rclone/rclone/
WORKDIR /go/src/github.com/rclone/rclone/
RUN make quicktest
RUN \
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \
make
RUN ./rclone version
# Begin final image
FROM alpine:latest
RUN apk --no-cache add ca-certificates fuse
COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
ENTRYPOINT [ "rclone" ]
WORKDIR /data
ENV XDG_CONFIG_HOME=/config

MAINTAINERS.md

@@ -12,7 +12,6 @@ Current active maintainers of rclone are:
| Alex Chen | @Cnly | onedrive backend |
| Sandeep Ummadi | @sandeepkru | azureblob backend |
| Sebastian Bünger | @buengese | jottacloud & yandex backends |
| Ivan Andreev | @ivandeex | chunker & mailru backends |
**This is a work in progress Draft**
@@ -52,7 +51,7 @@ The milestones have these meanings:
* Help wanted - blue sky stuff that might get moved up, or someone could help with
* Known bugs - bugs waiting on external factors or we aren't going to fix for the moment
Tickets [with no milestone](https://github.com/rclone/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.
Tickets [with no milestone](https://github.com/ncw/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.
## Closing Tickets ##

MANUAL.html
File diff suppressed because it is too large

MANUAL.md (6348 changed lines)
File diff suppressed because it is too large

MANUAL.txt (6494 changed lines)
File diff suppressed because it is too large

Makefile

@@ -1,37 +1,23 @@
SHELL = bash
# Branch we are working on
BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(BUILD_SOURCEBRANCHNAME),$(lastword $(subst /, ,$(GITHUB_REF))),$(shell git rev-parse --abbrev-ref HEAD))
# Tag of the current commit, if any. If this is not "" then we are building a release
RELEASE_TAG := $(shell git tag -l --points-at HEAD)
# Version of last release (may not be on this branch)
VERSION := $(shell cat VERSION)
# Last tag on this branch
BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(shell git rev-parse --abbrev-ref HEAD))
LAST_TAG := $(shell git describe --tags --abbrev=0)
# If we are working on a release, override branch to master
ifdef RELEASE_TAG
ifeq ($(BRANCH),$(LAST_TAG))
BRANCH := master
endif
TAG_BRANCH := -$(BRANCH)
BRANCH_PATH := branch/
# If building HEAD or master then unset TAG_BRANCH and BRANCH_PATH
ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
TAG_BRANCH :=
BRANCH_PATH :=
endif
# Make version suffix -DDD-gCCCCCCCC (D=commits since last release, C=Commit) or blank
VERSION_SUFFIX := $(shell git describe --abbrev=8 --tags | perl -lpe 's/^v\d+\.\d+\.\d+//; s/^-(\d+)/"-".sprintf("%03d",$$1)/e;')
# TAG is current version + number of commits since last release + branch
TAG := $(VERSION)$(VERSION_SUFFIX)$(TAG_BRANCH)
NEXT_VERSION := $(shell echo $(VERSION) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
ifndef RELEASE_TAG
TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
ifneq ($(TAG),$(LAST_TAG))
TAG := $(TAG)-beta
endif
GO_VERSION := $(shell go version)
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
ifdef BETA_SUBDIR
BETA_SUBDIR := /$(BETA_SUBDIR)
endif
BETA_PATH := $(BRANCH_PATH)$(TAG)$(BETA_SUBDIR)
BETA_PATH := $(BRANCH_PATH)$(TAG)
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
BETA_UPLOAD_ROOT := memstore:beta-rclone-org
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
@@ -41,22 +27,19 @@ BUILDTAGS=-tags "$(GOTAGS)"
LINTTAGS=--build-tags "$(GOTAGS)"
endif
.PHONY: rclone test_all vars version
.PHONY: rclone vars version
rclone:
go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
mkdir -p `go env GOPATH`/bin/
cp -av rclone`go env GOEXE` `go env GOPATH`/bin/
test_all:
go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
touch fs/version.go
go install -v --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
cp -av `go env GOPATH`/bin/rclone .
vars:
@echo SHELL="'$(SHELL)'"
@echo BRANCH="'$(BRANCH)'"
@echo TAG="'$(TAG)'"
@echo VERSION="'$(VERSION)'"
@echo NEXT_VERSION="'$(NEXT_VERSION)'"
@echo LAST_TAG="'$(LAST_TAG)'"
@echo NEW_TAG="'$(NEW_TAG)'"
@echo GO_VERSION="'$(GO_VERSION)'"
@echo BETA_URL="'$(BETA_URL)'"
@@ -64,7 +47,8 @@ version:
@echo '$(TAG)'
# Full suite of integration tests
test: rclone test_all
test: rclone
go install --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/ncw/rclone/fstest/test_all
-test_all 2>&1 | tee test_all.log
@echo "Written logs in test_all.log"
@@ -77,7 +61,10 @@ racequicktest:
# Do source code quality checks
check: rclone
@# we still run go vet for -printfuncs which golangci-lint doesn't do yet
@# see: https://github.com/golangci/golangci-lint/issues/204
@echo "-- START CODE QUALITY REPORT -------------------------------"
@go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
@golangci-lint run $(LINTTAGS) ./...
@echo "-- END CODE QUALITY REPORT ---------------------------------"
@@ -87,8 +74,8 @@ build_dep:
# Get the release dependencies
release_dep:
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
go run bin/get-github-release.go -extract github-release aktau/github-release 'linux-amd64-github-release.tar.bz2'
go get -u github.com/goreleaser/nfpm/...
go get -u github.com/aktau/github-release
# Update dependencies
update:
@@ -96,11 +83,6 @@ update:
GO111MODULE=on go mod tidy
GO111MODULE=on go mod vendor
# Tidy the module dependencies
tidy:
GO111MODULE=on go mod tidy
GO111MODULE=on go mod vendor
doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
rclone.1: MANUAL.md
@@ -116,10 +98,10 @@ MANUAL.txt: MANUAL.md
pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt
commanddocs: rclone
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/commands/
backenddocs: rclone bin/make_backend_docs.py
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py
./bin/make_backend_docs.py
rcdocs: rclone
bin/make_rc_docs.sh
@@ -172,7 +154,7 @@ log_since_last_release:
git log $(LAST_TAG)..
compile_all:
go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG)
go run bin/cross-compile.go -parallel 8 -compile-only $(BUILDTAGS) $(TAG)
appveyor_upload:
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
@@ -188,15 +170,25 @@ ifndef BRANCH_PATH
endif
@echo Beta release ready at $(BETA_URL)/testbuilds
BUILD_FLAGS := -exclude "^(windows|darwin)/"
ifeq ($(TRAVIS_OS_NAME),osx)
BUILD_FLAGS := -include "^darwin/" -cgo
endif
ifeq ($(TRAVIS_OS_NAME),windows)
# BUILD_FLAGS := -include "^windows/" -cgo
# 386 doesn't build yet
BUILD_FLAGS := -include "^windows/amd64" -cgo
endif
travis_beta:
ifeq (linux,$(filter linux,$(subst Linux,linux,$(TRAVIS_OS_NAME) $(AGENT_OS))))
ifeq ($(TRAVIS_OS_NAME),linux)
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar.gz'
endif
git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) -parallel 8 $(BUILDTAGS) $(TAG)
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
endif
@echo Beta release ready at $(BETA_URL)
@@ -208,25 +200,24 @@ serve: website
cd docs && hugo server -v -w
tag: doc
@echo "Old tag is $(VERSION)"
@echo "New tag is $(NEXT_VERSION)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)\"\n" | gofmt > fs/version.go
echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_VERSION)" > VERSION
git tag -s -m "Version $(NEXT_VERSION)" $(NEXT_VERSION)
bin/make_changelog.py $(LAST_TAG) $(NEXT_VERSION) > docs/content/changelog.md.new
@echo "Old tag is $(LAST_TAG)"
@echo "New tag is $(NEW_TAG)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
echo -n "$(NEW_TAG)" > docs/layouts/partials/version.html
git tag -s -m "Version $(NEW_TAG)" $(NEW_TAG)
bin/make_changelog.py $(LAST_TAG) $(NEW_TAG) > docs/content/changelog.md.new
mv docs/content/changelog.md.new docs/content/changelog.md
@echo "Edit the new changelog in docs/content/changelog.md"
@echo "Then commit all the changes"
@echo git commit -m \"Version $(NEXT_VERSION)\" -a -v
@echo git commit -m \"Version $(NEW_TAG)\" -a -v
@echo "And finally run make retag before make cross etc"
retag:
git tag -f -s -m "Version $(VERSION)" $(VERSION)
git tag -f -s -m "Version $(LAST_TAG)" $(LAST_TAG)
startdev:
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(VERSION)-DEV\"\n" | gofmt > fs/version.go
git commit -m "Start $(VERSION)-DEV development" fs/version.go
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(LAST_TAG)-DEV\"\n" | gofmt > fs/version.go
git commit -m "Start $(LAST_TAG)-DEV development" fs/version.go
winzip:
zip -9 rclone-$(TAG).zip rclone.exe
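
As an aside, the NEXT_VERSION/NEW_TAG arithmetic above (strip the leading `v`, add 0.01, re-render as `vX.YY.0`) can be sketched in Go; `nextVersion` is a hypothetical helper for illustration, not part of the build:

```go
// Sketch of the NEXT_VERSION perl one-liner above. Assumes a
// well-formed vMAJOR.MINOR.PATCH input such as "v1.50.2".
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func nextVersion(version string) string {
	v := strings.TrimPrefix(version, "v")
	parts := strings.SplitN(v, ".", 3)
	mm, _ := strconv.ParseFloat(parts[0]+"."+parts[1], 64) // "1.50" -> 1.50
	return fmt.Sprintf("v%.2f.0", mm+0.01)                 // 1.50 -> "v1.51.0"
}

func main() {
	fmt.Println(nextVersion("v1.50.2")) // prints v1.51.0
}
```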

README.md

@@ -1,4 +1,4 @@
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/)
[![Logo](https://rclone.org/img/rclone-120x120.png)](https://rclone.org/)
[Website](https://rclone.org) |
[Documentation](https://rclone.org/docs/) |
@@ -6,15 +6,13 @@
[Contributing](CONTRIBUTING.md) |
[Changelog](https://rclone.org/changelog/) |
[Installation](https://rclone.org/install/) |
[Forum](https://forum.rclone.org/)
[Forum](https://forum.rclone.org/) |
[![Build Status](https://travis-ci.org/rclone/rclone.svg?branch=master)](https://travis-ci.org/rclone/rclone)
[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/rclone/rclone?branch=master&passingText=windows%20-%20ok&svg=true)](https://ci.appveyor.com/project/rclone/rclone)
[![Build Status](https://dev.azure.com/rclone/rclone/_apis/build/status/rclone.rclone?branchName=master)](https://dev.azure.com/rclone/rclone/_build/latest?definitionId=2&branchName=master)
[![CircleCI](https://circleci.com/gh/rclone/rclone/tree/master.svg?style=svg)](https://circleci.com/gh/rclone/rclone/tree/master)
[![Go Report Card](https://goreportcard.com/badge/github.com/rclone/rclone)](https://goreportcard.com/report/github.com/rclone/rclone)
[![GoDoc](https://godoc.org/github.com/rclone/rclone?status.svg)](https://godoc.org/github.com/rclone/rclone)
[![Docker Pulls](https://img.shields.io/docker/pulls/rclone/rclone)](https://hub.docker.com/r/rclone/rclone)
[![Build Status](https://travis-ci.org/ncw/rclone.svg?branch=master)](https://travis-ci.org/ncw/rclone)
[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/ncw/rclone?branch=master&passingText=windows%20-%20ok&svg=true)](https://ci.appveyor.com/project/ncw/rclone)
[![CircleCI](https://circleci.com/gh/ncw/rclone/tree/master.svg?style=svg)](https://circleci.com/gh/ncw/rclone/tree/master)
[![Go Report Card](https://goreportcard.com/badge/github.com/ncw/rclone)](https://goreportcard.com/report/github.com/ncw/rclone)
[![GoDoc](https://godoc.org/github.com/ncw/rclone?status.svg)](https://godoc.org/github.com/ncw/rclone)
# Rclone
@@ -22,27 +20,23 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
## Storage providers
* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
* Box [:page_facing_up:](https://rclone.org/box/)
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
@@ -55,8 +49,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* put.io [:page_facing_up:](https://rclone.org/webdav/#put-io)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
@@ -77,12 +70,9 @@ Please see [the full list of all storage providers and their features](https://r
* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
* Can sync to and from network, e.g. two different cloud accounts
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
* Optional encryption ([Crypt](https://rclone.org/crypt/))
* Optional cache ([Cache](https://rclone.org/cache/))
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
* Multi-threaded downloads to local disk
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna
## Installation & documentation

RELEASE.md

@@ -1,21 +1,14 @@
# Release
This file describes how to make the various kinds of releases
## Extra required software for making a release
Extra required software for making a release
* [github-release](https://github.com/aktau/github-release) for uploading packages
* pandoc for making the html and man pages
## Making a release
Making a release
* git status - make sure everything is checked in
* Check travis & appveyor builds are green
* make check
* make test # see integration test server or run locally
* make tag
* edit docs/content/changelog.md
* make tidy
* make doc
* git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX.0"
@@ -33,8 +26,8 @@ This file describes how to make the various kinds of releases
* # announce with forum post, twitter post, G+ post
Early in the next release cycle update the vendored dependencies
* Review any pinned packages in go.mod and remove if possible
* GO111MODULE=on go get -u github.com/spf13/cobra@master
* make update
* git status
* git add new files
@@ -55,56 +48,24 @@ Can be fixed with
* GO111MODULE=on go mod vendor
## Making a point release
If rclone needs a point release due to some horrendous bug:
First make the release branch. If this is a second point release then
this will be done already.
* BASE_TAG=v1.XX # eg v1.49
* NEW_TAG=${BASE_TAG}.Y # eg v1.49.1
* echo $BASE_TAG $NEW_TAG # v1.49 v1.49.1
* git branch ${BASE_TAG} ${BASE_TAG}-fixes
Now
* git co ${BASE_TAG}-fixes
Making a point release. If rclone needs a point release due to some
horrendous bug, then
* git branch v1.XX v1.XX-fixes
* git cherry-pick any fixes
* Test (see above)
* make NEXT_VERSION=${NEW_TAG} tag
* make NEW_TAG=v1.XX.1 tag
* edit docs/content/changelog.md
* make TAG=${NEW_TAG} doc
* git commit -a -v -m "Version ${NEW_TAG}"
* git tag -d ${NEW_TAG}
* git tag -s -m "Version ${NEW_TAG}" ${NEW_TAG}
* git push --tags -u origin ${BASE_TAG}-fixes
* Wait for builds to complete
* make BRANCH_PATH= TAG=${NEW_TAG} fetch_binaries
* make TAG=${NEW_TAG} tarball
* make TAG=${NEW_TAG} sign_upload
* make TAG=${NEW_TAG} check_sign
* make TAG=${NEW_TAG} upload
* make TAG=${NEW_TAG} upload_website
* make TAG=${NEW_TAG} upload_github
* NB this overwrites the current beta so we need to do this
* git co master
* make LAST_TAG=${NEW_TAG} startdev
* # cherry pick the changes to the changelog and VERSION
* git checkout ${BASE_TAG}-fixes VERSION docs/content/changelog.md
* git commit --amend
* git push
* make TAG=v1.43.1 doc
* git commit -a -v -m "Version v1.XX.1"
* git tag -d v1.XX.1
* git tag -s -m "Version v1.XX.1" v1.XX.1
* git push --tags -u origin v1.XX-fixes
* make BRANCH_PATH= TAG=v1.43.1 fetch_binaries
* make TAG=v1.43.1 tarball
* make TAG=v1.43.1 sign_upload
* make TAG=v1.43.1 check_sign
* make TAG=v1.43.1 upload
* make TAG=v1.43.1 upload_website
* make TAG=v1.43.1 upload_github
* NB this overwrites the current beta so after the release, rebuild the last travis build
* Announce!
## Making a manual build of docker
The rclone docker image should autobuild on docker hub. If it doesn't
or needs to be updated then rebuild like this.
```
docker build -t rclone/rclone:1.49.1 -t rclone/rclone:1.49 -t rclone/rclone:1 -t rclone/rclone:latest .
docker push rclone/rclone:1.49.1
docker push rclone/rclone:1.49
docker push rclone/rclone:1
docker push rclone/rclone:latest
```

VERSION

@@ -1 +0,0 @@
v1.50.2

backend/alias/alias.go

@@ -4,17 +4,17 @@ import (
"errors"
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fspath"
)
// Register with Fs
func init() {
fsi := &fs.RegInfo{
Name: "alias",
Description: "Alias for an existing remote",
Description: "Alias for a existing remote",
NewFs: NewFs,
Options: []fs.Option{{
Name: "remote",
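
For orientation, a rough sketch of how the `remote` option declared above reaches backend code; the decode pattern (`configstruct.Set` into an `Options` struct) matches the `NewFs` shown in the amazonclouddrive diff below, though this exact snippet is illustrative only:

```go
// Sketch in the style of the file above: how a registered Option is
// decoded. configstruct.Set fills Options from the config map by
// `config` tag. Names here are illustrative, not the real alias code.
package alias

import (
	"errors"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
)

// Options mirrors the single "remote" Option registered above.
type Options struct {
	Remote string `config:"remote"`
}

// NewFs shows the usual decode step at the top of a backend constructor.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	opt := new(Options)
	if err := configstruct.Set(m, opt); err != nil {
		return nil, err
	}
	_ = opt.Remote // the real backend dereferences the target remote here
	return nil, errors.New("sketch only")
}
```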

backend/alias/alias_internal_test.go

@@ -1,16 +1,15 @@
package alias
import (
"context"
"fmt"
"path"
"path/filepath"
"sort"
"testing"
_ "github.com/rclone/rclone/backend/local" // pull in test backend
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
_ "github.com/ncw/rclone/backend/local" // pull in test backend
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/stretchr/testify/require"
)
@@ -70,7 +69,7 @@ func TestNewFS(t *testing.T) {
prepare(t, remoteRoot)
f, err := fs.NewFs(fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
require.NoError(t, err, what)
gotEntries, err := f.List(context.Background(), test.fsList)
gotEntries, err := f.List(test.fsList)
require.NoError(t, err, what)
sort.Sort(gotEntries)

backend/all/all.go

@@ -2,38 +2,31 @@ package all
import (
// Active file systems
_ "github.com/rclone/rclone/backend/alias"
_ "github.com/rclone/rclone/backend/amazonclouddrive"
_ "github.com/rclone/rclone/backend/azureblob"
_ "github.com/rclone/rclone/backend/b2"
_ "github.com/rclone/rclone/backend/box"
_ "github.com/rclone/rclone/backend/cache"
_ "github.com/rclone/rclone/backend/chunker"
_ "github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/drive"
_ "github.com/rclone/rclone/backend/dropbox"
_ "github.com/rclone/rclone/backend/fichier"
_ "github.com/rclone/rclone/backend/ftp"
_ "github.com/rclone/rclone/backend/googlecloudstorage"
_ "github.com/rclone/rclone/backend/googlephotos"
_ "github.com/rclone/rclone/backend/http"
_ "github.com/rclone/rclone/backend/hubic"
_ "github.com/rclone/rclone/backend/jottacloud"
_ "github.com/rclone/rclone/backend/koofr"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/mailru"
_ "github.com/rclone/rclone/backend/mega"
_ "github.com/rclone/rclone/backend/onedrive"
_ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/putio"
_ "github.com/rclone/rclone/backend/qingstor"
_ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/swift"
_ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/webdav"
_ "github.com/rclone/rclone/backend/yandex"
_ "github.com/ncw/rclone/backend/alias"
_ "github.com/ncw/rclone/backend/amazonclouddrive"
_ "github.com/ncw/rclone/backend/azureblob"
_ "github.com/ncw/rclone/backend/b2"
_ "github.com/ncw/rclone/backend/box"
_ "github.com/ncw/rclone/backend/cache"
_ "github.com/ncw/rclone/backend/crypt"
_ "github.com/ncw/rclone/backend/drive"
_ "github.com/ncw/rclone/backend/dropbox"
_ "github.com/ncw/rclone/backend/ftp"
_ "github.com/ncw/rclone/backend/googlecloudstorage"
_ "github.com/ncw/rclone/backend/http"
_ "github.com/ncw/rclone/backend/hubic"
_ "github.com/ncw/rclone/backend/jottacloud"
_ "github.com/ncw/rclone/backend/koofr"
_ "github.com/ncw/rclone/backend/local"
_ "github.com/ncw/rclone/backend/mega"
_ "github.com/ncw/rclone/backend/onedrive"
_ "github.com/ncw/rclone/backend/opendrive"
_ "github.com/ncw/rclone/backend/pcloud"
_ "github.com/ncw/rclone/backend/qingstor"
_ "github.com/ncw/rclone/backend/s3"
_ "github.com/ncw/rclone/backend/sftp"
_ "github.com/ncw/rclone/backend/swift"
_ "github.com/ncw/rclone/backend/union"
_ "github.com/ncw/rclone/backend/webdav"
_ "github.com/ncw/rclone/backend/yandex"
)
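
These imports are blank because each backend registers itself in `init()`; importing for that side effect is all `fs.NewFs` needs. A minimal sketch under that assumption, using the single-argument `fs.NewFs` seen in the alias test above:

```go
// Sketch of the blank-import registration pattern used by backend/all.
package main

import (
	"fmt"

	_ "github.com/rclone/rclone/backend/local" // imported only so its init() registers "local"
	"github.com/rclone/rclone/fs"
)

func main() {
	// Works because the blank import above registered the local backend.
	f, err := fs.NewFs("/tmp")
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Name(), f.Root())
}
```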

backend/amazonclouddrive/amazonclouddrive.go

@@ -12,7 +12,6 @@ we ignore assets completely!
*/
import (
"context"
"encoding/json"
"fmt"
"io"
@@ -23,23 +22,22 @@ import (
"time"
acd "github.com/ncw/go-acd"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/dircache"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"golang.org/x/oauth2"
)
const (
enc = encodings.AmazonCloudDrive
folderKind = "FOLDER"
fileKind = "FILE"
statusAvailable = "AVAILABLE"
@@ -249,7 +247,6 @@ func filterRequest(req *http.Request) {
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -311,7 +308,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f.dirCache = dircache.New(root, f.trueRootID, f)
// Find the current root
err = f.dirCache.FindRoot(ctx, false)
err = f.dirCache.FindRoot(false)
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
@@ -319,12 +316,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
tempF.dirCache = dircache.New(newRoot, f.trueRootID, &tempF)
tempF.root = newRoot
// Make new Fs which is the parent
err = tempF.dirCache.FindRoot(ctx, false)
err = tempF.dirCache.FindRoot(false)
if err != nil {
// No root so return old f
return f, nil
}
_, err := tempF.newObjectWithInfo(ctx, remote, nil)
_, err := tempF.newObjectWithInfo(remote, nil)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
@@ -334,7 +331,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/rclone/rclone/issues/2182
// See https://github.com/ncw/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
// return an error with an fs which points to the parent
@@ -356,7 +353,7 @@ func (f *Fs) getRootInfo() (rootInfo *acd.Folder, err error) {
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *acd.Node) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(remote string, info *acd.Node) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -365,7 +362,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *acd.Nod
// Set info but not meta
o.info = info
} else {
err := o.readMetaData(ctx) // reads info and meta, returning an error
err := o.readMetaData() // reads info and meta, returning an error
if err != nil {
return nil, err
}
@@ -375,18 +372,18 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *acd.Nod
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
//fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
folder := acd.FolderFromId(pathID, f.c.Nodes)
var resp *http.Response
var subFolder *acd.Folder
err = f.pacer.Call(func() (bool, error) {
subFolder, resp, err = folder.GetFolder(enc.FromStandardName(leaf))
subFolder, resp, err = folder.GetFolder(leaf)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -407,13 +404,13 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
}
// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
//fmt.Printf("CreateDir(%q, %q)\n", pathID, leaf)
folder := acd.FolderFromId(pathID, f.c.Nodes)
var resp *http.Response
var info *acd.Folder
err = f.pacer.Call(func() (bool, error) {
info, resp, err = folder.CreateFolder(enc.FromStandardName(leaf))
info, resp, err = folder.CreateFolder(leaf)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -481,7 +478,6 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
if !hasValidParent {
continue
}
*node.Name = enc.ToStandardName(*node.Name)
// Store the nodes up in case we have to retry the listing
out = append(out, node)
}
@@ -506,12 +502,12 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(ctx, false)
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
directoryID, err := f.dirCache.FindDir(dir, false)
if err != nil {
return nil, err
}
@@ -529,7 +525,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
d := fs.NewDir(remote, when).SetID(*node.Id)
entries = append(entries, d)
case fileKind:
o, err := f.newObjectWithInfo(ctx, remote, node)
o, err := f.newObjectWithInfo(remote, node)
if err != nil {
iErr = err
return true
@@ -573,7 +569,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// At the end of large uploads. The speculation is that the timeout
// is waiting for the sha1 hashing to complete and the file may well
// be properly uploaded.
func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
// Return if no error - all is well
if inErr == nil {
return false, inInfo, inErr
@@ -613,7 +609,7 @@ func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader,
fs.Debugf(src, "Error detected after finished upload - waiting to see if object was uploaded correctly: %v (%q)", inErr, httpStatus)
remote := src.Remote()
for i := 1; i <= retries; i++ {
o, err := f.NewObject(ctx, remote)
o, err := f.NewObject(remote)
if err == fs.ErrorObjectNotFound {
fs.Debugf(src, "Object not found - waiting (%d/%d)", i, retries)
} else if err != nil {
@@ -639,7 +635,7 @@ func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader,
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
remote := src.Remote()
size := src.Size()
// Temporary Object under construction
@@ -648,17 +644,17 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
remote: remote,
}
// Check if object already exists
err := o.readMetaData(ctx)
err := o.readMetaData()
switch err {
case nil:
return o, o.Update(ctx, in, src, options...)
return o, o.Update(in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
default:
return nil, err
}
// If not create it
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
leaf, directoryID, err := f.dirCache.FindRootAndPath(remote, true)
if err != nil {
return nil, err
}
@@ -671,10 +667,10 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
err = f.pacer.CallNoRetry(func() (bool, error) {
start := time.Now()
f.tokenRenewer.Start()
info, resp, err = folder.Put(in, enc.FromStandardName(leaf))
info, resp, err = folder.Put(in, leaf)
f.tokenRenewer.Stop()
var ok bool
ok, info, err = f.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
ok, info, err = f.checkUpload(resp, in, src, info, err, time.Since(start))
if ok {
return false, nil
}
@@ -688,13 +684,13 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
err := f.dirCache.FindRoot(ctx, true)
func (f *Fs) Mkdir(dir string) error {
err := f.dirCache.FindRoot(true)
if err != nil {
return err
}
if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true)
_, err = f.dirCache.FindDir(dir, true)
}
return err
}
@@ -708,7 +704,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
// go test -v -run '^Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|FsMove)$'
srcObj, ok := src.(*Object)
if !ok {
@@ -717,15 +713,15 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// create the destination directory if necessary
err := f.dirCache.FindRoot(ctx, true)
err := f.dirCache.FindRoot(true)
if err != nil {
return nil, err
}
srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(srcObj.remote, false)
if err != nil {
return nil, err
}
dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, remote, true)
dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(remote, true)
if err != nil {
return nil, err
}
@@ -741,12 +737,12 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
srcErr, dstErr error
)
for i := 1; i <= fs.Config.LowLevelRetries; i++ {
_, srcErr = srcObj.fs.NewObject(ctx, srcObj.remote) // try reading the object
_, srcErr = srcObj.fs.NewObject(srcObj.remote) // try reading the object
if srcErr != nil && srcErr != fs.ErrorObjectNotFound {
// exit if error on source
return nil, srcErr
}
dstObj, dstErr = f.NewObject(ctx, remote)
dstObj, dstErr = f.NewObject(remote)
if dstErr != nil && dstErr != fs.ErrorObjectNotFound {
// exit if error on dst
return nil, dstErr
@@ -775,7 +771,7 @@ func (f *Fs) DirCacheFlush() {
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(src, "DirMove error: not same remote type")
@@ -791,14 +787,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// find the root src directory
err = srcFs.dirCache.FindRoot(ctx, false)
err = srcFs.dirCache.FindRoot(false)
if err != nil {
return err
}
// find the root dst directory
if dstRemote != "" {
err = f.dirCache.FindRoot(ctx, true)
err = f.dirCache.FindRoot(true)
if err != nil {
return err
}
@@ -813,14 +809,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
if dstRemote == "" {
findPath = f.root
}
dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, findPath, true)
dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(findPath, true)
if err != nil {
return err
}
// Check destination does not exist
if dstRemote != "" {
_, err = f.dirCache.FindDir(ctx, dstRemote, false)
_, err = f.dirCache.FindDir(dstRemote, false)
if err == fs.ErrorDirNotFound {
// OK
} else if err != nil {
@@ -836,7 +832,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
if srcRemote == "" {
srcDirectoryID, err = srcFs.dirCache.RootParentID()
} else {
_, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, findPath, false)
_, srcDirectoryID, err = srcFs.dirCache.FindPath(findPath, false)
}
if err != nil {
return err
@@ -844,7 +840,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
srcLeaf, _ := dircache.SplitPath(srcPath)
// Find ID of src
srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
if err != nil {
return err
}
@@ -877,17 +873,17 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
func (f *Fs) purgeCheck(dir string, check bool) error {
root := path.Join(f.root, dir)
if root == "" {
return errors.New("can't purge root directory")
}
dc := f.dirCache
err := dc.FindRoot(ctx, false)
err := dc.FindRoot(false)
if err != nil {
return err
}
rootID, err := dc.FindDir(ctx, dir, false)
rootID, err := dc.FindDir(dir, false)
if err != nil {
return err
}
@@ -936,8 +932,8 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, true)
func (f *Fs) Rmdir(dir string) error {
return f.purgeCheck(dir, true)
}
// Precision return the precision of this Fs
@@ -959,7 +955,7 @@ func (f *Fs) Hashes() hash.Set {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
//func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
//func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
// srcObj, ok := src.(*Object)
// if !ok {
// fs.Debugf(src, "Can't copy - not same remote type")
@@ -970,7 +966,7 @@ func (f *Fs) Hashes() hash.Set {
// if err != nil {
// return nil, err
// }
// return f.NewObject(ctx, remote), nil
// return f.NewObject(remote), nil
//}
// Purge deletes all the files and the container
@@ -978,8 +974,8 @@ func (f *Fs) Hashes() hash.Set {
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
func (f *Fs) Purge() error {
return f.purgeCheck("", false)
}
// ------------------------------------------------------------
@@ -1003,7 +999,7 @@ func (o *Object) Remote() string {
}
// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
@@ -1026,11 +1022,11 @@ func (o *Object) Size() int64 {
// it also sets the info
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (o *Object) readMetaData(ctx context.Context) (err error) {
func (o *Object) readMetaData() (err error) {
if o.info != nil {
return nil
}
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, o.remote, false)
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(o.remote, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return fs.ErrorObjectNotFound
@@ -1041,7 +1037,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
var resp *http.Response
var info *acd.File
err = o.fs.pacer.Call(func() (bool, error) {
info, resp, err = folder.GetFile(enc.FromStandardName(leaf))
info, resp, err = folder.GetFile(leaf)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
@@ -1059,8 +1055,8 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData(ctx)
func (o *Object) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
fs.Debugf(o, "Failed to read metadata: %v", err)
return time.Now()
@@ -1074,7 +1070,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
func (o *Object) SetModTime(modTime time.Time) error {
// FIXME not implemented
return fs.ErrorCantSetModTime
}
@@ -1085,7 +1081,7 @@ func (o *Object) Storable() bool {
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
bigObject := o.Size() >= int64(o.fs.opt.TempLinkThreshold)
if bigObject {
fs.Debugf(o, "Downloading large object via tempLink")
@@ -1097,7 +1093,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if !bigObject {
in, resp, err = file.OpenHeaders(headers)
} else {
in, resp, err = file.OpenTempURLHeaders(o.fs.noAuthClient, headers)
in, resp, err = file.OpenTempURLHeaders(rest.ClientWithHeaderReset(o.fs.noAuthClient, headers), headers)
}
return o.fs.shouldRetry(resp, err)
})
@@ -1107,7 +1103,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
file := acd.File{Node: o.info}
var info *acd.File
var resp *http.Response
@@ -1118,7 +1114,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
info, resp, err = file.Overwrite(in)
o.fs.tokenRenewer.Stop()
var ok bool
ok, info, err = o.fs.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
ok, info, err = o.fs.checkUpload(resp, in, src, info, err, time.Since(start))
if ok {
return false, nil
}
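// A minimal sketch of the recovery pattern above: when Overwrite times out,
// the upload may still have completed server-side, so checkUpload probes the
// remote before reporting failure. finishUpload, probe and errTimeout below
// are illustrative names, not rclone API.
package main

import (
	"errors"
	"fmt"
	"time"
)

var errTimeout = errors.New("408 request timeout")

func finishUpload(uploadErr error, probe func() (size int64, ok bool), wantSize int64, wait time.Duration) error {
	if uploadErr == nil || !errors.Is(uploadErr, errTimeout) {
		return uploadErr // success, or an error a probe can't rescue
	}
	time.Sleep(wait) // give the server a moment to settle
	if size, ok := probe(); ok && size == wantSize {
		return nil // the upload actually landed
	}
	return uploadErr
}

func main() {
	err := finishUpload(errTimeout, func() (int64, bool) { return 5, true }, 5, 0)
	fmt.Println(err) // <nil>
}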
@@ -1143,7 +1139,7 @@ func (f *Fs) removeNode(info *acd.Node) error {
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
func (o *Object) Remove() error {
return o.fs.removeNode(o.info)
}
@@ -1161,7 +1157,7 @@ func (f *Fs) restoreNode(info *acd.Node) (newInfo *acd.Node, err error) {
func (f *Fs) renameNode(info *acd.Node, newName string) (newInfo *acd.Node, err error) {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
newInfo, resp, err = info.Rename(enc.FromStandardName(newName))
newInfo, resp, err = info.Rename(newName)
return f.shouldRetry(resp, err)
})
return newInfo, err
@@ -1265,7 +1261,7 @@ OnConflict:
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
func (o *Object) MimeType() string {
if o.info.ContentProperties != nil && o.info.ContentProperties.ContentType != nil {
return *o.info.ContentProperties.ContentType
}
@@ -1278,7 +1274,7 @@ func (o *Object) MimeType(ctx context.Context) string {
// Automatically restarts itself in case of unexpected behaviour of the remote.
//
// Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
checkpoint := f.opt.Checkpoint
go func() {
@@ -1357,11 +1353,10 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoin
if len(node.Parents) > 0 {
if path, ok := f.dirCache.GetInv(node.Parents[0]); ok {
// and append the drive file name to compute the full file name
name := enc.ToStandardName(*node.Name)
if len(path) > 0 {
path = path + "/" + name
path = path + "/" + *node.Name
} else {
path = name
path = *node.Name
}
// this will now clear the actual file too
pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
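// A standalone sketch of the path composition above: the cached parent path
// is empty for the root, in which case the node name alone is the full path.
// joinNotifyPath is an illustrative helper, not rclone API.
package main

import "fmt"

func joinNotifyPath(parentPath, name string) string {
	if len(parentPath) > 0 {
		return parentPath + "/" + name
	}
	return name
}

func main() {
	fmt.Println(joinNotifyPath("", "file.txt"))    // file.txt
	fmt.Println(joinNotifyPath("a/b", "file.txt")) // a/b/file.txt
}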

View File

@@ -7,9 +7,9 @@ package amazonclouddrive_test
import (
"testing"
"github.com/rclone/rclone/backend/amazonclouddrive"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
"github.com/ncw/rclone/backend/amazonclouddrive"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote

View File

@@ -16,6 +16,7 @@ import (
"net/http"
"net/url"
"path"
"regexp"
"strconv"
"strings"
"sync"
@@ -23,18 +24,16 @@ import (
"github.com/Azure/azure-pipeline-go/pipeline"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/pacer"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/pacer"
)
const (
@@ -54,15 +53,8 @@ const (
maxUploadCutoff = 256 * fs.MebiByte
defaultAccessTier = azblob.AccessTierNone
maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
// Default storage account, key and blob endpoint for emulator support,
// though it is a base64 key checked in here, it is publicly available secret.
emulatorAccount = "devstoreaccount1"
emulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1"
)
const enc = encodings.AzureBlob
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -71,17 +63,13 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "account",
Help: "Storage Account Name (leave blank to use SAS URL or Emulator)",
Help: "Storage Account Name (leave blank to use connection string or SAS URL)",
}, {
Name: "key",
Help: "Storage Account Key (leave blank to use SAS URL or Emulator)",
Help: "Storage Account Key (leave blank to use connection string or SAS URL)",
}, {
Name: "sas_url",
Help: "SAS URL for container level access only\n(leave blank if using account/key or Emulator)",
}, {
Name: "use_emulator",
Help: "Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint)",
Default: false,
Help: "SAS URL for container level access only\n(leave blank if using account/key or connection string)",
}, {
Name: "endpoint",
Help: "Endpoint for the service\nLeave blank normally.",
@@ -141,25 +129,23 @@ type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
ListChunkSize uint `config:"list_chunk"`
AccessTier string `config:"access_tier"`
UseEmulator bool `config:"use_emulator"`
}
// Fs represents a remote azure server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed config options
features *fs.Features // optional features
client *http.Client // http client we are using
svcURL *azblob.ServiceURL // reference to serviceURL
cntURLcacheMu sync.Mutex // mutex to protect cntURLcache
cntURLcache map[string]*azblob.ContainerURL // reference to containerURL per container
rootContainer string // container part of root (if any)
rootDirectory string // directory part of root (if any)
isLimited bool // if limited to one container
cache *bucket.Cache // cache for container creation status
pacer *fs.Pacer // To pace and retry the API calls
uploadToken *pacer.TokenDispenser // control concurrency
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed config options
features *fs.Features // optional features
client *http.Client // http client we are using
svcURL *azblob.ServiceURL // reference to serviceURL
cntURL *azblob.ContainerURL // reference to containerURL
container string // the container we are working on
containerOKMu sync.Mutex // mutex to protect container OK
containerOK bool // true if we have created the container
containerDeleted bool // true if we have deleted the container
pacer *fs.Pacer // To pace and retry the API calls
uploadToken *pacer.TokenDispenser // control concurrency
}
// Object describes a azure object
@@ -183,18 +169,18 @@ func (f *Fs) Name() string {
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
if f.root == "" {
return f.container
}
return f.container + "/" + f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
if f.rootContainer == "" {
return fmt.Sprintf("Azure root")
if f.root == "" {
return fmt.Sprintf("Azure container %s", f.container)
}
if f.rootDirectory == "" {
return fmt.Sprintf("Azure container %s", f.rootContainer)
}
return fmt.Sprintf("Azure container %s path %s", f.rootContainer, f.rootDirectory)
return fmt.Sprintf("Azure container %s path %s", f.container, f.root)
}
// Features returns the optional features of this Fs
@@ -202,24 +188,21 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
// Pattern to match a azure path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
// parseParse parses a azure 'url'
func parsePath(path string) (container, directory string, err error) {
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = errors.Errorf("couldn't find container in azure path %q", path)
} else {
container, directory = parts[1], parts[2]
directory = strings.Trim(directory, "/")
}
return
}
// split returns container and containerPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (containerName, containerPath string) {
containerName, containerPath = bucket.Split(path.Join(f.root, rootRelativePath))
return enc.FromStandardName(containerName), enc.FromStandardPath(containerPath)
}
// split returns container and containerPath from the object
func (o *Object) split() (container, containerPath string) {
return o.fs.split(o.remote)
}
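// A hedged sketch of what the split helpers above rely on: bucket.Split
// treats the first path segment as the container and the remainder as the
// path within it. splitContainer is an illustrative stand-in, assuming that
// behaviour.
package main

import (
	"fmt"
	"strings"
)

func splitContainer(absPath string) (container, containerPath string) {
	absPath = strings.Trim(absPath, "/")
	if i := strings.IndexRune(absPath, '/'); i >= 0 {
		return absPath[:i], absPath[i+1:]
	}
	return absPath, ""
}

func main() {
	fmt.Println(splitContainer("mycontainer/dir/file.txt")) // mycontainer dir/file.txt
	fmt.Println(splitContainer("mycontainer"))              // mycontainer
}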
// validateAccessTier checks if azureblob supports user supplied tier
func validateAccessTier(tier string) bool {
switch tier {
@@ -312,9 +295,6 @@ func httpClientFactory(client *http.Client) pipeline.Factory {
//
// this code was copied from azblob.NewPipeline
func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline.Pipeline {
// Don't log stuff to syslog/Windows Event log
pipeline.SetForceLogEnabled(false)
// Closest to API goes first; closest to the wire goes last
factories := []pipeline.Factory{
azblob.NewTelemetryPolicyFactory(o.Telemetry),
@@ -327,15 +307,8 @@ func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline
return pipeline.NewPipeline(factories, pipeline.Options{HTTPSender: httpClientFactory(f.client), Log: o.Log})
}
// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
f.root = parsePath(root)
f.rootContainer, f.rootDirectory = bucket.Split(f.root)
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -354,6 +327,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if opt.ListChunkSize > maxListChunkSize {
return nil, errors.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
}
container, directory, err := parsePath(root)
if err != nil {
return nil, err
}
if opt.Endpoint == "" {
opt.Endpoint = storageDefaultBaseURL
}
@@ -368,38 +345,26 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f := &Fs{
name: name,
opt: *opt,
container: container,
root: directory,
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
client: fshttp.NewClient(fs.Config),
cache: bucket.NewCache(),
cntURLcache: make(map[string]*azblob.ContainerURL, 1),
}
f.setRoot(root)
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
SetTier: true,
GetTier: true,
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
SetTier: true,
GetTier: true,
}).Fill(f)
var (
u *url.URL
serviceURL azblob.ServiceURL
u *url.URL
serviceURL azblob.ServiceURL
containerURL azblob.ContainerURL
)
switch {
case opt.UseEmulator:
credential, err := azblob.NewSharedKeyCredential(emulatorAccount, emulatorAccountKey)
if err != nil {
return nil, errors.Wrapf(err, "Failed to parse credentials")
}
u, err = url.Parse(emulatorBlobEndpoint)
if err != nil {
return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
}
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
serviceURL = azblob.NewServiceURL(*u, pipeline)
case opt.Account != "" && opt.Key != "":
credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
if err != nil {
@@ -412,6 +377,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
serviceURL = azblob.NewServiceURL(*u, pipeline)
containerURL = serviceURL.NewContainerURL(container)
case opt.SASURL != "":
u, err = url.Parse(opt.SASURL)
if err != nil {
@@ -422,30 +388,38 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Check if we have container level SAS or account level sas
parts := azblob.NewBlobURLParts(*u)
if parts.ContainerName != "" {
if f.rootContainer != "" && parts.ContainerName != f.rootContainer {
if container != "" && parts.ContainerName != container {
return nil, errors.New("Container name in SAS URL and container provided in command do not match")
}
containerURL := azblob.NewContainerURL(*u, pipeline)
f.cntURLcache[parts.ContainerName] = &containerURL
f.isLimited = true
f.container = parts.ContainerName
containerURL = azblob.NewContainerURL(*u, pipeline)
} else {
serviceURL = azblob.NewServiceURL(*u, pipeline)
containerURL = serviceURL.NewContainerURL(container)
}
default:
return nil, errors.New("Need account+key or connectionString or sasURL")
}
f.svcURL = &serviceURL
f.cntURL = &containerURL
if f.rootContainer != "" && f.rootDirectory != "" {
if f.root != "" {
f.root += "/"
// Check to see if the (container,directory) is actually an existing file
oldRoot := f.root
newRoot, leaf := path.Split(oldRoot)
f.setRoot(newRoot)
_, err := f.NewObject(ctx, leaf)
remote := path.Base(directory)
f.root = path.Dir(directory)
if f.root == "." {
f.root = ""
} else {
f.root += "/"
}
_, err := f.NewObject(remote)
if err != nil {
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
// File doesn't exist or is a directory so return old f
f.setRoot(oldRoot)
f.root = oldRoot
return f, nil
}
return nil, err
@@ -456,20 +430,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return f, nil
}
// return the container URL for the container passed in
func (f *Fs) cntURL(container string) (containerURL *azblob.ContainerURL) {
f.cntURLcacheMu.Lock()
defer f.cntURLcacheMu.Unlock()
var ok bool
if containerURL, ok = f.cntURLcache[container]; !ok {
cntURL := f.svcURL.NewContainerURL(container)
containerURL = &cntURL
f.cntURLcache[container] = containerURL
}
return containerURL
}
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
@@ -494,13 +454,13 @@ func (f *Fs) newObjectWithInfo(remote string, info *azblob.BlobItem) (fs.Object,
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}
// getBlobReference creates an empty blob reference with no metadata
func (f *Fs) getBlobReference(container, containerPath string) azblob.BlobURL {
return f.cntURL(container).NewBlobURL(containerPath)
func (f *Fs) getBlobReference(remote string) azblob.BlobURL {
return f.cntURL.NewBlobURL(f.root + remote)
}
// updateMetadataWithModTime adds the modTime passed in to o.meta.
@@ -536,18 +496,16 @@ type listFn func(remote string, object *azblob.BlobItem, isDirectory bool) error
// the container and root supplied
//
// dir is the starting directory, "" for root
//
// The remote has prefix removed from it and if addContainer is set then
// it adds the container to the start.
func (f *Fs) list(ctx context.Context, container, directory, prefix string, addContainer bool, recurse bool, maxResults uint, fn listFn) error {
if f.cache.IsDeleted(container) {
func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
f.containerOKMu.Lock()
deleted := f.containerDeleted
f.containerOKMu.Unlock()
if deleted {
return fs.ErrorDirNotFound
}
if prefix != "" {
prefix += "/"
}
if directory != "" {
directory += "/"
root := f.root
if dir != "" {
root += dir + "/"
}
delimiter := ""
if !recurse {
@@ -562,14 +520,16 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
UncommittedBlobs: false,
Deleted: false,
},
Prefix: directory,
Prefix: root,
MaxResults: int32(maxResults),
}
ctx := context.Background()
directoryMarkers := map[string]struct{}{}
for marker := (azblob.Marker{}); marker.NotDone(); {
var response *azblob.ListBlobsHierarchySegmentResponse
err := f.pacer.Call(func() (bool, error) {
var err error
response, err = f.cntURL(container).ListBlobsHierarchySegment(ctx, marker, delimiter, options)
response, err = f.cntURL.ListBlobsHierarchySegment(ctx, marker, delimiter, options)
return f.shouldRetry(err)
})
@@ -582,24 +542,33 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
}
// Advance marker to next
marker = response.NextMarker
for i := range response.Segment.BlobItems {
file := &response.Segment.BlobItems[i]
// Finish if file name no longer has prefix
// if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
// return nil
// }
remote := enc.ToStandardPath(file.Name)
if !strings.HasPrefix(remote, prefix) {
fs.Debugf(f, "Odd name received %q", remote)
if !strings.HasPrefix(file.Name, f.root) {
fs.Debugf(f, "Odd name received %q", file.Name)
continue
}
remote = remote[len(prefix):]
remote := file.Name[len(f.root):]
if isDirectoryMarker(*file.Properties.ContentLength, file.Metadata, remote) {
if strings.HasSuffix(remote, "/") {
remote = remote[:len(remote)-1]
}
err = fn(remote, file, true)
if err != nil {
return err
}
// Keep track of directory markers. If recursing then
// there will be no Prefixes so no need to keep track
if !recurse {
directoryMarkers[remote] = struct{}{}
}
continue // skip directory marker
}
if addContainer {
remote = path.Join(container, remote)
}
// Send object
err = fn(remote, file, false)
if err != nil {
@@ -609,14 +578,14 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
// Send the subdirectories
for _, remote := range response.Segment.BlobPrefixes {
remote := strings.TrimRight(remote.Name, "/")
remote = enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) {
if !strings.HasPrefix(remote, f.root) {
fs.Debugf(f, "Odd directory name received %q", remote)
continue
}
remote = remote[len(prefix):]
if addContainer {
remote = path.Join(container, remote)
remote = remote[len(f.root):]
// Don't send if already sent as a directory marker
if _, found := directoryMarkers[remote]; found {
continue
}
// Send object
err = fn(remote, nil, true)
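// A self-contained sketch of the deduplication in the listing loops above: a
// zero-length directory-marker blob and a listing prefix can both name the
// same directory, so directories already sent from markers are remembered
// and the matching prefixes skipped. Sample names are illustrative.
package main

import "fmt"

func main() {
	markers := []string{"photos", "docs"}           // from directory-marker blobs
	prefixes := []string{"photos", "docs", "music"} // from BlobPrefixes

	sent := map[string]struct{}{}
	for _, dir := range markers {
		sent[dir] = struct{}{}
		fmt.Println("dir:", dir)
	}
	for _, dir := range prefixes {
		if _, found := sent[dir]; found {
			continue // already sent as a directory marker
		}
		fmt.Println("dir:", dir) // prints only "music"
	}
}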
@@ -641,9 +610,19 @@ func (f *Fs) itemToDirEntry(remote string, object *azblob.BlobItem, isDirectory
return o, nil
}
// mark the container as being OK
func (f *Fs) markContainerOK() {
if f.container != "" {
f.containerOKMu.Lock()
f.containerOK = true
f.containerDeleted = false
f.containerOKMu.Unlock()
}
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
err = f.list(ctx, container, directory, prefix, addContainer, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
err = f.list(dir, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
@@ -657,24 +636,17 @@ func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, a
return nil, err
}
// container must be present if listing succeeded
f.cache.MarkOK(container)
f.markContainerOK()
return entries, nil
}
// listContainers returns all the containers to out
func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err error) {
if f.isLimited {
f.cntURLcacheMu.Lock()
for container := range f.cntURLcache {
d := fs.NewDir(container, time.Time{})
entries = append(entries, d)
}
f.cntURLcacheMu.Unlock()
return entries, nil
func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
if dir != "" {
return nil, fs.ErrorListBucketRequired
}
err = f.listContainersToFn(func(container *azblob.ContainerItem) error {
d := fs.NewDir(enc.ToStandardName(container.Name), container.Properties.LastModified)
f.cache.MarkOK(container.Name)
d := fs.NewDir(container.Name, container.Properties.LastModified)
entries = append(entries, d)
return nil
})
@@ -693,15 +665,11 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
container, directory := f.split(dir)
if container == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
}
return f.listContainers(ctx)
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
if f.container == "" {
return f.listContainers(dir)
}
return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "")
return f.listDir(dir)
}
// ListR lists the objects and directories of the Fs starting
@@ -720,44 +688,23 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
//
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
container, directory := f.split(dir)
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
if f.container == "" {
return fs.ErrorListBucketRequired
}
list := walk.NewListRHelper(callback)
listR := func(container, directory, prefix string, addContainer bool) error {
return f.list(ctx, container, directory, prefix, addContainer, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
return list.Add(entry)
})
}
if container == "" {
entries, err := f.listContainers(ctx)
err = f.list(dir, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
container := entry.Remote()
err = listR(container, "", f.rootDirectory, true)
if err != nil {
return err
}
// container must be present if listing succeeded
f.cache.MarkOK(container)
}
} else {
err = listR(container, directory, f.rootDirectory, f.rootContainer == "")
if err != nil {
return err
}
// container must be present if listing succeeded
f.cache.MarkOK(container)
return list.Add(entry)
})
if err != nil {
return err
}
// container must be present if listing succeeded
f.markContainerOK()
return list.Flush()
}
@@ -798,52 +745,95 @@ func (f *Fs) listContainersToFn(fn listContainerFn) error {
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction
fs := &Object{
fs: f,
remote: src.Remote(),
}
return fs, fs.Update(ctx, in, src, options...)
return fs, fs.Update(in, src, options...)
}
// Check if the container exists
//
// NB this can return incorrect results if called immediately after container deletion
func (f *Fs) dirExists() (bool, error) {
options := azblob.ListBlobsSegmentOptions{
Details: azblob.BlobListingDetails{
Copy: false,
Metadata: false,
Snapshots: false,
UncommittedBlobs: false,
Deleted: false,
},
MaxResults: 1,
}
err := f.pacer.Call(func() (bool, error) {
ctx := context.Background()
_, err := f.cntURL.ListBlobsHierarchySegment(ctx, azblob.Marker{}, "", options)
return f.shouldRetry(err)
})
if err == nil {
return true, nil
}
// Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
return false, nil
}
return false, err
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
container, _ := f.split(dir)
return f.makeContainer(ctx, container)
}
func (f *Fs) Mkdir(dir string) error {
f.containerOKMu.Lock()
defer f.containerOKMu.Unlock()
if f.containerOK {
return nil
}
if !f.containerDeleted {
exists, err := f.dirExists()
if err == nil {
f.containerOK = exists
}
if err != nil || exists {
return err
}
}
// makeContainer creates the container if it doesn't exist
func (f *Fs) makeContainer(ctx context.Context, container string) error {
return f.cache.Create(container, func() error {
// now try to create the container
return f.pacer.Call(func() (bool, error) {
_, err := f.cntURL(container).Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
if err != nil {
if storageErr, ok := err.(azblob.StorageError); ok {
switch storageErr.ServiceCode() {
case azblob.ServiceCodeContainerAlreadyExists:
return false, nil
case azblob.ServiceCodeContainerBeingDeleted:
// From https://docs.microsoft.com/en-us/rest/api/storageservices/delete-container
// When a container is deleted, a container with the same name cannot be created
// for at least 30 seconds; the container may not be available for more than 30
// seconds if the service is still processing the request.
time.Sleep(6 * time.Second) // default 10 retries will be 60 seconds
f.cache.MarkDeleted(container)
return true, err
}
// now try to create the container
err := f.pacer.Call(func() (bool, error) {
ctx := context.Background()
_, err := f.cntURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
if err != nil {
if storageErr, ok := err.(azblob.StorageError); ok {
switch storageErr.ServiceCode() {
case azblob.ServiceCodeContainerAlreadyExists:
f.containerOK = true
return false, nil
case azblob.ServiceCodeContainerBeingDeleted:
// From https://docs.microsoft.com/en-us/rest/api/storageservices/delete-container
// When a container is deleted, a container with the same name cannot be created
// for at least 30 seconds; the container may not be available for more than 30
// seconds if the service is still processing the request.
time.Sleep(6 * time.Second) // default 10 retries will be 60 seconds
f.containerDeleted = true
return true, err
}
}
return f.shouldRetry(err)
})
}, nil)
}
return f.shouldRetry(err)
})
if err == nil {
f.containerOK = true
f.containerDeleted = false
}
return errors.Wrap(err, "failed to make container")
}
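// Both versions above retry container creation while a recent delete
// settles: Azure can refuse the name for ~30 seconds after a delete, so
// sleeping 6 seconds across the default 10 retries covers about 60 seconds.
// A minimal sketch of that loop; createWithRetry and errBeingDeleted are
// illustrative names, and create stands in for ContainerURL.Create.
package main

import (
	"errors"
	"fmt"
	"time"
)

var errBeingDeleted = errors.New("ContainerBeingDeleted")

func createWithRetry(create func() error, retries int) error {
	var err error
	for i := 0; i < retries; i++ {
		if err = create(); !errors.Is(err, errBeingDeleted) {
			return err // success, or an error retrying won't fix
		}
		time.Sleep(6 * time.Second) // 10 retries ≈ 60s > the ~30s window
	}
	return err
}

func main() {
	attempts := 0
	err := createWithRetry(func() error {
		attempts++
		if attempts < 3 {
			return errBeingDeleted
		}
		return nil
	}, 10)
	fmt.Println(attempts, err) // 3 <nil>
}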
// isEmpty checks to see if a given (container, directory) is empty and returns an error if not
func (f *Fs) isEmpty(ctx context.Context, container, directory string) (err error) {
// isEmpty checks to see if a given directory is empty and returns an error if not
func (f *Fs) isEmpty(dir string) (err error) {
empty := true
err = f.list(ctx, container, directory, f.rootDirectory, f.rootContainer == "", true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
err = f.list(dir, true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
empty = false
return nil
})
@@ -858,42 +848,47 @@ func (f *Fs) isEmpty(ctx context.Context, container, directory string) (err erro
// deleteContainer deletes the container. It can delete a full
// container so use isEmpty if you don't want that.
func (f *Fs) deleteContainer(ctx context.Context, container string) error {
return f.cache.Remove(container, func() error {
options := azblob.ContainerAccessConditions{}
return f.pacer.Call(func() (bool, error) {
_, err := f.cntURL(container).GetProperties(ctx, azblob.LeaseAccessConditions{})
if err == nil {
_, err = f.cntURL(container).Delete(ctx, options)
}
func (f *Fs) deleteContainer() error {
f.containerOKMu.Lock()
defer f.containerOKMu.Unlock()
options := azblob.ContainerAccessConditions{}
ctx := context.Background()
err := f.pacer.Call(func() (bool, error) {
_, err := f.cntURL.GetProperties(ctx, azblob.LeaseAccessConditions{})
if err == nil {
_, err = f.cntURL.Delete(ctx, options)
}
if err != nil {
// Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
return false, fs.ErrorDirNotFound
}
return f.shouldRetry(err)
if err != nil {
// Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
return false, fs.ErrorDirNotFound
}
return f.shouldRetry(err)
})
}
return f.shouldRetry(err)
})
if err == nil {
f.containerOK = false
f.containerDeleted = true
}
return errors.Wrap(err, "failed to delete container")
}
// Rmdir deletes the container if the fs is at the root
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
container, directory := f.split(dir)
if container == "" || directory != "" {
return nil
}
err := f.isEmpty(ctx, container, directory)
func (f *Fs) Rmdir(dir string) error {
err := f.isEmpty(dir)
if err != nil {
return err
}
return f.deleteContainer(ctx, container)
if f.root != "" || dir != "" {
return nil
}
return f.deleteContainer()
}
// Precision of the remote
@@ -907,14 +902,13 @@ func (f *Fs) Hashes() hash.Set {
}
// Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge(ctx context.Context) error {
func (f *Fs) Purge() error {
dir := "" // forward compat!
container, directory := f.split(dir)
if container == "" || directory != "" {
// Delegate to caller if not root of a container
if f.root != "" || dir != "" {
// Delegate to caller if not root container
return fs.ErrorCantPurge
}
return f.deleteContainer(ctx, container)
return f.deleteContainer()
}
// Copy src to this remote using server side copy operations.
@@ -926,9 +920,8 @@ func (f *Fs) Purge(ctx context.Context) error {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstContainer, dstPath := f.split(remote)
err := f.makeContainer(ctx, dstContainer)
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
err := f.Mkdir("")
if err != nil {
return nil, err
}
@@ -937,7 +930,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
dstBlobURL := f.getBlobReference(dstContainer, dstPath)
dstBlobURL := f.getBlobReference(remote)
srcBlobURL := srcObj.getBlobReference()
source, err := url.Parse(srcBlobURL.String())
@@ -946,6 +939,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
options := azblob.BlobAccessConditions{}
ctx := context.Background()
var startCopy *azblob.BlobStartCopyFromURLResponse
err = f.pacer.Call(func() (bool, error) {
@@ -966,7 +960,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
copyStatus = getMetadata.CopyStatus()
}
return f.NewObject(ctx, remote)
return f.NewObject(remote)
}
// ------------------------------------------------------------
@@ -990,7 +984,7 @@ func (o *Object) Remote() string {
}
// Hash returns the MD5 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
@@ -1070,8 +1064,7 @@ func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItem) (err error) {
// getBlobReference creates an empty blob reference with no metadata
func (o *Object) getBlobReference() azblob.BlobURL {
container, directory := o.split()
return o.fs.getBlobReference(container, directory)
return o.fs.getBlobReference(o.remote)
}
// clearMetaData clears enough metadata so readMetaData will re-read it
@@ -1123,7 +1116,7 @@ func (o *Object) parseTimeString(timeString string) (err error) {
fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
return err
}
o.modTime = time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC()
o.modTime = time.Unix(unixMilliseconds/1E3, (unixMilliseconds%1E3)*1E6).UTC()
return nil
}
@@ -1131,14 +1124,14 @@ func (o *Object) parseTimeString(timeString string) (err error) {
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) (result time.Time) {
func (o *Object) ModTime() (result time.Time) {
// The error is logged in readMetaData
_ = o.readMetaData()
return o.modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
func (o *Object) SetModTime(modTime time.Time) error {
// Make sure o.meta is not nil
if o.meta == nil {
o.meta = make(map[string]string, 1)
@@ -1147,6 +1140,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
o.meta[modTimeKey] = modTime.Format(timeFormatOut)
blob := o.getBlobReference()
ctx := context.Background()
err := o.fs.pacer.Call(func() (bool, error) {
_, err := blob.SetMetadata(ctx, o.meta, azblob.BlobAccessConditions{})
return o.fs.shouldRetry(err)
@@ -1164,14 +1158,14 @@ func (o *Object) Storable() bool {
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
// Offset and Count for range download
var offset int64
var count int64
if o.AccessTier() == azblob.AccessTierArchive {
return nil, errors.Errorf("Blob in archive tier, you need to set tier to hot or cool first")
}
fs.FixRangeOption(options, o.size)
for _, option := range options {
switch x := option.(type) {
case *fs.RangeOption:
@@ -1188,6 +1182,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
}
blob := o.getBlobReference()
ctx := context.Background()
ac := azblob.BlobAccessConditions{}
var dowloadResponse *azblob.DownloadResponse
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1376,27 +1371,26 @@ outer:
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
container, _ := o.split()
err = o.fs.makeContainer(ctx, container)
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
err = o.fs.Mkdir("")
if err != nil {
return err
}
size := src.Size()
// Update Mod time
o.updateMetadataWithModTime(src.ModTime(ctx))
o.updateMetadataWithModTime(src.ModTime())
if err != nil {
return err
}
blob := o.getBlobReference()
httpHeaders := azblob.BlobHTTPHeaders{}
httpHeaders.ContentType = fs.MimeType(ctx, o)
httpHeaders.ContentType = fs.MimeType(o)
// Compute the Content-MD5 of the file, for multiparts uploads it
// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
// Note: If multipart, a MD5 checksum will also be computed for each uploaded block
// in order to validate its integrity during transport
if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
if sourceMD5, _ := src.Hash(hash.MD5); sourceMD5 != "" {
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
if err == nil {
httpHeaders.ContentMD5 = sourceMD5bytes
@@ -1414,13 +1408,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// FIXME Until https://github.com/Azure/azure-storage-blob-go/pull/75
// is merged the SDK can't upload a single blob of exactly the chunk
// size, so upload with a multpart upload to work around.
// See: https://github.com/rclone/rclone/issues/2653
// See: https://github.com/ncw/rclone/issues/2653
multipartUpload := size >= int64(o.fs.opt.UploadCutoff)
if size == int64(o.fs.opt.ChunkSize) {
multipartUpload = true
fs.Debugf(o, "Setting multipart upload for file of chunk size (%d) to work around SDK bug", size)
}
ctx := context.Background()
// Don't retry, return a retry error instead
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
if multipartUpload {
@@ -1453,10 +1448,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
func (o *Object) Remove() error {
blob := o.getBlobReference()
snapShotOptions := azblob.DeleteSnapshotsOptionNone
ac := azblob.BlobAccessConditions{}
ctx := context.Background()
return o.fs.pacer.Call(func() (bool, error) {
_, err := blob.Delete(ctx, snapShotOptions, ac)
return o.fs.shouldRetry(err)
@@ -1464,7 +1460,7 @@ func (o *Object) Remove(ctx context.Context) error {
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
func (o *Object) MimeType() string {
return o.mimeType
}
@@ -1516,6 +1512,4 @@ var (
_ fs.ListRer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.GetTierer = &Object{}
_ fs.SetTierer = &Object{}
)

View File

@@ -7,8 +7,8 @@ package azureblob
import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote

View File

@@ -7,7 +7,7 @@ import (
"strings"
"time"
"github.com/rclone/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fserrors"
)
// Error describes a B2 error response
@@ -50,7 +50,7 @@ type Timestamp time.Time
// MarshalJSON turns a Timestamp into JSON (in UTC)
func (t *Timestamp) MarshalJSON() (out []byte, err error) {
timestamp := (*time.Time)(t).UTC().UnixNano()
return []byte(strconv.FormatInt(timestamp/1e6, 10)), nil
return []byte(strconv.FormatInt(timestamp/1E6, 10)), nil
}
// UnmarshalJSON turns JSON into a Timestamp
@@ -59,7 +59,7 @@ func (t *Timestamp) UnmarshalJSON(data []byte) error {
if err != nil {
return err
}
*t = Timestamp(time.Unix(timestamp/1e3, (timestamp%1e3)*1e6).UTC())
*t = Timestamp(time.Unix(timestamp/1E3, (timestamp%1E3)*1E6).UTC())
return nil
}
@@ -189,21 +189,6 @@ type GetUploadURLResponse struct {
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_file.
}
// GetDownloadAuthorizationRequest is passed to b2_get_download_authorization
type GetDownloadAuthorizationRequest struct {
BucketID string `json:"bucketId"` // The ID of the bucket that you want to upload to.
FileNamePrefix string `json:"fileNamePrefix"` // The file name prefix of files the download authorization token will allow access to.
ValidDurationInSeconds int64 `json:"validDurationInSeconds"` // The number of seconds before the authorization token will expire. The minimum value is 1 second. The maximum value is 604800 which is one week in seconds.
B2ContentDisposition string `json:"b2ContentDisposition,omitempty"` // optional - If this is present, download requests using the returned authorization must include the same value for b2ContentDisposition.
}
// GetDownloadAuthorizationResponse is received from b2_get_download_authorization
type GetDownloadAuthorizationResponse struct {
BucketID string `json:"bucketId"` // The unique ID of the bucket.
FileNamePrefix string `json:"fileNamePrefix"` // The file name prefix of files the download authorization token will allow access to.
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when downloading files, see b2_download_file_by_name.
}
// FileInfo is received from b2_upload_file, b2_get_file_info and b2_finish_large_file
type FileInfo struct {
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
@@ -326,14 +311,3 @@ type CancelLargeFileResponse struct {
AccountID string `json:"accountId"` // The identifier for the account.
BucketID string `json:"bucketId"` // The unique ID of the bucket.
}
// CopyFileRequest is as passed to b2_copy_file
type CopyFileRequest struct {
SourceID string `json:"sourceFileId"` // The ID of the source file being copied.
Name string `json:"fileName"` // The name of the new file being created.
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
MetadataDirective string `json:"metadataDirective,omitempty"` // The strategy for how to populate metadata for the new file: COPY or REPLACE
ContentType string `json:"contentType,omitempty"` // The MIME type of the content of the file (REPLACE only)
Info map[string]string `json:"fileInfo,omitempty"` // This field stores the metadata that will be stored with the file. (REPLACE only)
DestBucketID string `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used
}
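// The Timestamp codec above stores times as integer milliseconds since the
// Unix epoch: /1e3 extracts the whole seconds and (%1e3)*1e6 turns the
// millisecond remainder into nanoseconds. A quick round-trip check:
package main

import (
	"fmt"
	"time"
)

func main() {
	ms := int64(1500000000000) // 2017-07-14T02:40:00Z, in milliseconds

	t := time.Unix(ms/1e3, (ms%1e3)*1e6).UTC() // decode
	back := t.UnixNano() / 1e6                 // encode

	fmt.Println(t, back == ms) // 2017-07-14 02:40:00 +0000 UTC true
}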

View File

@@ -4,8 +4,8 @@ import (
"testing"
"time"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fstest"
"github.com/ncw/rclone/backend/b2/api"
"github.com/ncw/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

File diff suppressed because it is too large

View File

@@ -4,7 +4,7 @@ import (
"testing"
"time"
"github.com/rclone/rclone/fstest"
"github.com/ncw/rclone/fstest"
)
// Test b2 string encoding

View File

@@ -4,8 +4,8 @@ package b2
import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote

View File

@@ -6,7 +6,6 @@ package b2
import (
"bytes"
"context"
"crypto/sha1"
"encoding/hex"
"fmt"
@@ -15,12 +14,12 @@ import (
"strings"
"sync"
"github.com/ncw/rclone/backend/b2/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/rest"
)
type hashAppendingReader struct {
@@ -81,7 +80,7 @@ type largeUpload struct {
}
// newLargeUpload starts an upload of object o from in with metadata in src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
remote := o.remote
size := src.Size()
parts := int64(0)
@@ -99,34 +98,33 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
sha1SliceSize = parts
}
modTime := src.ModTime(ctx)
modTime := src.ModTime()
opts := rest.Opts{
Method: "POST",
Path: "/b2_start_large_file",
}
bucket, bucketPath := o.split()
bucketID, err := f.getBucketID(ctx, bucket)
bucketID, err := f.getBucketID()
if err != nil {
return nil, err
}
var request = api.StartLargeFileRequest{
BucketID: bucketID,
Name: enc.FromStandardPath(bucketPath),
ContentType: fs.MimeType(ctx, src),
Name: o.fs.root + remote,
ContentType: fs.MimeType(src),
Info: map[string]string{
timeKey: timeString(modTime),
},
}
// Set the SHA1 if known
if !o.fs.opt.DisableCheckSum {
if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" {
request.Info[sha1Key] = calculatedSha1
}
}
var response api.StartLargeFileResponse
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
return f.shouldRetry(ctx, resp, err)
resp, err := f.srv.CallJSON(&opts, &request, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
return nil, err
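// The part-count arithmetic elided between these hunks is a ceiling division
// of the file size by the chunk size. A sketch with an illustrative chunk
// size (the real value comes from the backend's chunk size option):
package main

import "fmt"

func main() {
	const chunkSize = int64(96 * 1024 * 1024) // illustrative
	size := int64(200 * 1024 * 1024)

	parts := size / chunkSize
	if size%chunkSize != 0 {
		parts++ // final short part
	}
	fmt.Println(parts) // 3
}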
@@ -150,7 +148,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
//
// This should be returned with returnUploadURL when finished
func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
func (up *largeUpload) getUploadURL() (upload *api.GetUploadPartURLResponse, err error) {
up.uploadMu.Lock()
defer up.uploadMu.Unlock()
if len(up.uploads) == 0 {
@@ -162,8 +160,8 @@ func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadP
ID: up.id,
}
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
return up.f.shouldRetry(ctx, resp, err)
resp, err := up.f.srv.CallJSON(&opts, &request, &upload)
return up.f.shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get upload URL")
@@ -192,12 +190,12 @@ func (up *largeUpload) clearUploadURL() {
}
// Transfer a chunk
func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
func (up *largeUpload) transferChunk(part int64, body []byte) error {
err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
// Get upload URL
upload, err := up.getUploadURL(ctx)
upload, err := up.getUploadURL()
if err != nil {
return false, err
}
@@ -241,8 +239,8 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
var response api.UploadPartResponse
resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
retry, err := up.f.shouldRetry(ctx, resp, err)
resp, err := up.f.srv.CallJSON(&opts, nil, &response)
retry, err := up.f.shouldRetry(resp, err)
if err != nil {
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
}
@@ -264,7 +262,7 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
}
// finish closes off the large upload
func (up *largeUpload) finish(ctx context.Context) error {
func (up *largeUpload) finish() error {
fs.Debugf(up.o, "Finishing large file upload with %d parts", up.parts)
opts := rest.Opts{
Method: "POST",
@@ -276,8 +274,8 @@ func (up *largeUpload) finish(ctx context.Context) error {
}
var response api.FileInfo
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
return up.f.shouldRetry(ctx, resp, err)
resp, err := up.f.srv.CallJSON(&opts, &request, &response)
return up.f.shouldRetry(resp, err)
})
if err != nil {
return err
@@ -286,7 +284,7 @@ func (up *largeUpload) finish(ctx context.Context) error {
}
// cancel aborts the large upload
func (up *largeUpload) cancel(ctx context.Context) error {
func (up *largeUpload) cancel() error {
opts := rest.Opts{
Method: "POST",
Path: "/b2_cancel_large_file",
@@ -296,18 +294,18 @@ func (up *largeUpload) cancel(ctx context.Context) error {
}
var response api.CancelLargeFileResponse
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
return up.f.shouldRetry(ctx, resp, err)
resp, err := up.f.srv.CallJSON(&opts, &request, &response)
return up.f.shouldRetry(resp, err)
})
return err
}
func (up *largeUpload) managedTransferChunk(ctx context.Context, wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
func (up *largeUpload) managedTransferChunk(wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
wg.Add(1)
go func(part int64, buf []byte) {
defer wg.Done()
defer up.f.putUploadBlock(buf)
err := up.transferChunk(ctx, part, buf)
err := up.transferChunk(part, buf)
if err != nil {
select {
case errs <- err:
@@ -317,7 +315,7 @@ func (up *largeUpload) managedTransferChunk(ctx context.Context, wg *sync.WaitGr
}(part, buf)
}
func (up *largeUpload) finishOrCancelOnError(ctx context.Context, err error, errs chan error) error {
func (up *largeUpload) finishOrCancelOnError(err error, errs chan error) error {
if err == nil {
select {
case err = <-errs:
@@ -326,19 +324,19 @@ func (up *largeUpload) finishOrCancelOnError(ctx context.Context, err error, err
}
if err != nil {
fs.Debugf(up.o, "Cancelling large file upload due to error: %v", err)
cancelErr := up.cancel(ctx)
cancelErr := up.cancel()
if cancelErr != nil {
fs.Errorf(up.o, "Failed to cancel large file upload: %v", cancelErr)
}
return err
}
return up.finish(ctx)
return up.finish()
}
// Stream uploads the chunks from the input, starting with a required initial
// chunk. Assumes the file size is unknown and will upload until the input
// reaches EOF.
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
func (up *largeUpload) Stream(initialUploadBlock []byte) (err error) {
fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
errs := make(chan error, 1)
hasMoreParts := true
@@ -346,7 +344,7 @@ func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (e
// Transfer initial chunk
up.size = int64(len(initialUploadBlock))
up.managedTransferChunk(ctx, &wg, errs, 1, initialUploadBlock)
up.managedTransferChunk(&wg, errs, 1, initialUploadBlock)
outer:
for part := int64(2); hasMoreParts; part++ {
@@ -388,16 +386,16 @@ outer:
}
// Transfer the chunk
up.managedTransferChunk(ctx, &wg, errs, part, buf)
up.managedTransferChunk(&wg, errs, part, buf)
}
wg.Wait()
up.sha1s = up.sha1s[:up.parts]
return up.finishOrCancelOnError(ctx, err, errs)
return up.finishOrCancelOnError(err, errs)
}
// Upload uploads the chunks from the input
func (up *largeUpload) Upload(ctx context.Context) error {
func (up *largeUpload) Upload() error {
fs.Debugf(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
remaining := up.size
errs := make(chan error, 1)
@@ -428,10 +426,10 @@ outer:
}
// Transfer the chunk
up.managedTransferChunk(ctx, &wg, errs, part, buf)
up.managedTransferChunk(&wg, errs, part, buf)
remaining -= reqSize
}
wg.Wait()
return up.finishOrCancelOnError(ctx, err, errs)
return up.finishOrCancelOnError(err, errs)
}

View File

@@ -202,23 +202,3 @@ type CommitUpload struct {
ContentModifiedAt Time `json:"content_modified_at"`
} `json:"attributes"`
}
// ConfigJSON defines the shape of a box config.json
type ConfigJSON struct {
BoxAppSettings AppSettings `json:"boxAppSettings"`
EnterpriseID string `json:"enterpriseID"`
}
// AppSettings defines the shape of the boxAppSettings within box config.json
type AppSettings struct {
ClientID string `json:"clientID"`
ClientSecret string `json:"clientSecret"`
AppAuth AppAuth `json:"appAuth"`
}
// AppAuth defines the shape of the appAuth within boxAppSettings in config.json
type AppAuth struct {
PublicKeyID string `json:"publicKeyID"`
PrivateKey string `json:"privateKey"`
Passphrase string `json:"passphrase"`
}
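// The structs above mirror the config.json that Box issues for JWT apps, so
// loading one is a single json.Unmarshal. A self-contained sketch using
// local, trimmed copies of those shapes; the sample values are made up.
package main

import (
	"encoding/json"
	"fmt"
)

type appAuth struct {
	PublicKeyID string `json:"publicKeyID"`
	PrivateKey  string `json:"privateKey"`
	Passphrase  string `json:"passphrase"`
}

type appSettings struct {
	ClientID     string  `json:"clientID"`
	ClientSecret string  `json:"clientSecret"`
	AppAuth      appAuth `json:"appAuth"`
}

type configJSON struct {
	BoxAppSettings appSettings `json:"boxAppSettings"`
	EnterpriseID   string      `json:"enterpriseID"`
}

func main() {
	raw := []byte(`{
	  "boxAppSettings": {
	    "clientID": "abc",
	    "clientSecret": "secret",
	    "appAuth": {"publicKeyID": "kid1", "privateKey": "---", "passphrase": "pw"}
	  },
	  "enterpriseID": "12345"
	}`)

	var cfg configJSON
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.BoxAppSettings.ClientID, cfg.EnterpriseID) // abc 12345
}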

View File

@@ -10,13 +10,8 @@ package box
// FIXME box can copy a directory
import (
"context"
"crypto/rsa"
"encoding/json"
"encoding/pem"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
@@ -25,31 +20,22 @@ import (
"strings"
"time"
"github.com/rclone/rclone/lib/jwtutil"
"github.com/youmark/pkcs8"
"github.com/ncw/rclone/backend/box/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/dircache"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
"golang.org/x/oauth2/jws"
)
const enc = encodings.Box
const (
rcloneClientID = "d0374ba6pgmaguie02ge15sv1mllndho"
rcloneEncryptedClientSecret = "sYbJYm99WB8jzeaLPU0OPDMJKIkZvD2qOn3SyEMfiJr03RdtDt3xcZEIudRhbIDL"
@@ -62,7 +48,6 @@ const (
listChunks = 1000 // chunk size to read directory listings
minUploadCutoff = 50000000 // upload cutoff can be no lower than this
defaultUploadCutoff = 50 * 1024 * 1024
tokenURL = "https://api.box.com/oauth2/token"
)
// Globals
@@ -87,34 +72,9 @@ func init() {
Description: "Box",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
var err error
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
boxConfig, err := getBoxConfig(jsonFile)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
privateKey, err := getDecryptedPrivateKey(boxConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
claims, err := getClaims(boxConfig, boxSubType)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig)
client := fshttp.NewClient(fs.Config)
err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
if err != nil {
log.Fatalf("Failed to configure token with jwt authentication: %v", err)
}
} else {
err = oauthutil.Config("box", name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure token with oauth authentication: %v", err)
}
err := oauthutil.Config("box", name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
@@ -123,19 +83,6 @@ func init() {
}, {
Name: config.ConfigClientSecret,
Help: "Box App Client Secret\nLeave blank normally.",
}, {
Name: "box_config_file",
Help: "Box App config.json location\nLeave blank normally.",
}, {
Name: "box_sub_type",
Default: "user",
Examples: []fs.OptionExample{{
Value: "user",
Help: "Rclone should act on behalf of a user",
}, {
Value: "enterprise",
Help: "Rclone should act on behalf of a service account",
}},
}, {
Name: "upload_cutoff",
Help: "Cutoff for switching to multipart upload (>= 50MB).",
@@ -150,74 +97,6 @@ func init() {
})
}
func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
file, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, errors.Wrap(err, "box: failed to read Box config")
}
err = json.Unmarshal(file, &boxConfig)
if err != nil {
return nil, errors.Wrap(err, "box: failed to parse Box config")
}
return boxConfig, nil
}
func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
val, err := jwtutil.RandomHex(20)
if err != nil {
return nil, errors.Wrap(err, "box: failed to generate random string for jti")
}
claims = &jws.ClaimSet{
Iss: boxConfig.BoxAppSettings.ClientID,
Sub: boxConfig.EnterpriseID,
Aud: tokenURL,
Iat: time.Now().Unix(),
Exp: time.Now().Add(time.Second * 45).Unix(),
PrivateClaims: map[string]interface{}{
"box_sub_type": boxSubType,
"aud": tokenURL,
"jti": val,
},
}
return claims, nil
}
func getSigningHeaders(boxConfig *api.ConfigJSON) *jws.Header {
signingHeaders := &jws.Header{
Algorithm: "RS256",
Typ: "JWT",
KeyID: boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
}
return signingHeaders
}
func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
queryParams := map[string]string{
"client_id": boxConfig.BoxAppSettings.ClientID,
"client_secret": boxConfig.BoxAppSettings.ClientSecret,
}
return queryParams
}
func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
if len(rest) > 0 {
return nil, errors.Wrap(err, "box: extra data included in private key")
}
rsaKey, err := pkcs8.ParsePKCS8PrivateKey(block.Bytes, []byte(boxConfig.BoxAppSettings.AppAuth.Passphrase))
if err != nil {
return nil, errors.Wrap(err, "box: failed to decrypt private key")
}
return rsaKey.(*rsa.PrivateKey), nil
}
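
Taken together, the JWT helpers above implement Box's server-auth flow: read the app config, decrypt the private key, build a claim set, and sign it. A minimal sketch of how they could combine, assuming a hypothetical config path and using jws.Encode from golang.org/x/oauth2/jws (already imported above):

func exampleBoxAssertion() (string, error) {
	// hypothetical config location, for illustration only
	boxConfig, err := getBoxConfig("/path/to/box-config.json")
	if err != nil {
		return "", err
	}
	key, err := getDecryptedPrivateKey(boxConfig)
	if err != nil {
		return "", err
	}
	claims, err := getClaims(boxConfig, "enterprise")
	if err != nil {
		return "", err
	}
	// sign the claims with RS256; the result is the assertion exchanged
	// for an access token at tokenURL using the getQueryParams values
	return jws.Encode(getSigningHeaders(boxConfig), claims, key)
}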
// Options defines the configuration for this backend
type Options struct {
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
@@ -301,10 +180,22 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// substitute reserved characters for box
func replaceReservedChars(x string) string {
// Backslash for FULLWIDTH REVERSE SOLIDUS
return strings.Replace(x, "\\", "＼", -1)
}
// restore reserved characters for box
func restoreReservedChars(x string) string {
// FULLWIDTH REVERSE SOLIDUS for Backslash
return strings.Replace(x, "＼", "\\", -1)
}
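
These two functions are inverses: Box rejects a literal backslash in names, so it is swapped for the visually similar FULLWIDTH REVERSE SOLIDUS (U+FF3C) on the way to the API and swapped back when listing. A round-trip sketch:

name := `dir\file`
sent := replaceReservedChars(name) // "dir＼file" is what the Box API sees
back := restoreReservedChars(sent) // "dir\file" is what rclone reports
// back == name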
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
func (f *Fs) readMetaDataForPath(path string) (info *api.Item, err error) {
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, path, false)
leaf, directoryID, err := f.dirCache.FindRootAndPath(path, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return nil, fs.ErrorObjectNotFound
@@ -312,7 +203,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
return nil, err
}
found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
found, err := f.listAll(directoryID, false, true, func(item *api.Item) bool {
if item.Name == leaf {
info = item
return true
@@ -347,7 +238,6 @@ func errorHandler(resp *http.Response) error {
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -381,7 +271,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath(ctx, "")
_, err := f.readMetaDataForPath("")
return err
})
@@ -389,7 +279,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f.dirCache = dircache.New(root, rootID, f)
// Find the current root
err = f.dirCache.FindRoot(ctx, false)
err = f.dirCache.FindRoot(false)
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
@@ -397,12 +287,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
tempF.root = newRoot
// Make new Fs which is the parent
err = tempF.dirCache.FindRoot(ctx, false)
err = tempF.dirCache.FindRoot(false)
if err != nil {
// No root so return old f
return f, nil
}
_, err := tempF.newObjectWithInfo(ctx, remote, nil)
_, err := tempF.newObjectWithInfo(remote, nil)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
@@ -413,7 +303,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f.features.Fill(&tempF)
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/rclone/rclone/issues/2182
// See https://github.com/ncw/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
// return an error with an fs which points to the parent
@@ -433,7 +323,7 @@ func (f *Fs) rootSlash() string {
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -443,7 +333,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Ite
// Set info
err = o.setMetaData(info)
} else {
err = o.readMetaData(ctx) // reads info and meta, returning an error
err = o.readMetaData() // reads info and meta, returning an error
}
if err != nil {
return nil, err
@@ -453,14 +343,14 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Ite
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
found, err = f.listAll(pathID, true, false, func(item *api.Item) bool {
if item.Name == leaf {
pathIDOut = item.ID
return true
@@ -478,7 +368,7 @@ func fieldsValue() url.Values {
}
// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
// fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
var resp *http.Response
var info *api.Item
@@ -488,13 +378,13 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
Parameters: fieldsValue(),
}
mkdir := api.CreateFolder{
Name: enc.FromStandardName(leaf),
Name: replaceReservedChars(leaf),
Parent: api.Parent{
ID: pathID,
},
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
resp, err = f.srv.CallJSON(&opts, &mkdir, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -516,7 +406,7 @@ type listAllFn func(*api.Item) bool
// Lists the directory required, calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/folders/" + dirID + "/items",
@@ -531,7 +421,7 @@ OUTER:
var result api.FolderItems
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -554,7 +444,7 @@ OUTER:
if item.ItemStatus != api.ItemStatusActive {
continue
}
item.Name = enc.ToStandardName(item.Name)
item.Name = restoreReservedChars(item.Name)
if fn(item) {
found = true
break OUTER
@@ -577,17 +467,17 @@ OUTER:
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(ctx, false)
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
directoryID, err := f.dirCache.FindDir(dir, false)
if err != nil {
return nil, err
}
var iErr error
_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
_, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
remote := path.Join(dir, info.Name)
if info.Type == api.ItemTypeFolder {
// cache the directory ID for later lookups
@@ -596,7 +486,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// FIXME more info from dir?
entries = append(entries, d)
} else if info.Type == api.ItemTypeFile {
o, err := f.newObjectWithInfo(ctx, remote, info)
o, err := f.newObjectWithInfo(remote, info)
if err != nil {
iErr = err
return true
@@ -620,9 +510,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Returns the object, leaf, directoryID and error
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
// Create the directory for the object if it doesn't exist
leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true)
leaf, directoryID, err = f.dirCache.FindRootAndPath(remote, true)
if err != nil {
return
}
@@ -639,22 +529,22 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil)
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
switch err {
case nil:
return existingObj, existingObj.Update(ctx, in, src, options...)
return existingObj, existingObj.Update(in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src)
return f.PutUnchecked(in, src)
default:
return nil, err
}
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...)
}
// PutUnchecked the object into the container
@@ -664,56 +554,56 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
remote := src.Remote()
size := src.Size()
modTime := src.ModTime(ctx)
modTime := src.ModTime()
o, _, _, err := f.createObject(ctx, remote, modTime, size)
o, _, _, err := f.createObject(remote, modTime, size)
if err != nil {
return nil, err
}
return o, o.Update(ctx, in, src, options...)
return o, o.Update(in, src, options...)
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
err := f.dirCache.FindRoot(ctx, true)
func (f *Fs) Mkdir(dir string) error {
err := f.dirCache.FindRoot(true)
if err != nil {
return err
}
if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true)
_, err = f.dirCache.FindDir(dir, true)
}
return err
}
// deleteObject removes an object by ID
func (f *Fs) deleteObject(ctx context.Context, id string) error {
func (f *Fs) deleteObject(id string) error {
opts := rest.Opts{
Method: "DELETE",
Path: "/files/" + id,
NoResponse: true,
}
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(ctx, &opts)
resp, err := f.srv.Call(&opts)
return shouldRetry(resp, err)
})
}
// purgeCheck removes the root directory; if check is set then it
// refuses to do so if it has anything in it
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
func (f *Fs) purgeCheck(dir string, check bool) error {
root := path.Join(f.root, dir)
if root == "" {
return errors.New("can't purge root directory")
}
dc := f.dirCache
err := dc.FindRoot(ctx, false)
err := dc.FindRoot(false)
if err != nil {
return err
}
rootID, err := dc.FindDir(ctx, dir, false)
rootID, err := dc.FindDir(dir, false)
if err != nil {
return err
}
@@ -727,7 +617,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
opts.Parameters.Set("recursive", strconv.FormatBool(!check))
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
resp, err = f.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -743,8 +633,8 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, true)
func (f *Fs) Rmdir(dir string) error {
return f.purgeCheck(dir, true)
}
// Precision return the precision of this Fs
@@ -761,13 +651,13 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
err := srcObj.readMetaData(ctx)
err := srcObj.readMetaData()
if err != nil {
return nil, err
}
@@ -779,7 +669,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// Create temporary object
dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
if err != nil {
return nil, err
}
@@ -790,8 +680,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
Path: "/files/" + srcObj.id + "/copy",
Parameters: fieldsValue(),
}
replacedLeaf := replaceReservedChars(leaf)
copyFile := api.CopyFile{
Name: enc.FromStandardName(leaf),
Name: replacedLeaf,
Parent: api.Parent{
ID: directoryID,
},
@@ -799,7 +690,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var resp *http.Response
var info *api.Item
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &copyFile, &info)
resp, err = f.srv.CallJSON(&opts, &copyFile, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -817,12 +708,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
func (f *Fs) Purge() error {
return f.purgeCheck("", false)
}
// move a file or folder
func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (info *api.Item, err error) {
func (f *Fs) move(endpoint, id, leaf, directoryID string) (info *api.Item, err error) {
// Move the object
opts := rest.Opts{
Method: "PUT",
@@ -830,14 +721,14 @@ func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (
Parameters: fieldsValue(),
}
move := api.UpdateFileMove{
Name: enc.FromStandardName(leaf),
Name: replaceReservedChars(leaf),
Parent: api.Parent{
ID: directoryID,
},
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
resp, err = f.srv.CallJSON(&opts, &move, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -855,7 +746,7 @@ func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
@@ -863,13 +754,13 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// Create temporary object
dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
if err != nil {
return nil, err
}
// Do the move
info, err := f.move(ctx, "/files/", srcObj.id, leaf, directoryID)
info, err := f.move("/files/", srcObj.id, leaf, directoryID)
if err != nil {
return nil, err
}
@@ -889,7 +780,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -905,14 +796,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// find the root src directory
err := srcFs.dirCache.FindRoot(ctx, false)
err := srcFs.dirCache.FindRoot(false)
if err != nil {
return err
}
// find the root dst directory
if dstRemote != "" {
err = f.dirCache.FindRoot(ctx, true)
err = f.dirCache.FindRoot(true)
if err != nil {
return err
}
@@ -928,14 +819,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
if dstRemote == "" {
findPath = f.root
}
leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true)
leaf, directoryID, err = f.dirCache.FindPath(findPath, true)
if err != nil {
return err
}
// Check destination does not exist
if dstRemote != "" {
_, err = f.dirCache.FindDir(ctx, dstRemote, false)
_, err = f.dirCache.FindDir(dstRemote, false)
if err == fs.ErrorDirNotFound {
// OK
} else if err != nil {
@@ -946,13 +837,13 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// Find ID of src
srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
if err != nil {
return err
}
// Do the move
_, err = f.move(ctx, "/folders/", srcID, leaf, directoryID)
_, err = f.move("/folders/", srcID, leaf, directoryID)
if err != nil {
return err
}
@@ -961,8 +852,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
id, err := f.dirCache.FindDir(ctx, remote, false)
func (f *Fs) PublicLink(remote string) (string, error) {
id, err := f.dirCache.FindDir(remote, false)
var opts rest.Opts
if err == nil {
fs.Debugf(f, "attempting to share directory '%s'", remote)
@@ -974,7 +865,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
}
} else {
fs.Debugf(f, "attempting to share single file '%s'", remote)
o, err := f.NewObject(ctx, remote)
o, err := f.NewObject(remote)
if err != nil {
return "", err
}
@@ -994,7 +885,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
var info api.Item
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &shareLink, &info)
resp, err = f.srv.CallJSON(&opts, &shareLink, &info)
return shouldRetry(resp, err)
})
return info.SharedLink.URL, err
@@ -1031,8 +922,13 @@ func (o *Object) Remote() string {
return o.remote
}
// srvPath returns a path for use in server
func (o *Object) srvPath() string {
return replaceReservedChars(o.fs.rootSlash() + o.remote)
}
// Hash returns the SHA-1 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.SHA1 {
return "", hash.ErrUnsupported
}
@@ -1041,7 +937,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
err := o.readMetaData(context.TODO())
err := o.readMetaData()
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return 0
@@ -1066,11 +962,11 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
func (o *Object) readMetaData() (err error) {
if o.hasMetaData {
return nil
}
info, err := o.fs.readMetaDataForPath(ctx, o.remote)
info, err := o.fs.readMetaDataForPath(o.remote)
if err != nil {
if apiErr, ok := err.(*api.Error); ok {
if apiErr.Code == "not_found" || apiErr.Code == "trashed" {
@@ -1087,8 +983,8 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData(ctx)
func (o *Object) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
@@ -1097,7 +993,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
}
// setModTime sets the modification time of the local fs object
func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item, error) {
func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
opts := rest.Opts{
Method: "PUT",
Path: "/files/" + o.id,
@@ -1108,15 +1004,15 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
}
var info *api.Item
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
resp, err := o.fs.srv.CallJSON(&opts, &update, &info)
return shouldRetry(resp, err)
})
return info, err
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
info, err := o.setModTime(ctx, modTime)
func (o *Object) SetModTime(modTime time.Time) error {
info, err := o.setModTime(modTime)
if err != nil {
return err
}
@@ -1129,7 +1025,7 @@ func (o *Object) Storable() bool {
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
if o.id == "" {
return nil, errors.New("can't download - no id")
}
@@ -1141,7 +1037,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1153,9 +1049,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// upload does a single non-multipart upload
//
// This is recommended for less than 50 MB of content
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time) (err error) {
func (o *Object) upload(in io.Reader, leaf, directoryID string, modTime time.Time) (err error) {
upload := api.UploadFile{
Name: enc.FromStandardName(leaf),
Name: replaceReservedChars(leaf),
ContentModifiedAt: api.Time(modTime),
ContentCreatedAt: api.Time(modTime),
Parent: api.Parent{
@@ -1180,7 +1076,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
opts.Path = "/files/content"
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &upload, &result)
resp, err = o.fs.srv.CallJSON(&opts, &upload, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1197,32 +1093,32 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
// If existing is set then it updates the object rather than creating a new one
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
o.fs.tokenRenewer.Start()
defer o.fs.tokenRenewer.Stop()
size := src.Size()
modTime := src.ModTime(ctx)
modTime := src.ModTime()
remote := o.Remote()
// Create the directory for the object if it doesn't exist
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, remote, true)
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(remote, true)
if err != nil {
return err
}
// Upload with simple or multipart
if size <= int64(o.fs.opt.UploadCutoff) {
err = o.upload(ctx, in, leaf, directoryID, modTime)
err = o.upload(in, leaf, directoryID, modTime)
} else {
err = o.uploadMultipart(ctx, in, leaf, directoryID, size, modTime)
err = o.uploadMultipart(in, leaf, directoryID, size, modTime)
}
return err
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return o.fs.deleteObject(ctx, o.id)
func (o *Object) Remove() error {
return o.fs.deleteObject(o.id)
}
// ID returns the ID of the Object if known, or "" if not


@@ -4,8 +4,8 @@ package box_test
import (
"testing"
"github.com/rclone/rclone/backend/box"
"github.com/rclone/rclone/fstest/fstests"
"github.com/ncw/rclone/backend/box"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote


@@ -4,7 +4,6 @@ package box
import (
"bytes"
"context"
"crypto/sha1"
"encoding/base64"
"encoding/json"
@@ -15,15 +14,15 @@ import (
"sync"
"time"
"github.com/ncw/rclone/backend/box/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/rest"
)
// createUploadSession creates an upload session for the object
func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) {
func (o *Object) createUploadSession(leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/files/upload_sessions",
@@ -38,11 +37,11 @@ func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID stri
} else {
opts.Path = "/files/upload_sessions"
request.FolderID = directoryID
request.FileName = enc.FromStandardName(leaf)
request.FileName = replaceReservedChars(leaf)
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, &response)
resp, err = o.fs.srv.CallJSON(&opts, &request, &response)
return shouldRetry(resp, err)
})
return
@@ -54,7 +53,7 @@ func sha1Digest(digest []byte) string {
}
// uploadPart uploads a part in an upload session
func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
func (o *Object) uploadPart(SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
chunkSize := int64(len(chunk))
sha1sum := sha1.Sum(chunk)
opts := rest.Opts{
@@ -71,7 +70,7 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
opts.Body = wrap(bytes.NewReader(chunk))
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &response)
resp, err = o.fs.srv.CallJSON(&opts, nil, &response)
return shouldRetry(resp, err)
})
if err != nil {
@@ -81,7 +80,7 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
}
// commitUpload finishes an upload session
func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) {
func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/files/upload_sessions/" + SessionID + "/commit",
@@ -98,14 +97,14 @@ func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api
var body []byte
var resp *http.Response
// For discussion of this value see:
// https://github.com/rclone/rclone/issues/2054
// https://github.com/ncw/rclone/issues/2054
maxTries := o.fs.opt.CommitRetries
const defaultDelay = 10
var tries int
outer:
for tries = 0; tries < maxTries; tries++ {
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
resp, err = o.fs.srv.CallJSON(&opts, &request, nil)
if err != nil {
return shouldRetry(resp, err)
}
@@ -113,7 +112,7 @@ outer:
return shouldRetry(resp, err)
})
delay := defaultDelay
var why string
why := "unknown"
if err != nil {
// Sometimes we get 400 Error with
// parts_mismatch immediately after uploading
@@ -155,7 +154,7 @@ outer:
}
// abortUpload cancels an upload session
func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error) {
func (o *Object) abortUpload(SessionID string) (err error) {
opts := rest.Opts{
Method: "DELETE",
Path: "/files/upload_sessions/" + SessionID,
@@ -164,16 +163,16 @@ func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error)
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
return err
}
// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
func (o *Object) uploadMultipart(in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
// Create upload session
session, err := o.createUploadSession(ctx, leaf, directoryID, size)
session, err := o.createUploadSession(leaf, directoryID, size)
if err != nil {
return errors.Wrap(err, "multipart upload create session failed")
}
@@ -184,7 +183,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
defer func() {
if err != nil {
fs.Debugf(o, "Cancelling multipart upload: %v", err)
cancelErr := o.abortUpload(ctx, session.ID)
cancelErr := o.abortUpload(session.ID)
if cancelErr != nil {
fs.Logf(o, "Failed to cancel multipart upload: %v", err)
}
@@ -236,7 +235,7 @@ outer:
defer wg.Done()
defer o.fs.uploadToken.Put()
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap)
partResponse, err := o.uploadPart(session.ID, position, size, buf, wrap)
if err != nil {
err = errors.Wrap(err, "multipart upload failed to upload part")
select {
@@ -264,7 +263,7 @@ outer:
}
// Finalise the upload session
result, err := o.commitUpload(ctx, session.ID, parts, modTime, hash.Sum(nil))
result, err := o.commitUpload(session.ID, parts, modTime, hash.Sum(nil))
if err != nil {
return errors.Wrap(err, "multipart upload failed to finalize")
}
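
The chunk loop above caps concurrency with a token pool (o.fs.uploadToken) and joins the workers with a WaitGroup. The same pattern in a standalone sketch, with uploadOnePart, tokens and totalParts as hypothetical stand-ins:

var wg sync.WaitGroup
errs := make(chan error, 1)
for part := 0; part < totalParts; part++ {
	tokens <- struct{}{} // acquire an upload slot
	wg.Add(1)
	go func(part int) {
		defer wg.Done()
		defer func() { <-tokens }() // release the slot
		if err := uploadOnePart(part); err != nil {
			select {
			case errs <- err: // keep only the first error
			default:
			}
		}
	}(part)
}
wg.Wait()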

backend/cache/cache.go

@@ -18,19 +18,18 @@ import (
"syscall"
"time"
"github.com/ncw/rclone/backend/crypt"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fspath"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/rc"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/atexit"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/atexit"
"golang.org/x/time/rate"
)
@@ -482,7 +481,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
}
f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
f.tempFs, err = cache.Get(f.opt.TempWritePath)
f.tempFs, err = fs.NewFs(f.opt.TempWritePath)
if err != nil {
return nil, errors.Wrap(err, "failed to create temp fs")
}
@@ -509,7 +508,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
pollInterval := make(chan time.Duration, 1)
pollInterval <- time.Duration(f.opt.ChunkCleanInterval)
doChangeNotify(context.Background(), f.receiveChangeNotify, pollInterval)
doChangeNotify(f.receiveChangeNotify, pollInterval)
}
f.features = (&fs.Features{
@@ -600,7 +599,7 @@ is used on top of the cache.
return f, fsErr
}
func (f *Fs) httpStats(ctx context.Context, in rc.Params) (out rc.Params, err error) {
func (f *Fs) httpStats(in rc.Params) (out rc.Params, err error) {
out = make(rc.Params)
m, err := f.Stats()
if err != nil {
@@ -627,7 +626,7 @@ func (f *Fs) unwrapRemote(remote string) string {
return remote
}
func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params, err error) {
func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) {
out = make(rc.Params)
remoteInt, ok := in["remote"]
if !ok {
@@ -672,7 +671,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
return out, nil
}
func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
func (f *Fs) rcFetch(in rc.Params) (rc.Params, error) {
type chunkRange struct {
start, end int64
}
@@ -777,18 +776,18 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
for _, pair := range files {
file, remote := pair[0], pair[1]
var status fileStatus
o, err := f.NewObject(ctx, remote)
o, err := f.NewObject(remote)
if err != nil {
fetchedChunks[file] = fileStatus{Error: err.Error()}
continue
}
co := o.(*Object)
err = co.refreshFromSource(ctx, true)
err = co.refreshFromSource(true)
if err != nil {
fetchedChunks[file] = fileStatus{Error: err.Error()}
continue
}
handle := NewObjectHandle(ctx, co, f)
handle := NewObjectHandle(co, f)
handle.UseMemory = false
handle.scaleWorkers(1)
walkChunkRanges(crs, co.Size(), func(chunk int64) {
@@ -874,7 +873,7 @@ func (f *Fs) notifyChangeUpstream(remote string, entryType fs.EntryType) {
// ChangeNotify can subscribe multiple callers
// this is coupled with the wrapped fs ChangeNotify (if it supports it)
// and also notifies other caches (i.e VFS) to clear out whenever something changes
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
f.parentsForgetMu.Lock()
defer f.parentsForgetMu.Unlock()
fs.Debugf(f, "subscribing to ChangeNotify")
@@ -921,7 +920,7 @@ func (f *Fs) TempUploadWaitTime() time.Duration {
}
// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
func (f *Fs) NewObject(remote string) (fs.Object, error) {
var err error
fs.Debugf(f, "new object '%s'", remote)
@@ -940,16 +939,16 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// search for entry in source or temp fs
var obj fs.Object
if f.opt.TempWritePath != "" {
obj, err = f.tempFs.NewObject(ctx, remote)
obj, err = f.tempFs.NewObject(remote)
// not found in temp fs
if err != nil {
fs.Debugf(remote, "find: not found in local cache fs")
obj, err = f.Fs.NewObject(ctx, remote)
obj, err = f.Fs.NewObject(remote)
} else {
fs.Debugf(obj, "find: found in local cache fs")
}
} else {
obj, err = f.Fs.NewObject(ctx, remote)
obj, err = f.Fs.NewObject(remote)
}
// not found in either fs
@@ -959,13 +958,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
}
// cache the new entry
co = ObjectFromOriginal(ctx, f, obj).persist()
co = ObjectFromOriginal(f, obj).persist()
fs.Debugf(co, "find: cached object")
return co, nil
}
// List the objects and directories in dir into entries
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
fs.Debugf(f, "list '%s'", dir)
cd := ShallowDirectory(f, dir)
@@ -995,12 +994,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
fs.Debugf(dir, "list: temp fs entries: %v", queuedEntries)
for _, queuedRemote := range queuedEntries {
queuedEntry, err := f.tempFs.NewObject(ctx, f.cleanRootFromPath(queuedRemote))
queuedEntry, err := f.tempFs.NewObject(f.cleanRootFromPath(queuedRemote))
if err != nil {
fs.Debugf(dir, "list: temp file not found in local fs: %v", err)
continue
}
co := ObjectFromOriginal(ctx, f, queuedEntry).persist()
co := ObjectFromOriginal(f, queuedEntry).persist()
fs.Debugf(co, "list: cached temp object")
cachedEntries = append(cachedEntries, co)
}
@@ -1008,7 +1007,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
// search from the source
sourceEntries, err := f.Fs.List(ctx, dir)
sourceEntries, err := f.Fs.List(dir)
if err != nil {
return nil, err
}
@@ -1046,11 +1045,11 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if i < tmpCnt && cachedEntries[i].Remote() == oRemote {
continue
}
co := ObjectFromOriginal(ctx, f, o).persist()
co := ObjectFromOriginal(f, o).persist()
cachedEntries = append(cachedEntries, co)
fs.Debugf(dir, "list: cached object: %v", co)
case fs.Directory:
cdd := DirectoryFromOriginal(ctx, f, o)
cdd := DirectoryFromOriginal(f, o)
// check if the dir isn't expired and add it in cache if it isn't
if cdd2, err := f.cache.GetDir(cdd.abs()); err != nil || time.Now().Before(cdd2.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
batchDirectories = append(batchDirectories, cdd)
@@ -1080,8 +1079,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return cachedEntries, nil
}
func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) error {
entries, err := f.List(ctx, dir)
func (f *Fs) recurse(dir string, list *walk.ListRHelper) error {
entries, err := f.List(dir)
if err != nil {
return err
}
@@ -1089,7 +1088,7 @@ func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) er
for i := 0; i < len(entries); i++ {
innerDir, ok := entries[i].(fs.Directory)
if ok {
err := f.recurse(ctx, innerDir.Remote(), list)
err := f.recurse(innerDir.Remote(), list)
if err != nil {
return err
}
@@ -1106,21 +1105,21 @@ func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) er
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
fs.Debugf(f, "list recursively from '%s'", dir)
// we check if the source FS supports ListR
// if it does, we'll use that to get all the entries, cache them and return
do := f.Fs.Features().ListR
if do != nil {
return do(ctx, dir, func(entries fs.DirEntries) error {
return do(dir, func(entries fs.DirEntries) error {
// we got called back with a set of entries so let's cache them and call the original callback
for _, entry := range entries {
switch o := entry.(type) {
case fs.Object:
_ = f.cache.AddObject(ObjectFromOriginal(ctx, f, o))
_ = f.cache.AddObject(ObjectFromOriginal(f, o))
case fs.Directory:
_ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o))
_ = f.cache.AddDir(DirectoryFromOriginal(f, o))
default:
return errors.Errorf("Unknown object type %T", entry)
}
@@ -1133,7 +1132,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
// if we're here, we're gonna do a standard recursive traversal and cache everything
list := walk.NewListRHelper(callback)
err = f.recurse(ctx, dir, list)
err = f.recurse(dir, list)
if err != nil {
return err
}
@@ -1142,9 +1141,9 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
}
// Mkdir makes the directory (container, bucket)
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
func (f *Fs) Mkdir(dir string) error {
fs.Debugf(f, "mkdir '%s'", dir)
err := f.Fs.Mkdir(ctx, dir)
err := f.Fs.Mkdir(dir)
if err != nil {
return err
}
@@ -1172,7 +1171,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
}
// Rmdir removes the directory (container, bucket) if empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
func (f *Fs) Rmdir(dir string) error {
fs.Debugf(f, "rmdir '%s'", dir)
if f.opt.TempWritePath != "" {
@@ -1182,9 +1181,9 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// we check if the source exists on the remote and make the same move on it too if it does
// otherwise, we skip this step
_, err := f.UnWrap().List(ctx, dir)
_, err := f.UnWrap().List(dir)
if err == nil {
err := f.Fs.Rmdir(ctx, dir)
err := f.Fs.Rmdir(dir)
if err != nil {
return err
}
@@ -1192,10 +1191,10 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
}
var queuedEntries []*Object
err = walk.ListR(ctx, f.tempFs, dir, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
err = walk.ListR(f.tempFs, dir, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
for _, o := range entries {
if oo, ok := o.(fs.Object); ok {
co := ObjectFromOriginal(ctx, f, oo)
co := ObjectFromOriginal(f, oo)
queuedEntries = append(queuedEntries, co)
}
}
@@ -1212,7 +1211,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
}
}
} else {
err := f.Fs.Rmdir(ctx, dir)
err := f.Fs.Rmdir(dir)
if err != nil {
return err
}
@@ -1243,7 +1242,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
fs.Debugf(f, "move dir '%s'/'%s' -> '%s'/'%s'", src.Root(), srcRemote, f.Root(), dstRemote)
do := f.Fs.Features().DirMove
@@ -1265,8 +1264,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
f.backgroundRunner.pause()
defer f.backgroundRunner.play()
_, errInWrap := srcFs.UnWrap().List(ctx, srcRemote)
_, errInTemp := f.tempFs.List(ctx, srcRemote)
_, errInWrap := srcFs.UnWrap().List(srcRemote)
_, errInTemp := f.tempFs.List(srcRemote)
// not found in either fs
if errInWrap != nil && errInTemp != nil {
return fs.ErrorDirNotFound
@@ -1275,7 +1274,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// we check if the source exists on the remote and make the same move on it too if it does
// otherwise, we skip this step
if errInWrap == nil {
err := do(ctx, srcFs.UnWrap(), srcRemote, dstRemote)
err := do(srcFs.UnWrap(), srcRemote, dstRemote)
if err != nil {
return err
}
@@ -1288,10 +1287,10 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
var queuedEntries []*Object
err := walk.ListR(ctx, f.tempFs, srcRemote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
err := walk.ListR(f.tempFs, srcRemote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
for _, o := range entries {
if oo, ok := o.(fs.Object); ok {
co := ObjectFromOriginal(ctx, f, oo)
co := ObjectFromOriginal(f, oo)
queuedEntries = append(queuedEntries, co)
if co.tempFileStartedUpload() {
fs.Errorf(co, "can't move - upload has already started. need to finish that")
@@ -1312,16 +1311,16 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
fs.Errorf(srcRemote, "dirmove: can't move dir in temp fs")
return fs.ErrorCantDirMove
}
err = do(ctx, f.tempFs, srcRemote, dstRemote)
err = do(f.tempFs, srcRemote, dstRemote)
if err != nil {
return err
}
err = f.cache.ReconcileTempUploads(ctx, f)
err = f.cache.ReconcileTempUploads(f)
if err != nil {
return err
}
} else {
err := do(ctx, srcFs.UnWrap(), srcRemote, dstRemote)
err := do(srcFs.UnWrap(), srcRemote, dstRemote)
if err != nil {
return err
}
@@ -1427,10 +1426,10 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i
}
}
type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
// put in to the remote path
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
var err error
var obj fs.Object
@@ -1441,7 +1440,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
_ = f.cache.ExpireDir(parentCd)
f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
obj, err = f.tempFs.Put(ctx, in, src, options...)
obj, err = f.tempFs.Put(in, src, options...)
if err != nil {
fs.Errorf(obj, "put: failed to upload in temp fs: %v", err)
return nil, err
@@ -1456,14 +1455,14 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
// if cache writes is enabled write it first through cache
} else if f.opt.StoreWrites {
f.cacheReader(in, src, func(inn io.Reader) {
obj, err = put(ctx, inn, src, options...)
obj, err = put(inn, src, options...)
})
if err == nil {
fs.Debugf(obj, "put: uploaded to remote fs and saved in cache")
}
// last option: save it directly in remote fs
} else {
obj, err = put(ctx, in, src, options...)
obj, err = put(in, src, options...)
if err == nil {
fs.Debugf(obj, "put: uploaded to remote fs")
}
@@ -1475,7 +1474,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
}
// cache the new file
cachedObj := ObjectFromOriginal(ctx, f, obj)
cachedObj := ObjectFromOriginal(f, obj)
// deleting cached chunks and info to be replaced with new ones
_ = f.cache.RemoveObject(cachedObj.abs())
@@ -1498,33 +1497,33 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
}
// Put in to the remote path with the modTime given of the given size
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
fs.Debugf(f, "put data at '%s'", src.Remote())
return f.put(ctx, in, src, options, f.Fs.Put)
return f.put(in, src, options, f.Fs.Put)
}
// PutUnchecked uploads the object
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
do := f.Fs.Features().PutUnchecked
if do == nil {
return nil, errors.New("can't PutUnchecked")
}
fs.Debugf(f, "put data unchecked in '%s'", src.Remote())
return f.put(ctx, in, src, options, do)
return f.put(in, src, options, do)
}
// PutStream uploads the object
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
do := f.Fs.Features().PutStream
if do == nil {
return nil, errors.New("can't PutStream")
}
fs.Debugf(f, "put data streaming in '%s'", src.Remote())
return f.put(ctx, in, src, options, do)
return f.put(in, src, options, do)
}
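
Put, PutUnchecked and PutStream differ only in which backend operation they hand to f.put; the putFn type keeps the temp-fs queueing, cached-write and direct-write branches in one place. The dispatch in miniature (standalone sketch, all names hypothetical):

type putFn func(data string) error

// one implementation serves several entry points
func putCommon(data string, doPut putFn) error {
	// shared bookkeeping (cache expiry, change notification) happens once here
	return doPut(data)
}

func putChecked(data string) error   { return putCommon(data, backendPut) }
func putStreaming(data string) error { return putCommon(data, backendPutStream) }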
// Copy src to this remote using server side copy operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
fs.Debugf(f, "copy obj '%s' -> '%s'", src, remote)
do := f.Fs.Features().Copy
@@ -1544,7 +1543,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, fs.ErrorCantCopy
}
// refresh from source or abort
if err := srcObj.refreshFromSource(ctx, false); err != nil {
if err := srcObj.refreshFromSource(false); err != nil {
fs.Errorf(f, "can't copy %v - %v", src, err)
return nil, fs.ErrorCantCopy
}
@@ -1563,7 +1562,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
}
obj, err := do(ctx, srcObj.Object, remote)
obj, err := do(srcObj.Object, remote)
if err != nil {
fs.Errorf(srcObj, "error moving in cache: %v", err)
return nil, err
@@ -1571,7 +1570,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(obj, "copy: file copied")
// persist new
co := ObjectFromOriginal(ctx, f, obj).persist()
co := ObjectFromOriginal(f, obj).persist()
fs.Debugf(co, "copy: added to cache")
// expire the destination path
parentCd := NewDirectory(f, cleanPath(path.Dir(co.Remote())))
@@ -1598,7 +1597,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// Move src to this remote using server side move operations.
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
fs.Debugf(f, "moving obj '%s' -> %s", src, remote)
// if source fs doesn't support move abort
@@ -1619,7 +1618,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, fs.ErrorCantMove
}
// refresh from source or abort
if err := srcObj.refreshFromSource(ctx, false); err != nil {
if err := srcObj.refreshFromSource(false); err != nil {
fs.Errorf(f, "can't move %v - %v", src, err)
return nil, fs.ErrorCantMove
}
@@ -1655,7 +1654,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(srcObj, "move: queued file moved to %v", remote)
}
obj, err := do(ctx, srcObj.Object, remote)
obj, err := do(srcObj.Object, remote)
if err != nil {
fs.Errorf(srcObj, "error moving: %v", err)
return nil, err
@@ -1680,7 +1679,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// advertise to ChangeNotify if wrapped doesn't do that
f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
// persist new
cachedObj := ObjectFromOriginal(ctx, f, obj).persist()
cachedObj := ObjectFromOriginal(f, obj).persist()
fs.Debugf(cachedObj, "move: added to cache")
// expire new parent
parentCd = NewDirectory(f, cleanPath(path.Dir(cachedObj.Remote())))
@@ -1702,7 +1701,7 @@ func (f *Fs) Hashes() hash.Set {
}
// Purge all files in the root and the root directory
func (f *Fs) Purge(ctx context.Context) error {
func (f *Fs) Purge() error {
fs.Infof(f, "purging cache")
f.cache.Purge()
@@ -1711,7 +1710,7 @@ func (f *Fs) Purge(ctx context.Context) error {
return nil
}
err := do(ctx)
err := do()
if err != nil {
return err
}
@@ -1720,7 +1719,7 @@ func (f *Fs) Purge(ctx context.Context) error {
}
// CleanUp the trash in the Fs
func (f *Fs) CleanUp(ctx context.Context) error {
func (f *Fs) CleanUp() error {
f.CleanUpCache(false)
do := f.Fs.Features().CleanUp
@@ -1728,16 +1727,16 @@ func (f *Fs) CleanUp(ctx context.Context) error {
return nil
}
return do(ctx)
return do()
}
// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
func (f *Fs) About() (*fs.Usage, error) {
do := f.Fs.Features().About
if do == nil {
return nil, errors.New("About not supported")
}
return do(ctx)
return do()
}
// Stats returns stats about the cache storage
@@ -1864,24 +1863,6 @@ func cleanPath(p string) string {
return p
}
// UserInfo returns info about the connected user
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
do := f.Fs.Features().UserInfo
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx)
}
// Disconnect the current user
func (f *Fs) Disconnect(ctx context.Context) error {
do := f.Fs.Features().Disconnect
if do == nil {
return fs.ErrorNotImplemented
}
return do(ctx)
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
@@ -1897,6 +1878,4 @@ var (
_ fs.ListRer = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
)
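
The var _ ... = (*Fs)(nil) block is Go's compile-time interface check: assigning a typed nil to a blank variable of the interface type costs nothing at runtime but fails the build if *Fs ever stops satisfying one of the listed interfaces. The idiom in isolation:

type Sized interface{ Size() int64 }

type File struct{}

func (File) Size() int64 { return 0 }

var _ Sized = File{} // compile error here if File loses Size()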


@@ -4,7 +4,6 @@ package cache_test
import (
"bytes"
"context"
"encoding/base64"
goflag "flag"
"fmt"
@@ -22,20 +21,19 @@ import (
"testing"
"time"
"github.com/ncw/rclone/backend/cache"
"github.com/ncw/rclone/backend/crypt"
_ "github.com/ncw/rclone/backend/drive"
"github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/object"
"github.com/ncw/rclone/fs/rc"
"github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/vfs"
"github.com/ncw/rclone/vfs/vfsflags"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/cache"
"github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/drive"
"github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -122,7 +120,7 @@ func TestInternalListRootAndInnerRemotes(t *testing.T) {
require.NoError(t, err)
listRootInner, err := runInstance.list(t, rootFs, innerFolder)
require.NoError(t, err)
listInner, err := rootFs2.List(context.Background(), "")
listInner, err := rootFs2.List("")
require.NoError(t, err)
require.Len(t, listRoot, 1)
@@ -140,10 +138,10 @@ func TestInternalVfsCache(t *testing.T) {
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "test")
err := rootFs.Mkdir("test")
require.NoError(t, err)
runInstance.writeObjectString(t, rootFs, "test/second", "content")
_, err = rootFs.List(context.Background(), "test")
_, err = rootFs.List("test")
require.NoError(t, err)
testReader := runInstance.randomReader(t, testSize)
@@ -268,7 +266,7 @@ func TestInternalObjNotFound(t *testing.T) {
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
obj, err := rootFs.NewObject(context.Background(), "404")
obj, err := rootFs.NewObject("404")
require.Error(t, err)
require.Nil(t, obj)
}
@@ -356,8 +354,8 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
testData2, err = base64.StdEncoding.DecodeString(cryptedText2Base64)
require.NoError(t, err)
} else {
testData1 = []byte(random.String(100))
testData2 = []byte(random.String(200))
testData1 = []byte(fstest.RandomString(100))
testData2 = []byte(fstest.RandomString(200))
}
// write the object
@@ -447,7 +445,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
require.NoError(t, err)
log.Printf("original size: %v", originalSize)
o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
require.NoError(t, err)
expectedSize := int64(len([]byte("test content")))
var data2 []byte
@@ -459,7 +457,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
data2 = []byte("test content")
}
objInfo := object.NewStaticObjectInfo(runInstance.encryptRemoteIfNeeded(t, "data.bin"), time.Now(), int64(len(data2)), true, nil, cfs.UnWrap())
err = o.Update(context.Background(), bytes.NewReader(data2), objInfo)
err = o.Update(bytes.NewReader(data2), objInfo)
require.NoError(t, err)
require.Equal(t, int64(len(data2)), o.Size())
log.Printf("updated size: %v", len(data2))
@@ -505,9 +503,9 @@ func TestInternalMoveWithNotify(t *testing.T) {
} else {
testData = []byte("test content")
}
_ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test"))
_ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/one"))
_ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/second"))
_ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test"))
_ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/one"))
_ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/second"))
srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData)
// list in mount
@@ -517,7 +515,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
require.NoError(t, err)
// move file
_, err = cfs.UnWrap().Features().Move(context.Background(), srcObj, dstName)
_, err = cfs.UnWrap().Features().Move(srcObj, dstName)
require.NoError(t, err)
err = runInstance.retryBlock(func() error {
@@ -591,9 +589,9 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
} else {
testData = []byte("test content")
}
err = rootFs.Mkdir(context.Background(), "test")
err = rootFs.Mkdir("test")
require.NoError(t, err)
err = rootFs.Mkdir(context.Background(), "test/one")
err = rootFs.Mkdir("test/one")
require.NoError(t, err)
srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData)
@@ -610,7 +608,7 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
require.False(t, found)
// move file
_, err = cfs.UnWrap().Features().Move(context.Background(), srcObj, dstName)
_, err = cfs.UnWrap().Features().Move(srcObj, dstName)
require.NoError(t, err)
err = runInstance.retryBlock(func() error {
@@ -672,23 +670,23 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
// update in the wrapped fs
o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
require.NoError(t, err)
wrappedTime := time.Now().Add(-1 * time.Hour)
err = o.SetModTime(context.Background(), wrappedTime)
err = o.SetModTime(wrappedTime)
require.NoError(t, err)
// get a new instance from the cache
co, err := rootFs.NewObject(context.Background(), "data.bin")
co, err := rootFs.NewObject("data.bin")
require.NoError(t, err)
require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String())
require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
cfs.DirCacheFlush() // flush the cache
// get a new instance from the cache
co, err = rootFs.NewObject(context.Background(), "data.bin")
co, err = rootFs.NewObject("data.bin")
require.NoError(t, err)
require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix())
}
func TestInternalChangeSeenAfterRc(t *testing.T) {
@@ -715,19 +713,19 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
// update in the wrapped fs
o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
require.NoError(t, err)
wrappedTime := time.Now().Add(-1 * time.Hour)
err = o.SetModTime(context.Background(), wrappedTime)
err = o.SetModTime(wrappedTime)
require.NoError(t, err)
// get a new instance from the cache
co, err := rootFs.NewObject(context.Background(), "data.bin")
co, err := rootFs.NewObject("data.bin")
require.NoError(t, err)
require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String())
require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
// Call the rc function
m, err := cacheExpire.Fn(context.Background(), rc.Params{"remote": "data.bin"})
m, err := cacheExpire.Fn(rc.Params{"remote": "data.bin"})
require.NoError(t, err)
require.Contains(t, m, "status")
require.Contains(t, m, "message")
@@ -735,9 +733,9 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
require.Contains(t, m["message"], "cached file cleared")
// get a new instance from the cache
co, err = rootFs.NewObject(context.Background(), "data.bin")
co, err = rootFs.NewObject("data.bin")
require.NoError(t, err)
require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix())
_, err = runInstance.list(t, rootFs, "")
require.NoError(t, err)
@@ -751,7 +749,7 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
require.Len(t, li1, 1)
// Call the rc function
m, err = cacheExpire.Fn(context.Background(), rc.Params{"remote": "/"})
m, err = cacheExpire.Fn(rc.Params{"remote": "/"})
require.NoError(t, err)
require.Contains(t, m, "status")
require.Contains(t, m, "message")
@@ -796,7 +794,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
// create some rand test data
testData := randStringBytes(int(int64(totalChunks-1)*chunkSize + chunkSize/2))
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
o, err := cfs.NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
o, err := cfs.NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
require.NoError(t, err)
co, ok := o.(*cache.Object)
require.True(t, ok)
@@ -835,7 +833,7 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) {
require.NoError(t, err)
require.Len(t, l, 1)
err = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/third"))
err = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/third"))
require.NoError(t, err)
l, err = runInstance.list(t, rootFs, "test")
@@ -870,14 +868,14 @@ func TestInternalBug2117(t *testing.T) {
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
err = cfs.UnWrap().Mkdir(context.Background(), "test")
err = cfs.UnWrap().Mkdir("test")
require.NoError(t, err)
for i := 1; i <= 4; i++ {
err = cfs.UnWrap().Mkdir(context.Background(), fmt.Sprintf("test/dir%d", i))
err = cfs.UnWrap().Mkdir(fmt.Sprintf("test/dir%d", i))
require.NoError(t, err)
for j := 1; j <= 4; j++ {
err = cfs.UnWrap().Mkdir(context.Background(), fmt.Sprintf("test/dir%d/dir%d", i, j))
err = cfs.UnWrap().Mkdir(fmt.Sprintf("test/dir%d/dir%d", i, j))
require.NoError(t, err)
runInstance.writeObjectString(t, cfs.UnWrap(), fmt.Sprintf("test/dir%d/dir%d/test.txt", i, j), "test")
@@ -1082,10 +1080,10 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
}
if purge {
_ = f.Features().Purge(context.Background())
_ = f.Features().Purge()
require.NoError(t, err)
}
err = f.Mkdir(context.Background(), "")
err = f.Mkdir("")
require.NoError(t, err)
if r.useMount && !r.isMounted {
r.mountFs(t, f)
@@ -1099,7 +1097,7 @@ func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
r.unmountFs(t, f)
}
err := f.Features().Purge(context.Background())
err := f.Features().Purge()
require.NoError(t, err)
cfs, err := r.getCacheFs(f)
require.NoError(t, err)
@@ -1201,7 +1199,7 @@ func (r *run) writeRemoteReader(t *testing.T, f fs.Fs, remote string, in io.Read
func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object {
in := bytes.NewReader(data)
_ = r.writeObjectReader(t, f, remote, in)
o, err := f.NewObject(context.Background(), remote)
o, err := f.NewObject(remote)
require.NoError(t, err)
require.Equal(t, int64(len(data)), o.Size())
return o
@@ -1210,7 +1208,7 @@ func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte
func (r *run) writeObjectReader(t *testing.T, f fs.Fs, remote string, in io.Reader) fs.Object {
modTime := time.Now()
objInfo := object.NewStaticObjectInfo(remote, modTime, -1, true, nil, f)
obj, err := f.Put(context.Background(), in, objInfo)
obj, err := f.Put(in, objInfo)
require.NoError(t, err)
if r.useMount {
r.vfs.WaitForWriters(10 * time.Second)
@@ -1230,18 +1228,18 @@ func (r *run) updateObjectRemote(t *testing.T, f fs.Fs, remote string, data1 []b
err = ioutil.WriteFile(path.Join(r.mntDir, remote), data2, 0600)
require.NoError(t, err)
r.vfs.WaitForWriters(10 * time.Second)
obj, err = f.NewObject(context.Background(), remote)
obj, err = f.NewObject(remote)
} else {
in1 := bytes.NewReader(data1)
in2 := bytes.NewReader(data2)
objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
obj, err = f.Put(context.Background(), in1, objInfo1)
obj, err = f.Put(in1, objInfo1)
require.NoError(t, err)
obj, err = f.NewObject(context.Background(), remote)
obj, err = f.NewObject(remote)
require.NoError(t, err)
err = obj.Update(context.Background(), in2, objInfo2)
err = obj.Update(in2, objInfo2)
}
require.NoError(t, err)
@@ -1270,7 +1268,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
return checkSample, err
}
} else {
co, err := f.NewObject(context.Background(), remote)
co, err := f.NewObject(remote)
if err != nil {
return checkSample, err
}
@@ -1285,7 +1283,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
func (r *run) readDataFromObj(t *testing.T, o fs.Object, offset, end int64, noLengthCheck bool) []byte {
size := end - offset
checkSample := make([]byte, size)
reader, err := o.Open(context.Background(), &fs.SeekOption{Offset: offset})
reader, err := o.Open(&fs.SeekOption{Offset: offset})
require.NoError(t, err)
totalRead, err := io.ReadFull(reader, checkSample)
if (err == io.EOF || err == io.ErrUnexpectedEOF) && noLengthCheck {
@@ -1302,7 +1300,7 @@ func (r *run) mkdir(t *testing.T, f fs.Fs, remote string) {
if r.useMount {
err = os.Mkdir(path.Join(r.mntDir, remote), 0700)
} else {
err = f.Mkdir(context.Background(), remote)
err = f.Mkdir(remote)
}
require.NoError(t, err)
}
@@ -1314,11 +1312,11 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
err = os.Remove(path.Join(r.mntDir, remote))
} else {
var obj fs.Object
obj, err = f.NewObject(context.Background(), remote)
obj, err = f.NewObject(remote)
if err != nil {
err = f.Rmdir(context.Background(), remote)
err = f.Rmdir(remote)
} else {
err = obj.Remove(context.Background())
err = obj.Remove()
}
}
@@ -1336,7 +1334,7 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error)
}
} else {
var list fs.DirEntries
list, err = f.List(context.Background(), remote)
list, err = f.List(remote)
for _, ll := range list {
l = append(l, ll)
}
@@ -1355,7 +1353,7 @@ func (r *run) listPath(t *testing.T, f fs.Fs, remote string) []string {
}
} else {
var list fs.DirEntries
list, err = f.List(context.Background(), remote)
list, err = f.List(remote)
for _, ll := range list {
l = append(l, ll.Remote())
}
@@ -1395,7 +1393,7 @@ func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
}
r.vfs.WaitForWriters(10 * time.Second)
} else if rootFs.Features().DirMove != nil {
err = rootFs.Features().DirMove(context.Background(), rootFs, src, dst)
err = rootFs.Features().DirMove(rootFs, src, dst)
if err != nil {
return err
}
@@ -1417,11 +1415,11 @@ func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
}
r.vfs.WaitForWriters(10 * time.Second)
} else if rootFs.Features().Move != nil {
obj1, err := rootFs.NewObject(context.Background(), src)
obj1, err := rootFs.NewObject(src)
if err != nil {
return err
}
_, err = rootFs.Features().Move(context.Background(), obj1, dst)
_, err = rootFs.Features().Move(obj1, dst)
if err != nil {
return err
}
@@ -1443,11 +1441,11 @@ func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
}
r.vfs.WaitForWriters(10 * time.Second)
} else if rootFs.Features().Copy != nil {
obj, err := rootFs.NewObject(context.Background(), src)
obj, err := rootFs.NewObject(src)
if err != nil {
return err
}
_, err = rootFs.Features().Copy(context.Background(), obj, dst)
_, err = rootFs.Features().Copy(obj, dst)
if err != nil {
return err
}
@@ -1469,11 +1467,11 @@ func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error)
}
return fi.ModTime(), nil
}
obj1, err := rootFs.NewObject(context.Background(), src)
obj1, err := rootFs.NewObject(src)
if err != nil {
return time.Time{}, err
}
return obj1.ModTime(context.Background()), nil
return obj1.ModTime(), nil
}
func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
@@ -1486,7 +1484,7 @@ func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
}
return fi.Size(), nil
}
obj1, err := rootFs.NewObject(context.Background(), src)
obj1, err := rootFs.NewObject(src)
if err != nil {
return int64(0), err
}
@@ -1509,14 +1507,14 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
_, err = f.WriteString(data + append)
} else {
var obj1 fs.Object
obj1, err = rootFs.NewObject(context.Background(), src)
obj1, err = rootFs.NewObject(src)
if err != nil {
return err
}
data1 := []byte(data + append)
r := bytes.NewReader(data1)
objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
err = obj1.Update(context.Background(), r, objInfo1)
err = obj1.Update(r, objInfo1)
}
return err


@@ -9,9 +9,9 @@ import (
"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
"github.com/rclone/rclone/cmd/mount"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/ncw/rclone/cmd/mount"
"github.com/ncw/rclone/cmd/mountlib"
"github.com/ncw/rclone/fs"
"github.com/stretchr/testify/require"
)


@@ -9,10 +9,10 @@ import (
"time"
"github.com/billziss-gh/cgofuse/fuse"
"github.com/ncw/rclone/cmd/cmount"
"github.com/ncw/rclone/cmd/mountlib"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd/cmount"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/require"
)


@@ -7,18 +7,15 @@ package cache_test
import (
"testing"
"github.com/rclone/rclone/backend/cache"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fstest/fstests"
"github.com/ncw/rclone/backend/cache"
_ "github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
UnimplementableFsMethods: []string{"PublicLink", "MergeDirs", "OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
})
}


@@ -3,7 +3,6 @@
package cache_test
import (
"context"
"fmt"
"math/rand"
"os"
@@ -12,9 +11,9 @@ import (
"testing"
"time"
"github.com/rclone/rclone/backend/cache"
_ "github.com/rclone/rclone/backend/drive"
"github.com/rclone/rclone/fs"
"github.com/ncw/rclone/backend/cache"
_ "github.com/ncw/rclone/backend/drive"
"github.com/ncw/rclone/fs"
"github.com/stretchr/testify/require"
)
@@ -86,11 +85,11 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "one")
err := rootFs.Mkdir("one")
require.NoError(t, err)
err = rootFs.Mkdir(context.Background(), "one/test")
err = rootFs.Mkdir("one/test")
require.NoError(t, err)
err = rootFs.Mkdir(context.Background(), "second")
err = rootFs.Mkdir("second")
require.NoError(t, err)
// create some rand test data
@@ -123,11 +122,11 @@ func TestInternalUploadTempPathCleaned(t *testing.T) {
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "one")
err := rootFs.Mkdir("one")
require.NoError(t, err)
err = rootFs.Mkdir(context.Background(), "one/test")
err = rootFs.Mkdir("one/test")
require.NoError(t, err)
err = rootFs.Mkdir(context.Background(), "second")
err = rootFs.Mkdir("second")
require.NoError(t, err)
// create some rand test data
@@ -166,7 +165,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "test")
err := rootFs.Mkdir("test")
require.NoError(t, err)
minSize := 5242880
maxSize := 10485760
@@ -234,9 +233,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
err = runInstance.dirMove(t, rootFs, "test", "second")
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject(context.Background(), "test/one")
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
_, err = rootFs.NewObject(context.Background(), "second/one")
_, err = rootFs.NewObject("second/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -257,7 +256,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
err = runInstance.rm(t, rootFs, "test")
require.Error(t, err)
require.Contains(t, err.Error(), "directory not empty")
_, err = rootFs.NewObject(context.Background(), "test/one")
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -271,9 +270,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
if err != errNotSupported {
require.NoError(t, err)
// try to read from it
_, err = rootFs.NewObject(context.Background(), "test/one")
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
_, err = rootFs.NewObject(context.Background(), "test/second")
_, err = rootFs.NewObject("test/second")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
@@ -290,9 +289,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject(context.Background(), "test/one")
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject(context.Background(), "test/third")
_, err = rootFs.NewObject("test/third")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
@@ -307,7 +306,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
// test Remove -- allowed
err = runInstance.rm(t, rootFs, "test/one")
require.NoError(t, err)
_, err = rootFs.NewObject(context.Background(), "test/one")
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -319,7 +318,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
require.NoError(t, err)
err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
require.NoError(t, err)
obj2, err := rootFs.NewObject(context.Background(), "test/one")
obj2, err := rootFs.NewObject("test/one")
require.NoError(t, err)
data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
require.Equal(t, "one content updated", string(data2))
@@ -367,7 +366,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
err = runInstance.dirMove(t, rootFs, "test", "second")
if err != errNotSupported {
require.Error(t, err)
_, err = rootFs.NewObject(context.Background(), "test/one")
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -379,7 +378,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
// test Rmdir
err = runInstance.rm(t, rootFs, "test")
require.Error(t, err)
_, err = rootFs.NewObject(context.Background(), "test/one")
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -390,9 +389,9 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
if err != errNotSupported {
require.Error(t, err)
// try to read from it
_, err = rootFs.NewObject(context.Background(), "test/one")
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject(context.Background(), "test/second")
_, err = rootFs.NewObject("test/second")
require.Error(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -405,9 +404,9 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject(context.Background(), "test/one")
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject(context.Background(), "test/third")
_, err = rootFs.NewObject("test/third")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
@@ -422,7 +421,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
// test Remove
err = runInstance.rm(t, rootFs, "test/one")
require.Error(t, err)
_, err = rootFs.NewObject(context.Background(), "test/one")
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))


@@ -3,11 +3,10 @@
package cache
import (
"context"
"path"
"time"
"github.com/rclone/rclone/fs"
"github.com/ncw/rclone/fs"
)
// Directory is a generic dir that stores basic information about it
@@ -56,7 +55,7 @@ func ShallowDirectory(f *Fs, remote string) *Directory {
}
// DirectoryFromOriginal builds one from a generic fs.Directory
func DirectoryFromOriginal(ctx context.Context, f *Fs, d fs.Directory) *Directory {
func DirectoryFromOriginal(f *Fs, d fs.Directory) *Directory {
var cd *Directory
fullRemote := path.Join(f.Root(), d.Remote())
@@ -68,7 +67,7 @@ func DirectoryFromOriginal(ctx context.Context, f *Fs, d fs.Directory) *Director
CacheFs: f,
Name: name,
Dir: dir,
CacheModTime: d.ModTime(ctx).UnixNano(),
CacheModTime: d.ModTime().UnixNano(),
CacheSize: d.Size(),
CacheItems: d.Items(),
CacheType: "Directory",
@@ -111,7 +110,7 @@ func (d *Directory) parentRemote() string {
}
// ModTime returns the cached ModTime
func (d *Directory) ModTime(ctx context.Context) time.Time {
func (d *Directory) ModTime() time.Time {
return time.Unix(0, d.CacheModTime)
}

View File

@@ -3,7 +3,6 @@
package cache
import (
"context"
"fmt"
"io"
"path"
@@ -12,9 +11,9 @@ import (
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/operations"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
)
var uploaderMap = make(map[string]*backgroundWriter)
@@ -41,7 +40,6 @@ func initBackgroundUploader(fs *Fs) (*backgroundWriter, error) {
// Handle is managing the read/write/seek operations on an open handle
type Handle struct {
ctx context.Context
cachedObject *Object
cfs *Fs
memory *Memory
@@ -60,9 +58,8 @@ type Handle struct {
}
// NewObjectHandle returns a new Handle for an existing Object
func NewObjectHandle(ctx context.Context, o *Object, cfs *Fs) *Handle {
func NewObjectHandle(o *Object, cfs *Fs) *Handle {
r := &Handle{
ctx: ctx,
cachedObject: o,
cfs: cfs,
offset: 0,
@@ -354,7 +351,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
r := w.rc
if w.rc == nil {
r, err = w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
return w.r.cachedObject.Object.Open(w.r.ctx, &fs.RangeOption{Start: offset, End: end - 1})
return w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1})
})
if err != nil {
return nil, err
@@ -364,7 +361,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
if !closeOpen {
if do, ok := r.(fs.RangeSeeker); ok {
_, err = do.RangeSeek(w.r.ctx, offset, io.SeekStart, end-offset)
_, err = do.RangeSeek(offset, io.SeekStart, end-offset)
return r, err
} else if do, ok := r.(io.Seeker); ok {
_, err = do.Seek(offset, io.SeekStart)
@@ -374,7 +371,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
_ = w.rc.Close()
return w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
r, err = w.r.cachedObject.Object.Open(w.r.ctx, &fs.RangeOption{Start: offset, End: end - 1})
r, err = w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1})
if err != nil {
return nil, err
}
@@ -452,7 +449,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
// we seem to be getting only errors so we abort
if err != nil {
fs.Errorf(w, "object open failed %v: %v", chunkStart, err)
err = w.r.cachedObject.refreshFromSource(w.r.ctx, true)
err = w.r.cachedObject.refreshFromSource(true)
if err != nil {
fs.Errorf(w, "%v", err)
}
@@ -465,7 +462,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
sourceRead, err = io.ReadFull(w.rc, data)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err)
err = w.r.cachedObject.refreshFromSource(w.r.ctx, true)
err = w.r.cachedObject.refreshFromSource(true)
if err != nil {
fs.Errorf(w, "%v", err)
}
@@ -591,7 +588,7 @@ func (b *backgroundWriter) run() {
remote := b.fs.cleanRootFromPath(absPath)
b.notify(remote, BackgroundUploadStarted, nil)
fs.Infof(remote, "background upload: started upload")
err = operations.MoveFile(context.TODO(), b.fs.UnWrap(), b.fs.tempFs, remote, remote)
err = operations.MoveFile(b.fs.UnWrap(), b.fs.tempFs, remote, remote)
if err != nil {
b.notify(remote, BackgroundUploadError, err)
_ = b.fs.cache.rollbackPendingUpload(absPath)
@@ -601,14 +598,14 @@ func (b *backgroundWriter) run() {
// clean empty dirs up to root
thisDir := cleanPath(path.Dir(remote))
for thisDir != "" {
thisList, err := b.fs.tempFs.List(context.TODO(), thisDir)
thisList, err := b.fs.tempFs.List(thisDir)
if err != nil {
break
}
if len(thisList) > 0 {
break
}
err = b.fs.tempFs.Rmdir(context.TODO(), thisDir)
err = b.fs.tempFs.Rmdir(thisDir)
fs.Debugf(thisDir, "cleaned from temp path")
if err != nil {
break

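After a successful background upload, the loop above walks upward from the file's directory, removing each parent that has become empty and stopping at the first non-empty one. A self-contained sketch of the same bottom-up cleanup, using os on a local tree instead of rclone's Fs interface:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// cleanEmptyParents removes dir and its ancestors up to (not including)
// root, stopping as soon as a directory is non-empty or cannot be read.
func cleanEmptyParents(root, dir string) {
	for dir != root && dir != "." && dir != string(filepath.Separator) {
		entries, err := os.ReadDir(dir)
		if err != nil || len(entries) > 0 {
			break // stop on error or at the first non-empty directory
		}
		if err := os.Remove(dir); err != nil {
			break
		}
		fmt.Println("cleaned", dir)
		dir = filepath.Dir(dir)
	}
}

func main() {
	root, _ := os.MkdirTemp("", "cleanup")
	leaf := filepath.Join(root, "a", "b", "c")
	_ = os.MkdirAll(leaf, 0o700)
	cleanEmptyParents(root, leaf) // removes c, b and a; keeps root
}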

@@ -3,16 +3,15 @@
package cache
import (
"context"
"io"
"path"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/readers"
)
const (
@@ -69,7 +68,7 @@ func NewObject(f *Fs, remote string) *Object {
}
// ObjectFromOriginal builds one from a generic fs.Object
func ObjectFromOriginal(ctx context.Context, f *Fs, o fs.Object) *Object {
func ObjectFromOriginal(f *Fs, o fs.Object) *Object {
var co *Object
fullRemote := cleanPath(path.Join(f.Root(), o.Remote()))
dir, name := path.Split(fullRemote)
@@ -93,13 +92,13 @@ func ObjectFromOriginal(ctx context.Context, f *Fs, o fs.Object) *Object {
CacheType: cacheType,
CacheTs: time.Now(),
}
co.updateData(ctx, o)
co.updateData(o)
return co
}
func (o *Object) updateData(ctx context.Context, source fs.Object) {
func (o *Object) updateData(source fs.Object) {
o.Object = source
o.CacheModTime = source.ModTime(ctx).UnixNano()
o.CacheModTime = source.ModTime().UnixNano()
o.CacheSize = source.Size()
o.CacheStorable = source.Storable()
o.CacheTs = time.Now()
@@ -131,20 +130,20 @@ func (o *Object) abs() string {
}
// ModTime returns the cached ModTime
func (o *Object) ModTime(ctx context.Context) time.Time {
_ = o.refresh(ctx)
func (o *Object) ModTime() time.Time {
_ = o.refresh()
return time.Unix(0, o.CacheModTime)
}
// Size returns the cached Size
func (o *Object) Size() int64 {
_ = o.refresh(context.TODO())
_ = o.refresh()
return o.CacheSize
}
// Storable returns the cached Storable
func (o *Object) Storable() bool {
_ = o.refresh(context.TODO())
_ = o.refresh()
return o.CacheStorable
}
@@ -152,18 +151,18 @@ func (o *Object) Storable() bool {
// all these conditions must be true to ignore a refresh
// 1. cache ts didn't expire yet
// 2. is not pending a notification from the wrapped fs
func (o *Object) refresh(ctx context.Context) error {
func (o *Object) refresh() error {
isNotified := o.CacheFs.isNotifiedRemote(o.Remote())
isExpired := time.Now().After(o.CacheTs.Add(time.Duration(o.CacheFs.opt.InfoAge)))
if !isExpired && !isNotified {
return nil
}
return o.refreshFromSource(ctx, true)
return o.refreshFromSource(true)
}
// refreshFromSource requests the original FS for the object in case it comes from a cached entry
func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
func (o *Object) refreshFromSource(force bool) error {
o.refreshMutex.Lock()
defer o.refreshMutex.Unlock()
var err error
@@ -173,29 +172,29 @@ func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
return nil
}
if o.isTempFile() {
liveObject, err = o.ParentFs.NewObject(ctx, o.Remote())
liveObject, err = o.ParentFs.NewObject(o.Remote())
err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
} else {
liveObject, err = o.CacheFs.Fs.NewObject(ctx, o.Remote())
liveObject, err = o.CacheFs.Fs.NewObject(o.Remote())
err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
}
if err != nil {
fs.Errorf(o, "error refreshing object in : %v", err)
return err
}
o.updateData(ctx, liveObject)
o.updateData(liveObject)
o.persist()
return nil
}
// SetModTime sets the ModTime of this object
func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
if err := o.refreshFromSource(ctx, false); err != nil {
func (o *Object) SetModTime(t time.Time) error {
if err := o.refreshFromSource(false); err != nil {
return err
}
err := o.Object.SetModTime(ctx, t)
err := o.Object.SetModTime(t)
if err != nil {
return err
}
@@ -208,19 +207,19 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
}
// Open is used to request a specific part of the file using fs.RangeOption
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
var err error
if o.Object == nil {
err = o.refreshFromSource(ctx, true)
err = o.refreshFromSource(true)
} else {
err = o.refresh(ctx)
err = o.refresh()
}
if err != nil {
return nil, err
}
cacheReader := NewObjectHandle(ctx, o, o.CacheFs)
cacheReader := NewObjectHandle(o, o.CacheFs)
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
@@ -239,8 +238,8 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
}
// Update will change the object data
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
if err := o.refreshFromSource(ctx, false); err != nil {
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
if err := o.refreshFromSource(false); err != nil {
return err
}
// pause background uploads if active
@@ -255,7 +254,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
fs.Debugf(o, "updating object contents with size %v", src.Size())
// FIXME use reliable upload
err := o.Object.Update(ctx, in, src, options...)
err := o.Object.Update(in, src, options...)
if err != nil {
fs.Errorf(o, "error updating source: %v", err)
return err
@@ -266,7 +265,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// advertise to ChangeNotify if wrapped doesn't do that
o.CacheFs.notifyChangeUpstreamIfNeeded(o.Remote(), fs.EntryObject)
o.CacheModTime = src.ModTime(ctx).UnixNano()
o.CacheModTime = src.ModTime().UnixNano()
o.CacheSize = src.Size()
o.CacheHashes = make(map[hash.Type]string)
o.CacheTs = time.Now()
@@ -276,8 +275,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// Remove deletes the object from both the cache and the source
func (o *Object) Remove(ctx context.Context) error {
if err := o.refreshFromSource(ctx, false); err != nil {
func (o *Object) Remove() error {
if err := o.refreshFromSource(false); err != nil {
return err
}
// pause background uploads if active
@@ -289,7 +288,7 @@ func (o *Object) Remove(ctx context.Context) error {
return errors.Errorf("%v is currently uploading, can't delete", o)
}
}
err := o.Object.Remove(ctx)
err := o.Object.Remove()
if err != nil {
return err
}
@@ -307,8 +306,8 @@ func (o *Object) Remove(ctx context.Context) error {
// Hash requests a hash of the object and stores in the cache
// since it might or might not be called, this is lazy loaded
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
_ = o.refresh(ctx)
func (o *Object) Hash(ht hash.Type) (string, error) {
_ = o.refresh()
if o.CacheHashes == nil {
o.CacheHashes = make(map[hash.Type]string)
}
@@ -317,10 +316,10 @@ func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
if found {
return cachedHash, nil
}
if err := o.refreshFromSource(ctx, false); err != nil {
if err := o.refreshFromSource(false); err != nil {
return "", err
}
liveHash, err := o.Object.Hash(ctx, ht)
liveHash, err := o.Object.Hash(ht)
if err != nil {
return "", err
}

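The refresh method above skips a round trip to the wrapped remote while the cached metadata is younger than the configured info age and no change notification is pending; only refreshFromSource actually consults the backend. The guard reduces to roughly this standalone sketch, with hypothetical types:

package main

import (
	"fmt"
	"time"
)

// entry caches object metadata fetched from a slow source.
type entry struct {
	fetchedAt time.Time
	infoAge   time.Duration
	notified  bool // a pending change notification marks the entry stale
}

// needsRefresh reports whether the source must be consulted again:
// either the entry expired or a notification invalidated it.
func (e *entry) needsRefresh(now time.Time) bool {
	expired := now.After(e.fetchedAt.Add(e.infoAge))
	return expired || e.notified
}

func main() {
	e := &entry{fetchedAt: time.Now(), infoAge: time.Hour}
	fmt.Println(e.needsRefresh(time.Now())) // false: fresh and unnotified
	e.notified = true
	fmt.Println(e.needsRefresh(time.Now())) // true: pending notification
}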

@@ -14,8 +14,8 @@ import (
"sync"
"time"
"github.com/ncw/rclone/fs"
cache "github.com/patrickmn/go-cache"
"github.com/rclone/rclone/fs"
"golang.org/x/net/websocket"
)


@@ -7,9 +7,9 @@ import (
"strings"
"time"
"github.com/ncw/rclone/fs"
cache "github.com/patrickmn/go-cache"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
)
// Memory is a wrapper of transient storage for a go-cache store


@@ -4,7 +4,6 @@ package cache
import (
"bytes"
"context"
"encoding/binary"
"encoding/json"
"fmt"
@@ -17,9 +16,9 @@ import (
"time"
bolt "github.com/coreos/bbolt"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/walk"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/walk"
)
// Constants
@@ -1015,7 +1014,7 @@ func (b *Persistent) SetPendingUploadToStarted(remote string) error {
}
// ReconcileTempUploads will recursively look for all the files in the temp directory and add them to the queue
func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) error {
func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
return b.db.Update(func(tx *bolt.Tx) error {
_ = tx.DeleteBucket([]byte(tempBucket))
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
@@ -1024,7 +1023,7 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) erro
}
var queuedEntries []fs.Object
err = walk.ListR(ctx, cacheFs.tempFs, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
err = walk.ListR(cacheFs.tempFs, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
for _, o := range entries {
if oo, ok := o.(fs.Object); ok {
queuedEntries = append(queuedEntries, oo)

File diff suppressed because it is too large


@@ -1,605 +0,0 @@
package chunker
import (
"bytes"
"context"
"flag"
"fmt"
"io/ioutil"
"path"
"regexp"
"strings"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Command line flags
var (
UploadKilobytes = flag.Int("upload-kilobytes", 0, "Upload size in Kilobytes, set this to test large uploads")
)
// test that chunking does not break large uploads
func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
t.Run(fmt.Sprintf("PutLarge%dk", kilobytes), func(t *testing.T) {
fstests.TestPutLarge(context.Background(), t, f, &fstest.Item{
ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
Path: fmt.Sprintf("chunker-upload-%dk", kilobytes),
Size: int64(kilobytes) * int64(fs.KibiByte),
})
})
}
// test chunk name parser
func testChunkNameFormat(t *testing.T, f *Fs) {
saveOpt := f.opt
defer func() {
// restore original settings (f is pointer, f.opt is struct)
f.opt = saveOpt
_ = f.setChunkNameFormat(f.opt.NameFormat)
}()
assertFormat := func(pattern, wantDataFormat, wantCtrlFormat, wantNameRegexp string) {
err := f.setChunkNameFormat(pattern)
assert.NoError(t, err)
assert.Equal(t, wantDataFormat, f.dataNameFmt)
assert.Equal(t, wantCtrlFormat, f.ctrlNameFmt)
assert.Equal(t, wantNameRegexp, f.nameRegexp.String())
}
assertFormatValid := func(pattern string) {
err := f.setChunkNameFormat(pattern)
assert.NoError(t, err)
}
assertFormatInvalid := func(pattern string) {
err := f.setChunkNameFormat(pattern)
assert.Error(t, err)
}
assertMakeName := func(wantChunkName, mainName string, chunkNo int, ctrlType string, xactNo int64) {
gotChunkName := f.makeChunkName(mainName, chunkNo, ctrlType, xactNo)
assert.Equal(t, wantChunkName, gotChunkName)
}
assertMakeNamePanics := func(mainName string, chunkNo int, ctrlType string, xactNo int64) {
assert.Panics(t, func() {
_ = f.makeChunkName(mainName, chunkNo, ctrlType, xactNo)
}, "makeChunkName(%q,%d,%q,%d) should panic", mainName, chunkNo, ctrlType, xactNo)
}
assertParseName := func(fileName, wantMainName string, wantChunkNo int, wantCtrlType string, wantXactNo int64) {
gotMainName, gotChunkNo, gotCtrlType, gotXactNo := f.parseChunkName(fileName)
assert.Equal(t, wantMainName, gotMainName)
assert.Equal(t, wantChunkNo, gotChunkNo)
assert.Equal(t, wantCtrlType, gotCtrlType)
assert.Equal(t, wantXactNo, gotXactNo)
}
const newFormatSupported = false // support for patterns not starting with base name (*)
// valid formats
assertFormat(`*.rclone_chunk.###`, `%s.rclone_chunk.%03d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]{3,})|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
assertFormat(`*.rclone_chunk.#`, `%s.rclone_chunk.%d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]+)|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
assertFormat(`*_chunk_#####`, `%s_chunk_%05d`, `%s_chunk__%s`, `^(.+?)_chunk_(?:([0-9]{5,})|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
assertFormat(`*-chunk-#`, `%s-chunk-%d`, `%s-chunk-_%s`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
assertFormat(`*-chunk-#-%^$()[]{}.+-!?:\`, `%s-chunk-%d-%%^$()[]{}.+-!?:\`, `%s-chunk-_%s-%%^$()[]{}.+-!?:\`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z]{3,9}))-%\^\$\(\)\[\]\{\}\.\+-!\?:\\(?:\.\.tmp_([0-9]{10,19}))?$`)
if newFormatSupported {
assertFormat(`_*-chunk-##,`, `_%s-chunk-%02d,`, `_%s-chunk-_%s,`, `^_(.+?)-chunk-(?:([0-9]{2,})|_([a-z]{3,9})),(?:\.\.tmp_([0-9]{10,19}))?$`)
}
// invalid formats
assertFormatInvalid(`chunk-#`)
assertFormatInvalid(`*-chunk`)
assertFormatInvalid(`*-*-chunk-#`)
assertFormatInvalid(`*-chunk-#-#`)
assertFormatInvalid(`#-chunk-*`)
assertFormatInvalid(`*/#`)
assertFormatValid(`*#`)
assertFormatInvalid(`**#`)
assertFormatInvalid(`#*`)
assertFormatInvalid(``)
assertFormatInvalid(`-`)
// quick tests
if newFormatSupported {
assertFormat(`part_*_#`, `part_%s_%d`, `part_%s__%s`, `^part_(.+?)_(?:([0-9]+)|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
f.opt.StartFrom = 1
assertMakeName(`part_fish_1`, "fish", 0, "", -1)
assertParseName(`part_fish_43`, "fish", 42, "", -1)
assertMakeName(`part_fish_3..tmp_0000000004`, "fish", 2, "", 4)
assertParseName(`part_fish_4..tmp_0000000005`, "fish", 3, "", 5)
assertMakeName(`part_fish__locks`, "fish", -2, "locks", -3)
assertParseName(`part_fish__locks`, "fish", -1, "locks", -1)
assertMakeName(`part_fish__blockinfo..tmp_1234567890123456789`, "fish", -3, "blockinfo", 1234567890123456789)
assertParseName(`part_fish__blockinfo..tmp_1234567890123456789`, "fish", -1, "blockinfo", 1234567890123456789)
}
// prepare format for long tests
assertFormat(`*.chunk.###`, `%s.chunk.%03d`, `%s.chunk._%s`, `^(.+?)\.chunk\.(?:([0-9]{3,})|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
f.opt.StartFrom = 2
// valid data chunks
assertMakeName(`fish.chunk.003`, "fish", 1, "", -1)
assertMakeName(`fish.chunk.011..tmp_0000054321`, "fish", 9, "", 54321)
assertMakeName(`fish.chunk.011..tmp_1234567890`, "fish", 9, "", 1234567890)
assertMakeName(`fish.chunk.1916..tmp_123456789012345`, "fish", 1914, "", 123456789012345)
assertParseName(`fish.chunk.003`, "fish", 1, "", -1)
assertParseName(`fish.chunk.004..tmp_0000000021`, "fish", 2, "", 21)
assertParseName(`fish.chunk.021`, "fish", 19, "", -1)
assertParseName(`fish.chunk.323..tmp_1234567890123456789`, "fish", 321, "", 1234567890123456789)
// parsing invalid data chunk names
assertParseName(`fish.chunk.3`, "", -1, "", -1)
assertParseName(`fish.chunk.001`, "", -1, "", -1)
assertParseName(`fish.chunk.21`, "", -1, "", -1)
assertParseName(`fish.chunk.-21`, "", -1, "", -1)
assertParseName(`fish.chunk.004.tmp_0000000021`, "", -1, "", -1)
assertParseName(`fish.chunk.003..tmp_123456789`, "", -1, "", -1)
assertParseName(`fish.chunk.003..tmp_012345678901234567890123456789`, "", -1, "", -1)
assertParseName(`fish.chunk.003..tmp_-1`, "", -1, "", -1)
// valid control chunks
assertMakeName(`fish.chunk._info`, "fish", -1, "info", -1)
assertMakeName(`fish.chunk._locks`, "fish", -2, "locks", -1)
assertMakeName(`fish.chunk._blockinfo`, "fish", -3, "blockinfo", -1)
assertParseName(`fish.chunk._info`, "fish", -1, "info", -1)
assertParseName(`fish.chunk._locks`, "fish", -1, "locks", -1)
assertParseName(`fish.chunk._blockinfo`, "fish", -1, "blockinfo", -1)
// valid temporary control chunks
assertMakeName(`fish.chunk._info..tmp_0000000021`, "fish", -1, "info", 21)
assertMakeName(`fish.chunk._locks..tmp_0000054321`, "fish", -2, "locks", 54321)
assertMakeName(`fish.chunk._uploads..tmp_0000000000`, "fish", -3, "uploads", 0)
assertMakeName(`fish.chunk._blockinfo..tmp_1234567890123456789`, "fish", -4, "blockinfo", 1234567890123456789)
assertParseName(`fish.chunk._info..tmp_0000000021`, "fish", -1, "info", 21)
assertParseName(`fish.chunk._locks..tmp_0000054321`, "fish", -1, "locks", 54321)
assertParseName(`fish.chunk._uploads..tmp_0000000000`, "fish", -1, "uploads", 0)
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789`, "fish", -1, "blockinfo", 1234567890123456789)
// parsing invalid control chunk names
assertParseName(`fish.chunk.info`, "", -1, "", -1)
assertParseName(`fish.chunk.locks`, "", -1, "", -1)
assertParseName(`fish.chunk.uploads`, "", -1, "", -1)
assertParseName(`fish.chunk.blockinfo`, "", -1, "", -1)
assertParseName(`fish.chunk._os`, "", -1, "", -1)
assertParseName(`fish.chunk._futuredata`, "", -1, "", -1)
assertParseName(`fish.chunk._me_ta`, "", -1, "", -1)
assertParseName(`fish.chunk._in-fo`, "", -1, "", -1)
assertParseName(`fish.chunk._.bin`, "", -1, "", -1)
assertParseName(`fish.chunk._locks..tmp_123456789`, "", -1, "", -1)
assertParseName(`fish.chunk._meta..tmp_-1`, "", -1, "", -1)
assertParseName(`fish.chunk._blockinfo..tmp_012345678901234567890123456789`, "", -1, "", -1)
// short control chunk names: 3 letters ok, 1-2 letters not allowed
assertMakeName(`fish.chunk._ext`, "fish", -1, "ext", -1)
assertMakeName(`fish.chunk._ext..tmp_0000000021`, "fish", -1, "ext", 21)
assertParseName(`fish.chunk._int`, "fish", -1, "int", -1)
assertParseName(`fish.chunk._int..tmp_0000000021`, "fish", -1, "int", 21)
assertMakeNamePanics("fish", -1, "in", -1)
assertMakeNamePanics("fish", -1, "up", 4)
assertMakeNamePanics("fish", -1, "x", -1)
assertMakeNamePanics("fish", -1, "c", 4)
// base file name can sometimes look like a valid chunk name
assertParseName(`fish.chunk.003.chunk.004`, "fish.chunk.003", 2, "", -1)
assertParseName(`fish.chunk.003.chunk.005..tmp_0000000021`, "fish.chunk.003", 3, "", 21)
assertParseName(`fish.chunk.003.chunk._info`, "fish.chunk.003", -1, "info", -1)
assertParseName(`fish.chunk.003.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk.003", -1, "blockinfo", 1234567890123456789)
assertParseName(`fish.chunk.003.chunk._Meta`, "", -1, "", -1)
assertParseName(`fish.chunk.003.chunk._x..tmp_0000054321`, "", -1, "", -1)
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.004`, "fish.chunk.004..tmp_0000000021", 2, "", -1)
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.005..tmp_0000000021`, "fish.chunk.004..tmp_0000000021", 3, "", 21)
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._info`, "fish.chunk.004..tmp_0000000021", -1, "info", -1)
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk.004..tmp_0000000021", -1, "blockinfo", 1234567890123456789)
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._Meta`, "", -1, "", -1)
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._x..tmp_0000054321`, "", -1, "", -1)
assertParseName(`fish.chunk._info.chunk.004`, "fish.chunk._info", 2, "", -1)
assertParseName(`fish.chunk._info.chunk.005..tmp_0000000021`, "fish.chunk._info", 3, "", 21)
assertParseName(`fish.chunk._info.chunk._info`, "fish.chunk._info", -1, "info", -1)
assertParseName(`fish.chunk._info.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk._info", -1, "blockinfo", 1234567890123456789)
assertParseName(`fish.chunk._info.chunk._info.chunk._Meta`, "", -1, "", -1)
assertParseName(`fish.chunk._info.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", -1)
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk.004`, "fish.chunk._blockinfo..tmp_1234567890123456789", 2, "", -1)
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk.005..tmp_0000000021`, "fish.chunk._blockinfo..tmp_1234567890123456789", 3, "", 21)
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._info`, "fish.chunk._blockinfo..tmp_1234567890123456789", -1, "info", -1)
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk._blockinfo..tmp_1234567890123456789", -1, "blockinfo", 1234567890123456789)
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._info.chunk._Meta`, "", -1, "", -1)
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", -1)
// attempts to make invalid chunk names
assertMakeNamePanics("fish", -1, "", -1) // neither data nor control
assertMakeNamePanics("fish", 0, "info", -1) // both data and control
assertMakeNamePanics("fish", -1, "futuredata", -1) // control type too long
assertMakeNamePanics("fish", -1, "123", -1) // digits not allowed
assertMakeNamePanics("fish", -1, "Meta", -1) // only lower case letters allowed
assertMakeNamePanics("fish", -1, "in-fo", -1) // punctuation not allowed
assertMakeNamePanics("fish", -1, "_info", -1)
assertMakeNamePanics("fish", -1, "info_", -1)
assertMakeNamePanics("fish", -2, ".bind", -3)
assertMakeNamePanics("fish", -2, "bind.", -3)
assertMakeNamePanics("fish", -1, "", 1) // neither data nor control
assertMakeNamePanics("fish", 0, "info", 12) // both data and control
assertMakeNamePanics("fish", -1, "futuredata", 45) // control type too long
assertMakeNamePanics("fish", -1, "123", 123) // digits not allowed
assertMakeNamePanics("fish", -1, "Meta", 456) // only lower case letters allowed
assertMakeNamePanics("fish", -1, "in-fo", 321) // punctuation not allowed
assertMakeNamePanics("fish", -1, "_info", 15678)
assertMakeNamePanics("fish", -1, "info_", 999)
assertMakeNamePanics("fish", -2, ".bind", 0)
assertMakeNamePanics("fish", -2, "bind.", 0)
}
func testSmallFileInternals(t *testing.T, f *Fs) {
const dir = "small"
ctx := context.Background()
saveOpt := f.opt
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
}()
f.opt.FailHard = false
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
checkSmallFileInternals := func(obj fs.Object) {
assert.NotNil(t, obj)
o, ok := obj.(*Object)
assert.True(t, ok)
assert.NotNil(t, o)
if o == nil {
return
}
switch {
case !f.useMeta:
// If meta format is "none", non-chunked file (even empty)
// internally is a single chunk without meta object.
assert.Nil(t, o.main)
assert.True(t, o.isComposite()) // sorry, sometimes a name is misleading
assert.Equal(t, 1, len(o.chunks))
case f.hashAll:
// Consistent hashing forces meta object on small files too
assert.NotNil(t, o.main)
assert.True(t, o.isComposite())
assert.Equal(t, 1, len(o.chunks))
default:
// normally non-chunked file is kept in the Object's main field
assert.NotNil(t, o.main)
assert.False(t, o.isComposite())
assert.Equal(t, 0, len(o.chunks))
}
}
checkContents := func(obj fs.Object, contents string) {
assert.NotNil(t, obj)
assert.Equal(t, int64(len(contents)), obj.Size())
r, err := obj.Open(ctx)
assert.NoError(t, err)
assert.NotNil(t, r)
if r == nil {
return
}
data, err := ioutil.ReadAll(r)
assert.NoError(t, err)
assert.Equal(t, contents, string(data))
_ = r.Close()
}
checkHashsum := func(obj fs.Object) {
var ht hash.Type
switch {
case !f.hashAll:
return
case f.useMD5:
ht = hash.MD5
case f.useSHA1:
ht = hash.SHA1
default:
return
}
// even empty files must have hashsum in consistent mode
sum, err := obj.Hash(ctx, ht)
assert.NoError(t, err)
assert.NotEqual(t, sum, "")
}
checkSmallFile := func(name, contents string) {
filename := path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
_, put := fstests.PutTestContents(ctx, t, f, &item, contents, false)
assert.NotNil(t, put)
checkSmallFileInternals(put)
checkContents(put, contents)
checkHashsum(put)
// objects returned by Put and NewObject must have similar structure
obj, err := f.NewObject(ctx, filename)
assert.NoError(t, err)
assert.NotNil(t, obj)
checkSmallFileInternals(obj)
checkContents(obj, contents)
checkHashsum(obj)
_ = obj.Remove(ctx)
_ = put.Remove(ctx) // for good
}
checkSmallFile("emptyfile", "")
checkSmallFile("smallfile", "Ok")
}
func testPreventCorruption(t *testing.T, f *Fs) {
if f.opt.ChunkSize > 50 {
t.Skip("this test requires small chunks")
}
const dir = "corrupted"
ctx := context.Background()
saveOpt := f.opt
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
}()
f.opt.FailHard = true
contents := random.String(250)
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
const overlapMessage = "chunk overlap"
assertOverlapError := func(err error) {
assert.Error(t, err)
if err != nil {
assert.Contains(t, err.Error(), overlapMessage)
}
}
newFile := func(name string) fs.Object {
item := fstest.Item{Path: path.Join(dir, name), ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
return obj
}
billyObj := newFile("billy")
billyChunkName := func(chunkNo int) string {
return f.makeChunkName(billyObj.Remote(), chunkNo, "", -1)
}
err := f.Mkdir(ctx, billyChunkName(1))
assertOverlapError(err)
_, err = f.Move(ctx, newFile("silly1"), billyChunkName(2))
assert.Error(t, err)
assert.True(t, err == fs.ErrorCantMove || (err != nil && strings.Contains(err.Error(), overlapMessage)))
_, err = f.Copy(ctx, newFile("silly2"), billyChunkName(3))
assert.Error(t, err)
assert.True(t, err == fs.ErrorCantCopy || (err != nil && strings.Contains(err.Error(), overlapMessage)))
// accessing chunks in strict mode is prohibited
f.opt.FailHard = true
billyChunk4Name := billyChunkName(4)
billyChunk4, err := f.NewObject(ctx, billyChunk4Name)
assertOverlapError(err)
f.opt.FailHard = false
billyChunk4, err = f.NewObject(ctx, billyChunk4Name)
assert.NoError(t, err)
require.NotNil(t, billyChunk4)
f.opt.FailHard = true
_, err = f.Put(ctx, bytes.NewBufferString(contents), billyChunk4)
assertOverlapError(err)
// you can freely read chunks (if you have an object)
r, err := billyChunk4.Open(ctx)
assert.NoError(t, err)
var chunkContents []byte
assert.NotPanics(t, func() {
chunkContents, err = ioutil.ReadAll(r)
_ = r.Close()
})
assert.NoError(t, err)
assert.NotEqual(t, contents, string(chunkContents))
// but you can't change them
err = billyChunk4.Update(ctx, bytes.NewBufferString(contents), newFile("silly3"))
assertOverlapError(err)
// Remove isn't special, you can't corrupt files even if you have an object
err = billyChunk4.Remove(ctx)
assertOverlapError(err)
// recreate billy in case it was anyhow corrupted
willyObj := newFile("willy")
willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", -1)
f.opt.FailHard = false
willyChunk, err := f.NewObject(ctx, willyChunkName)
f.opt.FailHard = true
assert.NoError(t, err)
require.NotNil(t, willyChunk)
_, err = operations.Copy(ctx, f, willyChunk, willyChunkName, newFile("silly4"))
assertOverlapError(err)
// operations.Move will return error when chunker's Move refused
// to corrupt target file, but reverts to copy/delete method
// still trying to delete target chunk. Chunker must come to rescue.
_, err = operations.Move(ctx, f, willyChunk, willyChunkName, newFile("silly5"))
assertOverlapError(err)
r, err = willyChunk.Open(ctx)
assert.NoError(t, err)
assert.NotPanics(t, func() {
_, err = ioutil.ReadAll(r)
_ = r.Close()
})
assert.NoError(t, err)
}
func testChunkNumberOverflow(t *testing.T, f *Fs) {
if f.opt.ChunkSize > 50 {
t.Skip("this test requires small chunks")
}
const dir = "wreaked"
const wreakNumber = 10200300
ctx := context.Background()
saveOpt := f.opt
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
}()
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
contents := random.String(100)
newFile := func(f fs.Fs, name string) (fs.Object, string) {
filename := path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
return obj, filename
}
f.opt.FailHard = false
file, fileName := newFile(f, "wreaker")
wreak, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", -1))
f.opt.FailHard = false
fstest.CheckListingWithRoot(t, f, dir, nil, nil, f.Precision())
_, err := f.NewObject(ctx, fileName)
assert.Error(t, err)
f.opt.FailHard = true
_, err = f.List(ctx, dir)
assert.Error(t, err)
_, err = f.NewObject(ctx, fileName)
assert.Error(t, err)
f.opt.FailHard = false
_ = wreak.Remove(ctx)
_ = file.Remove(ctx)
}
func testMetadataInput(t *testing.T, f *Fs) {
const minChunkForTest = 50
if f.opt.ChunkSize < minChunkForTest {
t.Skip("this test requires chunks that fit metadata")
}
const dir = "usermeta"
ctx := context.Background()
saveOpt := f.opt
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
}()
f.opt.FailHard = false
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
putFile := func(f fs.Fs, name, contents, message string, check bool) fs.Object {
item := fstest.Item{Path: name, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
assert.NotNil(t, obj, message)
return obj
}
runSubtest := func(contents, name string) {
description := fmt.Sprintf("file with %s metadata", name)
filename := path.Join(dir, name)
require.True(t, len(contents) > 2 && len(contents) < minChunkForTest, description+" test data is correct")
part := putFile(f.base, f.makeChunkName(filename, 0, "", -1), "oops", "", true)
_ = putFile(f, filename, contents, "upload "+description, false)
obj, err := f.NewObject(ctx, filename)
assert.NoError(t, err, "access "+description)
assert.NotNil(t, obj)
assert.Equal(t, int64(len(contents)), obj.Size(), "size "+description)
o, ok := obj.(*Object)
assert.NotNil(t, ok)
if o != nil {
assert.True(t, o.isComposite() && len(o.chunks) == 1, description+" is forced composite")
o = nil
}
defer func() {
_ = obj.Remove(ctx)
_ = part.Remove(ctx)
}()
r, err := obj.Open(ctx)
assert.NoError(t, err, "open "+description)
assert.NotNil(t, r, "open stream of "+description)
if err == nil && r != nil {
data, err := ioutil.ReadAll(r)
assert.NoError(t, err, "read all of "+description)
assert.Equal(t, contents, string(data), description+" contents is ok")
_ = r.Close()
}
}
metaData, err := marshalSimpleJSON(ctx, 3, 1, "", "")
require.NoError(t, err)
todaysMeta := string(metaData)
runSubtest(todaysMeta, "today")
pastMeta := regexp.MustCompile(`"ver":[0-9]+`).ReplaceAllLiteralString(todaysMeta, `"ver":1`)
pastMeta = regexp.MustCompile(`"size":[0-9]+`).ReplaceAllLiteralString(pastMeta, `"size":0`)
runSubtest(pastMeta, "past")
futureMeta := regexp.MustCompile(`"ver":[0-9]+`).ReplaceAllLiteralString(todaysMeta, `"ver":999`)
futureMeta = regexp.MustCompile(`"nchunks":[0-9]+`).ReplaceAllLiteralString(futureMeta, `"nchunks":0,"x":"y"`)
runSubtest(futureMeta, "future")
}
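For orientation, the three subtests above exercise metadata of the following shapes. This is a sketch only: the field names come from the regexp rewrites above, the authoritative schema is whatever marshalSimpleJSON emits, and the current version number shown here is assumed.
// Illustrative metadata shapes only - not the authoritative schema.
var (
	exampleToday  = `{"ver":1,"size":3,"nchunks":1}`           // as emitted by marshalSimpleJSON (version assumed)
	examplePast   = `{"ver":1,"size":0,"nchunks":1}`           // "past": version pinned to 1, size zeroed
	exampleFuture = `{"ver":999,"size":3,"nchunks":0,"x":"y"}` // "future": unknown version plus an unknown key
)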
// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
t.Run("PutLarge", func(t *testing.T) {
if *UploadKilobytes <= 0 {
t.Skip("-upload-kilobytes is not set")
}
testPutLarge(t, f, *UploadKilobytes)
})
t.Run("ChunkNameFormat", func(t *testing.T) {
testChunkNameFormat(t, f)
})
t.Run("SmallFileInternals", func(t *testing.T) {
testSmallFileInternals(t, f)
})
t.Run("PreventCorruption", func(t *testing.T) {
testPreventCorruption(t, f)
})
t.Run("ChunkNumberOverflow", func(t *testing.T) {
testChunkNumberOverflow(t, f)
})
t.Run("MetadataInput", func(t *testing.T) {
testMetadataInput(t, f)
})
}
var _ fstests.InternalTester = (*Fs)(nil)
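The assertion above is what opts chunker into this extra testing: fstests type-asserts each Fs under test against InternalTester and, on a match, runs InternalTest from inside the generated integration tests. A sketch of the contract, assuming the shape implied by the method above (the authoritative definition lives in fstest/fstests):
// Sketch of the fstests.InternalTester contract (assumed shape):
type InternalTester interface {
	InternalTest(*testing.T)
}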


@@ -1,58 +0,0 @@
// Test the Chunker filesystem interface
package chunker_test
import (
"flag"
"os"
"path/filepath"
"testing"
_ "github.com/rclone/rclone/backend/all" // for integration tests
"github.com/rclone/rclone/backend/chunker"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
// Command line flags
var (
// Invalid characters are not supported by some remotes, eg. Mailru.
// We enable testing with invalid characters when -remote is not set, so
// chunker overlays a local directory, but invalid characters are disabled
// by default when -remote is set, eg. when test_all runs backend tests.
// You can still test with invalid characters using the below flag.
UseBadChars = flag.Bool("bad-chars", false, "Set to test bad characters in file names when -remote is set")
)
// TestIntegration runs integration tests against a concrete remote
// set by the -remote flag. If the flag is not set, it creates a
// dynamic chunker overlay wrapping a local temporary directory.
func TestIntegration(t *testing.T) {
opt := fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*chunker.Object)(nil),
SkipBadWindowsCharacters: !*UseBadChars,
UnimplementableObjectMethods: []string{
"MimeType",
"GetTier",
"SetTier",
},
UnimplementableFsMethods: []string{
"PublicLink",
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"UserInfo",
"Disconnect",
},
}
if *fstest.RemoteName == "" {
name := "TestChunker"
opt.RemoteName = name + ":"
tempDir := filepath.Join(os.TempDir(), "rclone-chunker-test-standard")
opt.ExtraConfig = []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "chunker"},
{Name: name, Key: "remote", Value: tempDir},
}
}
fstests.Run(t, &opt)
}
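For reference, the ExtraConfigItem entries above are equivalent to a config stanza along these lines (a sketch; the remote value expands to a path under the OS temp directory):
// Equivalent on-the-fly configuration (illustrative path):
const exampleConfig = `
[TestChunker]
type = chunker
remote = /tmp/rclone-chunker-test-standard
`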


@@ -2,7 +2,6 @@ package crypt
import (
"bytes"
"context"
"crypto/aes"
gocipher "crypto/cipher"
"crypto/rand"
@@ -14,10 +13,10 @@ import (
"sync"
"unicode/utf8"
"github.com/ncw/rclone/backend/crypt/pkcs7"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rfjakob/eme"
"golang.org/x/crypto/nacl/secretbox"
"golang.org/x/crypto/scrypt"
@@ -69,7 +68,7 @@ type ReadSeekCloser interface {
}
// OpenRangeSeek opens the file handle at the offset with the limit given
type OpenRangeSeek func(ctx context.Context, offset, limit int64) (io.ReadCloser, error)
type OpenRangeSeek func(offset, limit int64) (io.ReadCloser, error)
// Cipher is used to swap out the encryption implementations
type Cipher interface {
@@ -86,7 +85,7 @@ type Cipher interface {
// DecryptData
DecryptData(io.ReadCloser) (io.ReadCloser, error)
// DecryptDataSeek decrypt at a given position
DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error)
DecryptDataSeek(open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error)
// EncryptedSize calculates the size of the data when encrypted
EncryptedSize(int64) int64
// DecryptedSize calculates the size of the data when decrypted
@@ -208,6 +207,21 @@ func (c *cipher) putBlock(buf []byte) {
c.buffers.Put(buf)
}
// check to see if the byte string is valid, ie contains no control
// characters from 0x00-0x1F or 0x7F (DEL) and is a valid UTF-8 string
func checkValidString(buf []byte) error {
for i := range buf {
c := buf[i]
if c >= 0x00 && c < 0x20 || c == 0x7F {
return ErrorBadDecryptControlChar
}
}
if !utf8.Valid(buf) {
return ErrorBadDecryptUTF8
}
return nil
}
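A quick illustration of the contract; the return values mirror entries in the table-driven TestValidString further down:
// Illustrative calls (see TestValidString for the full table):
_ = checkValidString([]byte("£100"))     // nil: printable UTF-8 is accepted
_ = checkValidString([]byte("\x1Bm"))    // ErrorBadDecryptControlChar: 0x00-0x1F and 0x7F rejected
_ = checkValidString([]byte("\xc3\x28")) // ErrorBadDecryptUTF8: invalid 2-octet sequence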
// encodeFileName encodes a filename using a modified version of
// standard base32 as described in RFC4648
//
@@ -279,6 +293,10 @@ func (c *cipher) decryptSegment(ciphertext string) (string, error) {
if err != nil {
return "", err
}
err = checkValidString(plaintext)
if err != nil {
return "", err
}
return string(plaintext), err
}
@@ -737,22 +755,22 @@ func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
}
// newDecrypterSeek creates a new file handle decrypting on the fly
func (c *cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
func (c *cipher) newDecrypterSeek(open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
var rc io.ReadCloser
doRangeSeek := false
setLimit := false
// Open initially with no seek
if offset == 0 && limit < 0 {
// If no offset or limit then open whole file
rc, err = open(ctx, 0, -1)
rc, err = open(0, -1)
} else if offset == 0 {
// If no offset open the header + limit worth of the file
_, underlyingLimit, _, _ := calculateUnderlying(offset, limit)
rc, err = open(ctx, 0, int64(fileHeaderSize)+underlyingLimit)
rc, err = open(0, int64(fileHeaderSize)+underlyingLimit)
setLimit = true
} else {
// Otherwise just read the header to start with
rc, err = open(ctx, 0, int64(fileHeaderSize))
rc, err = open(0, int64(fileHeaderSize))
doRangeSeek = true
}
if err != nil {
@@ -765,7 +783,7 @@ func (c *cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offse
}
fh.open = open // will be called by fh.RangeSeek
if doRangeSeek {
_, err = fh.RangeSeek(ctx, offset, io.SeekStart, limit)
_, err = fh.RangeSeek(offset, io.SeekStart, limit)
if err != nil {
_ = fh.Close()
return nil, err
@@ -885,7 +903,7 @@ func calculateUnderlying(offset, limit int64) (underlyingOffset, underlyingLimit
// limiting the total length to limit.
//
// RangeSeek with a limit of < 0 is equivalent to a regular Seek.
func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, limit int64) (int64, error) {
func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, error) {
fh.mu.Lock()
defer fh.mu.Unlock()
@@ -912,7 +930,7 @@ func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, li
// Can we seek underlying stream directly?
if do, ok := fh.rc.(fs.RangeSeeker); ok {
// Seek underlying stream directly
_, err := do.RangeSeek(ctx, underlyingOffset, 0, underlyingLimit)
_, err := do.RangeSeek(underlyingOffset, 0, underlyingLimit)
if err != nil {
return 0, fh.finish(err)
}
@@ -922,7 +940,7 @@ func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, li
fh.rc = nil
// Re-open the underlying object with the offset given
rc, err := fh.open(ctx, underlyingOffset, underlyingLimit)
rc, err := fh.open(underlyingOffset, underlyingLimit)
if err != nil {
return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit"))
}
@@ -951,7 +969,7 @@ func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, li
// Seek implements the io.Seeker interface
func (fh *decrypter) Seek(offset int64, whence int) (int64, error) {
return fh.RangeSeek(context.TODO(), offset, whence, -1)
return fh.RangeSeek(offset, whence, -1)
}
// finish sets the final error and tidies up
@@ -1025,8 +1043,8 @@ func (c *cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
// The open function must return a ReadCloser opened to the offset supplied
//
// You must use this form of DecryptData if you might want to Seek the file handle
func (c *cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
out, err := c.newDecrypterSeek(ctx, open, offset, limit)
func (c *cipher) DecryptDataSeek(open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
out, err := c.newDecrypterSeek(open, offset, limit)
if err != nil {
return nil, err
}
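A minimal sketch of calling this revision's context-free form, modelled on TestNewDecrypterSeekLimit in the test file; openEncryptedRange is a hypothetical helper standing in for however the caller opens the underlying ciphertext:
// Minimal sketch, assuming a hypothetical openEncryptedRange helper:
open := func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
	// Return the encrypted stream positioned at underlyingOffset and
	// limited to underlyingLimit bytes (-1 means "to the end").
	return openEncryptedRange(underlyingOffset, underlyingLimit)
}
rc, err := c.DecryptDataSeek(open, 0, -1) // whole file, seekable via RangeSeek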


@@ -2,7 +2,6 @@ package crypt
import (
"bytes"
"context"
"encoding/base32"
"fmt"
"io"
@@ -10,8 +9,8 @@ import (
"strings"
"testing"
"github.com/ncw/rclone/backend/crypt/pkcs7"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -44,6 +43,69 @@ func TestNewNameEncryptionModeString(t *testing.T) {
assert.Equal(t, NameEncryptionMode(3).String(), "Unknown mode #3")
}
func TestValidString(t *testing.T) {
for _, test := range []struct {
in string
expected error
}{
{"", nil},
{"\x01", ErrorBadDecryptControlChar},
{"a\x02", ErrorBadDecryptControlChar},
{"abc\x03", ErrorBadDecryptControlChar},
{"abc\x04def", ErrorBadDecryptControlChar},
{"\x05d", ErrorBadDecryptControlChar},
{"\x06def", ErrorBadDecryptControlChar},
{"\x07", ErrorBadDecryptControlChar},
{"\x08", ErrorBadDecryptControlChar},
{"\x09", ErrorBadDecryptControlChar},
{"\x0A", ErrorBadDecryptControlChar},
{"\x0B", ErrorBadDecryptControlChar},
{"\x0C", ErrorBadDecryptControlChar},
{"\x0D", ErrorBadDecryptControlChar},
{"\x0E", ErrorBadDecryptControlChar},
{"\x0F", ErrorBadDecryptControlChar},
{"\x10", ErrorBadDecryptControlChar},
{"\x11", ErrorBadDecryptControlChar},
{"\x12", ErrorBadDecryptControlChar},
{"\x13", ErrorBadDecryptControlChar},
{"\x14", ErrorBadDecryptControlChar},
{"\x15", ErrorBadDecryptControlChar},
{"\x16", ErrorBadDecryptControlChar},
{"\x17", ErrorBadDecryptControlChar},
{"\x18", ErrorBadDecryptControlChar},
{"\x19", ErrorBadDecryptControlChar},
{"\x1A", ErrorBadDecryptControlChar},
{"\x1B", ErrorBadDecryptControlChar},
{"\x1C", ErrorBadDecryptControlChar},
{"\x1D", ErrorBadDecryptControlChar},
{"\x1E", ErrorBadDecryptControlChar},
{"\x1F", ErrorBadDecryptControlChar},
{"\x20", nil},
{"\x7E", nil},
{"\x7F", ErrorBadDecryptControlChar},
{"£100", nil},
{`hello? sausage/êé/Hello, 世界/ " ' @ < > & ?/z.txt`, nil},
{"£100", nil},
// Following tests from https://secure.php.net/manual/en/reference.pcre.pattern.modifiers.php#54805
{"a", nil}, // Valid ASCII
{"\xc3\xb1", nil}, // Valid 2 Octet Sequence
{"\xc3\x28", ErrorBadDecryptUTF8}, // Invalid 2 Octet Sequence
{"\xa0\xa1", ErrorBadDecryptUTF8}, // Invalid Sequence Identifier
{"\xe2\x82\xa1", nil}, // Valid 3 Octet Sequence
{"\xe2\x28\xa1", ErrorBadDecryptUTF8}, // Invalid 3 Octet Sequence (in 2nd Octet)
{"\xe2\x82\x28", ErrorBadDecryptUTF8}, // Invalid 3 Octet Sequence (in 3rd Octet)
{"\xf0\x90\x8c\xbc", nil}, // Valid 4 Octet Sequence
{"\xf0\x28\x8c\xbc", ErrorBadDecryptUTF8}, // Invalid 4 Octet Sequence (in 2nd Octet)
{"\xf0\x90\x28\xbc", ErrorBadDecryptUTF8}, // Invalid 4 Octet Sequence (in 3rd Octet)
{"\xf0\x28\x8c\x28", ErrorBadDecryptUTF8}, // Invalid 4 Octet Sequence (in 4th Octet)
{"\xf8\xa1\xa1\xa1\xa1", ErrorBadDecryptUTF8}, // Valid 5 Octet Sequence (but not Unicode!)
{"\xfc\xa1\xa1\xa1\xa1\xa1", ErrorBadDecryptUTF8}, // Valid 6 Octet Sequence (but not Unicode!)
} {
actual := checkValidString([]byte(test.in))
assert.Equal(t, test.expected, actual, fmt.Sprintf("in=%q", test.in))
}
}
func TestEncodeFileName(t *testing.T) {
for _, test := range []struct {
in string
@@ -147,6 +209,8 @@ func TestDecryptSegment(t *testing.T) {
{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
{c.encryptSegment("\x01"), ErrorBadDecryptControlChar},
{c.encryptSegment("\xc3\x28"), ErrorBadDecryptUTF8},
} {
actual, actualErr := c.decryptSegment(test.in)
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
@@ -640,16 +704,16 @@ var (
// Test test infrastructure first!
func TestRandomSource(t *testing.T) {
source := newRandomSource(1e8)
sink := newRandomSource(1e8)
source := newRandomSource(1E8)
sink := newRandomSource(1E8)
n, err := io.Copy(sink, source)
assert.NoError(t, err)
assert.Equal(t, int64(1e8), n)
assert.Equal(t, int64(1E8), n)
source = newRandomSource(1e8)
source = newRandomSource(1E8)
buf := make([]byte, 16)
_, _ = source.Read(buf)
sink = newRandomSource(1e8)
sink = newRandomSource(1E8)
_, err = io.Copy(sink, source)
assert.Error(t, err, "Error in stream")
}
@@ -689,23 +753,23 @@ func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
}
func TestEncryptDecrypt1(t *testing.T) {
testEncryptDecrypt(t, 1, 1e7)
testEncryptDecrypt(t, 1, 1E7)
}
func TestEncryptDecrypt32(t *testing.T) {
testEncryptDecrypt(t, 32, 1e8)
testEncryptDecrypt(t, 32, 1E8)
}
func TestEncryptDecrypt4096(t *testing.T) {
testEncryptDecrypt(t, 4096, 1e8)
testEncryptDecrypt(t, 4096, 1E8)
}
func TestEncryptDecrypt65536(t *testing.T) {
testEncryptDecrypt(t, 65536, 1e8)
testEncryptDecrypt(t, 65536, 1E8)
}
func TestEncryptDecrypt65537(t *testing.T) {
testEncryptDecrypt(t, 65537, 1e8)
testEncryptDecrypt(t, 65537, 1E8)
}
var (
@@ -738,7 +802,7 @@ func TestEncryptData(t *testing.T) {
} {
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)
c.cryptoRand = newRandomSource(1e8) // nudge the crypto rand generator
c.cryptoRand = newRandomSource(1E8) // nudge the crypto rand generator
// Check encode works
buf := bytes.NewBuffer(test.in)
@@ -761,7 +825,7 @@ func TestEncryptData(t *testing.T) {
func TestNewEncrypter(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)
c.cryptoRand = newRandomSource(1e8) // nudge the crypto rand generator
c.cryptoRand = newRandomSource(1E8) // nudge the crypto rand generator
z := &zeroes{}
@@ -788,7 +852,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
fh, err := c.newEncrypter(in, nil)
assert.NoError(t, err)
n, err := io.CopyN(ioutil.Discard, fh, 1e6)
n, err := io.CopyN(ioutil.Discard, fh, 1E6)
assert.Equal(t, io.ErrUnexpectedEOF, err)
assert.Equal(t, int64(32), n)
}
@@ -820,7 +884,7 @@ func (c *closeDetector) Close() error {
func TestNewDecrypter(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)
c.cryptoRand = newRandomSource(1e8) // nudge the crypto rand generator
c.cryptoRand = newRandomSource(1E8) // nudge the crypto rand generator
cd := newCloseDetector(bytes.NewBuffer(file0))
fh, err := c.newDecrypter(cd)
@@ -871,7 +935,7 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
fh, err := c.newDecrypter(in)
assert.NoError(t, err)
n, err := io.CopyN(ioutil.Discard, fh, 1e6)
n, err := io.CopyN(ioutil.Discard, fh, 1E6)
assert.Equal(t, io.ErrUnexpectedEOF, err)
assert.Equal(t, int64(16), n)
}
@@ -901,7 +965,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
// Open stream with a seek of underlyingOffset
var reader io.ReadCloser
open := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
open := func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
end := len(ciphertext)
if underlyingLimit >= 0 {
end = int(underlyingOffset + underlyingLimit)
@@ -942,7 +1006,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
if offset+limit > len(plaintext) {
continue
}
rc, err := c.DecryptDataSeek(context.Background(), open, int64(offset), int64(limit))
rc, err := c.DecryptDataSeek(open, int64(offset), int64(limit))
assert.NoError(t, err)
check(rc, offset, limit)
@@ -950,14 +1014,14 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
}
// Try decoding it with a single open and lots of seeks
fh, err := c.DecryptDataSeek(context.Background(), open, 0, -1)
fh, err := c.DecryptDataSeek(open, 0, -1)
assert.NoError(t, err)
for _, offset := range trials {
for _, limit := range limits {
if offset+limit > len(plaintext) {
continue
}
_, err := fh.RangeSeek(context.Background(), int64(offset), io.SeekStart, int64(limit))
_, err := fh.RangeSeek(int64(offset), io.SeekStart, int64(limit))
assert.NoError(t, err)
check(fh, offset, limit)
@@ -1008,7 +1072,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
} {
what := fmt.Sprintf("offset = %d, limit = %d", test.offset, test.limit)
callCount := 0
testOpen := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
testOpen := func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
switch callCount {
case 0:
assert.Equal(t, int64(0), underlyingOffset, what)
@@ -1020,11 +1084,11 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
t.Errorf("Too many calls %d for %s", callCount+1, what)
}
callCount++
return open(ctx, underlyingOffset, underlyingLimit)
return open(underlyingOffset, underlyingLimit)
}
fh, err := c.DecryptDataSeek(context.Background(), testOpen, 0, -1)
fh, err := c.DecryptDataSeek(testOpen, 0, -1)
assert.NoError(t, err)
gotOffset, err := fh.RangeSeek(context.Background(), test.offset, io.SeekStart, test.limit)
gotOffset, err := fh.RangeSeek(test.offset, io.SeekStart, test.limit)
assert.NoError(t, err)
assert.Equal(t, gotOffset, test.offset)
}


@@ -2,20 +2,19 @@
package crypt
import (
"context"
"fmt"
"io"
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fspath"
"github.com/ncw/rclone/fs/hash"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
)
// Globals
@@ -170,10 +169,23 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
WriteMimeType: false,
BucketBased: true,
CanHaveEmptyDirectories: true,
SetTier: true,
GetTier: true,
}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
doChangeNotify := wrappedFs.Features().ChangeNotify
if doChangeNotify != nil {
f.features.ChangeNotify = func(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
decrypted, err := f.DecryptFileName(path)
if err != nil {
fs.Logf(f, "ChangeNotify was unable to decrypt %q: %s", path, err)
return
}
notifyFunc(decrypted, entryType)
}
doChangeNotify(wrappedNotifyFunc, pollInterval)
}
}
return f, err
}
@@ -190,7 +202,6 @@ type Options struct {
// Fs represents a wrapped fs.Fs
type Fs struct {
fs.Fs
wrapper fs.Fs
name string
root string
opt Options
@@ -233,7 +244,7 @@ func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
}
// Encrypt a directory name and add it to entries.
func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) {
func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) {
remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote)
if err != nil {
@@ -243,18 +254,18 @@ func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Director
if f.opt.ShowMapping {
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
}
*entries = append(*entries, f.newDir(ctx, dir))
*entries = append(*entries, f.newDir(dir))
}
// Encrypt some directory entries. This alters entries, returning it as newEntries.
func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
func (f *Fs) encryptEntries(entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
newEntries = entries[:0] // in place filter
for _, entry := range entries {
switch x := entry.(type) {
case fs.Object:
f.add(&newEntries, x)
case fs.Directory:
f.addDir(ctx, &newEntries, x)
f.addDir(&newEntries, x)
default:
return nil, errors.Errorf("Unknown object type %T", entry)
}
@@ -271,12 +282,12 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
entries, err = f.Fs.List(ctx, f.cipher.EncryptDirName(dir))
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
entries, err = f.Fs.List(f.cipher.EncryptDirName(dir))
if err != nil {
return nil, err
}
return f.encryptEntries(ctx, entries)
return f.encryptEntries(entries)
}
// ListR lists the objects and directories of the Fs starting
@@ -295,9 +306,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
return f.Fs.Features().ListR(ctx, f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error {
newEntries, err := f.encryptEntries(ctx, entries)
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
return f.Fs.Features().ListR(f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error {
newEntries, err := f.encryptEntries(entries)
if err != nil {
return err
}
@@ -306,18 +317,18 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
}
// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
o, err := f.Fs.NewObject(ctx, f.cipher.EncryptFileName(remote))
func (f *Fs) NewObject(remote string) (fs.Object, error) {
o, err := f.Fs.NewObject(f.cipher.EncryptFileName(remote))
if err != nil {
return nil, err
}
return f.newObject(o), nil
}
type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
// put implements Put or PutStream
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
// Encrypt the data into wrappedIn
wrappedIn, err := f.cipher.EncryptData(in)
if err != nil {
@@ -343,7 +354,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
}
// Transfer the data
o, err := put(ctx, wrappedIn, f.newObjectInfo(src), options...)
o, err := put(wrappedIn, f.newObjectInfo(src), options...)
if err != nil {
return nil, err
}
@@ -352,13 +363,13 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
if ht != hash.None && hasher != nil {
srcHash := hasher.Sums()[ht]
var dstHash string
dstHash, err = o.Hash(ctx, ht)
dstHash, err = o.Hash(ht)
if err != nil {
return nil, errors.Wrap(err, "failed to read destination hash")
}
if srcHash != "" && dstHash != "" && srcHash != dstHash {
// remove object
err = o.Remove(ctx)
err = o.Remove()
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
@@ -374,13 +385,13 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.put(ctx, in, src, options, f.Fs.Put)
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.put(in, src, options, f.Fs.Put)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.put(ctx, in, src, options, f.Fs.Features().PutStream)
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.put(in, src, options, f.Fs.Features().PutStream)
}
// Hashes returns the supported hash sets.
@@ -391,15 +402,15 @@ func (f *Fs) Hashes() hash.Set {
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return f.Fs.Mkdir(ctx, f.cipher.EncryptDirName(dir))
func (f *Fs) Mkdir(dir string) error {
return f.Fs.Mkdir(f.cipher.EncryptDirName(dir))
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.Fs.Rmdir(ctx, f.cipher.EncryptDirName(dir))
func (f *Fs) Rmdir(dir string) error {
return f.Fs.Rmdir(f.cipher.EncryptDirName(dir))
}
// Purge all files in the root and the root directory
@@ -408,12 +419,12 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context) error {
func (f *Fs) Purge() error {
do := f.Fs.Features().Purge
if do == nil {
return fs.ErrorCantPurge
}
return do(ctx)
return do()
}
// Copy src to this remote using server side copy operations.
@@ -425,7 +436,7 @@ func (f *Fs) Purge(ctx context.Context) error {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
do := f.Fs.Features().Copy
if do == nil {
return nil, fs.ErrorCantCopy
@@ -434,7 +445,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
if !ok {
return nil, fs.ErrorCantCopy
}
oResult, err := do(ctx, o.Object, f.cipher.EncryptFileName(remote))
oResult, err := do(o.Object, f.cipher.EncryptFileName(remote))
if err != nil {
return nil, err
}
@@ -450,7 +461,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
do := f.Fs.Features().Move
if do == nil {
return nil, fs.ErrorCantMove
@@ -459,7 +470,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if !ok {
return nil, fs.ErrorCantMove
}
oResult, err := do(ctx, o.Object, f.cipher.EncryptFileName(remote))
oResult, err := do(o.Object, f.cipher.EncryptFileName(remote))
if err != nil {
return nil, err
}
@@ -474,7 +485,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
do := f.Fs.Features().DirMove
if do == nil {
return fs.ErrorCantDirMove
@@ -484,14 +495,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
return do(ctx, srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote))
return do(srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote))
}
// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
do := f.Fs.Features().PutUnchecked
if do == nil {
return nil, errors.New("can't PutUnchecked")
@@ -500,7 +511,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
if err != nil {
return nil, err
}
o, err := do(ctx, wrappedIn, f.newObjectInfo(src))
o, err := do(wrappedIn, f.newObjectInfo(src))
if err != nil {
return nil, err
}
@@ -511,21 +522,21 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
func (f *Fs) CleanUp(ctx context.Context) error {
func (f *Fs) CleanUp() error {
do := f.Fs.Features().CleanUp
if do == nil {
return errors.New("can't CleanUp")
}
return do(ctx)
return do()
}
// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
func (f *Fs) About() (*fs.Usage, error) {
do := f.Fs.Features().About
if do == nil {
return nil, errors.New("About not supported")
}
return do(ctx)
return do()
}
// UnWrap returns the Fs that this Fs is wrapping
@@ -533,16 +544,6 @@ func (f *Fs) UnWrap() fs.Fs {
return f.Fs
}
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
return f.wrapper
}
// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
f.wrapper = wrapper
}
// EncryptFileName returns an encrypted file name
func (f *Fs) EncryptFileName(fileName string) string {
return f.cipher.EncryptFileName(fileName)
@@ -557,10 +558,10 @@ func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
// src with it, and calculates the hash given by HashType on the fly
//
// Note that we break lots of encapsulation in this function.
func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
// Read the nonce - opening the file is sufficient to read the nonce in
// use a limited read so we only read the header
in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
in, err := o.Object.Open(&fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
if err != nil {
return "", errors.Wrap(err, "failed to open object to read nonce")
}
@@ -590,7 +591,7 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
}
// Open the src for input
in, err = src.Open(ctx)
in, err = src.Open()
if err != nil {
return "", errors.Wrap(err, "failed to open src")
}
@@ -615,75 +616,6 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
return m.Sums()[hashType], nil
}
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
do := f.Fs.Features().MergeDirs
if do == nil {
return errors.New("MergeDirs not supported")
}
out := make([]fs.Directory, len(dirs))
for i, dir := range dirs {
out[i] = fs.NewDirCopy(ctx, dir).SetRemote(f.cipher.EncryptDirName(dir.Remote()))
}
return do(ctx, out)
}
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
do := f.Fs.Features().DirCacheFlush
if do != nil {
do()
}
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
do := f.Fs.Features().PublicLink
if do == nil {
return "", errors.New("PublicLink not supported")
}
o, err := f.NewObject(ctx, remote)
if err != nil {
// assume it is a directory
return do(ctx, f.cipher.EncryptDirName(remote))
}
return do(ctx, o.(*Object).Object.Remote())
}
// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
do := f.Fs.Features().ChangeNotify
if do == nil {
return
}
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
// fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
var (
err error
decrypted string
)
switch entryType {
case fs.EntryDirectory:
decrypted, err = f.cipher.DecryptDirName(path)
case fs.EntryObject:
decrypted, err = f.cipher.DecryptFileName(path)
default:
fs.Errorf(path, "crypt ChangeNotify: ignoring unknown EntryType %d", entryType)
return
}
if err != nil {
fs.Logf(f, "ChangeNotify was unable to decrypt %q: %s", path, err)
return
}
notifyFunc(decrypted, entryType)
}
do(ctx, wrappedNotifyFunc, pollIntervalChan)
}
// Object describes a wrapped fs.Object for being read from the Fs
//
// This decrypts the remote name and decrypts the data
@@ -734,7 +666,7 @@ func (o *Object) Size() int64 {
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
func (o *Object) Hash(ht hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
@@ -744,7 +676,7 @@ func (o *Object) UnWrap() fs.Object {
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
var openOptions []fs.OpenOption
var offset, limit int64 = 0, -1
for _, option := range options {
@@ -758,10 +690,10 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
openOptions = append(openOptions, option)
}
}
rc, err = o.f.cipher.DecryptDataSeek(ctx, func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
rc, err = o.f.cipher.DecryptDataSeek(func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
if underlyingOffset == 0 && underlyingLimit < 0 {
// Open with no seek
return o.Object.Open(ctx, openOptions...)
return o.Object.Open(openOptions...)
}
// Open stream with a range of underlyingOffset, underlyingLimit
end := int64(-1)
@@ -772,7 +704,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
}
}
newOpenOptions := append(openOptions, &fs.RangeOption{Start: underlyingOffset, End: end})
return o.Object.Open(ctx, newOpenOptions...)
return o.Object.Open(newOpenOptions...)
}, offset, limit)
if err != nil {
return nil, err
@@ -781,17 +713,17 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
}
// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
update := func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return o.Object, o.Object.Update(ctx, in, src, options...)
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
update := func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return o.Object, o.Object.Update(in, src, options...)
}
_, err := o.f.put(ctx, in, src, options, update)
_, err := o.f.put(in, src, options, update)
return err
}
// newDir returns a dir with the Name decrypted
func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {
newDir := fs.NewDirCopy(ctx, dir)
func (f *Fs) newDir(dir fs.Directory) fs.Directory {
newDir := fs.NewDirCopy(dir)
remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote)
if err != nil {
@@ -802,24 +734,6 @@ func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {
return newDir
}
// UserInfo returns info about the connected user
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
do := f.Fs.Features().UserInfo
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx)
}
// Disconnect the current user
func (f *Fs) Disconnect(ctx context.Context) error {
do := f.Fs.Features().Disconnect
if do == nil {
return fs.ErrorNotImplemented
}
return do(ctx)
}
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source
//
// This encrypts the remote name and adjusts the size
@@ -856,38 +770,10 @@ func (o *ObjectInfo) Size() int64 {
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
func (o *ObjectInfo) Hash(hash hash.Type) (string, error) {
return "", nil
}
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
do, ok := o.Object.(fs.IDer)
if !ok {
return ""
}
return do.ID()
}
// SetTier performs changing storage tier of the Object if
// multiple storage classes supported
func (o *Object) SetTier(tier string) error {
do, ok := o.Object.(fs.SetTierer)
if !ok {
return errors.New("crypt: underlying remote does not support SetTier")
}
return do.SetTier(tier)
}
// GetTier returns storage tier or class of the Object
func (o *Object) GetTier() string {
do, ok := o.Object.(fs.GetTierer)
if !ok {
return ""
}
return do.GetTier()
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
@@ -901,17 +787,7 @@ var (
_ fs.UnWrapper = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
_ fs.ObjectInfo = (*ObjectInfo)(nil)
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
_ fs.SetTierer = (*Object)(nil)
_ fs.GetTierer = (*Object)(nil)
)


@@ -6,13 +6,13 @@ import (
"path/filepath"
"testing"
"github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/drive" // for integration tests
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/swift" // for integration tests
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/ncw/rclone/backend/crypt"
_ "github.com/ncw/rclone/backend/drive" // for integration tests
_ "github.com/ncw/rclone/backend/local"
_ "github.com/ncw/rclone/backend/swift" // for integration tests
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
@@ -21,10 +21,8 @@ func TestIntegration(t *testing.T) {
t.Skip("Skipping as -remote not set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*crypt.Object)(nil),
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
RemoteName: *fstest.RemoteName,
NilObject: (*crypt.Object)(nil),
})
}
@@ -44,8 +42,6 @@ func TestStandard(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"},
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}
@@ -65,8 +61,6 @@ func TestOff(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
{Name: name, Key: "filename_encryption", Value: "off"},
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}
@@ -86,8 +80,6 @@ func TestObfuscate(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
{Name: name, Key: "filename_encryption", Value: "obfuscate"},
},
SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
SkipBadWindowsCharacters: true,
})
}

File diff suppressed because it is too large


@@ -2,7 +2,6 @@ package drive
import (
"bytes"
"context"
"encoding/json"
"io"
"io/ioutil"
@@ -11,11 +10,11 @@ import (
"strings"
"testing"
_ "github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/operations"
"github.com/ncw/rclone/fstest/fstests"
"github.com/pkg/errors"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/api/drive/v3"
@@ -196,7 +195,7 @@ func (f *Fs) InternalTestDocumentImport(t *testing.T) {
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
require.NoError(t, err)
err = operations.CopyFile(context.Background(), f, testFilesFs, "example2.doc", "example2.doc")
err = operations.CopyFile(f, testFilesFs, "example2.doc", "example2.doc")
require.NoError(t, err)
}
@@ -210,7 +209,7 @@ func (f *Fs) InternalTestDocumentUpdate(t *testing.T) {
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
require.NoError(t, err)
err = operations.CopyFile(context.Background(), f, testFilesFs, "example2.xlsx", "example1.ods")
err = operations.CopyFile(f, testFilesFs, "example2.xlsx", "example1.ods")
require.NoError(t, err)
}
@@ -221,10 +220,10 @@ func (f *Fs) InternalTestDocumentExport(t *testing.T) {
f.exportExtensions, _, err = parseExtensions("txt")
require.NoError(t, err)
obj, err := f.NewObject(context.Background(), "example2.txt")
obj, err := f.NewObject("example2.txt")
require.NoError(t, err)
rc, err := obj.Open(context.Background())
rc, err := obj.Open()
require.NoError(t, err)
defer func() { require.NoError(t, rc.Close()) }()
@@ -247,10 +246,10 @@ func (f *Fs) InternalTestDocumentLink(t *testing.T) {
f.exportExtensions, _, err = parseExtensions("link.html")
require.NoError(t, err)
obj, err := f.NewObject(context.Background(), "example2.link.html")
obj, err := f.NewObject("example2.link.html")
require.NoError(t, err)
rc, err := obj.Open(context.Background())
rc, err := obj.Open()
require.NoError(t, err)
defer func() { require.NoError(t, rc.Close()) }()


@@ -5,8 +5,8 @@ package drive
import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote


@@ -11,7 +11,6 @@
package drive
import (
"context"
"encoding/json"
"fmt"
"io"
@@ -20,10 +19,10 @@ import (
"regexp"
"strconv"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/readers"
"google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi"
)
@@ -51,13 +50,15 @@ type resumableUpload struct {
}
// Upload the io.Reader in of size bytes with contentType and info
func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) {
func (f *Fs) Upload(in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) {
params := url.Values{
"alt": {"json"},
"uploadType": {"resumable"},
"fields": {partialFields},
}
params.Set("supportsAllDrives", "true")
if f.isTeamDrive {
params.Set("supportsTeamDrives", "true")
}
if f.opt.KeepRevisionForever {
params.Set("keepRevisionForever", "true")
}
@@ -82,7 +83,6 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
if err != nil {
return false, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
googleapi.Expand(req.URL, map[string]string{
"fileId": fileID,
})
@@ -108,13 +108,12 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
MediaType: contentType,
ContentLength: size,
}
return rx.Upload(ctx)
return rx.Upload()
}
// Make an http.Request for the range passed in
func (rx *resumableUpload) makeRequest(ctx context.Context, start int64, body io.ReadSeeker, reqSize int64) *http.Request {
func (rx *resumableUpload) makeRequest(start int64, body io.ReadSeeker, reqSize int64) *http.Request {
req, _ := http.NewRequest("POST", rx.URI, body)
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.ContentLength = reqSize
if reqSize != 0 {
req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
@@ -132,8 +131,8 @@ var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)
// Query drive for the amount transferred so far
//
// If error is nil, then start should be valid
func (rx *resumableUpload) transferStatus(ctx context.Context) (start int64, err error) {
req := rx.makeRequest(ctx, 0, nil, 0)
func (rx *resumableUpload) transferStatus() (start int64, err error) {
req := rx.makeRequest(0, nil, 0)
res, err := rx.f.client.Do(req)
if err != nil {
return 0, err
@@ -160,9 +159,9 @@ func (rx *resumableUpload) transferStatus(ctx context.Context) (start int64, err
}
// Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
func (rx *resumableUpload) transferChunk(ctx context.Context, start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
_, _ = chunk.Seek(0, io.SeekStart)
req := rx.makeRequest(ctx, start, chunk, chunkSize)
req := rx.makeRequest(start, chunk, chunkSize)
res, err := rx.f.client.Do(req)
if err != nil {
return 599, err
@@ -195,7 +194,7 @@ func (rx *resumableUpload) transferChunk(ctx context.Context, start int64, chunk
// Upload uploads the chunks from the input
// It retries each chunk using the pacer and --low-level-retries
func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
func (rx *resumableUpload) Upload() (*drive.File, error) {
start := int64(0)
var StatusCode int
var err error
@@ -210,7 +209,7 @@ func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
// Transfer the chunk
err = rx.f.pacer.Call(func() (bool, error) {
fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
StatusCode, err = rx.transferChunk(ctx, start, chunk, reqSize)
StatusCode, err = rx.transferChunk(start, chunk, reqSize)
again, err := shouldRetry(err)
if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
again = false
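The retry contract in this loop is the pacer's: the callback returns (retry, err) and pacer.Call re-invokes it while retry is true, up to --low-level-retries attempts with pacing between tries. Schematically, under that assumption (doRequest is a hypothetical call):
// Schematic use of the pacer contract (see lib/pacer):
err := p.Call(func() (bool, error) {
	err := doRequest()
	return shouldRetry(err)
})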


@@ -5,7 +5,7 @@ import (
"fmt"
"testing"
"github.com/rclone/rclone/backend/dropbox/dbhash"
"github.com/ncw/rclone/backend/dropbox/dbhash"
"github.com/stretchr/testify/assert"
)
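Only the import path changes here, but as a reminder of what dbhash provides: New returns a standard hash.Hash whose 64-hex-character digest implements Dropbox's published content-hash scheme. A sketch of hashing an in-memory buffer:
// Sketch: Dropbox content hash of a buffer via the package under test.
h := dbhash.New()
_, _ = h.Write([]byte("some file contents"))
digest := fmt.Sprintf("%x", h.Sum(nil)) // 64 hex characters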


@@ -22,7 +22,6 @@ of path_display and all will be well.
*/
import (
"context"
"fmt"
"io"
"log"
@@ -38,24 +37,20 @@ import (
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/dropbox/dbhash"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
"golang.org/x/oauth2"
)
const enc = encodings.Dropbox
// Constants
const (
rcloneClientID = "5jcck7diasz0rqy"
@@ -106,14 +101,10 @@ var (
// A regexp matching path names for files Dropbox ignores
// See https://www.dropbox.com/en/help/145 - Ignored files
ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)
// DbHashType is the hash.Type for Dropbox
DbHashType hash.Type
)
// Register with Fs
func init() {
DbHashType = hash.RegisterHash("DropboxHash", 64, dbhash.New)
fs.Register(&fs.RegInfo{
Name: "dropbox",
Description: "Dropbox",
@@ -380,15 +371,14 @@ func (f *Fs) setRoot(root string) {
// getMetadata gets the metadata for a file or directory
func (f *Fs) getMetadata(objPath string) (entry files.IsMetadata, notFound bool, err error) {
err = f.pacer.Call(func() (bool, error) {
entry, err = f.srv.GetMetadata(&files.GetMetadataArg{
Path: enc.FromStandardPath(objPath),
})
entry, err = f.srv.GetMetadata(&files.GetMetadataArg{Path: objPath})
return shouldRetry(err)
})
if err != nil {
switch e := err.(type) {
case files.GetMetadataAPIError:
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
switch e.EndpointError.Path.Tag {
case files.LookupErrorNotFound:
notFound = true
err = nil
}
@@ -451,7 +441,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *files.FileMetadata) (fs.Obje
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}
@@ -464,7 +454,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
root := f.slashRoot
if dir != "" {
root += "/" + dir
@@ -475,7 +465,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
for {
if !started {
arg := files.ListFolderArg{
Path: enc.FromStandardPath(root),
Path: root,
Recursive: false,
}
if root == "/" {
@@ -488,7 +478,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if err != nil {
switch e := err.(type) {
case files.ListFolderAPIError:
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
switch e.EndpointError.Path.Tag {
case files.LookupErrorNotFound:
err = fs.ErrorDirNotFound
}
}
@@ -525,7 +516,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Only the last element is reliably cased in PathDisplay
entryPath := metadata.PathDisplay
leaf := enc.ToStandardName(path.Base(entryPath))
leaf := path.Base(entryPath)
remote := path.Join(dir, leaf)
if folderInfo != nil {
d := fs.NewDir(remote, time.Now())
@@ -550,22 +541,22 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: src.Remote(),
}
return o, o.Update(ctx, in, src, options...)
return o, o.Update(in, src, options...)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...)
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
func (f *Fs) Mkdir(dir string) error {
root := path.Join(f.slashRoot, dir)
// can't create or run metadata on root
@@ -583,7 +574,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// create it
arg2 := files.CreateFolderArg{
Path: enc.FromStandardPath(root),
Path: root,
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.CreateFolderV2(&arg2)
@@ -595,7 +586,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
func (f *Fs) Rmdir(dir string) error {
root := path.Join(f.slashRoot, dir)
// can't remove root
@@ -609,7 +600,6 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return errors.Wrap(err, "Rmdir")
}
root = enc.FromStandardPath(root)
// check directory empty
arg := files.ListFolderArg{
Path: root,
@@ -652,7 +642,7 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
@@ -666,12 +656,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// Copy
arg := files.RelocationArg{
RelocationPath: files.RelocationPath{
FromPath: enc.FromStandardPath(srcObj.remotePath()),
ToPath: enc.FromStandardPath(dstObj.remotePath()),
},
}
arg := files.RelocationArg{}
arg.FromPath = srcObj.remotePath()
arg.ToPath = dstObj.remotePath()
var err error
var result *files.RelocationResult
err = f.pacer.Call(func() (bool, error) {
@@ -700,12 +687,10 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) (err error) {
func (f *Fs) Purge() (err error) {
// Let dropbox delete the filesystem tree
err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.DeleteV2(&files.DeleteArg{
Path: enc.FromStandardPath(f.slashRoot),
})
_, err = f.srv.DeleteV2(&files.DeleteArg{Path: f.slashRoot})
return shouldRetry(err)
})
return err
@@ -720,7 +705,7 @@ func (f *Fs) Purge(ctx context.Context) (err error) {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
@@ -734,12 +719,9 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// Do the move
arg := files.RelocationArg{
RelocationPath: files.RelocationPath{
FromPath: enc.FromStandardPath(srcObj.remotePath()),
ToPath: enc.FromStandardPath(dstObj.remotePath()),
},
}
arg := files.RelocationArg{}
arg.FromPath = srcObj.remotePath()
arg.ToPath = dstObj.remotePath()
var err error
var result *files.RelocationResult
err = f.pacer.Call(func() (bool, error) {
@@ -763,8 +745,8 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
absPath := enc.FromStandardPath(path.Join(f.slashRoot, remote))
func (f *Fs) PublicLink(remote string) (link string, err error) {
absPath := "/" + path.Join(f.Root(), remote)
fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
createArg := sharing.CreateSharedLinkWithSettingsArg{
Path: absPath,
@@ -775,8 +757,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
return shouldRetry(err)
})
if err != nil && strings.Contains(err.Error(),
sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
if err != nil && strings.Contains(err.Error(), sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
fs.Debugf(absPath, "has a public link already, attempting to retrieve it")
listArg := sharing.ListSharedLinksArg{
Path: absPath,
@@ -817,7 +798,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -838,12 +819,9 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// ...apparently not necessary
// Do the move
arg := files.RelocationArg{
RelocationPath: files.RelocationPath{
FromPath: enc.FromStandardPath(srcPath),
ToPath: enc.FromStandardPath(dstPath),
},
}
arg := files.RelocationArg{}
arg.FromPath = srcPath
arg.ToPath = dstPath
err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.MoveV2(&arg)
return shouldRetry(err)
@@ -856,7 +834,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
func (f *Fs) About() (usage *fs.Usage, err error) {
var q *users.SpaceUsage
err = f.pacer.Call(func() (bool, error) {
q, err = f.users.GetSpaceUsage()
@@ -884,7 +862,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(DbHashType)
return hash.Set(hash.Dropbox)
}
// ------------------------------------------------------------
@@ -908,8 +886,8 @@ func (o *Object) Remote() string {
}
// Hash returns the dropbox special hash
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != DbHashType {
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.Dropbox {
return "", hash.ErrUnsupported
}
err := o.readMetaData()
@@ -970,7 +948,7 @@ func (o *Object) readMetaData() (err error) {
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
func (o *Object) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
fs.Debugf(o, "Failed to read metadata: %v", err)
@@ -982,7 +960,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// SetModTime sets the modification time of the local fs object
//
// Commits the datastore
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
func (o *Object) SetModTime(modTime time.Time) error {
// Dropbox doesn't have a way of doing this so returning this
// error will cause the file to be deleted first then
// re-uploaded to set the time.
@@ -995,13 +973,9 @@ func (o *Object) Storable() bool {
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
fs.FixRangeOption(options, o.bytes)
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
headers := fs.OpenOptionHeaders(options)
arg := files.DownloadArg{
Path: enc.FromStandardPath(o.remotePath()),
ExtraHeaders: headers,
}
arg := files.DownloadArg{Path: o.remotePath(), ExtraHeaders: headers}
err = o.fs.pacer.Call(func() (bool, error) {
_, in, err = o.fs.srv.Download(&arg)
return shouldRetry(err)
@@ -1010,7 +984,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
switch e := err.(type) {
case files.DownloadAPIError:
// Don't attempt to retry copyright violation errors
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorRestrictedContent {
if e.EndpointError.Path.Tag == files.LookupErrorRestrictedContent {
return nil, fserrors.NoRetryError(err)
}
}
@@ -1125,15 +1099,16 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
remote := o.remotePath()
if ignoredFiles.MatchString(remote) {
return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
fs.Logf(o, "File name disallowed - not uploading")
return nil
}
commitInfo := files.NewCommitInfo(enc.FromStandardPath(o.remotePath()))
commitInfo := files.NewCommitInfo(o.remotePath())
commitInfo.Mode.Tag = "overwrite"
// The Dropbox API only accepts timestamps in UTC with second precision.
commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second)
commitInfo.ClientModified = src.ModTime().UTC().Round(time.Second)
size := src.Size()
var err error
@@ -1153,11 +1128,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
func (o *Object) Remove() (err error) {
err = o.fs.pacer.Call(func() (bool, error) {
_, err = o.fs.srv.DeleteV2(&files.DeleteArg{
Path: enc.FromStandardPath(o.remotePath()),
})
_, err = o.fs.srv.DeleteV2(&files.DeleteArg{Path: o.remotePath()})
return shouldRetry(err)
})
return err
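
One recurring change in this Dropbox diff is the encoding layer: one side runs every path that crosses the API boundary through enc.FromStandardPath and decodes results with the To* counterparts. A minimal sketch of that round trip, assuming an encodings.Dropbox table exists parallel to the encodings.Fichier and encodings.FTP constants declared further down in this diff:

package main

import (
    "fmt"

    "github.com/rclone/rclone/fs/encodings"
)

// Assumption: encodings.Dropbox exists like the other per-backend tables.
const enc = encodings.Dropbox

func main() {
    // Encode exactly once, at the API boundary...
    wire := enc.FromStandardPath("/dir/file name") // characters the table covers are replaced with printable equivalents
    fmt.Println(wire)
    // ...and decode whatever the API hands back.
    fmt.Println(enc.ToStandardPath(wire))
}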

View File

@@ -4,8 +4,8 @@ package dropbox
import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote

View File

@@ -1,396 +0,0 @@
package fichier
import (
"context"
"io"
"net/http"
"regexp"
"strconv"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/rest"
)
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
var isAlphaNumeric = regexp.MustCompile(`^[a-zA-Z0-9]+$`).MatchString
func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
request := DownloadRequest{
URL: url,
Single: 1,
}
opts := rest.Opts{
Method: "POST",
Path: "/download/get_token.cgi",
}
var token GetTokenResponse
err := f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, &token)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't get download token")
}
return &token, nil
}
func fileFromSharedFile(file *SharedFile) File {
return File{
URL: file.Link,
Filename: file.Filename,
Size: file.Size,
}
}
func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntries, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: "https://1fichier.com/dir/",
Path: id,
Parameters: map[string][]string{"json": {"1"}},
}
var sharedFiles SharedFolderResponse
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, &sharedFiles)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
}
entries = make([]fs.DirEntry, len(sharedFiles))
for i, sharedFile := range sharedFiles {
entries[i] = f.newObjectFromFile(ctx, "", fileFromSharedFile(&sharedFile))
}
return entries, nil
}
func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesList, err error) {
// fs.Debugf(f, "Requesting files for dir `%s`", directoryID)
request := ListFilesRequest{
FolderID: directoryID,
}
opts := rest.Opts{
Method: "POST",
Path: "/file/ls.cgi",
}
filesList = &FilesList{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, filesList)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
}
for i := range filesList.Items {
item := &filesList.Items[i]
item.Filename = enc.ToStandardName(item.Filename)
}
return filesList, nil
}
func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *FoldersList, err error) {
// fs.Debugf(f, "Requesting folders for id `%s`", directoryID)
request := ListFolderRequest{
FolderID: directoryID,
}
opts := rest.Opts{
Method: "POST",
Path: "/folder/ls.cgi",
}
foldersList = &FoldersList{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, foldersList)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list folders")
}
foldersList.Name = enc.ToStandardName(foldersList.Name)
for i := range foldersList.SubFolders {
folder := &foldersList.SubFolders[i]
folder.Name = enc.ToStandardName(folder.Name)
}
// fs.Debugf(f, "Got FoldersList for id `%s`", directoryID)
return foldersList, err
}
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
}
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
files, err := f.listFiles(ctx, folderID)
if err != nil {
return nil, err
}
folders, err := f.listFolders(ctx, folderID)
if err != nil {
return nil, err
}
entries = make([]fs.DirEntry, len(files.Items)+len(folders.SubFolders))
for i, item := range files.Items {
entries[i] = f.newObjectFromFile(ctx, dir, item)
}
for i, folder := range folders.SubFolders {
createDate, err := time.Parse("2006-01-02 15:04:05", folder.CreateDate)
if err != nil {
return nil, err
}
fullPath := getRemote(dir, folder.Name)
folderID := strconv.Itoa(folder.ID)
entries[len(files.Items)+i] = fs.NewDir(fullPath, createDate).SetID(folderID)
// fs.Debugf(f, "Put Path `%s` for id `%d` into dircache", fullPath, folder.ID)
f.dirCache.Put(fullPath, folderID)
}
return entries, nil
}
func (f *Fs) newObjectFromFile(ctx context.Context, dir string, item File) *Object {
return &Object{
fs: f,
remote: getRemote(dir, item.Filename),
file: item,
}
}
func getRemote(dir, fileName string) string {
if dir == "" {
return fileName
}
return dir + "/" + fileName
}
func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (response *MakeFolderResponse, err error) {
name := enc.FromStandardName(leaf)
// fs.Debugf(f, "Creating folder `%s` in id `%s`", name, directoryID)
request := MakeFolderRequest{
FolderID: folderID,
Name: name,
}
opts := rest.Opts{
Method: "POST",
Path: "/folder/mkdir.cgi",
}
response = &MakeFolderResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create folder")
}
// fs.Debugf(f, "Created Folder `%s` in id `%s`", name, directoryID)
return response, err
}
func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (response *GenericOKResponse, err error) {
// fs.Debugf(f, "Removing folder with id `%s`", directoryID)
request := &RemoveFolderRequest{
FolderID: folderID,
}
opts := rest.Opts{
Method: "POST",
Path: "/folder/rm.cgi",
}
response = &GenericOKResponse{}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't remove folder")
}
if response.Status != "OK" {
return nil, errors.New("Can't remove non-empty dir")
}
// fs.Debugf(f, "Removed Folder with id `%s`", directoryID)
return response, nil
}
func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKResponse, err error) {
request := &RemoveFileRequest{
Files: []RmFile{
{url},
},
}
opts := rest.Opts{
Method: "POST",
Path: "/file/rm.cgi",
}
response = &GenericOKResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't remove file")
}
// fs.Debugf(f, "Removed file with url `%s`", url)
return response, nil
}
func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse, err error) {
// fs.Debugf(f, "Requesting Upload node")
opts := rest.Opts{
Method: "GET",
ContentType: "application/json", // 1Fichier API is bad
Path: "/upload/get_upload_server.cgi",
}
response = &GetUploadNodeResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "didn't get an upload node")
}
// fs.Debugf(f, "Got Upload node")
return response, err
}
func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
// fs.Debugf(f, "Uploading File `%s`", fileName)
fileName = enc.FromStandardName(fileName)
if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
return nil, errors.New("Invalid UploadID")
}
opts := rest.Opts{
Method: "POST",
Path: "/upload.cgi",
Parameters: map[string][]string{
"id": {uploadID},
},
NoResponse: true,
Body: in,
ContentLength: &size,
MultipartContentName: "file[]",
MultipartFileName: fileName,
MultipartParams: map[string][]string{
"did": {folderID},
},
}
if node != "" {
opts.RootURL = "https://" + node
}
err = f.pacer.CallNoRetry(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, nil)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't upload file")
}
// fs.Debugf(f, "Uploaded File `%s`", fileName)
return response, err
}
func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (response *EndFileUploadResponse, err error) {
// fs.Debugf(f, "Ending File Upload `%s`", uploadID)
if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
return nil, errors.New("Invalid UploadID")
}
opts := rest.Opts{
Method: "GET",
Path: "/end.pl",
RootURL: "https://" + nodeurl,
Parameters: map[string][]string{
"xid": {uploadID},
},
ExtraHeaders: map[string]string{
"JSON": "1",
},
}
response = &EndFileUploadResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't finish file upload")
}
return response, err
}
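
Every helper in this file follows one shape: build a rest.Opts, run the request inside pacer.Call, and let shouldRetry classify transient failures against retryErrorCodes. A condensed sketch of that pattern, reusing the file's imports and types (the endpoint name is made up for illustration):

// getExample shows the request skeleton shared by the helpers above.
// "/example.cgi" is a hypothetical endpoint, not part of the 1Fichier API.
func (f *Fs) getExample(ctx context.Context) (*GenericOKResponse, error) {
    opts := rest.Opts{
        Method: "POST",
        Path:   "/example.cgi",
    }
    response := &GenericOKResponse{}
    err := f.pacer.Call(func() (bool, error) {
        resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
        return shouldRetry(resp, err) // retries 429 and 5xx, per retryErrorCodes
    })
    if err != nil {
        return nil, errors.Wrap(err, "couldn't get example")
    }
    return response, nil
}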

View File

@@ -1,413 +0,0 @@
package fichier
import (
"context"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
const (
rootID = "0"
apiBaseURL = "https://api.1fichier.com/v1"
minSleep = 334 * time.Millisecond // 3 API calls per second is recommended
maxSleep = 5 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
const enc = encodings.Fichier
func init() {
fs.Register(&fs.RegInfo{
Name: "fichier",
Description: "1Fichier",
Config: func(name string, config configmap.Mapper) {
},
NewFs: NewFs,
Options: []fs.Option{
{
Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
Name: "api_key",
},
{
Help: "If you want to download a shared folder, add this parameter",
Name: "shared_folder",
Required: false,
Advanced: true,
},
},
})
}
// Options defines the configuration for this backend
type Options struct {
APIKey string `config:"api_key"`
SharedFolder string `config:"shared_folder"`
}
// Fs is the interface a cloud storage system must provide
type Fs struct {
root string
name string
features *fs.Features
dirCache *dircache.DirCache
baseClient *http.Client
options *Options
pacer *fs.Pacer
rest *rest.Client
}
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
folderID, err := strconv.Atoi(pathID)
if err != nil {
return "", false, err
}
folders, err := f.listFolders(ctx, folderID)
if err != nil {
return "", false, err
}
for _, folder := range folders.SubFolders {
if folder.Name == leaf {
pathIDOut := strconv.Itoa(folder.ID)
return pathIDOut, true, nil
}
}
return "", false, nil
}
// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
folderID, err := strconv.Atoi(pathID)
if err != nil {
return "", err
}
resp, err := f.makeFolder(ctx, leaf, folderID)
if err != nil {
return "", err
}
return strconv.Itoa(resp.FolderID), err
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("1Fichier root '%s'", f.root)
}
// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Hashes returns the supported hash types of the filesystem
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.Whirlpool)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// NewFs makes a new Fs object from the path
//
// The path is of the form remote:path
//
// Remotes are looked up in the config file. If the remote isn't
// found then NotFoundInConfigFile will be returned.
//
// On Windows avoid single character remote names as they can be mixed
// up with drive letters.
func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
opt := new(Options)
err := configstruct.Set(config, opt)
if err != nil {
return nil, err
}
// If using a Shared Folder override root
if opt.SharedFolder != "" {
root = ""
}
// workaround for wonky parser
root = strings.Trim(root, "/")
f := &Fs{
name: name,
root: root,
options: opt,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
baseClient: &http.Client{},
}
f.features = (&fs.Features{
DuplicateFiles: true,
CanHaveEmptyDirectories: true,
}).Fill(f)
client := fshttp.NewClient(fs.Config)
f.rest = rest.NewClient(client).SetRoot(apiBaseURL)
f.rest.SetHeader("Authorization", "Bearer "+f.options.APIKey)
f.dirCache = dircache.New(root, rootID, f)
ctx := context.Background()
// Find the current root
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
tempF := *f
tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
tempF.root = newRoot
// Make new Fs which is the parent
err = tempF.dirCache.FindRoot(ctx, false)
if err != nil {
// No root so return old f
return f, nil
}
_, err := tempF.NewObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
return f, nil
}
return nil, err
}
f.features.Fill(&tempF)
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/rclone/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.options.SharedFolder != "" {
return f.listSharedFiles(ctx, f.options.SharedFolder)
}
dirContent, err := f.listDir(ctx, dir)
if err != nil {
return nil, err
}
return dirContent, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return nil, fs.ErrorObjectNotFound
}
return nil, err
}
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
files, err := f.listFiles(ctx, folderID)
if err != nil {
return nil, err
}
for _, file := range files.Items {
if file.Filename == leaf {
path, ok := f.dirCache.GetInv(directoryID)
if !ok {
return nil, errors.New("Cannot find dir in dircache")
}
return f.newObjectFromFile(ctx, path, file), nil
}
}
return nil, fs.ErrorObjectNotFound
}
// Put in to the remote path with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
// return an error or upload it properly (rather than e.g. calling panic).
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
existingObj, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src, options...)
default:
return nil, err
}
}
// putUnchecked uploads the object with the given name and size
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
if size > int64(100e9) {
return nil, errors.New("File too big, can't upload")
} else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
nodeResponse, err := f.getUploadNode(ctx)
if err != nil {
return nil, err
}
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return nil, err
}
_, err = f.uploadFile(ctx, in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL)
if err != nil {
return nil, err
}
fileUploadResponse, err := f.endUpload(ctx, nodeResponse.ID, nodeResponse.URL)
if err != nil {
return nil, err
}
if len(fileUploadResponse.Links) != 1 {
return nil, errors.New("unexpected number of files")
}
link := fileUploadResponse.Links[0]
fileSize, err := strconv.ParseInt(link.Size, 10, 64)
if err != nil {
return nil, err
}
return &Object{
fs: f,
remote: remote,
file: File{
ACL: 0,
CDN: 0,
Checksum: link.Whirlpool,
ContentType: "",
Date: time.Now().Format("2006-01-02 15:04:05"),
Filename: link.Filename,
Pass: 0,
Size: fileSize,
URL: link.Download,
},
}, nil
}
// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.putUnchecked(ctx, in, src.Remote(), src.Size(), options...)
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
err := f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true)
}
return err
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
err := f.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
}
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return err
}
_, err = f.removeFolder(ctx, dir, folderID)
if err != nil {
return err
}
f.dirCache.FlushDir(dir)
return nil
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ dircache.DirCacher = (*Fs)(nil)
)
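
For reference, the options registered above correspond to a remote stanza along these lines in rclone.conf (the key values are placeholders):

[fichier]
type = fichier
api_key = 0123456789abcdef
# optional, to read someone else's share:
# shared_folder = abcdefgh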

View File

@@ -1,17 +0,0 @@
// Test 1Fichier filesystem interface
package fichier
import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fs.Config.LogLevel = fs.LogLevelDebug
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFichier:",
})
}
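
Assuming a remote named TestFichier is configured as above, the usual rclone integration-test invocation applies:

go test -v -remote TestFichier: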

View File

@@ -1,158 +0,0 @@
package fichier
import (
"context"
"io"
"net/http"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/rest"
)
// Object is a filesystem like object provided by an Fs
type Object struct {
fs *Fs
remote string
file File
}
// String returns a description of the Object
func (o *Object) String() string {
return o.file.Filename
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (o *Object) ModTime(ctx context.Context) time.Time {
modTime, err := time.Parse("2006-01-02 15:04:05", o.file.Date)
if err != nil {
return time.Now()
}
return modTime
}
// Size returns the size of the file
func (o *Object) Size() int64 {
return o.file.Size
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.fs
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.Whirlpool {
return "", hash.ErrUnsupported
}
return o.file.Checksum, nil
}
// Storable says whether this object can be stored
func (o *Object) Storable() bool {
return true
}
// SetModTime sets the metadata on the object to set the modification date
func (o *Object) SetModTime(context.Context, time.Time) error {
return fs.ErrorCantSetModTime
//return errors.New("setting modtime is not supported for 1fichier remotes")
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
fs.FixRangeOption(options, o.file.Size)
downloadToken, err := o.fs.getDownloadToken(ctx, o.file.URL)
if err != nil {
return nil, err
}
var resp *http.Response
opts := rest.Opts{
Method: "GET",
RootURL: downloadToken.URL,
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.rest.Call(ctx, &opts)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
return resp.Body, err
}
// Update in to the object with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
if src.Size() < 0 {
return errors.New("refusing to update with unknown size")
}
// upload with new size but old name
info, err := o.fs.putUnchecked(ctx, in, o.Remote(), src.Size(), options...)
if err != nil {
return err
}
// Delete duplicate after successful upload
err = o.Remove(ctx)
if err != nil {
return errors.Wrap(err, "failed to remove old version")
}
// Replace guts of old object with new one
*o = *info.(*Object)
return nil
}
// Remove removes this object
func (o *Object) Remove(ctx context.Context) error {
// fs.Debugf(f, "Removing file `%s` with url `%s`", o.file.Filename, o.file.URL)
_, err := o.fs.deleteFile(ctx, o.file.URL)
if err != nil {
return err
}
return nil
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
return o.file.ContentType
}
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
return o.file.URL
}
// Check the interfaces are satisfied
var (
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)
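
Open above honours fs.OpenOption, with fs.FixRangeOption clamping ranges to the known file size. A caller-side sketch (the helper is ours, assuming the standard context, io/ioutil and rclone fs imports):

// readFirstKiB reads bytes 0-1023 of any fs.Object, such as the
// 1Fichier Object above; illustrative only.
func readFirstKiB(ctx context.Context, o fs.Object) ([]byte, error) {
    rc, err := o.Open(ctx, &fs.RangeOption{Start: 0, End: 1023})
    if err != nil {
        return nil, err
    }
    defer func() { _ = rc.Close() }()
    return ioutil.ReadAll(rc)
}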

View File

@@ -1,120 +0,0 @@
package fichier
// ListFolderRequest is the request structure of the corresponding request
type ListFolderRequest struct {
FolderID int `json:"folder_id"`
}
// ListFilesRequest is the request structure of the corresponding request
type ListFilesRequest struct {
FolderID int `json:"folder_id"`
}
// DownloadRequest is the request structure of the corresponding request
type DownloadRequest struct {
URL string `json:"url"`
Single int `json:"single"`
}
// RemoveFolderRequest is the request structure of the corresponding request
type RemoveFolderRequest struct {
FolderID int `json:"folder_id"`
}
// RemoveFileRequest is the request structure of the corresponding request
type RemoveFileRequest struct {
Files []RmFile `json:"files"`
}
// RmFile is the request structure of the corresponding request
type RmFile struct {
URL string `json:"url"`
}
// GenericOKResponse is the response structure of the corresponding request
type GenericOKResponse struct {
Status string `json:"status"`
Message string `json:"message"`
}
// MakeFolderRequest is the request structure of the corresponding request
type MakeFolderRequest struct {
Name string `json:"name"`
FolderID int `json:"folder_id"`
}
// MakeFolderResponse is the response structure of the corresponding request
type MakeFolderResponse struct {
Name string `json:"name"`
FolderID int `json:"folder_id"`
}
// GetUploadNodeResponse is the response structure of the corresponding request
type GetUploadNodeResponse struct {
ID string `json:"id"`
URL string `json:"url"`
}
// GetTokenResponse is the response structure of the corresponding request
type GetTokenResponse struct {
URL string `json:"url"`
Status string `json:"Status"`
Message string `json:"Message"`
}
// SharedFolderResponse is the response structure of the corresponding request
type SharedFolderResponse []SharedFile
// SharedFile is the structure how 1Fichier returns a shared File
type SharedFile struct {
Filename string `json:"filename"`
Link string `json:"link"`
Size int64 `json:"size"`
}
// EndFileUploadResponse is the response structure of the corresponding request
type EndFileUploadResponse struct {
Incoming int `json:"incoming"`
Links []struct {
Download string `json:"download"`
Filename string `json:"filename"`
Remove string `json:"remove"`
Size string `json:"size"`
Whirlpool string `json:"whirlpool"`
} `json:"links"`
}
// File is the structure how 1Fichier returns a File
type File struct {
ACL int `json:"acl"`
CDN int `json:"cdn"`
Checksum string `json:"checksum"`
ContentType string `json:"content-type"`
Date string `json:"date"`
Filename string `json:"filename"`
Pass int `json:"pass"`
Size int64 `json:"size"`
URL string `json:"url"`
}
// FilesList is the structure how 1Fichier returns a list of files
type FilesList struct {
Items []File `json:"items"`
Status string `json:"Status"`
}
// Folder is the structure how 1Fichier returns a Folder
type Folder struct {
CreateDate string `json:"create_date"`
ID int `json:"id"`
Name string `json:"name"`
Pass int `json:"pass"`
}
// FoldersList is the structure how 1Fichier returns a list of Folders
type FoldersList struct {
FolderID int `json:"folder_id"`
Name string `json:"name"`
Status string `json:"Status"`
SubFolders []Folder `json:"sub_folders"`
}
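
These structs map one-to-one onto 1Fichier's JSON bodies, so a quick marshal shows what actually goes over the wire:

package main

import (
    "encoding/json"
    "fmt"
)

// MakeFolderRequest mirrors the struct defined above.
type MakeFolderRequest struct {
    Name     string `json:"name"`
    FolderID int    `json:"folder_id"`
}

func main() {
    b, _ := json.Marshal(MakeFolderRequest{Name: "photos", FolderID: 0})
    fmt.Println(string(b)) // {"name":"photos","folder_id":0}
}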

View File

@@ -2,8 +2,6 @@
package ftp
import (
"context"
"crypto/tls"
"io"
"net/textproto"
"os"
@@ -12,19 +10,16 @@ import (
"time"
"github.com/jlaffaye/ftp"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
)
const enc = encodings.FTP
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -51,25 +46,11 @@ func init() {
Help: "FTP password",
IsPassword: true,
Required: true,
}, {
Name: "tls",
Help: "Use FTP over TLS (Implicit)",
Default: false,
}, {
Name: "concurrency",
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited",
Default: 0,
Advanced: true,
}, {
Name: "no_check_certificate",
Help: "Do not verify the TLS certificate of the server",
Default: false,
Advanced: true,
}, {
Name: "disable_epsv",
Help: "Disable using EPSV even if server advertises support",
Default: false,
Advanced: true,
},
},
})
@@ -77,14 +58,11 @@ func init() {
// Options defines the configuration for this backend
type Options struct {
Host string `config:"host"`
User string `config:"user"`
Pass string `config:"pass"`
Port string `config:"port"`
TLS bool `config:"tls"`
Concurrency int `config:"concurrency"`
SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"`
Host string `config:"host"`
User string `config:"user"`
Pass string `config:"pass"`
Port string `config:"port"`
Concurrency int `config:"concurrency"`
}
// Fs represents a remote FTP server
@@ -142,18 +120,7 @@ func (f *Fs) Features() *fs.Features {
// Open a new connection to the FTP server.
func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
fs.Debugf(f, "Connecting to FTP server")
ftpConfig := []ftp.DialOption{ftp.DialWithTimeout(fs.Config.ConnectTimeout)}
if f.opt.TLS {
tlsConfig := &tls.Config{
ServerName: f.opt.Host,
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
}
ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
}
if f.opt.DisableEPSV {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
}
c, err := ftp.Dial(f.dialAddr, ftpConfig...)
c, err := ftp.DialTimeout(f.dialAddr, fs.Config.ConnectTimeout)
if err != nil {
fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
return nil, errors.Wrap(err, "ftpConnection Dial")
@@ -215,7 +182,6 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
ctx := context.Background()
// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
// Parse config into Options struct
opt := new(Options)
@@ -237,11 +203,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
}
dialAddr := opt.Host + ":" + port
protocol := "ftp://"
if opt.TLS {
protocol = "ftps://"
}
u := protocol + path.Join(dialAddr+"/", root)
u := "ftp://" + path.Join(dialAddr+"/", root)
f := &Fs{
name: name,
root: root,
@@ -268,7 +230,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
if f.root == "." {
f.root = ""
}
_, err := f.NewObject(ctx, remote)
_, err := f.NewObject(remote)
if err != nil {
if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
// File doesn't exist so return old f
@@ -307,37 +269,10 @@ func translateErrorDir(err error) error {
return err
}
// entryToStandard converts an incoming ftp.Entry to Standard encoding
func entryToStandard(entry *ftp.Entry) {
// Skip . and .. as we don't want these encoded
if entry.Name == "." || entry.Name == ".." {
return
}
entry.Name = enc.ToStandardName(entry.Name)
entry.Target = enc.ToStandardPath(entry.Target)
}
// dirFromStandardPath returns dir in encoded form.
func dirFromStandardPath(dir string) string {
// Skip . and .. as we don't want these encoded
if dir == "." || dir == ".." {
return dir
}
return enc.FromStandardPath(dir)
}
// findItem finds a directory entry for the name in its parent directory
func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
fullPath := path.Join(f.root, remote)
if fullPath == "" || fullPath == "." || fullPath == "/" {
// if root, assume exists and synthesize an entry
return &ftp.Entry{
Name: "",
Type: ftp.EntryTypeFolder,
Time: time.Now(),
}, nil
}
dir := path.Dir(fullPath)
base := path.Base(fullPath)
@@ -345,13 +280,12 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
if err != nil {
return nil, errors.Wrap(err, "findItem")
}
files, err := c.List(dirFromStandardPath(dir))
files, err := c.List(dir)
f.putFtpConnection(&c, err)
if err != nil {
return nil, translateErrorFile(err)
}
for _, file := range files {
entryToStandard(file)
if file.Name == base {
return file, nil
}
@@ -361,7 +295,7 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
func (f *Fs) NewObject(remote string) (o fs.Object, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
entry, err := f.findItem(remote)
if err != nil {
@@ -405,42 +339,17 @@ func (f *Fs) dirExists(remote string) (exists bool, err error) {
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
// defer fs.Trace(dir, "curlevel=%d", curlevel)("")
c, err := f.getFtpConnection()
if err != nil {
return nil, errors.Wrap(err, "list")
}
var listErr error
var files []*ftp.Entry
resultchan := make(chan []*ftp.Entry, 1)
errchan := make(chan error, 1)
go func() {
result, err := c.List(dirFromStandardPath(path.Join(f.root, dir)))
f.putFtpConnection(&c, err)
if err != nil {
errchan <- err
return
}
resultchan <- result
}()
// Wait for List for up to Timeout seconds
timer := time.NewTimer(fs.Config.Timeout)
select {
case listErr = <-errchan:
timer.Stop()
return nil, translateErrorDir(listErr)
case files = <-resultchan:
timer.Stop()
case <-timer.C:
// if timer fired assume no error but connection dead
fs.Errorf(f, "Timeout when waiting for List")
return nil, errors.New("Timeout when waiting for List")
files, err := c.List(path.Join(f.root, dir))
f.putFtpConnection(&c, err)
if err != nil {
return nil, translateErrorDir(err)
}
// Annoyingly FTP returns success for a directory which
// doesn't exist, so check it really doesn't exist if no
// entries found.
@@ -455,7 +364,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
for i := range files {
object := files[i]
entryToStandard(object)
newremote := path.Join(dir, object.Name)
switch object.Type {
case ftp.EntryTypeFolder:
@@ -496,7 +404,7 @@ func (f *Fs) Precision() time.Duration {
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// fs.Debugf(f, "Trying to put file %s", src.Remote())
err := f.mkParentDir(src.Remote())
if err != nil {
@@ -506,13 +414,13 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
fs: f,
remote: src.Remote(),
}
err = o.Update(ctx, in, src, options...)
err = o.Update(in, src, options...)
return o, err
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...)
}
// getInfo reads the FileInfo for a path
@@ -525,21 +433,19 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
if err != nil {
return nil, errors.Wrap(err, "getInfo")
}
files, err := c.List(dirFromStandardPath(dir))
files, err := c.List(dir)
f.putFtpConnection(&c, err)
if err != nil {
return nil, translateErrorFile(err)
}
for i := range files {
file := files[i]
entryToStandard(file)
if file.Name == base {
if files[i].Name == base {
info := &FileInfo{
Name: remote,
Size: file.Size,
ModTime: file.Time,
IsDir: file.Type == ftp.EntryTypeFolder,
Size: files[i].Size,
ModTime: files[i].Time,
IsDir: files[i].Type == ftp.EntryTypeFolder,
}
return info, nil
}
@@ -549,7 +455,6 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
// mkdir makes the directory and parents using unrooted paths
func (f *Fs) mkdir(abspath string) error {
abspath = path.Clean(abspath)
if abspath == "." || abspath == "/" {
return nil
}
@@ -571,7 +476,7 @@ func (f *Fs) mkdir(abspath string) error {
if connErr != nil {
return errors.Wrap(connErr, "mkdir")
}
err = c.MakeDir(dirFromStandardPath(abspath))
err = c.MakeDir(abspath)
f.putFtpConnection(&c, err)
switch errX := err.(type) {
case *textproto.Error:
@@ -593,7 +498,7 @@ func (f *Fs) mkParentDir(remote string) error {
}
// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
func (f *Fs) Mkdir(dir string) (err error) {
// defer fs.Trace(dir, "")("err=%v", &err)
root := path.Join(f.root, dir)
return f.mkdir(root)
@@ -602,18 +507,18 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
func (f *Fs) Rmdir(dir string) error {
c, err := f.getFtpConnection()
if err != nil {
return errors.Wrap(translateErrorFile(err), "Rmdir")
}
err = c.RemoveDir(dirFromStandardPath(path.Join(f.root, dir)))
err = c.RemoveDir(path.Join(f.root, dir))
f.putFtpConnection(&c, err)
return translateErrorDir(err)
}
// Move renames a remote file object
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
@@ -628,14 +533,14 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, errors.Wrap(err, "Move")
}
err = c.Rename(
enc.FromStandardPath(path.Join(srcObj.fs.root, srcObj.remote)),
enc.FromStandardPath(path.Join(f.root, remote)),
path.Join(srcObj.fs.root, srcObj.remote),
path.Join(f.root, remote),
)
f.putFtpConnection(&c, err)
if err != nil {
return nil, errors.Wrap(err, "Move Rename failed")
}
dstObj, err := f.NewObject(ctx, remote)
dstObj, err := f.NewObject(remote)
if err != nil {
return nil, errors.Wrap(err, "Move NewObject failed")
}
@@ -650,7 +555,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -682,8 +587,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return errors.Wrap(err, "DirMove")
}
err = c.Rename(
dirFromStandardPath(srcPath),
dirFromStandardPath(dstPath),
srcPath,
dstPath,
)
f.putFtpConnection(&c, err)
if err != nil {
@@ -713,7 +618,7 @@ func (o *Object) Remote() string {
}
// Hash returns the hash of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
func (o *Object) Hash(t hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
@@ -723,12 +628,12 @@ func (o *Object) Size() int64 {
}
// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
func (o *Object) ModTime() time.Time {
return o.info.ModTime
}
// SetModTime sets the modification time of the object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
func (o *Object) SetModTime(modTime time.Time) error {
return nil
}
@@ -789,7 +694,7 @@ func (f *ftpReadCloser) Close() error {
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
// defer fs.Trace(o, "")("rc=%v, err=%v", &rc, &err)
path := path.Join(o.fs.root, o.remote)
var offset, limit int64 = 0, -1
@@ -809,7 +714,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
if err != nil {
return nil, errors.Wrap(err, "open")
}
fd, err := c.RetrFrom(enc.FromStandardPath(path), uint64(offset))
fd, err := c.RetrFrom(path, uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
return nil, errors.Wrap(err, "open")
@@ -823,7 +728,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
// defer fs.Trace(o, "src=%v", src)("err=%v", &err)
path := path.Join(o.fs.root, o.remote)
// remove the file if upload failed
@@ -833,7 +738,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// may still be dealing with it for a moment. A sleep isn't ideal but I haven't been
// able to think of a better method to find out if the server has finished - ncw
time.Sleep(1 * time.Second)
removeErr := o.Remove(ctx)
removeErr := o.Remove()
if removeErr != nil {
fs.Debugf(o, "Failed to remove: %v", removeErr)
} else {
@@ -844,7 +749,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return errors.Wrap(err, "Update")
}
err = c.Stor(enc.FromStandardPath(path), in)
err = c.Stor(path, in)
if err != nil {
_ = c.Quit() // toss this connection to avoid sync errors
remove()
@@ -859,7 +764,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
func (o *Object) Remove() (err error) {
// defer fs.Trace(o, "")("err=%v", &err)
path := path.Join(o.fs.root, o.remote)
// Check if it's a directory or a file
@@ -868,13 +773,13 @@ func (o *Object) Remove(ctx context.Context) (err error) {
return err
}
if info.IsDir {
err = o.fs.Rmdir(ctx, o.remote)
err = o.fs.Rmdir(o.remote)
} else {
c, err := o.fs.getFtpConnection()
if err != nil {
return errors.Wrap(err, "Remove")
}
err = c.Delete(enc.FromStandardPath(path))
err = c.Delete(path)
o.fs.putFtpConnection(&c, err)
}
return err
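
The ftpConnection hunk above is where the tls, no_check_certificate and disable_epsv options shown in this diff take effect. A standalone sketch of the same dial sequence against github.com/jlaffaye/ftp (host and port are placeholders; 990 is the conventional implicit-TLS port):

package main

import (
    "crypto/tls"
    "log"
    "time"

    "github.com/jlaffaye/ftp"
)

func main() {
    tlsConfig := &tls.Config{
        ServerName:         "ftp.example.com",
        InsecureSkipVerify: false, // what no_check_certificate would toggle
    }
    c, err := ftp.Dial("ftp.example.com:990",
        ftp.DialWithTimeout(30*time.Second),
        ftp.DialWithTLS(tlsConfig),     // implicit TLS, as in the tls option
        ftp.DialWithDisabledEPSV(true), // as in the disable_epsv option
    )
    if err != nil {
        log.Fatal(err)
    }
    defer func() { _ = c.Quit() }()
}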

View File

@@ -4,8 +4,8 @@ package ftp_test
import (
"testing"
"github.com/rclone/rclone/backend/ftp"
"github.com/rclone/rclone/fstest/fstests"
"github.com/ncw/rclone/backend/ftp"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote

View File

@@ -23,23 +23,23 @@ import (
"net/http"
"os"
"path"
"regexp"
"strings"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/googleapi"
@@ -61,7 +61,7 @@ const (
var (
// Description of how to auth for this app
storageConfig = &oauth2.Config{
Scopes: []string{storage.DevstorageReadWriteScope},
Scopes: []string{storage.DevstorageFullControlScope},
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
@@ -69,8 +69,6 @@ var (
}
)
const enc = encodings.GoogleCloudStorage
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -266,16 +264,16 @@ type Options struct {
// Fs represents a remote storage server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache of bucket status
pacer *fs.Pacer // To pace the API calls
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
bucket string // the bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucket OK
bucketOK bool // true if we have created the bucket
pacer *fs.Pacer // To pace the API calls
}
// Object describes a storage object
@@ -300,18 +298,18 @@ func (f *Fs) Name() string {
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
if f.root == "" {
return f.bucket
}
return f.bucket + "/" + f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
if f.rootBucket == "" {
return fmt.Sprintf("GCS root")
if f.root == "" {
return fmt.Sprintf("Storage bucket %s", f.bucket)
}
if f.rootDirectory == "" {
return fmt.Sprintf("GCS bucket %s", f.rootBucket)
}
return fmt.Sprintf("GCS bucket %s path %s", f.rootBucket, f.rootDirectory)
return fmt.Sprintf("Storage bucket %s path %s", f.bucket, f.root)
}
// Features returns the optional features of this Fs
@@ -343,24 +341,21 @@ func shouldRetry(err error) (again bool, errOut error) {
return again, err
}
// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
// Pattern to match a storage path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
// parsePath parses a storage 'url'
func parsePath(path string) (bucket, directory string, err error) {
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = errors.Errorf("couldn't find bucket in storage path %q", path)
} else {
bucket, directory = parts[1], parts[2]
directory = strings.Trim(directory, "/")
}
return
}
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
return enc.FromStandardName(bucketName), enc.FromStandardPath(bucketPath)
}
// split returns bucket and bucketPath from the object
func (o *Object) split() (bucket, bucketPath string) {
return o.fs.split(o.remote)
}
func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
if err != nil {
@@ -370,15 +365,8 @@ func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
}
// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
f.root = parsePath(root)
f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
var oAuthClient *http.Client
// Parse config into Options struct
@@ -418,19 +406,22 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
}
f := &Fs{
name: name,
root: root,
opt: *opt,
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
cache: bucket.NewCache(),
bucket, directory, err := parsePath(root)
if err != nil {
return nil, err
}
f := &Fs{
name: name,
bucket: bucket,
root: directory,
opt: *opt,
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
}
f.setRoot(root)
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
}).Fill(f)
// Create a new authorized Drive client.
@@ -440,19 +431,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
}
if f.rootBucket != "" && f.rootDirectory != "" {
if f.root != "" {
f.root += "/"
// Check to see if the object exists
encodedDirectory := enc.FromStandardPath(f.rootDirectory)
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
_, err = f.svc.Objects.Get(bucket, directory).Do()
return shouldRetry(err)
})
if err == nil {
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
f.root = path.Dir(directory)
if f.root == "." {
f.root = ""
} else {
f.root += "/"
}
f.setRoot(newRoot)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
@@ -463,7 +455,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *storage.Object) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(remote string, info *storage.Object) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -471,7 +463,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *storage
if info != nil {
o.setMetaData(info)
} else {
err := o.readMetaData(ctx) // reads info and meta, returning an error
err := o.readMetaData() // reads info and meta, returning an error
if err != nil {
return nil, err
}
@@ -481,8 +473,8 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *storage
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}
// listFn is called from list to handle an object.
@@ -493,24 +485,20 @@ type listFn func(remote string, object *storage.Object, isDirectory bool) error
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
//
// The remote has prefix removed from it and if addBucket is set
// then it adds the bucket to the start.
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) (err error) {
if prefix != "" {
prefix += "/"
func (f *Fs) list(dir string, recurse bool, fn listFn) (err error) {
root := f.root
rootLength := len(root)
if dir != "" {
root += dir + "/"
}
if directory != "" {
directory += "/"
}
list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks)
list := f.svc.Objects.List(f.bucket).Prefix(root).MaxResults(listChunks)
if !recurse {
list = list.Delimiter("/")
}
for {
var objects *storage.Objects
err = f.pacer.Call(func() (bool, error) {
objects, err = list.Context(ctx).Do()
objects, err = list.Do()
return shouldRetry(err)
})
if err != nil {
@@ -523,38 +511,31 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
if !recurse {
var object storage.Object
for _, remote := range objects.Prefixes {
if !strings.HasSuffix(remote, "/") {
for _, prefix := range objects.Prefixes {
if !strings.HasSuffix(prefix, "/") {
continue
}
remote = enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", remote)
continue
}
remote = remote[len(prefix) : len(remote)-1]
if addBucket {
remote = path.Join(bucket, remote)
}
err = fn(remote, &object, true)
err = fn(prefix[rootLength:len(prefix)-1], &object, true)
if err != nil {
return err
}
}
}
for _, object := range objects.Items {
remote := enc.ToStandardPath(object.Name)
if !strings.HasPrefix(remote, prefix) {
if !strings.HasPrefix(object.Name, root) {
fs.Logf(f, "Odd name received %q", object.Name)
continue
}
remote = remote[len(prefix):]
isDirectory := strings.HasSuffix(remote, "/")
if addBucket {
remote = path.Join(bucket, remote)
}
remote := object.Name[rootLength:]
// is this a directory marker?
if isDirectory && object.Size == 0 {
if (strings.HasSuffix(remote, "/") || remote == "") && object.Size == 0 {
if recurse && remote != "" {
// add a directory in if --fast-list since there will be no prefixes
err = fn(remote[:len(remote)-1], object, true)
if err != nil {
return err
}
}
continue // skip directory marker
}
err = fn(remote, object, false)
@@ -571,23 +552,32 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) {
func (f *Fs) itemToDirEntry(remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) {
if isDirectory {
d := fs.NewDir(remote, time.Time{}).SetSize(int64(object.Size))
return d, nil
}
o, err := f.newObjectWithInfo(ctx, remote, object)
o, err := f.newObjectWithInfo(remote, object)
if err != nil {
return nil, err
}
return o, nil
}
// mark the bucket as being OK
func (f *Fs) markBucketOK() {
if f.bucket != "" {
f.bucketOKMu.Lock()
f.bucketOK = true
f.bucketOKMu.Unlock()
}
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
// List the objects
err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
err = f.list(dir, false, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
@@ -600,12 +590,15 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
return nil, err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
f.markBucketOK()
return entries, err
}
// listBuckets lists the buckets
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
if dir != "" {
return nil, fs.ErrorListBucketRequired
}
if f.opt.ProjectNumber == "" {
return nil, errors.New("can't list buckets without project number")
}
@@ -613,14 +606,14 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
for {
var buckets *storage.Buckets
err = f.pacer.Call(func() (bool, error) {
buckets, err = listBuckets.Context(ctx).Do()
buckets, err = listBuckets.Do()
return shouldRetry(err)
})
if err != nil {
return nil, err
}
for _, bucket := range buckets.Items {
d := fs.NewDir(enc.ToStandardName(bucket.Name), time.Time{})
d := fs.NewDir(bucket.Name, time.Time{})
entries = append(entries, d)
}
if buckets.NextPageToken == "" {
@@ -640,15 +633,11 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
bucket, directory := f.split(dir)
if bucket == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
if f.bucket == "" {
return f.listBuckets(dir)
}
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
return f.listDir(dir)
}
// ListR lists the objects and directories of the Fs starting
@@ -667,44 +656,23 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
if f.bucket == "" {
return fs.ErrorListBucketRequired
}
list := walk.NewListRHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
if err != nil {
return err
}
return list.Add(entry)
})
}
if bucket == "" {
entries, err := f.listBuckets(ctx)
err = f.list(dir, true, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
bucket := entry.Remote()
err = listR(bucket, "", f.rootDirectory, true)
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
}
} else {
err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
return list.Add(entry)
})
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.markBucketOK()
return list.Flush()
}
@@ -713,88 +681,94 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: src.Remote(),
}
return o, o.Update(ctx, in, src, options...)
return o, o.Update(in, src, options...)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...)
}
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
bucket, _ := f.split(dir)
return f.makeBucket(ctx, bucket)
}
func (f *Fs) Mkdir(dir string) (err error) {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.bucketOK {
return nil
}
// List something from the bucket to see if it exists. Doing it like this enables the use of a
// service account that only has the "Storage Object Admin" role. See #2193 for details.
// makeBucket creates the bucket if it doesn't exist
func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
return f.cache.Create(bucket, func() error {
// List something from the bucket to see if it exists. Doing it like this enables the use of a
// service account that only has the "Storage Object Admin" role. See #2193 for details.
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
return shouldRetry(err)
})
if err == nil {
// Bucket already exists
return nil
} else if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code != http.StatusNotFound {
return errors.Wrap(err, "failed to get bucket")
}
} else {
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.List(f.bucket).MaxResults(1).Do()
return shouldRetry(err)
})
if err == nil {
// Bucket already exists
f.bucketOK = true
return nil
} else if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code != http.StatusNotFound {
return errors.Wrap(err, "failed to get bucket")
}
} else {
return errors.Wrap(err, "failed to get bucket")
}
if f.opt.ProjectNumber == "" {
return errors.New("can't make bucket without project number")
}
if f.opt.ProjectNumber == "" {
return errors.New("can't make bucket without project number")
}
bucket := storage.Bucket{
Name: bucket,
Location: f.opt.Location,
StorageClass: f.opt.StorageClass,
bucket := storage.Bucket{
Name: f.bucket,
Location: f.opt.Location,
StorageClass: f.opt.StorageClass,
}
if f.opt.BucketPolicyOnly {
bucket.IamConfiguration = &storage.BucketIamConfiguration{
BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
Enabled: true,
},
}
if f.opt.BucketPolicyOnly {
bucket.IamConfiguration = &storage.BucketIamConfiguration{
BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
Enabled: true,
},
}
}
err = f.pacer.Call(func() (bool, error) {
insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket)
if !f.opt.BucketPolicyOnly {
insertBucket.PredefinedAcl(f.opt.BucketACL)
}
return f.pacer.Call(func() (bool, error) {
insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket)
if !f.opt.BucketPolicyOnly {
insertBucket.PredefinedAcl(f.opt.BucketACL)
}
_, err = insertBucket.Context(ctx).Do()
return shouldRetry(err)
})
}, nil)
_, err = insertBucket.Do()
return shouldRetry(err)
})
if err == nil {
f.bucketOK = true
}
return err
}
// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty: Error 409: The bucket you tried
// to delete was not empty.
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
bucket, directory := f.split(dir)
if bucket == "" || directory != "" {
func (f *Fs) Rmdir(dir string) (err error) {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.root != "" || dir != "" {
return nil
}
return f.cache.Remove(bucket, func() error {
return f.pacer.Call(func() (bool, error) {
err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
return shouldRetry(err)
})
err = f.pacer.Call(func() (bool, error) {
err = f.svc.Buckets.Delete(f.bucket).Do()
return shouldRetry(err)
})
if err == nil {
f.bucketOK = false
}
return err
}
// Precision returns the precision
@@ -811,9 +785,8 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstBucket, dstPath := f.split(remote)
err := f.makeBucket(ctx, dstBucket)
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
err := f.Mkdir("")
if err != nil {
return nil, err
}
@@ -822,7 +795,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
srcBucket, srcPath := srcObj.split()
// Temporary Object under construction
dstObj := &Object{
@@ -830,13 +802,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
remote: remote,
}
srcBucket := srcObj.fs.bucket
srcObject := srcObj.fs.root + srcObj.remote
dstBucket := f.bucket
dstObject := f.root + remote
var newObject *storage.Object
err = f.pacer.Call(func() (bool, error) {
copyObject := f.svc.Objects.Copy(srcBucket, srcPath, dstBucket, dstPath, nil)
if !f.opt.BucketPolicyOnly {
copyObject.DestinationPredefinedAcl(f.opt.ObjectACL)
}
newObject, err = copyObject.Context(ctx).Do()
newObject, err = f.svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, nil).Do()
return shouldRetry(err)
})
if err != nil {
@@ -873,7 +845,7 @@ func (o *Object) Remote() string {
}
// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
@@ -919,33 +891,24 @@ func (o *Object) setMetaData(info *storage.Object) {
}
}
// readObjectInfo reads the definition for an object
func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) {
bucket, bucketPath := o.split()
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
if !o.modTime.IsZero() {
return nil
}
var object *storage.Object
err = o.fs.pacer.Call(func() (bool, error) {
object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
object, err = o.fs.svc.Objects.Get(o.fs.bucket, o.fs.root+o.remote).Do()
return shouldRetry(err)
})
if err != nil {
if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code == http.StatusNotFound {
return nil, fs.ErrorObjectNotFound
return fs.ErrorObjectNotFound
}
}
return nil, err
}
return object, nil
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
if !o.modTime.IsZero() {
return nil
}
object, err := o.readObjectInfo(ctx)
if err != nil {
return err
}
o.setMetaData(object)
@@ -956,8 +919,8 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData(ctx)
func (o *Object) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
// fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
@@ -973,28 +936,16 @@ func metadataFromModTime(modTime time.Time) map[string]string {
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
// read the complete existing object first
object, err := o.readObjectInfo(ctx)
if err != nil {
return err
func (o *Object) SetModTime(modTime time.Time) (err error) {
// This only adds metadata so will preserve other metadata
object := storage.Object{
Bucket: o.fs.bucket,
Name: o.fs.root + o.remote,
Metadata: metadataFromModTime(modTime),
}
// Add the mtime to the existing metadata
mtime := modTime.Format(timeFormatOut)
if object.Metadata == nil {
object.Metadata = make(map[string]string, 1)
}
object.Metadata[metaMtime] = mtime
// Copy the object to itself to update the metadata
// Using PATCH requires too many permissions
bucket, bucketPath := o.split()
var newObject *storage.Object
err = o.fs.pacer.Call(func() (bool, error) {
copyObject := o.fs.svc.Objects.Copy(bucket, bucketPath, bucket, bucketPath, object)
if !o.fs.opt.BucketPolicyOnly {
copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = copyObject.Context(ctx).Do()
newObject, err = o.fs.svc.Objects.Patch(o.fs.bucket, o.fs.root+o.remote, &object).Do()
return shouldRetry(err)
})
if err != nil {
@@ -1010,13 +961,11 @@ func (o *Object) Storable() bool {
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
req, err := http.NewRequest("GET", o.url, nil)
if err != nil {
return nil, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
fs.FixRangeOption(options, o.bytes)
fs.OpenOptionAddHTTPHeaders(req.Header, options)
var res *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1043,27 +992,27 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
bucket, bucketPath := o.split()
err := o.fs.makeBucket(ctx, bucket)
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
err := o.fs.Mkdir("")
if err != nil {
return err
}
modTime := src.ModTime(ctx)
modTime := src.ModTime()
object := storage.Object{
Bucket: bucket,
Name: bucketPath,
ContentType: fs.MimeType(ctx, src),
Bucket: o.fs.bucket,
Name: o.fs.root + o.remote,
ContentType: fs.MimeType(src),
Updated: modTime.Format(timeFormatOut), // Doesn't get set
Metadata: metadataFromModTime(modTime),
}
var newObject *storage.Object
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
insertObject := o.fs.svc.Objects.Insert(bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
insertObject := o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
if !o.fs.opt.BucketPolicyOnly {
insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = insertObject.Context(ctx).Do()
newObject, err = insertObject.Do()
return shouldRetry(err)
})
if err != nil {
@@ -1075,17 +1024,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
bucket, bucketPath := o.split()
func (o *Object) Remove() (err error) {
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
err = o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do()
return shouldRetry(err)
})
return err
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
func (o *Object) MimeType() string {
return o.mimeType
}


@@ -5,8 +5,8 @@ package googlecloudstorage_test
import (
"testing"
"github.com/rclone/rclone/backend/googlecloudstorage"
"github.com/rclone/rclone/fstest/fstests"
"github.com/ncw/rclone/backend/googlecloudstorage"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote


@@ -1,148 +0,0 @@
// This file contains the albums abstraction
package googlephotos
import (
"path"
"strings"
"sync"
"github.com/rclone/rclone/backend/googlephotos/api"
)
// All the albums
type albums struct {
mu sync.Mutex
dupes map[string][]*api.Album // duplicated names
byID map[string]*api.Album //..indexed by ID
byTitle map[string]*api.Album //..indexed by Title
path map[string][]string // partial album names to directory
}
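// Illustrative contents: after adding albums titled "one" and "one/sub",
// path holds {"": ["one"], "one": ["sub"]} so the intermediate
// directories can be listed.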
// Create a new album
func newAlbums() *albums {
return &albums{
dupes: map[string][]*api.Album{},
byID: map[string]*api.Album{},
byTitle: map[string]*api.Album{},
path: map[string][]string{},
}
}
// add an album
func (as *albums) add(album *api.Album) {
// Munge the name of the album into a sensible path name
album.Title = path.Clean(album.Title)
if album.Title == "." || album.Title == "/" {
album.Title = addID("", album.ID)
}
as.mu.Lock()
as._add(album)
as.mu.Unlock()
}
// _add an album - call with lock held
func (as *albums) _add(album *api.Album) {
// update dupes by title
dupes := as.dupes[album.Title]
dupes = append(dupes, album)
as.dupes[album.Title] = dupes
// Dedupe the album name if necessary
if len(dupes) >= 2 {
// If this is the first dupe, then need to adjust the first one
if len(dupes) == 2 {
firstAlbum := dupes[0]
as._del(firstAlbum)
as._add(firstAlbum)
// undo add of firstAlbum to dupes
as.dupes[album.Title] = dupes
}
album.Title = addID(album.Title, album.ID)
}
// Store the new album
as.byID[album.ID] = album
as.byTitle[album.Title] = album
// Store the partial paths
dir, leaf := album.Title, ""
for dir != "" {
i := strings.LastIndex(dir, "/")
if i >= 0 {
dir, leaf = dir[:i], dir[i+1:]
} else {
dir, leaf = "", dir
}
dirs := as.path[dir]
found := false
for _, dir := range dirs {
if dir == leaf {
found = true
}
}
if !found {
as.path[dir] = append(as.path[dir], leaf)
}
}
}
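// Illustrative dedupe: adding two albums both titled "two", with IDs "2"
// and "2a", stores them under the titles "two {2}" and "two {2a}".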
// del an album
func (as *albums) del(album *api.Album) {
as.mu.Lock()
as._del(album)
as.mu.Unlock()
}
// _del an album - call with lock held
func (as *albums) _del(album *api.Album) {
// We leave in dupes so it doesn't cause albums to get renamed
// Remove from byID and byTitle
delete(as.byID, album.ID)
delete(as.byTitle, album.Title)
// Remove from paths
dir, leaf := album.Title, ""
for dir != "" {
// Can't delete if this dir exists anywhere in the path structure
if _, found := as.path[dir]; found {
break
}
i := strings.LastIndex(dir, "/")
if i >= 0 {
dir, leaf = dir[:i], dir[i+1:]
} else {
dir, leaf = "", dir
}
dirs := as.path[dir]
for i, dir := range dirs {
if dir == leaf {
dirs = append(dirs[:i], dirs[i+1:]...)
break
}
}
if len(dirs) == 0 {
delete(as.path, dir)
} else {
as.path[dir] = dirs
}
}
}
// get an album by title
func (as *albums) get(title string) (album *api.Album, ok bool) {
as.mu.Lock()
defer as.mu.Unlock()
album, ok = as.byTitle[title]
return album, ok
}
// getDirs gets directories below an album path
func (as *albums) getDirs(albumPath string) (dirs []string, ok bool) {
as.mu.Lock()
defer as.mu.Unlock()
dirs, ok = as.path[albumPath]
return dirs, ok
}


@@ -1,311 +0,0 @@
package googlephotos
import (
"testing"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/stretchr/testify/assert"
)
func TestNewAlbums(t *testing.T) {
albums := newAlbums()
assert.NotNil(t, albums.dupes)
assert.NotNil(t, albums.byID)
assert.NotNil(t, albums.byTitle)
assert.NotNil(t, albums.path)
}
func TestAlbumsAdd(t *testing.T) {
albums := newAlbums()
assert.Equal(t, map[string][]*api.Album{}, albums.dupes)
assert.Equal(t, map[string]*api.Album{}, albums.byID)
assert.Equal(t, map[string]*api.Album{}, albums.byTitle)
assert.Equal(t, map[string][]string{}, albums.path)
a1 := &api.Album{
Title: "one",
ID: "1",
}
albums.add(a1)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one": a1,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one"},
}, albums.path)
a2 := &api.Album{
Title: "two",
ID: "2",
}
albums.add(a2)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
"2": a2,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one": a1,
"two": a2,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two"},
}, albums.path)
// Add a duplicate
a2a := &api.Album{
Title: "two",
ID: "2a",
}
albums.add(a2a)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
"2": a2,
"2a": a2a,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one": a1,
"two {2}": a2,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}"},
}, albums.path)
// Add a sub directory
a1sub := &api.Album{
Title: "one/sub",
ID: "1sub",
}
albums.add(a1sub)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
"2": a2,
"2a": a2a,
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one": a1,
"one/sub": a1sub,
"two {2}": a2,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}"},
"one": []string{"sub"},
}, albums.path)
// Add a weird path
a0 := &api.Album{
Title: "/../././..////.",
ID: "0",
}
albums.add(a0)
assert.Equal(t, map[string][]*api.Album{
"{0}": []*api.Album{a0},
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"0": a0,
"1": a1,
"2": a2,
"2a": a2a,
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"{0}": a0,
"one": a1,
"one/sub": a1sub,
"two {2}": a2,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}", "{0}"},
"one": []string{"sub"},
}, albums.path)
}
func TestAlbumsDel(t *testing.T) {
albums := newAlbums()
a1 := &api.Album{
Title: "one",
ID: "1",
}
albums.add(a1)
a2 := &api.Album{
Title: "two",
ID: "2",
}
albums.add(a2)
// Add a duplicate
a2a := &api.Album{
Title: "two",
ID: "2a",
}
albums.add(a2a)
// Add a sub directory
a1sub := &api.Album{
Title: "one/sub",
ID: "1sub",
}
albums.add(a1sub)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
"2": a2,
"2a": a2a,
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one": a1,
"one/sub": a1sub,
"two {2}": a2,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}"},
"one": []string{"sub"},
}, albums.path)
albums.del(a1)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"2": a2,
"2a": a2a,
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one/sub": a1sub,
"two {2}": a2,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}"},
"one": []string{"sub"},
}, albums.path)
albums.del(a2)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"2a": a2a,
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one/sub": a1sub,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2a}"},
"one": []string{"sub"},
}, albums.path)
albums.del(a2a)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one/sub": a1sub,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one"},
"one": []string{"sub"},
}, albums.path)
albums.del(a1sub)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{}, albums.byID)
assert.Equal(t, map[string]*api.Album{}, albums.byTitle)
assert.Equal(t, map[string][]string{}, albums.path)
}
func TestAlbumsGet(t *testing.T) {
albums := newAlbums()
a1 := &api.Album{
Title: "one",
ID: "1",
}
albums.add(a1)
album, ok := albums.get("one")
assert.Equal(t, true, ok)
assert.Equal(t, a1, album)
album, ok = albums.get("notfound")
assert.Equal(t, false, ok)
assert.Nil(t, album)
}
func TestAlbumsGetDirs(t *testing.T) {
albums := newAlbums()
a1 := &api.Album{
Title: "one",
ID: "1",
}
albums.add(a1)
dirs, ok := albums.getDirs("")
assert.Equal(t, true, ok)
assert.Equal(t, []string{"one"}, dirs)
dirs, ok = albums.getDirs("notfound")
assert.Equal(t, false, ok)
assert.Nil(t, dirs)
}


@@ -1,190 +0,0 @@
package api
import (
"fmt"
"time"
)
// ErrorDetails describes the internals of the Error type
type ErrorDetails struct {
Code int `json:"code"`
Message string `json:"message"`
Status string `json:"status"`
}
// Error is returned on errors
type Error struct {
Details ErrorDetails `json:"error"`
}
// Error satisfies the error interface
func (e *Error) Error() string {
return fmt.Sprintf("%s (%d %s)", e.Details.Message, e.Details.Code, e.Details.Status)
}
// Album of photos
type Album struct {
ID string `json:"id,omitempty"`
Title string `json:"title"`
ProductURL string `json:"productUrl,omitempty"`
MediaItemsCount string `json:"mediaItemsCount,omitempty"`
CoverPhotoBaseURL string `json:"coverPhotoBaseUrl,omitempty"`
CoverPhotoMediaItemID string `json:"coverPhotoMediaItemId,omitempty"`
IsWriteable bool `json:"isWriteable,omitempty"`
}
// ListAlbums is returned from albums.list and sharedAlbums.list
type ListAlbums struct {
Albums []Album `json:"albums"`
SharedAlbums []Album `json:"sharedAlbums"`
NextPageToken string `json:"nextPageToken"`
}
// CreateAlbum creates an Album
type CreateAlbum struct {
Album *Album `json:"album"`
}
// MediaItem is a photo or video
type MediaItem struct {
ID string `json:"id"`
ProductURL string `json:"productUrl"`
BaseURL string `json:"baseUrl"`
MimeType string `json:"mimeType"`
MediaMetadata struct {
CreationTime time.Time `json:"creationTime"`
Width string `json:"width"`
Height string `json:"height"`
Photo struct {
} `json:"photo"`
} `json:"mediaMetadata"`
Filename string `json:"filename"`
}
// MediaItems is returned from mediaitems.list, mediaitems.search
type MediaItems struct {
MediaItems []MediaItem `json:"mediaItems"`
NextPageToken string `json:"nextPageToken"`
}
// Content categories
// NONE Default content category. This category is ignored when any other category is used in the filter.
// LANDSCAPES Media items containing landscapes.
// RECEIPTS Media items containing receipts.
// CITYSCAPES Media items containing cityscapes.
// LANDMARKS Media items containing landmarks.
// SELFIES Media items that are selfies.
// PEOPLE Media items containing people.
// PETS Media items containing pets.
// WEDDINGS Media items from weddings.
// BIRTHDAYS Media items from birthdays.
// DOCUMENTS Media items containing documents.
// TRAVEL Media items taken during travel.
// ANIMALS Media items containing animals.
// FOOD Media items containing food.
// SPORT Media items from sporting events.
// NIGHT Media items taken at night.
// PERFORMANCES Media items from performances.
// WHITEBOARDS Media items containing whiteboards.
// SCREENSHOTS Media items that are screenshots.
// UTILITY Media items that are considered to be utility. These include, but aren't limited to, documents, screenshots, whiteboards etc.
// ARTS Media items containing art.
// CRAFTS Media items containing crafts.
// FASHION Media items related to fashion.
// HOUSES Media items containing houses.
// GARDENS Media items containing gardens.
// FLOWERS Media items containing flowers.
// HOLIDAYS Media items taken of holidays.
// MediaTypes
// ALL_MEDIA Treated as if no filters are applied. All media types are included.
// VIDEO All media items that are considered videos. This also includes movies the user has created using the Google Photos app.
// PHOTO All media items that are considered photos. This includes .bmp, .gif, .ico, .jpg (and other spellings), .tiff, .webp and special photo types such as iOS live photos, Android motion photos, panoramas, photospheres.
// Features
// NONE Treated as if no filters are applied. All features are included.
// FAVORITES Media items that the user has marked as favorites in the Google Photos app.
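// An illustrative combination of the vocabulary above, using the filter
// types defined below - favourite photos excluding screenshots:
//
//	Filters{
//		ContentFilter:   &ContentFilter{ExcludedContentCategories: []string{"SCREENSHOTS"}},
//		MediaTypeFilter: &MediaTypeFilter{MediaTypes: []string{"PHOTO"}},
//		FeatureFilter:   &FeatureFilter{IncludedFeatures: []string{"FAVORITES"}},
//	}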
// Date is used as part of SearchFilter
type Date struct {
Year int `json:"year,omitempty"`
Month int `json:"month,omitempty"`
Day int `json:"day,omitempty"`
}
// DateFilter is used to add date ranges to media item queries
type DateFilter struct {
Dates []Date `json:"dates,omitempty"`
Ranges []struct {
StartDate Date `json:"startDate,omitempty"`
EndDate Date `json:"endDate,omitempty"`
} `json:"ranges,omitempty"`
}
// ContentFilter is used to add content categories to media item queries
type ContentFilter struct {
IncludedContentCategories []string `json:"includedContentCategories,omitempty"`
ExcludedContentCategories []string `json:"excludedContentCategories,omitempty"`
}
// MediaTypeFilter is used to add media types to media item queries
type MediaTypeFilter struct {
MediaTypes []string `json:"mediaTypes,omitempty"`
}
// FeatureFilter is used to add features to media item queries
type FeatureFilter struct {
IncludedFeatures []string `json:"includedFeatures,omitempty"`
}
// Filters combines all the filter types for media item queries
type Filters struct {
DateFilter *DateFilter `json:"dateFilter,omitempty"`
ContentFilter *ContentFilter `json:"contentFilter,omitempty"`
MediaTypeFilter *MediaTypeFilter `json:"mediaTypeFilter,omitempty"`
FeatureFilter *FeatureFilter `json:"featureFilter,omitempty"`
IncludeArchivedMedia *bool `json:"includeArchivedMedia,omitempty"`
ExcludeNonAppCreatedData *bool `json:"excludeNonAppCreatedData,omitempty"`
}
// SearchFilter is used with mediaItems.search
type SearchFilter struct {
AlbumID string `json:"albumId,omitempty"`
PageSize int `json:"pageSize"`
PageToken string `json:"pageToken,omitempty"`
Filters *Filters `json:"filters,omitempty"`
}
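// Illustrative usage (the page size here is arbitrary): listing the
// contents of one album, 100 items per page:
//
//	SearchFilter{AlbumID: album.ID, PageSize: 100}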
// SimpleMediaItem is part of NewMediaItem
type SimpleMediaItem struct {
UploadToken string `json:"uploadToken"`
}
// NewMediaItem is a single media item for upload
type NewMediaItem struct {
Description string `json:"description"`
SimpleMediaItem SimpleMediaItem `json:"simpleMediaItem"`
}
// BatchCreateRequest creates media items from upload tokens
type BatchCreateRequest struct {
AlbumID string `json:"albumId,omitempty"`
NewMediaItems []NewMediaItem `json:"newMediaItems"`
}
// BatchCreateResponse is returned from BatchCreateRequest
type BatchCreateResponse struct {
NewMediaItemResults []struct {
UploadToken string `json:"uploadToken"`
Status struct {
Message string `json:"message"`
Code int `json:"code"`
} `json:"status"`
MediaItem MediaItem `json:"mediaItem"`
} `json:"newMediaItemResults"`
}
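// Illustrative two-phase upload (albumID and token are placeholders):
// the raw bytes are uploaded first to obtain an upload token, then the
// media item is created from it with
//
//	BatchCreateRequest{
//		AlbumID:       albumID, // optional
//		NewMediaItems: []NewMediaItem{{SimpleMediaItem: SimpleMediaItem{UploadToken: token}}},
//	}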
// BatchRemoveItems is for removing items from an album
type BatchRemoveItems struct {
MediaItemIds []string `json:"mediaItemIds"`
}

File diff suppressed because it is too large


@@ -1,307 +0,0 @@
package googlephotos
import (
"context"
"fmt"
"io/ioutil"
"net/http"
"path"
"testing"
"time"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
// We have two different files here as Google Photos will otherwise
// de-duplicate them, which confuses the tests because the filename
// is then unexpected.
fileNameAlbum = "rclone-test-image1.jpg"
fileNameUpload = "rclone-test-image2.jpg"
)
// Wrapper to override the remote for an object
type overrideRemoteObject struct {
fs.Object
remote string
}
// Remote returns the overridden remote name
func (o *overrideRemoteObject) Remote() string {
return o.remote
}
func TestIntegration(t *testing.T) {
ctx := context.Background()
fstest.Initialise()
// Create Fs
if *fstest.RemoteName == "" {
*fstest.RemoteName = "TestGooglePhotos:"
}
f, err := fs.NewFs(*fstest.RemoteName)
if err == fs.ErrorNotFoundInConfigFile {
t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err))
}
require.NoError(t, err)
// Create local Fs pointing at testfiles
localFs, err := fs.NewFs("testfiles")
require.NoError(t, err)
t.Run("CreateAlbum", func(t *testing.T) {
albumName := "album/rclone-test-" + random.String(24)
err = f.Mkdir(ctx, albumName)
require.NoError(t, err)
remote := albumName + "/" + fileNameAlbum
t.Run("PutFile", func(t *testing.T) {
srcObj, err := localFs.NewObject(ctx, fileNameAlbum)
require.NoError(t, err)
in, err := srcObj.Open(ctx)
require.NoError(t, err)
dstObj, err := f.Put(ctx, in, &overrideRemoteObject{srcObj, remote})
require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote())
_ = in.Close()
remoteWithID := addFileID(remote, dstObj.(*Object).id)
t.Run("ObjectFs", func(t *testing.T) {
assert.Equal(t, f, dstObj.Fs())
})
t.Run("ObjectString", func(t *testing.T) {
assert.Equal(t, remote, dstObj.String())
assert.Equal(t, "<nil>", (*Object)(nil).String())
})
t.Run("ObjectHash", func(t *testing.T) {
h, err := dstObj.Hash(ctx, hash.MD5)
assert.Equal(t, "", h)
assert.Equal(t, hash.ErrUnsupported, err)
})
t.Run("ObjectSize", func(t *testing.T) {
assert.Equal(t, int64(-1), dstObj.Size())
f.(*Fs).opt.ReadSize = true
defer func() {
f.(*Fs).opt.ReadSize = false
}()
size := dstObj.Size()
assert.True(t, size > 1000, fmt.Sprintf("Size too small %d", size))
})
t.Run("ObjectSetModTime", func(t *testing.T) {
err := dstObj.SetModTime(ctx, time.Now())
assert.Equal(t, fs.ErrorCantSetModTime, err)
})
t.Run("ObjectStorable", func(t *testing.T) {
assert.True(t, dstObj.Storable())
})
t.Run("ObjectOpen", func(t *testing.T) {
in, err := dstObj.Open(ctx)
require.NoError(t, err)
buf, err := ioutil.ReadAll(in)
require.NoError(t, err)
require.NoError(t, in.Close())
assert.True(t, len(buf) > 1000)
contentType := http.DetectContentType(buf[:512])
assert.Equal(t, "image/jpeg", contentType)
})
t.Run("CheckFileInAlbum", func(t *testing.T) {
entries, err := f.List(ctx, albumName)
require.NoError(t, err)
assert.Equal(t, 1, len(entries))
assert.Equal(t, remote, entries[0].Remote())
assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
})
// Check it is there in the date/month/year hierarchy
// 2013-07-26 is the creation date of the photo
checkPresent := func(t *testing.T, objPath string) {
entries, err := f.List(ctx, objPath)
require.NoError(t, err)
found := false
for _, entry := range entries {
leaf := path.Base(entry.Remote())
if leaf == fileNameAlbum || leaf == remoteWithID {
found = true
}
}
assert.True(t, found, fmt.Sprintf("didn't find %q in %q", fileNameAlbum, objPath))
}
t.Run("CheckInByYear", func(t *testing.T) {
checkPresent(t, "media/by-year/2013")
})
t.Run("CheckInByMonth", func(t *testing.T) {
checkPresent(t, "media/by-month/2013/2013-07")
})
t.Run("CheckInByDay", func(t *testing.T) {
checkPresent(t, "media/by-day/2013/2013-07-26")
})
t.Run("NewObject", func(t *testing.T) {
o, err := f.NewObject(ctx, remote)
require.NoError(t, err)
require.Equal(t, remote, o.Remote())
})
t.Run("NewObjectWithID", func(t *testing.T) {
o, err := f.NewObject(ctx, remoteWithID)
require.NoError(t, err)
require.Equal(t, remoteWithID, o.Remote())
})
t.Run("NewFsIsFile", func(t *testing.T) {
fNew, err := fs.NewFs(*fstest.RemoteName + remote)
assert.Equal(t, fs.ErrorIsFile, err)
leaf := path.Base(remote)
o, err := fNew.NewObject(ctx, leaf)
require.NoError(t, err)
require.Equal(t, leaf, o.Remote())
})
t.Run("RemoveFileFromAlbum", func(t *testing.T) {
err = dstObj.Remove(ctx)
require.NoError(t, err)
time.Sleep(time.Second)
// Check album empty
entries, err := f.List(ctx, albumName)
require.NoError(t, err)
assert.Equal(t, 0, len(entries))
})
})
// remove the album
err = f.Rmdir(ctx, albumName)
require.Error(t, err) // FIXME doesn't work yet
})
t.Run("UploadMkdir", func(t *testing.T) {
assert.NoError(t, f.Mkdir(ctx, "upload/dir"))
assert.NoError(t, f.Mkdir(ctx, "upload/dir/subdir"))
t.Run("List", func(t *testing.T) {
entries, err := f.List(ctx, "upload")
require.NoError(t, err)
assert.Equal(t, 1, len(entries))
assert.Equal(t, "upload/dir", entries[0].Remote())
entries, err = f.List(ctx, "upload/dir")
require.NoError(t, err)
assert.Equal(t, 1, len(entries))
assert.Equal(t, "upload/dir/subdir", entries[0].Remote())
})
t.Run("Rmdir", func(t *testing.T) {
assert.NoError(t, f.Rmdir(ctx, "upload/dir/subdir"))
assert.NoError(t, f.Rmdir(ctx, "upload/dir"))
})
t.Run("ListEmpty", func(t *testing.T) {
entries, err := f.List(ctx, "upload")
require.NoError(t, err)
assert.Equal(t, 0, len(entries))
_, err = f.List(ctx, "upload/dir")
assert.Equal(t, fs.ErrorDirNotFound, err)
})
})
t.Run("Upload", func(t *testing.T) {
uploadDir := "upload/dir/subdir"
remote := path.Join(uploadDir, fileNameUpload)
srcObj, err := localFs.NewObject(ctx, fileNameUpload)
require.NoError(t, err)
in, err := srcObj.Open(ctx)
require.NoError(t, err)
dstObj, err := f.Put(ctx, in, &overrideRemoteObject{srcObj, remote})
require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote())
_ = in.Close()
remoteWithID := addFileID(remote, dstObj.(*Object).id)
t.Run("List", func(t *testing.T) {
entries, err := f.List(ctx, uploadDir)
require.NoError(t, err)
require.Equal(t, 1, len(entries))
assert.Equal(t, remote, entries[0].Remote())
assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
})
t.Run("NewObject", func(t *testing.T) {
o, err := f.NewObject(ctx, remote)
require.NoError(t, err)
require.Equal(t, remote, o.Remote())
})
t.Run("NewObjectWithID", func(t *testing.T) {
o, err := f.NewObject(ctx, remoteWithID)
require.NoError(t, err)
require.Equal(t, remoteWithID, o.Remote())
})
})
t.Run("Name", func(t *testing.T) {
assert.Equal(t, (*fstest.RemoteName)[:len(*fstest.RemoteName)-1], f.Name())
})
t.Run("Root", func(t *testing.T) {
assert.Equal(t, "", f.Root())
})
t.Run("String", func(t *testing.T) {
assert.Equal(t, `Google Photos path ""`, f.String())
})
t.Run("Features", func(t *testing.T) {
features := f.Features()
assert.False(t, features.CaseInsensitive)
assert.True(t, features.ReadMimeType)
})
t.Run("Precision", func(t *testing.T) {
assert.Equal(t, fs.ModTimeNotSupported, f.Precision())
})
t.Run("Hashes", func(t *testing.T) {
assert.Equal(t, hash.Set(hash.None), f.Hashes())
})
}
func TestAddID(t *testing.T) {
assert.Equal(t, "potato {123}", addID("potato", "123"))
assert.Equal(t, "{123}", addID("", "123"))
}
func TestFileAddID(t *testing.T) {
assert.Equal(t, "potato {123}.txt", addFileID("potato.txt", "123"))
assert.Equal(t, "potato {123}", addFileID("potato", "123"))
assert.Equal(t, "{123}", addFileID("", "123"))
}
func TestFindID(t *testing.T) {
assert.Equal(t, "", findID("potato"))
ID := "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
assert.Equal(t, ID, findID("potato {"+ID+"}.txt"))
ID = ID[1:]
assert.Equal(t, "", findID("potato {"+ID+"}.txt"))
}


@@ -1,335 +0,0 @@
// This file contains the file pattern parsing
package googlephotos
import (
"context"
"fmt"
"path"
"regexp"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/rclone/rclone/fs"
)
// lister describes the subset of the interfaces on Fs needed for the
// file pattern parsing
type lister interface {
listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error)
listAlbums(ctx context.Context, shared bool) (all *albums, err error)
listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
dirTime() time.Time
}
// dirPattern describes a single directory pattern
type dirPattern struct {
re string // match for the path
match *regexp.Regexp // compiled match
canUpload bool // true if can upload here
canMkdir bool // true if can make a directory here
isFile bool // true if this is a file
isUpload bool // true if this is the upload directory
// function to turn a match into DirEntries
toEntries func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error)
}
// dirPatterns is a slice of all the directory patterns
type dirPatterns []dirPattern
// patterns describes the layout of the google photos backend file system.
//
// NB no trailing / on paths
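//
// The root of the tree therefore lists (see the first pattern below):
//
//	media/         - all, by-year, by-month, by-day
//	album/         - albums and sub directories by title
//	shared-album/  - shared albums by title
//	upload/        - the upload staging area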
var patterns = dirPatterns{
{
re: `^$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
return fs.DirEntries{
fs.NewDir(prefix+"media", f.dirTime()),
fs.NewDir(prefix+"album", f.dirTime()),
fs.NewDir(prefix+"shared-album", f.dirTime()),
fs.NewDir(prefix+"upload", f.dirTime()),
}, nil
},
},
{
re: `^upload(?:/(.*))?$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
return f.listUploads(ctx, match[0])
},
canUpload: true,
canMkdir: true,
isUpload: true,
},
{
re: `^upload/(.*)$`,
isFile: true,
canUpload: true,
isUpload: true,
},
{
re: `^media$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
return fs.DirEntries{
fs.NewDir(prefix+"all", f.dirTime()),
fs.NewDir(prefix+"by-year", f.dirTime()),
fs.NewDir(prefix+"by-month", f.dirTime()),
fs.NewDir(prefix+"by-day", f.dirTime()),
}, nil
},
},
{
re: `^media/all$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
return f.listDir(ctx, prefix, api.SearchFilter{})
},
},
{
re: `^media/all/([^/]+)$`,
isFile: true,
},
{
re: `^media/by-year$`,
toEntries: years,
},
{
re: `^media/by-year/(\d{4})$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
filter, err := yearMonthDayFilter(ctx, f, match)
if err != nil {
return nil, err
}
return f.listDir(ctx, prefix, filter)
},
},
{
re: `^media/by-year/(\d{4})/([^/]+)$`,
isFile: true,
},
{
re: `^media/by-month$`,
toEntries: years,
},
{
re: `^media/by-month/(\d{4})$`,
toEntries: months,
},
{
re: `^media/by-month/\d{4}/(\d{4})-(\d{2})$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
filter, err := yearMonthDayFilter(ctx, f, match)
if err != nil {
return nil, err
}
return f.listDir(ctx, prefix, filter)
},
},
{
re: `^media/by-month/\d{4}/(\d{4})-(\d{2})/([^/]+)$`,
isFile: true,
},
{
re: `^media/by-day$`,
toEntries: years,
},
{
re: `^media/by-day/(\d{4})$`,
toEntries: days,
},
{
re: `^media/by-day/\d{4}/(\d{4})-(\d{2})-(\d{2})$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
filter, err := yearMonthDayFilter(ctx, f, match)
if err != nil {
return nil, err
}
return f.listDir(ctx, prefix, filter)
},
},
{
re: `^media/by-day/\d{4}/(\d{4})-(\d{2})-(\d{2})/([^/]+)$`,
isFile: true,
},
{
re: `^album$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
return albumsToEntries(ctx, f, false, prefix, "")
},
},
{
re: `^album/(.+)$`,
canMkdir: true,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
return albumsToEntries(ctx, f, false, prefix, match[1])
},
},
{
re: `^album/(.+?)/([^/]+)$`,
canUpload: true,
isFile: true,
},
{
re: `^shared-album$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
return albumsToEntries(ctx, f, true, prefix, "")
},
},
{
re: `^shared-album/(.+)$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
return albumsToEntries(ctx, f, true, prefix, match[1])
},
},
{
re: `^shared-album/(.+?)/([^/]+)$`,
isFile: true,
},
}.mustCompile()
// mustCompile compiles the regexps in the dirPatterns
func (ds dirPatterns) mustCompile() dirPatterns {
for i := range ds {
pattern := &ds[i]
pattern.match = regexp.MustCompile(pattern.re)
}
return ds
}
// match finds the path passed in within the matching structure and
// returns the submatches, the prefix and a pointer to the matching
// pattern, or nil.
func (ds dirPatterns) match(root string, itemPath string, isFile bool) (match []string, prefix string, pattern *dirPattern) {
itemPath = strings.Trim(itemPath, "/")
absPath := path.Join(root, itemPath)
prefix = strings.Trim(absPath[len(root):], "/")
if prefix != "" {
prefix += "/"
}
for i := range ds {
pattern = &ds[i]
if pattern.isFile != isFile {
continue
}
match = pattern.match.FindStringSubmatch(absPath)
if match != nil {
return
}
}
return nil, "", nil
}
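// Illustrative call (mirroring the tests later in this change): matching
// root "media/by-day/2001" with an empty itemPath returns
// match = ["media/by-day/2001", "2001"], an empty prefix and the
// by-day pattern.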
// Return the years from 2000 to today
// FIXME make configurable?
func years(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
currentYear := f.dirTime().Year()
for year := 2000; year <= currentYear; year++ {
entries = append(entries, fs.NewDir(prefix+fmt.Sprint(year), f.dirTime()))
}
return entries, nil
}
// Return the months in a given year
func months(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
year := match[1]
for month := 1; month <= 12; month++ {
entries = append(entries, fs.NewDir(fmt.Sprintf("%s%s-%02d", prefix, year, month), f.dirTime()))
}
return entries, nil
}
// Return the days in a given year
func days(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
year := match[1]
current, err := time.Parse("2006", year)
if err != nil {
return nil, errors.Errorf("bad year %q", match[1])
}
currentYear := current.Year()
for current.Year() == currentYear {
entries = append(entries, fs.NewDir(prefix+current.Format("2006-01-02"), f.dirTime()))
current = current.AddDate(0, 0, 1)
}
return entries, nil
}
// This creates a search filter on year/month/day as provided
func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter, err error) {
year, err := strconv.Atoi(match[1])
if err != nil || year < 1000 || year > 3000 {
return sf, errors.Errorf("bad year %q", match[1])
}
sf = api.SearchFilter{
Filters: &api.Filters{
DateFilter: &api.DateFilter{
Dates: []api.Date{
{
Year: year,
},
},
},
},
}
if len(match) >= 3 {
month, err := strconv.Atoi(match[2])
if err != nil || month < 1 || month > 12 {
return sf, errors.Errorf("bad month %q", match[2])
}
sf.Filters.DateFilter.Dates[0].Month = month
}
if len(match) >= 4 {
day, err := strconv.Atoi(match[3])
if err != nil || day < 1 || day > 31 {
return sf, errors.Errorf("bad day %q", match[3])
}
sf.Filters.DateFilter.Dates[0].Day = day
}
return sf, nil
}
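// Illustrative result: match ["", "2000", "01", "02"] yields the
// equivalent of
//
//	api.SearchFilter{Filters: &api.Filters{DateFilter: &api.DateFilter{
//		Dates: []api.Date{{Year: 2000, Month: 1, Day: 2}},
//	}}}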
// Turns an albumPath into entries
//
// These can either be synthetic directory entries if the album path
// is a prefix of another album, or actual files, or a combination of
// the two.
func albumsToEntries(ctx context.Context, f lister, shared bool, prefix string, albumPath string) (entries fs.DirEntries, err error) {
albums, err := f.listAlbums(ctx, shared)
if err != nil {
return nil, err
}
// Put in the directories
dirs, foundAlbumPath := albums.getDirs(albumPath)
if foundAlbumPath {
for _, dir := range dirs {
d := fs.NewDir(prefix+dir, f.dirTime())
dirPath := path.Join(albumPath, dir)
// if this dir is also an album, attach its ID and item count
album, ok := albums.get(dirPath)
if ok {
count, err := strconv.ParseInt(album.MediaItemsCount, 10, 64)
if err != nil {
fs.Debugf(f, "Error reading media count: %v", err)
}
d.SetID(album.ID).SetItems(count)
}
entries = append(entries, d)
}
}
// if this is an album then return a filter to list it
album, foundAlbum := albums.get(albumPath)
if foundAlbum {
filter := api.SearchFilter{AlbumID: album.ID}
newEntries, err := f.listDir(ctx, prefix, filter)
if err != nil {
return nil, err
}
entries = append(entries, newEntries...)
}
if !foundAlbumPath && !foundAlbum && albumPath != "" {
return nil, fs.ErrorDirNotFound
}
return entries, nil
}
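// Illustrative listing: with albums titled "sub" and "sub/one", an
// albumPath of "sub" returns the synthetic directory "one/" followed by
// the media items of the "sub" album itself.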


@@ -1,495 +0,0 @@
package googlephotos
import (
"context"
"fmt"
"testing"
"time"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/dirtree"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/mockobject"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// time for directories
var startTime = fstest.Time("2019-06-24T15:53:05.999999999Z")
// mock Fs for testing patterns
type testLister struct {
t *testing.T
albums *albums
names []string
uploaded dirtree.DirTree
}
// newTestLister makes a mock for testing
func newTestLister(t *testing.T) *testLister {
return &testLister{
t: t,
albums: newAlbums(),
uploaded: dirtree.New(),
}
}
// mock listDir for testing
func (f *testLister) listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) {
for _, name := range f.names {
entries = append(entries, mockobject.New(prefix+name))
}
return entries, nil
}
// mock listAlbums for testing
func (f *testLister) listAlbums(ctx context.Context, shared bool) (all *albums, err error) {
return f.albums, nil
}
// mock listUploads for testing
func (f *testLister) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
entries, _ = f.uploaded[dir]
return entries, nil
}
// mock dirTime for testing
func (f *testLister) dirTime() time.Time {
return startTime
}
func TestPatternMatch(t *testing.T) {
for testNumber, test := range []struct {
// input
root string
itemPath string
isFile bool
// expected output
wantMatch []string
wantPrefix string
wantPattern *dirPattern
}{
{
root: "",
itemPath: "",
isFile: false,
wantMatch: []string{""},
wantPrefix: "",
wantPattern: &patterns[0],
},
{
root: "",
itemPath: "",
isFile: true,
wantMatch: nil,
wantPrefix: "",
wantPattern: nil,
},
{
root: "upload",
itemPath: "",
isFile: false,
wantMatch: []string{"upload", ""},
wantPrefix: "",
wantPattern: &patterns[1],
},
{
root: "upload/dir",
itemPath: "",
isFile: false,
wantMatch: []string{"upload/dir", "dir"},
wantPrefix: "",
wantPattern: &patterns[1],
},
{
root: "upload/file.jpg",
itemPath: "",
isFile: true,
wantMatch: []string{"upload/file.jpg", "file.jpg"},
wantPrefix: "",
wantPattern: &patterns[2],
},
{
root: "media",
itemPath: "",
isFile: false,
wantMatch: []string{"media"},
wantPrefix: "",
wantPattern: &patterns[3],
},
{
root: "",
itemPath: "media",
isFile: false,
wantMatch: []string{"media"},
wantPrefix: "media/",
wantPattern: &patterns[3],
},
{
root: "media/all",
itemPath: "",
isFile: false,
wantMatch: []string{"media/all"},
wantPrefix: "",
wantPattern: &patterns[4],
},
{
root: "media",
itemPath: "all",
isFile: false,
wantMatch: []string{"media/all"},
wantPrefix: "all/",
wantPattern: &patterns[4],
},
{
root: "media/all",
itemPath: "file.jpg",
isFile: true,
wantMatch: []string{"media/all/file.jpg", "file.jpg"},
wantPrefix: "file.jpg/",
wantPattern: &patterns[5],
},
} {
t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q,isFile=%v", testNumber, test.root, test.itemPath, test.isFile), func(t *testing.T) {
gotMatch, gotPrefix, gotPattern := patterns.match(test.root, test.itemPath, test.isFile)
assert.Equal(t, test.wantMatch, gotMatch)
assert.Equal(t, test.wantPrefix, gotPrefix)
assert.Equal(t, test.wantPattern, gotPattern)
})
}
}
func TestPatternMatchToEntries(t *testing.T) {
ctx := context.Background()
f := newTestLister(t)
f.names = []string{"file.jpg"}
f.albums.add(&api.Album{
ID: "1",
Title: "sub/one",
})
f.albums.add(&api.Album{
ID: "2",
Title: "sub",
})
f.uploaded.AddEntry(mockobject.New("upload/file1.jpg"))
f.uploaded.AddEntry(mockobject.New("upload/dir/file2.jpg"))
for testNumber, test := range []struct {
// input
root string
itemPath string
// expected output
wantMatch []string
wantPrefix string
remotes []string
}{
{
root: "",
itemPath: "",
wantMatch: []string{""},
wantPrefix: "",
remotes: []string{"media/", "album/", "shared-album/", "upload/"},
},
{
root: "upload",
itemPath: "",
wantMatch: []string{"upload", ""},
wantPrefix: "",
remotes: []string{"upload/file1.jpg", "upload/dir/"},
},
{
root: "upload",
itemPath: "dir",
wantMatch: []string{"upload/dir", "dir"},
wantPrefix: "dir/",
remotes: []string{"upload/dir/file2.jpg"},
},
{
root: "media",
itemPath: "",
wantMatch: []string{"media"},
wantPrefix: "",
remotes: []string{"all/", "by-year/", "by-month/", "by-day/"},
},
{
root: "media/all",
itemPath: "",
wantMatch: []string{"media/all"},
wantPrefix: "",
remotes: []string{"file.jpg"},
},
{
root: "media",
itemPath: "all",
wantMatch: []string{"media/all"},
wantPrefix: "all/",
remotes: []string{"all/file.jpg"},
},
{
root: "media/by-year",
itemPath: "",
wantMatch: []string{"media/by-year"},
wantPrefix: "",
remotes: []string{"2000/", "2001/", "2002/", "2003/"},
},
{
root: "media/by-year/2000",
itemPath: "",
wantMatch: []string{"media/by-year/2000", "2000"},
wantPrefix: "",
remotes: []string{"file.jpg"},
},
{
root: "media/by-month",
itemPath: "",
wantMatch: []string{"media/by-month"},
wantPrefix: "",
remotes: []string{"2000/", "2001/", "2002/", "2003/"},
},
{
root: "media/by-month/2001",
itemPath: "",
wantMatch: []string{"media/by-month/2001", "2001"},
wantPrefix: "",
remotes: []string{"2001-01/", "2001-02/", "2001-03/", "2001-04/"},
},
{
root: "media/by-month/2001/2001-01",
itemPath: "",
wantMatch: []string{"media/by-month/2001/2001-01", "2001", "01"},
wantPrefix: "",
remotes: []string{"file.jpg"},
},
{
root: "media/by-day",
itemPath: "",
wantMatch: []string{"media/by-day"},
wantPrefix: "",
remotes: []string{"2000/", "2001/", "2002/", "2003/"},
},
{
root: "media/by-day/2001",
itemPath: "",
wantMatch: []string{"media/by-day/2001", "2001"},
wantPrefix: "",
remotes: []string{"2001-01-01/", "2001-01-02/", "2001-01-03/", "2001-01-04/"},
},
{
root: "media/by-day/2001/2001-01-02",
itemPath: "",
wantMatch: []string{"media/by-day/2001/2001-01-02", "2001", "01", "02"},
wantPrefix: "",
remotes: []string{"file.jpg"},
},
{
root: "album",
itemPath: "",
wantMatch: []string{"album"},
wantPrefix: "",
remotes: []string{"sub/"},
},
{
root: "album/sub",
itemPath: "",
wantMatch: []string{"album/sub", "sub"},
wantPrefix: "",
remotes: []string{"one/", "file.jpg"},
},
{
root: "album/sub/one",
itemPath: "",
wantMatch: []string{"album/sub/one", "sub/one"},
wantPrefix: "",
remotes: []string{"file.jpg"},
},
{
root: "shared-album",
itemPath: "",
wantMatch: []string{"shared-album"},
wantPrefix: "",
remotes: []string{"sub/"},
},
{
root: "shared-album/sub",
itemPath: "",
wantMatch: []string{"shared-album/sub", "sub"},
wantPrefix: "",
remotes: []string{"one/", "file.jpg"},
},
{
root: "shared-album/sub/one",
itemPath: "",
wantMatch: []string{"shared-album/sub/one", "sub/one"},
wantPrefix: "",
remotes: []string{"file.jpg"},
},
} {
t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q", testNumber, test.root, test.itemPath), func(t *testing.T) {
match, prefix, pattern := patterns.match(test.root, test.itemPath, false)
assert.Equal(t, test.wantMatch, match)
assert.Equal(t, test.wantPrefix, prefix)
assert.NotNil(t, pattern)
assert.NotNil(t, pattern.toEntries)
entries, err := pattern.toEntries(ctx, f, prefix, match)
assert.NoError(t, err)
var remotes = []string{}
for _, entry := range entries {
remote := entry.Remote()
if _, isDir := entry.(fs.Directory); isDir {
remote += "/"
}
remotes = append(remotes, remote)
if len(remotes) >= 4 {
break // only test first 4 entries
}
}
assert.Equal(t, test.remotes, remotes)
})
}
}

func TestPatternYears(t *testing.T) {
f := newTestLister(t)
entries, err := years(context.Background(), f, "potato/", nil)
require.NoError(t, err)
year := 2000
for _, entry := range entries {
assert.Equal(t, "potato/"+fmt.Sprint(year), entry.Remote())
year++
}
}

func TestPatternMonths(t *testing.T) {
f := newTestLister(t)
entries, err := months(context.Background(), f, "potato/", []string{"", "2020"})
require.NoError(t, err)
assert.Equal(t, 12, len(entries))
for i, entry := range entries {
assert.Equal(t, fmt.Sprintf("potato/2020-%02d", i+1), entry.Remote())
}
}

func TestPatternDays(t *testing.T) {
f := newTestLister(t)
entries, err := days(context.Background(), f, "potato/", []string{"", "2020"})
require.NoError(t, err)
assert.Equal(t, 366, len(entries))
assert.Equal(t, "potato/2020-01-01", entries[0].Remote())
assert.Equal(t, "potato/2020-12-31", entries[len(entries)-1].Remote())
}
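
TestPatternDays expects one entry per day of the requested year (366 for the leap year 2020, running 2020-01-01 to 2020-12-31). A self-contained sketch of that enumeration (helper name hypothetical):

package main

import (
    "fmt"
    "time"
)

// dayNames lists a YYYY-MM-DD name for every day of the year,
// matching the count and endpoints asserted above.
func dayNames(year int) (names []string) {
    for d := time.Date(year, 1, 1, 0, 0, 0, 0, time.UTC); d.Year() == year; d = d.AddDate(0, 0, 1) {
        names = append(names, d.Format("2006-01-02"))
    }
    return names
}

func main() {
    days := dayNames(2020)
    fmt.Println(len(days), days[0], days[len(days)-1]) // 366 2020-01-01 2020-12-31
}
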
func TestPatternYearMonthDayFilter(t *testing.T) {
ctx := context.Background()
f := newTestLister(t)
// Years
sf, err := yearMonthDayFilter(ctx, f, []string{"", "2000"})
require.NoError(t, err)
assert.Equal(t, api.SearchFilter{
Filters: &api.Filters{
DateFilter: &api.DateFilter{
Dates: []api.Date{
{
Year: 2000,
},
},
},
},
}, sf)
_, err = yearMonthDayFilter(ctx, f, []string{"", "potato"})
require.Error(t, err)
_, err = yearMonthDayFilter(ctx, f, []string{"", "999"})
require.Error(t, err)
_, err = yearMonthDayFilter(ctx, f, []string{"", "4000"})
require.Error(t, err)
// Months
sf, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01"})
require.NoError(t, err)
assert.Equal(t, api.SearchFilter{
Filters: &api.Filters{
DateFilter: &api.DateFilter{
Dates: []api.Date{
{
Month: 1,
Year: 2000,
},
},
},
},
}, sf)
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "potato"})
require.Error(t, err)
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "0"})
require.Error(t, err)
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "13"})
require.Error(t, err)
// Days
sf, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "02"})
require.NoError(t, err)
assert.Equal(t, api.SearchFilter{
Filters: &api.Filters{
DateFilter: &api.DateFilter{
Dates: []api.Date{
{
Day: 2,
Month: 1,
Year: 2000,
},
},
},
},
}, sf)
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "potato"})
require.Error(t, err)
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "0"})
require.Error(t, err)
_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "32"})
require.Error(t, err)
}
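
The error cases above imply per-component bounds: years 999 and 4000 are rejected, months must be 1-12 and days 1-31. A sketch of validation consistent with those cases (exact bounds assumed, not taken from the backend source):

package main

import "fmt"

// checkDate validates year/month/day using bounds inferred from the
// failing inputs in the test above; 0 means "component not given".
func checkDate(year, month, day int) error {
    if year < 1000 || year > 3999 {
        return fmt.Errorf("invalid year %d", year)
    }
    if month != 0 && (month < 1 || month > 12) {
        return fmt.Errorf("invalid month %d", month)
    }
    if day != 0 && (day < 1 || day > 31) {
        return fmt.Errorf("invalid day %d", day)
    }
    return nil
}

func main() {
    fmt.Println(checkDate(2000, 1, 2)) // <nil>
    fmt.Println(checkDate(4000, 0, 0)) // invalid year 4000
}
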
func TestPatternAlbumsToEntries(t *testing.T) {
f := newTestLister(t)
ctx := context.Background()
_, err := albumsToEntries(ctx, f, false, "potato/", "sub")
assert.Equal(t, fs.ErrorDirNotFound, err)
f.albums.add(&api.Album{
ID: "1",
Title: "sub/one",
})
entries, err := albumsToEntries(ctx, f, false, "potato/", "sub")
assert.NoError(t, err)
assert.Equal(t, 1, len(entries))
assert.Equal(t, "potato/one", entries[0].Remote())
_, ok := entries[0].(fs.Directory)
assert.Equal(t, true, ok)
f.albums.add(&api.Album{
ID: "1",
Title: "sub",
})
f.names = []string{"file.jpg"}
entries, err = albumsToEntries(ctx, f, false, "potato/", "sub")
assert.NoError(t, err)
assert.Equal(t, 2, len(entries))
assert.Equal(t, "potato/one", entries[0].Remote())
_, ok = entries[0].(fs.Directory)
assert.Equal(t, true, ok)
assert.Equal(t, "potato/file.jpg", entries[1].Remote())
_, ok = entries[1].(fs.Object)
assert.Equal(t, true, ok)
}
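
Album titles may themselves contain "/", so the album "sub/one" surfaces as a directory "one" under "sub", while an album named exactly "sub" contributes its files directly. A small sketch of that listing rule as the assertions above describe it (helper hypothetical, deduplication omitted):

package main

import (
    "fmt"
    "strings"
)

// entriesUnder returns the immediate child directory names implied
// by album titles that extend the given path with another segment.
func entriesUnder(titles []string, dir string) (children []string) {
    for _, title := range titles {
        if strings.HasPrefix(title, dir+"/") {
            rest := strings.TrimPrefix(title, dir+"/")
            children = append(children, strings.SplitN(rest, "/", 2)[0])
        }
    }
    return children
}

func main() {
    fmt.Println(entriesUnder([]string{"sub/one", "sub"}, "sub")) // [one]
}
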

Binary file not shown (image removed, was 16 KiB).

Binary file not shown (image removed, was 16 KiB).

View File

@@ -5,7 +5,6 @@
package http
import (
"context"
"io"
"mime"
"net/http"
@@ -13,16 +12,15 @@ import (
"path"
"strconv"
"strings"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/net/html"
)
@@ -47,21 +45,6 @@ func init() {
Value: "https://user:pass@example.com",
Help: "Connect to example.com using a username and password",
}},
}, {
Name: "headers",
Help: `Set HTTP headers for all transactions
Use this to set additional HTTP headers for all transactions
The input format is a comma separated list of key,value pairs. Standard
[CSV encoding](https://godoc.org/encoding/csv) may be used.
For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
You can set multiple headers, eg '"Cookie","name=value","Authorization","xxx"'.
`,
Default: fs.CommaSepList{},
Advanced: true,
}, {
Name: "no_slash",
Help: `Set this if the site doesn't end directories with /
@@ -78,26 +61,6 @@ Note that this may cause rclone to confuse genuine HTML files with
directories.`,
Default: false,
Advanced: true,
}, {
Name: "no_head",
Help: `Don't use HEAD requests to find file sizes in dir listing
If your site is being very slow to load then you can try this option.
Normally rclone does a HEAD request for each potential file in a
directory listing to:
- find its size
- check it really exists
- check to see if it is a directory
If you set this option, rclone will not do the HEAD request. This will mean
- directory listings are much quicker
- rclone won't have the times or sizes of any files
- some files that don't exist may be in the listing
`,
Default: false,
Advanced: true,
}},
}
fs.Register(fsi)
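
For reference, the headers option documented above is consumed two items at a time as key,value pairs (the addHeaders helper further down this file does the pairing). A standalone sketch of that logic with illustrative values:

package main

import (
    "fmt"
    "net/http"
)

// addHeaders applies a flat comma separated list, two items at a
// time, as header key/value pairs, mirroring the helper below.
func addHeaders(req *http.Request, headers []string) {
    for i := 0; i+1 < len(headers); i += 2 {
        req.Header.Add(headers[i], headers[i+1])
    }
}

func main() {
    req, _ := http.NewRequest("GET", "https://example.com", nil)
    addHeaders(req, []string{"Cookie", "name=value", "Authorization", "xxx"})
    fmt.Println(req.Header.Get("Cookie"), req.Header.Get("Authorization")) // name=value xxx
}
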
@@ -105,10 +68,8 @@ If you set this option, rclone will not do the HEAD request. This will mean
// Options defines the configuration for this backend
type Options struct {
Endpoint string `config:"url"`
NoSlash bool `config:"no_slash"`
NoHead bool `config:"no_head"`
Headers fs.CommaSepList `config:"headers"`
Endpoint string `config:"url"`
NoSlash bool `config:"no_slash"`
}
// Fs stores the interface to the remote HTTP files
@@ -146,7 +107,6 @@ func statusError(res *http.Response, err error) error {
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -154,10 +114,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}
if len(opt.Headers)%2 != 0 {
return nil, errors.New("odd number of headers supplied")
}
if !strings.HasSuffix(opt.Endpoint, "/") {
opt.Endpoint += "/"
}
@@ -183,15 +139,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return http.ErrUseLastResponse
}
// check to see if points to a file
req, err := http.NewRequest("HEAD", u.String(), nil)
res, err := noRedir.Head(u.String())
err = statusError(res, err)
if err == nil {
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
addHeaders(req, opt)
res, err := noRedir.Do(req)
err = statusError(res, err)
if err == nil {
isFile = true
}
isFile = true
}
}
@@ -256,12 +207,12 @@ func (f *Fs) Precision() time.Duration {
}
// NewObject creates a new remote http file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
func (f *Fs) NewObject(remote string) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
err := o.stat(ctx)
err := o.stat()
if err != nil {
return nil, err
}
@@ -364,22 +315,8 @@ func parse(base *url.URL, in io.Reader) (names []string, err error) {
return names, nil
}
// Adds the configured headers to the request if any
func addHeaders(req *http.Request, opt *Options) {
for i := 0; i < len(opt.Headers); i += 2 {
key := opt.Headers[i]
value := opt.Headers[i+1]
req.Header.Add(key, value)
}
}
// Adds the configured headers to the request if any
func (f *Fs) addHeaders(req *http.Request) {
addHeaders(req, &f.opt)
}
// Read the directory passed in
func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error) {
func (f *Fs) readDir(dir string) (names []string, err error) {
URL := f.url(dir)
u, err := url.Parse(URL)
if err != nil {
@@ -388,14 +325,7 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
if !strings.HasSuffix(URL, "/") {
return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
}
// Do the request
req, err := http.NewRequest("GET", URL, nil)
if err != nil {
return nil, errors.Wrap(err, "readDir failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
f.addHeaders(req)
res, err := f.httpClient.Do(req)
res, err := f.httpClient.Get(URL)
if err == nil {
defer fs.CheckClose(res.Body, &err)
if res.StatusCode == http.StatusNotFound {
@@ -429,57 +359,38 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
if !strings.HasSuffix(dir, "/") && dir != "" {
dir += "/"
}
names, err := f.readDir(ctx, dir)
names, err := f.readDir(dir)
if err != nil {
return nil, errors.Wrapf(err, "error listing %q", dir)
}
var (
entriesMu sync.Mutex // to protect entries
wg sync.WaitGroup
in = make(chan string, fs.Config.Checkers)
)
add := func(entry fs.DirEntry) {
entriesMu.Lock()
entries = append(entries, entry)
entriesMu.Unlock()
}
for i := 0; i < fs.Config.Checkers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for remote := range in {
file := &Object{
fs: f,
remote: remote,
}
switch err := file.stat(ctx); err {
case nil:
add(file)
case fs.ErrorNotAFile:
// ...found a directory not a file
add(fs.NewDir(remote, timeUnset))
default:
fs.Debugf(remote, "skipping because of error: %v", err)
}
}
}()
}
for _, name := range names {
isDir := name[len(name)-1] == '/'
name = strings.TrimRight(name, "/")
remote := path.Join(dir, name)
if isDir {
add(fs.NewDir(remote, timeUnset))
dir := fs.NewDir(remote, timeUnset)
entries = append(entries, dir)
} else {
in <- remote
file := &Object{
fs: f,
remote: remote,
}
switch err = file.stat(); err {
case nil:
entries = append(entries, file)
case fs.ErrorNotAFile:
// ...found a directory not a file
dir := fs.NewDir(remote, timeUnset)
entries = append(entries, dir)
default:
fs.Debugf(remote, "skipping because of error: %v", err)
}
}
}
close(in)
wg.Wait()
return entries, nil
}
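
The longer List implementation above fans the per-file stat (HEAD) requests out to fs.Config.Checkers goroutines feeding from a channel. A minimal standalone sketch of that bounded worker-pool pattern, with the HTTP request replaced by a stand-in:

package main

import (
    "fmt"
    "sync"
)

func main() {
    const checkers = 4 // stand-in for fs.Config.Checkers
    names := []string{"a", "b", "c", "d", "e"}
    var (
        mu      sync.Mutex // protects results, like entriesMu above
        results []string
        wg      sync.WaitGroup
        in      = make(chan string, checkers)
    )
    for i := 0; i < checkers; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            for name := range in {
                // stand-in for the per-file HEAD request
                mu.Lock()
                results = append(results, "stat:"+name)
                mu.Unlock()
            }
        }()
    }
    for _, name := range names {
        in <- name
    }
    close(in)
    wg.Wait()
    fmt.Println(len(results), "entries") // 5 entries
}
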
@@ -488,12 +399,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return nil, errorReadOnly
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return nil, errorReadOnly
}
@@ -516,7 +427,7 @@ func (o *Object) Remote() string {
}
// Hash returns "" since HTTP (in Go or OpenSSH) doesn't support remote calculation of hashes
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
func (o *Object) Hash(r hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
@@ -526,7 +437,7 @@ func (o *Object) Size() int64 {
}
// ModTime returns the modification time of the remote http file
func (o *Object) ModTime(ctx context.Context) time.Time {
func (o *Object) ModTime() time.Time {
return o.modTime
}
@@ -536,21 +447,9 @@ func (o *Object) url() string {
}
// stat updates the info field in the Object
func (o *Object) stat(ctx context.Context) error {
if o.fs.opt.NoHead {
o.size = -1
o.modTime = timeUnset
o.contentType = fs.MimeType(ctx, o)
return nil
}
func (o *Object) stat() error {
url := o.url()
req, err := http.NewRequest("HEAD", url, nil)
if err != nil {
return errors.Wrap(err, "stat failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
o.fs.addHeaders(req)
res, err := o.fs.httpClient.Do(req)
res, err := o.fs.httpClient.Head(url)
if err == nil && res.StatusCode == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
@@ -581,7 +480,7 @@ func (o *Object) stat(ctx context.Context) error {
// SetModTime sets the modification and access time to the specified time
//
// it also updates the info field
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
func (o *Object) SetModTime(modTime time.Time) error {
return errorReadOnly
}
@@ -591,19 +490,17 @@ func (o *Object) Storable() bool {
}
// Open a remote http file object for reading. Seek is supported
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
url := o.url()
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, errors.Wrap(err, "Open failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
// Add optional headers
for k, v := range fs.OpenOptionHeaders(options) {
req.Header.Add(k, v)
}
o.fs.addHeaders(req)
// Do the request
res, err := o.fs.httpClient.Do(req)
@@ -620,27 +517,27 @@ func (f *Fs) Hashes() hash.Set {
}
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
func (f *Fs) Mkdir(dir string) error {
return errorReadOnly
}
// Remove a remote http file object
func (o *Object) Remove(ctx context.Context) error {
func (o *Object) Remove() error {
return errorReadOnly
}
// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
func (f *Fs) Rmdir(dir string) error {
return errorReadOnly
}
// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return errorReadOnly
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
func (o *Object) MimeType() string {
return o.contentType
}

View File

@@ -1,7 +1,6 @@
package http
import (
"context"
"fmt"
"io/ioutil"
"net/http"
@@ -10,15 +9,14 @@ import (
"os"
"path/filepath"
"sort"
"strings"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/rest"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/lib/rest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -27,7 +25,6 @@ var (
remoteName = "TestHTTP"
testPath = "test"
filesPath = filepath.Join(testPath, "files")
headers = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
)
// prepareServer the test server and return a function to tidy it up afterwards
@@ -35,16 +32,8 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
// file server for test/files
fileServer := http.FileServer(http.Dir(filesPath))
// test the headers are there then pass on to fileServer
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
assert.Equal(t, headers[1], r.Header.Get(headers[0]), what+headers[0])
assert.Equal(t, headers[3], r.Header.Get(headers[2]), what+headers[2])
fileServer.ServeHTTP(w, r)
})
// Make the test server
ts := httptest.NewServer(handler)
ts := httptest.NewServer(fileServer)
// Configure the remote
config.LoadConfig()
@@ -55,9 +44,8 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
// config.FileSet(remoteName, "url", ts.URL)
m := configmap.Simple{
"type": "http",
"url": ts.URL,
"headers": strings.Join(headers, ","),
"type": "http",
"url": ts.URL,
}
// return a function to tidy up
@@ -76,7 +64,7 @@ func prepare(t *testing.T) (fs.Fs, func()) {
}
func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
entries, err := f.List(context.Background(), "")
entries, err := f.List("")
require.NoError(t, err)
sort.Sort(entries)
@@ -132,7 +120,7 @@ func TestListSubDir(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
entries, err := f.List(context.Background(), "three")
entries, err := f.List("three")
require.NoError(t, err)
sort.Sort(entries)
@@ -150,7 +138,7 @@ func TestNewObject(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
o, err := f.NewObject(context.Background(), "four/under four.txt")
o, err := f.NewObject("four/under four.txt")
require.NoError(t, err)
assert.Equal(t, "four/under four.txt", o.Remote())
@@ -160,7 +148,7 @@ func TestNewObject(t *testing.T) {
// Test the time is correct on the object
tObj := o.ModTime(context.Background())
tObj := o.ModTime()
fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
require.NoError(t, err)
@@ -170,7 +158,7 @@ func TestNewObject(t *testing.T) {
assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))
// check object not found
o, err = f.NewObject(context.Background(), "not found.txt")
o, err = f.NewObject("not found.txt")
assert.Nil(t, o)
assert.Equal(t, fs.ErrorObjectNotFound, err)
}
@@ -179,11 +167,11 @@ func TestOpen(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
o, err := f.NewObject(context.Background(), "four/under four.txt")
o, err := f.NewObject("four/under four.txt")
require.NoError(t, err)
// Test normal read
fd, err := o.Open(context.Background())
fd, err := o.Open()
require.NoError(t, err)
data, err := ioutil.ReadAll(fd)
require.NoError(t, err)
@@ -191,7 +179,7 @@ func TestOpen(t *testing.T) {
assert.Equal(t, "beetroot\n", string(data))
// Test with range request
fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
fd, err = o.Open(&fs.RangeOption{Start: 1, End: 5})
require.NoError(t, err)
data, err = ioutil.ReadAll(fd)
require.NoError(t, err)
@@ -203,12 +191,12 @@ func TestMimeType(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
o, err := f.NewObject(context.Background(), "four/under four.txt")
o, err := f.NewObject("four/under four.txt")
require.NoError(t, err)
do, ok := o.(fs.MimeTyper)
require.True(t, ok)
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType())
}
func TestIsAFileRoot(t *testing.T) {
@@ -228,7 +216,7 @@ func TestIsAFileSubDir(t *testing.T) {
f, err := NewFs(remoteName, "three/underthree.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)
entries, err := f.List(context.Background(), "")
entries, err := f.List("")
require.NoError(t, err)
sort.Sort(entries)

View File

@@ -24,7 +24,7 @@
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="timer-test">timer-test</a></td><td align="right">09-May-2017 17:05 </td><td align="right">1.5M</td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/text.gif" alt="[TXT]"></td><td><a href="words-to-regexp.pl">words-to-regexp.pl</a></td><td align="right">01-Mar-2005 20:43 </td><td align="right">6.0K</td><td>&nbsp;</td></tr>
<tr><th colspan="5"><hr></th></tr>
<!-- some extras from https://github.com/rclone/rclone/issues/1573 -->
<!-- some extras from https://github.com/ncw/rclone/issues/1573 -->
<tr><td valign="top"><img src="/icons/sound2.gif" alt="[SND]"></td><td><a href="Now%20100%25%20better.mp3">Now 100% better.mp3</a></td><td align="right">2017-08-01 11:41 </td><td align="right"> 0 </td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/sound2.gif" alt="[SND]"></td><td><a href="Now%20better.mp3">Now better.mp3</a></td><td align="right">2017-08-01 11:41 </td><td align="right"> 0 </td><td>&nbsp;</td></tr>

View File

@@ -1,12 +1,11 @@
package hubic
import (
"context"
"net/http"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/swift"
"github.com/rclone/rclone/fs"
)
// auth is an authenticator for swift
@@ -27,7 +26,7 @@ func newAuth(f *Fs) *auth {
func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
const retries = 10
for try := 1; try <= retries; try++ {
err = a.f.getCredentials(context.TODO())
err = a.f.getCredentials()
if err == nil {
break
}

View File

@@ -7,7 +7,6 @@ package hubic
// to be revisited after some actual experience.
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
@@ -16,16 +15,16 @@ import (
"strings"
"time"
"github.com/ncw/rclone/backend/swift"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/lib/oauthutil"
swiftLib "github.com/ncw/swift"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/swift"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/lib/oauthutil"
"golang.org/x/oauth2"
)
@@ -116,12 +115,11 @@ func (f *Fs) String() string {
// getCredentials reads the OpenStack Credentials using the Hubic API
//
// The credentials are read into the Fs
func (f *Fs) getCredentials(ctx context.Context) (err error) {
func (f *Fs) getCredentials() (err error) {
req, err := http.NewRequest("GET", "https://api.hubic.com/1.0/account/credentials", nil)
if err != nil {
return err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
resp, err := f.client.Do(req)
if err != nil {
return err

View File

@@ -4,16 +4,14 @@ package hubic_test
import (
"testing"
"github.com/rclone/rclone/backend/hubic"
"github.com/rclone/rclone/fstest/fstests"
"github.com/ncw/rclone/backend/hubic"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestHubic:",
NilObject: (*hubic.Object)(nil),
SkipFsCheckWrap: true,
SkipObjectCheckWrap: true,
RemoteName: "TestHubic:",
NilObject: (*hubic.Object)(nil),
})
}

View File

@@ -46,82 +46,6 @@ func (t Time) String() string { return time.Time(t).Format(timeFormat) }
// APIString returns Time string in Jottacloud API format
func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }
// TokenJSON is the struct representing the HTTP response from OAuth2
// providers returning a token in JSON form.
type TokenJSON struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
RefreshToken string `json:"refresh_token"`
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
}
// JSON structures returned by new API
// AllocateFileRequest to prepare an upload to Jottacloud
type AllocateFileRequest struct {
Bytes int64 `json:"bytes"`
Created string `json:"created"`
Md5 string `json:"md5"`
Modified string `json:"modified"`
Path string `json:"path"`
}
// AllocateFileResponse for upload requests
type AllocateFileResponse struct {
Name string `json:"name"`
Path string `json:"path"`
State string `json:"state"`
UploadID string `json:"upload_id"`
UploadURL string `json:"upload_url"`
Bytes int64 `json:"bytes"`
ResumePos int64 `json:"resume_pos"`
}
// UploadResponse after an upload
type UploadResponse struct {
Name string `json:"name"`
Path string `json:"path"`
Kind string `json:"kind"`
ContentID string `json:"content_id"`
Bytes int64 `json:"bytes"`
Md5 string `json:"md5"`
Created int64 `json:"created"`
Modified int64 `json:"modified"`
Deleted interface{} `json:"deleted"`
Mime string `json:"mime"`
}
// DeviceRegistrationResponse is the response to registering a device
type DeviceRegistrationResponse struct {
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
}
// CustomerInfo provides general information about the account. Required for finding the correct internal username.
type CustomerInfo struct {
Username string `json:"username"`
Email string `json:"email"`
Name string `json:"name"`
CountryCode string `json:"country_code"`
LanguageCode string `json:"language_code"`
CustomerGroupCode string `json:"customer_group_code"`
BrandCode string `json:"brand_code"`
AccountType string `json:"account_type"`
SubscriptionType string `json:"subscription_type"`
Usage int64 `json:"usage"`
Qouta int64 `json:"quota"`
BusinessUsage int64 `json:"business_usage"`
BusinessQouta int64 `json:"business_quota"`
WriteLocked bool `json:"write_locked"`
ReadLocked bool `json:"read_locked"`
LockedCause interface{} `json:"locked_cause"`
WebHash string `json:"web_hash"`
AndroidHash string `json:"android_hash"`
IOSHash string `json:"ios_hash"`
}
// XML structures returned by the old API
// Flag is a hacky type for checking if an attribute is present
type Flag bool
@@ -140,6 +64,15 @@ func (f *Flag) MarshalXMLAttr(name xml.Name) (xml.Attr, error) {
return attr, errors.New("unimplemented")
}
// TokenJSON is the struct representing the HTTP response from OAuth2
// providers returning a token in JSON form.
type TokenJSON struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
RefreshToken string `json:"refresh_token"`
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
}
/*
GET http://www.jottacloud.com/JFS/<account>
@@ -169,8 +102,8 @@ GET http://www.jottacloud.com/JFS/<account>
</user>
*/
// DriveInfo represents a Jottacloud account
type DriveInfo struct {
// AccountInfo represents a Jottacloud account
type AccountInfo struct {
Username string `xml:"username"`
AccountType string `xml:"account-type"`
Locked bool `xml:"locked"`
@@ -347,3 +280,43 @@ func (e *Error) Error() string {
}
return out
}
// AllocateFileRequest to prepare an upload to Jottacloud
type AllocateFileRequest struct {
Bytes int64 `json:"bytes"`
Created string `json:"created"`
Md5 string `json:"md5"`
Modified string `json:"modified"`
Path string `json:"path"`
}
// AllocateFileResponse for upload requests
type AllocateFileResponse struct {
Name string `json:"name"`
Path string `json:"path"`
State string `json:"state"`
UploadID string `json:"upload_id"`
UploadURL string `json:"upload_url"`
Bytes int64 `json:"bytes"`
ResumePos int64 `json:"resume_pos"`
}
// UploadResponse after an upload
type UploadResponse struct {
Name string `json:"name"`
Path string `json:"path"`
Kind string `json:"kind"`
ContentID string `json:"content_id"`
Bytes int64 `json:"bytes"`
Md5 string `json:"md5"`
Created int64 `json:"created"`
Modified int64 `json:"modified"`
Deleted interface{} `json:"deleted"`
Mime string `json:"mime"`
}
// DeviceRegistrationResponse is the response to registering a device
type DeviceRegistrationResponse struct {
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
}
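
TokenJSON above mirrors the JSON body OAuth2 providers return. A self-contained sketch of decoding one (sample body illustrative):

package main

import (
    "encoding/json"
    "fmt"
)

// TokenJSON is copied from the struct above.
type TokenJSON struct {
    AccessToken  string `json:"access_token"`
    TokenType    string `json:"token_type"`
    RefreshToken string `json:"refresh_token"`
    ExpiresIn    int32  `json:"expires_in"`
}

func main() {
    body := `{"access_token":"abc","token_type":"bearer","expires_in":3600}`
    var t TokenJSON
    if err := json.Unmarshal([]byte(body), &t); err != nil {
        panic(err)
    }
    fmt.Println(t.AccessToken, t.ExpiresIn) // abc 3600
}
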

File diff suppressed because it is too large.

View File

@@ -6,7 +6,7 @@ import (
"io"
"testing"
"github.com/rclone/rclone/lib/readers"
"github.com/ncw/rclone/lib/readers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

View File

@@ -4,8 +4,8 @@ package jottacloud_test
import (
"testing"
"github.com/rclone/rclone/backend/jottacloud"
"github.com/rclone/rclone/fstest/fstests"
"github.com/ncw/rclone/backend/jottacloud"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote

View File

@@ -0,0 +1,77 @@
/*
Translate file names for JottaCloud adapted from OneDrive
The following characters are JottaCloud reserved characters, and can't
be used in JottaCloud folder and file names.
jottacloud = "/" / "\" / "*" / "<" / ">" / "?" / "!" / "&" / ":" / ";" / "|" / "#" / "%" / """ / "'" / "." / "~"
*/
package jottacloud
import (
"regexp"
"strings"
)
// charMap holds replacements for characters
//
// Onedrive has a restricted set of characters compared to other cloud
// storage systems, so we map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
charMap = map[rune]rune{
'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
'*': '＊', // FULLWIDTH ASTERISK
'<': '＜', // FULLWIDTH LESS-THAN SIGN
'>': '＞', // FULLWIDTH GREATER-THAN SIGN
'?': '？', // FULLWIDTH QUESTION MARK
':': '：', // FULLWIDTH COLON
';': '；', // FULLWIDTH SEMICOLON
'|': '｜', // FULLWIDTH VERTICAL LINE
'"': '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
' ': '␠', // SYMBOL FOR SPACE
}
invCharMap map[rune]rune
fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
fixEndingWithSpace = regexp.MustCompile(` (/|$)`)
)
func init() {
// Create inverse charMap
invCharMap = make(map[rune]rune, len(charMap))
for k, v := range charMap {
invCharMap[v] = k
}
}
// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
// Filenames can't start with space
in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
// Filenames can't end with space
in = fixEndingWithSpace.ReplaceAllString(in, string(charMap[' '])+"$1")
return strings.Map(func(c rune) rune {
if replacement, ok := charMap[c]; ok && c != ' ' {
return replacement
}
return c
}, in)
}
// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
return strings.Map(func(c rune) rune {
if replacement, ok := invCharMap[c]; ok {
return replacement
}
return c
}, in)
}

View File

@@ -0,0 +1,28 @@
package jottacloud
import "testing"
func TestReplace(t *testing.T) {
for _, test := range []struct {
in string
out string
}{
{"", ""},
{"abc 123", "abc 123"},
{`\*<>?:;|"`, ``},
{`\*<>?:;|"\*<>?:;|"`, ``},
{" leading space", "␠leading space"},
{"trailing space ", "trailing space␠"},
{" leading space/ leading space/ leading space", "␠leading space/␠leading space/␠leading space"},
{"trailing space /trailing space /trailing space ", "trailing space␠/trailing space␠/trailing space␠"},
} {
got := replaceReservedChars(test.in)
if got != test.out {
t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
}
got2 := restoreReservedChars(got)
if got2 != test.in {
t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
}
}
}

View File

@@ -1,7 +1,6 @@
package koofr
import (
"context"
"encoding/base64"
"errors"
"fmt"
@@ -11,19 +10,16 @@ import (
"strings"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/hash"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/hash"
httpclient "github.com/koofr/go-httpclient"
koofrclient "github.com/koofr/go-koofrclient"
)
const enc = encodings.Koofr
// Register Fs with rclone
func init() {
fs.Register(&fs.RegInfo{
@@ -43,12 +39,6 @@ func init() {
Required: false,
Default: "",
Advanced: true,
}, {
Name: "setmtime",
Help: "Does the backend support setting modification time. Set this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
Default: true,
Required: true,
Advanced: true,
}, {
Name: "user",
Help: "Your Koofr user name",
@@ -69,7 +59,6 @@ type Options struct {
MountID string `config:"mountid"`
User string `config:"user"`
Password string `config:"password"`
SetMTime bool `config:"setmtime"`
}
// A Fs is a representation of a remote Koofr Fs
@@ -116,7 +105,7 @@ func (o *Object) Remote() string {
}
// ModTime returns the modification time of the Object
func (o *Object) ModTime(ctx context.Context) time.Time {
func (o *Object) ModTime() time.Time {
return time.Unix(o.info.Modified/1000, (o.info.Modified%1000)*1000*1000)
}
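
ModTime above decodes Koofr's millisecond epoch field; later hunks in this file encode the other direction as ModTime().UnixNano()/1000/1000. A tiny round-trip sketch of that convention:

package main

import (
    "fmt"
    "time"
)

func main() {
    now := time.Now()
    ms := now.UnixNano() / 1000 / 1000              // time -> milliseconds
    back := time.Unix(ms/1000, (ms%1000)*1000*1000) // milliseconds -> time, as in ModTime above
    fmt.Println(now.Sub(back) < time.Millisecond)   // true: precision is one millisecond
}
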
@@ -131,7 +120,7 @@ func (o *Object) Fs() fs.Info {
}
// Hash returns an MD5 hash of the Object
func (o *Object) Hash(ctx context.Context, typ hash.Type) (string, error) {
func (o *Object) Hash(typ hash.Type) (string, error) {
if typ == hash.MD5 {
return o.info.Hash, nil
}
@@ -149,15 +138,14 @@ func (o *Object) Storable() bool {
}
// SetModTime is not supported
func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
return fs.ErrorCantSetModTimeWithoutDelete
func (o *Object) SetModTime(mtime time.Time) error {
return nil
}
// Open opens the Object for reading
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
var sOff, eOff int64 = 0, -1
fs.FixRangeOption(options, o.Size())
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
@@ -174,6 +162,13 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
if sOff == 0 && eOff < 0 {
return o.fs.client.FilesGet(o.fs.mountID, o.fullPath())
}
if sOff < 0 {
sOff = o.Size() - eOff
eOff = o.Size()
}
if eOff > o.Size() {
eOff = o.Size()
}
span := &koofrclient.FileSpan{
Start: sOff,
End: eOff,
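
The hunk above shows Koofr's suffix-range handling, where a negative start offset means "the last eOff bytes" and the end offset is clamped to the object size. A sketch of that normalisation (bounds logic taken from those lines, function name hypothetical):

package main

import "fmt"

// clampSpan normalises a (start, end) pair the way the code above
// does: a negative start selects a suffix, and end is clamped to size.
func clampSpan(size, sOff, eOff int64) (int64, int64) {
    if sOff < 0 {
        sOff = size - eOff
        eOff = size
    }
    if eOff > size {
        eOff = size
    }
    return sOff, eOff
}

func main() {
    fmt.Println(clampSpan(100, -1, 10)) // 90 100: the final 10 bytes
}
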
@@ -182,13 +177,11 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
}
// Update updates the Object contents
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
mtime := src.ModTime(ctx).UnixNano() / 1000 / 1000
putopts := &koofrclient.PutOptions{
ForceOverwrite: true,
NoRename: true,
OverwriteIgnoreNonExisting: true,
SetModified: &mtime,
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
putopts := &koofrclient.PutFilter{
ForceOverwrite: true,
NoRename: true,
IgnoreNonExisting: true,
}
fullPath := o.fullPath()
dirPath := dir(fullPath)
@@ -197,7 +190,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return err
}
info, err := o.fs.client.FilesPutWithOptions(o.fs.mountID, dirPath, name, in, putopts)
info, err := o.fs.client.FilesPutOptions(o.fs.mountID, dirPath, name, in, putopts)
if err != nil {
return err
}
@@ -206,7 +199,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// Remove deletes the remote Object
func (o *Object) Remove(ctx context.Context) error {
func (o *Object) Remove() error {
return o.fs.client.FilesDelete(o.fs.mountID, o.fullPath())
}
@@ -232,10 +225,7 @@ func (f *Fs) Features() *fs.Features {
// Precision denotes that setting modification times is not supported
func (f *Fs) Precision() time.Duration {
if !f.opt.SetMTime {
return fs.ModTimeNotSupported
}
return time.Millisecond
return fs.ModTimeNotSupported
}
// Hashes returns a set of hashes are Provided by the Fs
@@ -245,7 +235,7 @@ func (f *Fs) Hashes() hash.Set {
// fullPath constructs a full, absolute path from a Fs root relative path,
func (f *Fs) fullPath(part string) string {
return enc.FromStandardPath(path.Join("/", f.root, part))
return path.Join("/", f.root, part)
}
// NewFs constructs a new filesystem given a root path and configuration options
@@ -296,7 +286,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
}
return nil, errors.New("Failed to find mount " + opt.MountID)
}
rootFile, err := f.client.FilesInfo(f.mountID, enc.FromStandardPath("/"+f.root))
rootFile, err := f.client.FilesInfo(f.mountID, "/"+f.root)
if err == nil && rootFile.Type != "dir" {
f.root = dir(f.root)
err = fs.ErrorIsFile
@@ -307,21 +297,20 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
}
// List returns a list of items in a directory
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
if err != nil {
return nil, translateErrorsDir(err)
}
entries = make([]fs.DirEntry, len(files))
for i, file := range files {
remote := path.Join(dir, enc.ToStandardName(file.Name))
if file.Type == "dir" {
entries[i] = fs.NewDir(remote, time.Unix(0, 0))
entries[i] = fs.NewDir(path.Join(dir, file.Name), time.Unix(0, 0))
} else {
entries[i] = &Object{
fs: f,
info: file,
remote: remote,
remote: path.Join(dir, file.Name),
}
}
}
@@ -329,7 +318,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
// NewObject creates a new remote Object for a given remote path
func (f *Fs) NewObject(ctx context.Context, remote string) (obj fs.Object, err error) {
func (f *Fs) NewObject(remote string) (obj fs.Object, err error) {
info, err := f.client.FilesInfo(f.mountID, f.fullPath(remote))
if err != nil {
return nil, translateErrorsObject(err)
@@ -345,13 +334,11 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (obj fs.Object, err e
}
// Put updates a remote Object
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (obj fs.Object, err error) {
mtime := src.ModTime(ctx).UnixNano() / 1000 / 1000
putopts := &koofrclient.PutOptions{
ForceOverwrite: true,
NoRename: true,
OverwriteIgnoreNonExisting: true,
SetModified: &mtime,
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (obj fs.Object, err error) {
putopts := &koofrclient.PutFilter{
ForceOverwrite: true,
NoRename: true,
IgnoreNonExisting: true,
}
fullPath := f.fullPath(src.Remote())
dirPath := dir(fullPath)
@@ -360,7 +347,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
if err != nil {
return nil, err
}
info, err := f.client.FilesPutWithOptions(f.mountID, dirPath, name, in, putopts)
info, err := f.client.FilesPutOptions(f.mountID, dirPath, name, in, putopts)
if err != nil {
return nil, translateErrorsObject(err)
}
@@ -372,8 +359,8 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
}
// PutStream updates a remote Object with a stream of unknown size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...)
}
// isBadRequest is a predicate which holds true iff the error returned was
@@ -449,13 +436,13 @@ func (f *Fs) mkdir(fullPath string) error {
// Mkdir creates a directory at the given remote path. Creates ancestors if
// necessary
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
func (f *Fs) Mkdir(dir string) error {
fullPath := f.fullPath(dir)
return f.mkdir(fullPath)
}
// Rmdir removes an (empty) directory at the given remote path
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
func (f *Fs) Rmdir(dir string) error {
files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
if err != nil {
return translateErrorsDir(err)
@@ -471,25 +458,24 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
}
// Copy copies a remote Object to the given path
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
dstFullPath := f.fullPath(remote)
dstDir := dir(dstFullPath)
err := f.mkdir(dstDir)
if err != nil {
return nil, fs.ErrorCantCopy
}
mtime := src.ModTime(ctx).UnixNano() / 1000 / 1000
err = f.client.FilesCopy((src.(*Object)).fs.mountID,
(src.(*Object)).fs.fullPath((src.(*Object)).remote),
f.mountID, dstFullPath, koofrclient.CopyOptions{SetModified: &mtime})
f.mountID, dstFullPath)
if err != nil {
return nil, fs.ErrorCantCopy
}
return f.NewObject(ctx, remote)
return f.NewObject(remote)
}
// Move moves a remote Object to the given path
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj := src.(*Object)
dstFullPath := f.fullPath(remote)
dstDir := dir(dstFullPath)
@@ -502,11 +488,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if err != nil {
return nil, fs.ErrorCantMove
}
return f.NewObject(ctx, remote)
return f.NewObject(remote)
}
// DirMove moves a remote directory to the given path
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs := src.(*Fs)
srcFullPath := srcFs.fullPath(srcRemote)
dstFullPath := f.fullPath(dstRemote)
@@ -526,7 +512,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// About reports space usage (with a MB precision)
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
func (f *Fs) About() (*fs.Usage, error) {
mount, err := f.client.MountsDetails(f.mountID)
if err != nil {
return nil, err
@@ -542,7 +528,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
}
// Purge purges the complete Fs
func (f *Fs) Purge(ctx context.Context) error {
func (f *Fs) Purge() error {
err := translateErrorsDir(f.client.FilesDelete(f.mountID, f.fullPath("")))
return err
}
@@ -594,7 +580,7 @@ func createLink(c *koofrclient.KoofrClient, mountID string, path string) (*link,
}
// PublicLink creates a public link to the remote path
func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
func (f *Fs) PublicLink(remote string) (string, error) {
linkData, err := createLink(f.client, f.mountID, f.fullPath(remote))
if err != nil {
return "", translateErrorsDir(err)

View File

@@ -3,7 +3,7 @@ package koofr_test
import (
"testing"
"github.com/rclone/rclone/fstest/fstests"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote

View File

View File

@@ -3,15 +3,14 @@
package local
import (
"context"
"syscall"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
)
// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
func (f *Fs) About() (*fs.Usage, error) {
var s syscall.Statfs_t
err := syscall.Statfs(f.root, &s)
if err != nil {
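
The diff cuts off mid-function here; for context, a standalone sketch of deriving usage numbers from syscall.Statfs (Linux field types assumed; block counts multiplied by the filesystem block size):

package main

import (
    "fmt"
    "syscall"
)

func main() {
    var s syscall.Statfs_t
    if err := syscall.Statfs("/", &s); err != nil {
        panic(err)
    }
    bs := int64(s.Bsize)
    total := bs * int64(s.Blocks)        // total bytes on the filesystem
    used := bs * int64(s.Blocks-s.Bfree) // bytes in use
    free := bs * int64(s.Bavail)         // bytes available to non-root
    fmt.Println(total, used, free)
}
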

View File

@@ -3,18 +3,17 @@
package local
import (
"context"
"syscall"
"unsafe"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
)
var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")
// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
func (f *Fs) About() (*fs.Usage, error) {
var available, total, free int64
_, _, e1 := getFreeDiskSpace.Call(
uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(f.root))),

View File

@@ -1,9 +0,0 @@
//+build darwin
package local
import (
"github.com/rclone/rclone/fs/encodings"
)
const enc = encodings.LocalMacOS

View File

@@ -1,9 +0,0 @@
//+build !windows,!darwin
package local
import (
"github.com/rclone/rclone/fs/encodings"
)
const enc = encodings.LocalUnix

View File

@@ -1,9 +0,0 @@
//+build windows
package local
import (
"github.com/rclone/rclone/fs/encodings"
)
const enc = encodings.LocalWindows

Some files were not shown because too many files have changed in this diff.