Mirror of https://github.com/rclone/rclone.git, synced 2025-12-30 15:13:55 +00:00

Compare commits

1 commit

Author: Nick Craig-Wood
SHA1: 094cc27621
Message: Version 1.02
Date: 2014-07-19 13:12:20 +01:00
1912 changed files with 7141 additions and 665548 deletions


@@ -1,46 +0,0 @@
version: "{build}"
os: Windows Server 2012 R2
clone_folder: c:\gopath\src\github.com\ncw\rclone
environment:
GOPATH: C:\gopath
CPATH: C:\Program Files (x86)\WinFsp\inc\fuse
ORIGPATH: '%PATH%'
NOCCPATH: C:\MinGW\bin;%GOPATH%\bin;%PATH%
PATHCC64: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin;%NOCCPATH%
PATHCC32: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%NOCCPATH%
PATH: '%PATHCC64%'
RCLONE_CONFIG_PASS:
secure: HbzxSy9zQ8NYWN9NNPf6ALQO9Q0mwRNqwehsLcOEHy0=
install:
- choco install winfsp -y
- choco install zip -y
- copy c:\MinGW\bin\mingw32-make.exe c:\MinGW\bin\make.exe
build_script:
- echo %PATH%
- echo %GOPATH%
- go version
- go env
- go install
- go build
- make log_since_last_release > %TEMP%\git-log.txt
- make version > %TEMP%\version
- set /p RCLONE_VERSION=<%TEMP%\version
- set PATH=%PATHCC32%
- go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/386" -cgo -tags cmount %RCLONE_VERSION%
- set PATH=%PATHCC64%
- go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/amd64" -cgo -no-clean -tags cmount %RCLONE_VERSION%
test_script:
- make GOTAGS=cmount quicktest
artifacts:
- path: rclone.exe
- path: build/*-v*.zip
deploy_script:
- IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload


@@ -1,34 +0,0 @@
version: 2
jobs:
build:
machine: true
working_directory: ~/.go_workspace/src/github.com/ncw/rclone
steps:
- checkout
- run:
name: Cross-compile rclone
command: |
docker pull billziss/xgo-cgofuse
go get -v github.com/karalabe/xgo
xgo \
--image=billziss/xgo-cgofuse \
--targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
-tags cmount \
.
xgo \
--targets=android/*,ios/* \
.
- run:
name: Prepare artifacts
command: |
mkdir -p /tmp/rclone.dist
cp -R rclone-* /tmp/rclone.dist
- store_artifacts:
path: /tmp/rclone.dist

.gitignore (vendored): 6 changed lines

@@ -1,7 +1,9 @@
*~
_junk/
rclone
rclonetest/rclonetest
build
docs/public
rclone.iml
.idea
README.html
README.txt
rclone.1


@@ -1,14 +0,0 @@
{
"Enable": [
"deadcode",
"errcheck",
"goimports",
"golint",
"ineffassign",
"structcheck",
"varcheck",
"vet"
],
"EnableGC": true,
"Vendor": true
}


@@ -1,2 +0,0 @@
default_dependencies: false
cli: rclone


@@ -1,51 +1,11 @@
language: go
sudo: required
dist: trusty
os:
- linux
go:
- 1.7.x
- 1.8.x
- 1.9.x
- 1.10.x
- 1.11.x
- tip
before_install:
- if [[ $TRAVIS_OS_NAME == linux ]]; then sudo modprobe fuse ; sudo chmod 666 /dev/fuse ; sudo chown root:$USER /etc/fuse.conf ; fi
- if [[ $TRAVIS_OS_NAME == osx ]]; then brew update && brew tap caskroom/cask && brew cask install osxfuse ; fi
install:
- git fetch --unshallow --tags
- make vars
- make build_dep
- 1.1.2
- 1.2.2
- 1.3
- tip
script:
- make check
- make quicktest
- make compile_all
env:
global:
- GOTAGS=cmount
- secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
- secure: AMjrMAksDy3QwqGqnvtUg8FL/GNVgNqTqhntLF9HSU0njHhX6YurGGnfKdD9vNHlajPQOewvmBjwNLcDWGn2WObdvmh9Ohep0EmOjZ63kliaRaSSQueSd8y0idfqMQAxep0SObOYbEDVmQh0RCAE9wOVKRaPgw98XvgqWGDq5Tw=
- secure: Uaiveq+/rvQjO03GzvQZV2J6pZfedoFuhdXrLVhhHSeP4ZBca0olw7xaqkabUyP3LkVYXMDSX8EbyeuQT1jfEe5wp5sBdfaDtuYW6heFyjiHIIIbVyBfGXon6db4ETBjOaX/Xt8uktrgNge6qFlj+kpnmpFGxf0jmDLw1zgg7tk=
addons:
apt:
packages:
- fuse
- libfuse-dev
- rpm
- pkg-config
matrix:
allow_failures:
- go: tip
include:
- os: osx
go: 1.11.x
env: GOTAGS=""
deploy:
provider: script
script: make travis_beta
skip_cleanup: true
on:
all_branches: true
go: 1.11.x
condition: $TRAVIS_PULL_REQUEST == false
- go get ./...
- go test -v ./...


@@ -1,351 +0,0 @@
# Contributing to rclone #
This is a short guide on how to contribute things to rclone.
## Reporting a bug ##
If you've just got a question or aren't sure if you've found a bug
then please use the [rclone forum](https://forum.rclone.org/) instead
of filing an issue.
When filing an issue, please include the following information if
possible as well as a description of the problem. Make sure you test
with the [latest beta of rclone](https://beta.rclone.org/):
* Rclone version (eg output from `rclone -V`)
* Which OS you are using and how many bits (eg Windows 7, 64 bit)
* The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
* A log of the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
* if the log contains secrets then edit the file with a text editor first to obscure them
## Submitting a pull request ##
If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via Github.
If it is a big feature then make an issue first so it can be discussed.
You'll need a Go environment set up with GOPATH set. See [the Go
getting started docs](https://golang.org/doc/install) for more info.
First in your web browser press the fork button on [rclone's Github
page](https://github.com/ncw/rclone).
Now in your terminal
go get -u github.com/ncw/rclone
cd $GOPATH/src/github.com/ncw/rclone
git remote rename origin upstream
git remote add origin git@github.com:YOURUSER/rclone.git
Make a branch to add your new feature
git checkout -b my-new-feature
And get hacking.
When ready - run the unit tests for the code you changed
go test -v
Note that you may need to make a test remote, eg `TestSwift` for some
of the unit tests.
Note the top level Makefile targets
* make check
* make test
Both of these will be run by Travis when you make a pull request but
you can do this yourself locally too. These require some extra go
packages which you can install with
* make build_dep
Make sure you
* Add documentation for a new feature (see below for where)
* Add unit tests for a new feature
* squash commits down to one per feature
* rebase to master `git rebase master`
When you are done with that
git push origin my-new-feature
Go to the Github website and click [Create pull
request](https://help.github.com/articles/creating-a-pull-request/).
Your patch will get reviewed and you might get asked to fix some stuff.
If so, then make the changes in the same branch, squash the commits,
rebase it to master then push it to Github with `--force`.
## Testing ##
rclone's tests are run from the go testing framework, so at the top
level you can run this to run all the tests.
go test -v ./...
rclone contains a mixture of unit tests and integration tests.
Because it is difficult (and in some respects pointless) to test cloud
storage systems by mocking all their interfaces, rclone unit tests can
run against any of the backends. This is done by making specially
named remotes in the default config file.
If you wanted to test changes in the `drive` backend, then you would
need to make a remote called `TestDrive`.
You can then run the unit tests in the drive directory. These tests
are skipped if `TestDrive:` isn't defined.
cd backend/drive
go test -v
You can then run the integration tests which tests all of rclone's
operations. Normally these get run against the local filing system,
but they can be run against any of the remotes.
cd fs/sync
go test -v -remote TestDrive:
go test -v -remote TestDrive: -subdir
cd fs/operations
go test -v -remote TestDrive:
If you want to run all the integration tests against all the remotes,
then change into the project root and run
make test
This command is run daily on the integration test server. You can
find the results at https://pub.rclone.org/integration-tests/
## Code Organisation ##
Rclone code is organised into a small number of top level directories
with modules beneath.
* backend - the rclone backends for interfacing to cloud providers -
* all - import this to load all the cloud providers
* ...providers
* bin - scripts for use while building or maintaining rclone
* cmd - the rclone commands
* all - import this to load all the commands
* ...commands
* docs - the documentation and website
* content - adjust these docs only - everything else is autogenerated
* fs - main rclone definitions - minimal amount of code
* accounting - bandwidth limiting and statistics
* asyncreader - an io.Reader which reads ahead
* config - manage the config file and flags
* driveletter - detect if a name is a drive letter
* filter - implements include/exclude filtering
* fserrors - rclone specific error handling
* fshttp - http handling for rclone
* fspath - path handling for rclone
* hash - defines rclone's hash types and functions
* list - list a remote
* log - logging facilities
* march - iterates directories in lock step
* object - in memory Fs objects
* operations - primitives for sync, eg Copy, Move
* sync - sync directories
* walk - walk a directory
* fstest - provides integration test framework
* fstests - integration tests for the backends
* mockdir - mocks an fs.Directory
* mockobject - mocks an fs.Object
* test_all - Runs integration tests for everything
* graphics - the images used in the website etc
* lib - libraries used by the backend
* atexit - register functions to run when rclone exits
* dircache - directory ID to name caching
* oauthutil - helpers for using oauth
* pacer - retries with backoff and paces operations
* readers - a selection of useful io.Readers
* rest - a thin abstraction over net/http for REST
* vendor - 3rd party code managed by `go mod`
* vfs - Virtual FileSystem layer for implementing rclone mount and similar
## Writing Documentation ##
If you are adding a new feature then please update the documentation.
If you add a new flag, then if it is a general flag, document it in
`docs/content/docs.md` - the flags there are supposed to be in
alphabetical order. If it is a remote specific flag, then document it
in `docs/content/remote.md`.
The only documentation you need to edit are the `docs/content/*.md`
files. The MANUAL.*, rclone.1, web site etc are all auto generated
from those during the release process. See the `make doc` and `make
website` targets in the Makefile if you are interested in how. You
don't need to run these when adding a feature.
Documentation for rclone sub commands is with their code, eg
`cmd/ls/ls.go`.
## Making a release ##
There are separate instructions for making a release in the RELEASE.md
file.
## Commit messages ##
Please make the first line of your commit message a summary of the
change, and prefix it with the directory of the change followed by a
colon. The changelog gets made by looking at just these first lines
so make it good!
If you have more to say about the commit, then enter a blank line and
carry on the description. Remember to say why the change was needed -
the commit itself shows what was changed.
If the change fixes an issue then write `Fixes #1234` in the commit
message. This can be on the subject line if it will fit. If you
don't want to close the associated issue just put `#1234` and the
change will get linked into the issue.
Here is an example of a short commit message:
```
drive: add team drive support - fixes #885
```
And here is an example of a longer one:
```
mount: fix hang on errored upload
In certain circumstances if an upload failed then the mount could hang
indefinitely. This was fixed by closing the read pipe after the Put
completed. This will cause the write side to return a pipe closed
error fixing the hang.
Fixes #1498
```
## Adding a dependency ##
rclone uses the [go
modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
support in go1.11 and later to manage its dependencies.
**NB** you must be using go1.11 or above to add a dependency to
rclone. Rclone will still build with older versions of go, but we use
the `go mod` command for dependencies which is only in go1.11 and
above.
rclone can be built with modules outside of the GOPATH, but for
backwards compatibility with older go versions, rclone also maintains
a `vendor` directory with all the external code rclone needs for
building.
The `vendor` directory is entirely managed by the `go mod` tool, do
not add things manually.
To add a dependency `github.com/ncw/new_dependency` see the
instructions below. These will fetch the dependency, add it to
`go.mod` and `go.sum` and vendor it for older go versions.
export GO111MODULE=on
go get github.com/ncw/new_dependency
go mod vendor
You can add constraints on that package when doing `go get` (see the
go docs linked above), but don't unless you really need to.
Please check in the changes generated by `go mod` including the
`vendor` directory and `go.mod` and `go.sum` in a single commit
separate from any other code changes with the title "vendor: add
github.com/ncw/new_dependency". Remember to `git add` any new files
in `vendor`.
## Updating a dependency ##
If you need to update a dependency then run
export GO111MODULE=on
go get -u github.com/pkg/errors
go mod vendor
Check in the changes in a single commit as above.
## Updating all the dependencies ##
In order to update all the dependencies then run `make update`. This
just uses the go modules to update all the modules to their latest
stable release. Check in the changes in a single commit as above.
This should be done early in the release cycle to pick up new versions
of packages in time for them to get some testing.
## Updating a backend ##
If you update a backend then please run the unit tests and the
integration tests for that backend.
Assuming the backend is called `remote`, create a config entry
called `TestRemote` for the tests to use.
Now `cd remote` and run `go test -v` to run the unit tests.
Then `cd fs` and run `go test -v -remote TestRemote:` to run the
integration tests.
The next section goes into more detail about the tests.
## Writing a new backend ##
Choose a name. The docs here will use `remote` as an example.
Note that in rclone terminology a file system backend is called a
remote or an fs.
Research
* Look at the interfaces defined in `fs/fs.go`
* Study one or more of the existing remotes
Getting going
* Create `backend/remote/remote.go` (copy this from a similar remote - a minimal registration sketch follows this list)
* box is a good one to start from if you have a directory based remote
* b2 is a good one to start from if you have a bucket based remote
* Add your remote to the imports in `backend/all/all.go`
* HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
* Try to implement as many optional methods as possible as it makes the remote more usable.
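To make the shape of that file concrete, here is a minimal registration sketch modelled on the `alias` backend elsewhere in this diff. The backend name, the `endpoint` option and its help text are placeholders for illustration only, not anything from the official docs:

```
package remote

import (
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
)

// Register with Fs when the package is imported
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "remote",
		Description: "Example remote (placeholder description)",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name: "endpoint", // hypothetical option, for illustration only
			Help: "Endpoint for the service.",
		}},
	})
}

// Options defines the configuration for this backend
type Options struct {
	Endpoint string `config:"endpoint"`
}

// NewFs constructs an Fs from the path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	if err := configstruct.Set(m, opt); err != nil {
		return nil, err
	}
	// ... construct and return your fs.Fs implementation here
	return nil, nil
}
```

The real work is then implementing the `fs.Fs` and `fs.Object` interfaces from `fs/fs.go` on top of this skeleton.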
Unit tests
* Create a config entry called `TestRemote` for the unit tests to use
* Create a `backend/remote/remote_test.go` - copy and adjust your example remote (see the sketch after this list)
* Make sure all tests pass with `go test -v`
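The test file is usually just a thin wrapper around the `fstests` helpers. This sketch is modelled on the azureblob test further down this diff; `remote` and `remote.Object` are stand-ins for your backend's package and object type:

```
// Test Remote filesystem interface
package remote_test

import (
	"testing"

	"github.com/ncw/rclone/backend/remote" // hypothetical backend package
	"github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestRemote:",
		NilObject:  (*remote.Object)(nil),
	})
}
```

With a `TestRemote` entry in your config file, running `go test -v` in the backend directory will then exercise the standard test suite against it.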
Integration tests
* Add your fs to `fstest/test_all/test_all.go`
* Make sure integration tests pass with
* `cd fs/operations`
* `go test -v -remote TestRemote:`
* `cd fs/sync`
* `go test -v -remote TestRemote:`
* If you are making a bucket based remote, then check with this also
* `go test -v -remote TestRemote: -subdir`
* And if your remote defines `ListR` this also
* `go test -v -remote TestRemote: -fast-list`
See the [testing](#testing) section for more information on integration tests.
Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in alphabetical order but with the local file system last.
* `README.md` - main Github page
* `docs/content/remote.md` - main docs page
* `docs/content/overview.md` - overview docs
* `docs/content/docs.md` - list of remotes in config section
* `docs/content/about.md` - front page of rclone.org
* `docs/layouts/chrome/navbar.html` - add it to the website navigation
* `bin/make_manual.py` - add the page to the `docs` constant
* `cmd/cmd.go` - the main help for rclone


@@ -1,43 +0,0 @@
<!--
Hi!
We understand you are having a problem with rclone or have an idea for an improvement - we want to help you with that!
If you've just got a question or aren't sure if you've found a bug then please use the rclone forum
https://forum.rclone.org/
instead of filing an issue. We'll reply quickly and it won't increase our massive issue backlog.
If you think you might have found a bug, please can you try to replicate it with the latest beta?
https://beta.rclone.org/
If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)
If you have an idea for an improvement, then please search the old issues first and if you don't find your idea, make a new issue.
Thanks
The Rclone Developers
-->
#### What is the problem you are having with rclone?
#### What is your rclone version (eg output from `rclone -V`)
#### Which OS you are using and how many bits (eg Windows 7, 64 bit)
#### Which cloud storage system are you using? (eg Google Drive)
#### The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
#### A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)


@@ -1,87 +0,0 @@
# Maintainers guide for rclone #
Current active maintainers of rclone are
* Nick Craig-Wood @ncw
* Stefan Breunig @breunigs
* Ishuah Kariuki @ishuah
* Remus Bunduc @remusb - cache subsystem maintainer
* Fabian Möller @B4dM4n
**This is a work in progress Draft**
This is a guide for how to be an rclone maintainer. This is mostly a writeup of what I (@ncw) attempt to do.
## Triaging Tickets ##
When a ticket comes in it should be triaged. This means it should be classified by adding labels and placed into a milestone. Quite a lot of tickets need a bit of back and forth to determine whether it is a valid ticket so tickets may remain without labels or milestone for a while.
Rclone uses the labels like this:
* `bug` - a definite verified bug
* `can't reproduce` - a problem which we can't reproduce
* `doc fix` - a bug in the documentation - if users need help understanding the docs add this label
* `duplicate` - normally close these and ask the user to subscribe to the original
* `enhancement: new remote` - a new rclone backend
* `enhancement` - a new feature
* `FUSE` - to do with the `rclone mount` command
* `good first issue` - mark these if you find a small self contained issue - these get shown to new visitors to the project
* `help wanted` - mark these if you find a self contained issue - these get shown to new visitors to the project
* `IMPORTANT` - note to maintainers not to forget to fix this for the release
* `maintenance` - internal enhancement, code re-organisation etc
* `Needs Go 1.XX` - waiting for that version of Go to be released
* `question` - not a `bug` or `enhancement` - direct to the forum for next time
* `Remote: XXX` - which rclone backend this affects
* `thinking` - not decided on the course of action yet
If it turns out to be a bug or an enhancement it should be tagged as such, with the appropriate other tags. Don't forget the "good first issue" tag to give new contributors something easy to do to get going.
When a ticket is tagged it should be added to a milestone, either the next release, the one after, Soon or Help Wanted. Bugs can be added to the "Known Bugs" milestone if they aren't planned to be fixed or need to wait for something (eg the next go release).
The milestones have these meanings:
* v1.XX - stuff we would like to fit into this release
* v1.XX+1 - stuff we are leaving until the next release
* Soon - stuff we think is a good idea - waiting to be scheduled to a release
* Help wanted - blue sky stuff that might get moved up, or someone could help with
* Known bugs - bugs waiting on external factors or we aren't going to fix for the moment
Tickets [with no milestone](https://github.com/ncw/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.
## Closing Tickets ##
Close tickets as soon as you can - make sure they are tagged with a release. Post a link to a beta in the ticket with the fix in, asking for feedback.
## Pull requests ##
Try to process pull requests promptly!
Merging pull requests on Github itself works quite well nowadays so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
After merging the commit, in your local master branch, do `git pull` then run `bin/update-authors.py` to update the authors file then `git push`.
Sometimes pull requests need to be left open for a while - this is especially true of contributions of new backends, which take a long time to get right.
## Merges ##
If you are merging a branch locally then do `git merge --ff-only branch-name` to avoid a merge commit. You'll need to rebase the branch if it doesn't merge cleanly.
## Release cycle ##
Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer if there is something big to merge that didn't stabilize properly or for personal reasons.
High impact regressions should be fixed before the next release.
Near the start of the release cycle the dependencies should be updated with `make update` to give time for bugs to surface.
Towards the end of the release cycle try not to merge anything too big so that things can settle down.
Follow the instructions in RELEASE.md for making the release. Note that the testing part is the most time consuming often needing several rounds of test and fix depending on exactly how many new features rclone has gained.
## Mailing list ##
There is now an invite only mailing list for rclone developers `rclone-dev` on google groups.
## TODO ##
I should probably make a dev@rclone.org to register with cloud providers.

File diff suppressed because it is too large

MANUAL.md: 13459 changed lines

File diff suppressed because it is too large

MANUAL.txt: 13287 changed lines

File diff suppressed because it is too large

Makefile: 227 changed lines

@@ -1,232 +1,59 @@
SHELL = bash
BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(shell git rev-parse --abbrev-ref HEAD))
TAG := $(shell git describe --tags)
LAST_TAG := $(shell git describe --tags --abbrev=0)
ifeq ($(BRANCH),$(LAST_TAG))
BRANCH := master
endif
TAG_BRANCH := -$(BRANCH)
BRANCH_PATH := branch/
ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
TAG_BRANCH :=
BRANCH_PATH :=
endif
TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
ifneq ($(TAG),$(LAST_TAG))
TAG := $(TAG)-beta
endif
GO_VERSION := $(shell go version)
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
# Run full tests if go >= go1.11
FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 11)')
BETA_PATH := $(BRANCH_PATH)$(TAG)
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
BETA_UPLOAD_ROOT := memstore:beta-rclone-org
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
# Pass in GOTAGS=xyz on the make command line to set build tags
ifdef GOTAGS
BUILDTAGS=-tags "$(GOTAGS)"
endif
.PHONY: rclone vars version
rclone: *.go */*.go
@go version
go build
rclone:
touch fs/version.go
go install -v --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
cp -av `go env GOPATH`/bin/rclone .
doc: rclone.1 README.html README.txt
vars:
@echo SHELL="'$(SHELL)'"
@echo BRANCH="'$(BRANCH)'"
@echo TAG="'$(TAG)'"
@echo LAST_TAG="'$(LAST_TAG)'"
@echo NEW_TAG="'$(NEW_TAG)'"
@echo GO_VERSION="'$(GO_VERSION)'"
@echo FULL_TESTS="'$(FULL_TESTS)'"
@echo BETA_URL="'$(BETA_URL)'"
rclone.1: README.md
pandoc -s --from markdown --to man README.md -o rclone.1
version:
@echo '$(TAG)'
README.html: README.md
pandoc -s --from markdown_github --to html README.md -o README.html
# Full suite of integration tests
test: rclone
go install github.com/ncw/rclone/fstest/test_all
-go test -v -count 1 $(BUILDTAGS) $(GO_FILES) 2>&1 | tee test.log
-test_all github.com/ncw/rclone/fs/operations github.com/ncw/rclone/fs/sync 2>&1 | tee fs/test_all.log
@echo "Written logs in test.log and fs/test_all.log"
# Quick test
quicktest:
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) $(GO_FILES)
ifdef FULL_TESTS
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race $(GO_FILES)
endif
# Do source code quality checks
check: rclone
ifdef FULL_TESTS
go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
errcheck $(BUILDTAGS) ./...
find . -name \*.go | grep -v /vendor/ | xargs goimports -d | grep . ; test $$? -eq 1
go list ./... | xargs -n1 golint | grep -E -v '(StorageUrl|CdnUrl)' ; test $$? -eq 1
else
@echo Skipping source quality tests as version of go too old
endif
gometalinter_install:
go get -u github.com/alecthomas/gometalinter
gometalinter --install --update
# We aren't using gometalinter as the default linter yet because
# 1. it doesn't support build tags: https://github.com/alecthomas/gometalinter/issues/275
# 2. can't get -printfuncs working with the vet linter
gometalinter:
gometalinter ./...
# Get the build dependencies
build_dep:
ifdef FULL_TESTS
go get -u github.com/kisielk/errcheck
go get -u golang.org/x/tools/cmd/goimports
go get -u github.com/golang/lint/golint
endif
# Get the release dependencies
release_dep:
go get -u github.com/goreleaser/nfpm/...
go get -u github.com/aktau/github-release
# Update dependencies
update:
GO111MODULE=on go get -u ./...
GO111MODULE=on go mod tidy
GO111MODULE=on go mod vendor
doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
rclone.1: MANUAL.md
pandoc -s --from markdown --to man MANUAL.md -o rclone.1
MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs
./bin/make_manual.py
MANUAL.html: MANUAL.md
pandoc -s --from markdown --to html MANUAL.md -o MANUAL.html
MANUAL.txt: MANUAL.md
pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt
commanddocs: rclone
rclone gendocs docs/content/commands/
rcdocs: rclone
bin/make_rc_docs.sh
README.txt: README.md
pandoc -s --from markdown_github --to plain README.md -o README.txt
install: rclone
install -d ${DESTDIR}/usr/bin
install -t ${DESTDIR}/usr/bin ${GOPATH}/bin/rclone
install -t ${DESTDIR}/usr/bin rclone
clean:
go clean ./...
find . -name \*~ | xargs -r rm -f
rm -rf build docs/public
rm -f rclone fs/operations/operations.test fs/sync/sync.test fs/test_all.log test.log
rm -f rclone rclonetest/rclonetest rclone.1 README.html README.txt
website:
cd docs && hugo
upload_website: website
rclone -v sync docs/public memstore:www-rclone-org
tarball:
git archive -9 --format=tar.gz --prefix=rclone-$(TAG)/ -o build/rclone-$(TAG).tar.gz $(TAG)
sign_upload:
cd build && md5sum rclone-v* | gpg --clearsign > MD5SUMS
cd build && sha1sum rclone-v* | gpg --clearsign > SHA1SUMS
cd build && sha256sum rclone-v* | gpg --clearsign > SHA256SUMS
check_sign:
cd build && gpg --verify MD5SUMS && gpg --decrypt MD5SUMS | md5sum -c
cd build && gpg --verify SHA1SUMS && gpg --decrypt SHA1SUMS | sha1sum -c
cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c
./rclone -v sync docs/public memstore:www-rclone-org
upload:
rclone -v copy --exclude '*current*' build/ memstore:downloads-rclone-org/$(TAG)
rclone -v copy --include '*current*' --include version.txt build/ memstore:downloads-rclone-org
upload_github:
./bin/upload-github $(TAG)
./rclone -v copy build/ memstore:downloads-rclone-org
cross: doc
go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)
./cross-compile $(TAG)
beta:
go run bin/cross-compile.go $(BUILDTAGS) $(TAG)
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
@echo Beta release ready at https://pub.rclone.org/$(TAG)/
log_since_last_release:
git log $(LAST_TAG)..
compile_all:
ifdef FULL_TESTS
go run bin/cross-compile.go -parallel 8 -compile-only $(BUILDTAGS) $(TAG)
else
@echo Skipping compile all as version of go too old
endif
appveyor_upload:
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
endif
@echo Beta release ready at $(BETA_URL)
BUILD_FLAGS := -exclude "^(windows|darwin)/"
ifeq ($(TRAVIS_OS_NAME),osx)
BUILD_FLAGS := -include "^darwin/" -cgo
endif
travis_beta:
ifeq ($(TRAVIS_OS_NAME),linux)
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
endif
git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) -parallel 8 $(BUILDTAGS) $(TAG)
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
endif
@echo Beta release ready at $(BETA_URL)
# Fetch the binary builds from travis and appveyor
fetch_binaries:
rclone -v sync $(BETA_UPLOAD) build/
serve: website
serve:
cd docs && hugo server -v -w
tag: doc
tag:
@echo "Old tag is $(LAST_TAG)"
@echo "New tag is $(NEW_TAG)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
echo -n "$(NEW_TAG)" > docs/layouts/partials/version.html
git tag -s -m "Version $(NEW_TAG)" $(NEW_TAG)
bin/make_changelog.py $(LAST_TAG) $(NEW_TAG) > docs/content/changelog.md.new
mv docs/content/changelog.md.new docs/content/changelog.md
@echo "Edit the new changelog in docs/content/changelog.md"
@echo "Then commit all the changes"
@echo git commit -m \"Version $(NEW_TAG)\" -a -v
echo -e "package fs\n const Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
perl -lpe 's/VERSION/${NEW_TAG}/g; s/DATE/'`date -I`'/g;' docs/content/downloads.md.in > docs/content/downloads.md
git tag $(NEW_TAG)
@echo "Add this to changelog in README.md"
@echo " * $(NEW_TAG) -" `date -I`
@git log $(LAST_TAG)..$(NEW_TAG) --oneline
@echo "Then commit the changes"
@echo git commit -m "Version $(NEW_TAG)" -a -v
@echo "And finally run make retag before make cross etc"
retag:
git tag -f -s -m "Version $(LAST_TAG)" $(LAST_TAG)
startdev:
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(LAST_TAG)-DEV\"\n" | gofmt > fs/version.go
git commit -m "Start $(LAST_TAG)-DEV development" fs/version.go
winzip:
zip -9 rclone-$(TAG).zip rclone.exe
git tag -f $(LAST_TAG)

README.md: 345 changed lines

@@ -1,62 +1,325 @@
[![Logo](https://rclone.org/img/rclone-120x120.png)](https://rclone.org/)
% rclone(1) User Manual
% Nick Craig-Wood
% Jul 7, 2014
[Website](https://rclone.org) |
[Documentation](https://rclone.org/docs/) |
[Contributing](CONTRIBUTING.md) |
[Changelog](https://rclone.org/changelog/) |
[Installation](https://rclone.org/install/) |
[Forum](https://forum.rclone.org/)
[G+](https://google.com/+RcloneOrg)
Rclone
======
[![Build Status](https://travis-ci.org/ncw/rclone.svg?branch=master)](https://travis-ci.org/ncw/rclone)
[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/ncw/rclone?branch=master&passingText=windows%20-%20ok&svg=true)](https://ci.appveyor.com/project/ncw/rclone)
[![CircleCI](https://circleci.com/gh/ncw/rclone/tree/master.svg?style=svg)](https://circleci.com/gh/ncw/rclone/tree/master)
[![GoDoc](https://godoc.org/github.com/ncw/rclone?status.svg)](https://godoc.org/github.com/ncw/rclone)
[![Logo](http://rclone.org/img/rclone-120x120.png)](http://rclone.org/)
Rclone is a command line program to sync files and directories to and from
* Amazon Drive ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 / Dreamhost / Ceph / Minio / Wasabi
* Backblaze B2
* Box
* Dropbox
* FTP
* Google Cloud Storage
* Google Drive
* HTTP
* Hubic
* Jottacloud
* Mega
* Microsoft Azure Blob Storage
* Microsoft OneDrive
* OpenDrive
* Openstack Swift / Rackspace cloud files / Memset Memstore / OVH / Oracle Cloud Storage
* pCloud
* QingStor
* SFTP
* Webdav / Owncloud / Nextcloud
* Yandex Disk
* Amazon S3
* Openstack Swift / Rackspace cloud files / Memset Memstore
* Dropbox
* Google Cloud Storage
* The local filesystem
Features
* MD5/SHA1 hashes checked at all times for file integrity
* MD5SUMs checked at all times for file integrity
* Timestamps preserved on files
* Partial syncs supported on a whole file basis
* Copy mode to just copy new/changed files
* Sync (one way) mode to make a directory identical
* Check mode to check for file hash equality
* Can sync to and from network, eg two different cloud accounts
* Optional encryption (Crypt)
* Optional FUSE mount
* Sync mode to make a directory identical
* Check mode to check all MD5SUMs
* Can sync to and from network, eg two different Drive accounts
See the home page for installation, usage, documentation, changelog
and configuration walkthroughs.
See the Home page for more documentation and configuration walkthroughs.
* https://rclone.org/
* http://rclone.org/
Install
-------
Rclone is a Go program and comes as a single binary file.
Download the binary for your OS from
* http://rclone.org/downloads/
Or alternatively if you have Go installed use
go install github.com/ncw/rclone
and this will build the binary in `$GOPATH/bin`.
Configure
---------
First you'll need to configure rclone. As the object storage systems
have quite complicated authentication these are kept in a config file
`.rclone.conf` in your home directory by default. (You can use the
`--config` option to choose a different config file.)
The easiest way to make the config is to run rclone with the config
option, Eg
rclone config
Usage
-----
Rclone syncs a directory tree from local to remote.
Its basic syntax is
Syntax: [options] subcommand <parameters> <parameters...>
See below for how to specify the source and destination paths.
Subcommands
-----------
rclone copy source:path dest:path
Copy the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
MD5SUM. Doesn't delete files from the destination.
rclone sync source:path dest:path
Sync the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
MD5SUM. Deletes any files that exist in source that don't
exist in destination. Since this can cause data loss, test
first with the `--dry-run` flag.
rclone ls [remote:path]
List all the objects in the path with sizes.
rclone lsl [remote:path]
List all the objects in the path with sizes and timestamps.
rclone lsd [remote:path]
List all directories/objects/buckets in the path.
rclone mkdir remote:path
Make the path if it doesn't already exist
rclone rmdir remote:path
Remove the path. Note that you can't remove a path with
objects in it, use purge for that.
rclone purge remote:path
Remove the path and all of its contents.
rclone check source:path dest:path
Checks the files in the source and destination match. It
compares sizes and MD5SUMs and prints a report of files which
don't match. It doesn't alter the source or destination.
rclone md5sum remote:path
Produces an md5sum file for all the objects in the path. This is in
the same format as the standard md5sum tool produces.
General options:
```
--checkers=8: Number of checkers to run in parallel.
--config="~/.rclone.conf": Config file.
-n, --dry-run=false: Do a trial run with no permanent changes
--modify-window=1ns: Max time diff to be considered the same
-q, --quiet=false: Print as little stuff as possible
--stats=1m0s: Interval to print stats
--transfers=4: Number of file transfers to run in parallel.
-v, --verbose=false: Print lots more stuff
```
Developer options:
```
--cpuprofile="": Write cpu profile to file
```
Local Filesystem
----------------
Paths are specified as normal filesystem paths, so
rclone sync /home/source /tmp/destination
Will sync `/home/source` to `/tmp/destination`
Swift / Rackspace cloudfiles / Memset Memstore
----------------------------------------------
Paths are specified as remote:container (or remote: for the `lsd`
command.) You may put subdirectories in too, eg
`remote:container/path/to/dir`.
So to copy a local directory to a swift container called backup:
rclone sync /home/source swift:backup
The modified time is stored as metadata on the object as
`X-Object-Meta-Mtime` as floating point since the epoch.
This is a de facto standard (used in the official python-swiftclient
amongst others) for storing the modification time (as read using
os.Stat) for an object.
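As an illustration only (this is not rclone's own code), the metadata value is just the modification time expressed as seconds since the epoch with a fractional part, which can be produced along these lines:

```
package main

import (
	"fmt"
	"os"
)

func main() {
	fi, err := os.Stat("somefile")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	// floating point seconds since the epoch, eg "1405775540.000000"
	mtime := fmt.Sprintf("%.6f", float64(fi.ModTime().UnixNano())/1e9)
	fmt.Println("X-Object-Meta-Mtime:", mtime)
}
```

The same floating point format is described for S3's `X-Amz-Meta-Mtime` below.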
Amazon S3
---------
Paths are specified as remote:bucket. You may put subdirectories in
too, eg `remote:bucket/path/to/dir`.
So to copy a local directory to an s3 container called backup
rclone sync /home/source s3:backup
The modified time is stored as metadata on the object as
`X-Amz-Meta-Mtime` as floating point since the epoch.
Google drive
------------
Paths are specified as remote:path Drive paths may be as deep as required.
The initial setup for drive involves getting a token from Google drive
which you need to do in your browser. `rclone config` walks you
through it.
To copy a local directory to a drive directory called backup
rclone copy /home/source remote:backup
Google drive stores modification times accurate to 1 ms natively.
Dropbox
-------
Paths are specified as remote:path Dropbox paths may be as deep as required.
The initial setup for dropbox involves getting a token from Dropbox
which you need to do in your browser. `rclone config` walks you
through it.
To copy a local directory to a dropbox directory called backup
rclone copy /home/source dropbox:backup
Md5sums and timestamps in RFC3339 format accurate to 1ns are stored in
a Dropbox datastore called "rclone". Dropbox datastores are limited
to 100,000 rows so this is the maximum number of files rclone can
manage on Dropbox.
Google Cloud Storage
--------------------
Paths are specified as remote:path Google Cloud Storage paths may be
as deep as required.
The initial setup for Google Cloud Storage involves getting a token
from Google which you need to do in your browser. `rclone config`
walks you through it.
To copy a local directory to a google cloud storage directory called backup
rclone copy /home/source remote:backup
Google Cloud Storage stores md5sums natively and rclone stores
modification times as metadata on the object, under the "mtime" key in
RFC3339 format accurate to 1ns.
Single file copies
------------------
Rclone can copy single files
rclone src:path/to/file dst:path/dir
Or
rclone src:path/to/file dst:path/to/file
Note that you can't rename the file if you are copying from one file to another.
License
-------
This is free software under the terms of the MIT license (check the
COPYING file included in this package).
Bugs
----
* Drive: Sometimes get: Failed to copy: Upload failed: googleapi: Error 403: Rate Limit Exceeded
* quota is 100.0 requests/second/user
* Empty directories left behind with Local and Drive
* eg purging a local directory with subdirectories doesn't work
Changelog
---------
* v1.02 - 2014-07-19
* Implement Dropbox remote
* Implement Google Cloud Storage remote
* Verify Md5sums and Sizes after copies
* Remove times from "ls" command - lists sizes only
* Add "lsl" - lists times and sizes
* Add "md5sum" command
* v1.01 - 2014-07-04
* drive: fix transfer of big files using up lots of memory
* v1.00 - 2014-07-03
* drive: fix whole second dates
* v0.99 - 2014-06-26
* Fix --dry-run not working
* Make compatible with go 1.1
* v0.98 - 2014-05-30
* s3: Treat missing Content-Length as 0 for some ceph installations
* rclonetest: add file with a space in
* v0.97 - 2014-05-05
* Implement copying of single files
* s3 & swift: support paths inside containers/buckets
* v0.96 - 2014-04-24
* drive: Fix multiple files of same name being created
* drive: Use o.Update and fs.Put to optimise transfers
* Add version number, -V and --version
* v0.95 - 2014-03-28
* rclone.org: website, docs and graphics
* drive: fix path parsing
* v0.94 - 2014-03-27
* Change remote format one last time
* GNU style flags
* v0.93 - 2014-03-16
* drive: store token in config file
* cross compile other versions
* set strict permissions on config file
* v0.92 - 2014-03-15
* Config fixes and --config option
* v0.91 - 2014-03-15
* Make config file
* v0.90 - 2013-06-27
* Project named rclone
* v0.00 - 2012-11-18
* Project started
Contact and support
-------------------
The project website is at:
* https://github.com/ncw/rclone
There you can file bug reports, ask for help or send pull requests.
Authors
-------
* Nick Craig-Wood <nick@craig-wood.com>
Contributors
------------
* Your name goes here!


@@ -1,33 +0,0 @@
Extra required software for making a release
* [github-release](https://github.com/aktau/github-release) for uploading packages
* pandoc for making the html and man pages
Making a release
* git status - make sure everything is checked in
* Check travis & appveyor builds are green
* make check
* make test # see integration test server or run locally
* make tag
* edit docs/content/changelog.md
* make doc
* git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX"
* make retag
* git push --tags origin master
* # Wait for the appveyor and travis builds to complete then...
* make fetch_binaries
* make tarball
* make sign_upload
* make check_sign
* make upload
* make upload_website
* make upload_github
* make startdev
* # announce with forum post, twitter post, G+ post
Early in the next release cycle update the vendored dependencies
* Review any pinned packages in go.mod and remove if possible
* make update
* git status
* git add new files
* git commit -a -v


@@ -1,59 +0,0 @@
package alias
import (
"errors"
"path"
"path/filepath"
"strings"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
)
// Register with Fs
func init() {
fsi := &fs.RegInfo{
Name: "alias",
Description: "Alias for a existing remote",
NewFs: NewFs,
Options: []fs.Option{{
Name: "remote",
Help: "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
Required: true,
}},
}
fs.Register(fsi)
}
// Options defines the configuration for this backend
type Options struct {
Remote string `config:"remote"`
}
// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if opt.Remote == "" {
return nil, errors.New("alias can't point to an empty remote - check the value of the remote setting")
}
if strings.HasPrefix(opt.Remote, name+":") {
return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
}
_, configName, fsPath, err := fs.ParseRemote(opt.Remote)
if err != nil {
return nil, err
}
root = path.Join(fsPath, filepath.ToSlash(root))
if configName == "local" {
return fs.NewFs(root)
}
return fs.NewFs(configName + ":" + root)
}


@@ -1,104 +0,0 @@
package alias
import (
"fmt"
"path"
"path/filepath"
"sort"
"testing"
_ "github.com/ncw/rclone/backend/local" // pull in test backend
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/stretchr/testify/require"
)
var (
remoteName = "TestAlias"
)
func prepare(t *testing.T, root string) {
config.LoadConfig()
// Configure the remote
config.FileSet(remoteName, "type", "alias")
config.FileSet(remoteName, "remote", root)
}
func TestNewFS(t *testing.T) {
type testEntry struct {
remote string
size int64
isDir bool
}
for testi, test := range []struct {
remoteRoot string
fsRoot string
fsList string
wantOK bool
entries []testEntry
}{
{"", "", "", true, []testEntry{
{"four", -1, true},
{"one%.txt", 6, false},
{"three", -1, true},
{"two.html", 7, false},
}},
{"", "four", "", true, []testEntry{
{"five", -1, true},
{"under four.txt", 9, false},
}},
{"", "", "four", true, []testEntry{
{"four/five", -1, true},
{"four/under four.txt", 9, false},
}},
{"four", "..", "", true, []testEntry{
{"four", -1, true},
{"one%.txt", 6, false},
{"three", -1, true},
{"two.html", 7, false},
}},
{"four", "../three", "", true, []testEntry{
{"underthree.txt", 9, false},
}},
} {
what := fmt.Sprintf("test %d remoteRoot=%q, fsRoot=%q, fsList=%q", testi, test.remoteRoot, test.fsRoot, test.fsList)
remoteRoot, err := filepath.Abs(filepath.FromSlash(path.Join("test/files", test.remoteRoot)))
require.NoError(t, err, what)
prepare(t, remoteRoot)
f, err := fs.NewFs(fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
require.NoError(t, err, what)
gotEntries, err := f.List(test.fsList)
require.NoError(t, err, what)
sort.Sort(gotEntries)
require.Equal(t, len(test.entries), len(gotEntries), what)
for i, gotEntry := range gotEntries {
what := fmt.Sprintf("%s, entry=%d", what, i)
wantEntry := test.entries[i]
require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
require.Equal(t, wantEntry.size, int64(gotEntry.Size()), what)
_, isDir := gotEntry.(fs.Directory)
require.Equal(t, wantEntry.isDir, isDir, what)
}
}
}
func TestNewFSNoRemote(t *testing.T) {
prepare(t, "")
f, err := fs.NewFs(fmt.Sprintf("%s:", remoteName))
require.Error(t, err)
require.Nil(t, f)
}
func TestNewFSInvalidRemote(t *testing.T) {
prepare(t, "not_existing_test_remote:")
f, err := fs.NewFs(fmt.Sprintf("%s:", remoteName))
require.Error(t, err)
require.Nil(t, f)
}


@@ -1 +0,0 @@
apple


@@ -1 +0,0 @@
beetroot


@@ -1 +0,0 @@
hello


@@ -1 +0,0 @@
rutabaga


@@ -1 +0,0 @@
potato


@@ -1,30 +0,0 @@
package all
import (
// Active file systems
_ "github.com/ncw/rclone/backend/alias"
_ "github.com/ncw/rclone/backend/amazonclouddrive"
_ "github.com/ncw/rclone/backend/azureblob"
_ "github.com/ncw/rclone/backend/b2"
_ "github.com/ncw/rclone/backend/box"
_ "github.com/ncw/rclone/backend/cache"
_ "github.com/ncw/rclone/backend/crypt"
_ "github.com/ncw/rclone/backend/drive"
_ "github.com/ncw/rclone/backend/dropbox"
_ "github.com/ncw/rclone/backend/ftp"
_ "github.com/ncw/rclone/backend/googlecloudstorage"
_ "github.com/ncw/rclone/backend/http"
_ "github.com/ncw/rclone/backend/hubic"
_ "github.com/ncw/rclone/backend/jottacloud"
_ "github.com/ncw/rclone/backend/local"
_ "github.com/ncw/rclone/backend/mega"
_ "github.com/ncw/rclone/backend/onedrive"
_ "github.com/ncw/rclone/backend/opendrive"
_ "github.com/ncw/rclone/backend/pcloud"
_ "github.com/ncw/rclone/backend/qingstor"
_ "github.com/ncw/rclone/backend/s3"
_ "github.com/ncw/rclone/backend/sftp"
_ "github.com/ncw/rclone/backend/swift"
_ "github.com/ncw/rclone/backend/webdav"
_ "github.com/ncw/rclone/backend/yandex"
)

File diff suppressed because it is too large


@@ -1,20 +0,0 @@
// Test AmazonCloudDrive filesystem interface
// +build acd
package amazonclouddrive_test
import (
"testing"
"github.com/ncw/rclone/backend/amazonclouddrive"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil))
fstests.RemoteName = "TestAmazonCloudDrive:"
fstests.Run(t)
}

File diff suppressed because it is too large


@@ -1,20 +0,0 @@
// Test AzureBlob filesystem interface
// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
package azureblob_test
import (
"testing"
"github.com/ncw/rclone/backend/azureblob"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestAzureBlob:",
NilObject: (*azureblob.Object)(nil),
})
}


@@ -1,6 +0,0 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build freebsd netbsd openbsd plan9 solaris !go1.8
package azureblob


@@ -1,312 +0,0 @@
package api
import (
"fmt"
"path"
"strconv"
"strings"
"time"
"github.com/ncw/rclone/fs/fserrors"
)
// Error describes a B2 error response
type Error struct {
Status int `json:"status"` // The numeric HTTP status code. Always matches the status in the HTTP response.
Code string `json:"code"` // A single-identifier code that identifies the error.
Message string `json:"message"` // A human-readable message, in English, saying what went wrong.
}
// Error satisfies the error interface
func (e *Error) Error() string {
return fmt.Sprintf("%s (%d %s)", e.Message, e.Status, e.Code)
}
// Fatal satisfies the Fatal interface
//
// It indicates which errors should be treated as fatal
func (e *Error) Fatal() bool {
return e.Status == 403 // 403 errors shouldn't be retried
}
var _ fserrors.Fataler = (*Error)(nil)
// Bucket describes a B2 bucket
type Bucket struct {
ID string `json:"bucketId"`
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
}
// Timestamp is a UTC time when this file was uploaded. It is a base
// 10 number of milliseconds since midnight, January 1, 1970 UTC. This
// fits in a 64 bit integer such as the type "long" in the programming
// language Java. It is intended to be compatible with Java's time
// long. For example, it can be passed directly into the java call
// Date.setTime(long time).
type Timestamp time.Time
// MarshalJSON turns a Timestamp into JSON (in UTC)
func (t *Timestamp) MarshalJSON() (out []byte, err error) {
timestamp := (*time.Time)(t).UTC().UnixNano()
return []byte(strconv.FormatInt(timestamp/1E6, 10)), nil
}
// UnmarshalJSON turns JSON into a Timestamp
func (t *Timestamp) UnmarshalJSON(data []byte) error {
timestamp, err := strconv.ParseInt(string(data), 10, 64)
if err != nil {
return err
}
*t = Timestamp(time.Unix(timestamp/1E3, (timestamp%1E3)*1E6).UTC())
return nil
}
const versionFormat = "-v2006-01-02-150405.000"
// AddVersion adds the timestamp as a version string into the filename passed in.
func (t Timestamp) AddVersion(remote string) string {
ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)]
s := time.Time(t).Format(versionFormat)
// Replace the '.' with a '-'
s = strings.Replace(s, ".", "-", -1)
return base + s + ext
}
// RemoveVersion removes the timestamp from a filename as a version string.
//
// It returns the new file name and a timestamp, or the old filename
// and a zero timestamp.
func RemoveVersion(remote string) (t Timestamp, newRemote string) {
newRemote = remote
ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)]
if len(base) < len(versionFormat) {
return
}
versionStart := len(base) - len(versionFormat)
// Check it ends in -xxx
if base[len(base)-4] != '-' {
return
}
// Replace with .xxx for parsing
base = base[:len(base)-4] + "." + base[len(base)-3:]
newT, err := time.Parse(versionFormat, base[versionStart:])
if err != nil {
return
}
return Timestamp(newT), base[:versionStart] + ext
}
// IsZero returns true if the timestamp is uninitialised
func (t Timestamp) IsZero() bool {
return time.Time(t).IsZero()
}
// Equal compares two timestamps
//
// If either is zero then it returns false
func (t Timestamp) Equal(s Timestamp) bool {
if time.Time(t).IsZero() {
return false
}
if time.Time(s).IsZero() {
return false
}
return time.Time(t).Equal(time.Time(s))
}
// File is info about a file
type File struct {
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
Action string `json:"action"` // Either "upload" or "hide". "upload" means a file that was uploaded to B2 Cloud Storage. "hide" means a file version marking the file as hidden, so that it will not show up in b2_list_file_names. The result of b2_list_file_names will contain only "upload". The result of b2_list_file_versions may have both.
Size int64 `json:"size"` // The number of bytes in the file.
UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file.
ContentType string `json:"contentType"` // The MIME type of the file.
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
}
// AuthorizeAccountResponse is as returned from the b2_authorize_account call
type AuthorizeAccountResponse struct {
AbsoluteMinimumPartSize int `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
AccountID string `json:"accountId"` // The identifier for the account.
Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
NamePrefix interface{} `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
} `json:"allowed"`
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files.
MinimumPartSize int `json:"minimumPartSize"` // DEPRECATED: This field will always have the same value as recommendedPartSize. Use recommendedPartSize instead.
RecommendedPartSize int `json:"recommendedPartSize"` // The recommended size for each part of a large file. We recommend using this part size for optimal upload performance.
}
// ListBucketsRequest is parameters for b2_list_buckets call
type ListBucketsRequest struct {
AccountID string `json:"accountId"` // The identifier for the account.
BucketID string `json:"bucketId,omitempty"` // When specified, the result will be a list containing just this bucket.
BucketName string `json:"bucketName,omitempty"` // When specified, the result will be a list containing just this bucket.
BucketTypes []string `json:"bucketTypes,omitempty"` // If present, B2 will use it as a filter for bucket types returned in the list buckets response.
}
// ListBucketsResponse is as returned from the b2_list_buckets call
type ListBucketsResponse struct {
Buckets []Bucket `json:"buckets"`
}
// ListFileNamesRequest is as passed to b2_list_file_names or b2_list_file_versions
type ListFileNamesRequest struct {
BucketID string `json:"bucketId"` // required - The bucket to look for file names in.
StartFileName string `json:"startFileName,omitempty"` // optional - The first file name to return. If there is a file with this name, it will be returned in the list. If not, the first file name after this name will be returned.
MaxFileCount int `json:"maxFileCount,omitempty"` // optional - The maximum number of files to return from this call. The default value is 100, and the maximum allowed is 1000.
StartFileID string `json:"startFileId,omitempty"` // optional - What to pass in to startFileId for the next search to continue where this one left off.
Prefix string `json:"prefix,omitempty"` // optional - Files returned will be limited to those with the given prefix. Defaults to the empty string, which matches all files.
Delimiter string `json:"delimiter,omitempty"` // Files returned will be limited to those within the top folder, or any one subfolder. Defaults to NULL. Folder names will also be returned. The delimiter character will be used to "break" file names into folders.
}
// ListFileNamesResponse is as received from b2_list_file_names or b2_list_file_versions
type ListFileNamesResponse struct {
Files []File `json:"files"` // An array of objects, each one describing one file.
NextFileName *string `json:"nextFileName"` // What to pass in to startFileName for the next search to continue where this one left off, or null if there are no more files.
NextFileID *string `json:"nextFileId"` // What to pass in to startFileId for the next search to continue where this one left off, or null if there are no more files.
}
// GetUploadURLRequest is passed to b2_get_upload_url
type GetUploadURLRequest struct {
BucketID string `json:"bucketId"` // The ID of the bucket that you want to upload to.
}
// GetUploadURLResponse is received from b2_get_upload_url
type GetUploadURLResponse struct {
BucketID string `json:"bucketId"` // The unique ID of the bucket.
UploadURL string `json:"uploadUrl"` // The URL that can be used to upload files to this bucket, see b2_upload_file.
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_file.
}
// FileInfo is received from b2_upload_file, b2_get_file_info and b2_finish_large_file
type FileInfo struct {
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
Action string `json:"action"` // Either "upload" or "hide". "upload" means a file that was uploaded to B2 Cloud Storage. "hide" means a file version marking the file as hidden, so that it will not show up in b2_list_file_names. The result of b2_list_file_names will contain only "upload". The result of b2_list_file_versions may have both.
AccountID string `json:"accountId"` // Your account ID.
BucketID string `json:"bucketId"` // The bucket that the file is in.
Size int64 `json:"contentLength"` // The number of bytes stored in the file.
UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file.
ContentType string `json:"contentType"` // The MIME type of the file.
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
}
// CreateBucketRequest is used to create a bucket
type CreateBucketRequest struct {
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
}
// DeleteBucketRequest is used to delete a bucket
type DeleteBucketRequest struct {
ID string `json:"bucketId"`
AccountID string `json:"accountId"`
}
// DeleteFileRequest is used to delete a file version
type DeleteFileRequest struct {
ID string `json:"fileId"` // The ID of the file, as returned by b2_upload_file, b2_list_file_names, or b2_list_file_versions.
Name string `json:"fileName"` // The name of this file.
}
// HideFileRequest is used to hide a file
type HideFileRequest struct {
BucketID string `json:"bucketId"` // The bucket containing the file to hide.
Name string `json:"fileName"` // The name of the file to hide.
}
// GetFileInfoRequest is used to return a FileInfo struct with b2_get_file_info
type GetFileInfoRequest struct {
ID string `json:"fileId"` // The ID of the file, as returned by b2_upload_file, b2_list_file_names, or b2_list_file_versions.
}
// StartLargeFileRequest (b2_start_large_file) prepares for uploading the parts of a large file.
//
// If the original source of the file being uploaded has a last
// modified time concept, Backblaze recommends using
// src_last_modified_millis as the name, and a string holding the base
// 10 number of milliseconds since midnight, January 1, 1970
// UTC. This fits in a 64 bit integer such as the type "long" in the
// programming language Java. It is intended to be compatible with
// Java's time long. For example, it can be passed directly into the
// Java call Date.setTime(long time).
//
// If the caller knows the SHA1 of the entire large file being
// uploaded, Backblaze recommends using large_file_sha1 as the name,
// and a 40 byte hex string representing the SHA1.
//
// Example: { "src_last_modified_millis" : "1452802803026", "large_file_sha1" : "a3195dc1e7b46a2ff5da4b3c179175b75671e80d", "color": "blue" }
type StartLargeFileRequest struct {
BucketID string `json:"bucketId"` // The ID of the bucket that the file will go in.
Name string `json:"fileName"` // The name of the file. See Files for requirements on file names.
ContentType string `json:"contentType"` // The MIME type of the content of the file, which will be returned in the Content-Type header when downloading the file. Use the Content-Type b2/x-auto to automatically set the stored Content-Type post upload. In the case where a file extension is absent or the lookup fails, the Content-Type is set to application/octet-stream.
Info map[string]string `json:"fileInfo"` // A JSON object holding the name/value pairs for the custom file info.
}
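// Illustrative sketch only, not part of this package: building a
// StartLargeFileRequest that records the source modification time under the
// recommended src_last_modified_millis key. Assumes "strconv" and "time" are
// imported; "b2/x-auto" lets B2 pick the Content-Type itself.
func exampleStartLargeFileRequest(bucketID, name string, modTime time.Time) StartLargeFileRequest {
	return StartLargeFileRequest{
		BucketID:    bucketID,
		Name:        name,
		ContentType: "b2/x-auto",
		Info: map[string]string{
			// milliseconds since the Unix epoch, as a base 10 string
			"src_last_modified_millis": strconv.FormatInt(modTime.UnixNano()/int64(time.Millisecond), 10),
		},
	}
}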
// StartLargeFileResponse is the response to StartLargeFileRequest
type StartLargeFileResponse struct {
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
AccountID string `json:"accountId"` // The identifier for the account.
BucketID string `json:"bucketId"` // The unique ID of the bucket.
ContentType string `json:"contentType"` // The MIME type of the file.
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
}
// GetUploadPartURLRequest is passed to b2_get_upload_part_url
type GetUploadPartURLRequest struct {
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
}
// GetUploadPartURLResponse is received from b2_get_upload_part_url
type GetUploadPartURLResponse struct {
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
UploadURL string `json:"uploadUrl"` // The URL that can be used to upload files to this bucket, see b2_upload_part.
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_part.
}
// UploadPartResponse is the response to b2_upload_part
type UploadPartResponse struct {
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
Size int64 `json:"contentLength"` // The number of bytes stored in the file.
SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file.
}
// FinishLargeFileRequest is passed to b2_finish_large_file
//
// The response is a FileInfo object (with extra AccountID and BucketID fields which we ignore).
//
// Large files do not have a SHA1 checksum. The value will always be "none".
type FinishLargeFileRequest struct {
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
SHA1s []string `json:"partSha1Array"` // A JSON array of hex SHA1 checksums of the parts of the large file. This is a double-check that the right parts were uploaded in the right order, and that none were missed. Note that the part numbers start at 1, and the SHA1 of the part 1 is the first string in the array, at index 0.
}
// CancelLargeFileRequest is passed to b2_cancel_large_file
//
// The response is a CancelLargeFileResponse
type CancelLargeFileRequest struct {
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
}
// CancelLargeFileResponse is the response to CancelLargeFileRequest
type CancelLargeFileResponse struct {
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
Name string `json:"fileName"` // The name of this file.
AccountID string `json:"accountId"` // The identifier for the account.
BucketID string `json:"bucketId"` // The unique ID of the bucket.
}


@@ -1,87 +0,0 @@
package api_test
import (
"testing"
"time"
"github.com/ncw/rclone/backend/b2/api"
"github.com/ncw/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
emptyT api.Timestamp
t0 = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123456789Z"))
t0r = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123000000Z"))
t1 = api.Timestamp(fstest.Time("2001-02-03T04:05:06.123000000Z"))
)
func TestTimestampMarshalJSON(t *testing.T) {
resB, err := t0.MarshalJSON()
res := string(resB)
require.NoError(t, err)
assert.Equal(t, "3661123", res)
resB, err = t1.MarshalJSON()
res = string(resB)
require.NoError(t, err)
assert.Equal(t, "981173106123", res)
}
func TestTimestampUnmarshalJSON(t *testing.T) {
var tActual api.Timestamp
err := tActual.UnmarshalJSON([]byte("981173106123"))
require.NoError(t, err)
assert.Equal(t, (time.Time)(t1), (time.Time)(tActual))
}
func TestTimestampAddVersion(t *testing.T) {
for _, test := range []struct {
t api.Timestamp
in string
expected string
}{
{t0, "potato.txt", "potato-v1970-01-01-010101-123.txt"},
{t1, "potato", "potato-v2001-02-03-040506-123"},
{t1, "", "-v2001-02-03-040506-123"},
} {
actual := test.t.AddVersion(test.in)
assert.Equal(t, test.expected, actual, test.in)
}
}
func TestTimestampRemoveVersion(t *testing.T) {
for _, test := range []struct {
in string
expectedT api.Timestamp
expectedRemote string
}{
{"potato.txt", emptyT, "potato.txt"},
{"potato-v1970-01-01-010101-123.txt", t0r, "potato.txt"},
{"potato-v2001-02-03-040506-123", t1, "potato"},
{"-v2001-02-03-040506-123", t1, ""},
{"potato-v2A01-02-03-040506-123", emptyT, "potato-v2A01-02-03-040506-123"},
{"potato-v2001-02-03-040506=123", emptyT, "potato-v2001-02-03-040506=123"},
} {
actualT, actualRemote := api.RemoveVersion(test.in)
assert.Equal(t, test.expectedT, actualT, test.in)
assert.Equal(t, test.expectedRemote, actualRemote, test.in)
}
}
func TestTimestampIsZero(t *testing.T) {
assert.True(t, emptyT.IsZero())
assert.False(t, t0.IsZero())
assert.False(t, t1.IsZero())
}
func TestTimestampEqual(t *testing.T) {
assert.False(t, emptyT.Equal(emptyT))
assert.False(t, t0.Equal(emptyT))
assert.False(t, emptyT.Equal(t0))
assert.False(t, t0.Equal(t1))
assert.False(t, t1.Equal(t0))
assert.True(t, t0.Equal(t0))
assert.True(t, t1.Equal(t1))
}

File diff suppressed because it is too large


@@ -1,170 +0,0 @@
package b2
import (
"testing"
"time"
"github.com/ncw/rclone/fstest"
)
// Test b2 string encoding
// https://www.backblaze.com/b2/docs/string_encoding.html
var encodeTest = []struct {
fullyEncoded string
minimallyEncoded string
plainText string
}{
{fullyEncoded: "%20", minimallyEncoded: "+", plainText: " "},
{fullyEncoded: "%21", minimallyEncoded: "!", plainText: "!"},
{fullyEncoded: "%22", minimallyEncoded: "%22", plainText: "\""},
{fullyEncoded: "%23", minimallyEncoded: "%23", plainText: "#"},
{fullyEncoded: "%24", minimallyEncoded: "$", plainText: "$"},
{fullyEncoded: "%25", minimallyEncoded: "%25", plainText: "%"},
{fullyEncoded: "%26", minimallyEncoded: "%26", plainText: "&"},
{fullyEncoded: "%27", minimallyEncoded: "'", plainText: "'"},
{fullyEncoded: "%28", minimallyEncoded: "(", plainText: "("},
{fullyEncoded: "%29", minimallyEncoded: ")", plainText: ")"},
{fullyEncoded: "%2A", minimallyEncoded: "*", plainText: "*"},
{fullyEncoded: "%2B", minimallyEncoded: "%2B", plainText: "+"},
{fullyEncoded: "%2C", minimallyEncoded: "%2C", plainText: ","},
{fullyEncoded: "%2D", minimallyEncoded: "-", plainText: "-"},
{fullyEncoded: "%2E", minimallyEncoded: ".", plainText: "."},
{fullyEncoded: "%2F", minimallyEncoded: "/", plainText: "/"},
{fullyEncoded: "%30", minimallyEncoded: "0", plainText: "0"},
{fullyEncoded: "%31", minimallyEncoded: "1", plainText: "1"},
{fullyEncoded: "%32", minimallyEncoded: "2", plainText: "2"},
{fullyEncoded: "%33", minimallyEncoded: "3", plainText: "3"},
{fullyEncoded: "%34", minimallyEncoded: "4", plainText: "4"},
{fullyEncoded: "%35", minimallyEncoded: "5", plainText: "5"},
{fullyEncoded: "%36", minimallyEncoded: "6", plainText: "6"},
{fullyEncoded: "%37", minimallyEncoded: "7", plainText: "7"},
{fullyEncoded: "%38", minimallyEncoded: "8", plainText: "8"},
{fullyEncoded: "%39", minimallyEncoded: "9", plainText: "9"},
{fullyEncoded: "%3A", minimallyEncoded: ":", plainText: ":"},
{fullyEncoded: "%3B", minimallyEncoded: ";", plainText: ";"},
{fullyEncoded: "%3C", minimallyEncoded: "%3C", plainText: "<"},
{fullyEncoded: "%3D", minimallyEncoded: "=", plainText: "="},
{fullyEncoded: "%3E", minimallyEncoded: "%3E", plainText: ">"},
{fullyEncoded: "%3F", minimallyEncoded: "%3F", plainText: "?"},
{fullyEncoded: "%40", minimallyEncoded: "@", plainText: "@"},
{fullyEncoded: "%41", minimallyEncoded: "A", plainText: "A"},
{fullyEncoded: "%42", minimallyEncoded: "B", plainText: "B"},
{fullyEncoded: "%43", minimallyEncoded: "C", plainText: "C"},
{fullyEncoded: "%44", minimallyEncoded: "D", plainText: "D"},
{fullyEncoded: "%45", minimallyEncoded: "E", plainText: "E"},
{fullyEncoded: "%46", minimallyEncoded: "F", plainText: "F"},
{fullyEncoded: "%47", minimallyEncoded: "G", plainText: "G"},
{fullyEncoded: "%48", minimallyEncoded: "H", plainText: "H"},
{fullyEncoded: "%49", minimallyEncoded: "I", plainText: "I"},
{fullyEncoded: "%4A", minimallyEncoded: "J", plainText: "J"},
{fullyEncoded: "%4B", minimallyEncoded: "K", plainText: "K"},
{fullyEncoded: "%4C", minimallyEncoded: "L", plainText: "L"},
{fullyEncoded: "%4D", minimallyEncoded: "M", plainText: "M"},
{fullyEncoded: "%4E", minimallyEncoded: "N", plainText: "N"},
{fullyEncoded: "%4F", minimallyEncoded: "O", plainText: "O"},
{fullyEncoded: "%50", minimallyEncoded: "P", plainText: "P"},
{fullyEncoded: "%51", minimallyEncoded: "Q", plainText: "Q"},
{fullyEncoded: "%52", minimallyEncoded: "R", plainText: "R"},
{fullyEncoded: "%53", minimallyEncoded: "S", plainText: "S"},
{fullyEncoded: "%54", minimallyEncoded: "T", plainText: "T"},
{fullyEncoded: "%55", minimallyEncoded: "U", plainText: "U"},
{fullyEncoded: "%56", minimallyEncoded: "V", plainText: "V"},
{fullyEncoded: "%57", minimallyEncoded: "W", plainText: "W"},
{fullyEncoded: "%58", minimallyEncoded: "X", plainText: "X"},
{fullyEncoded: "%59", minimallyEncoded: "Y", plainText: "Y"},
{fullyEncoded: "%5A", minimallyEncoded: "Z", plainText: "Z"},
{fullyEncoded: "%5B", minimallyEncoded: "%5B", plainText: "["},
{fullyEncoded: "%5C", minimallyEncoded: "%5C", plainText: "\\"},
{fullyEncoded: "%5D", minimallyEncoded: "%5D", plainText: "]"},
{fullyEncoded: "%5E", minimallyEncoded: "%5E", plainText: "^"},
{fullyEncoded: "%5F", minimallyEncoded: "_", plainText: "_"},
{fullyEncoded: "%60", minimallyEncoded: "%60", plainText: "`"},
{fullyEncoded: "%61", minimallyEncoded: "a", plainText: "a"},
{fullyEncoded: "%62", minimallyEncoded: "b", plainText: "b"},
{fullyEncoded: "%63", minimallyEncoded: "c", plainText: "c"},
{fullyEncoded: "%64", minimallyEncoded: "d", plainText: "d"},
{fullyEncoded: "%65", minimallyEncoded: "e", plainText: "e"},
{fullyEncoded: "%66", minimallyEncoded: "f", plainText: "f"},
{fullyEncoded: "%67", minimallyEncoded: "g", plainText: "g"},
{fullyEncoded: "%68", minimallyEncoded: "h", plainText: "h"},
{fullyEncoded: "%69", minimallyEncoded: "i", plainText: "i"},
{fullyEncoded: "%6A", minimallyEncoded: "j", plainText: "j"},
{fullyEncoded: "%6B", minimallyEncoded: "k", plainText: "k"},
{fullyEncoded: "%6C", minimallyEncoded: "l", plainText: "l"},
{fullyEncoded: "%6D", minimallyEncoded: "m", plainText: "m"},
{fullyEncoded: "%6E", minimallyEncoded: "n", plainText: "n"},
{fullyEncoded: "%6F", minimallyEncoded: "o", plainText: "o"},
{fullyEncoded: "%70", minimallyEncoded: "p", plainText: "p"},
{fullyEncoded: "%71", minimallyEncoded: "q", plainText: "q"},
{fullyEncoded: "%72", minimallyEncoded: "r", plainText: "r"},
{fullyEncoded: "%73", minimallyEncoded: "s", plainText: "s"},
{fullyEncoded: "%74", minimallyEncoded: "t", plainText: "t"},
{fullyEncoded: "%75", minimallyEncoded: "u", plainText: "u"},
{fullyEncoded: "%76", minimallyEncoded: "v", plainText: "v"},
{fullyEncoded: "%77", minimallyEncoded: "w", plainText: "w"},
{fullyEncoded: "%78", minimallyEncoded: "x", plainText: "x"},
{fullyEncoded: "%79", minimallyEncoded: "y", plainText: "y"},
{fullyEncoded: "%7A", minimallyEncoded: "z", plainText: "z"},
{fullyEncoded: "%7B", minimallyEncoded: "%7B", plainText: "{"},
{fullyEncoded: "%7C", minimallyEncoded: "%7C", plainText: "|"},
{fullyEncoded: "%7D", minimallyEncoded: "%7D", plainText: "}"},
{fullyEncoded: "%7E", minimallyEncoded: "~", plainText: "~"},
{fullyEncoded: "%7F", minimallyEncoded: "%7F", plainText: "\u007f"},
{fullyEncoded: "%E8%87%AA%E7%94%B1", minimallyEncoded: "%E8%87%AA%E7%94%B1", plainText: "自由"},
{fullyEncoded: "%F0%90%90%80", minimallyEncoded: "%F0%90%90%80", plainText: "𐐀"},
}
func TestUrlEncode(t *testing.T) {
for _, test := range encodeTest {
got := urlEncode(test.plainText)
if got != test.minimallyEncoded && got != test.fullyEncoded {
t.Errorf("urlEncode(%q) got %q wanted %q or %q", test.plainText, got, test.minimallyEncoded, test.fullyEncoded)
}
}
}
func TestTimeString(t *testing.T) {
for _, test := range []struct {
in time.Time
want string
}{
{fstest.Time("1970-01-01T00:00:00.000000000Z"), "0"},
{fstest.Time("2001-02-03T04:05:10.123123123Z"), "981173110123"},
{fstest.Time("2001-02-03T05:05:10.123123123+01:00"), "981173110123"},
} {
got := timeString(test.in)
if test.want != got {
t.Logf("%v: want %v got %v", test.in, test.want, got)
}
}
}
func TestParseTimeString(t *testing.T) {
for _, test := range []struct {
in string
want time.Time
wantError string
}{
{"0", fstest.Time("1970-01-01T00:00:00.000000000Z"), ""},
{"981173110123", fstest.Time("2001-02-03T04:05:10.123000000Z"), ""},
{"", time.Time{}, ""},
{"potato", time.Time{}, `strconv.ParseInt: parsing "potato": invalid syntax`},
} {
o := Object{}
err := o.parseTimeString(test.in)
got := o.modTime
var gotError string
if err != nil {
gotError = err.Error()
}
if test.want != got {
t.Logf("%v: want %v got %v", test.in, test.want, got)
}
if test.wantError != gotError {
t.Logf("%v: want error %v got error %v", test.in, test.wantError, gotError)
}
}
}


@@ -1,17 +0,0 @@
// Test B2 filesystem interface
package b2_test
import (
"testing"
"github.com/ncw/rclone/backend/b2"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestB2:",
NilObject: (*b2.Object)(nil),
})
}


@@ -1,433 +0,0 @@
// Upload large files for b2
//
// Docs - https://www.backblaze.com/b2/docs/large_files.html
package b2
import (
"bytes"
"crypto/sha1"
"encoding/hex"
"fmt"
gohash "hash"
"io"
"strings"
"sync"
"github.com/ncw/rclone/backend/b2/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
)
type hashAppendingReader struct {
h gohash.Hash
in io.Reader
hexSum string
hexReader io.Reader
}
// Read returns all bytes from the original reader, then the hex sum
// of what was read so far, then EOF.
func (har *hashAppendingReader) Read(b []byte) (int, error) {
if har.hexReader == nil {
n, err := har.in.Read(b)
if err == io.EOF {
har.in = nil // allow GC
err = nil // allow reading hexSum before EOF
har.hexSum = hex.EncodeToString(har.h.Sum(nil))
har.hexReader = strings.NewReader(har.hexSum)
}
return n, err
}
return har.hexReader.Read(b)
}
// AdditionalLength returns how many bytes the appended hex sum will take up.
func (har *hashAppendingReader) AdditionalLength() int {
return hex.EncodedLen(har.h.Size())
}
// HexSum returns the hash sum as hex. It's only available after the original
// reader has EOF'd. It's an empty string before that.
func (har *hashAppendingReader) HexSum() string {
return har.hexSum
}
// newHashAppendingReader takes a Reader and a Hash and will append the hex sum
// after the original reader reaches EOF. The increased size depends on the
// given hash, which may be queried through AdditionalLength()
func newHashAppendingReader(in io.Reader, h gohash.Hash) *hashAppendingReader {
withHash := io.TeeReader(in, h)
return &hashAppendingReader{h: h, in: withHash}
}
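// Illustrative sketch only, not part of this file: draining a
// hashAppendingReader yields the payload followed by its hex SHA1, which is
// the layout expected when the SHA1 header is set to "hex_digits_at_end" (as
// transferChunk below does). Assumes "io/ioutil" is also imported.
func exampleHexDigitsAtEnd(payload []byte) ([]byte, string, error) {
	in := newHashAppendingReader(bytes.NewReader(payload), sha1.New())
	body, err := ioutil.ReadAll(in) // payload then appended hex sum
	if err != nil {
		return nil, "", err
	}
	// HexSum is populated once the original reader has been fully read.
	return body, in.HexSum(), nil
}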
// largeUpload is used to control the upload of large files which need chunking
type largeUpload struct {
f *Fs // parent Fs
o *Object // object being uploaded
in io.Reader // read the data from here
wrap accounting.WrapFn // account parts being transferred
id string // ID of the file being uploaded
size int64 // total size
parts int64 // calculated number of parts, if known
sha1s []string // slice of SHA1s for each part
uploadMu sync.Mutex // lock for upload variable
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
}
// newLargeUpload starts an upload of object o from in with metadata in src
func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
remote := o.remote
size := src.Size()
parts := int64(0)
sha1SliceSize := int64(maxParts)
if size == -1 {
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
} else {
parts = size / int64(o.fs.opt.ChunkSize)
if size%int64(o.fs.opt.ChunkSize) != 0 {
parts++
}
if parts > maxParts {
return nil, errors.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
}
sha1SliceSize = parts
}
modTime := src.ModTime()
opts := rest.Opts{
Method: "POST",
Path: "/b2_start_large_file",
}
bucketID, err := f.getBucketID()
if err != nil {
return nil, err
}
var request = api.StartLargeFileRequest{
BucketID: bucketID,
Name: o.fs.root + remote,
ContentType: fs.MimeType(src),
Info: map[string]string{
timeKey: timeString(modTime),
},
}
// Set the SHA1 if known
if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" {
request.Info[sha1Key] = calculatedSha1
}
var response api.StartLargeFileResponse
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(&opts, &request, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
// unwrap the accounting from the input, we use wrap to put it
// back on after the buffering
in, wrap := accounting.UnWrap(in)
up = &largeUpload{
f: f,
o: o,
in: in,
wrap: wrap,
id: response.ID,
size: size,
parts: parts,
sha1s: make([]string, sha1SliceSize),
}
return up, nil
}
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
//
// This should be returned with returnUploadURL when finished
func (up *largeUpload) getUploadURL() (upload *api.GetUploadPartURLResponse, err error) {
up.uploadMu.Lock()
defer up.uploadMu.Unlock()
if len(up.uploads) == 0 {
opts := rest.Opts{
Method: "POST",
Path: "/b2_get_upload_part_url",
}
var request = api.GetUploadPartURLRequest{
ID: up.id,
}
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(&opts, &request, &upload)
return up.f.shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get upload URL")
}
} else {
upload, up.uploads = up.uploads[0], up.uploads[1:]
}
return upload, nil
}
// returnUploadURL returns the UploadURL to the cache
func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
if upload == nil {
return
}
up.uploadMu.Lock()
up.uploads = append(up.uploads, upload)
up.uploadMu.Unlock()
}
// clearUploadURL clears the current UploadURL and the AuthorizationToken
func (up *largeUpload) clearUploadURL() {
up.uploadMu.Lock()
up.uploads = nil
up.uploadMu.Unlock()
}
// Transfer a chunk
func (up *largeUpload) transferChunk(part int64, body []byte) error {
err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
// Get upload URL
upload, err := up.getUploadURL()
if err != nil {
return false, err
}
in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
size := int64(len(body)) + int64(in.AdditionalLength())
// Authorization
//
// An upload authorization token, from b2_get_upload_part_url.
//
// X-Bz-Part-Number
//
// A number from 1 to 10000. The parts uploaded for one file
// must have contiguous numbers, starting with 1.
//
// Content-Length
//
// The number of bytes in the file being uploaded. Note that
// this header is required; you cannot leave it out and just
// use chunked encoding. The minimum size of every part but
// the last one is 100MB.
//
// X-Bz-Content-Sha1
//
// The SHA1 checksum of this part of the file. B2 will
// check this when the part is uploaded, to make sure that the
// data arrived correctly. The same SHA1 checksum must be
// passed to b2_finish_large_file.
opts := rest.Opts{
Method: "POST",
RootURL: upload.UploadURL,
Body: up.wrap(in),
ExtraHeaders: map[string]string{
"Authorization": upload.AuthorizationToken,
"X-Bz-Part-Number": fmt.Sprintf("%d", part),
sha1Header: "hex_digits_at_end",
},
ContentLength: &size,
}
var response api.UploadPartResponse
resp, err := up.f.srv.CallJSON(&opts, nil, &response)
retry, err := up.f.shouldRetry(resp, err)
if err != nil {
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
}
// On retryable error clear PartUploadURL
if retry {
fs.Debugf(up.o, "Clearing part upload URL because of error: %v", err)
upload = nil
}
up.returnUploadURL(upload)
up.sha1s[part-1] = in.HexSum()
return retry, err
})
if err != nil {
fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
} else {
fs.Debugf(up.o, "Done sending chunk %d", part)
}
return err
}
// finish closes off the large upload
func (up *largeUpload) finish() error {
fs.Debugf(up.o, "Finishing large file upload with %d parts", up.parts)
opts := rest.Opts{
Method: "POST",
Path: "/b2_finish_large_file",
}
var request = api.FinishLargeFileRequest{
ID: up.id,
SHA1s: up.sha1s,
}
var response api.FileInfo
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(&opts, &request, &response)
return up.f.shouldRetry(resp, err)
})
if err != nil {
return err
}
return up.o.decodeMetaDataFileInfo(&response)
}
// cancel aborts the large upload
func (up *largeUpload) cancel() error {
opts := rest.Opts{
Method: "POST",
Path: "/b2_cancel_large_file",
}
var request = api.CancelLargeFileRequest{
ID: up.id,
}
var response api.CancelLargeFileResponse
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(&opts, &request, &response)
return up.f.shouldRetry(resp, err)
})
return err
}
func (up *largeUpload) managedTransferChunk(wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
wg.Add(1)
go func(part int64, buf []byte) {
defer wg.Done()
defer up.f.putUploadBlock(buf)
err := up.transferChunk(part, buf)
if err != nil {
select {
case errs <- err:
default:
}
}
}(part, buf)
}
func (up *largeUpload) finishOrCancelOnError(err error, errs chan error) error {
if err == nil {
select {
case err = <-errs:
default:
}
}
if err != nil {
fs.Debugf(up.o, "Cancelling large file upload due to error: %v", err)
cancelErr := up.cancel()
if cancelErr != nil {
fs.Errorf(up.o, "Failed to cancel large file upload: %v", cancelErr)
}
return err
}
return up.finish()
}
// Stream uploads the chunks from the input, starting with a required initial
// chunk. Assumes the file size is unknown and will upload until the input
// reaches EOF.
func (up *largeUpload) Stream(initialUploadBlock []byte) (err error) {
fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
errs := make(chan error, 1)
hasMoreParts := true
var wg sync.WaitGroup
// Transfer initial chunk
up.size = int64(len(initialUploadBlock))
up.managedTransferChunk(&wg, errs, 1, initialUploadBlock)
outer:
for part := int64(2); hasMoreParts; part++ {
// Check any errors
select {
case err = <-errs:
break outer
default:
}
// Get a block of memory
buf := up.f.getUploadBlock()
// Read the chunk
var n int
n, err = io.ReadFull(up.in, buf)
if err == io.ErrUnexpectedEOF {
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
buf = buf[:n]
hasMoreParts = false
err = nil
} else if err == io.EOF {
fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
up.f.putUploadBlock(buf)
err = nil
break outer
} else if err != nil {
// other kinds of errors indicate failure
up.f.putUploadBlock(buf)
break outer
}
// Keep stats up to date
up.parts = part
up.size += int64(n)
if part > maxParts {
err = errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
break outer
}
// Transfer the chunk
up.managedTransferChunk(&wg, errs, part, buf)
}
wg.Wait()
up.sha1s = up.sha1s[:up.parts]
return up.finishOrCancelOnError(err, errs)
}
// Upload uploads the chunks from the input
func (up *largeUpload) Upload() error {
fs.Debugf(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
remaining := up.size
errs := make(chan error, 1)
var wg sync.WaitGroup
var err error
outer:
for part := int64(1); part <= up.parts; part++ {
// Check any errors
select {
case err = <-errs:
break outer
default:
}
reqSize := remaining
if reqSize >= int64(up.f.opt.ChunkSize) {
reqSize = int64(up.f.opt.ChunkSize)
}
// Get a block of memory
buf := up.f.getUploadBlock()[:reqSize]
// Read the chunk
_, err = io.ReadFull(up.in, buf)
if err != nil {
up.f.putUploadBlock(buf)
break outer
}
// Transfer the chunk
up.managedTransferChunk(&wg, errs, part, buf)
remaining -= reqSize
}
wg.Wait()
return up.finishOrCancelOnError(err, errs)
}


@@ -1,192 +0,0 @@
// Package api has type definitions for box
//
// Converted from the API docs with help from https://mholt.github.io/json-to-go/
package api
import (
"encoding/json"
"fmt"
"time"
)
const (
// 2017-05-03T07:26:10-07:00
timeFormat = `"` + time.RFC3339 + `"`
)
// Time represents date and time information for the
// box API, by using RFC3339
type Time time.Time
// MarshalJSON turns a Time into RFC3339 JSON
func (t *Time) MarshalJSON() (out []byte, err error) {
timeString := (*time.Time)(t).Format(timeFormat)
return []byte(timeString), nil
}
// UnmarshalJSON turns JSON into a Time
func (t *Time) UnmarshalJSON(data []byte) error {
newT, err := time.Parse(timeFormat, string(data))
if err != nil {
return err
}
*t = Time(newT)
return nil
}
// Error is returned from box when things go wrong
type Error struct {
Type string `json:"type"`
Status int `json:"status"`
Code string `json:"code"`
ContextInfo json.RawMessage
HelpURL string `json:"help_url"`
Message string `json:"message"`
RequestID string `json:"request_id"`
}
// Error returns a string for the error and satisfies the error interface
func (e *Error) Error() string {
out := fmt.Sprintf("Error %q (%d)", e.Code, e.Status)
if e.Message != "" {
out += ": " + e.Message
}
if e.ContextInfo != nil {
out += fmt.Sprintf(" (%+v)", e.ContextInfo)
}
return out
}
// Check Error satisfies the error interface
var _ error = (*Error)(nil)
// ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status"
// Types of things in Item
const (
ItemTypeFolder = "folder"
ItemTypeFile = "file"
ItemStatusActive = "active"
ItemStatusTrashed = "trashed"
ItemStatusDeleted = "deleted"
)
// Item describes a folder or a file as returned by Get Folder Items and others
type Item struct {
Type string `json:"type"`
ID string `json:"id"`
SequenceID string `json:"sequence_id"`
Etag string `json:"etag"`
SHA1 string `json:"sha1"`
Name string `json:"name"`
Size float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
CreatedAt Time `json:"created_at"`
ModifiedAt Time `json:"modified_at"`
ContentCreatedAt Time `json:"content_created_at"`
ContentModifiedAt Time `json:"content_modified_at"`
ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
}
// ModTime returns the modification time of the item
func (i *Item) ModTime() (t time.Time) {
t = time.Time(i.ContentModifiedAt)
if t.IsZero() {
t = time.Time(i.ModifiedAt)
}
return t
}
// FolderItems is returned from the GetFolderItems call
type FolderItems struct {
TotalCount int `json:"total_count"`
Entries []Item `json:"entries"`
Offset int `json:"offset"`
Limit int `json:"limit"`
Order []struct {
By string `json:"by"`
Direction string `json:"direction"`
} `json:"order"`
}
// Parent defines the ID of the parent directory
type Parent struct {
ID string `json:"id"`
}
// CreateFolder is the request for Create Folder
type CreateFolder struct {
Name string `json:"name"`
Parent Parent `json:"parent"`
}
// UploadFile is the request for Upload File
type UploadFile struct {
Name string `json:"name"`
Parent Parent `json:"parent"`
ContentCreatedAt Time `json:"content_created_at"`
ContentModifiedAt Time `json:"content_modified_at"`
}
// UpdateFileModTime is used in Update File Info
type UpdateFileModTime struct {
ContentModifiedAt Time `json:"content_modified_at"`
}
// UpdateFileMove is the request for Update File Info to change name and parent
type UpdateFileMove struct {
Name string `json:"name"`
Parent Parent `json:"parent"`
}
// CopyFile is the request for Copy File
type CopyFile struct {
Name string `json:"name"`
Parent Parent `json:"parent"`
}
// UploadSessionRequest is used in Create Upload Session
type UploadSessionRequest struct {
FolderID string `json:"folder_id,omitempty"` // don't pass for update
FileSize int64 `json:"file_size"`
FileName string `json:"file_name,omitempty"` // optional for update
}
// UploadSessionResponse is returned from Create Upload Session
type UploadSessionResponse struct {
TotalParts int `json:"total_parts"`
PartSize int64 `json:"part_size"`
SessionEndpoints struct {
ListParts string `json:"list_parts"`
Commit string `json:"commit"`
UploadPart string `json:"upload_part"`
Status string `json:"status"`
Abort string `json:"abort"`
} `json:"session_endpoints"`
SessionExpiresAt Time `json:"session_expires_at"`
ID string `json:"id"`
Type string `json:"type"`
NumPartsProcessed int `json:"num_parts_processed"`
}
// Part defines the return from the upload part call, which is also passed to commit upload
type Part struct {
PartID string `json:"part_id"`
Offset int64 `json:"offset"`
Size int64 `json:"size"`
Sha1 string `json:"sha1"`
}
// UploadPartResponse is returned from the upload part call
type UploadPartResponse struct {
Part Part `json:"part"`
}
// CommitUpload is used in the Commit Upload call
type CommitUpload struct {
Parts []Part `json:"parts"`
Attributes struct {
ContentCreatedAt Time `json:"content_created_at"`
ContentModifiedAt Time `json:"content_modified_at"`
} `json:"attributes"`
}

File diff suppressed because it is too large


@@ -1,17 +0,0 @@
// Test Box filesystem interface
package box_test
import (
"testing"
"github.com/ncw/rclone/backend/box"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestBox:",
NilObject: (*box.Object)(nil),
})
}


@@ -1,275 +0,0 @@
// multipart upload for box
package box
import (
"bytes"
"crypto/sha1"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"strconv"
"sync"
"time"
"github.com/ncw/rclone/backend/box/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
)
// createUploadSession creates an upload session for the object
func (o *Object) createUploadSession(leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/files/upload_sessions",
RootURL: uploadURL,
}
request := api.UploadSessionRequest{
FileSize: size,
}
// If the object has an ID then it already exists, so create a new version
if o.id != "" {
opts.Path = "/files/" + o.id + "/upload_sessions"
} else {
opts.Path = "/files/upload_sessions"
request.FolderID = directoryID
request.FileName = replaceReservedChars(leaf)
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(&opts, &request, &response)
return shouldRetry(resp, err)
})
return
}
// sha1Digest produces a digest using sha1 as per RFC3230
func sha1Digest(digest []byte) string {
return "sha=" + base64.StdEncoding.EncodeToString(digest)
}
// uploadPart uploads a part in an upload session
func (o *Object) uploadPart(SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
chunkSize := int64(len(chunk))
sha1sum := sha1.Sum(chunk)
opts := rest.Opts{
Method: "PUT",
Path: "/files/upload_sessions/" + SessionID,
RootURL: uploadURL,
ContentType: "application/octet-stream",
ContentLength: &chunkSize,
ContentRange: fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, totalSize),
ExtraHeaders: map[string]string{
"Digest": sha1Digest(sha1sum[:]),
},
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
opts.Body = wrap(bytes.NewReader(chunk))
resp, err = o.fs.srv.CallJSON(&opts, nil, &response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
return response, nil
}
// commitUpload finishes an upload session
func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/files/upload_sessions/" + SessionID + "/commit",
RootURL: uploadURL,
ExtraHeaders: map[string]string{
"Digest": sha1Digest(sha1sum),
},
}
request := api.CommitUpload{
Parts: parts,
}
request.Attributes.ContentModifiedAt = api.Time(modTime)
request.Attributes.ContentCreatedAt = api.Time(modTime)
var body []byte
var resp *http.Response
// For discussion of this value see:
// https://github.com/ncw/rclone/issues/2054
maxTries := o.fs.opt.CommitRetries
const defaultDelay = 10
var tries int
outer:
for tries = 0; tries < maxTries; tries++ {
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(&opts, &request, nil)
if err != nil {
return shouldRetry(resp, err)
}
body, err = rest.ReadBody(resp)
return shouldRetry(resp, err)
})
delay := defaultDelay
why := "unknown"
if err != nil {
// Sometimes we get 400 Error with
// parts_mismatch immediately after uploading
// the last part. Ignore this error and wait.
if boxErr, ok := err.(*api.Error); ok && boxErr.Code == "parts_mismatch" {
why = err.Error()
} else {
return nil, err
}
} else {
switch resp.StatusCode {
case http.StatusOK, http.StatusCreated:
break outer
case http.StatusAccepted:
why = "not ready yet"
delayString := resp.Header.Get("Retry-After")
if delayString != "" {
delay, err = strconv.Atoi(delayString)
if err != nil {
fs.Debugf(o, "Couldn't decode Retry-After header %q: %v", delayString, err)
delay = defaultDelay
}
}
default:
return nil, errors.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
}
}
fs.Debugf(o, "commit multipart upload failed %d/%d - trying again in %d seconds (%s)", tries+1, maxTries, delay, why)
time.Sleep(time.Duration(delay) * time.Second)
}
if tries >= maxTries {
return nil, errors.New("too many tries to commit multipart upload - increase --low-level-retries")
}
err = json.Unmarshal(body, &result)
if err != nil {
return nil, errors.Wrapf(err, "couldn't decode commit response: %q", body)
}
return result, nil
}
// abortUpload cancels an upload session
func (o *Object) abortUpload(SessionID string) (err error) {
opts := rest.Opts{
Method: "DELETE",
Path: "/files/upload_sessions/" + SessionID,
RootURL: uploadURL,
NoResponse: true,
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
return err
}
// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
// Create upload session
session, err := o.createUploadSession(leaf, directoryID, size)
if err != nil {
return errors.Wrap(err, "multipart upload create session failed")
}
chunkSize := session.PartSize
fs.Debugf(o, "Multipart upload session started for %d parts of size %v", session.TotalParts, fs.SizeSuffix(chunkSize))
// Cancel the session if something went wrong
defer func() {
if err != nil {
fs.Debugf(o, "Cancelling multipart upload: %v", err)
cancelErr := o.abortUpload(session.ID)
if cancelErr != nil {
fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr)
}
}
}()
// unwrap the accounting from the input, we use wrap to put it
// back on after the buffering
in, wrap := accounting.UnWrap(in)
// Upload the chunks
remaining := size
position := int64(0)
parts := make([]api.Part, session.TotalParts)
hash := sha1.New()
errs := make(chan error, 1)
var wg sync.WaitGroup
outer:
for part := 0; part < session.TotalParts; part++ {
// Check any errors
select {
case err = <-errs:
break outer
default:
}
reqSize := remaining
if reqSize >= int64(chunkSize) {
reqSize = int64(chunkSize)
}
// Make a block of memory
buf := make([]byte, reqSize)
// Read the chunk
_, err = io.ReadFull(in, buf)
if err != nil {
err = errors.Wrap(err, "multipart upload failed to read source")
break outer
}
// Make the global hash (must be done sequentially)
_, _ = hash.Write(buf)
// Transfer the chunk
wg.Add(1)
o.fs.uploadToken.Get()
go func(part int, position int64) {
defer wg.Done()
defer o.fs.uploadToken.Put()
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
partResponse, err := o.uploadPart(session.ID, position, size, buf, wrap)
if err != nil {
err = errors.Wrap(err, "multipart upload failed to upload part")
select {
case errs <- err:
default:
}
return
}
parts[part] = partResponse.Part
}(part, position)
// ready for next block
remaining -= chunkSize
position += chunkSize
}
wg.Wait()
if err == nil {
select {
case err = <-errs:
default:
}
}
if err != nil {
return err
}
// Finalise the upload session
result, err := o.commitUpload(session.ID, parts, modTime, hash.Sum(nil))
if err != nil {
return errors.Wrap(err, "multipart upload failed to finalize")
}
if result.TotalCount != 1 || len(result.Entries) != 1 {
return errors.Errorf("multipart upload failed %v - not sure why", o)
}
return o.setMetaData(&result.Entries[0])
}

backend/cache/cache.go

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -1,78 +0,0 @@
// +build !plan9,!windows
package cache_test
import (
"os"
"testing"
"time"
"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
"github.com/ncw/rclone/cmd/mount"
"github.com/ncw/rclone/cmd/mountlib"
"github.com/ncw/rclone/fs"
"github.com/stretchr/testify/require"
)
func (r *run) mountFs(t *testing.T, f fs.Fs) {
device := f.Name() + ":" + f.Root()
var options = []fuse.MountOption{
fuse.MaxReadahead(uint32(mountlib.MaxReadAhead)),
fuse.Subtype("rclone"),
fuse.FSName(device), fuse.VolumeName(device),
fuse.NoAppleDouble(),
fuse.NoAppleXattr(),
//fuse.AllowOther(),
}
err := os.MkdirAll(r.mntDir, os.ModePerm)
require.NoError(t, err)
c, err := fuse.Mount(r.mntDir, options...)
require.NoError(t, err)
filesys := mount.NewFS(f)
server := fusefs.New(c, nil)
// Serve the mount point in the background returning error to errChan
r.unmountRes = make(chan error, 1)
go func() {
err := server.Serve(filesys)
closeErr := c.Close()
if err == nil {
err = closeErr
}
r.unmountRes <- err
}()
// check if the mount process has an error to report
<-c.Ready
require.NoError(t, c.MountError)
r.unmountFn = func() error {
// Shutdown the VFS
filesys.VFS.Shutdown()
return fuse.Unmount(r.mntDir)
}
r.vfs = filesys.VFS
r.isMounted = true
}
func (r *run) unmountFs(t *testing.T, f fs.Fs) {
var err error
for i := 0; i < 4; i++ {
err = r.unmountFn()
if err != nil {
//log.Printf("signal to umount failed - retrying: %v", err)
time.Sleep(3 * time.Second)
continue
}
break
}
require.NoError(t, err)
err = <-r.unmountRes
require.NoError(t, err)
err = r.vfs.CleanUp()
require.NoError(t, err)
r.isMounted = false
}


@@ -1,124 +0,0 @@
// +build windows
package cache_test
import (
"fmt"
"os"
"testing"
"time"
"github.com/billziss-gh/cgofuse/fuse"
"github.com/ncw/rclone/cmd/cmount"
"github.com/ncw/rclone/cmd/mountlib"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
)
// waitFor runs fn() until it returns true or the timeout expires
func waitFor(fn func() bool) (ok bool) {
const totalWait = 10 * time.Second
const individualWait = 10 * time.Millisecond
for i := 0; i < int(totalWait/individualWait); i++ {
ok = fn()
if ok {
return ok
}
time.Sleep(individualWait)
}
return false
}
func (r *run) mountFs(t *testing.T, f fs.Fs) {
// FIXME implement cmount
t.Skip("windows not supported yet")
device := f.Name() + ":" + f.Root()
options := []string{
"-o", "fsname=" + device,
"-o", "subtype=rclone",
"-o", fmt.Sprintf("max_readahead=%d", mountlib.MaxReadAhead),
"-o", "uid=-1",
"-o", "gid=-1",
"-o", "allow_other",
// This causes FUSE to supply O_TRUNC with the Open
// call which is more efficient for cmount. However
// it does not work with cgofuse on Windows with
// WinFSP so cmount must work with or without it.
"-o", "atomic_o_trunc",
"--FileSystemName=rclone",
}
fsys := cmount.NewFS(f)
host := fuse.NewFileSystemHost(fsys)
// Serve the mount point in the background returning error to errChan
r.unmountRes = make(chan error, 1)
go func() {
var err error
ok := host.Mount(r.mntDir, options)
if !ok {
err = errors.New("mount failed")
}
r.unmountRes <- err
}()
// unmount
r.unmountFn = func() error {
// Shutdown the VFS
fsys.VFS.Shutdown()
if host.Unmount() {
if !waitFor(func() bool {
_, err := os.Stat(r.mntDir)
return err != nil
}) {
t.Fatalf("mountpoint %q didn't disappear after unmount - continuing anyway", r.mntDir)
}
return nil
}
return errors.New("host unmount failed")
}
// Wait for the filesystem to become ready, checking the file
// system didn't blow up before starting
select {
case err := <-r.unmountRes:
require.NoError(t, err)
case <-time.After(time.Second * 3):
}
// Wait for the mount point to be available on Windows
// On Windows the Init signal comes slightly before the mount is ready
if !waitFor(func() bool {
_, err := os.Stat(r.mntDir)
return err == nil
}) {
t.Errorf("mountpoint %q didn't became available on mount", r.mntDir)
}
r.vfs = fsys.VFS
r.isMounted = true
}
func (r *run) unmountFs(t *testing.T, f fs.Fs) {
// FIXME implement cmount
t.Skip("windows not supported yet")
var err error
for i := 0; i < 4; i++ {
err = r.unmountFn()
if err != nil {
//log.Printf("signal to umount failed - retrying: %v", err)
time.Sleep(3 * time.Second)
continue
}
break
}
require.NoError(t, err)
err = <-r.unmountRes
require.NoError(t, err)
err = r.vfs.CleanUp()
require.NoError(t, err)
r.isMounted = false
}


@@ -1,21 +0,0 @@
// Test Cache filesystem interface
// +build !plan9
package cache_test
import (
"testing"
"github.com/ncw/rclone/backend/cache"
_ "github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
})
}


@@ -1,6 +0,0 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build plan9
package cache


@@ -1,455 +0,0 @@
// +build !plan9
package cache_test
import (
"math/rand"
"os"
"path"
"strconv"
"testing"
"time"
"fmt"
"github.com/ncw/rclone/backend/cache"
_ "github.com/ncw/rclone/backend/drive"
"github.com/ncw/rclone/fs"
"github.com/stretchr/testify/require"
)
func TestInternalUploadTempDirCreated(t *testing.T) {
id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)})
defer runInstance.cleanupFs(t, rootFs, boltDb)
_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
require.NoError(t, err)
}
func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltDb *cache.Persistent) {
// create some rand test data
testSize := int64(524288000)
testReader := runInstance.randomReader(t, testSize)
bu := runInstance.listenForBackgroundUpload(t, rootFs, "one")
runInstance.writeRemoteReader(t, rootFs, "one", testReader)
// validate that it exists in temp fs
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
require.NoError(t, err)
if runInstance.rootIsCrypt {
require.Equal(t, int64(524416032), ti.Size())
} else {
require.Equal(t, testSize, ti.Size())
}
de1, err := runInstance.list(t, rootFs, "")
require.NoError(t, err)
require.Len(t, de1, 1)
runInstance.completeBackgroundUpload(t, "one", bu)
// check if it was removed from temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
require.True(t, os.IsNotExist(err))
// check if it can be read
data2, err := runInstance.readDataFromRemote(t, rootFs, "one", 0, int64(1024), false)
require.NoError(t, err)
require.Len(t, data2, 1024)
}
func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}
func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}
func TestInternalUploadMoveExistingFile(t *testing.T) {
id := fmt.Sprintf("tiumef%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir("one")
require.NoError(t, err)
err = rootFs.Mkdir("one/test")
require.NoError(t, err)
err = rootFs.Mkdir("second")
require.NoError(t, err)
// create some rand test data
testSize := int64(10485760)
testReader := runInstance.randomReader(t, testSize)
runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
de1, err := runInstance.list(t, rootFs, "one/test")
require.NoError(t, err)
require.Len(t, de1, 1)
time.Sleep(time.Second * 5)
//_ = os.Remove(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
//require.NoError(t, err)
err = runInstance.dirMove(t, rootFs, "one/test", "second/test")
require.NoError(t, err)
// check if it can be read
de1, err = runInstance.list(t, rootFs, "second/test")
require.NoError(t, err)
require.Len(t, de1, 1)
}
func TestInternalUploadTempPathCleaned(t *testing.T) {
id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir("one")
require.NoError(t, err)
err = rootFs.Mkdir("one/test")
require.NoError(t, err)
err = rootFs.Mkdir("second")
require.NoError(t, err)
// create some rand test data
testSize := int64(1048576)
testReader := runInstance.randomReader(t, testSize)
testReader2 := runInstance.randomReader(t, testSize)
runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
runInstance.writeObjectReader(t, rootFs, "second/data.bin", testReader2)
runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
require.True(t, os.IsNotExist(err))
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
require.True(t, os.IsNotExist(err))
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second")))
require.False(t, os.IsNotExist(err))
runInstance.completeAllBackgroundUploads(t, rootFs, "second/data.bin")
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/data.bin")))
require.True(t, os.IsNotExist(err))
de1, err := runInstance.list(t, rootFs, "one/test")
require.NoError(t, err)
require.Len(t, de1, 1)
// check if it can be read
de1, err = runInstance.list(t, rootFs, "second")
require.NoError(t, err)
require.Len(t, de1, 1)
}
func TestInternalUploadQueueMoreFiles(t *testing.T) {
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir("test")
require.NoError(t, err)
minSize := 5242880
maxSize := 10485760
totalFiles := 10
rand.Seed(time.Now().Unix())
lastFile := ""
for i := 0; i < totalFiles; i++ {
size := int64(rand.Intn(maxSize-minSize) + minSize)
testReader := runInstance.randomReader(t, size)
remote := "test/" + strconv.Itoa(i) + ".bin"
runInstance.writeRemoteReader(t, rootFs, remote, testReader)
// validate that it exists in temp fs
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, remote)))
require.NoError(t, err)
require.Equal(t, size, runInstance.cleanSize(t, ti.Size()))
if runInstance.wrappedIsExternal && i < totalFiles-1 {
time.Sleep(time.Second * 3)
}
lastFile = remote
}
// check if cache lists all files, likely temp upload didn't finish yet
de1, err := runInstance.list(t, rootFs, "test")
require.NoError(t, err)
require.Len(t, de1, totalFiles)
// wait for background uploader to do its thing
runInstance.completeAllBackgroundUploads(t, rootFs, lastFile)
// retry until we have no more temp files and fail if they don't go down to 0
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test")))
require.True(t, os.IsNotExist(err))
// check if cache lists all files
de1, err = runInstance.list(t, rootFs, "test")
require.NoError(t, err)
require.Len(t, de1, totalFiles)
}
func TestInternalUploadTempFileOperations(t *testing.T) {
id := "tiutfo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads()
// create some rand test data
runInstance.mkdir(t, rootFs, "test")
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
// check if it can be read
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data1)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
// test DirMove - allowed
err = runInstance.dirMove(t, rootFs, "test", "second")
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
_, err = rootFs.NewObject("second/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.Error(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
require.NoError(t, err)
_, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
require.Error(t, err)
var started bool
started, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "second/one")))
require.NoError(t, err)
require.False(t, started)
runInstance.mkdir(t, rootFs, "test")
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
}
// test Rmdir - allowed
err = runInstance.rm(t, rootFs, "test")
require.Error(t, err)
require.Contains(t, err.Error(), "directory not empty")
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
started, err := boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
require.False(t, started)
require.NoError(t, err)
// test Move/Rename -- allowed
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
if err != errNotSupported {
require.NoError(t, err)
// try to read from it
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
_, err = rootFs.NewObject("test/second")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data2)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.Error(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
require.NoError(t, err)
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
}
// test Copy -- allowed
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/third")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data2)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
require.NoError(t, err)
}
// test Remove -- allowed
err = runInstance.rm(t, rootFs, "test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.Error(t, err)
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
// test Update -- allowed
firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
require.NoError(t, err)
err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
require.NoError(t, err)
obj2, err := rootFs.NewObject("test/one")
require.NoError(t, err)
data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
require.Equal(t, "one content updated", string(data2))
tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
if runInstance.rootIsCrypt {
require.Equal(t, int64(67), tmpInfo.Size())
} else {
require.Equal(t, int64(len(data2)), tmpInfo.Size())
}
// test SetModTime -- allowed
secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
require.NoError(t, err)
require.NotEqual(t, secondModTime, firstModTime)
require.NotEqual(t, time.Time{}, firstModTime)
require.NotEqual(t, time.Time{}, secondModTime)
}
func TestInternalUploadUploadingFileOperations(t *testing.T) {
id := "tiuufo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads()
// create some rand test data
runInstance.mkdir(t, rootFs, "test")
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
// check if it can be read
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data1)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
err = boltDb.SetPendingUploadToStarted(runInstance.encryptRemoteIfNeeded(t, path.Join(rootFs.Root(), "test/one")))
require.NoError(t, err)
// test DirMove
err = runInstance.dirMove(t, rootFs, "test", "second")
if err != errNotSupported {
require.Error(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
require.Error(t, err)
}
// test Rmdir
err = runInstance.rm(t, rootFs, "test")
require.Error(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
// test Move/Rename
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
if err != errNotSupported {
require.Error(t, err)
// try to read from it
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/second")
require.Error(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
require.Error(t, err)
}
// test Copy -- allowed
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/third")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data2)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
require.NoError(t, err)
}
// test Remove
err = runInstance.rm(t, rootFs, "test/one")
require.Error(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
// test Update - this seems to work. Why? FIXME
//firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
//require.NoError(t, err)
//err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated", func() {
// data2 := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len("one content updated")), true)
// require.Equal(t, "one content", string(data2))
//
// tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
// require.NoError(t, err)
// if runInstance.rootIsCrypt {
// require.Equal(t, int64(67), tmpInfo.Size())
// } else {
// require.Equal(t, int64(len(data2)), tmpInfo.Size())
// }
//})
//require.Error(t, err)
// test SetModTime -- seems to work because of the previous test
//secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
//require.NoError(t, err)
//require.Equal(t, secondModTime, firstModTime)
//require.NotEqual(t, time.Time{}, firstModTime)
//require.NotEqual(t, time.Time{}, secondModTime)
}
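The tests above queue writes into a local temp upload path and then assert that the background uploader eventually drains it, checking with os.Stat and os.IsNotExist. A minimal, hypothetical sketch of that poll-until-gone pattern follows; the helper name waitForRemoval, the interval and the timeout are illustrative assumptions, not part of the deleted test file.

package main

import (
    "fmt"
    "os"
    "time"
)

// waitForRemoval polls path until os.Stat reports it missing or the timeout expires.
// Illustrative helper only; it mirrors the "wait until the temp files are gone" checks above.
func waitForRemoval(path string, timeout time.Duration) error {
    deadline := time.Now().Add(timeout)
    for time.Now().Before(deadline) {
        if _, err := os.Stat(path); os.IsNotExist(err) {
            return nil // the background uploader has moved the file to the remote
        }
        time.Sleep(250 * time.Millisecond)
    }
    return fmt.Errorf("%s still present after %v", path, timeout)
}

func main() {
    if err := waitForRemoval(os.TempDir()+"/rclone-test-upload.bin", 2*time.Second); err != nil {
        fmt.Println(err)
    }
}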

View File

@@ -1,455 +0,0 @@
// +build !plan9
package cache_test
import (
"math/rand"
"os"
"path"
"strconv"
"testing"
"time"
"fmt"
"github.com/ncw/rclone/backend/cache"
_ "github.com/ncw/rclone/backend/drive"
"github.com/ncw/rclone/fs"
"github.com/stretchr/testify/require"
)
func TestInternalUploadTempDirCreated(t *testing.T) {
id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id)})
defer runInstance.cleanupFs(t, rootFs, boltDb)
_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
require.NoError(t, err)
}
func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltDb *cache.Persistent) {
// create some rand test data
testSize := int64(524288000)
testReader := runInstance.randomReader(t, testSize)
bu := runInstance.listenForBackgroundUpload(t, rootFs, "one")
runInstance.writeRemoteReader(t, rootFs, "one", testReader)
// validate that it exists in temp fs
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
require.NoError(t, err)
if runInstance.rootIsCrypt {
require.Equal(t, int64(524416032), ti.Size())
} else {
require.Equal(t, testSize, ti.Size())
}
de1, err := runInstance.list(t, rootFs, "")
require.NoError(t, err)
require.Len(t, de1, 1)
runInstance.completeBackgroundUpload(t, "one", bu)
// check if it was removed from temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
require.True(t, os.IsNotExist(err))
// check if it can be read
data2, err := runInstance.readDataFromRemote(t, rootFs, "one", 0, int64(1024), false)
require.NoError(t, err)
require.Len(t, data2, 1024)
}
func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "0s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}
func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}
func TestInternalUploadMoveExistingFile(t *testing.T) {
id := fmt.Sprintf("tiumef%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "3s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir("one")
require.NoError(t, err)
err = rootFs.Mkdir("one/test")
require.NoError(t, err)
err = rootFs.Mkdir("second")
require.NoError(t, err)
// create some rand test data
testSize := int64(10485760)
testReader := runInstance.randomReader(t, testSize)
runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
de1, err := runInstance.list(t, rootFs, "one/test")
require.NoError(t, err)
require.Len(t, de1, 1)
time.Sleep(time.Second * 5)
//_ = os.Remove(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
//require.NoError(t, err)
err = runInstance.dirMove(t, rootFs, "one/test", "second/test")
require.NoError(t, err)
// check if it can be read
de1, err = runInstance.list(t, rootFs, "second/test")
require.NoError(t, err)
require.Len(t, de1, 1)
}
func TestInternalUploadTempPathCleaned(t *testing.T) {
id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir("one")
require.NoError(t, err)
err = rootFs.Mkdir("one/test")
require.NoError(t, err)
err = rootFs.Mkdir("second")
require.NoError(t, err)
// create some rand test data
testSize := int64(1048576)
testReader := runInstance.randomReader(t, testSize)
testReader2 := runInstance.randomReader(t, testSize)
runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
runInstance.writeObjectReader(t, rootFs, "second/data.bin", testReader2)
runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
require.True(t, os.IsNotExist(err))
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
require.True(t, os.IsNotExist(err))
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second")))
require.False(t, os.IsNotExist(err))
runInstance.completeAllBackgroundUploads(t, rootFs, "second/data.bin")
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/data.bin")))
require.True(t, os.IsNotExist(err))
de1, err := runInstance.list(t, rootFs, "one/test")
require.NoError(t, err)
require.Len(t, de1, 1)
// check if it can be read
de1, err = runInstance.list(t, rootFs, "second")
require.NoError(t, err)
require.Len(t, de1, 1)
}
func TestInternalUploadQueueMoreFiles(t *testing.T) {
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir("test")
require.NoError(t, err)
minSize := 5242880
maxSize := 10485760
totalFiles := 10
rand.Seed(time.Now().Unix())
lastFile := ""
for i := 0; i < totalFiles; i++ {
size := int64(rand.Intn(maxSize-minSize) + minSize)
testReader := runInstance.randomReader(t, size)
remote := "test/" + strconv.Itoa(i) + ".bin"
runInstance.writeRemoteReader(t, rootFs, remote, testReader)
// validate that it exists in temp fs
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, remote)))
require.NoError(t, err)
require.Equal(t, size, runInstance.cleanSize(t, ti.Size()))
if runInstance.wrappedIsExternal && i < totalFiles-1 {
time.Sleep(time.Second * 3)
}
lastFile = remote
}
// check if cache lists all files, likely temp upload didn't finish yet
de1, err := runInstance.list(t, rootFs, "test")
require.NoError(t, err)
require.Len(t, de1, totalFiles)
// wait for background uploader to do its thing
runInstance.completeAllBackgroundUploads(t, rootFs, lastFile)
// all temp files for the test dir should be gone by now; fail if it still exists
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test")))
require.True(t, os.IsNotExist(err))
// check if cache lists all files
de1, err = runInstance.list(t, rootFs, "test")
require.NoError(t, err)
require.Len(t, de1, totalFiles)
}
func TestInternalUploadTempFileOperations(t *testing.T) {
id := "tiutfo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads()
// create some rand test data
runInstance.mkdir(t, rootFs, "test")
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
// check if it can be read
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data1)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
// test DirMove - allowed
err = runInstance.dirMove(t, rootFs, "test", "second")
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
_, err = rootFs.NewObject("second/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.Error(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
require.NoError(t, err)
_, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
require.Error(t, err)
var started bool
started, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "second/one")))
require.NoError(t, err)
require.False(t, started)
runInstance.mkdir(t, rootFs, "test")
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
}
// test Rmdir - allowed
err = runInstance.rm(t, rootFs, "test")
require.Error(t, err)
require.Contains(t, err.Error(), "directory not empty")
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
started, err := boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
require.False(t, started)
require.NoError(t, err)
// test Move/Rename -- allowed
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
if err != errNotSupported {
require.NoError(t, err)
// try to read from it
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
_, err = rootFs.NewObject("test/second")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data2)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.Error(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
require.NoError(t, err)
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
}
// test Copy -- allowed
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/third")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data2)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
require.NoError(t, err)
}
// test Remove -- allowed
err = runInstance.rm(t, rootFs, "test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.Error(t, err)
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
// test Update -- allowed
firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
require.NoError(t, err)
err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
require.NoError(t, err)
obj2, err := rootFs.NewObject("test/one")
require.NoError(t, err)
data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
require.Equal(t, "one content updated", string(data2))
tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
if runInstance.rootIsCrypt {
require.Equal(t, int64(67), tmpInfo.Size())
} else {
require.Equal(t, int64(len(data2)), tmpInfo.Size())
}
// test SetModTime -- allowed
secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
require.NoError(t, err)
require.NotEqual(t, secondModTime, firstModTime)
require.NotEqual(t, time.Time{}, firstModTime)
require.NotEqual(t, time.Time{}, secondModTime)
}
func TestInternalUploadUploadingFileOperations(t *testing.T) {
id := "tiuufo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads()
// create some rand test data
runInstance.mkdir(t, rootFs, "test")
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
// check if it can be read
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data1)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
err = boltDb.SetPendingUploadToStarted(runInstance.encryptRemoteIfNeeded(t, path.Join(rootFs.Root(), "test/one")))
require.NoError(t, err)
// test DirMove
err = runInstance.dirMove(t, rootFs, "test", "second")
if err != errNotSupported {
require.Error(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
require.Error(t, err)
}
// test Rmdir
err = runInstance.rm(t, rootFs, "test")
require.Error(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
// test Move/Rename
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
if err != errNotSupported {
require.Error(t, err)
// try to read from it
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/second")
require.Error(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
require.Error(t, err)
}
// test Copy -- allowed
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/third")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data2)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
require.NoError(t, err)
}
// test Remove
err = runInstance.rm(t, rootFs, "test/one")
require.Error(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
// test Update - this seems to work. Why? FIXME
//firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
//require.NoError(t, err)
//err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated", func() {
// data2 := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len("one content updated")), true)
// require.Equal(t, "one content", string(data2))
//
// tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
// require.NoError(t, err)
// if runInstance.rootIsCrypt {
// require.Equal(t, int64(67), tmpInfo.Size())
// } else {
// require.Equal(t, int64(len(data2)), tmpInfo.Size())
// }
//})
//require.Error(t, err)
// test SetModTime -- seems to work because of the previous test
//secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
//require.NoError(t, err)
//require.Equal(t, secondModTime, firstModTime)
//require.NotEqual(t, time.Time{}, firstModTime)
//require.NotEqual(t, time.Time{}, secondModTime)
}

View File

@@ -1,12 +0,0 @@
--- cache_upload_test.go
+++ cache_upload_test.go
@@ -1500,9 +1469,6 @@ func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
}
r.tempFiles = nil
debug.FreeOSMemory()
- for k, v := range r.runDefaultFlagMap {
- _ = flag.Set(k, v)
- }
}
func (r *run) randomBytes(t *testing.T, size int64) []byte {
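The small patch above removes the loop that reset command-line flags to their recorded defaults in cleanupFs. A minimal sketch of that save-and-restore pattern using the standard flag package follows; the helper names are illustrative, not taken from the deleted code.

package main

import (
    "flag"
    "fmt"
)

// snapshotFlags records the current value of every defined flag so that a test
// may mutate flags freely and restore them afterwards.
func snapshotFlags() map[string]string {
    defaults := make(map[string]string)
    flag.VisitAll(func(f *flag.Flag) {
        defaults[f.Name] = f.Value.String()
    })
    return defaults
}

// restoreFlags sets every flag back to the value captured by snapshotFlags.
func restoreFlags(defaults map[string]string) {
    for name, value := range defaults {
        _ = flag.Set(name, value)
    }
}

func main() {
    verbose := flag.Bool("verbose", false, "example flag")
    flag.Parse()
    saved := snapshotFlags()
    _ = flag.Set("verbose", "true") // a test tweaks the flag...
    restoreFlags(saved)             // ...and puts it back when done
    fmt.Println(*verbose)           // false again
}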

View File

@@ -1,138 +0,0 @@
// +build !plan9
package cache
import (
"time"
"path"
"github.com/ncw/rclone/fs"
)
// Directory is a generic dir that stores basic information about it
type Directory struct {
Directory fs.Directory `json:"-"` // can be nil
CacheFs *Fs `json:"-"` // cache fs
Name string `json:"name"` // name of the directory
Dir string `json:"dir"` // abs path of the directory
CacheModTime int64 `json:"modTime"` // modification or creation time - IsZero for unknown
CacheSize int64 `json:"size"` // size of directory and contents or -1 if unknown
CacheItems int64 `json:"items"` // number of objects or -1 for unknown
CacheType string `json:"cacheType"` // object type
CacheTs *time.Time `json:",omitempty"`
}
// NewDirectory builds an empty dir which will be used to unmarshal data into it
func NewDirectory(f *Fs, remote string) *Directory {
cd := ShallowDirectory(f, remote)
t := time.Now()
cd.CacheTs = &t
return cd
}
// ShallowDirectory builds an empty dir, without a cache timestamp, which will be used to unmarshal data into it
func ShallowDirectory(f *Fs, remote string) *Directory {
var cd *Directory
fullRemote := cleanPath(path.Join(f.Root(), remote))
// build a new one
dir := cleanPath(path.Dir(fullRemote))
name := cleanPath(path.Base(fullRemote))
cd = &Directory{
CacheFs: f,
Name: name,
Dir: dir,
CacheModTime: time.Now().UnixNano(),
CacheSize: 0,
CacheItems: 0,
CacheType: "Directory",
}
return cd
}
// DirectoryFromOriginal builds one from a generic fs.Directory
func DirectoryFromOriginal(f *Fs, d fs.Directory) *Directory {
var cd *Directory
fullRemote := path.Join(f.Root(), d.Remote())
dir := cleanPath(path.Dir(fullRemote))
name := cleanPath(path.Base(fullRemote))
t := time.Now()
cd = &Directory{
Directory: d,
CacheFs: f,
Name: name,
Dir: dir,
CacheModTime: d.ModTime().UnixNano(),
CacheSize: d.Size(),
CacheItems: d.Items(),
CacheType: "Directory",
CacheTs: &t,
}
return cd
}
// Fs returns its FS info
func (d *Directory) Fs() fs.Info {
return d.CacheFs
}
// String returns a human friendly name for this object
func (d *Directory) String() string {
if d == nil {
return "<nil>"
}
return d.Remote()
}
// Remote returns the remote path
func (d *Directory) Remote() string {
return d.CacheFs.cleanRootFromPath(d.abs())
}
// abs returns the absolute path to the dir
func (d *Directory) abs() string {
return cleanPath(path.Join(d.Dir, d.Name))
}
// parentRemote returns the absolute path parent remote
func (d *Directory) parentRemote() string {
absPath := d.abs()
if absPath == "" {
return ""
}
return cleanPath(path.Dir(absPath))
}
// ModTime returns the cached ModTime
func (d *Directory) ModTime() time.Time {
return time.Unix(0, d.CacheModTime)
}
// Size returns the cached Size
func (d *Directory) Size() int64 {
return d.CacheSize
}
// Items returns the cached Items
func (d *Directory) Items() int64 {
return d.CacheItems
}
// ID returns the ID of the cached directory if known
func (d *Directory) ID() string {
if d.Directory == nil {
return ""
}
return d.Directory.ID()
}
var (
_ fs.Directory = (*Directory)(nil)
)
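The Directory type above is persisted as JSON: live handles are excluded with the json:"-" tag, and CacheTs is a pointer so an unset timestamp disappears from the output via omitempty. A small self-contained sketch of the same tagging approach follows; the struct and field names are illustrative, not the deleted type itself.

package main

import (
    "encoding/json"
    "fmt"
    "time"
)

// cachedDir mimics the JSON shape used by the cached Directory entries above.
type cachedDir struct {
    Name    string     `json:"name"`
    Dir     string     `json:"dir"`
    ModTime int64      `json:"modTime"`
    CacheTs *time.Time `json:",omitempty"` // pointer, so a missing timestamp is simply omitted
}

func main() {
    now := time.Now()
    withTs, _ := json.Marshal(cachedDir{Name: "movies", Dir: "/media", ModTime: now.UnixNano(), CacheTs: &now})
    withoutTs, _ := json.Marshal(cachedDir{Name: "movies", Dir: "/media", ModTime: now.UnixNano()})
    fmt.Println(string(withTs))    // includes "CacheTs"
    fmt.Println(string(withoutTs)) // "CacheTs" omitted entirely
}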

View File

@@ -1,668 +0,0 @@
// +build !plan9
package cache
import (
"fmt"
"io"
"sync"
"time"
"path"
"runtime"
"strings"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/operations"
"github.com/pkg/errors"
)
var uploaderMap = make(map[string]*backgroundWriter)
var uploaderMapMx sync.Mutex
// initBackgroundUploader returns a single instance
func initBackgroundUploader(fs *Fs) (*backgroundWriter, error) {
// write lock to create one
uploaderMapMx.Lock()
defer uploaderMapMx.Unlock()
if b, ok := uploaderMap[fs.String()]; ok {
// if it was already started we close it so that it can be started again
if b.running {
b.close()
} else {
return b, nil
}
}
bb := newBackgroundWriter(fs)
uploaderMap[fs.String()] = bb
return uploaderMap[fs.String()], nil
}
// Handle is managing the read/write/seek operations on an open handle
type Handle struct {
cachedObject *Object
cfs *Fs
memory *Memory
preloadQueue chan int64
preloadOffset int64
offset int64
seenOffsets map[int64]bool
mu sync.Mutex
confirmReading chan bool
UseMemory bool
workers []*worker
closed bool
reading bool
}
// NewObjectHandle returns a new Handle for an existing Object
func NewObjectHandle(o *Object, cfs *Fs) *Handle {
r := &Handle{
cachedObject: o,
cfs: cfs,
offset: 0,
preloadOffset: -1, // -1 to trigger the first preload
UseMemory: !cfs.opt.ChunkNoMemory,
reading: false,
}
r.seenOffsets = make(map[int64]bool)
r.memory = NewMemory(-1)
// create a larger buffer to queue up requests
r.preloadQueue = make(chan int64, r.cfs.opt.TotalWorkers*10)
r.confirmReading = make(chan bool)
r.startReadWorkers()
return r
}
// cacheFs is a convenience method to get the parent cache FS of the object's manager
func (r *Handle) cacheFs() *Fs {
return r.cfs
}
// storage is a convenience method to get the persistent storage of the object's manager
func (r *Handle) storage() *Persistent {
return r.cacheFs().cache
}
// String representation of this reader
func (r *Handle) String() string {
return r.cachedObject.abs()
}
// startReadWorkers will start the worker pool
func (r *Handle) startReadWorkers() {
if r.hasAtLeastOneWorker() {
return
}
totalWorkers := r.cacheFs().opt.TotalWorkers
if r.cacheFs().plexConnector.isConfigured() {
if !r.cacheFs().plexConnector.isConnected() {
err := r.cacheFs().plexConnector.authenticate()
if err != nil {
fs.Errorf(r, "failed to authenticate to Plex: %v", err)
}
}
if r.cacheFs().plexConnector.isConnected() {
totalWorkers = 1
}
}
r.scaleWorkers(totalWorkers)
}
// scaleWorkers adjusts the worker pool to the desired size, scaling in or out as needed
func (r *Handle) scaleWorkers(desired int) {
current := len(r.workers)
if current == desired {
return
}
if current > desired {
// scale in gracefully
for i := 0; i < current-desired; i++ {
r.preloadQueue <- -1
}
} else {
// scale out
for i := 0; i < desired-current; i++ {
w := &worker{
r: r,
ch: r.preloadQueue,
id: current + i,
}
go w.run()
r.workers = append(r.workers, w)
}
}
// ignore first scale out from 0
if current != 0 {
fs.Debugf(r, "scale workers to %v", desired)
}
}
func (r *Handle) confirmExternalReading() {
// skip this check if the pool already has more than one worker
// or if Plex isn't configured at all
if len(r.workers) > 1 ||
!r.cacheFs().plexConnector.isConfigured() {
return
}
if !r.cacheFs().plexConnector.isPlaying(r.cachedObject) {
return
}
fs.Infof(r, "confirmed reading by external reader")
r.scaleWorkers(r.cacheFs().opt.TotalWorkers)
}
// queueOffset will send an offset to the workers if it's different from the last one
func (r *Handle) queueOffset(offset int64) {
if offset != r.preloadOffset {
// clean past in-memory chunks
if r.UseMemory {
go r.memory.CleanChunksByNeed(offset)
}
r.confirmExternalReading()
r.preloadOffset = offset
// clear the past seen chunks
// they will remain in our persistent storage but will be removed from transient
// so they need to be picked up by a worker
for k := range r.seenOffsets {
if k < offset {
r.seenOffsets[k] = false
}
}
for i := 0; i < len(r.workers); i++ {
o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
if o < 0 || o >= r.cachedObject.Size() {
continue
}
if v, ok := r.seenOffsets[o]; ok && v {
continue
}
r.seenOffsets[o] = true
r.preloadQueue <- o
}
}
}
func (r *Handle) hasAtLeastOneWorker() bool {
oneWorker := false
for i := 0; i < len(r.workers); i++ {
if r.workers[i].isRunning() {
oneWorker = true
}
}
return oneWorker
}
// getChunk is called by the FS to retrieve a specific chunk of known start and size from where it can find it
// it can be from transient or persistent cache
// it will also build the chunk from the cache's specific chunk boundaries and build the final desired chunk in a buffer
func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
var data []byte
var err error
// we calculate the modulus of the requested offset with the size of a chunk
offset := chunkStart % int64(r.cacheFs().opt.ChunkSize)
// we align the start offset of the first chunk to a likely chunk in the storage
chunkStart = chunkStart - offset
r.queueOffset(chunkStart)
found := false
if r.UseMemory {
data, err = r.memory.GetChunk(r.cachedObject, chunkStart)
if err == nil {
found = true
}
}
if !found {
// give the workers a chance to pick up the chunk
// and retry a few times before giving up
for i := 0; i < r.cacheFs().opt.ReadRetries*8; i++ {
data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
if err == nil {
found = true
break
}
fs.Debugf(r, "%v: chunk retry storage: %v", chunkStart, i)
time.Sleep(time.Millisecond * 500)
}
}
// not found in RAM or the worker didn't manage to download
// the chunk in time, so we abort and close the stream
if err != nil || len(data) == 0 || !found {
if !r.hasAtLeastOneWorker() {
fs.Errorf(r, "out of workers")
return nil, io.ErrUnexpectedEOF
}
return nil, errors.Errorf("chunk not found %v", chunkStart)
}
// first chunk will be aligned with the start
if offset > 0 {
if offset > int64(len(data)) {
fs.Errorf(r, "unexpected conditions during reading. current position: %v, current chunk position: %v, current chunk size: %v, offset: %v, chunk size: %v, file size: %v",
r.offset, chunkStart, len(data), offset, r.cacheFs().opt.ChunkSize, r.cachedObject.Size())
return nil, io.ErrUnexpectedEOF
}
data = data[int(offset):]
}
return data, nil
}
// Read a chunk from storage or len(p)
func (r *Handle) Read(p []byte) (n int, err error) {
r.mu.Lock()
defer r.mu.Unlock()
var buf []byte
// first reading
if !r.reading {
r.reading = true
}
// reached EOF
if r.offset >= r.cachedObject.Size() {
return 0, io.EOF
}
currentOffset := r.offset
buf, err = r.getChunk(currentOffset)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
fs.Errorf(r, "(%v/%v) error (%v) response", currentOffset, r.cachedObject.Size(), err)
}
if len(buf) == 0 && err != io.ErrUnexpectedEOF {
return 0, io.EOF
}
readSize := copy(p, buf)
newOffset := currentOffset + int64(readSize)
r.offset = newOffset
return readSize, err
}
// Close will tell the workers to stop
func (r *Handle) Close() error {
r.mu.Lock()
defer r.mu.Unlock()
if r.closed {
return errors.New("file already closed")
}
close(r.preloadQueue)
r.closed = true
// wait for workers to complete their jobs before returning
waitCount := 3
for i := 0; i < len(r.workers); i++ {
waitIdx := 0
for r.workers[i].isRunning() && waitIdx < waitCount {
time.Sleep(time.Second)
waitIdx++
}
}
r.memory.db.Flush()
fs.Debugf(r, "cache reader closed %v", r.offset)
return nil
}
// Seek will move the current offset based on whence and instruct the workers to move there too
func (r *Handle) Seek(offset int64, whence int) (int64, error) {
r.mu.Lock()
defer r.mu.Unlock()
var err error
switch whence {
case io.SeekStart:
fs.Debugf(r, "moving offset set from %v to %v", r.offset, offset)
r.offset = offset
case io.SeekCurrent:
fs.Debugf(r, "moving offset cur from %v to %v", r.offset, r.offset+offset)
r.offset += offset
case io.SeekEnd:
fs.Debugf(r, "moving offset end (%v) from %v to %v", r.cachedObject.Size(), r.offset, r.cachedObject.Size()+offset)
r.offset = r.cachedObject.Size() + offset
default:
err = errors.Errorf("cache: unimplemented seek whence %v", whence)
}
chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
if chunkStart >= int64(r.cacheFs().opt.ChunkSize) {
chunkStart = chunkStart - int64(r.cacheFs().opt.ChunkSize)
}
r.queueOffset(chunkStart)
return r.offset, err
}
type worker struct {
r *Handle
ch <-chan int64
rc io.ReadCloser
id int
running bool
mu sync.Mutex
}
// String is a representation of this worker
func (w *worker) String() string {
return fmt.Sprintf("worker-%v <%v>", w.id, w.r.cachedObject.Name)
}
// reader will return a reader depending on the capabilities of the source reader:
// - if it supports seeking it will seek to the desired offset and return the same reader
// - if it doesn't support seeking it will close a possible existing one and open at the desired offset
// - if there's no reader associated with this worker, it will create one
func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error) {
var err error
r := w.rc
if w.rc == nil {
r, err = w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
return w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1})
})
if err != nil {
return nil, err
}
return r, nil
}
if !closeOpen {
if do, ok := r.(fs.RangeSeeker); ok {
_, err = do.RangeSeek(offset, io.SeekStart, end-offset)
return r, err
} else if do, ok := r.(io.Seeker); ok {
_, err = do.Seek(offset, io.SeekStart)
return r, err
}
}
_ = w.rc.Close()
return w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
r, err = w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1})
if err != nil {
return nil, err
}
return r, nil
})
}
func (w *worker) isRunning() bool {
w.mu.Lock()
defer w.mu.Unlock()
return w.running
}
func (w *worker) setRunning(f bool) {
w.mu.Lock()
defer w.mu.Unlock()
w.running = f
}
// run is the main loop for the worker which receives offsets to preload
func (w *worker) run() {
var err error
var data []byte
defer w.setRunning(false)
defer func() {
if w.rc != nil {
_ = w.rc.Close()
w.setRunning(false)
}
}()
for {
chunkStart, open := <-w.ch
w.setRunning(true)
if chunkStart < 0 || !open {
break
}
// skip if it exists
if w.r.UseMemory {
if w.r.memory.HasChunk(w.r.cachedObject, chunkStart) {
continue
}
// add it in ram if it's in the persistent storage
data, err = w.r.storage().GetChunk(w.r.cachedObject, chunkStart)
if err == nil {
err = w.r.memory.AddChunk(w.r.cachedObject.abs(), data, chunkStart)
if err != nil {
fs.Errorf(w, "failed caching chunk in ram %v: %v", chunkStart, err)
} else {
continue
}
}
} else {
if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
continue
}
}
chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)
// TODO: Remove this comment if it proves to be reliable for #1896
//if chunkEnd > w.r.cachedObject.Size() {
// chunkEnd = w.r.cachedObject.Size()
//}
w.download(chunkStart, chunkEnd, 0)
}
}
func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
var err error
var data []byte
// stop retries
if retry >= w.r.cacheFs().opt.ReadRetries {
return
}
// back-off between retries
if retry > 0 {
time.Sleep(time.Second * time.Duration(retry))
}
closeOpen := false
if retry > 0 {
closeOpen = true
}
w.rc, err = w.reader(chunkStart, chunkEnd, closeOpen)
// we seem to be getting only errors so we abort
if err != nil {
fs.Errorf(w, "object open failed %v: %v", chunkStart, err)
err = w.r.cachedObject.refreshFromSource(true)
if err != nil {
fs.Errorf(w, "%v", err)
}
w.download(chunkStart, chunkEnd, retry+1)
return
}
data = make([]byte, chunkEnd-chunkStart)
var sourceRead int
sourceRead, err = io.ReadFull(w.rc, data)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err)
err = w.r.cachedObject.refreshFromSource(true)
if err != nil {
fs.Errorf(w, "%v", err)
}
w.download(chunkStart, chunkEnd, retry+1)
return
}
data = data[:sourceRead] // reslice to remove extra garbage
if err == io.ErrUnexpectedEOF {
fs.Debugf(w, "partial downloaded chunk %v", fs.SizeSuffix(chunkStart))
} else {
fs.Debugf(w, "downloaded chunk %v", chunkStart)
}
if w.r.UseMemory {
err = w.r.memory.AddChunk(w.r.cachedObject.abs(), data, chunkStart)
if err != nil {
fs.Errorf(w, "failed caching chunk in ram %v: %v", chunkStart, err)
}
}
err = w.r.storage().AddChunk(w.r.cachedObject.abs(), data, chunkStart)
if err != nil {
fs.Errorf(w, "failed caching chunk in storage %v: %v", chunkStart, err)
}
}
const (
// BackgroundUploadStarted is a state for a temp file that has started upload
BackgroundUploadStarted = iota
// BackgroundUploadCompleted is a state for a temp file that has completed upload
BackgroundUploadCompleted
// BackgroundUploadError is a state for a temp file that has an error upload
BackgroundUploadError
)
// BackgroundUploadState is an entity that maps to an existing file which is stored on the temp fs
type BackgroundUploadState struct {
Remote string
Status int
Error error
}
type backgroundWriter struct {
fs *Fs
stateCh chan int
running bool
notifyCh chan BackgroundUploadState
mu sync.Mutex
}
func newBackgroundWriter(f *Fs) *backgroundWriter {
b := &backgroundWriter{
fs: f,
stateCh: make(chan int),
notifyCh: make(chan BackgroundUploadState),
}
return b
}
func (b *backgroundWriter) close() {
b.stateCh <- 2
b.mu.Lock()
defer b.mu.Unlock()
b.running = false
}
func (b *backgroundWriter) pause() {
b.stateCh <- 1
}
func (b *backgroundWriter) play() {
b.stateCh <- 0
}
func (b *backgroundWriter) isRunning() bool {
b.mu.Lock()
defer b.mu.Unlock()
return b.running
}
func (b *backgroundWriter) notify(remote string, status int, err error) {
state := BackgroundUploadState{
Remote: remote,
Status: status,
Error: err,
}
select {
case b.notifyCh <- state:
fs.Debugf(remote, "notified background upload state: %v", state.Status)
default:
}
}
func (b *backgroundWriter) run() {
state := 0
for {
b.mu.Lock()
b.running = true
b.mu.Unlock()
select {
case s := <-b.stateCh:
state = s
default:
//
}
switch state {
case 1:
runtime.Gosched()
time.Sleep(time.Millisecond * 500)
continue
case 2:
return
}
absPath, err := b.fs.cache.getPendingUpload(b.fs.Root(), time.Duration(b.fs.opt.TempWaitTime))
if err != nil || absPath == "" || !b.fs.isRootInPath(absPath) {
time.Sleep(time.Second)
continue
}
remote := b.fs.cleanRootFromPath(absPath)
b.notify(remote, BackgroundUploadStarted, nil)
fs.Infof(remote, "background upload: started upload")
err = operations.MoveFile(b.fs.UnWrap(), b.fs.tempFs, remote, remote)
if err != nil {
b.notify(remote, BackgroundUploadError, err)
_ = b.fs.cache.rollbackPendingUpload(absPath)
fs.Errorf(remote, "background upload: %v", err)
continue
}
// clean empty dirs up to root
thisDir := cleanPath(path.Dir(remote))
for thisDir != "" {
thisList, err := b.fs.tempFs.List(thisDir)
if err != nil {
break
}
if len(thisList) > 0 {
break
}
err = b.fs.tempFs.Rmdir(thisDir)
fs.Debugf(thisDir, "cleaned from temp path")
if err != nil {
break
}
thisDir = cleanPath(path.Dir(thisDir))
}
fs.Infof(remote, "background upload: uploaded entry")
err = b.fs.cache.removePendingUpload(absPath)
if err != nil && !strings.Contains(err.Error(), "pending upload not found") {
fs.Errorf(remote, "background upload: %v", err)
}
parentCd := NewDirectory(b.fs, cleanPath(path.Dir(remote)))
err = b.fs.cache.ExpireDir(parentCd)
if err != nil {
fs.Errorf(parentCd, "background upload: cache expire error: %v", err)
}
b.fs.notifyChangeUpstream(remote, fs.EntryObject)
fs.Infof(remote, "finished background upload")
b.notify(remote, BackgroundUploadCompleted, nil)
}
}
// Check the interfaces are satisfied
var (
_ io.ReadCloser = (*Handle)(nil)
_ io.Seeker = (*Handle)(nil)
)
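getChunk and Seek above both align an arbitrary byte offset down to a chunk boundary with a modulus before handing work to the download workers. A minimal sketch of that alignment arithmetic follows; the 5 MiB chunk size is chosen only for illustration.

package main

import "fmt"

const chunkSize int64 = 5 * 1024 * 1024 // illustrative chunk size

// alignToChunk returns the start of the chunk containing offset and the position
// of offset within that chunk, mirroring the modulus arithmetic in getChunk.
func alignToChunk(offset int64) (chunkStart, within int64) {
    within = offset % chunkSize
    chunkStart = offset - within
    return chunkStart, within
}

func main() {
    start, within := alignToChunk(12582912) // 12 MiB into the file
    fmt.Println(start, within)              // 10485760 2097152
}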

View File

@@ -1,365 +0,0 @@
// +build !plan9
package cache
import (
"io"
"path"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
)
const (
objectInCache = "Object"
objectPendingUpload = "TempObject"
)
// Object is a generic file like object that stores basic information about it
type Object struct {
fs.Object `json:"-"`
ParentFs fs.Fs `json:"-"` // parent fs
CacheFs *Fs `json:"-"` // cache fs
Name string `json:"name"` // name of the directory
Dir string `json:"dir"` // abs path of the object
CacheModTime int64 `json:"modTime"` // modification or creation time - IsZero for unknown
CacheSize int64 `json:"size"` // size of directory and contents or -1 if unknown
CacheStorable bool `json:"storable"` // says whether this object can be stored
CacheType string `json:"cacheType"`
CacheTs time.Time `json:"cacheTs"`
CacheHashes map[hash.Type]string // all supported hashes cached
refreshMutex sync.Mutex
}
// NewObject builds one from a generic fs.Object
func NewObject(f *Fs, remote string) *Object {
fullRemote := path.Join(f.Root(), remote)
dir, name := path.Split(fullRemote)
cacheType := objectInCache
parentFs := f.UnWrap()
if f.opt.TempWritePath != "" {
_, err := f.cache.SearchPendingUpload(fullRemote)
if err == nil { // queued for upload
cacheType = objectPendingUpload
parentFs = f.tempFs
fs.Debugf(fullRemote, "pending upload found")
}
}
co := &Object{
ParentFs: parentFs,
CacheFs: f,
Name: cleanPath(name),
Dir: cleanPath(dir),
CacheModTime: time.Now().UnixNano(),
CacheSize: 0,
CacheStorable: false,
CacheType: cacheType,
CacheTs: time.Now(),
}
return co
}
// ObjectFromOriginal builds one from a generic fs.Object
func ObjectFromOriginal(f *Fs, o fs.Object) *Object {
var co *Object
fullRemote := cleanPath(path.Join(f.Root(), o.Remote()))
dir, name := path.Split(fullRemote)
cacheType := objectInCache
parentFs := f.UnWrap()
if f.opt.TempWritePath != "" {
_, err := f.cache.SearchPendingUpload(fullRemote)
if err == nil { // queued for upload
cacheType = objectPendingUpload
parentFs = f.tempFs
fs.Debugf(fullRemote, "pending upload found")
}
}
co = &Object{
ParentFs: parentFs,
CacheFs: f,
Name: cleanPath(name),
Dir: cleanPath(dir),
CacheType: cacheType,
CacheTs: time.Now(),
}
co.updateData(o)
return co
}
func (o *Object) updateData(source fs.Object) {
o.Object = source
o.CacheModTime = source.ModTime().UnixNano()
o.CacheSize = source.Size()
o.CacheStorable = source.Storable()
o.CacheTs = time.Now()
o.CacheHashes = make(map[hash.Type]string)
}
// Fs returns its FS info
func (o *Object) Fs() fs.Info {
return o.CacheFs
}
// String returns a human friendly name for this object
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Remote()
}
// Remote returns the remote path
func (o *Object) Remote() string {
p := path.Join(o.Dir, o.Name)
return o.CacheFs.cleanRootFromPath(p)
}
// abs returns the absolute path to the object
func (o *Object) abs() string {
return path.Join(o.Dir, o.Name)
}
// ModTime returns the cached ModTime
func (o *Object) ModTime() time.Time {
_ = o.refresh()
return time.Unix(0, o.CacheModTime)
}
// Size returns the cached Size
func (o *Object) Size() int64 {
_ = o.refresh()
return o.CacheSize
}
// Storable returns the cached Storable
func (o *Object) Storable() bool {
_ = o.refresh()
return o.CacheStorable
}
// refresh will check if the object info is expired and request the info from source if it is
// all these conditions must be true to ignore a refresh
// 1. cache ts didn't expire yet
// 2. is not pending a notification from the wrapped fs
func (o *Object) refresh() error {
isNotified := o.CacheFs.isNotifiedRemote(o.Remote())
isExpired := time.Now().After(o.CacheTs.Add(time.Duration(o.CacheFs.opt.InfoAge)))
if !isExpired && !isNotified {
return nil
}
return o.refreshFromSource(true)
}
// refreshFromSource requests the original FS for the object in case it comes from a cached entry
func (o *Object) refreshFromSource(force bool) error {
o.refreshMutex.Lock()
defer o.refreshMutex.Unlock()
var err error
var liveObject fs.Object
if o.Object != nil && !force {
return nil
}
if o.isTempFile() {
liveObject, err = o.ParentFs.NewObject(o.Remote())
err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
} else {
liveObject, err = o.CacheFs.Fs.NewObject(o.Remote())
err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
}
if err != nil {
fs.Errorf(o, "error refreshing object in : %v", err)
return err
}
o.updateData(liveObject)
o.persist()
return nil
}
// SetModTime sets the ModTime of this object
func (o *Object) SetModTime(t time.Time) error {
if err := o.refreshFromSource(false); err != nil {
return err
}
err := o.Object.SetModTime(t)
if err != nil {
return err
}
o.CacheModTime = t.UnixNano()
o.persist()
fs.Debugf(o, "updated ModTime: %v", t)
return nil
}
// Open is used to request a specific part of the file using fs.RangeOption
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
if err := o.refreshFromSource(true); err != nil {
return nil, err
}
var err error
cacheReader := NewObjectHandle(o, o.CacheFs)
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
}
_, err = cacheReader.Seek(offset, io.SeekStart)
if err != nil {
return nil, err
}
}
return readers.NewLimitedReadCloser(cacheReader, limit), nil
}
// Update will change the object data
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
if err := o.refreshFromSource(false); err != nil {
return err
}
// pause background uploads if active
if o.CacheFs.opt.TempWritePath != "" {
o.CacheFs.backgroundRunner.pause()
defer o.CacheFs.backgroundRunner.play()
// don't allow started uploads
if o.isTempFile() && o.tempFileStartedUpload() {
return errors.Errorf("%v is currently uploading, can't update", o)
}
}
fs.Debugf(o, "updating object contents with size %v", src.Size())
// FIXME use reliable upload
err := o.Object.Update(in, src, options...)
if err != nil {
fs.Errorf(o, "error updating source: %v", err)
return err
}
// deleting cached chunks and info to be replaced with new ones
_ = o.CacheFs.cache.RemoveObject(o.abs())
// advertise to ChangeNotify if wrapped doesn't do that
o.CacheFs.notifyChangeUpstreamIfNeeded(o.Remote(), fs.EntryObject)
o.CacheModTime = src.ModTime().UnixNano()
o.CacheSize = src.Size()
o.CacheHashes = make(map[hash.Type]string)
o.CacheTs = time.Now()
o.persist()
return nil
}
// Remove deletes the object from both the cache and the source
func (o *Object) Remove() error {
if err := o.refreshFromSource(false); err != nil {
return err
}
// pause background uploads if active
if o.CacheFs.opt.TempWritePath != "" {
o.CacheFs.backgroundRunner.pause()
defer o.CacheFs.backgroundRunner.play()
// don't allow started uploads
if o.isTempFile() && o.tempFileStartedUpload() {
return errors.Errorf("%v is currently uploading, can't delete", o)
}
}
err := o.Object.Remove()
if err != nil {
return err
}
fs.Debugf(o, "removing object")
_ = o.CacheFs.cache.RemoveObject(o.abs())
_ = o.CacheFs.cache.removePendingUpload(o.abs())
parentCd := NewDirectory(o.CacheFs, cleanPath(path.Dir(o.Remote())))
_ = o.CacheFs.cache.ExpireDir(parentCd)
// advertise to ChangeNotify if wrapped doesn't do that
o.CacheFs.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
return nil
}
// Hash requests a hash of the object and stores it in the cache;
// since it might or might not be called, the value is lazily loaded
func (o *Object) Hash(ht hash.Type) (string, error) {
_ = o.refresh()
if o.CacheHashes == nil {
o.CacheHashes = make(map[hash.Type]string)
}
cachedHash, found := o.CacheHashes[ht]
if found {
return cachedHash, nil
}
if err := o.refreshFromSource(false); err != nil {
return "", err
}
liveHash, err := o.Object.Hash(ht)
if err != nil {
return "", err
}
o.CacheHashes[ht] = liveHash
o.persist()
fs.Debugf(o, "object hash cached: %v", liveHash)
return liveHash, nil
}
// persist adds this object to the persistent cache
func (o *Object) persist() *Object {
err := o.CacheFs.cache.AddObject(o)
if err != nil {
fs.Errorf(o, "failed to cache object: %v", err)
}
return o
}
func (o *Object) isTempFile() bool {
_, err := o.CacheFs.cache.SearchPendingUpload(o.abs())
if err != nil {
o.CacheType = objectInCache
return false
}
o.CacheType = objectPendingUpload
return true
}
func (o *Object) tempFileStartedUpload() bool {
started, err := o.CacheFs.cache.SearchPendingUpload(o.abs())
if err != nil {
return false
}
return started
}
// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *Object) UnWrap() fs.Object {
return o.Object
}
var (
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)
)
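Object.Hash above memoizes hashes per hash type: a cached value is returned when present, and the wrapped object is only asked when the map has no entry. A small sketch of that lazy memoization pattern follows, with a plain compute callback standing in for the wrapped backend; the names are illustrative.

package main

import (
    "crypto/md5"
    "encoding/hex"
    "fmt"
)

// hashCache memoizes expensive hash computations keyed by hash-type name.
type hashCache struct {
    known map[string]string
}

// get returns the cached hash for ht, computing and storing it on the first call.
func (h *hashCache) get(ht string, compute func() string) string {
    if h.known == nil {
        h.known = make(map[string]string)
    }
    if v, ok := h.known[ht]; ok {
        return v // already computed by an earlier call
    }
    v := compute()
    h.known[ht] = v
    return v
}

func main() {
    var c hashCache
    sum := c.get("md5", func() string {
        d := md5.Sum([]byte("one content"))
        return hex.EncodeToString(d[:])
    })
    fmt.Println(sum)
}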

backend/cache/plex.go
View File

@@ -1,284 +0,0 @@
// +build !plan9
package cache
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"strings"
"time"
"sync"
"bytes"
"io/ioutil"
"github.com/ncw/rclone/fs"
"github.com/patrickmn/go-cache"
"golang.org/x/net/websocket"
)
const (
// defPlexLoginURL is the default URL for Plex login
defPlexLoginURL = "https://plex.tv/users/sign_in.json"
defPlexNotificationURL = "%s/:/websockets/notifications?X-Plex-Token=%s"
)
// PlaySessionStateNotification is part of the API response of Plex
type PlaySessionStateNotification struct {
SessionKey string `json:"sessionKey"`
GUID string `json:"guid"`
Key string `json:"key"`
ViewOffset int64 `json:"viewOffset"`
State string `json:"state"`
TranscodeSession string `json:"transcodeSession"`
}
// NotificationContainer is part of the API response of Plex
type NotificationContainer struct {
Type string `json:"type"`
Size int `json:"size"`
PlaySessionState []PlaySessionStateNotification `json:"PlaySessionStateNotification"`
}
// PlexNotification is part of the API response of Plex
type PlexNotification struct {
Container NotificationContainer `json:"NotificationContainer"`
}
// plexConnector is managing the cache integration with Plex
type plexConnector struct {
url *url.URL
username string
password string
token string
f *Fs
mu sync.Mutex
running bool
runningMu sync.Mutex
stateCache *cache.Cache
saveToken func(string)
}
// newPlexConnector connects to a Plex server and generates a token
func newPlexConnector(f *Fs, plexURL, username, password string, saveToken func(string)) (*plexConnector, error) {
u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
if err != nil {
return nil, err
}
pc := &plexConnector{
f: f,
url: u,
username: username,
password: password,
token: "",
stateCache: cache.New(time.Hour, time.Minute),
saveToken: saveToken,
}
return pc, nil
}
// newPlexConnectorWithToken connects to a Plex server using an existing token
func newPlexConnectorWithToken(f *Fs, plexURL, token string) (*plexConnector, error) {
u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
if err != nil {
return nil, err
}
pc := &plexConnector{
f: f,
url: u,
token: token,
stateCache: cache.New(time.Hour, time.Minute),
}
pc.listenWebsocket()
return pc, nil
}
func (p *plexConnector) closeWebsocket() {
p.runningMu.Lock()
defer p.runningMu.Unlock()
fs.Infof("plex", "stopped Plex watcher")
p.running = false
}
func (p *plexConnector) listenWebsocket() {
p.runningMu.Lock()
defer p.runningMu.Unlock()
u := strings.Replace(p.url.String(), "http://", "ws://", 1)
u = strings.Replace(u, "https://", "wss://", 1)
conn, err := websocket.Dial(fmt.Sprintf(defPlexNotificationURL, strings.TrimRight(u, "/"), p.token),
"", "http://localhost")
if err != nil {
fs.Errorf("plex", "%v", err)
return
}
p.running = true
go func() {
for {
if !p.isConnected() {
break
}
notif := &PlexNotification{}
err := websocket.JSON.Receive(conn, notif)
if err != nil {
fs.Debugf("plex", "%v", err)
p.closeWebsocket()
break
}
// we're only interested in play events
if notif.Container.Type == "playing" {
// we loop through each of them
for _, v := range notif.Container.PlaySessionState {
// event type of playing
if v.State == "playing" {
// if it's not cached get the details and cache them
if _, found := p.stateCache.Get(v.Key); !found {
req, err := http.NewRequest("GET", fmt.Sprintf("%s%s", p.url.String(), v.Key), nil)
if err != nil {
continue
}
p.fillDefaultHeaders(req)
resp, err := http.DefaultClient.Do(req)
if err != nil {
continue
}
var data []byte
data, err = ioutil.ReadAll(resp.Body)
if err != nil {
continue
}
p.stateCache.Set(v.Key, data, cache.DefaultExpiration)
}
} else if v.State == "stopped" {
p.stateCache.Delete(v.Key)
}
}
}
}
}()
}
// fillDefaultHeaders will add common headers to requests
func (p *plexConnector) fillDefaultHeaders(req *http.Request) {
req.Header.Add("X-Plex-Client-Identifier", fmt.Sprintf("rclone (%v)", p.f.String()))
req.Header.Add("X-Plex-Product", fmt.Sprintf("rclone (%v)", p.f.Name()))
req.Header.Add("X-Plex-Version", fs.Version)
req.Header.Add("Accept", "application/json")
if p.token != "" {
req.Header.Add("X-Plex-Token", p.token)
}
}
// authenticate will generate a token based on a username/password
func (p *plexConnector) authenticate() error {
p.mu.Lock()
defer p.mu.Unlock()
form := url.Values{}
form.Set("user[login]", p.username)
form.Add("user[password]", p.password)
req, err := http.NewRequest("POST", defPlexLoginURL, strings.NewReader(form.Encode()))
if err != nil {
return err
}
p.fillDefaultHeaders(req)
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
var data map[string]interface{}
err = json.NewDecoder(resp.Body).Decode(&data)
if err != nil {
return fmt.Errorf("failed to obtain token: %v", err)
}
tokenGen, ok := get(data, "user", "authToken")
if !ok {
return fmt.Errorf("failed to obtain token: %v", data)
}
token, ok := tokenGen.(string)
if !ok {
return fmt.Errorf("failed to obtain token: %v", data)
}
p.token = token
if p.token != "" {
if p.saveToken != nil {
p.saveToken(p.token)
}
fs.Infof(p.f.Name(), "Connected to Plex server: %v", p.url.String())
}
p.listenWebsocket()
return nil
}
// isConnected reports whether the Plex websocket watcher is currently running
func (p *plexConnector) isConnected() bool {
p.runningMu.Lock()
defer p.runningMu.Unlock()
return p.running
}
// isConfigured checks if this rclone is configured to use a Plex server
func (p *plexConnector) isConfigured() bool {
return p.url != nil
}
func (p *plexConnector) isPlaying(co *Object) bool {
var err error
if !p.isConnected() {
p.listenWebsocket()
}
remote := co.Remote()
if cr, yes := p.f.isWrappedByCrypt(); yes {
remote, err = cr.DecryptFileName(co.Remote())
if err != nil {
fs.Debugf("plex", "can not decrypt wrapped file: %v", err)
return false
}
}
isPlaying := false
for _, v := range p.stateCache.Items() {
if bytes.Contains(v.Object.([]byte), []byte(remote)) {
isPlaying = true
break
}
}
return isPlaying
}
// adapted from: https://stackoverflow.com/a/28878037 (credit)
func get(m interface{}, path ...interface{}) (interface{}, bool) {
for _, p := range path {
switch idx := p.(type) {
case string:
if mm, ok := m.(map[string]interface{}); ok {
if val, found := mm[idx]; found {
m = val
continue
}
}
return nil, false
case int:
if mm, ok := m.([]interface{}); ok {
if len(mm) > idx {
m = mm[idx]
continue
}
}
return nil, false
}
}
return m, true
}
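As a quick illustration of the helper above, get walks a decoded JSON document by alternating map keys and slice indexes. The document literal is hypothetical test data, not a real Plex response:

    var doc map[string]interface{}
    _ = json.Unmarshal([]byte(`{"user":{"authToken":"abc","ids":[10,20]}}`), &doc)
    if v, ok := get(doc, "user", "ids", 1); ok {
        fmt.Println(v) // prints 20 (encoding/json decodes JSON numbers as float64)
    }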

View File

@@ -1,98 +0,0 @@
// +build !plan9
package cache
import (
"strconv"
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/patrickmn/go-cache"
"github.com/pkg/errors"
)
// Memory is a wrapper of transient storage for a go-cache store
type Memory struct {
db *cache.Cache
}
// NewMemory builds this cache storage
// defaultExpiration will set the expiry time of chunks in this storage
func NewMemory(defaultExpiration time.Duration) *Memory {
mem := &Memory{}
err := mem.Connect(defaultExpiration)
if err != nil {
fs.Errorf("cache", "can't open ram connection: %v", err)
}
return mem
}
// Connect will create a connection for the storage
func (m *Memory) Connect(defaultExpiration time.Duration) error {
m.db = cache.New(defaultExpiration, -1)
return nil
}
// HasChunk confirms the existence of a single chunk of an object
func (m *Memory) HasChunk(cachedObject *Object, offset int64) bool {
key := cachedObject.abs() + "-" + strconv.FormatInt(offset, 10)
_, found := m.db.Get(key)
return found
}
// GetChunk will retrieve a single chunk which belongs to a cached object or an error if it doesn't find it
func (m *Memory) GetChunk(cachedObject *Object, offset int64) ([]byte, error) {
key := cachedObject.abs() + "-" + strconv.FormatInt(offset, 10)
var data []byte
if x, found := m.db.Get(key); found {
data = x.([]byte)
return data, nil
}
return nil, errors.Errorf("couldn't get cached object data at offset %v", offset)
}
// AddChunk adds a new chunk of a cached object
func (m *Memory) AddChunk(fp string, data []byte, offset int64) error {
return m.AddChunkAhead(fp, data, offset, time.Second)
}
// AddChunkAhead adds a new chunk of a cached object
func (m *Memory) AddChunkAhead(fp string, data []byte, offset int64, t time.Duration) error {
key := fp + "-" + strconv.FormatInt(offset, 10)
m.db.Set(key, data, cache.DefaultExpiration)
return nil
}
// CleanChunksByAge will cleanup on a cron basis
func (m *Memory) CleanChunksByAge(chunkAge time.Duration) {
m.db.DeleteExpired()
}
// CleanChunksByNeed will cleanup chunks after the FS passes a specific chunk
func (m *Memory) CleanChunksByNeed(offset int64) {
var items map[string]cache.Item
items = m.db.Items()
for key := range items {
sepIdx := strings.LastIndex(key, "-")
keyOffset, err := strconv.ParseInt(key[sepIdx+1:], 10, 64)
if err != nil {
fs.Errorf("cache", "couldn't parse offset entry %v", key)
continue
}
if keyOffset < offset {
m.db.Delete(key)
}
}
}
// CleanChunksBySize will cleanup chunks after the total size passes a certain point
func (m *Memory) CleanChunksBySize(maxSize int64) {
// NOOP
}
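A minimal sketch of how this transient store is keyed and pruned. The remote path and chunk size are made up for illustration; real callers derive offsets from the configured cache chunk size:

    m := NewMemory(time.Hour)
    const chunkSize = 5 * 1024 * 1024
    _ = m.AddChunkAhead("remote:bucket/file.bin", []byte("data at 0"), 0, time.Second)
    _ = m.AddChunkAhead("remote:bucket/file.bin", []byte("data at 5M"), chunkSize, time.Second)
    // Once the reader has moved past offset 0, earlier chunks can be dropped:
    m.CleanChunksByNeed(chunkSize) // deletes the key ending in "-0", keeps the one at "-5242880"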

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,781 +0,0 @@
// Package crypt provides wrappers for Fs and Object which implement encryption
package crypt
import (
"fmt"
"io"
"path"
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/hash"
"github.com/pkg/errors"
)
// Globals
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "crypt",
Description: "Encrypt/Decrypt a remote",
NewFs: NewFs,
Options: []fs.Option{{
Name: "remote",
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Required: true,
}, {
Name: "filename_encryption",
Help: "How to encrypt the filenames.",
Default: "standard",
Examples: []fs.OptionExample{
{
Value: "off",
Help: "Don't encrypt the file names. Adds a \".bin\" extension only.",
}, {
Value: "standard",
Help: "Encrypt the filenames see the docs for the details.",
}, {
Value: "obfuscate",
Help: "Very simple filename obfuscation.",
},
},
}, {
Name: "directory_name_encryption",
Help: "Option to either encrypt directory names or leave them intact.",
Default: true,
Examples: []fs.OptionExample{
{
Value: "true",
Help: "Encrypt directory names.",
},
{
Value: "false",
Help: "Don't encrypt directory names, leave them intact.",
},
},
}, {
Name: "password",
Help: "Password or pass phrase for encryption.",
IsPassword: true,
}, {
Name: "password2",
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
IsPassword: true,
}, {
Name: "show_mapping",
Help: "For all files listed show how the names encrypt.",
Default: false,
Hide: fs.OptionHideConfigurator,
Advanced: true,
}},
})
}
// newCipherForConfig constructs a Cipher from the given Options
func newCipherForConfig(opt *Options) (Cipher, error) {
mode, err := NewNameEncryptionMode(opt.FilenameEncryption)
if err != nil {
return nil, err
}
if opt.Password == "" {
return nil, errors.New("password not set in config file")
}
password, err := obscure.Reveal(opt.Password)
if err != nil {
return nil, errors.Wrap(err, "failed to decrypt password")
}
var salt string
if opt.Password2 != "" {
salt, err = obscure.Reveal(opt.Password2)
if err != nil {
return nil, errors.Wrap(err, "failed to decrypt password2")
}
}
cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption)
if err != nil {
return nil, errors.Wrap(err, "failed to make cipher")
}
return cipher, nil
}
// NewCipher constructs a Cipher for the given config
func NewCipher(m configmap.Mapper) (Cipher, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
return newCipherForConfig(opt)
}
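A minimal sketch of driving NewCipher directly, for callers that only need file name encryption. It assumes configmap.Simple (the map[string]string Mapper used elsewhere in rclone) is available and that configstruct leaves unset keys at their zero values; the password value is illustrative:

    m := configmap.Simple{
        "filename_encryption":       "standard",
        "directory_name_encryption": "true",
        "password":                  obscure.MustObscure("potato"), // stored obscured, as in rclone.conf
    }
    cipher, err := NewCipher(m)
    if err != nil {
        panic(err) // sketch only - real callers return the error
    }
    fmt.Println(cipher.EncryptFileName("hello.txt")) // opaque but deterministic, so lookups still work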
// NewFs constructs an Fs from the path, container:path
func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
cipher, err := newCipherForConfig(opt)
if err != nil {
return nil, err
}
remote := opt.Remote
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
}
// Look for a file first
remotePath := path.Join(remote, cipher.EncryptFileName(rpath))
wrappedFs, err := fs.NewFs(remotePath)
// if that didn't produce a file, look for a directory
if err != fs.ErrorIsFile {
remotePath = path.Join(remote, cipher.EncryptDirName(rpath))
wrappedFs, err = fs.NewFs(remotePath)
}
if err != fs.ErrorIsFile && err != nil {
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remotePath)
}
f := &Fs{
Fs: wrappedFs,
name: name,
root: rpath,
opt: *opt,
cipher: cipher,
}
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
f.features = (&fs.Features{
CaseInsensitive: cipher.NameEncryptionMode() == NameEncryptionOff,
DuplicateFiles: true,
ReadMimeType: false, // MimeTypes not supported with crypt
WriteMimeType: false,
BucketBased: true,
CanHaveEmptyDirectories: true,
}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
doChangeNotify := wrappedFs.Features().ChangeNotify
if doChangeNotify != nil {
f.features.ChangeNotify = func(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
decrypted, err := f.DecryptFileName(path)
if err != nil {
fs.Logf(f, "ChangeNotify was unable to decrypt %q: %s", path, err)
return
}
notifyFunc(decrypted, entryType)
}
return doChangeNotify(wrappedNotifyFunc, pollInterval)
}
}
return f, err
}
// Options defines the configuration for this backend
type Options struct {
Remote string `config:"remote"`
FilenameEncryption string `config:"filename_encryption"`
DirectoryNameEncryption bool `config:"directory_name_encryption"`
Password string `config:"password"`
Password2 string `config:"password2"`
ShowMapping bool `config:"show_mapping"`
}
// Fs represents a wrapped fs.Fs
type Fs struct {
fs.Fs
name string
root string
opt Options
features *fs.Features // optional features
cipher Cipher
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("Encrypted drive '%s:%s'", f.name, f.root)
}
// add wraps obj in a crypt Object and appends it to entries, skipping file names that fail to decrypt.
func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
remote := obj.Remote()
decryptedRemote, err := f.cipher.DecryptFileName(remote)
if err != nil {
fs.Debugf(remote, "Skipping undecryptable file name: %v", err)
return
}
if f.opt.ShowMapping {
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
}
*entries = append(*entries, f.newObject(obj))
}
// addDir wraps dir in a crypt Directory and appends it to entries, skipping directory names that fail to decrypt.
func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) {
remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote)
if err != nil {
fs.Debugf(remote, "Skipping undecryptable dir name: %v", err)
return
}
if f.opt.ShowMapping {
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
}
*entries = append(*entries, f.newDir(dir))
}
// encryptEntries wraps the directory entries in crypt types, decrypting their names. It filters entries in place, returning the result as newEntries.
func (f *Fs) encryptEntries(entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
newEntries = entries[:0] // in place filter
for _, entry := range entries {
switch x := entry.(type) {
case fs.Object:
f.add(&newEntries, x)
case fs.Directory:
f.addDir(&newEntries, x)
default:
return nil, errors.Errorf("Unknown object type %T", entry)
}
}
return newEntries, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
entries, err = f.Fs.List(f.cipher.EncryptDirName(dir))
if err != nil {
return nil, err
}
return f.encryptEntries(entries)
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
return f.Fs.Features().ListR(f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error {
newEntries, err := f.encryptEntries(entries)
if err != nil {
return err
}
return callback(newEntries)
})
}
// NewObject finds the Object at remote.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
o, err := f.Fs.NewObject(f.cipher.EncryptFileName(remote))
if err != nil {
return nil, err
}
return f.newObject(o), nil
}
type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
// put implements Put or PutStream
func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
// Encrypt the data into wrappedIn
wrappedIn, err := f.cipher.EncryptData(in)
if err != nil {
return nil, err
}
// Find a hash the destination supports to compute a hash of
// the encrypted data
ht := f.Fs.Hashes().GetOne()
var hasher *hash.MultiHasher
if ht != hash.None {
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
if err != nil {
return nil, err
}
// unwrap the accounting
var wrap accounting.WrapFn
wrappedIn, wrap = accounting.UnWrap(wrappedIn)
// add the hasher
wrappedIn = io.TeeReader(wrappedIn, hasher)
// wrap the accounting back on
wrappedIn = wrap(wrappedIn)
}
// Transfer the data
o, err := put(wrappedIn, f.newObjectInfo(src), options...)
if err != nil {
return nil, err
}
// Check the hashes of the encrypted data if we were comparing them
if ht != hash.None && hasher != nil {
srcHash := hasher.Sums()[ht]
var dstHash string
dstHash, err = o.Hash(ht)
if err != nil {
return nil, errors.Wrap(err, "failed to read destination hash")
}
if srcHash != "" && dstHash != "" && srcHash != dstHash {
// remove object
err = o.Remove()
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
}
}
return f.newObject(o), nil
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.put(in, src, options, f.Fs.Put)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.put(in, src, options, f.Fs.Features().PutStream)
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.None)
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(dir string) error {
return f.Fs.Mkdir(f.cipher.EncryptDirName(dir))
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(dir string) error {
return f.Fs.Rmdir(f.cipher.EncryptDirName(dir))
}
// Purge all files in the root and the root directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge() error {
do := f.Fs.Features().Purge
if do == nil {
return fs.ErrorCantPurge
}
return do()
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
do := f.Fs.Features().Copy
if do == nil {
return nil, fs.ErrorCantCopy
}
o, ok := src.(*Object)
if !ok {
return nil, fs.ErrorCantCopy
}
oResult, err := do(o.Object, f.cipher.EncryptFileName(remote))
if err != nil {
return nil, err
}
return f.newObject(oResult), nil
}
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
do := f.Fs.Features().Move
if do == nil {
return nil, fs.ErrorCantMove
}
o, ok := src.(*Object)
if !ok {
return nil, fs.ErrorCantMove
}
oResult, err := do(o.Object, f.cipher.EncryptFileName(remote))
if err != nil {
return nil, err
}
return f.newObject(oResult), nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
do := f.Fs.Features().DirMove
if do == nil {
return fs.ErrorCantDirMove
}
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
return do(srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote))
}
// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
do := f.Fs.Features().PutUnchecked
if do == nil {
return nil, errors.New("can't PutUnchecked")
}
wrappedIn, err := f.cipher.EncryptData(in)
if err != nil {
return nil, err
}
o, err := do(wrappedIn, f.newObjectInfo(src))
if err != nil {
return nil, err
}
return f.newObject(o), nil
}
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
func (f *Fs) CleanUp() error {
do := f.Fs.Features().CleanUp
if do == nil {
return errors.New("can't CleanUp")
}
return do()
}
// About gets quota information from the Fs
func (f *Fs) About() (*fs.Usage, error) {
do := f.Fs.Features().About
if do == nil {
return nil, errors.New("About not supported")
}
return do()
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.Fs
}
// EncryptFileName returns an encrypted file name
func (f *Fs) EncryptFileName(fileName string) string {
return f.cipher.EncryptFileName(fileName)
}
// DecryptFileName returns a decrypted file name
func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
return f.cipher.DecryptFileName(encryptedFileName)
}
// ComputeHash takes the nonce from o, and encrypts the contents of
// src with it, and calculates the hash given by HashType on the fly
//
// Note that we break lots of encapsulation in this function.
func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
// Read the nonce - opening the file is sufficient to read the nonce in
// use a limited read so we only read the header
in, err := o.Object.Open(&fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
if err != nil {
return "", errors.Wrap(err, "failed to open object to read nonce")
}
d, err := f.cipher.(*cipher).newDecrypter(in)
if err != nil {
_ = in.Close()
return "", errors.Wrap(err, "failed to open object to read nonce")
}
nonce := d.nonce
// fs.Debugf(o, "Read nonce % 2x", nonce)
// Check nonce isn't all zeros
isZero := true
for i := range nonce {
if nonce[i] != 0 {
isZero = false
}
}
if isZero {
fs.Errorf(o, "empty nonce read")
}
// Close d (and hence in) once we have read the nonce
err = d.Close()
if err != nil {
return "", errors.Wrap(err, "failed to close nonce read")
}
// Open the src for input
in, err = src.Open()
if err != nil {
return "", errors.Wrap(err, "failed to open src")
}
defer fs.CheckClose(in, &err)
// Now encrypt the src with the nonce
out, err := f.cipher.(*cipher).newEncrypter(in, &nonce)
if err != nil {
return "", errors.Wrap(err, "failed to make encrypter")
}
// pipe into hash
m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
if err != nil {
return "", errors.Wrap(err, "failed to make hasher")
}
_, err = io.Copy(m, out)
if err != nil {
return "", errors.Wrap(err, "failed to hash data")
}
return m.Sums()[hashType], nil
}
// Object describes a wrapped fs.Object for being read from the Fs
//
// This decrypts the remote name and decrypts the data
type Object struct {
fs.Object
f *Fs
}
func (f *Fs) newObject(o fs.Object) *Object {
return &Object{
Object: o,
f: f,
}
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.f
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Remote()
}
// Remote returns the remote path
func (o *Object) Remote() string {
remote := o.Object.Remote()
decryptedName, err := o.f.cipher.DecryptFileName(remote)
if err != nil {
fs.Debugf(remote, "Undecryptable file name: %v", err)
return remote
}
return decryptedName
}
// Size returns the size of the file
func (o *Object) Size() int64 {
size, err := o.f.cipher.DecryptedSize(o.Object.Size())
if err != nil {
fs.Debugf(o, "Bad size for decrypt: %v", err)
}
return size
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ht hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// UnWrap returns the wrapped Object
func (o *Object) UnWrap() fs.Object {
return o.Object
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
var openOptions []fs.OpenOption
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
default:
// pass on Options to underlying open if appropriate
openOptions = append(openOptions, option)
}
}
rc, err = o.f.cipher.DecryptDataSeek(func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
if underlyingOffset == 0 && underlyingLimit < 0 {
// Open with no seek
return o.Object.Open(openOptions...)
}
// Open stream with a range of underlyingOffset, underlyingLimit
end := int64(-1)
if underlyingLimit >= 0 {
end = underlyingOffset + underlyingLimit - 1
if end >= o.Object.Size() {
end = -1
}
}
newOpenOptions := append(openOptions, &fs.RangeOption{Start: underlyingOffset, End: end})
return o.Object.Open(newOpenOptions...)
}, offset, limit)
if err != nil {
return nil, err
}
return rc, nil
}
// Update in to the object with the modTime given of the given size
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
update := func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return o.Object, o.Object.Update(in, src, options...)
}
_, err := o.f.put(in, src, options, update)
return err
}
// newDir returns a dir with the Name decrypted
func (f *Fs) newDir(dir fs.Directory) fs.Directory {
newDir := fs.NewDirCopy(dir)
remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote)
if err != nil {
fs.Debugf(remote, "Undecryptable dir name: %v", err)
} else {
newDir.SetRemote(decryptedRemote)
}
return newDir
}
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source
//
// This encrypts the remote name and adjusts the size
type ObjectInfo struct {
fs.ObjectInfo
f *Fs
}
func (f *Fs) newObjectInfo(src fs.ObjectInfo) *ObjectInfo {
return &ObjectInfo{
ObjectInfo: src,
f: f,
}
}
// Fs returns read only access to the Fs that this object is part of
func (o *ObjectInfo) Fs() fs.Info {
return o.f
}
// Remote returns the remote path
func (o *ObjectInfo) Remote() string {
return o.f.cipher.EncryptFileName(o.ObjectInfo.Remote())
}
// Size returns the size of the file
func (o *ObjectInfo) Size() int64 {
size := o.ObjectInfo.Size()
if size < 0 {
return size
}
return o.f.cipher.EncryptedSize(size)
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *ObjectInfo) Hash(hash hash.Type) (string, error) {
return "", nil
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.ObjectInfo = (*ObjectInfo)(nil)
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)
)

View File

@@ -1,62 +0,0 @@
// Test Crypt filesystem interface
package crypt_test
import (
"os"
"path/filepath"
"testing"
"github.com/ncw/rclone/backend/crypt"
_ "github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fstest/fstests"
)
// TestStandard runs integration tests against the remote
func TestStandard(t *testing.T) {
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
name := "TestCrypt"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*crypt.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "crypt"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"},
},
})
}
// TestOff runs integration tests against the remote
func TestOff(t *testing.T) {
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-off")
name := "TestCrypt2"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*crypt.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "crypt"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
{Name: name, Key: "filename_encryption", Value: "off"},
},
})
}
// TestObfuscate runs integration tests against the remote
func TestObfuscate(t *testing.T) {
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
name := "TestCrypt3"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*crypt.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "crypt"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
{Name: name, Key: "filename_encryption", Value: "obfuscate"},
},
SkipBadWindowsCharacters: true,
})
}

View File

@@ -1,63 +0,0 @@
// Package pkcs7 implements PKCS#7 padding
//
// This is a standard way of encoding variable length buffers into
// buffers which are a multiple of an underlying crypto block size.
package pkcs7
import "github.com/pkg/errors"
// Errors Unpad can return
var (
ErrorPaddingNotFound = errors.New("Bad PKCS#7 padding - not padded")
ErrorPaddingNotAMultiple = errors.New("Bad PKCS#7 padding - not a multiple of blocksize")
ErrorPaddingTooLong = errors.New("Bad PKCS#7 padding - too long")
ErrorPaddingTooShort = errors.New("Bad PKCS#7 padding - too short")
ErrorPaddingNotAllTheSame = errors.New("Bad PKCS#7 padding - not all the same")
)
// Pad buf using PKCS#7 to a multiple of n.
//
// Appends the padding to buf - make a copy of it first if you don't
// want it modified.
func Pad(n int, buf []byte) []byte {
if n <= 1 || n >= 256 {
panic("bad multiple")
}
length := len(buf)
padding := n - (length % n)
for i := 0; i < padding; i++ {
buf = append(buf, byte(padding))
}
if (len(buf) % n) != 0 {
panic("padding failed")
}
return buf
}
// Unpad buf using PKCS#7 from a multiple of n returning a slice of
// buf or an error if malformed.
func Unpad(n int, buf []byte) ([]byte, error) {
if n <= 1 || n >= 256 {
panic("bad multiple")
}
length := len(buf)
if length == 0 {
return nil, ErrorPaddingNotFound
}
if (length % n) != 0 {
return nil, ErrorPaddingNotAMultiple
}
padding := int(buf[length-1])
if padding > n {
return nil, ErrorPaddingTooLong
}
if padding == 0 {
return nil, ErrorPaddingTooShort
}
for i := 0; i < padding; i++ {
if buf[length-1-i] != byte(padding) {
return nil, ErrorPaddingNotAllTheSame
}
}
return buf[:length-padding], nil
}
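A tiny round trip of the two functions above, using a value from the test table that follows:

    padded := Pad(8, []byte("12345")) // "12345\x03\x03\x03" - now a multiple of 8 bytes
    orig, err := Unpad(8, padded)     // orig == []byte("12345"), err == nil
    if err != nil {
        panic(err) // cannot happen for a buffer produced by Pad with the same n
    }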

View File

@@ -1,73 +0,0 @@
package pkcs7
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func TestPad(t *testing.T) {
for _, test := range []struct {
n int
in string
expected string
}{
{8, "", "\x08\x08\x08\x08\x08\x08\x08\x08"},
{8, "1", "1\x07\x07\x07\x07\x07\x07\x07"},
{8, "12", "12\x06\x06\x06\x06\x06\x06"},
{8, "123", "123\x05\x05\x05\x05\x05"},
{8, "1234", "1234\x04\x04\x04\x04"},
{8, "12345", "12345\x03\x03\x03"},
{8, "123456", "123456\x02\x02"},
{8, "1234567", "1234567\x01"},
{8, "abcdefgh", "abcdefgh\x08\x08\x08\x08\x08\x08\x08\x08"},
{8, "abcdefgh1", "abcdefgh1\x07\x07\x07\x07\x07\x07\x07"},
{8, "abcdefgh12", "abcdefgh12\x06\x06\x06\x06\x06\x06"},
{8, "abcdefgh123", "abcdefgh123\x05\x05\x05\x05\x05"},
{8, "abcdefgh1234", "abcdefgh1234\x04\x04\x04\x04"},
{8, "abcdefgh12345", "abcdefgh12345\x03\x03\x03"},
{8, "abcdefgh123456", "abcdefgh123456\x02\x02"},
{8, "abcdefgh1234567", "abcdefgh1234567\x01"},
{8, "abcdefgh12345678", "abcdefgh12345678\x08\x08\x08\x08\x08\x08\x08\x08"},
{16, "", "\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10"},
{16, "a", "a\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f\x0f"},
} {
actual := Pad(test.n, []byte(test.in))
assert.Equal(t, test.expected, string(actual), fmt.Sprintf("Pad %d %q", test.n, test.in))
recovered, err := Unpad(test.n, actual)
assert.NoError(t, err)
assert.Equal(t, []byte(test.in), recovered, fmt.Sprintf("Unpad %d %q", test.n, test.in))
}
assert.Panics(t, func() { Pad(1, []byte("")) }, "bad multiple")
assert.Panics(t, func() { Pad(256, []byte("")) }, "bad multiple")
}
func TestUnpad(t *testing.T) {
// We've tested the OK decoding in TestPad, now test the error cases
for _, test := range []struct {
n int
in string
err error
}{
{8, "", ErrorPaddingNotFound},
{8, "1", ErrorPaddingNotAMultiple},
{8, "12", ErrorPaddingNotAMultiple},
{8, "123", ErrorPaddingNotAMultiple},
{8, "1234", ErrorPaddingNotAMultiple},
{8, "12345", ErrorPaddingNotAMultiple},
{8, "123456", ErrorPaddingNotAMultiple},
{8, "1234567", ErrorPaddingNotAMultiple},
{8, "1234567\xFF", ErrorPaddingTooLong},
{8, "1234567\x09", ErrorPaddingTooLong},
{8, "1234567\x00", ErrorPaddingTooShort},
{8, "123456\x01\x02", ErrorPaddingNotAllTheSame},
{8, "\x07\x08\x08\x08\x08\x08\x08\x08", ErrorPaddingNotAllTheSame},
} {
result, actualErr := Unpad(test.n, []byte(test.in))
assert.Equal(t, test.err, actualErr, fmt.Sprintf("Unpad %d %q", test.n, test.in))
assert.Equal(t, result, []byte(nil))
}
assert.Panics(t, func() { _, _ = Unpad(1, []byte("")) }, "bad multiple")
assert.Panics(t, func() { _, _ = Unpad(256, []byte("")) }, "bad multiple")
}

File diff suppressed because it is too large

View File

@@ -1,119 +0,0 @@
package drive
import (
"encoding/json"
"testing"
"google.golang.org/api/drive/v3"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
)
const exampleExportFormats = `{
"application/vnd.google-apps.document": [
"application/rtf",
"application/vnd.oasis.opendocument.text",
"text/html",
"application/pdf",
"application/epub+zip",
"application/zip",
"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
"text/plain"
],
"application/vnd.google-apps.spreadsheet": [
"application/x-vnd.oasis.opendocument.spreadsheet",
"text/tab-separated-values",
"application/pdf",
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"text/csv",
"application/zip",
"application/vnd.oasis.opendocument.spreadsheet"
],
"application/vnd.google-apps.jam": [
"application/pdf"
],
"application/vnd.google-apps.script": [
"application/vnd.google-apps.script+json"
],
"application/vnd.google-apps.presentation": [
"application/vnd.oasis.opendocument.presentation",
"application/pdf",
"application/vnd.openxmlformats-officedocument.presentationml.presentation",
"text/plain"
],
"application/vnd.google-apps.form": [
"application/zip"
],
"application/vnd.google-apps.drawing": [
"image/svg+xml",
"image/png",
"application/pdf",
"image/jpeg"
]
}`
// Load the example export formats into exportFormats for testing
func TestInternalLoadExampleExportFormats(t *testing.T) {
exportFormatsOnce.Do(func() {})
assert.NoError(t, json.Unmarshal([]byte(exampleExportFormats), &_exportFormats))
}
func TestInternalParseExtensions(t *testing.T) {
for _, test := range []struct {
in string
want []string
wantErr error
}{
{"doc", []string{"doc"}, nil},
{" docx ,XLSX, pptx,svg", []string{"docx", "xlsx", "pptx", "svg"}, nil},
{"docx,svg,Docx", []string{"docx", "svg"}, nil},
{"docx,potato,docx", []string{"docx"}, errors.New(`couldn't find mime type for extension "potato"`)},
} {
f := new(Fs)
gotErr := f.parseExtensions(test.in)
if test.wantErr == nil {
assert.NoError(t, gotErr)
} else {
assert.EqualError(t, gotErr, test.wantErr.Error())
}
assert.Equal(t, test.want, f.extensions)
}
// Test it is appending
f := new(Fs)
assert.Nil(t, f.parseExtensions("docx,svg"))
assert.Nil(t, f.parseExtensions("docx,svg,xlsx"))
assert.Equal(t, []string{"docx", "svg", "xlsx"}, f.extensions)
}
func TestInternalFindExportFormat(t *testing.T) {
item := &drive.File{
Name: "file",
MimeType: "application/vnd.google-apps.document",
}
for _, test := range []struct {
extensions []string
wantExtension string
wantMimeType string
}{
{[]string{}, "", ""},
{[]string{"pdf"}, "pdf", "application/pdf"},
{[]string{"pdf", "rtf", "xls"}, "pdf", "application/pdf"},
{[]string{"xls", "rtf", "pdf"}, "rtf", "application/rtf"},
{[]string{"xls", "csv", "svg"}, "", ""},
} {
f := new(Fs)
f.extensions = test.extensions
gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(item)
assert.Equal(t, test.wantExtension, gotExtension)
if test.wantExtension != "" {
assert.Equal(t, item.Name+"."+gotExtension, gotFilename)
} else {
assert.Equal(t, "", gotFilename)
}
assert.Equal(t, test.wantMimeType, gotMimeType)
assert.Equal(t, true, gotIsDocument)
}
}

View File

@@ -1,17 +0,0 @@
// Test Drive filesystem interface
package drive_test
import (
"testing"
"github.com/ncw/rclone/backend/drive"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestDrive:",
NilObject: (*drive.Object)(nil),
})
}

View File

@@ -1,249 +0,0 @@
// Upload for drive
//
// Docs
// Resumable upload: https://developers.google.com/drive/web/manage-uploads#resumable
// Best practices: https://developers.google.com/drive/web/manage-uploads#best-practices
// Files insert: https://developers.google.com/drive/v2/reference/files/insert
// Files update: https://developers.google.com/drive/v2/reference/files/update
//
// This contains code adapted from google.golang.org/api (C) the GO AUTHORS
package drive
import (
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"regexp"
"strconv"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
"google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi"
)
const (
// statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete.
statusResumeIncomplete = 308
)
// resumableUpload is used by the generated APIs to provide resumable uploads.
// It is not used by developers directly.
type resumableUpload struct {
f *Fs
remote string
// URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
URI string
// Media is the object being uploaded.
Media io.Reader
// MediaType defines the media type, e.g. "image/jpeg".
MediaType string
// ContentLength is the full size of the object being uploaded.
ContentLength int64
// Return value
ret *drive.File
}
// Upload the io.Reader in of size bytes with contentType and info
func (f *Fs) Upload(in io.Reader, size int64, contentType string, fileID string, info *drive.File, remote string) (*drive.File, error) {
params := make(url.Values)
params.Set("alt", "json")
params.Set("uploadType", "resumable")
params.Set("fields", partialFields)
if f.isTeamDrive {
params.Set("supportsTeamDrives", "true")
}
if f.opt.KeepRevisionForever {
params.Set("keepRevisionForever", "true")
}
urls := "https://www.googleapis.com/upload/drive/v3/files"
method := "POST"
if fileID != "" {
params.Set("setModifiedDate", "true")
urls += "/{fileId}"
method = "PATCH"
}
urls += "?" + params.Encode()
var res *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
var body io.Reader
body, err = googleapi.WithoutDataWrapper.JSONReader(info)
if err != nil {
return false, err
}
var req *http.Request
req, err = http.NewRequest(method, urls, body)
if err != nil {
return false, err
}
googleapi.Expand(req.URL, map[string]string{
"fileId": fileID,
})
req.Header.Set("Content-Type", "application/json; charset=UTF-8")
req.Header.Set("X-Upload-Content-Type", contentType)
req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
res, err = f.client.Do(req)
if err == nil {
defer googleapi.CloseBody(res)
err = googleapi.CheckResponse(res)
}
return shouldRetry(err)
})
if err != nil {
return nil, err
}
loc := res.Header.Get("Location")
rx := &resumableUpload{
f: f,
remote: remote,
URI: loc,
Media: in,
MediaType: contentType,
ContentLength: size,
}
return rx.Upload()
}
// Make an http.Request for the range passed in
func (rx *resumableUpload) makeRequest(start int64, body io.ReadSeeker, reqSize int64) *http.Request {
req, _ := http.NewRequest("POST", rx.URI, body)
req.ContentLength = reqSize
if reqSize != 0 {
req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
} else {
req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", rx.ContentLength))
}
req.Header.Set("Content-Type", rx.MediaType)
return req
}
// rangeRE matches the transfer status response from the server. $1 is
// the last byte index uploaded.
var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)
// Query drive for the amount transferred so far
//
// If error is nil, then start should be valid
func (rx *resumableUpload) transferStatus() (start int64, err error) {
req := rx.makeRequest(0, nil, 0)
res, err := rx.f.client.Do(req)
if err != nil {
return 0, err
}
defer googleapi.CloseBody(res)
if res.StatusCode == http.StatusCreated || res.StatusCode == http.StatusOK {
return rx.ContentLength, nil
}
if res.StatusCode != statusResumeIncomplete {
err = googleapi.CheckResponse(res)
if err != nil {
return 0, err
}
return 0, errors.Errorf("unexpected http return code %v", res.StatusCode)
}
Range := res.Header.Get("Range")
if m := rangeRE.FindStringSubmatch(Range); len(m) == 2 {
start, err = strconv.ParseInt(m[1], 10, 64)
if err == nil {
return start, nil
}
}
return 0, errors.Errorf("unable to parse range %q", Range)
}
// Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
_, _ = chunk.Seek(0, io.SeekStart)
req := rx.makeRequest(start, chunk, chunkSize)
res, err := rx.f.client.Do(req)
if err != nil {
return 599, err
}
defer googleapi.CloseBody(res)
if res.StatusCode == statusResumeIncomplete {
return res.StatusCode, nil
}
err = googleapi.CheckResponse(res)
if err != nil {
return res.StatusCode, err
}
// When the entire file upload is complete, the server
// responds with an HTTP 201 Created along with any metadata
// associated with this resource. If this request had been
// updating an existing entity rather than creating a new one,
// the HTTP response code for a completed upload would have
// been 200 OK.
//
// So parse the response out of the body. We aren't expecting
// any other 2xx codes, so we parse it unconditionally on
// StatusCode
if err = json.NewDecoder(res.Body).Decode(&rx.ret); err != nil {
return 598, err
}
return res.StatusCode, nil
}
// Upload uploads the chunks from the input
// It retries each chunk using the pacer and --low-level-retries
func (rx *resumableUpload) Upload() (*drive.File, error) {
start := int64(0)
var StatusCode int
var err error
buf := make([]byte, int(rx.f.opt.ChunkSize))
for start < rx.ContentLength {
reqSize := rx.ContentLength - start
if reqSize >= int64(rx.f.opt.ChunkSize) {
reqSize = int64(rx.f.opt.ChunkSize)
}
chunk := readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
// Transfer the chunk
err = rx.f.pacer.Call(func() (bool, error) {
fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
StatusCode, err = rx.transferChunk(start, chunk, reqSize)
again, err := shouldRetry(err)
if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
again = false
err = nil
}
return again, err
})
if err != nil {
return nil, err
}
start += reqSize
}
// Resume or retry uploads that fail due to connection interruptions or
// any 5xx errors, including:
//
// 500 Internal Server Error
// 502 Bad Gateway
// 503 Service Unavailable
// 504 Gateway Timeout
//
// Use an exponential backoff strategy if any 5xx server error is
// returned when resuming or retrying upload requests. These errors can
// occur if a server is getting overloaded. Exponential backoff can help
// alleviate these kinds of problems during periods of high volume of
// requests or heavy network traffic. Other kinds of requests should not
// be handled by exponential backoff but you can still retry a number of
// them. When retrying these requests, limit the number of times you
// retry them. For example your code could limit to ten retries or less
// before reporting an error.
//
// Handle 404 Not Found errors when doing resumable uploads by starting
// the entire upload over from the beginning.
if rx.ret == nil {
return nil, fserrors.RetryErrorf("Incomplete upload - retry, last error %d", StatusCode)
}
return rx.ret, nil
}
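To make the chunking above concrete, here is a worked example with made-up numbers: with ChunkSize set to 8 MiB and a 20 MiB (20971520 byte) upload, the loop issues three requests whose Content-Range headers, as built by makeRequest, are:

    bytes 0-8388607/20971520
    bytes 8388608-16777215/20971520
    bytes 16777216-20971519/20971520

The final chunk is simply whatever is left over, which is why reqSize is clamped to the remaining length at the top of the loop.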

View File

@@ -1,127 +0,0 @@
// Package dbhash implements the dropbox hash as described in
//
// https://www.dropbox.com/developers/reference/content-hash
package dbhash
import (
"crypto/sha256"
"hash"
)
const (
// BlockSize of the checksum in bytes.
BlockSize = sha256.BlockSize
// Size of the checksum in bytes.
Size = sha256.BlockSize
bytesPerBlock = 4 * 1024 * 1024
hashReturnedError = "hash function returned error"
)
type digest struct {
n int // bytes written into blockHash so far
blockHash hash.Hash
totalHash hash.Hash
sumCalled bool
writtenMore bool
}
// New returns a new hash.Hash computing the Dropbox checksum.
func New() hash.Hash {
d := &digest{}
d.Reset()
return d
}
// writeBlockHash writes the current block hash into the total hash
func (d *digest) writeBlockHash() {
blockHash := d.blockHash.Sum(nil)
_, err := d.totalHash.Write(blockHash)
if err != nil {
panic(hashReturnedError)
}
// reset counters for blockhash
d.n = 0
d.blockHash.Reset()
}
// Write writes len(p) bytes from p to the underlying data stream. It returns
// the number of bytes written from p (0 <= n <= len(p)) and any error
// encountered that caused the write to stop early. Write must return a non-nil
// error if it returns n < len(p). Write must not modify the slice data, even
// temporarily.
//
// Implementations must not retain p.
func (d *digest) Write(p []byte) (n int, err error) {
n = len(p)
for len(p) > 0 {
d.writtenMore = true
toWrite := bytesPerBlock - d.n
if toWrite > len(p) {
toWrite = len(p)
}
_, err = d.blockHash.Write(p[:toWrite])
if err != nil {
panic(hashReturnedError)
}
d.n += toWrite
p = p[toWrite:]
// Accumulate the total hash
if d.n == bytesPerBlock {
d.writeBlockHash()
}
}
return n, nil
}
// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
//
// TODO(ncw) Sum() can only be called once for this type of hash.
// If you call Sum(), then Write() then Sum() it will result in
// a panic. Calling Write() then Sum(), then Sum() is OK.
func (d *digest) Sum(b []byte) []byte {
if d.sumCalled && d.writtenMore {
panic("digest.Sum() called more than once")
}
d.sumCalled = true
d.writtenMore = false
if d.n != 0 {
d.writeBlockHash()
}
return d.totalHash.Sum(b)
}
// Reset resets the Hash to its initial state.
func (d *digest) Reset() {
d.n = 0
d.totalHash = sha256.New()
d.blockHash = sha256.New()
d.sumCalled = false
d.writtenMore = false
}
// Size returns the number of bytes Sum will return.
func (d *digest) Size() int {
return d.totalHash.Size()
}
// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (d *digest) BlockSize() int {
return d.totalHash.BlockSize()
}
// Sum returns the Dropbox checksum of the data.
func Sum(data []byte) [Size]byte {
var d digest
d.Reset()
_, _ = d.Write(data)
var out [Size]byte
d.Sum(out[:0])
return out
}
// must implement this interface
var _ hash.Hash = (*digest)(nil)
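A short usage sketch, mirroring the tests in the next file: the digest hashes the input in 4 MiB blocks, SHA-256s each block, then SHA-256s the concatenation of the block hashes. fileContents is a hypothetical []byte:

    d := dbhash.New()
    _, _ = d.Write(fileContents)                // may be called repeatedly with any chunk sizes
    fmt.Println(hex.EncodeToString(d.Sum(nil))) // lower-case hex digest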

View File

@@ -1,88 +0,0 @@
package dbhash_test
import (
"encoding/hex"
"fmt"
"testing"
"github.com/ncw/rclone/backend/dropbox/dbhash"
"github.com/stretchr/testify/assert"
)
func testChunk(t *testing.T, chunk int) {
data := make([]byte, chunk)
for i := 0; i < chunk; i++ {
data[i] = 'A'
}
for _, test := range []struct {
n int
want string
}{
{0, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
{1, "1cd6ef71e6e0ff46ad2609d403dc3fee244417089aa4461245a4e4fe23a55e42"},
{2, "01e0655fb754d10418a73760f57515f4903b298e6d67dda6bf0987fa79c22c88"},
{4096, "8620913d33852befe09f16fff8fd75f77a83160d29f76f07e0276e9690903035"},
{4194303, "647c8627d70f7a7d13ce96b1e7710a771a55d41a62c3da490d92e56044d311fa"},
{4194304, "d4d63bac5b866c71620185392a8a6218ac1092454a2d16f820363b69852befa3"},
{4194305, "8f553da8d00d0bf509d8470e242888be33019c20c0544811f5b2b89e98360b92"},
{8388607, "83b30cf4fb5195b04a937727ae379cf3d06673bf8f77947f6a92858536e8369c"},
{8388608, "e08b3ba1f538804075c5f939accdeaa9efc7b5c01865c94a41e78ca6550a88e7"},
{8388609, "02c8a4aefc2bfc9036f89a7098001865885938ca580e5c9e5db672385edd303c"},
} {
d := dbhash.New()
var toWrite int
for toWrite = test.n; toWrite >= chunk; toWrite -= chunk {
n, err := d.Write(data)
assert.Nil(t, err)
assert.Equal(t, chunk, n)
}
n, err := d.Write(data[:toWrite])
assert.Nil(t, err)
assert.Equal(t, toWrite, n)
got := hex.EncodeToString(d.Sum(nil))
assert.Equal(t, test.want, got, fmt.Sprintf("when testing length %d", n))
}
}
func TestHashChunk16M(t *testing.T) { testChunk(t, 16*1024*1024) }
func TestHashChunk8M(t *testing.T) { testChunk(t, 8*1024*1024) }
func TestHashChunk4M(t *testing.T) { testChunk(t, 4*1024*1024) }
func TestHashChunk2M(t *testing.T) { testChunk(t, 2*1024*1024) }
func TestHashChunk1M(t *testing.T) { testChunk(t, 1*1024*1024) }
func TestHashChunk64k(t *testing.T) { testChunk(t, 64*1024) }
func TestHashChunk32k(t *testing.T) { testChunk(t, 32*1024) }
func TestHashChunk2048(t *testing.T) { testChunk(t, 2048) }
func TestHashChunk2047(t *testing.T) { testChunk(t, 2047) }
func TestSumCalledTwice(t *testing.T) {
d := dbhash.New()
assert.NotPanics(t, func() { d.Sum(nil) })
d.Reset()
assert.NotPanics(t, func() { d.Sum(nil) })
assert.NotPanics(t, func() { d.Sum(nil) })
_, _ = d.Write([]byte{1})
assert.Panics(t, func() { d.Sum(nil) })
}
func TestSize(t *testing.T) {
d := dbhash.New()
assert.Equal(t, 32, d.Size())
}
func TestBlockSize(t *testing.T) {
d := dbhash.New()
assert.Equal(t, 64, d.BlockSize())
}
func TestSum(t *testing.T) {
assert.Equal(t,
[64]byte{
0x1c, 0xd6, 0xef, 0x71, 0xe6, 0xe0, 0xff, 0x46,
0xad, 0x26, 0x09, 0xd4, 0x03, 0xdc, 0x3f, 0xee,
0x24, 0x44, 0x17, 0x08, 0x9a, 0xa4, 0x46, 0x12,
0x45, 0xa4, 0xe4, 0xfe, 0x23, 0xa5, 0x5e, 0x42,
},
dbhash.Sum([]byte{'A'}),
)
}

File diff suppressed because it is too large

View File

@@ -1,17 +0,0 @@
// Test Dropbox filesystem interface
package dropbox_test
import (
"testing"
"github.com/ncw/rclone/backend/dropbox"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestDropbox:",
NilObject: (*dropbox.Object)(nil),
})
}

View File

@@ -1,761 +0,0 @@
// Package ftp interfaces with FTP servers
package ftp
import (
"io"
"net/textproto"
"os"
"path"
"sync"
"time"
"github.com/jlaffaye/ftp"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "ftp",
Description: "FTP Connection",
NewFs: NewFs,
Options: []fs.Option{
{
Name: "host",
Help: "FTP host to connect to",
Required: true,
Examples: []fs.OptionExample{{
Value: "ftp.example.com",
Help: "Connect to ftp.example.com",
}},
}, {
Name: "user",
Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
}, {
Name: "port",
Help: "FTP port, leave blank to use default (21)",
}, {
Name: "pass",
Help: "FTP password",
IsPassword: true,
Required: true,
},
},
})
}
// Options defines the configuration for this backend
type Options struct {
Host string `config:"host"`
User string `config:"user"`
Pass string `config:"pass"`
Port string `config:"port"`
}
// Fs represents a remote FTP server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
url string
user string
pass string
dialAddr string
poolMu sync.Mutex
pool []*ftp.ServerConn
}
// Object describes an FTP file
type Object struct {
fs *Fs
remote string
info *FileInfo
}
// FileInfo is the metadata known about an FTP file
type FileInfo struct {
Name string
Size uint64
ModTime time.Time
IsDir bool
}
// ------------------------------------------------------------
// Name of this fs
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String returns a description of the FS
func (f *Fs) String() string {
return f.url
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Open a new connection to the FTP server.
func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
fs.Debugf(f, "Connecting to FTP server")
c, err := ftp.DialTimeout(f.dialAddr, fs.Config.ConnectTimeout)
if err != nil {
fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
return nil, errors.Wrap(err, "ftpConnection Dial")
}
err = c.Login(f.user, f.pass)
if err != nil {
_ = c.Quit()
fs.Errorf(f, "Error while Logging in into %s: %s", f.dialAddr, err)
return nil, errors.Wrap(err, "ftpConnection Login")
}
return c, nil
}
// Get an FTP connection from the pool, or open a new one
func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
f.poolMu.Lock()
if len(f.pool) > 0 {
c = f.pool[0]
f.pool = f.pool[1:]
}
f.poolMu.Unlock()
if c != nil {
return c, nil
}
return f.ftpConnection()
}
// Return an FTP connection to the pool
//
// It sets the pointed-to connection to nil so it can't be reused.
//
// If err is not nil (and is not a regular FTP protocol error) it first
// checks that the connection is still alive with a NOOP request,
// discarding it instead of pooling it if the check fails.
func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
c := *pc
*pc = nil
if err != nil {
// If not a regular FTP error code then check the connection
_, isRegularError := errors.Cause(err).(*textproto.Error)
if !isRegularError {
nopErr := c.NoOp()
if nopErr != nil {
fs.Debugf(f, "Connection failed, closing: %v", nopErr)
_ = c.Quit()
return
}
}
}
f.poolMu.Lock()
f.pool = append(f.pool, c)
f.poolMu.Unlock()
}
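The rest of this backend uses the two helpers above in a fixed pattern, sketched here with a placeholder dir; the error is always passed back so that dead connections get dropped instead of pooled:

    c, err := f.getFtpConnection()
    if err != nil {
        return nil, errors.Wrap(err, "list")
    }
    files, err := c.List(dir)
    f.putFtpConnection(&c, err) // c is nil after this call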
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
// Parse config into Options struct
opt := new(Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
pass, err := obscure.Reveal(opt.Pass)
if err != nil {
return nil, errors.Wrap(err, "NewFS decrypt password")
}
user := opt.User
if user == "" {
user = os.Getenv("USER")
}
port := opt.Port
if port == "" {
port = "21"
}
dialAddr := opt.Host + ":" + port
u := "ftp://" + path.Join(dialAddr+"/", root)
f := &Fs{
name: name,
root: root,
opt: *opt,
url: u,
user: user,
pass: pass,
dialAddr: dialAddr,
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(f)
// Make a connection and pool it to return errors early
c, err := f.getFtpConnection()
if err != nil {
return nil, errors.Wrap(err, "NewFs")
}
f.putFtpConnection(&c, nil)
if root != "" {
// Check to see if the root is actually an existing file
remote := path.Base(root)
f.root = path.Dir(root)
if f.root == "." {
f.root = ""
}
_, err := f.NewObject(remote)
if err != nil {
if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
// File doesn't exist so return old f
f.root = root
return f, nil
}
return nil, err
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, err
}
// translateErrorFile turns FTP errors into rclone errors if possible for a file
func translateErrorFile(err error) error {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
err = fs.ErrorObjectNotFound
}
}
return err
}
// translateErrorDir turns FTP errors into rclone errors if possible for a directory
func translateErrorDir(err error) error {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
err = fs.ErrorDirNotFound
}
}
return err
}
// findItem finds a directory entry for the name in its parent directory
func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
fullPath := path.Join(f.root, remote)
dir := path.Dir(fullPath)
base := path.Base(fullPath)
c, err := f.getFtpConnection()
if err != nil {
return nil, errors.Wrap(err, "findItem")
}
files, err := c.List(dir)
f.putFtpConnection(&c, err)
if err != nil {
return nil, translateErrorFile(err)
}
for _, file := range files {
if file.Name == base {
return file, nil
}
}
return nil, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (o fs.Object, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
entry, err := f.findItem(remote)
if err != nil {
return nil, err
}
if entry != nil && entry.Type != ftp.EntryTypeFolder {
o := &Object{
fs: f,
remote: remote,
}
info := &FileInfo{
Name: remote,
Size: entry.Size,
ModTime: entry.Time,
}
o.info = info
return o, nil
}
return nil, fs.ErrorObjectNotFound
}
// dirExists checks whether the directory pointed to by remote exists
func (f *Fs) dirExists(remote string) (exists bool, err error) {
entry, err := f.findItem(remote)
if err != nil {
return false, errors.Wrap(err, "dirExists")
}
if entry != nil && entry.Type == ftp.EntryTypeFolder {
return true, nil
}
return false, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
// defer fs.Trace(dir, "curlevel=%d", curlevel)("")
c, err := f.getFtpConnection()
if err != nil {
return nil, errors.Wrap(err, "list")
}
files, err := c.List(path.Join(f.root, dir))
f.putFtpConnection(&c, err)
if err != nil {
return nil, translateErrorDir(err)
}
// Annoyingly FTP returns success for a directory which
// doesn't exist, so check it really doesn't exist if no
// entries found.
if len(files) == 0 {
exists, err := f.dirExists(dir)
if err != nil {
return nil, errors.Wrap(err, "list")
}
if !exists {
return nil, fs.ErrorDirNotFound
}
}
for i := range files {
object := files[i]
newremote := path.Join(dir, object.Name)
switch object.Type {
case ftp.EntryTypeFolder:
if object.Name == "." || object.Name == ".." {
continue
}
d := fs.NewDir(newremote, object.Time)
entries = append(entries, d)
default:
o := &Object{
fs: f,
remote: newremote,
}
info := &FileInfo{
Name: newremote,
Size: object.Size,
ModTime: object.Time,
}
o.info = info
entries = append(entries, o)
}
}
return entries, nil
}
// Hashes are not supported
func (f *Fs) Hashes() hash.Set {
return 0
}
// Precision shows Modified Time not supported
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// fs.Debugf(f, "Trying to put file %s", src.Remote())
err := f.mkParentDir(src.Remote())
if err != nil {
return nil, errors.Wrap(err, "Put mkParentDir failed")
}
o := &Object{
fs: f,
remote: src.Remote(),
}
err = o.Update(in, src, options...)
return o, err
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...)
}
// getInfo reads the FileInfo for a path
func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
// defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err)
dir := path.Dir(remote)
base := path.Base(remote)
c, err := f.getFtpConnection()
if err != nil {
return nil, errors.Wrap(err, "getInfo")
}
files, err := c.List(dir)
f.putFtpConnection(&c, err)
if err != nil {
return nil, translateErrorFile(err)
}
for i := range files {
if files[i].Name == base {
info := &FileInfo{
Name: remote,
Size: files[i].Size,
ModTime: files[i].Time,
IsDir: files[i].Type == ftp.EntryTypeFolder,
}
return info, nil
}
}
return nil, fs.ErrorObjectNotFound
}
// mkdir makes the directory and parents using unrooted paths
func (f *Fs) mkdir(abspath string) error {
if abspath == "." || abspath == "/" {
return nil
}
fi, err := f.getInfo(abspath)
if err == nil {
if fi.IsDir {
return nil
}
return fs.ErrorIsFile
} else if err != fs.ErrorObjectNotFound {
return errors.Wrapf(err, "mkdir %q failed", abspath)
}
parent := path.Dir(abspath)
err = f.mkdir(parent)
if err != nil {
return err
}
c, connErr := f.getFtpConnection()
if connErr != nil {
return errors.Wrap(connErr, "mkdir")
}
err = c.MakeDir(abspath)
f.putFtpConnection(&c, err)
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
err = nil
case 521: // dir already exists: error number according to RFC 959: issue #2363
err = nil
}
}
return err
}
// mkParentDir makes the parent of remote if necessary and any
// directories above that
func (f *Fs) mkParentDir(remote string) error {
parent := path.Dir(remote)
return f.mkdir(path.Join(f.root, parent))
}
// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(dir string) (err error) {
// defer fs.Trace(dir, "")("err=%v", &err)
root := path.Join(f.root, dir)
return f.mkdir(root)
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(dir string) error {
c, err := f.getFtpConnection()
if err != nil {
return errors.Wrap(translateErrorFile(err), "Rmdir")
}
err = c.RemoveDir(path.Join(f.root, dir))
f.putFtpConnection(&c, err)
return translateErrorDir(err)
}
// Move renames a remote file object
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
err := f.mkParentDir(remote)
if err != nil {
return nil, errors.Wrap(err, "Move mkParentDir failed")
}
c, err := f.getFtpConnection()
if err != nil {
return nil, errors.Wrap(err, "Move")
}
err = c.Rename(
path.Join(srcObj.fs.root, srcObj.remote),
path.Join(f.root, remote),
)
f.putFtpConnection(&c, err)
if err != nil {
return nil, errors.Wrap(err, "Move Rename failed")
}
dstObj, err := f.NewObject(remote)
if err != nil {
return nil, errors.Wrap(err, "Move NewObject failed")
}
return dstObj, nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcPath := path.Join(srcFs.root, srcRemote)
dstPath := path.Join(f.root, dstRemote)
// Check if destination exists
fi, err := f.getInfo(dstPath)
if err == nil {
if fi.IsDir {
return fs.ErrorDirExists
}
return fs.ErrorIsFile
} else if err != fs.ErrorObjectNotFound {
return errors.Wrapf(err, "DirMove getInfo failed")
}
// Make sure the parent directory exists
err = f.mkdir(path.Dir(dstPath))
if err != nil {
return errors.Wrap(err, "DirMove mkParentDir dst failed")
}
// Do the move
c, err := f.getFtpConnection()
if err != nil {
return errors.Wrap(err, "DirMove")
}
err = c.Rename(
srcPath,
dstPath,
)
f.putFtpConnection(&c, err)
if err != nil {
return errors.Wrapf(err, "DirMove Rename(%q,%q) failed", srcPath, dstPath)
}
return nil
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// String version of o
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Hash returns the hash of an object returning a lowercase hex string
func (o *Object) Hash(t hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return int64(o.info.Size)
}
// ModTime returns the modification time of the object
func (o *Object) ModTime() time.Time {
return o.info.ModTime
}
// SetModTime sets the modification time of the object - not supported on FTP so this is a no-op
func (o *Object) SetModTime(modTime time.Time) error {
return nil
}
// Storable returns a boolean as to whether this object is storable
func (o *Object) Storable() bool {
return true
}
// ftpReadCloser implements io.ReadCloser for FTP objects.
type ftpReadCloser struct {
rc io.ReadCloser
c *ftp.ServerConn
f *Fs
err error // errors found during read
}
// Read bytes into p
func (f *ftpReadCloser) Read(p []byte) (n int, err error) {
n, err = f.rc.Read(p)
if err != nil && err != io.EOF {
f.err = err // store any errors for Close to examine
}
return
}
// Close the FTP reader and return the connection to the pool
func (f *ftpReadCloser) Close() error {
err := f.rc.Close()
// if errors while reading or closing, dump the connection
if err != nil || f.err != nil {
_ = f.c.Quit()
} else {
f.f.putFtpConnection(&f.c, nil)
}
// mask the error if it was caused by a premature close
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable:
err = nil
}
}
return err
}
// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
// defer fs.Trace(o, "")("rc=%v, err=%v", &rc, &err)
path := path.Join(o.fs.root, o.remote)
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
c, err := o.fs.getFtpConnection()
if err != nil {
return nil, errors.Wrap(err, "open")
}
fd, err := c.RetrFrom(path, uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
return nil, errors.Wrap(err, "open")
}
rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
return rc, nil
}
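// Open translates fs.SeekOption and fs.RangeOption into a RETR offset plus a
// client-side read limit. readRange is a hypothetical helper, not part of the
// backend, sketching how a caller asks for a byte range; it assumes an
// io/ioutil import.
func readRange(o fs.Object, start, end int64) (data []byte, err error) {
	rc, err := o.Open(&fs.RangeOption{Start: start, End: end})
	if err != nil {
		return nil, err
	}
	defer fs.CheckClose(rc, &err)
	return ioutil.ReadAll(rc)
}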
// Update the already existing object
//
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
// defer fs.Trace(o, "src=%v", src)("err=%v", &err)
path := path.Join(o.fs.root, o.remote)
// remove the file if upload failed
remove := func() {
removeErr := o.Remove()
if removeErr != nil {
fs.Debugf(o, "Failed to remove: %v", removeErr)
} else {
fs.Debugf(o, "Removed after failed upload: %v", err)
}
}
c, err := o.fs.getFtpConnection()
if err != nil {
return errors.Wrap(err, "Update")
}
err = c.Stor(path, in)
if err != nil {
_ = c.Quit()
remove()
return errors.Wrap(err, "update stor")
}
o.fs.putFtpConnection(&c, nil)
o.info, err = o.fs.getInfo(path)
if err != nil {
return errors.Wrap(err, "update getinfo")
}
return nil
}
// Remove an object
func (o *Object) Remove() (err error) {
// defer fs.Trace(o, "")("err=%v", &err)
path := path.Join(o.fs.root, o.remote)
// Check if it's a directory or a file
info, err := o.fs.getInfo(path)
if err != nil {
return err
}
if info.IsDir {
err = o.fs.Rmdir(o.remote)
} else {
// use a separate variable for the connection error so the result of
// Delete isn't lost through shadowing of the named return value
c, connErr := o.fs.getFtpConnection()
if connErr != nil {
return errors.Wrap(connErr, "Remove")
}
err = c.Delete(path)
o.fs.putFtpConnection(&c, err)
}
return err
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Mover = &Fs{}
_ fs.DirMover = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Object = &Object{}
)

View File

@@ -1,17 +0,0 @@
// Test FTP filesystem interface
package ftp_test
import (
"testing"
"github.com/ncw/rclone/backend/ftp"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFTP:",
NilObject: (*ftp.Object)(nil),
})
}

View File

@@ -1,991 +0,0 @@
// Package googlecloudstorage provides an interface to Google Cloud Storage
package googlecloudstorage
/*
Notes
Can't set Updated but can set Metadata on object creation
Patch needs full_control not just read_write
FIXME Patch/Delete/Get isn't working with files with spaces in - giving 404 error
- https://code.google.com/p/google-api-go-client/issues/detail?id=64
*/
import (
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"path"
"regexp"
"strings"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/pkg/errors"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/googleapi"
storage "google.golang.org/api/storage/v1"
)
const (
rcloneClientID = "202264815644.apps.googleusercontent.com"
rcloneEncryptedClientSecret = "Uj7C9jGfb9gmeaV70Lh058cNkWvepr-Es9sBm0zdgil7JaOWF1VySw"
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
metaMtime = "mtime" // key to store mtime under in metadata
listChunks = 1000 // chunk size to read directory listings
minSleep = 10 * time.Millisecond
)
var (
// Description of how to auth for this app
storageConfig = &oauth2.Config{
Scopes: []string{storage.DevstorageFullControlScope},
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.TitleBarRedirectURL,
}
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "google cloud storage",
Prefix: "gcs",
Description: "Google Cloud Storage (this is not Google Drive)",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
saFile, _ := m.Get("service_account_file")
saCreds, _ := m.Get("service_account_credentials")
if saFile != "" || saCreds != "" {
return
}
err := oauthutil.Config("google cloud storage", name, m, storageConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Google Application Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Google Application Client Secret\nLeave blank normally.",
}, {
Name: "project_number",
Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
}, {
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideBoth,
}, {
Name: "object_acl",
Help: "Access Control List for new objects.",
Examples: []fs.OptionExample{{
Value: "authenticatedRead",
Help: "Object owner gets OWNER access, and all Authenticated Users get READER access.",
}, {
Value: "bucketOwnerFullControl",
Help: "Object owner gets OWNER access, and project team owners get OWNER access.",
}, {
Value: "bucketOwnerRead",
Help: "Object owner gets OWNER access, and project team owners get READER access.",
}, {
Value: "private",
Help: "Object owner gets OWNER access [default if left blank].",
}, {
Value: "projectPrivate",
Help: "Object owner gets OWNER access, and project team members get access according to their roles.",
}, {
Value: "publicRead",
Help: "Object owner gets OWNER access, and all Users get READER access.",
}},
}, {
Name: "bucket_acl",
Help: "Access Control List for new buckets.",
Examples: []fs.OptionExample{{
Value: "authenticatedRead",
Help: "Project team owners get OWNER access, and all Authenticated Users get READER access.",
}, {
Value: "private",
Help: "Project team owners get OWNER access [default if left blank].",
}, {
Value: "projectPrivate",
Help: "Project team members get access according to their roles.",
}, {
Value: "publicRead",
Help: "Project team owners get OWNER access, and all Users get READER access.",
}, {
Value: "publicReadWrite",
Help: "Project team owners get OWNER access, and all Users get WRITER access.",
}},
}, {
Name: "location",
Help: "Location for the newly created buckets.",
Examples: []fs.OptionExample{{
Value: "",
Help: "Empty for default location (US).",
}, {
Value: "asia",
Help: "Multi-regional location for Asia.",
}, {
Value: "eu",
Help: "Multi-regional location for Europe.",
}, {
Value: "us",
Help: "Multi-regional location for United States.",
}, {
Value: "asia-east1",
Help: "Taiwan.",
}, {
Value: "asia-northeast1",
Help: "Tokyo.",
}, {
Value: "asia-southeast1",
Help: "Singapore.",
}, {
Value: "australia-southeast1",
Help: "Sydney.",
}, {
Value: "europe-west1",
Help: "Belgium.",
}, {
Value: "europe-west2",
Help: "London.",
}, {
Value: "us-central1",
Help: "Iowa.",
}, {
Value: "us-east1",
Help: "South Carolina.",
}, {
Value: "us-east4",
Help: "Northern Virginia.",
}, {
Value: "us-west1",
Help: "Oregon.",
}},
}, {
Name: "storage_class",
Help: "The storage class to use when storing objects in Google Cloud Storage.",
Examples: []fs.OptionExample{{
Value: "",
Help: "Default",
}, {
Value: "MULTI_REGIONAL",
Help: "Multi-regional storage class",
}, {
Value: "REGIONAL",
Help: "Regional storage class",
}, {
Value: "NEARLINE",
Help: "Nearline storage class",
}, {
Value: "COLDLINE",
Help: "Coldline storage class",
}, {
Value: "DURABLE_REDUCED_AVAILABILITY",
Help: "Durable reduced availability storage class",
}},
}},
})
}
// Options defines the configuration for this backend
type Options struct {
ProjectNumber string `config:"project_number"`
ServiceAccountFile string `config:"service_account_file"`
ServiceAccountCredentials string `config:"service_account_credentials"`
ObjectACL string `config:"object_acl"`
BucketACL string `config:"bucket_acl"`
Location string `config:"location"`
StorageClass string `config:"storage_class"`
}
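// The `config:"..."` tags above are what configstruct.Set keys on when NewFs
// parses the config map. optionsSketch is a hypothetical illustration of that
// mapping, not part of the backend; all values are invented.
func optionsSketch() (*Options, error) {
	m := configmap.Simple{
		"project_number": "12345678",
		"location":       "europe-west2",
		"storage_class":  "REGIONAL",
		"bucket_acl":     "private",
	}
	opt := new(Options)
	err := configstruct.Set(m, opt)
	return opt, err // on success opt.Location == "europe-west2" and so on
}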
// Fs represents a remote storage server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
bucket string // the bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucket OK
bucketOK bool // true if we have created the bucket
pacer *pacer.Pacer // To pace the API calls
}
// Object describes a storage object
//
// Will definitely have info but maybe not meta
type Object struct {
fs *Fs // what this object is part of
remote string // The remote path
url string // download path
md5sum string // The MD5Sum of the object
bytes int64 // Bytes in the object
modTime time.Time // Modified time of the object
mimeType string
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
if f.root == "" {
return f.bucket
}
return f.bucket + "/" + f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
if f.root == "" {
return fmt.Sprintf("Storage bucket %s", f.bucket)
}
return fmt.Sprintf("Storage bucket %s path %s", f.bucket, f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// shouldRetry determines whether a given err deserves to be retried
func shouldRetry(err error) (again bool, errOut error) {
again = false
if err != nil {
if fserrors.ShouldRetry(err) {
again = true
} else {
switch gerr := err.(type) {
case *googleapi.Error:
if gerr.Code >= 500 && gerr.Code < 600 {
// All 5xx errors should be retried
again = true
} else if len(gerr.Errors) > 0 {
reason := gerr.Errors[0].Reason
if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" {
again = true
}
}
}
}
}
return again, err
}
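// shouldRetry feeds the pacer used for every API call in this file: the
// closure handed to pacer.Call returns (retry, err) and the pacer backs off
// and re-invokes it while retry is true. getObjectWithRetry is a hypothetical
// sketch of that pattern, not part of the backend; the same shape appears in
// the methods below.
func getObjectWithRetry(f *Fs, bucket, name string) (object *storage.Object, err error) {
	err = f.pacer.Call(func() (bool, error) {
		object, err = f.svc.Objects.Get(bucket, name).Do()
		return shouldRetry(err)
	})
	return object, err
}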
// Pattern to match a storage path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
// parsePath parses a storage 'url'
func parsePath(path string) (bucket, directory string, err error) {
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = errors.Errorf("couldn't find bucket in storage path %q", path)
} else {
bucket, directory = parts[1], parts[2]
directory = strings.Trim(directory, "/")
}
return
}
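// parsePath splits a remote at the first "/" into a bucket and a slash-trimmed
// directory. TestParsePathSketch is a hypothetical in-package test shown only
// to illustrate that split; it assumes the standard testing package.
func TestParsePathSketch(t *testing.T) {
	bucket, directory, err := parsePath("mybucket/path/to/dir/")
	if err != nil || bucket != "mybucket" || directory != "path/to/dir" {
		t.Errorf("unexpected result: %q %q %v", bucket, directory, err)
	}
}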
func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
if err != nil {
return nil, errors.Wrap(err, "error processing credentials")
}
ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config))
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
var oAuthClient *http.Client
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if opt.ObjectACL == "" {
opt.ObjectACL = "private"
}
if opt.BucketACL == "" {
opt.BucketACL = "private"
}
// try loading service account credentials from env variable, then from a file
if opt.ServiceAccountCredentials != "" && opt.ServiceAccountFile != "" {
loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
if err != nil {
return nil, errors.Wrap(err, "error opening service account credentials file")
}
opt.ServiceAccountCredentials = string(loadedCreds)
}
if opt.ServiceAccountCredentials != "" {
oAuthClient, err = getServiceAccountClient([]byte(opt.ServiceAccountCredentials))
if err != nil {
return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
}
} else {
oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
}
}
bucket, directory, err := parsePath(root)
if err != nil {
return nil, err
}
f := &Fs{
name: name,
bucket: bucket,
root: directory,
opt: *opt,
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer),
}
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
}).Fill(f)
// Create a new authorized Drive client.
f.client = oAuthClient
f.svc, err = storage.New(f.client)
if err != nil {
return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
}
if f.root != "" {
f.root += "/"
// Check to see if the object exists
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.Get(bucket, directory).Do()
return shouldRetry(err)
})
if err == nil {
f.root = path.Dir(directory)
if f.root == "." {
f.root = ""
} else {
f.root += "/"
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
}
return f, nil
}
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *storage.Object) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
if info != nil {
o.setMetaData(info)
} else {
err := o.readMetaData() // reads info and meta, returning an error
if err != nil {
return nil, err
}
}
return o, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}
// listFn is called from list to handle an object.
type listFn func(remote string, object *storage.Object, isDirectory bool) error
// list the objects into the function supplied
//
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
func (f *Fs) list(dir string, recurse bool, fn listFn) (err error) {
root := f.root
rootLength := len(root)
if dir != "" {
root += dir + "/"
}
list := f.svc.Objects.List(f.bucket).Prefix(root).MaxResults(listChunks)
if !recurse {
list = list.Delimiter("/")
}
for {
var objects *storage.Objects
err = f.pacer.Call(func() (bool, error) {
objects, err = list.Do()
return shouldRetry(err)
})
if err != nil {
if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code == http.StatusNotFound {
err = fs.ErrorDirNotFound
}
}
return err
}
if !recurse {
var object storage.Object
for _, prefix := range objects.Prefixes {
if !strings.HasSuffix(prefix, "/") {
continue
}
err = fn(prefix[rootLength:len(prefix)-1], &object, true)
if err != nil {
return err
}
}
}
for _, object := range objects.Items {
if !strings.HasPrefix(object.Name, root) {
fs.Logf(f, "Odd name received %q", object.Name)
continue
}
remote := object.Name[rootLength:]
// is this a directory marker?
if (strings.HasSuffix(remote, "/") || remote == "") && object.Size == 0 {
if recurse && remote != "" {
// add a directory in if --fast-list since will have no prefixes
err = fn(remote[:len(remote)-1], object, true)
if err != nil {
return err
}
}
continue // skip directory marker
}
err = fn(remote, object, false)
if err != nil {
return err
}
}
if objects.NextPageToken == "" {
break
}
list.PageToken(objects.NextPageToken)
}
return nil
}
// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) {
if isDirectory {
d := fs.NewDir(remote, time.Time{}).SetSize(int64(object.Size))
return d, nil
}
o, err := f.newObjectWithInfo(remote, object)
if err != nil {
return nil, err
}
return o, nil
}
// mark the bucket as being OK
func (f *Fs) markBucketOK() {
if f.bucket != "" {
f.bucketOKMu.Lock()
f.bucketOK = true
f.bucketOKMu.Unlock()
}
}
// listDir lists a single directory
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
// List the objects
err = f.list(dir, false, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
if entry != nil {
entries = append(entries, entry)
}
return nil
})
if err != nil {
return nil, err
}
// bucket must be present if listing succeeded
f.markBucketOK()
return entries, err
}
// listBuckets lists the buckets
func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
if dir != "" {
return nil, fs.ErrorListBucketRequired
}
if f.opt.ProjectNumber == "" {
return nil, errors.New("can't list buckets without project number")
}
listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks)
for {
var buckets *storage.Buckets
err = f.pacer.Call(func() (bool, error) {
buckets, err = listBuckets.Do()
return shouldRetry(err)
})
if err != nil {
return nil, err
}
for _, bucket := range buckets.Items {
d := fs.NewDir(bucket.Name, time.Time{})
entries = append(entries, d)
}
if buckets.NextPageToken == "" {
break
}
listBuckets.PageToken(buckets.NextPageToken)
}
return entries, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
if f.bucket == "" {
return f.listBuckets(dir)
}
return f.listDir(dir)
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
if f.bucket == "" {
return fs.ErrorListBucketRequired
}
list := walk.NewListRHelper(callback)
err = f.list(dir, true, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
return list.Add(entry)
})
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.markBucketOK()
return list.Flush()
}
// Put the object into the bucket
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: src.Remote(),
}
return o, o.Update(in, src, options...)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...)
}
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(dir string) (err error) {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.bucketOK {
return nil
}
// List something from the bucket to see if it exists. Doing it like this enables the use of a
// service account that only has the "Storage Object Admin" role. See #2193 for details.
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.List(f.bucket).MaxResults(1).Do()
return shouldRetry(err)
})
if err == nil {
// Bucket already exists
f.bucketOK = true
return nil
} else if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code != http.StatusNotFound {
return errors.Wrap(err, "failed to get bucket")
}
} else {
return errors.Wrap(err, "failed to get bucket")
}
if f.opt.ProjectNumber == "" {
return errors.New("can't make bucket without project number")
}
bucket := storage.Bucket{
Name: f.bucket,
Location: f.opt.Location,
StorageClass: f.opt.StorageClass,
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket).PredefinedAcl(f.opt.BucketACL).Do()
return shouldRetry(err)
})
if err == nil {
f.bucketOK = true
}
return err
}
// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty: Error 409: The bucket you tried
// to delete was not empty.
func (f *Fs) Rmdir(dir string) (err error) {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.root != "" || dir != "" {
return nil
}
err = f.pacer.Call(func() (bool, error) {
err = f.svc.Buckets.Delete(f.bucket).Do()
return shouldRetry(err)
})
if err == nil {
f.bucketOK = false
}
return err
}
// Precision returns the precision
func (f *Fs) Precision() time.Duration {
return time.Nanosecond
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
err := f.Mkdir("")
if err != nil {
return nil, err
}
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
// Temporary Object under construction
dstObj := &Object{
fs: f,
remote: remote,
}
srcBucket := srcObj.fs.bucket
srcObject := srcObj.fs.root + srcObj.remote
dstBucket := f.bucket
dstObject := f.root + remote
var newObject *storage.Object
err = f.pacer.Call(func() (bool, error) {
newObject, err = f.svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, nil).Do()
return shouldRetry(err)
})
if err != nil {
return nil, err
}
// Set the metadata for the new object while we have it
dstObj.setMetaData(newObject)
return dstObj, nil
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
return o.md5sum, nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.bytes
}
// setMetaData sets the fs data from a storage.Object
func (o *Object) setMetaData(info *storage.Object) {
o.url = info.MediaLink
o.bytes = int64(info.Size)
o.mimeType = info.ContentType
// Read md5sum
md5sumData, err := base64.StdEncoding.DecodeString(info.Md5Hash)
if err != nil {
fs.Logf(o, "Bad MD5 decode: %v", err)
} else {
o.md5sum = hex.EncodeToString(md5sumData)
}
// read mtime out of metadata if available
mtimeString, ok := info.Metadata[metaMtime]
if ok {
modTime, err := time.Parse(timeFormatIn, mtimeString)
if err == nil {
o.modTime = modTime
return
}
fs.Debugf(o, "Failed to read mtime from metadata: %s", err)
}
// Fallback to the Updated time
modTime, err := time.Parse(timeFormatIn, info.Updated)
if err != nil {
fs.Logf(o, "Bad time decode: %v", err)
} else {
o.modTime = modTime
}
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
if !o.modTime.IsZero() {
return nil
}
var object *storage.Object
err = o.fs.pacer.Call(func() (bool, error) {
object, err = o.fs.svc.Objects.Get(o.fs.bucket, o.fs.root+o.remote).Do()
return shouldRetry(err)
})
if err != nil {
if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
}
return err
}
o.setMetaData(object)
return nil
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
// fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
}
return o.modTime
}
// Returns metadata for an object
func metadataFromModTime(modTime time.Time) map[string]string {
metadata := make(map[string]string, 1)
metadata[metaMtime] = modTime.Format(timeFormatOut)
return metadata
}
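// The modification time survives a round trip by being written under the
// metaMtime key in timeFormatOut and read back in setMetaData above.
// mtimeMetadataSketch is a hypothetical worked example of the value produced;
// it is not part of the backend.
func mtimeMetadataSketch() map[string]string {
	t := time.Date(2018, 5, 1, 12, 0, 0, 0, time.UTC)
	// returns map[string]string{"mtime": "2018-05-01T12:00:00.000000000Z"}
	return metadataFromModTime(t)
}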
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) (err error) {
// This only adds metadata so will preserve other metadata
object := storage.Object{
Bucket: o.fs.bucket,
Name: o.fs.root + o.remote,
Metadata: metadataFromModTime(modTime),
}
var newObject *storage.Object
err = o.fs.pacer.Call(func() (bool, error) {
newObject, err = o.fs.svc.Objects.Patch(o.fs.bucket, o.fs.root+o.remote, &object).Do()
return shouldRetry(err)
})
if err != nil {
return err
}
o.setMetaData(newObject)
return nil
}
// Storable returns a boolean as to whether this object is storable
func (o *Object) Storable() bool {
return true
}
// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
req, err := http.NewRequest("GET", o.url, nil)
if err != nil {
return nil, err
}
fs.OpenOptionAddHTTPHeaders(req.Header, options)
var res *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.client.Do(req)
if err == nil {
err = googleapi.CheckResponse(res)
if err != nil {
_ = res.Body.Close() // ignore error
}
}
return shouldRetry(err)
})
if err != nil {
return nil, err
}
_, isRanging := req.Header["Range"]
if !(res.StatusCode == http.StatusOK || (isRanging && res.StatusCode == http.StatusPartialContent)) {
_ = res.Body.Close() // ignore error
return nil, errors.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
}
return res.Body, nil
}
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
err := o.fs.Mkdir("")
if err != nil {
return err
}
modTime := src.ModTime()
object := storage.Object{
Bucket: o.fs.bucket,
Name: o.fs.root + o.remote,
ContentType: fs.MimeType(src),
Updated: modTime.Format(timeFormatOut), // Doesn't get set
Metadata: metadataFromModTime(modTime),
}
var newObject *storage.Object
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
newObject, err = o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.opt.ObjectACL).Do()
return shouldRetry(err)
})
if err != nil {
return err
}
// Set the metadata for the new object while we have it
o.setMetaData(newObject)
return nil
}
// Remove an object
func (o *Object) Remove() (err error) {
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do()
return shouldRetry(err)
})
return err
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType() string {
return o.mimeType
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
)

View File

@@ -1,17 +0,0 @@
// Test GoogleCloudStorage filesystem interface
package googlecloudstorage_test
import (
"testing"
"github.com/ncw/rclone/backend/googlecloudstorage"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestGoogleCloudStorage:",
NilObject: (*googlecloudstorage.Object)(nil),
})
}

View File

@@ -1,503 +0,0 @@
// Package http provides a filesystem interface using net/http
//
// It treats HTML pages served from the endpoint as directory
// listings, and includes any links found as files.
package http
import (
"io"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
"golang.org/x/net/html"
)
var (
errorReadOnly = errors.New("http remotes are read only")
timeUnset = time.Unix(0, 0)
)
func init() {
fsi := &fs.RegInfo{
Name: "http",
Description: "http Connection",
NewFs: NewFs,
Options: []fs.Option{{
Name: "url",
Help: "URL of http host to connect to",
Required: true,
Examples: []fs.OptionExample{{
Value: "https://example.com",
Help: "Connect to example.com",
}},
}},
}
fs.Register(fsi)
}
// Options defines the configuration for this backend
type Options struct {
Endpoint string `config:"url"`
}
// Fs stores the interface to the remote HTTP files
type Fs struct {
name string
root string
features *fs.Features // optional features
opt Options // options for this backend
endpoint *url.URL
endpointURL string // endpoint as a string
httpClient *http.Client
}
// Object is a remote object that has been stat'd (so it exists, but is not necessarily open for reading)
type Object struct {
fs *Fs
remote string
size int64
modTime time.Time
contentType string
}
// statusError returns an error if the res contained an error
func statusError(res *http.Response, err error) error {
if err != nil {
return err
}
if res.StatusCode < 200 || res.StatusCode > 299 {
_ = res.Body.Close()
return errors.Errorf("HTTP Error %d: %s", res.StatusCode, res.Status)
}
return nil
}
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if !strings.HasSuffix(opt.Endpoint, "/") {
opt.Endpoint += "/"
}
// Parse the endpoint and stick the root onto it
base, err := url.Parse(opt.Endpoint)
if err != nil {
return nil, err
}
u, err := rest.URLJoin(base, rest.URLPathEscape(root))
if err != nil {
return nil, err
}
client := fshttp.NewClient(fs.Config)
var isFile = false
if !strings.HasSuffix(u.String(), "/") {
// Make a client which doesn't follow redirects so the server
// doesn't redirect http://host/dir to http://host/dir/
noRedir := *client
noRedir.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
// check to see if points to a file
res, err := noRedir.Head(u.String())
err = statusError(res, err)
if err == nil {
isFile = true
}
}
newRoot := u.String()
if isFile {
// Point to the parent if this is a file
newRoot, _ = path.Split(u.String())
} else {
if !strings.HasSuffix(newRoot, "/") {
newRoot += "/"
}
}
u, err = url.Parse(newRoot)
if err != nil {
return nil, err
}
f := &Fs{
name: name,
root: root,
opt: *opt,
httpClient: client,
endpoint: u,
endpointURL: u.String(),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(f)
if isFile {
return f, fs.ErrorIsFile
}
if !strings.HasSuffix(f.endpointURL, "/") {
return nil, errors.New("internal error: url doesn't end with /")
}
return f, nil
}
// Name returns the configured name of the file system
func (f *Fs) Name() string {
return f.name
}
// Root returns the root for the filesystem
func (f *Fs) Root() string {
return f.root
}
// String returns the URL for the filesystem
func (f *Fs) String() string {
return f.endpointURL
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Precision is the remote http file system's modtime precision, which we have no way of knowing. We estimate at 1s
func (f *Fs) Precision() time.Duration {
return time.Second
}
// NewObject creates a new remote http file object
func (f *Fs) NewObject(remote string) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
err := o.stat()
if err != nil {
return nil, errors.Wrap(err, "Stat failed")
}
return o, nil
}
// Joins the remote onto the base URL
func (f *Fs) url(remote string) string {
return f.endpointURL + rest.URLPathEscape(remote)
}
// parse s into an int64, on failure return def
func parseInt64(s string, def int64) int64 {
n, e := strconv.ParseInt(s, 10, 64)
if e != nil {
return def
}
return n
}
// Errors returned by parseName
var (
errURLJoinFailed = errors.New("URLJoin failed")
errFoundQuestionMark = errors.New("found ? in URL")
errHostMismatch = errors.New("host mismatch")
errSchemeMismatch = errors.New("scheme mismatch")
errNotUnderRoot = errors.New("not under root")
errNameIsEmpty = errors.New("name is empty")
errNameContainsSlash = errors.New("name contains /")
)
// parseName turns a name as found in the page into a remote path or returns an error
func parseName(base *url.URL, name string) (string, error) {
// make URL absolute
u, err := rest.URLJoin(base, name)
if err != nil {
return "", errURLJoinFailed
}
// check it doesn't have URL parameters
uStr := u.String()
if strings.Index(uStr, "?") >= 0 {
return "", errFoundQuestionMark
}
// check that this is going back to the same host and scheme
if base.Host != u.Host {
return "", errHostMismatch
}
if base.Scheme != u.Scheme {
return "", errSchemeMismatch
}
// check has path prefix
if !strings.HasPrefix(u.Path, base.Path) {
return "", errNotUnderRoot
}
// calculate the name relative to the base
name = u.Path[len(base.Path):]
// mustn't be empty
if name == "" {
return "", errNameIsEmpty
}
// mustn't contain a / - we are looking for a single level directory
slash := strings.Index(name, "/")
if slash >= 0 && slash != len(name)-1 {
return "", errNameContainsSlash
}
return name, nil
}
// parse turns HTML for a directory into names
// base should be the base URL to resolve any relative names from
func parse(base *url.URL, in io.Reader) (names []string, err error) {
doc, err := html.Parse(in)
if err != nil {
return nil, err
}
var walk func(*html.Node)
walk = func(n *html.Node) {
if n.Type == html.ElementNode && n.Data == "a" {
for _, a := range n.Attr {
if a.Key == "href" {
name, err := parseName(base, a.Val)
if err == nil {
names = append(names, name)
}
break
}
}
}
for c := n.FirstChild; c != nil; c = c.NextSibling {
walk(c)
}
}
walk(doc)
return names, nil
}
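// parse keeps only the anchors that parseName accepts, so query links and
// off-site links from a typical index page are dropped. TestParseSketch is a
// hypothetical in-package test shown only to illustrate that; it assumes the
// strings and net/url imports plus the testify helpers already used by this
// backend's tests.
func TestParseSketch(t *testing.T) {
	u, err := url.Parse("http://example.com/dir/")
	require.NoError(t, err)
	names, err := parse(u, strings.NewReader(
		`<a href="file.txt">file.txt</a> <a href="sub/">sub/</a> <a href="?C=N;O=D">Name</a>`))
	require.NoError(t, err)
	assert.Equal(t, []string{"file.txt", "sub/"}, names)
}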
// Read the directory passed in
func (f *Fs) readDir(dir string) (names []string, err error) {
URL := f.url(dir)
u, err := url.Parse(URL)
if err != nil {
return nil, errors.Wrap(err, "failed to readDir")
}
if !strings.HasSuffix(URL, "/") {
return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
}
res, err := f.httpClient.Get(URL)
if err == nil && res.StatusCode == http.StatusNotFound {
return nil, fs.ErrorDirNotFound
}
err = statusError(res, err)
if err != nil {
return nil, errors.Wrap(err, "failed to readDir")
}
defer fs.CheckClose(res.Body, &err)
contentType := strings.SplitN(res.Header.Get("Content-Type"), ";", 2)[0]
switch contentType {
case "text/html":
names, err = parse(u, res.Body)
if err != nil {
return nil, errors.Wrap(err, "readDir")
}
default:
return nil, errors.Errorf("Can't parse content type %q", contentType)
}
return names, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
if !strings.HasSuffix(dir, "/") && dir != "" {
dir += "/"
}
names, err := f.readDir(dir)
if err != nil {
return nil, errors.Wrapf(err, "error listing %q", dir)
}
for _, name := range names {
isDir := name[len(name)-1] == '/'
name = strings.TrimRight(name, "/")
remote := path.Join(dir, name)
if isDir {
dir := fs.NewDir(remote, timeUnset)
entries = append(entries, dir)
} else {
file := &Object{
fs: f,
remote: remote,
}
if err = file.stat(); err != nil {
fs.Debugf(remote, "skipping because of error: %v", err)
continue
}
entries = append(entries, file)
}
}
return entries, nil
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return nil, errorReadOnly
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return nil, errorReadOnly
}
// Fs is the filesystem this remote http file object is located within
func (o *Object) Fs() fs.Info {
return o.fs
}
// String returns the URL to the remote HTTP file
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote the name of the remote HTTP file, relative to the fs root
func (o *Object) Remote() string {
return o.remote
}
// Hash returns "" since HTTP (in Go or OpenSSH) doesn't support remote calculation of hashes
func (o *Object) Hash(r hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// Size returns the size in bytes of the remote http file
func (o *Object) Size() int64 {
return o.size
}
// ModTime returns the modification time of the remote http file
func (o *Object) ModTime() time.Time {
return o.modTime
}
// url returns the native url of the object
func (o *Object) url() string {
return o.fs.url(o.remote)
}
// stat updates the info field in the Object
func (o *Object) stat() error {
url := o.url()
res, err := o.fs.httpClient.Head(url)
err = statusError(res, err)
if err != nil {
return errors.Wrap(err, "failed to stat")
}
t, err := http.ParseTime(res.Header.Get("Last-Modified"))
if err != nil {
t = timeUnset
}
o.size = parseInt64(res.Header.Get("Content-Length"), -1)
o.modTime = t
o.contentType = res.Header.Get("Content-Type")
return nil
}
// SetModTime sets the modification time - not possible on a read-only http remote
func (o *Object) SetModTime(modTime time.Time) error {
return errorReadOnly
}
// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc)
func (o *Object) Storable() bool {
return true
}
// Open a remote http file object for reading. Seek is supported
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
url := o.url()
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, errors.Wrap(err, "Open failed")
}
// Add optional headers
for k, v := range fs.OpenOptionHeaders(options) {
req.Header.Add(k, v)
}
// Do the request
res, err := o.fs.httpClient.Do(req)
err = statusError(res, err)
if err != nil {
return nil, errors.Wrap(err, "Open failed")
}
return res.Body, nil
}
// Hashes returns hash.HashNone to indicate remote hashing is unavailable
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.None)
}
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(dir string) error {
return errorReadOnly
}
// Remove a remote http file object
func (o *Object) Remove() error {
return errorReadOnly
}
// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(dir string) error {
return errorReadOnly
}
// Update in to the object with the modTime given of the given size
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return errorReadOnly
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType() string {
return o.contentType
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
)

View File

@@ -1,327 +0,0 @@
// +build go1.8
package http
import (
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"os"
"path/filepath"
"sort"
"testing"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/lib/rest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
remoteName = "TestHTTP"
testPath = "test"
filesPath = filepath.Join(testPath, "files")
)
// prepareServer prepares the test server and returns a function to tidy it up afterwards
func prepareServer(t *testing.T) (configmap.Simple, func()) {
// file server for test/files
fileServer := http.FileServer(http.Dir(filesPath))
// Make the test server
ts := httptest.NewServer(fileServer)
// Configure the remote
config.LoadConfig()
// fs.Config.LogLevel = fs.LogLevelDebug
// fs.Config.DumpHeaders = true
// fs.Config.DumpBodies = true
// config.FileSet(remoteName, "type", "http")
// config.FileSet(remoteName, "url", ts.URL)
m := configmap.Simple{
"type": "http",
"url": ts.URL,
}
// return a function to tidy up
return m, ts.Close
}
// prepare the test server and return a function to tidy it up afterwards
func prepare(t *testing.T) (fs.Fs, func()) {
m, tidy := prepareServer(t)
// Instantiate it
f, err := NewFs(remoteName, "", m)
require.NoError(t, err)
return f, tidy
}
func testListRoot(t *testing.T, f fs.Fs) {
entries, err := f.List("")
require.NoError(t, err)
sort.Sort(entries)
require.Equal(t, 4, len(entries))
e := entries[0]
assert.Equal(t, "four", e.Remote())
assert.Equal(t, int64(-1), e.Size())
_, ok := e.(fs.Directory)
assert.True(t, ok)
e = entries[1]
assert.Equal(t, "one%.txt", e.Remote())
assert.Equal(t, int64(6), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
e = entries[2]
assert.Equal(t, "three", e.Remote())
assert.Equal(t, int64(-1), e.Size())
_, ok = e.(fs.Directory)
assert.True(t, ok)
e = entries[3]
assert.Equal(t, "two.html", e.Remote())
assert.Equal(t, int64(7), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
}
func TestListRoot(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
testListRoot(t, f)
}
func TestListSubDir(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
entries, err := f.List("three")
require.NoError(t, err)
sort.Sort(entries)
assert.Equal(t, 1, len(entries))
e := entries[0]
assert.Equal(t, "three/underthree.txt", e.Remote())
assert.Equal(t, int64(9), e.Size())
_, ok := e.(*Object)
assert.True(t, ok)
}
func TestNewObject(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
o, err := f.NewObject("four/under four.txt")
require.NoError(t, err)
assert.Equal(t, "four/under four.txt", o.Remote())
assert.Equal(t, int64(9), o.Size())
_, ok := o.(*Object)
assert.True(t, ok)
// Test the time is correct on the object
tObj := o.ModTime()
fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
require.NoError(t, err)
tFile := fi.ModTime()
dt, ok := fstest.CheckTimeEqualWithPrecision(tObj, tFile, time.Second)
assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))
}
func TestOpen(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
o, err := f.NewObject("four/under four.txt")
require.NoError(t, err)
// Test normal read
fd, err := o.Open()
require.NoError(t, err)
data, err := ioutil.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, "beetroot\n", string(data))
// Test with range request
fd, err = o.Open(&fs.RangeOption{Start: 1, End: 5})
require.NoError(t, err)
data, err = ioutil.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, "eetro", string(data))
}
func TestMimeType(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
o, err := f.NewObject("four/under four.txt")
require.NoError(t, err)
do, ok := o.(fs.MimeTyper)
require.True(t, ok)
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType())
}
func TestIsAFileRoot(t *testing.T) {
m, tidy := prepareServer(t)
defer tidy()
f, err := NewFs(remoteName, "one%.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)
testListRoot(t, f)
}
func TestIsAFileSubDir(t *testing.T) {
m, tidy := prepareServer(t)
defer tidy()
f, err := NewFs(remoteName, "three/underthree.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)
entries, err := f.List("")
require.NoError(t, err)
sort.Sort(entries)
assert.Equal(t, 1, len(entries))
e := entries[0]
assert.Equal(t, "underthree.txt", e.Remote())
assert.Equal(t, int64(9), e.Size())
_, ok := e.(*Object)
assert.True(t, ok)
}
func TestParseName(t *testing.T) {
for i, test := range []struct {
base string
val string
wantErr error
want string
}{
{"http://example.com/", "potato", nil, "potato"},
{"http://example.com/dir/", "potato", nil, "potato"},
{"http://example.com/dir/", "potato?download=true", errFoundQuestionMark, ""},
{"http://example.com/dir/", "../dir/potato", nil, "potato"},
{"http://example.com/dir/", "..", errNotUnderRoot, ""},
{"http://example.com/dir/", "http://example.com/", errNotUnderRoot, ""},
{"http://example.com/dir/", "http://example.com/dir/", errNameIsEmpty, ""},
{"http://example.com/dir/", "http://example.com/dir/potato", nil, "potato"},
{"http://example.com/dir/", "https://example.com/dir/potato", errSchemeMismatch, ""},
{"http://example.com/dir/", "http://notexample.com/dir/potato", errHostMismatch, ""},
{"http://example.com/dir/", "/dir/", errNameIsEmpty, ""},
{"http://example.com/dir/", "/dir/potato", nil, "potato"},
{"http://example.com/dir/", "subdir/potato", errNameContainsSlash, ""},
{"http://example.com/dir/", "With percent %25.txt", nil, "With percent %.txt"},
{"http://example.com/dir/", "With colon :", errURLJoinFailed, ""},
{"http://example.com/dir/", rest.URLPathEscape("With colon :"), nil, "With colon :"},
{"http://example.com/Dungeons%20%26%20Dragons/", "/Dungeons%20&%20Dragons/D%26D%20Basic%20%28Holmes%2C%20B%2C%20X%2C%20BECMI%29/", nil, "D&D Basic (Holmes, B, X, BECMI)/"},
} {
u, err := url.Parse(test.base)
require.NoError(t, err)
got, gotErr := parseName(u, test.val)
what := fmt.Sprintf("test %d base=%q, val=%q", i, test.base, test.val)
assert.Equal(t, test.wantErr, gotErr, what)
assert.Equal(t, test.want, got, what)
}
}
// Load HTML from the file given and parse it, checking it against the entries passed in
func parseHTML(t *testing.T, name string, base string, want []string) {
in, err := os.Open(filepath.Join(testPath, "index_files", name))
require.NoError(t, err)
defer func() {
require.NoError(t, in.Close())
}()
if base == "" {
base = "http://example.com/"
}
u, err := url.Parse(base)
require.NoError(t, err)
entries, err := parse(u, in)
require.NoError(t, err)
assert.Equal(t, want, entries)
}
func TestParseEmpty(t *testing.T) {
parseHTML(t, "empty.html", "", []string(nil))
}
func TestParseApache(t *testing.T) {
parseHTML(t, "apache.html", "http://example.com/nick/pub/", []string{
"SWIG-embed.tar.gz",
"avi2dvd.pl",
"cambert.exe",
"cambert.gz",
"fedora_demo.gz",
"gchq-challenge/",
"mandelterm/",
"pgp-key.txt",
"pymath/",
"rclone",
"readdir.exe",
"rush_hour_solver_cut_down.py",
"snake-puzzle/",
"stressdisk/",
"timer-test",
"words-to-regexp.pl",
"Now 100% better.mp3",
"Now better.mp3",
})
}
func TestParseMemstore(t *testing.T) {
parseHTML(t, "memstore.html", "", []string{
"test/",
"v1.35/",
"v1.36-01-g503cd84/",
"rclone-beta-latest-freebsd-386.zip",
"rclone-beta-latest-freebsd-amd64.zip",
"rclone-beta-latest-windows-amd64.zip",
})
}
func TestParseNginx(t *testing.T) {
parseHTML(t, "nginx.html", "", []string{
"deltas/",
"objects/",
"refs/",
"state/",
"config",
"summary",
})
}
func TestParseCaddy(t *testing.T) {
parseHTML(t, "caddy.html", "", []string{
"mimetype.zip",
"rclone-delete-empty-dirs.py",
"rclone-show-empty-dirs.py",
"stat-windows-386.zip",
"v1.36-155-gcf29ee8b-team-driveβ/",
"v1.36-156-gca76b3fb-team-driveβ/",
"v1.36-156-ge1f0e0f5-team-driveβ/",
"v1.36-22-g06ea13a-ssh-agentβ/",
})
}

View File

@@ -1 +0,0 @@
beetroot

View File

@@ -1 +0,0 @@
hello

View File

@@ -1 +0,0 @@
rutabaga

View File

@@ -1 +0,0 @@
potato

View File

@@ -1,32 +0,0 @@
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
<html>
<head>
<title>Index of /nick/pub</title>
</head>
<body>
<h1>Index of /nick/pub</h1>
<table><tr><th><img src="/icons/blank.gif" alt="[ICO]"></th><th><a href="?C=N;O=D">Name</a></th><th><a href="?C=M;O=A">Last modified</a></th><th><a href="?C=S;O=A">Size</a></th><th><a href="?C=D;O=A">Description</a></th></tr><tr><th colspan="5"><hr></th></tr>
<tr><td valign="top"><img src="/icons/back.gif" alt="[DIR]"></td><td><a href="/nick/">Parent Directory</a></td><td>&nbsp;</td><td align="right"> - </td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/compressed.gif" alt="[ ]"></td><td><a href="SWIG-embed.tar.gz">SWIG-embed.tar.gz</a></td><td align="right">29-Nov-2005 16:27 </td><td align="right">2.3K</td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/text.gif" alt="[TXT]"></td><td><a href="avi2dvd.pl">avi2dvd.pl</a></td><td align="right">14-Apr-2010 23:07 </td><td align="right"> 17K</td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/binary.gif" alt="[ ]"></td><td><a href="cambert.exe">cambert.exe</a></td><td align="right">15-Dec-2006 18:07 </td><td align="right"> 54K</td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/compressed.gif" alt="[ ]"></td><td><a href="cambert.gz">cambert.gz</a></td><td align="right">14-Apr-2010 23:07 </td><td align="right"> 18K</td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/compressed.gif" alt="[ ]"></td><td><a href="fedora_demo.gz">fedora_demo.gz</a></td><td align="right">08-Jun-2007 11:01 </td><td align="right">1.0M</td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/folder.gif" alt="[DIR]"></td><td><a href="gchq-challenge/">gchq-challenge/</a></td><td align="right">24-Dec-2016 15:24 </td><td align="right"> - </td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/folder.gif" alt="[DIR]"></td><td><a href="mandelterm/">mandelterm/</a></td><td align="right">13-Jul-2013 22:22 </td><td align="right"> - </td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/text.gif" alt="[TXT]"></td><td><a href="pgp-key.txt">pgp-key.txt</a></td><td align="right">14-Apr-2010 23:07 </td><td align="right">400 </td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/folder.gif" alt="[DIR]"></td><td><a href="pymath/">pymath/</a></td><td align="right">24-Dec-2016 15:24 </td><td align="right"> - </td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="rclone">rclone</a></td><td align="right">09-May-2017 17:15 </td><td align="right"> 22M</td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/binary.gif" alt="[ ]"></td><td><a href="readdir.exe">readdir.exe</a></td><td align="right">21-Oct-2016 14:47 </td><td align="right">1.6M</td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/text.gif" alt="[TXT]"></td><td><a href="rush_hour_solver_cut_down.py">rush_hour_solver_cut_down.py</a></td><td align="right">23-Jul-2009 11:44 </td><td align="right"> 14K</td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/folder.gif" alt="[DIR]"></td><td><a href="snake-puzzle/">snake-puzzle/</a></td><td align="right">25-Sep-2016 20:56 </td><td align="right"> - </td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/folder.gif" alt="[DIR]"></td><td><a href="stressdisk/">stressdisk/</a></td><td align="right">08-Nov-2016 14:25 </td><td align="right"> - </td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="timer-test">timer-test</a></td><td align="right">09-May-2017 17:05 </td><td align="right">1.5M</td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/text.gif" alt="[TXT]"></td><td><a href="words-to-regexp.pl">words-to-regexp.pl</a></td><td align="right">01-Mar-2005 20:43 </td><td align="right">6.0K</td><td>&nbsp;</td></tr>
<tr><th colspan="5"><hr></th></tr>
<!-- some extras from https://github.com/ncw/rclone/issues/1573 -->
<tr><td valign="top"><img src="/icons/sound2.gif" alt="[SND]"></td><td><a href="Now%20100%25%20better.mp3">Now 100% better.mp3</a></td><td align="right">2017-08-01 11:41 </td><td align="right"> 0 </td><td>&nbsp;</td></tr>
<tr><td valign="top"><img src="/icons/sound2.gif" alt="[SND]"></td><td><a href="Now%20better.mp3">Now better.mp3</a></td><td align="right">2017-08-01 11:41 </td><td align="right"> 0 </td><td>&nbsp;</td></tr>
</table>
</body></html>

View File

@@ -1,378 +0,0 @@
<!DOCTYPE html>
<html>
<head>
<title>/</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<style>
* { padding: 0; margin: 0; }
body {
font-family: sans-serif;
text-rendering: optimizespeed;
}
a {
color: #006ed3;
text-decoration: none;
}
a:hover,
h1 a:hover {
color: #319cff;
}
header,
#summary {
padding-left: 5%;
padding-right: 5%;
}
th:first-child,
td:first-child {
padding-left: 5%;
}
th:last-child,
td:last-child {
padding-right: 5%;
}
header {
padding-top: 25px;
padding-bottom: 15px;
background-color: #f2f2f2;
}
h1 {
font-size: 20px;
font-weight: normal;
white-space: nowrap;
overflow-x: hidden;
text-overflow: ellipsis;
}
h1 a {
color: inherit;
}
h1 a:hover {
text-decoration: underline;
}
main {
display: block;
}
.meta {
font-size: 12px;
font-family: Verdana, sans-serif;
border-bottom: 1px solid #9C9C9C;
padding-top: 10px;
padding-bottom: 10px;
}
.meta-item {
margin-right: 1em;
}
#filter {
padding: 4px;
border: 1px solid #CCC;
}
table {
width: 100%;
border-collapse: collapse;
}
tr {
border-bottom: 1px dashed #dadada;
}
tbody tr:hover {
background-color: #ffffec;
}
th,
td {
text-align: left;
padding: 10px 0;
}
th {
padding-top: 15px;
padding-bottom: 15px;
font-size: 16px;
white-space: nowrap;
}
th a {
color: black;
}
th svg {
vertical-align: middle;
}
td {
font-size: 14px;
}
td:first-child {
width: 50%;
}
th:last-child,
td:last-child {
text-align: right;
}
td:first-child svg {
position: absolute;
}
td .name,
td .goup {
margin-left: 1.75em;
word-break: break-all;
overflow-wrap: break-word;
white-space: pre-wrap;
}
footer {
padding: 40px 20px;
font-size: 12px;
text-align: center;
}
@media (max-width: 600px) {
.hideable {
display: none;
}
td:first-child {
width: auto;
}
th:nth-child(2),
td:nth-child(2) {
padding-right: 5%;
text-align: right;
}
}
</style>
</head>
<body>
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" height="0" width="0" style="position: absolute;">
<defs>
<!-- Folder -->
<linearGradient id="f" y2="640" gradientUnits="userSpaceOnUse" x2="244.84" gradientTransform="matrix(.97319 0 0 1.0135 -.50695 -13.679)" y1="415.75" x1="244.84">
<stop stop-color="#b3ddfd" offset="0"/>
<stop stop-color="#69c" offset="1"/>
</linearGradient>
<linearGradient id="e" y2="571.06" gradientUnits="userSpaceOnUse" x2="238.03" gradientTransform="translate(0,2)" y1="346.05" x1="236.26">
<stop stop-color="#ace" offset="0"/>
<stop stop-color="#369" offset="1"/>
</linearGradient>
<g id="folder" transform="translate(-266.06 -193.36)">
<g transform="matrix(.066019 0 0 .066019 264.2 170.93)">
<g transform="matrix(1.4738 0 0 1.4738 -52.053 -166.93)">
<path fill="#69c" d="m98.424 343.78c-11.08 0-20 8.92-20 20v48.5 33.719 105.06c0 11.08 8.92 20 20 20h279.22c11.08 0 20-8.92 20-20v-138.78c0-11.08-8.92-20-20-20h-117.12c-7.5478-1.1844-9.7958-6.8483-10.375-11.312v-5.625-11.562c0-11.08-8.92-20-20-20h-131.72z"/>
<rect rx="12.885" ry="12.199" height="227.28" width="366.69" y="409.69" x="54.428" fill="#369"/>
<path fill="url(#e)" d="m98.424 345.78c-11.08 0-20 8.92-20 20v48.5 33.719 105.06c0 11.08 8.92 20 20 20h279.22c11.08 0 20-8.92 20-20v-138.78c0-11.08-8.92-20-20-20h-117.12c-7.5478-1.1844-9.7958-6.8483-10.375-11.312v-5.625-11.562c0-11.08-8.92-20-20-20h-131.72z"/>
<rect rx="12.885" ry="12.199" height="227.28" width="366.69" y="407.69" x="54.428" fill="url(#f)"/>
</g>
</g>
</g>
<!-- File -->
<linearGradient id="a">
<stop stop-color="#cbcbcb" offset="0"/>
<stop stop-color="#f0f0f0" offset=".34923"/>
<stop stop-color="#e2e2e2" offset="1"/>
</linearGradient>
<linearGradient id="d" y2="686.15" xlink:href="#a" gradientUnits="userSpaceOnUse" y1="207.83" gradientTransform="matrix(.28346 0 0 .31053 -608.52 485.11)" x2="380.1" x1="749.25"/>
<linearGradient id="c" y2="287.74" xlink:href="#a" gradientUnits="userSpaceOnUse" y1="169.44" gradientTransform="matrix(.28342 0 0 .31057 -608.52 485.11)" x2="622.33" x1="741.64"/>
<linearGradient id="b" y2="418.54" gradientUnits="userSpaceOnUse" y1="236.13" gradientTransform="matrix(.29343 0 0 .29999 -608.52 485.11)" x2="330.88" x1="687.96">
<stop stop-color="#fff" offset="0"/>
<stop stop-color="#fff" stop-opacity="0" offset="1"/>
</linearGradient>
<g id="file" transform="translate(-278.15 -216.59)">
<g fill-rule="evenodd" transform="matrix(.19775 0 0 .19775 381.05 112.68)">
<path d="m-520.17 525.5v36.739 36.739 36.739 36.739h33.528 33.528 33.528 33.528v-36.739-36.739-36.739l-33.528-36.739h-33.528-33.528-33.528z" stroke-opacity=".36478" stroke-width=".42649" fill="#fff"/>
<g>
<path d="m-520.11 525.68v36.739 36.739 36.739 36.739h33.528 33.528 33.528 33.528v-36.739-36.739-36.739l-33.528-36.739h-33.528-33.528-33.528z" stroke-opacity=".36478" stroke="#000" stroke-width=".42649" fill="url(#d)"/>
<path d="m-386 562.42c-10.108-2.9925-23.206-2.5682-33.101-0.86253 1.7084-10.962 1.922-24.701-0.4271-35.877l33.528 36.739z" stroke-width=".95407pt" fill="url(#c)"/>
<path d="m-519.13 537-0.60402 134.7h131.68l0.0755-33.296c-2.9446 1.1325-32.692-40.998-70.141-39.186-37.483 1.8137-27.785-56.777-61.006-62.214z" stroke-width="1pt" fill="url(#b)"/>
</g>
</g>
</g>
<!-- Up arrow -->
<g id="up-arrow" transform="translate(-279.22 -208.12)">
<path transform="matrix(.22413 0 0 .12089 335.67 164.35)" stroke-width="0" d="m-194.17 412.01h-28.827-28.827l14.414-24.965 14.414-24.965 14.414 24.965z"/>
</g>
<!-- Down arrow -->
<g id="down-arrow" transform="translate(-279.22 -208.12)">
<path transform="matrix(.22413 0 0 -.12089 335.67 257.93)" stroke-width="0" d="m-194.17 412.01h-28.827-28.827l14.414-24.965 14.414-24.965 14.414 24.965z"/>
</g>
</defs>
</svg>
<header>
<h1>
<a href="/">/</a>
</h1>
</header>
<main>
<div class="meta">
<div id="summary">
<span class="meta-item"><b>4</b> directories</span>
<span class="meta-item"><b>4</b> files</span>
<span class="meta-item"><input type="text" placeholder="filter" id="filter" onkeyup='filter()'></span>
</div>
</div>
<div class="listing">
<table aria-describedby="summary">
<thead>
<tr>
<th>
<a href="?sort=name&order=desc">Name <svg width="1em" height=".4em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#up-arrow"></use></svg></a>
</th>
<th>
<a href="?sort=size&order=asc">Size</a>
</th>
<th class="hideable">
<a href="?sort=time&order=asc">Modified</a>
</th>
</tr>
</thead>
<tbody>
<tr class="file">
<td>
<a href="./mimetype.zip">
<svg width="1.5em" height="1em" version="1.1" viewBox="0 0 26.604381 29.144726"><use xlink:href="#file"></use></svg>
<span class="name">mimetype.zip</span>
</a>
</td>
<td data-order="783696">765 KiB</td>
<td class="hideable"><time datetime="2016-04-04T15:36:49Z">04/04/2016 03:36:49 PM +00:00</time></td>
</tr>
<tr class="file">
<td>
<a href="./rclone-delete-empty-dirs.py">
<svg width="1.5em" height="1em" version="1.1" viewBox="0 0 26.604381 29.144726"><use xlink:href="#file"></use></svg>
<span class="name">rclone-delete-empty-dirs.py</span>
</a>
</td>
<td data-order="1271">1.2 KiB</td>
<td class="hideable"><time datetime="2016-10-26T16:05:08Z">10/26/2016 04:05:08 PM +00:00</time></td>
</tr>
<tr class="file">
<td>
<a href="./rclone-show-empty-dirs.py">
<svg width="1.5em" height="1em" version="1.1" viewBox="0 0 26.604381 29.144726"><use xlink:href="#file"></use></svg>
<span class="name">rclone-show-empty-dirs.py</span>
</a>
</td>
<td data-order="868">868 B</td>
<td class="hideable"><time datetime="2016-10-26T09:29:34Z">10/26/2016 09:29:34 AM +00:00</time></td>
</tr>
<tr class="file">
<td>
<a href="./stat-windows-386.zip">
<svg width="1.5em" height="1em" version="1.1" viewBox="0 0 26.604381 29.144726"><use xlink:href="#file"></use></svg>
<span class="name">stat-windows-386.zip</span>
</a>
</td>
<td data-order="704960">688 KiB</td>
<td class="hideable"><time datetime="2016-08-14T20:44:58Z">08/14/2016 08:44:58 PM +00:00</time></td>
</tr>
<tr class="file">
<td>
<a href="./v1.36-155-gcf29ee8b-team-drive%CE%B2/">
<svg width="1.5em" height="1em" version="1.1" viewBox="0 0 35.678803 28.527945"><use xlink:href="#folder"></use></svg>
<span class="name">v1.36-155-gcf29ee8b-team-driveβ</span>
</a>
</td>
<td data-order="-1">&mdash;</td>
<td class="hideable"><time datetime="2017-06-01T21:28:09Z">06/01/2017 09:28:09 PM +00:00</time></td>
</tr>
<tr class="file">
<td>
<a href="./v1.36-156-gca76b3fb-team-drive%CE%B2/">
<svg width="1.5em" height="1em" version="1.1" viewBox="0 0 35.678803 28.527945"><use xlink:href="#folder"></use></svg>
<span class="name">v1.36-156-gca76b3fb-team-driveβ</span>
</a>
</td>
<td data-order="-1">&mdash;</td>
<td class="hideable"><time datetime="2017-06-04T08:53:04Z">06/04/2017 08:53:04 AM +00:00</time></td>
</tr>
<tr class="file">
<td>
<a href="./v1.36-156-ge1f0e0f5-team-drive%CE%B2/">
<svg width="1.5em" height="1em" version="1.1" viewBox="0 0 35.678803 28.527945"><use xlink:href="#folder"></use></svg>
<span class="name">v1.36-156-ge1f0e0f5-team-driveβ</span>
</a>
</td>
<td data-order="-1">&mdash;</td>
<td class="hideable"><time datetime="2017-06-02T10:38:05Z">06/02/2017 10:38:05 AM +00:00</time></td>
</tr>
<tr class="file">
<td>
<a href="./v1.36-22-g06ea13a-ssh-agent%CE%B2/">
<svg width="1.5em" height="1em" version="1.1" viewBox="0 0 35.678803 28.527945"><use xlink:href="#folder"></use></svg>
<span class="name">v1.36-22-g06ea13a-ssh-agentβ</span>
</a>
</td>
<td data-order="-1">&mdash;</td>
<td class="hideable"><time datetime="2017-04-10T13:58:02Z">04/10/2017 01:58:02 PM +00:00</time></td>
</tr>
</tbody>
</table>
</div>
</main>
<footer>
Served with <a rel="noopener noreferrer" href="https://caddyserver.com">Caddy</a>
</footer>
<script>
var filterEl = document.getElementById('filter');
function filter() {
var q = filterEl.value.trim().toLowerCase();
var elems = document.querySelectorAll('tr.file');
elems.forEach(function(el) {
if (!q) {
el.style.display = '';
return;
}
var nameEl = el.querySelector('.name');
var nameVal = nameEl.textContent.trim().toLowerCase();
if (nameVal.indexOf(q) !== -1) {
el.style.display = '';
} else {
el.style.display = 'none';
}
});
}
function localizeDatetime(e, index, ar) {
if (e.textContent === undefined) {
return;
}
var d = new Date(e.getAttribute('datetime'));
if (isNaN(d)) {
d = new Date(e.textContent);
if (isNaN(d)) {
return;
}
}
e.textContent = d.toLocaleString();
}
var timeList = Array.prototype.slice.call(document.getElementsByTagName("time"));
timeList.forEach(localizeDatetime);
</script>
</body>
</html>

View File

@@ -1,77 +0,0 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<meta name="robots" content="noindex" />
<title>Index of /</title>
</head>
<body>
<div id="content">
<h1>Index of /</h1>
<table>
<thead>
<tr>
<th>Name</th>
<th>Type</th>
<th>Size</th>
<th>Last modified</th>
<th>MD5</th>
</tr>
</thead>
<tbody>
<tr>
<td><a href="test/">test/</a></td>
<td>application/directory</td>
<td>0 bytes</td>
<td>-</td>
<td>-</td>
</tr>
<tr>
<td><a href="v1.35/">v1.35/</a></td>
<td>application/directory</td>
<td>0 bytes</td>
<td>-</td>
<td>-</td>
</tr>
<tr>
<td><a href="v1.36-01-g503cd84/">v1.36-01-g503cd84/</a></td>
<td>application/directory</td>
<td>0 bytes</td>
<td>-</td>
<td>-</td>
</tr>
<tr>
<td><a href="rclone-beta-latest-freebsd-386.zip">rclone-beta-latest-freebsd-386.zip</a></td>
<td>application/zip</td>
<td>4.6 MB</td>
<td>2017-06-19 14:04:52</td>
<td>e747003c69c81e675f206a715264bfa8</td>
</tr>
<tr>
<td><a href="rclone-beta-latest-freebsd-amd64.zip">rclone-beta-latest-freebsd-amd64.zip</a></td>
<td>application/zip</td>
<td>5.0 MB</td>
<td>2017-06-19 14:04:53</td>
<td>ff30b5e9bf2863a2373069142e6f2b7f</td>
</tr>
<tr>
<td><a href="rclone-beta-latest-windows-amd64.zip">rclone-beta-latest-windows-amd64.zip</a></td>
<td>application/x-zip-compressed</td>
<td>4.9 MB</td>
<td>2017-06-19 13:56:02</td>
<td>851a5547a0495cbbd94cbc90a80ed6f5</td>
</tr>
</tbody>
</table>
<p class="right"><a href="http://www.memset.com/"><img src="http://www.memset.com/images/Memset_logo_2010.gif" alt="Memset Ltd." /></a></p>
</div>
</body>
</html>

View File

@@ -1,12 +0,0 @@
<html>
<head><title>Index of /atomic/fedora/</title></head>
<body bgcolor="white">
<h1>Index of /atomic/fedora/</h1><hr><pre><a href="../">../</a>
<a href="deltas/">deltas/</a> 04-May-2017 21:37 -
<a href="objects/">objects/</a> 04-May-2017 20:44 -
<a href="refs/">refs/</a> 04-May-2017 20:42 -
<a href="state/">state/</a> 04-May-2017 21:36 -
<a href="config">config</a> 04-May-2017 20:42 118
<a href="summary">summary</a> 04-May-2017 21:36 806
</pre><hr></body>
</html>

View File

@@ -1,61 +0,0 @@
package hubic
import (
"net/http"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/swift"
)
// auth is an authenticator for swift
type auth struct {
f *Fs
}
// newAuth creates a swift authenticator
func newAuth(f *Fs) *auth {
return &auth{
f: f,
}
}
// Request constructs a http.Request for authentication
//
// returns nil for not needed
func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
const retries = 10
for try := 1; try <= retries; try++ {
err = a.f.getCredentials()
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
fs.Debugf(a.f, "retrying auth request %d/%d: %v", try, retries, err)
}
return nil, err
}
// Response parses the result of an http request
func (a *auth) Response(resp *http.Response) error {
return nil
}
// The public storage URL - set Internal to true to read
// internal/service net URL
func (a *auth) StorageUrl(Internal bool) string { // nolint
return a.f.credentials.Endpoint
}
// The access token
func (a *auth) Token() string {
return a.f.credentials.Token
}
// The CDN url if available
func (a *auth) CdnUrl() string { // nolint
return ""
}
// Check the interfaces are satisfied
var _ swift.Authenticator = (*auth)(nil)

View File

@@ -1,203 +0,0 @@
// Package hubic provides an interface to the Hubic object storage
// system.
package hubic
// This uses the normal swift mechanism to update the credentials and
// ignores the expires field returned by the Hubic API. This may need
// to be revisited after some actual experience.
import (
"encoding/json"
"fmt"
"log"
"net/http"
"time"
"github.com/ncw/rclone/backend/swift"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/lib/oauthutil"
swiftLib "github.com/ncw/swift"
"github.com/pkg/errors"
"golang.org/x/oauth2"
)
const (
rcloneClientID = "api_hubic_svWP970PvSWbw5G3PzrAqZ6X2uHeZBPI"
rcloneEncryptedClientSecret = "leZKCcqy9movLhDWLVXX8cSLp_FzoiAPeEJOIOMRw1A5RuC4iLEPDYPWVF46adC_MVonnLdVEOTHVstfBOZ_lY4WNp8CK_YWlpRZ9diT5YI"
)
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauth2.Config{
Scopes: []string{
"credentials.r", // Read Openstack credentials
},
Endpoint: oauth2.Endpoint{
AuthURL: "https://api.hubic.com/oauth/auth/",
TokenURL: "https://api.hubic.com/oauth/token/",
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "hubic",
Description: "Hubic",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
err := oauthutil.Config("hubic", name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: append([]fs.Option{{
Name: config.ConfigClientID,
Help: "Hubic Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Hubic Client Secret\nLeave blank normally.",
}}, swift.SharedOptions...),
})
}
// credentials is the JSON returned from the Hubic API to read the
// OpenStack credentials
type credentials struct {
Token string `json:"token"` // Openstack token
Endpoint string `json:"endpoint"` // Openstack endpoint
Expires string `json:"expires"` // Expires date - eg "2015-11-09T14:24:56+01:00"
}
// Fs represents a remote hubic
type Fs struct {
fs.Fs // wrapped Fs
features *fs.Features // optional features
client *http.Client // client for oauth api
credentials credentials // returned from the Hubic API
expires time.Time // time credentials expire
}
// Object describes a swift object
type Object struct {
*swift.Object
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Object.String()
}
// ------------------------------------------------------------
// String converts this Fs to a string
func (f *Fs) String() string {
if f.Fs == nil {
return "Hubic"
}
return fmt.Sprintf("Hubic %s", f.Fs.String())
}
// getCredentials reads the OpenStack Credentials using the Hubic API
//
// The credentials are read into the Fs
func (f *Fs) getCredentials() (err error) {
req, err := http.NewRequest("GET", "https://api.hubic.com/1.0/account/credentials", nil)
if err != nil {
return err
}
resp, err := f.client.Do(req)
if err != nil {
return err
}
defer fs.CheckClose(resp.Body, &err)
if resp.StatusCode < 200 || resp.StatusCode > 299 {
return errors.Errorf("failed to get credentials: %s", resp.Status)
}
decoder := json.NewDecoder(resp.Body)
var result credentials
err = decoder.Decode(&result)
if err != nil {
return err
}
// fs.Debugf(f, "Got credentials %+v", result)
if result.Token == "" || result.Endpoint == "" || result.Expires == "" {
return errors.New("couldn't read token, result and expired from credentials")
}
f.credentials = result
expires, err := time.Parse(time.RFC3339, result.Expires)
if err != nil {
return err
}
f.expires = expires
fs.Debugf(f, "Got swift credentials (expiry %v in %v)", f.expires, f.expires.Sub(time.Now()))
return nil
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
client, _, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Hubic")
}
f := &Fs{
client: client,
}
// Make the swift Connection
c := &swiftLib.Connection{
Auth: newAuth(f),
ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * fs.Config.Timeout, // Use the timeouts in the transport
Transport: fshttp.NewTransport(fs.Config),
}
err = c.Authenticate()
if err != nil {
return nil, errors.Wrap(err, "error authenticating swift connection")
}
// Parse config into swift.Options struct
opt := new(swift.Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// Make inner swift Fs from the connection
swiftFs, err := swift.NewFsWithConnection(opt, name, root, c, true)
if err != nil && err != fs.ErrorIsFile {
return nil, err
}
f.Fs = swiftFs
f.features = f.Fs.Features().Wrap(f)
return f, err
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.Fs
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
)

View File

@@ -1,17 +0,0 @@
// Test Hubic filesystem interface
package hubic_test
import (
"testing"
"github.com/ncw/rclone/backend/hubic"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestHubic:",
NilObject: (*hubic.Object)(nil),
})
}

View File

@@ -1,266 +0,0 @@
package api
import (
"encoding/xml"
"fmt"
"time"
"github.com/pkg/errors"
)
const (
timeFormat = "2006-01-02-T15:04:05Z0700"
)
// Time represents time values in the Jottacloud API. It uses a custom RFC3339-like format.
type Time time.Time
// UnmarshalXML turns XML into a Time
func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
var v string
if err := d.DecodeElement(&v, &start); err != nil {
return err
}
if v == "" {
*t = Time(time.Time{})
return nil
}
newTime, err := time.Parse(timeFormat, v)
if err == nil {
*t = Time(newTime)
}
return err
}
// MarshalXML turns a Time into XML
func (t *Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
return e.EncodeElement(t.String(), start)
}
// Return Time string in Jottacloud format
func (t Time) String() string { return time.Time(t).Format(timeFormat) }
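As a quick illustration (a minimal sketch, not part of the original file), the layout above is RFC3339 with an extra '-' before the 'T'; a value such as the one in the account XML below round-trips like this, assuming the timeFormat constant and Time type defined above:
// timeRoundTrip is a hypothetical helper showing the custom layout in use.
func timeRoundTrip() (string, error) {
	when, err := time.Parse(timeFormat, "2018-07-18-T21:39:10Z")
	if err != nil {
		return "", err
	}
	// Formatting the parsed value reproduces the original string.
	return Time(when).String(), nil // "2018-07-18-T21:39:10Z"
}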
// Flag is a hacky type for checking if an attribute is present
type Flag bool
// UnmarshalXMLAttr sets Flag to true if the attribute is present
func (f *Flag) UnmarshalXMLAttr(attr xml.Attr) error {
*f = true
return nil
}
// MarshalXMLAttr : Do not use
func (f *Flag) MarshalXMLAttr(name xml.Name) (xml.Attr, error) {
attr := xml.Attr{
Name: name,
Value: "false",
}
return attr, errors.New("unimplemented")
}
/*
GET http://www.jottacloud.com/JFS/<account>
<user time="2018-07-18-T21:39:10Z" host="dn-132">
<username>12qh1wsht8cssxdtwl15rqh9</username>
<account-type>free</account-type>
<locked>false</locked>
<capacity>5368709120</capacity>
<max-devices>-1</max-devices>
<max-mobile-devices>-1</max-mobile-devices>
<usage>0</usage>
<read-locked>false</read-locked>
<write-locked>false</write-locked>
<quota-write-locked>false</quota-write-locked>
<enable-sync>true</enable-sync>
<enable-foldershare>true</enable-foldershare>
<devices>
<device>
<name xml:space="preserve">Jotta</name>
<display_name xml:space="preserve">Jotta</display_name>
<type>JOTTA</type>
<sid>5c458d01-9eaf-4f23-8d3c-2486fd9704d8</sid>
<size>0</size>
<modified>2018-07-15-T22:04:59Z</modified>
</device>
</devices>
</user>
*/
// AccountInfo represents a Jottacloud account
type AccountInfo struct {
Username string `xml:"username"`
AccountType string `xml:"account-type"`
Locked bool `xml:"locked"`
Capacity int64 `xml:"capacity"`
MaxDevices int `xml:"max-devices"`
MaxMobileDevices int `xml:"max-mobile-devices"`
Usage int64 `xml:"usage"`
ReadLocked bool `xml:"read-locked"`
WriteLocked bool `xml:"write-locked"`
QuotaWriteLocked bool `xml:"quota-write-locked"`
EnableSync bool `xml:"enable-sync"`
EnableFolderShare bool `xml:"enable-foldershare"`
Devices []JottaDevice `xml:"devices>device"`
}
/*
GET http://www.jottacloud.com/JFS/<account>/<device>
<device time="2018-07-23-T20:21:50Z" host="dn-158">
<name xml:space="preserve">Jotta</name>
<display_name xml:space="preserve">Jotta</display_name>
<type>JOTTA</type>
<sid>5c458d01-9eaf-4f23-8d3c-2486fd9704d8</sid>
<size>0</size>
<modified>2018-07-15-T22:04:59Z</modified>
<user>12qh1wsht8cssxdtwl15rqh9</user>
<mountPoints>
<mountPoint>
<name xml:space="preserve">Archive</name>
<size>0</size>
<modified>2018-07-15-T22:04:59Z</modified>
</mountPoint>
<mountPoint>
<name xml:space="preserve">Shared</name>
<size>0</size>
<modified></modified>
</mountPoint>
<mountPoint>
<name xml:space="preserve">Sync</name>
<size>0</size>
<modified></modified>
</mountPoint>
</mountPoints>
<metadata first="" max="" total="3" num_mountpoints="3"/>
</device>
*/
// JottaDevice represents a Jottacloud Device
type JottaDevice struct {
Name string `xml:"name"`
DisplayName string `xml:"display_name"`
Type string `xml:"type"`
Sid string `xml:"sid"`
Size int64 `xml:"size"`
User string `xml:"user"`
MountPoints []JottaMountPoint `xml:"mountPoints>mountPoint"`
}
/*
GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>
<mountPoint time="2018-07-24-T20:35:02Z" host="dn-157">
<name xml:space="preserve">Sync</name>
<path xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta</path>
<abspath xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta</abspath>
<size>0</size>
<modified></modified>
<device>Jotta</device>
<user>12qh1wsht8cssxdtwl15rqh9</user>
<folders>
<folder name="test"/>
</folders>
<metadata first="" max="" total="1" num_folders="1" num_files="0"/>
</mountPoint>
*/
// JottaMountPoint represents a Jottacloud mountpoint
type JottaMountPoint struct {
Name string `xml:"name"`
Size int64 `xml:"size"`
Device string `xml:"device"`
Folders []JottaFolder `xml:"folders>folder"`
Files []JottaFile `xml:"files>file"`
}
/*
GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>/<folder>
<folder name="test" time="2018-07-24-T20:41:37Z" host="dn-158">
<path xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta/Sync</path>
<abspath xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta/Sync</abspath>
<folders>
<folder name="t2"/>c
</folders>
<files>
<file name="block.csv" uuid="f6553cd4-1135-48fe-8e6a-bb9565c50ef2">
<currentRevision>
<number>1</number>
<state>COMPLETED</state>
<created>2018-07-05-T15:08:02Z</created>
<modified>2018-07-05-T15:08:02Z</modified>
<mime>application/octet-stream</mime>
<size>30827730</size>
<md5>1e8a7b728ab678048df00075c9507158</md5>
<updated>2018-07-24-T20:41:10Z</updated>
</currentRevision>
</file>
</files>
<metadata first="" max="" total="2" num_folders="1" num_files="1"/>
</folder>
*/
// JottaFolder represents a JottacloudFolder
type JottaFolder struct {
XMLName xml.Name
Name string `xml:"name,attr"`
Deleted Flag `xml:"deleted,attr"`
Path string `xml:"path"`
CreatedAt Time `xml:"created"`
ModifiedAt Time `xml:"modified"`
Updated Time `xml:"updated"`
Folders []JottaFolder `xml:"folders>folder"`
Files []JottaFile `xml:"files>file"`
}
/*
GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>/.../<file>
<file name="block.csv" uuid="f6553cd4-1135-48fe-8e6a-bb9565c50ef2">
<currentRevision>
<number>1</number>
<state>COMPLETED</state>
<created>2018-07-05-T15:08:02Z</created>
<modified>2018-07-05-T15:08:02Z</modified>
<mime>application/octet-stream</mime>
<size>30827730</size>
<md5>1e8a7b728ab678048df00075c9507158</md5>
<updated>2018-07-24-T20:41:10Z</updated>
</currentRevision>
</file>
*/
// JottaFile represents a Jottacloud file
type JottaFile struct {
XMLName xml.Name
Name string `xml:"name,attr"`
Deleted Flag `xml:"deleted,attr"`
State string `xml:"currentRevision>state"`
CreatedAt Time `xml:"currentRevision>created"`
ModifiedAt Time `xml:"currentRevision>modified"`
Updated Time `xml:"currentRevision>updated"`
Size int64 `xml:"currentRevision>size"`
MimeType string `xml:"currentRevision>mime"`
MD5 string `xml:"currentRevision>md5"`
}
// Error is a custom Error for wrapping Jottacloud error responses
type Error struct {
StatusCode int `xml:"code"`
Message string `xml:"message"`
Reason string `xml:"reason"`
Cause string `xml:"cause"`
}
// Error returns a string for the error and satisfies the error interface
func (e *Error) Error() string {
out := fmt.Sprintf("error %d", e.StatusCode)
if e.Message != "" {
out += ": " + e.Message
}
if e.Reason != "" {
out += fmt.Sprintf(" (%+v)", e.Reason)
}
return out
}

View File

@@ -1,29 +0,0 @@
package api
import (
"encoding/xml"
"testing"
"time"
)
func TestMountpointEmptyModificationTime(t *testing.T) {
mountpoint := `
<mountPoint time="2018-08-12-T09:58:24Z" host="dn-157">
<name xml:space="preserve">Sync</name>
<path xml:space="preserve">/foo/Jotta</path>
<abspath xml:space="preserve">/foo/Jotta</abspath>
<size>0</size>
<modified></modified>
<device>Jotta</device>
<user>foo</user>
<metadata first="" max="" total="0" num_folders="0" num_files="0"/>
</mountPoint>
`
var jf JottaFolder
if err := xml.Unmarshal([]byte(mountpoint), &jf); err != nil {
t.Fatal(err)
}
if !time.Time(jf.ModifiedAt).IsZero() {
t.Errorf("got non-zero time, want zero")
}
}

View File

@@ -1,935 +0,0 @@
package jottacloud
import (
"bytes"
"crypto/md5"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path"
"strconv"
"strings"
"time"
"github.com/ncw/rclone/backend/jottacloud/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
)
// Globals
const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
defaultDevice = "Jotta"
defaultMountpoint = "Sync"
rootURL = "https://www.jottacloud.com/jfs/"
apiURL = "https://api.jottacloud.com"
cachePrefix = "rclone-jcmd5-"
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "jottacloud",
Description: "JottaCloud",
NewFs: NewFs,
Options: []fs.Option{{
Name: "user",
Help: "User Name",
}, {
Name: "pass",
Help: "Password.",
IsPassword: true,
}, {
Name: "mountpoint",
Help: "The mountpoint to use.",
Required: true,
Examples: []fs.OptionExample{{
Value: "Sync",
Help: "Will be synced by the official client.",
}, {
Value: "Archive",
Help: "Archive",
}},
}, {
Name: "md5_memory_limit",
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
Default: fs.SizeSuffix(10 * 1024 * 1024),
Advanced: true,
}},
})
}
// Options defines the configuration for this backend
type Options struct {
User string `config:"user"`
Pass string `config:"pass"`
Mountpoint string `config:"mountpoint"`
MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
}
// Fs represents a remote jottacloud
type Fs struct {
name string
root string
user string
opt Options
features *fs.Features
endpointURL string
srv *rest.Client
pacer *pacer.Pacer
}
// Object describes a jottacloud object
//
// Will definitely have info but maybe not meta
type Object struct {
fs *Fs
remote string
hasMetaData bool
size int64
modTime time.Time
md5 string
mimeType string
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("jottacloud root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// parsePath parses a jottacloud 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(path string) (info *api.JottaFile, err error) {
opts := rest.Opts{
Method: "GET",
Path: f.filePath(path),
}
var result api.JottaFile
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(&opts, nil, &result)
return shouldRetry(resp, err)
})
if apiErr, ok := err.(*api.Error); ok {
// does not exist
if apiErr.StatusCode == http.StatusNotFound {
return nil, fs.ErrorObjectNotFound
}
}
if err != nil {
return nil, errors.Wrap(err, "read metadata failed")
}
if result.XMLName.Local != "file" {
return nil, fs.ErrorNotAFile
}
return &result, nil
}
// getAccountInfo retrieves account information
func (f *Fs) getAccountInfo() (info *api.AccountInfo, err error) {
opts := rest.Opts{
Method: "GET",
Path: rest.URLPathEscape(f.user),
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(&opts, nil, &info)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
return info, nil
}
// setEndpointURL reads the account id and generates the API endpoint URL
func (f *Fs) setEndpointURL(mountpoint string) (err error) {
info, err := f.getAccountInfo()
if err != nil {
return errors.Wrap(err, "failed to get endpoint url")
}
f.endpointURL = rest.URLPathEscape(path.Join(info.Username, defaultDevice, mountpoint))
return nil
}
// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
// Decode error response
errResponse := new(api.Error)
err := rest.DecodeXML(resp, &errResponse)
if err != nil {
fs.Debugf(nil, "Couldn't decode error response: %v", err)
}
if errResponse.Message == "" {
errResponse.Message = resp.Status
}
if errResponse.StatusCode == 0 {
errResponse.StatusCode = resp.StatusCode
}
return errResponse
}
// filePath returns an escaped file path (f.root, file)
func (f *Fs) filePath(file string) string {
return rest.URLPathEscape(path.Join(f.endpointURL, replaceReservedChars(path.Join(f.root, file))))
}
// filePath returns an escaped file path (f.root, remote)
func (o *Object) filePath() string {
return o.fs.filePath(o.remote)
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
rootIsDir := strings.HasSuffix(root, "/")
root = parsePath(root)
user := config.FileGet(name, "user")
pass := config.FileGet(name, "pass")
if opt.Pass != "" {
var err error
opt.Pass, err = obscure.Reveal(opt.Pass)
if err != nil {
return nil, errors.Wrap(err, "couldn't decrypt password")
}
}
f := &Fs{
name: name,
root: root,
user: opt.User,
opt: *opt,
//endpointURL: rest.URLPathEscape(path.Join(user, defaultDevice, opt.Mountpoint)),
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(rootURL),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}
f.features = (&fs.Features{
CaseInsensitive: true,
CanHaveEmptyDirectories: true,
ReadMimeType: true,
WriteMimeType: true,
}).Fill(f)
if user == "" || pass == "" {
return nil, errors.New("jottacloud needs user and password")
}
f.srv.SetUserPass(opt.User, opt.Pass)
f.srv.SetErrorHandler(errorHandler)
err = f.setEndpointURL(opt.Mountpoint)
if err != nil {
return nil, errors.Wrap(err, "couldn't get account info")
}
if root != "" && !rootIsDir {
// Check to see if the root is actually an existing file
remote := path.Base(root)
f.root = path.Dir(root)
if f.root == "." {
f.root = ""
}
_, err := f.NewObject(remote)
if err != nil {
if errors.Cause(err) == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
// File doesn't exist so return old f
f.root = root
return f, nil
}
return nil, err
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *api.JottaFile) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
var err error
if info != nil {
// Set info
err = o.setMetaData(info)
} else {
err = o.readMetaData() // reads info and meta, returning an error
}
if err != nil {
return nil, err
}
return o, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}
// CreateDir makes a directory
func (f *Fs) CreateDir(path string) (jf *api.JottaFolder, err error) {
// fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
var resp *http.Response
opts := rest.Opts{
Method: "POST",
Path: f.filePath(path),
Parameters: url.Values{},
}
opts.Parameters.Set("mkDir", "true")
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(&opts, nil, &jf)
return shouldRetry(resp, err)
})
if err != nil {
//fmt.Printf("...Error %v\n", err)
return nil, err
}
// fmt.Printf("...Id %q\n", *info.Id)
return jf, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
//fmt.Printf("List: %s\n", dir)
opts := rest.Opts{
Method: "GET",
Path: f.filePath(dir),
}
var resp *http.Response
var result api.JottaFolder
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(&opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
if apiErr, ok := err.(*api.Error); ok {
// does not exist
if apiErr.StatusCode == http.StatusNotFound {
return nil, fs.ErrorDirNotFound
}
}
return nil, errors.Wrap(err, "couldn't list files")
}
if result.Deleted {
return nil, fs.ErrorDirNotFound
}
for i := range result.Folders {
item := &result.Folders[i]
if item.Deleted {
continue
}
remote := path.Join(dir, restoreReservedChars(item.Name))
d := fs.NewDir(remote, time.Time(item.ModifiedAt))
entries = append(entries, d)
}
for i := range result.Files {
item := &result.Files[i]
if item.Deleted || item.State != "COMPLETED" {
continue
}
remote := path.Join(dir, restoreReservedChars(item.Name))
o, err := f.newObjectWithInfo(remote, item)
if err != nil {
continue
}
entries = append(entries, o)
}
//fmt.Printf("Entries: %+v\n", entries)
return entries, nil
}
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Used to create new objects
func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object) {
// Temporary Object under construction
o = &Object{
fs: f,
remote: remote,
size: size,
modTime: modTime,
}
return o
}
// Put the object
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o := f.createObject(src.Remote(), src.ModTime(), src.Size())
return o, o.Update(in, src, options...)
}
// mkParentDir makes the parent of the native path dirPath if
// necessary and any directories above that
func (f *Fs) mkParentDir(dirPath string) error {
// defer log.Trace(dirPath, "")("")
// chop off trailing / if it exists
if strings.HasSuffix(dirPath, "/") {
dirPath = dirPath[:len(dirPath)-1]
}
parent := path.Dir(dirPath)
if parent == "." {
parent = ""
}
return f.Mkdir(parent)
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(dir string) error {
_, err := f.CreateDir(dir)
return err
}
// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(dir string, check bool) (err error) {
root := path.Join(f.root, dir)
if root == "" {
return errors.New("can't purge root directory")
}
// check that the directory exists
entries, err := f.List(dir)
if err != nil {
return err
}
if check {
if len(entries) != 0 {
return fs.ErrorDirectoryNotEmpty
}
}
opts := rest.Opts{
Method: "POST",
Path: f.filePath(dir),
Parameters: url.Values{},
NoResponse: true,
}
opts.Parameters.Set("dlDir", "true")
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "rmdir failed")
}
// TODO: Parse response?
return nil
}
// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(dir string) error {
return f.purgeCheck(dir, true)
}
// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge() error {
return f.purgeCheck("", false)
}
// copyOrMove copies or moves directories or files depending on the method parameter
func (f *Fs) copyOrMove(method, src, dest string) (info *api.JottaFile, err error) {
opts := rest.Opts{
Method: "POST",
Path: src,
Parameters: url.Values{},
}
opts.Parameters.Set(method, "/"+path.Join(f.endpointURL, replaceReservedChars(path.Join(f.root, dest))))
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(&opts, nil, &info)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
return info, nil
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantMove
}
err := f.mkParentDir(remote)
if err != nil {
return nil, err
}
info, err := f.copyOrMove("cp", srcObj.filePath(), remote)
if err != nil {
return nil, errors.Wrap(err, "copy failed")
}
return f.newObjectWithInfo(remote, info)
//return f.newObjectWithInfo(remote, &result)
}
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
err := f.mkParentDir(remote)
if err != nil {
return nil, err
}
info, err := f.copyOrMove("mv", srcObj.filePath(), remote)
if err != nil {
return nil, errors.Wrap(err, "move failed")
}
return f.newObjectWithInfo(remote, info)
//return f.newObjectWithInfo(remote, result)
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcPath := path.Join(srcFs.root, srcRemote)
dstPath := path.Join(f.root, dstRemote)
// Refuse to move to or from the root
if srcPath == "" || dstPath == "" {
fs.Debugf(src, "DirMove error: Can't move root")
return errors.New("can't move root directory")
}
//fmt.Printf("Move src: %s (FullPath %s), dst: %s (FullPath: %s)\n", srcRemote, srcPath, dstRemote, dstPath)
var err error
_, err = f.List(dstRemote)
if err == fs.ErrorDirNotFound {
// OK
} else if err != nil {
return err
} else {
return fs.ErrorDirExists
}
_, err = f.copyOrMove("mvDir", path.Join(f.endpointURL, replaceReservedChars(srcPath))+"/", dstRemote)
if err != nil {
return errors.Wrap(err, "moveDir failed")
}
return nil
}
// About gets quota information
func (f *Fs) About() (*fs.Usage, error) {
info, err := f.getAccountInfo()
if err != nil {
return nil, err
}
usage := &fs.Usage{
Total: fs.NewUsageValue(info.Capacity),
Used: fs.NewUsageValue(info.Usage),
}
return usage, nil
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// ---------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Hash returns the MD5 of an object returning a lowercase hex string
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
return o.md5, nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
err := o.readMetaData()
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return 0
}
return o.size
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType() string {
return o.mimeType
}
// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *api.JottaFile) (err error) {
o.hasMetaData = true
o.size = int64(info.Size)
o.md5 = info.MD5
o.mimeType = info.MimeType
o.modTime = time.Time(info.ModifiedAt)
return nil
}
func (o *Object) readMetaData() (err error) {
if o.hasMetaData {
return nil
}
info, err := o.fs.readMetaDataForPath(o.remote)
if err != nil {
return err
}
return o.setMetaData(info)
}
// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
}
return o.modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) error {
return fs.ErrorCantSetModTime
}
// Storable returns a boolean showing whether this object is storable
func (o *Object) Storable() bool {
return true
}
// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
fs.FixRangeOption(options, o.size)
var resp *http.Response
opts := rest.Opts{
Method: "GET",
Path: o.filePath(),
Parameters: url.Values{},
Options: options,
}
opts.Parameters.Set("mode", "bin")
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
return resp.Body, err
}
// Read the md5 of in returning a reader which will read the same contents
//
// The cleanup function should be called when out is finished with
// regardless of whether this function returned an error or not.
func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader, cleanup func(), err error) {
// we need a MD5
md5Hasher := md5.New()
// use the teeReader to write to the local file AND calculate the MD5 while doing so
teeReader := io.TeeReader(in, md5Hasher)
// nothing to clean up by default
cleanup = func() {}
// don't cache small files on disk to reduce wear of the disk
if size > threshold {
var tempFile *os.File
// create the cache file
tempFile, err = ioutil.TempFile("", cachePrefix)
if err != nil {
return
}
_ = os.Remove(tempFile.Name()) // Delete the file - may not work on Windows
// clean up the file after we are done downloading
cleanup = func() {
// the file should normally already be closed, but just to make sure
_ = tempFile.Close()
_ = os.Remove(tempFile.Name()) // delete the cache file after we are done - may be deleted already
}
// copy the ENTIRE file to disc and calculate the MD5 in the process
if _, err = io.Copy(tempFile, teeReader); err != nil {
return
}
// jump to the start of the local file so we can pass it along
if _, err = tempFile.Seek(0, 0); err != nil {
return
}
// replace the already read source with a reader of our cached file
out = tempFile
} else {
// that's a small file, just read it into memory
var inData []byte
inData, err = ioutil.ReadAll(teeReader)
if err != nil {
return
}
// set the reader to our read memory block
out = bytes.NewReader(inData)
}
return hex.EncodeToString(md5Hasher.Sum(nil)), out, cleanup, nil
}
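For orientation, here is a minimal hypothetical caller (not part of the original file) illustrating the cleanup contract described above; the threshold value is arbitrary and only for illustration:
// uploadWithMD5 is a hypothetical sketch of using readMD5: cleanup must be
// called even when readMD5 fails, and out replays exactly the hashed bytes.
func uploadWithMD5(in io.Reader, size, threshold int64) (string, error) {
	md5sum, out, cleanup, err := readMD5(in, size, threshold)
	defer cleanup()
	if err != nil {
		return "", err
	}
	// out would normally become the upload request body; here we just drain it.
	_, err = io.Copy(ioutil.Discard, out)
	return md5sum, err
}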
// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
size := src.Size()
md5String, err := src.Hash(hash.MD5)
if err != nil || md5String == "" {
// unwrap the accounting from the input, we use wrap to put it
// back on after the buffering
var wrap accounting.WrapFn
in, wrap = accounting.UnWrap(in)
var cleanup func()
md5String, in, cleanup, err = readMD5(in, size, int64(o.fs.opt.MD5MemoryThreshold))
defer cleanup()
if err != nil {
return errors.Wrap(err, "failed to calculate MD5")
}
// Wrap the accounting back onto the stream
in = wrap(in)
}
var resp *http.Response
var result api.JottaFile
opts := rest.Opts{
Method: "POST",
Path: o.filePath(),
Body: in,
ContentType: fs.MimeType(src),
ContentLength: &size,
ExtraHeaders: make(map[string]string),
Parameters: url.Values{},
}
opts.ExtraHeaders["JMd5"] = md5String
opts.Parameters.Set("cphash", md5String)
opts.ExtraHeaders["JSize"] = strconv.FormatInt(size, 10)
// opts.ExtraHeaders["JCreated"] = api.Time(src.ModTime()).String()
opts.ExtraHeaders["JModified"] = api.Time(src.ModTime()).String()
// Parameters observed in other implementations
//opts.ExtraHeaders["X-Jfs-DeviceName"] = "Jotta"
//opts.ExtraHeaders["X-Jfs-Devicename-Base64"] = ""
//opts.ExtraHeaders["X-Jftp-Version"] = "2.4" this appears to be the current version
//opts.ExtraHeaders["jx_csid"] = ""
//opts.ExtraHeaders["jx_lisence"] = ""
opts.Parameters.Set("umode", "nomultipart")
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.CallXML(&opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
return err
}
// TODO: Check returned Metadata? Timeout on big uploads?
return o.setMetaData(&result)
}
// Remove an object
func (o *Object) Remove() error {
opts := rest.Opts{
Method: "POST",
Path: o.filePath(),
Parameters: url.Values{},
}
opts.Parameters.Set("dl", "true")
return o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallXML(&opts, nil, nil)
return shouldRetry(resp, err)
})
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
)

View File

@@ -1,67 +0,0 @@
package jottacloud
import (
"crypto/md5"
"fmt"
"io"
"io/ioutil"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// A test reader to return a test pattern of size
type testReader struct {
size int64
c byte
}
// Reader is the interface that wraps the basic Read method.
func (r *testReader) Read(p []byte) (n int, err error) {
for i := range p {
if r.size <= 0 {
return n, io.EOF
}
p[i] = r.c
r.c = (r.c + 1) % 253
r.size--
n++
}
return
}
func TestReadMD5(t *testing.T) {
// smoke test the reader
b, err := ioutil.ReadAll(&testReader{size: 10})
require.NoError(t, err)
assert.Equal(t, []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, b)
// Check readMD5 for different size and threshold
for _, size := range []int64{0, 1024, 10 * 1024, 100 * 1024} {
t.Run(fmt.Sprintf("%d", size), func(t *testing.T) {
hasher := md5.New()
n, err := io.Copy(hasher, &testReader{size: size})
require.NoError(t, err)
assert.Equal(t, n, size)
wantMD5 := fmt.Sprintf("%x", hasher.Sum(nil))
for _, threshold := range []int64{512, 1024, 10 * 1024, 20 * 1024} {
t.Run(fmt.Sprintf("%d", threshold), func(t *testing.T) {
in := &testReader{size: size}
gotMD5, out, cleanup, err := readMD5(in, size, threshold)
defer cleanup()
require.NoError(t, err)
assert.Equal(t, wantMD5, gotMD5)
// check md5hash of out
hasher := md5.New()
n, err := io.Copy(hasher, out)
require.NoError(t, err)
assert.Equal(t, n, size)
outMD5 := fmt.Sprintf("%x", hasher.Sum(nil))
assert.Equal(t, wantMD5, outMD5)
})
}
})
}
}

View File

@@ -1,17 +0,0 @@
// Test Box filesystem interface
package jottacloud_test
import (
"testing"
"github.com/ncw/rclone/backend/jottacloud"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestJottacloud:",
NilObject: (*jottacloud.Object)(nil),
})
}

View File

@@ -1,84 +0,0 @@
/*
Translate file names for JottaCloud, adapted from OneDrive.
The following characters are JottaCloud reserved characters, and can't
be used in JottaCloud folder and file names.
jottacloud = "/" / "\" / "*" / "<" / ">" / "?" / "!" / "&" / ":" / ";" / "|" / "#" / "%" / """ / "'" / "." / "~"
*/
package jottacloud
import (
"regexp"
"strings"
)
// charMap holds replacements for characters
//
// JottaCloud has a restricted set of characters compared to other cloud
// storage systems, so we map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
charMap = map[rune]rune{
'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
'+': '＋', // FULLWIDTH PLUS SIGN
'*': '＊', // FULLWIDTH ASTERISK
'<': '＜', // FULLWIDTH LESS-THAN SIGN
'>': '＞', // FULLWIDTH GREATER-THAN SIGN
'?': '？', // FULLWIDTH QUESTION MARK
'!': '！', // FULLWIDTH EXCLAMATION MARK
'&': '＆', // FULLWIDTH AMPERSAND
':': '：', // FULLWIDTH COLON
';': '；', // FULLWIDTH SEMICOLON
'|': '｜', // FULLWIDTH VERTICAL LINE
'#': '＃', // FULLWIDTH NUMBER SIGN
'%': '％', // FULLWIDTH PERCENT SIGN
'"': '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
'\'': '＇', // FULLWIDTH APOSTROPHE
'~': '～', // FULLWIDTH TILDE
' ': '␠', // SYMBOL FOR SPACE
}
invCharMap map[rune]rune
fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
fixEndingWithSpace = regexp.MustCompile(` (/|$)`)
)
func init() {
// Create inverse charMap
invCharMap = make(map[rune]rune, len(charMap))
for k, v := range charMap {
invCharMap[v] = k
}
}
// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
// Filenames can't start with space
in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
// Filenames can't end with space
in = fixEndingWithSpace.ReplaceAllString(in, string(charMap[' '])+"$1")
return strings.Map(func(c rune) rune {
if replacement, ok := charMap[c]; ok && c != ' ' {
return replacement
}
return c
}, in)
}
// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
return strings.Map(func(c rune) rune {
if replacement, ok := invCharMap[c]; ok {
return replacement
}
return c
}, in)
}
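// Illustrative round trip (derived from the table above, not part of the
// original file):
//   replaceReservedChars(`movies: 2014?`)   // -> `movies： 2014？`
//   restoreReservedChars(`movies： 2014？`) // -> `movies: 2014?`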

View File

@@ -1,28 +0,0 @@
package jottacloud
import "testing"
func TestReplace(t *testing.T) {
for _, test := range []struct {
in string
out string
}{
{"", ""},
{"abc 123", "abc 123"},
{`\+*<>?!&:;|#%"'~`, `＼＋＊＜＞？！＆：；｜＃％＂＇～`},
{`\+*<>?!&:;|#%"'~\+*<>?!&:;|#%"'~`, `＼＋＊＜＞？！＆：；｜＃％＂＇～＼＋＊＜＞？！＆：；｜＃％＂＇～`},
{" leading space", "␠leading space"},
{"trailing space ", "trailing space␠"},
{" leading space/ leading space/ leading space", "␠leading space/␠leading space/␠leading space"},
{"trailing space /trailing space /trailing space ", "trailing space␠/trailing space␠/trailing space␠"},
} {
got := replaceReservedChars(test.in)
if got != test.out {
t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
}
got2 := restoreReservedChars(got)
if got2 != test.in {
t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
}
}
}

View File

@@ -1,29 +0,0 @@
// +build darwin dragonfly freebsd linux
package local
import (
"syscall"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
// About gets quota information
func (f *Fs) About() (*fs.Usage, error) {
var s syscall.Statfs_t
err := syscall.Statfs(f.root, &s)
if err != nil {
return nil, errors.Wrap(err, "failed to read disk usage")
}
bs := int64(s.Bsize)
usage := &fs.Usage{
Total: fs.NewUsageValue(bs * int64(s.Blocks)), // quota of bytes that can be used
Used: fs.NewUsageValue(bs * int64(s.Blocks-s.Bfree)), // bytes in use
Free: fs.NewUsageValue(bs * int64(s.Bavail)), // bytes which can be uploaded before reaching the quota
}
return usage, nil
}
// check interface
var _ fs.Abouter = &Fs{}
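
As a rough illustration of how the Statfs fields above turn into byte counts, here is a small standalone sketch (the helper name and free-standing form are assumptions, not part of the deleted file); it only needs the syscall package on a Unix-like system:

// diskUsage reports total, used and available bytes for the filesystem
// containing dir, using the same arithmetic as About above.
func diskUsage(dir string) (total, used, avail int64, err error) {
	var s syscall.Statfs_t
	if err = syscall.Statfs(dir, &s); err != nil {
		return 0, 0, 0, err
	}
	bs := int64(s.Bsize)
	total = bs * int64(s.Blocks)        // size of the whole filesystem
	used = bs * int64(s.Blocks-s.Bfree) // blocks currently in use
	avail = bs * int64(s.Bavail)        // blocks an unprivileged user may still fill
	return total, used, avail, nil
}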

View File

@@ -1,36 +0,0 @@
// +build windows
package local
import (
"syscall"
"unsafe"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")
// About gets quota information
func (f *Fs) About() (*fs.Usage, error) {
var available, total, free int64
_, _, e1 := getFreeDiskSpace.Call(
uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(f.root))),
uintptr(unsafe.Pointer(&available)), // lpFreeBytesAvailable - for this user
uintptr(unsafe.Pointer(&total)), // lpTotalNumberOfBytes
uintptr(unsafe.Pointer(&free)), // lpTotalNumberOfFreeBytes
)
if e1 != syscall.Errno(0) {
return nil, errors.Wrap(e1, "failed to read disk usage")
}
usage := &fs.Usage{
Total: fs.NewUsageValue(total), // quota of bytes that can be used
Used: fs.NewUsageValue(total - free), // bytes in use
Free: fs.NewUsageValue(available), // bytes which can be uploaded before reaching the quota
}
return usage, nil
}
// check interface
var _ fs.Abouter = &Fs{}

View File

@@ -1,993 +0,0 @@
// Package local provides a filesystem interface
package local
import (
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
"time"
"unicode/utf8"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
)
// Constants
const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
// Register with Fs
func init() {
fsi := &fs.RegInfo{
Name: "local",
Description: "Local Disk",
NewFs: NewFs,
Options: []fs.Option{{
Name: "nounc",
Help: "Disable UNC (long path names) conversion on Windows",
Examples: []fs.OptionExample{{
Value: "true",
Help: "Disables long file names",
}},
}, {
Name: "copy_links",
Help: "Follow symlinks and copy the pointed to item.",
Default: false,
NoPrefix: true,
ShortOpt: "L",
Advanced: true,
}, {
Name: "skip_links",
Help: "Don't warn about skipped symlinks.",
Default: false,
NoPrefix: true,
Advanced: true,
}, {
Name: "no_unicode_normalization",
Help: "Don't apply unicode normalization to paths and filenames",
Default: false,
Advanced: true,
}, {
Name: "no_check_updated",
Help: "Don't check to see if the files change during upload",
Default: false,
Advanced: true,
}, {
Name: "one_file_system",
Help: "Don't cross filesystem boundaries (unix/macOS only).",
Default: false,
NoPrefix: true,
ShortOpt: "x",
Advanced: true,
}},
}
fs.Register(fsi)
}
// Options defines the configuration for this backend
type Options struct {
FollowSymlinks bool `config:"copy_links"`
SkipSymlinks bool `config:"skip_links"`
NoUTFNorm bool `config:"no_unicode_normalization"`
NoCheckUpdated bool `config:"no_check_updated"`
NoUNC bool `config:"nounc"`
OneFileSystem bool `config:"one_file_system"`
}
// Fs represents a local filesystem rooted at root
type Fs struct {
name string // the name of the remote
root string // The root directory (OS path)
opt Options // parsed config options
features *fs.Features // optional features
dev uint64 // device number of root node
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
wmu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string
// do os.Lstat or os.Stat
lstat func(name string) (os.FileInfo, error)
dirNames *mapper // directory name mapping
objectHashesMu sync.Mutex // global lock for Object.hashes
}
// Object represents a local filesystem object
type Object struct {
fs *Fs // The Fs this object is part of
remote string // The remote path - properly UTF-8 encoded - for rclone
path string // The local path - may not be properly UTF-8 encoded - for OS
size int64 // file metadata - always present
mode os.FileMode
modTime time.Time
hashes map[hash.Type]string // Hashes
}
// ------------------------------------------------------------
// NewFs constructs an Fs from the path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if opt.NoUTFNorm {
fs.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
}
f := &Fs{
name: name,
opt: *opt,
warned: make(map[string]struct{}),
dev: devUnset,
lstat: os.Lstat,
dirNames: newMapper(),
}
f.root = f.cleanPath(root)
f.features = (&fs.Features{
CaseInsensitive: f.caseInsensitive(),
CanHaveEmptyDirectories: true,
}).Fill(f)
if opt.FollowSymlinks {
f.lstat = os.Stat
}
// Check to see if this points to a file
fi, err := f.lstat(f.root)
if err == nil {
f.dev = readDevice(fi, f.opt.OneFileSystem)
}
if err == nil && fi.Mode().IsRegular() {
// It is a file, so use the parent as the root
f.root = filepath.Dir(f.root)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("Local file system at %s", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// caseInsensitive returns whether the remote is case insensitive or not
func (f *Fs) caseInsensitive() bool {
// FIXME not entirely accurate since you can have case
// sensitive Fses on darwin and case insensitive Fses on linux.
// Should probably check but that would involve creating a
// file in the remote to be most accurate which probably isn't
// desirable.
return runtime.GOOS == "windows" || runtime.GOOS == "darwin"
}
// newObject makes a half completed Object
//
// if dstPath is empty then it is made from remote
func (f *Fs) newObject(remote, dstPath string) *Object {
if dstPath == "" {
dstPath = f.cleanPath(filepath.Join(f.root, remote))
}
remote = f.cleanRemote(remote)
return &Object{
fs: f,
remote: remote,
path: dstPath,
}
}
// Return an Object from a path
//
// May return nil if an error occurred
func (f *Fs) newObjectWithInfo(remote, dstPath string, info os.FileInfo) (fs.Object, error) {
o := f.newObject(remote, dstPath)
if info != nil {
o.setMetadata(info)
} else {
err := o.lstat()
if err != nil {
if os.IsNotExist(err) {
return nil, fs.ErrorObjectNotFound
}
if os.IsPermission(err) {
return nil, fs.ErrorPermissionDenied
}
return nil, err
}
}
if o.mode.IsDir() {
return nil, errors.Wrapf(fs.ErrorNotAFile, "%q", remote)
}
return o, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, "", nil)
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
dir = f.dirNames.Load(dir)
fsDirPath := f.cleanPath(filepath.Join(f.root, dir))
remote := f.cleanRemote(dir)
_, err = os.Stat(fsDirPath)
if err != nil {
return nil, fs.ErrorDirNotFound
}
fd, err := os.Open(fsDirPath)
if err != nil {
return nil, errors.Wrapf(err, "failed to open directory %q", dir)
}
defer func() {
cerr := fd.Close()
if cerr != nil && err == nil {
err = errors.Wrapf(cerr, "failed to close directory %q:", dir)
}
}()
for {
fis, err := fd.Readdir(1024)
if err == io.EOF && len(fis) == 0 {
break
}
if err != nil {
return nil, errors.Wrapf(err, "failed to read directory %q", dir)
}
for _, fi := range fis {
name := fi.Name()
mode := fi.Mode()
newRemote := path.Join(remote, name)
newPath := filepath.Join(fsDirPath, name)
// Follow symlinks if required
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
fi, err = os.Stat(newPath)
if err != nil {
return nil, err
}
mode = fi.Mode()
}
if fi.IsDir() {
// Ignore directories which are symlinks. These are junction points under windows which
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
d := fs.NewDir(f.dirNames.Save(newRemote, f.cleanRemote(newRemote)), fi.ModTime())
entries = append(entries, d)
}
} else {
fso, err := f.newObjectWithInfo(newRemote, newPath, fi)
if err != nil {
return nil, err
}
if fso.Storable() {
entries = append(entries, fso)
}
}
}
}
return entries, nil
}
// cleanRemote makes the name a valid UTF-8 string for use as a remote path.
//
// Any invalid UTF-8 characters will be replaced with utf8.RuneError
// It also normalises the UTF-8 and converts the slashes if necessary.
func (f *Fs) cleanRemote(name string) string {
if !utf8.ValidString(name) {
f.wmu.Lock()
if _, ok := f.warned[name]; !ok {
fs.Logf(f, "Replacing invalid UTF-8 characters in %q", name)
f.warned[name] = struct{}{}
}
f.wmu.Unlock()
name = string([]rune(name))
}
name = filepath.ToSlash(name)
return name
}
// mapper maps raw to cleaned directory names
type mapper struct {
mu sync.RWMutex // mutex to protect the below
m map[string]string // map of un-normalised directory names
}
func newMapper() *mapper {
return &mapper{
m: make(map[string]string),
}
}
// Lookup a directory name to make a local name (reverses the mapping
// recorded by Save)
//
// FIXME this is temporary before we make a proper Directory object
func (m *mapper) Load(in string) string {
m.mu.RLock()
out, ok := m.m[in]
m.mu.RUnlock()
if ok {
return out
}
return in
}
// Cleans a directory name, recording whether it needed to be altered
//
// FIXME this is temporary before we make a proper Directory object
func (m *mapper) Save(in, out string) string {
if in != out {
m.mu.Lock()
m.m[out] = in
m.mu.Unlock()
}
return out
}
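// Illustrative round trip (assumed, not part of the original file): List()
// records the raw on-disk spelling against the cleaned name, and a later
// call maps it back:
//   clean := f.cleanRemote(rawName)  // e.g. invalid UTF-8 replaced
//   f.dirNames.Save(rawName, clean)  // remember the raw spelling if it differed
//   f.dirNames.Load(clean)           // -> rawName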
// Put the Object to the local filesystem
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
remote := src.Remote()
// Temporary Object under construction - info filled in by Update()
o := f.newObject(remote, "")
err := o.Update(in, src, options...)
if err != nil {
return nil, err
}
return o, nil
}
// PutStream uploads to the remote path with the given modTime, for content of indeterminate size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...)
}
// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(dir string) error {
// FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go
root := f.cleanPath(filepath.Join(f.root, dir))
err := os.MkdirAll(root, 0777)
if err != nil {
return err
}
if dir == "" {
fi, err := f.lstat(root)
if err != nil {
return err
}
f.dev = readDevice(fi, f.opt.OneFileSystem)
}
return nil
}
// Rmdir removes the directory
//
// If it isn't empty it will return an error
func (f *Fs) Rmdir(dir string) error {
root := f.cleanPath(filepath.Join(f.root, dir))
return os.Remove(root)
}
// Precision of the file system
func (f *Fs) Precision() (precision time.Duration) {
f.precisionOk.Do(func() {
f.precision = f.readPrecision()
})
return f.precision
}
// Read the precision
func (f *Fs) readPrecision() (precision time.Duration) {
// Default precision of 1s
precision = time.Second
// Create temporary file and test it
fd, err := ioutil.TempFile("", "rclone")
if err != nil {
// If failed return 1s
// fmt.Println("Failed to create temp file", err)
return time.Second
}
path := fd.Name()
// fmt.Println("Created temp file", path)
err = fd.Close()
if err != nil {
return time.Second
}
// Delete it on return
defer func() {
// fmt.Println("Remove temp file")
_ = os.Remove(path) // ignore error
}()
// Find the minimum duration we can detect
for duration := time.Duration(1); duration < time.Second; duration *= 10 {
// Current time with delta
t := time.Unix(time.Now().Unix(), int64(duration))
err := os.Chtimes(path, t, t)
if err != nil {
// fmt.Println("Failed to Chtimes", err)
break
}
// Read the actual time back
fi, err := os.Stat(path)
if err != nil {
// fmt.Println("Failed to Stat", err)
break
}
// If it matches - have found the precision
// fmt.Println("compare", fi.ModTime(), t)
if fi.ModTime().Equal(t) {
// fmt.Println("Precision detected as", duration)
return duration
}
}
return
}
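// For illustration (not part of the original file): the probe above tries
// 1ns, 10ns, ... 100ms and returns the first duration that survives a
// Chtimes/Stat round trip, so a filesystem with nanosecond timestamps
// typically reports 1ns, while one with coarser (e.g. 2s) timestamps never
// matches and keeps the 1s default.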
// Purge deletes all the files and directories
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge() error {
fi, err := f.lstat(f.root)
if err != nil {
return err
}
if !fi.Mode().IsDir() {
return errors.Errorf("can't purge non directory: %q", f.root)
}
return os.RemoveAll(f.root)
}
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
// Temporary Object under construction
dstObj := f.newObject(remote, "")
// Check it is a file if it exists
err := dstObj.lstat()
if os.IsNotExist(err) {
// OK
} else if err != nil {
return nil, err
} else if !dstObj.mode.IsRegular() {
// It isn't a file
return nil, errors.New("can't move file onto non-file")
}
// Create destination
err = dstObj.mkdirAll()
if err != nil {
return nil, err
}
// Do the move
err = os.Rename(srcObj.path, dstObj.path)
if os.IsNotExist(err) {
// race condition, source was deleted in the meantime
return nil, err
} else if os.IsPermission(err) {
// not enough rights to write to dst
return nil, err
} else if err != nil {
// not quite clear, but probably trying to move a file across file system
// boundaries. Copying might still work.
fs.Debugf(src, "Can't move: %v: trying copy", err)
return nil, fs.ErrorCantMove
}
// Update the info
err = dstObj.lstat()
if err != nil {
return nil, err
}
return dstObj, nil
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcPath := f.cleanPath(filepath.Join(srcFs.root, srcRemote))
dstPath := f.cleanPath(filepath.Join(f.root, dstRemote))
// Check if destination exists
_, err := os.Lstat(dstPath)
if !os.IsNotExist(err) {
return fs.ErrorDirExists
}
// Create parent of destination
dstParentPath := filepath.Dir(dstPath)
err = os.MkdirAll(dstParentPath, 0777)
if err != nil {
return err
}
// Do the move
err = os.Rename(srcPath, dstPath)
if os.IsNotExist(err) {
// race condition, source was deleted in the meantime
return err
} else if os.IsPermission(err) {
// not enough rights to write to dst
return err
} else if err != nil {
// not quite clear, but probably trying to move directory across file system
// boundaries. Copying might still work.
fs.Debugf(src, "Can't move dir: %v: trying copy", err)
return fs.ErrorCantDirMove
}
return nil
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Supported
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Hash returns the requested hash of a file as a lowercase hex string
func (o *Object) Hash(r hash.Type) (string, error) {
// Check that the underlying file hasn't changed
oldtime := o.modTime
oldsize := o.size
err := o.lstat()
if err != nil {
return "", errors.Wrap(err, "hash: failed to stat")
}
o.fs.objectHashesMu.Lock()
hashes := o.hashes
o.fs.objectHashesMu.Unlock()
if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil {
in, err := os.Open(o.path)
if err != nil {
return "", errors.Wrap(err, "hash: failed to open")
}
hashes, err = hash.Stream(in)
closeErr := in.Close()
if err != nil {
return "", errors.Wrap(err, "hash: failed to read")
}
if closeErr != nil {
return "", errors.Wrap(closeErr, "hash: failed to close")
}
o.fs.objectHashesMu.Lock()
o.hashes = hashes
o.fs.objectHashesMu.Unlock()
}
return hashes[r], nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.size
}
// ModTime returns the modification time of the object
func (o *Object) ModTime() time.Time {
return o.modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) error {
err := os.Chtimes(o.path, modTime, modTime)
if err != nil {
return err
}
// Re-read metadata
return o.lstat()
}
// Storable returns a boolean showing if this object is storable
func (o *Object) Storable() bool {
// Check for control characters in the remote name and show non storable
for _, c := range o.Remote() {
if c >= 0x00 && c < 0x20 || c == 0x7F {
fs.Logf(o.fs, "Can't store file with control characters: %q", o.Remote())
return false
}
}
mode := o.mode
if mode&os.ModeSymlink != 0 {
if !o.fs.opt.SkipSymlinks {
fs.Logf(o, "Can't follow symlink without -L/--copy-links")
}
return false
} else if mode&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
fs.Logf(o, "Can't transfer non file/directory")
return false
} else if mode&os.ModeDir != 0 {
// fs.Debugf(o, "Skipping directory")
return false
}
return true
}
// localOpenFile wraps an io.ReadCloser and updates the hashes of the
// object as it is read
type localOpenFile struct {
o *Object // object that is open
in io.ReadCloser // handle we are wrapping
hash *hash.MultiHasher // currently accumulating hashes
fd *os.File // file object reference
}
// Read bytes from the object - see io.Reader
func (file *localOpenFile) Read(p []byte) (n int, err error) {
if !file.o.fs.opt.NoCheckUpdated {
// Check if file has the same size and modTime
fi, err := file.fd.Stat()
if err != nil {
return 0, errors.Wrap(err, "can't read status of source file while transferring")
}
if file.o.size != fi.Size() {
return 0, errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size())
}
if !file.o.modTime.Equal(fi.ModTime()) {
return 0, errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime())
}
}
n, err = file.in.Read(p)
if n > 0 {
// Hash routines never return an error
_, _ = file.hash.Write(p[:n])
}
return
}
// Close the object and update the hashes
func (file *localOpenFile) Close() (err error) {
err = file.in.Close()
if err == nil {
if file.hash.Size() == file.o.Size() {
file.o.fs.objectHashesMu.Lock()
file.o.hashes = file.hash.Sums()
file.o.fs.objectHashesMu.Unlock()
}
}
return err
}
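// Note (illustrative, not part of the original file): the size check above
// means hashes are only cached when every byte of the object passed through
// the wrapper, so partial or aborted reads never poison the cached sums.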
// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
var offset, limit int64 = 0, -1
hashes := hash.Supported
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.size)
case *fs.HashesOption:
hashes = x.Hashes
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
fd, err := os.Open(o.path)
if err != nil {
return
}
wrappedFd := readers.NewLimitedReadCloser(fd, limit)
if offset != 0 {
// seek the object
_, err = fd.Seek(offset, io.SeekStart)
// don't attempt to make checksums
return wrappedFd, err
}
hash, err := hash.NewMultiHasherTypes(hashes)
if err != nil {
return nil, err
}
// Update the md5sum as we go along
in = &localOpenFile{
o: o,
in: wrappedFd,
hash: hash,
fd: fd,
}
return in, nil
}
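// Illustrative usage (assumed, not part of the original file): a ranged read
// starting at a non-zero offset skips the hashing wrapper, since only part of
// the file would pass through it:
//   rc, err := o.Open(&fs.RangeOption{Start: 100, End: 199})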
// mkdirAll makes all the directories needed to store the object
func (o *Object) mkdirAll() error {
dir := filepath.Dir(o.path)
return os.MkdirAll(dir, 0777)
}
// Update the object from in with modTime and size
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
hashes := hash.Supported
for _, option := range options {
switch x := option.(type) {
case *fs.HashesOption:
hashes = x.Hashes
}
}
err := o.mkdirAll()
if err != nil {
return err
}
out, err := os.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return err
}
// Calculate the hash of the object we are reading as we go along
hash, err := hash.NewMultiHasherTypes(hashes)
if err != nil {
return err
}
in = io.TeeReader(in, hash)
_, err = io.Copy(out, in)
closeErr := out.Close()
if err == nil {
err = closeErr
}
if err != nil {
fs.Logf(o, "Removing partially written file on error: %v", err)
if removeErr := os.Remove(o.path); removeErr != nil {
fs.Errorf(o, "Failed to remove partially written file: %v", removeErr)
}
return err
}
// All successful so update the hashes
o.fs.objectHashesMu.Lock()
o.hashes = hash.Sums()
o.fs.objectHashesMu.Unlock()
// Set the mtime
err = o.SetModTime(src.ModTime())
if err != nil {
return err
}
// ReRead info now that we have finished
return o.lstat()
}
// setMetadata sets the file info from the os.FileInfo passed in
func (o *Object) setMetadata(info os.FileInfo) {
// Don't overwrite the info if we don't need to
// this avoids upsetting the race detector
if o.size != info.Size() {
o.size = info.Size()
}
if !o.modTime.Equal(info.ModTime()) {
o.modTime = info.ModTime()
}
if o.mode != info.Mode() {
o.mode = info.Mode()
}
}
// lstat stats the Object, updating its metadata from the result
func (o *Object) lstat() error {
info, err := o.fs.lstat(o.path)
if err == nil {
o.setMetadata(info)
}
return err
}
// Remove an object
func (o *Object) Remove() error {
return remove(o.path)
}
// cleanPathFragment cleans an OS path fragment which is part of a
// bigger path and not necessarily absolute
func cleanPathFragment(s string) string {
if s == "" {
return s
}
s = filepath.Clean(s)
if runtime.GOOS == "windows" {
s = strings.Replace(s, `/`, `\`, -1)
}
return s
}
// cleanPath cleans and makes absolute the path passed in and returns
// an OS path.
//
// The input might be in OS form or rclone form or a mixture, but the
// output is in OS form.
//
// On Windows it also converts the path to UNC form and replaces any
// characters Windows can't handle with their replacements.
func (f *Fs) cleanPath(s string) string {
s = cleanPathFragment(s)
if runtime.GOOS == "windows" {
if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
if !f.opt.NoUNC {
// Convert to UNC
s = uncPath(s)
}
s = cleanWindowsName(f, s)
} else {
if !filepath.IsAbs(s) {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
}
return s
}
// Pattern to match a windows absolute path: "c:\" and similar
var isAbsWinDrive = regexp.MustCompile(`^[a-zA-Z]\:\\`)
// uncPath converts an absolute Windows path
// to a UNC long path.
func uncPath(s string) string {
// UNC can NOT use "/", so convert all to "\"
s = strings.Replace(s, `/`, `\`, -1)
// If prefix is "\\", we already have a UNC path or server.
if strings.HasPrefix(s, `\\`) {
// If already long path, just keep it
if strings.HasPrefix(s, `\\?\`) {
return s
}
// Trim "\\" from path and add UNC prefix.
return `\\?\UNC\` + strings.TrimPrefix(s, `\\`)
}
if isAbsWinDrive.MatchString(s) {
return `\\?\` + s
}
return s
}
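// Illustrative examples (derived from the code above, not part of the
// original file):
//   uncPath(`c:\work\file.txt`)        // -> `\\?\c:\work\file.txt`
//   uncPath(`c:/work/file.txt`)        // -> `\\?\c:\work\file.txt` (slashes flipped first)
//   uncPath(`\\server\share\file.txt`) // -> `\\?\UNC\server\share\file.txt`
//   uncPath(`\\?\c:\already\long`)     // unchanged, already a long path
//   uncPath(`relative\path`)           // unchanged, not absolute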
// cleanWindowsName will clean invalid Windows characters replacing them with _
func cleanWindowsName(f *Fs, name string) string {
original := name
var name2 string
if strings.HasPrefix(name, `\\?\`) {
name2 = `\\?\`
name = strings.TrimPrefix(name, `\\?\`)
}
if strings.HasPrefix(name, `//?/`) {
name2 = `//?/`
name = strings.TrimPrefix(name, `//?/`)
}
// Colon is allowed as part of a drive name X:\
colonAt := strings.Index(name, ":")
if colonAt > 0 && colonAt < 3 && len(name) > colonAt+1 {
// Copy to name2, which is unfiltered
name2 += name[0 : colonAt+1]
name = name[colonAt+1:]
}
name2 += strings.Map(func(r rune) rune {
switch r {
case '<', '>', '"', '|', '?', '*', ':':
return '_'
}
return r
}, name)
if name2 != original && f != nil {
f.wmu.Lock()
if _, ok := f.warned[name]; !ok {
fs.Logf(f, "Replacing invalid characters in %q to %q", name, name2)
f.warned[name] = struct{}{}
}
f.wmu.Unlock()
}
return name2
}
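// Illustrative examples (derived from the code above, not part of the
// original file):
//   cleanWindowsName(nil, `\\?\C:\dir\a<b>c.txt`) // -> `\\?\C:\dir\a_b_c.txt`
//   cleanWindowsName(nil, `C:\report 12:30.txt`)  // -> `C:\report 12_30.txt` (drive colon kept)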
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Purger = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Mover = &Fs{}
_ fs.DirMover = &Fs{}
_ fs.Object = &Object{}
)

View File

@@ -1,74 +0,0 @@
package local
import (
"os"
"path"
"testing"
"time"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/lib/readers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestMain drives the tests
func TestMain(m *testing.M) {
fstest.TestMain(m)
}
func TestMapper(t *testing.T) {
m := newMapper()
assert.Equal(t, m.m, map[string]string{})
assert.Equal(t, "potato", m.Save("potato", "potato"))
assert.Equal(t, m.m, map[string]string{})
assert.Equal(t, "-r'áö", m.Save("-r?'a´o¨", "-r'áö"))
assert.Equal(t, m.m, map[string]string{
"-r'áö": "-r?'a´o¨",
})
assert.Equal(t, "potato", m.Load("potato"))
assert.Equal(t, "-r?'a´o¨", m.Load("-r'áö"))
}
// Test copy with source file that's updating
func TestUpdatingCheck(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
filePath := "sub dir/local test"
r.WriteFile(filePath, "content", time.Now())
fd, err := os.Open(path.Join(r.LocalName, filePath))
if err != nil {
t.Fatalf("failed opening file %q: %v", filePath, err)
}
fi, err := fd.Stat()
require.NoError(t, err)
o := &Object{size: fi.Size(), modTime: fi.ModTime(), fs: &Fs{}}
wrappedFd := readers.NewLimitedReadCloser(fd, -1)
hash, err := hash.NewMultiHasherTypes(hash.Supported)
require.NoError(t, err)
in := localOpenFile{
o: o,
in: wrappedFd,
hash: hash,
fd: fd,
}
buf := make([]byte, 1)
_, err = in.Read(buf)
require.NoError(t, err)
r.WriteFile(filePath, "content updated", time.Now())
_, err = in.Read(buf)
require.Errorf(t, err, "can't copy - source file is being updated")
// turn the checking off and try again
in.o.fs.opt.NoCheckUpdated = true
r.WriteFile(filePath, "content updated", time.Now())
_, err = in.Read(buf)
require.NoError(t, err)
}

View File

@@ -1,17 +0,0 @@
// Test Local filesystem interface
package local_test
import (
"testing"
"github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "",
NilObject: (*local.Object)(nil),
})
}

View File

@@ -1,13 +0,0 @@
// Device reading functions
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
package local
import "os"
// readDevice turns a valid os.FileInfo into a device number,
// returning devUnset if it fails.
func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 {
return devUnset
}

View File

@@ -1,26 +0,0 @@
// Device reading functions
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package local
import (
"os"
"syscall"
"github.com/ncw/rclone/fs"
)
// readDevice turns a valid os.FileInfo into a device number,
// returning devUnset if it fails.
func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 {
if !oneFileSystem {
return devUnset
}
statT, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
fs.Debugf(fi.Name(), "Type assertion fi.Sys().(*syscall.Stat_t) failed from: %#v", fi.Sys())
return devUnset
}
return uint64(statT.Dev)
}

View File

@@ -1,10 +0,0 @@
//+build !windows
package local
import "os"
// Removes name; no sharing-violation retry is needed outside Windows
func remove(name string) error {
return os.Remove(name)
}

Some files were not shown because too many files have changed in this diff.