mirror of https://github.com/rclone/rclone.git synced 2026-01-12 05:23:55 +00:00

Compare commits


1 Commit

Author: Nick Craig-Wood
SHA1: 87d64e7fb4
Message: mount: use the equivalent of kernel_cache by default #FIXME WIP
Date: 2018-07-11 14:56:17 +01:00
11874 changed files with 8194992 additions and 208051 deletions

appveyor.yml

@@ -4,9 +4,6 @@ os: Windows Server 2012 R2
clone_folder: c:\gopath\src\github.com\ncw\rclone
cache:
- '%LocalAppData%\go-build'
environment:
GOPATH: C:\gopath
CPATH: C:\Program Files (x86)\WinFsp\inc\fuse
@@ -46,4 +43,4 @@ artifacts:
- path: build/*-v*.zip
deploy_script:
- IF "%APPVEYOR_REPO_NAME%" == "ncw/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload
- IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload

.circleci/config.yml

@@ -1,4 +1,3 @@
---
version: 2
jobs:
@@ -14,10 +13,10 @@ jobs:
- run:
name: Cross-compile rclone
command: |
docker pull rclone/xgo-cgofuse
docker pull billziss/xgo-cgofuse
go get -v github.com/karalabe/xgo
xgo \
--image=rclone/xgo-cgofuse \
--image=billziss/xgo-cgofuse \
--targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
-tags cmount \
.
@@ -30,21 +29,6 @@ jobs:
command: |
mkdir -p /tmp/rclone.dist
cp -R rclone-* /tmp/rclone.dist
mkdir build
cp -R rclone-* build/
- run:
name: Build rclone
command: |
go version
go build
- run:
name: Upload artifacts
command: |
if [[ $CIRCLE_PULL_REQUEST != "" ]]; then
make circleci_upload
fi
- store_artifacts:
path: /tmp/rclone.dist

.github/ISSUE_TEMPLATE.md

@@ -1,31 +0,0 @@
<!--
Welcome :-) We understand you are having a problem with rclone; we want to help you with that!
If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:
https://forum.rclone.org/
instead of filing an issue for a quick response.
If you are reporting a bug or asking for a new feature then please use one of the templates here:
https://github.com/ncw/rclone/issues/new
otherwise fill in the form below.
Thank you
The Rclone Developers
-->
#### Output of `rclone version`
#### Describe the issue

.github/ISSUE_TEMPLATE/Bug.md

@@ -1,50 +0,0 @@
---
name: Bug report
about: Report a problem with rclone
---
<!--
Welcome :-) We understand you are having a problem with rclone; we want to help you with that!
If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:
https://forum.rclone.org/
instead of filing an issue for a quick response.
If you think you might have found a bug, please can you try to replicate it with the latest beta?
https://beta.rclone.org/
If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)
Thank you
The Rclone Developers
-->
#### What is the problem you are having with rclone?
#### What is your rclone version (output from `rclone version`)
#### Which OS you are using and how many bits (eg Windows 7, 64 bit)
#### Which cloud storage system are you using? (eg Google Drive)
#### The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
#### A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)

.github/ISSUE_TEMPLATE/Feature.md

@@ -1,36 +0,0 @@
---
name: Feature request
about: Suggest a new feature or enhancement for rclone
---
<!--
Welcome :-)
So you've got an idea to improve rclone? We love that! You'll be glad to hear we've incorporated hundreds of ideas from contributors already.
Here is a checklist of things to do:
1. Please search the old issues first for your idea and +1 or comment on an existing issue if possible.
2. Discuss on the forum first: https://forum.rclone.org/
3. Make a feature request issue (this is the right place!).
4. Be prepared to get involved making the feature :-)
Looking forward to your great idea!
The Rclone Developers
-->
#### What is your current rclone version (output from `rclone version`)?
#### What problem are you trying to solve?
#### How do you think rclone should be changed to solve that?

.github/PULL_REQUEST_TEMPLATE.md

@@ -1,29 +0,0 @@
<!--
Thank you very much for contributing code or documentation to rclone! Please
fill out the following questions to make it easier for us to review your
changes.
You do not need to check all the boxes below all at once, feel free to take
your time and add more commits. If you're done and ready for review, please
check the last box.
-->
#### What is the purpose of this change?
<!--
Describe the changes here
-->
#### Was the change discussed in an issue or in the forum before?
<!--
Link issues and relevant forum posts here.
-->
#### Checklist
- [ ] I have read the [contribution guidelines](https://github.com/ncw/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
- [ ] I have added tests for all changes in this PR if appropriate.
- [ ] I have added documentation for the changes if appropriate.
- [ ] All commit messages are in [house style](https://github.com/ncw/rclone/blob/master/CONTRIBUTING.md#commit-messages).
- [ ] I'm done, this Pull Request is ready for review :-)

.golangci.yml

@@ -1,30 +0,0 @@
# golangci-lint configuration options
run:
build-tags:
- cmount
linters:
enable:
- deadcode
- errcheck
- goimports
- golint
- ineffassign
- structcheck
- varcheck
- govet
- unconvert
#- prealloc
#- maligned
disable-all: true
issues:
# Enable some lints excluded by default
exclude-use-default: false
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
max-per-linter: 0
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0

14
.gometalinter.json Normal file

@@ -0,0 +1,14 @@
{
"Enable": [
"deadcode",
"errcheck",
"goimports",
"golint",
"ineffassign",
"structcheck",
"varcheck",
"vet"
],
"EnableGC": true,
"Vendor": true
}

.travis.yml

@@ -1,109 +1,50 @@
---
language: go
sudo: required
dist: trusty
os:
- linux
go_import_path: github.com/ncw/rclone
- linux
go:
- 1.7.6
- 1.8.7
- 1.9.3
- "1.10.1"
- tip
before_install:
- git fetch --unshallow --tags
- |
if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
fi
if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
brew update
brew tap caskroom/cask
brew cask install osxfuse
fi
if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then
choco install -y winfsp zip make
cd ../.. # fix crlf in git checkout
mv $TRAVIS_REPO_SLUG _old
git config --global core.autocrlf false
git clone _old $TRAVIS_REPO_SLUG
cd $TRAVIS_REPO_SLUG
fi
- if [[ $TRAVIS_OS_NAME == linux ]]; then sudo modprobe fuse ; sudo chmod 666 /dev/fuse ; sudo chown root:$USER /etc/fuse.conf ; fi
- if [[ $TRAVIS_OS_NAME == osx ]]; then brew update && brew tap caskroom/cask && brew cask install osxfuse ; fi
install:
- make vars
- git fetch --unshallow --tags
- make vars
- make build_dep
script:
- make check
- make quicktest
- make compile_all
env:
global:
- GOTAGS=cmount
- GO111MODULE=off
- secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
- secure: AMjrMAksDy3QwqGqnvtUg8FL/GNVgNqTqhntLF9HSU0njHhX6YurGGnfKdD9vNHlajPQOewvmBjwNLcDWGn2WObdvmh9Ohep0EmOjZ63kliaRaSSQueSd8y0idfqMQAxep0SObOYbEDVmQh0RCAE9wOVKRaPgw98XvgqWGDq5Tw=
- secure: Uaiveq+/rvQjO03GzvQZV2J6pZfedoFuhdXrLVhhHSeP4ZBca0olw7xaqkabUyP3LkVYXMDSX8EbyeuQT1jfEe5wp5sBdfaDtuYW6heFyjiHIIIbVyBfGXon6db4ETBjOaX/Xt8uktrgNge6qFlj+kpnmpFGxf0jmDLw1zgg7tk=
addons:
apt:
packages:
- fuse
- libfuse-dev
- rpm
- pkg-config
cache:
directories:
- $HOME/.cache/go-build
- fuse
- libfuse-dev
- rpm
- pkg-config
matrix:
allow_failures:
- go: tip
- go: tip
include:
- go: 1.8.x
script:
- make quicktest
- go: 1.9.x
script:
- make quicktest
- go: 1.10.x
script:
- make quicktest
- go: 1.11.x
script:
- make quicktest
- go: 1.12.x
env:
- GOTAGS=cmount
script:
- make build_dep
- make check
- make quicktest
- make racequicktest
- make compile_all
- os: osx
go: 1.12.x
env:
- GOTAGS= # cmount doesn't work on osx travis for some reason
cache:
directories:
- $HOME/Library/Caches/go-build
script:
- make
- make quicktest
- make racequicktest
# - os: windows
# go: 1.12.x
# env:
# - GOTAGS=cmount
# - CPATH='C:\Program Files (x86)\WinFsp\inc\fuse'
# #filter_secrets: false # works around a problem with secrets under windows
# cache:
# directories:
# - ${LocalAppData}/go-build
# script:
# - make
# - make quicktest
# - make racequicktest
- go: tip
script:
- make quicktest
- os: osx
go: "1.10.1"
env: GOTAGS=""
deploy:
provider: script
script: make travis_beta
skip_cleanup: true
on:
repo: ncw/rclone
all_branches: true
go: 1.12.x
condition: $TRAVIS_PULL_REQUEST == false && $TRAVIS_OS_NAME != "windows"
go: "1.10.1"
condition: $TRAVIS_PULL_REQUEST == false

CONTRIBUTING.md

@@ -21,19 +21,19 @@ with the [latest beta of rclone](https://beta.rclone.org/):
## Submitting a pull request ##
If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via GitHub.
like to implement then please submit a pull request via Github.
If it is a big feature then make an issue first so it can be discussed.
You'll need a Go environment set up with GOPATH set. See [the Go
getting started docs](https://golang.org/doc/install) for more info.
First in your web browser press the fork button on [rclone's GitHub
First in your web browser press the fork button on [rclone's Github
page](https://github.com/ncw/rclone).
Now in your terminal
go get -u github.com/ncw/rclone
go get github.com/ncw/rclone
cd $GOPATH/src/github.com/ncw/rclone
git remote rename origin upstream
git remote add origin git@github.com:YOURUSER/rclone.git
@@ -64,31 +64,22 @@ packages which you can install with
Make sure you
* Add [documentation](#writing-documentation) for a new feature.
* Follow the [commit message guidelines](#commit-messages).
* Add [unit tests](#testing) for a new feature
* Add documentation for a new feature (see below for where)
* Add unit tests for a new feature
* squash commits down to one per feature
* rebase to master with `git rebase master`
* rebase to master `git rebase master`
When you are done with that
git push origin my-new-feature
git push origin my-new-feature
Go to the GitHub website and click [Create pull
Go to the Github website and click [Create pull
request](https://help.github.com/articles/creating-a-pull-request/).
Your patch will get reviewed and you might get asked to fix some stuff.
If so, then make the changes in the same branch, squash the commits,
rebase it to master then push it to GitHub with `--force`.
## Enabling CI for your fork ##
The CI config files for rclone have taken care of forks of the project, so you can enable CI for your fork repo easily.
rclone currently uses [Travis CI](https://travis-ci.org/), [AppVeyor](https://ci.appveyor.com/), and
[Circle CI](https://circleci.com/) to build the project. To enable them for your fork, simply go into their
websites, find your fork of rclone, and enable building there.
rebase it to master then push it to Github with `--force`.
## Testing ##
@@ -123,13 +114,6 @@ but they can be run against any of the remotes.
cd fs/operations
go test -v -remote TestDrive:
If you want to use the integration test framework to run these tests
all together with an HTML report and test retries then from the
project root:
go install github.com/ncw/rclone/fstest/test_all
test_all -backend drive
If you want to run all the integration tests against all the remotes,
then change into the project root and run
@@ -182,21 +166,17 @@ with modules beneath.
* pacer - retries with backoff and paces operations
* readers - a selection of useful io.Readers
* rest - a thin abstraction over net/http for REST
* vendor - 3rd party code managed by `go mod`
* vendor - 3rd party code managed by the dep tool
* vfs - Virtual FileSystem layer for implementing rclone mount and similar
## Writing Documentation ##
If you are adding a new feature then please update the documentation.
If you add a new general flag (not for a backend), then document it in
If you add a new flag, then if it is a general flag, document it in
`docs/content/docs.md` - the flags there are supposed to be in
alphabetical order.
If you add a new backend option/flag, then it should be documented in
the source file in the `Help:` field. The first line of this is used
for the flag help, the remainder is shown to the user in `rclone
config` and is added to the docs with `make backenddocs`.
alphabetical order. If it is a remote specific flag, then document it
in `docs/content/remote.md`.
The only documentation you need to edit are the `docs/content/*.md`
files. The MANUAL.*, rclone.1, web site etc are all auto generated
@@ -215,20 +195,14 @@ file.
## Commit messages ##
Please make the first line of your commit message a summary of the
change that a user (not a developer) of rclone would like to read, and
prefix it with the directory of the change followed by a colon. The
changelog gets made by looking at just these first lines so make it
good!
change, and prefix it with the directory of the change followed by a
colon. The changelog gets made by looking at just these first lines
so make it good!
If you have more to say about the commit, then enter a blank line and
carry on the description. Remember to say why the change was needed -
the commit itself shows what was changed.
Writing more is better than less. Comparing the behaviour before the
change to that after the change is very useful. Imagine you are
writing to yourself in 12 months time when you've forgotten everything
about what you just did and you need to get up to speed quickly.
If the change fixes an issue then write `Fixes #1234` in the commit
message. This can be on the subject line if it will fit. If you
don't want to close the associated issue just put `#1234` and the
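To make the house style concrete, here is a hypothetical commit message that follows these guidelines (the backend name, description and issue number are invented for illustration):

    s3: fix retry of multipart uploads after a 500 error

    Before this change rclone would abandon a multipart upload when the
    server returned a 500. After this change it retries the failed part,
    which matches the behaviour of single part uploads.

    Fixes #1234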
@@ -255,53 +229,37 @@ Fixes #1498
## Adding a dependency ##
rclone uses the [go
modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
support in go1.11 and later to manage its dependencies.
rclone uses the [dep](https://github.com/golang/dep) tool to manage
its dependencies. All code that rclone needs for building is stored
in the `vendor` directory for perfectly reproducible builds.
**NB** you must be using go1.11 or above to add a dependency to
rclone. Rclone will still build with older versions of go, but we use
the `go mod` command for dependencies which is only in go1.11 and
above.
The `vendor` directory is entirely managed by the `dep` tool.
rclone can be built with modules outside of the GOPATH, but for
backwards compatibility with older go versions, rclone also maintains
a `vendor` directory with all the external code rclone needs for
building.
To add a new dependency, run `dep ensure` and `dep` will pull in the
new dependency to the `vendor` directory and update the `Gopkg.lock`
file.
The `vendor` directory is entirely managed by the `go mod` tool, do
not add things manually.
You can add constraints on that package in the `Gopkg.toml` file (see
the `dep` documentation), but don't unless you really need to.
To add a dependency `github.com/ncw/new_dependency` see the
instructions below. These will fetch the dependency, add it to
`go.mod` and `go.sum` and vendor it for older go versions.
GO111MODULE=on go get github.com/ncw/new_dependency
GO111MODULE=on go mod vendor
You can add constraints on that package when doing `go get` (see the
go docs linked above), but don't unless you really need to.
Please check in the changes generated by `go mod` including the
`vendor` directory and `go.mod` and `go.sum` in a single commit
separate from any other code changes with the title "vendor: add
github.com/ncw/new_dependency". Remember to `git add` any new files
in `vendor`.
Please check in the changes generated by `dep` including the `vendor`
directory and `Godep.toml` and `Godep.lock` in a single commit
separate from any other code changes. Watch out for new files in
`vendor`.
## Updating a dependency ##
If you need to update a dependency then run
GO111MODULE=on go get -u github.com/pkg/errors
GO111MODULE=on go mod vendor
dep ensure -update github.com/pkg/errors
Check in in a single commit as above.
## Updating all the dependencies ##
In order to update all the dependencies then run `make update`. This
just uses the go modules to update all the modules to their latest
stable release. Check in the changes in a single commit as above.
just runs `dep ensure -update`. Check in the changes in a single
commit as above.
This should be done early in the release cycle to pick up new versions
of packages in time for them to get some testing.
@@ -350,13 +308,7 @@ Unit tests
Integration tests
* Add your backend to `fstest/test_all/config.yaml`
* Once you've done that then you can use the integration test framework from the project root:
* go install ./...
* test_all -backend remote
Or if you want to run the integration tests manually:
* Add your fs to `fstest/test_all/test_all.go`
* Make sure integration tests pass with
* `cd fs/operations`
* `go test -v -remote TestRemote:`
@@ -371,10 +323,11 @@ See the [testing](#testing) section for more information on integration tests.
Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in alphabetical order but with the local file system last.
* `README.md` - main GitHub page
* `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
* `README.md` - main Github page
* `docs/content/remote.md` - main docs page
* `docs/content/overview.md` - overview docs
* `docs/content/docs.md` - list of remotes in config section
* `docs/content/about.md` - front page of rclone.org
* `docs/layouts/chrome/navbar.html` - add it to the website navigation
* `bin/make_manual.py` - add the page to the `docs` constant
* `cmd/cmd.go` - the main help for rclone

490
Gopkg.lock generated Normal file

@@ -0,0 +1,490 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
branch = "master"
name = "bazil.org/fuse"
packages = [
".",
"fs",
"fuseutil"
]
revision = "65cc252bf6691cb3c7014bcb2c8dc29de91e3a7e"
[[projects]]
name = "cloud.google.com/go"
packages = ["compute/metadata"]
revision = "0fd7230b2a7505833d5f69b75cbd6c9582401479"
version = "v0.23.0"
[[projects]]
name = "github.com/Azure/azure-sdk-for-go"
packages = [
"storage",
"version"
]
revision = "fbe7db0e3f9793ba3e5704efbab84f51436c136e"
version = "v18.0.0"
[[projects]]
name = "github.com/Azure/go-autorest"
packages = [
"autorest",
"autorest/adal",
"autorest/azure",
"autorest/date"
]
revision = "1f7cd6cfe0adea687ad44a512dfe76140f804318"
version = "v10.12.0"
[[projects]]
branch = "master"
name = "github.com/Unknwon/goconfig"
packages = ["."]
revision = "ef1e4c783f8f0478bd8bff0edb3dd0bade552599"
[[projects]]
name = "github.com/VividCortex/ewma"
packages = ["."]
revision = "b24eb346a94c3ba12c1da1e564dbac1b498a77ce"
version = "v1.1.1"
[[projects]]
branch = "master"
name = "github.com/a8m/tree"
packages = ["."]
revision = "3cf936ce15d6100c49d9c75f79c220ae7e579599"
[[projects]]
name = "github.com/abbot/go-http-auth"
packages = ["."]
revision = "0ddd408d5d60ea76e320503cc7dd091992dee608"
version = "v0.4.0"
[[projects]]
name = "github.com/aws/aws-sdk-go"
packages = [
"aws",
"aws/awserr",
"aws/awsutil",
"aws/client",
"aws/client/metadata",
"aws/corehandlers",
"aws/credentials",
"aws/credentials/ec2rolecreds",
"aws/credentials/endpointcreds",
"aws/credentials/stscreds",
"aws/csm",
"aws/defaults",
"aws/ec2metadata",
"aws/endpoints",
"aws/request",
"aws/session",
"aws/signer/v4",
"internal/sdkio",
"internal/sdkrand",
"internal/shareddefaults",
"private/protocol",
"private/protocol/eventstream",
"private/protocol/eventstream/eventstreamapi",
"private/protocol/query",
"private/protocol/query/queryutil",
"private/protocol/rest",
"private/protocol/restxml",
"private/protocol/xml/xmlutil",
"service/s3",
"service/s3/s3iface",
"service/s3/s3manager",
"service/sts"
]
revision = "bfc1a07cf158c30c41a3eefba8aae043d0bb5bff"
version = "v1.14.8"
[[projects]]
name = "github.com/billziss-gh/cgofuse"
packages = ["fuse"]
revision = "ea66f9809c71af94522d494d3d617545662ea59d"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/coreos/bbolt"
packages = ["."]
revision = "af9db2027c98c61ecd8e17caa5bd265792b9b9a2"
[[projects]]
name = "github.com/cpuguy83/go-md2man"
packages = ["md2man"]
revision = "20f5889cbdc3c73dbd2862796665e7c465ade7d1"
version = "v1.0.8"
[[projects]]
name = "github.com/davecgh/go-spew"
packages = ["spew"]
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"
[[projects]]
name = "github.com/dgrijalva/jwt-go"
packages = ["."]
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
version = "v3.2.0"
[[projects]]
name = "github.com/djherbis/times"
packages = ["."]
revision = "95292e44976d1217cf3611dc7c8d9466877d3ed5"
version = "v1.0.1"
[[projects]]
name = "github.com/dropbox/dropbox-sdk-go-unofficial"
packages = [
"dropbox",
"dropbox/async",
"dropbox/common",
"dropbox/file_properties",
"dropbox/files",
"dropbox/seen_state",
"dropbox/sharing",
"dropbox/team_common",
"dropbox/team_policies",
"dropbox/users",
"dropbox/users_common"
]
revision = "7afa861bfde5a348d765522b303b6fbd9d250155"
version = "v4.1.0"
[[projects]]
name = "github.com/go-ini/ini"
packages = ["."]
revision = "06f5f3d67269ccec1fe5fe4134ba6e982984f7f5"
version = "v1.37.0"
[[projects]]
name = "github.com/golang/protobuf"
packages = ["proto"]
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/google/go-querystring"
packages = ["query"]
revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a"
[[projects]]
name = "github.com/inconshreveable/mousetrap"
packages = ["."]
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
version = "v1.0"
[[projects]]
branch = "master"
name = "github.com/jlaffaye/ftp"
packages = ["."]
revision = "2403248fa8cc9f7909862627aa7337f13f8e0bf1"
[[projects]]
name = "github.com/jmespath/go-jmespath"
packages = ["."]
revision = "0b12d6b5"
[[projects]]
branch = "master"
name = "github.com/kardianos/osext"
packages = ["."]
revision = "ae77be60afb1dcacde03767a8c37337fad28ac14"
[[projects]]
name = "github.com/kr/fs"
packages = ["."]
revision = "1455def202f6e05b95cc7bfc7e8ae67ae5141eba"
version = "v0.1.0"
[[projects]]
name = "github.com/marstr/guid"
packages = ["."]
revision = "8bd9a64bf37eb297b492a4101fb28e80ac0b290f"
version = "v1.1.0"
[[projects]]
name = "github.com/mattn/go-runewidth"
packages = ["."]
revision = "9e777a8366cce605130a531d2cd6363d07ad7317"
version = "v0.0.2"
[[projects]]
branch = "master"
name = "github.com/ncw/go-acd"
packages = ["."]
revision = "887eb06ab6a255fbf5744b5812788e884078620a"
[[projects]]
name = "github.com/ncw/swift"
packages = ["."]
revision = "b2a7479cf26fa841ff90dd932d0221cb5c50782d"
version = "v1.0.39"
[[projects]]
branch = "master"
name = "github.com/nsf/termbox-go"
packages = ["."]
revision = "5c94acc5e6eb520f1bcd183974e01171cc4c23b3"
[[projects]]
branch = "master"
name = "github.com/okzk/sdnotify"
packages = ["."]
revision = "ed8ca104421a21947710335006107540e3ecb335"
[[projects]]
name = "github.com/patrickmn/go-cache"
packages = ["."]
revision = "a3647f8e31d79543b2d0f0ae2fe5c379d72cedc0"
version = "v2.1.0"
[[projects]]
name = "github.com/pengsrc/go-shared"
packages = [
"buffer",
"check",
"convert",
"log",
"reopen"
]
revision = "807ee759d82c84982a89fb3dc875ef884942f1e5"
version = "v0.2.0"
[[projects]]
name = "github.com/pkg/errors"
packages = ["."]
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
version = "v0.8.0"
[[projects]]
name = "github.com/pkg/sftp"
packages = ["."]
revision = "57673e38ea946592a59c26592b7e6fbda646975b"
version = "1.8.0"
[[projects]]
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
name = "github.com/rfjakob/eme"
packages = ["."]
revision = "01668ae55fe0b79a483095689043cce3e80260db"
version = "v1.1"
[[projects]]
name = "github.com/russross/blackfriday"
packages = ["."]
revision = "55d61fa8aa702f59229e6cff85793c22e580eaf5"
version = "v1.5.1"
[[projects]]
name = "github.com/satori/go.uuid"
packages = ["."]
revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
version = "v1.2.0"
[[projects]]
branch = "master"
name = "github.com/sevlyar/go-daemon"
packages = ["."]
revision = "f9261e73885de99b1647d68bedadf2b9a99ad11f"
[[projects]]
branch = "master"
name = "github.com/skratchdot/open-golang"
packages = ["open"]
revision = "75fb7ed4208cf72d323d7d02fd1a5964a7a9073c"
[[projects]]
name = "github.com/spf13/cobra"
packages = [
".",
"doc"
]
revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385"
version = "v0.0.3"
[[projects]]
name = "github.com/spf13/pflag"
packages = ["."]
revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
version = "v1.0.1"
[[projects]]
name = "github.com/stretchr/testify"
packages = [
"assert",
"require"
]
revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
version = "v1.2.2"
[[projects]]
branch = "master"
name = "github.com/t3rm1n4l/go-mega"
packages = ["."]
revision = "57978a63bd3f91fa7e188b751a7e7e6dd4e33813"
[[projects]]
branch = "master"
name = "github.com/xanzy/ssh-agent"
packages = ["."]
revision = "ba9c9e33906f58169366275e3450db66139a31a9"
[[projects]]
name = "github.com/yunify/qingstor-sdk-go"
packages = [
".",
"config",
"logger",
"request",
"request/builder",
"request/data",
"request/errors",
"request/signer",
"request/unpacker",
"service",
"utils"
]
revision = "4f9ac88c5fec7350e960aabd0de1f1ede0ad2895"
version = "v2.2.14"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = [
"bcrypt",
"blowfish",
"curve25519",
"ed25519",
"ed25519/internal/edwards25519",
"internal/chacha20",
"internal/subtle",
"nacl/secretbox",
"pbkdf2",
"poly1305",
"salsa20/salsa",
"scrypt",
"ssh",
"ssh/agent",
"ssh/terminal"
]
revision = "027cca12c2d63e3d62b670d901e8a2c95854feec"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = [
"context",
"context/ctxhttp",
"html",
"html/atom",
"http/httpguts",
"http2",
"http2/hpack",
"idna",
"publicsuffix",
"webdav",
"webdav/internal/xml",
"websocket"
]
revision = "db08ff08e8622530d9ed3a0e8ac279f6d4c02196"
[[projects]]
branch = "master"
name = "golang.org/x/oauth2"
packages = [
".",
"google",
"internal",
"jws",
"jwt"
]
revision = "1e0a3fa8ba9a5c9eb35c271780101fdaf1b205d7"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = [
"unix",
"windows"
]
revision = "6c888cc515d3ed83fc103cf1d84468aad274b0a7"
[[projects]]
name = "golang.org/x/text"
packages = [
"collate",
"collate/build",
"internal/colltab",
"internal/gen",
"internal/tag",
"internal/triegen",
"internal/ucd",
"language",
"secure/bidirule",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable"
]
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]]
branch = "master"
name = "golang.org/x/time"
packages = ["rate"]
revision = "fbb02b2291d28baffd63558aa44b4b56f178d650"
[[projects]]
branch = "master"
name = "google.golang.org/api"
packages = [
"drive/v3",
"gensupport",
"googleapi",
"googleapi/internal/uritemplates",
"storage/v1"
]
revision = "2eea9ba0a3d94f6ab46508083e299a00bbbc65f6"
[[projects]]
name = "google.golang.org/appengine"
packages = [
".",
"internal",
"internal/app_identity",
"internal/base",
"internal/datastore",
"internal/log",
"internal/modules",
"internal/remote_api",
"internal/urlfetch",
"log",
"urlfetch"
]
revision = "b1f26356af11148e710935ed1ac8a7f5702c7612"
version = "v1.1.0"
[[projects]]
name = "gopkg.in/yaml.v2"
packages = ["."]
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
version = "v2.2.1"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "c1378c5fc821e27711155958ff64b3c74b56818ba4733dbfe0c86d518c32880e"
solver-name = "gps-cdcl"
solver-version = 1

11
Gopkg.toml Normal file

@@ -0,0 +1,11 @@
# pin this to master to pull in the macOS changes
# can likely remove for 1.43
[[override]]
branch = "master"
name = "github.com/sevlyar/go-daemon"
# pin this to master to pull in the fix for linux/mips
# can likely remove for 1.43
[[override]]
branch = "master"
name = "github.com/coreos/bbolt"

43
ISSUE_TEMPLATE.md Normal file

@@ -0,0 +1,43 @@
<!--
Hi!
We understand you are having a problem with rclone or have an idea for an improvement - we want to help you with that!
If you've just got a question or aren't sure if you've found a bug then please use the rclone forum
https://forum.rclone.org/
instead of filing an issue. We'll reply quickly and it won't increase our massive issue backlog.
If you think you might have found a bug, please can you try to replicate it with the latest beta?
https://beta.rclone.org/
If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)
If you have an idea for an improvement, then please search the old issues first and if you don't find your idea, make a new issue.
Thanks
The Rclone Developers
-->
#### What is the problem you are having with rclone?
#### What is your rclone version (eg output from `rclone -V`)
#### Which OS you are using and how many bits (eg Windows 7, 64 bit)
#### Which cloud storage system are you using? (eg Google Drive)
#### The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
#### A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)

MAINTAINERS.md

@@ -1,17 +1,12 @@
# Maintainers guide for rclone #
Current active maintainers of rclone are:
Current active maintainers of rclone are
| Name | GitHub ID | Specific Responsibilities |
| :--------------- | :---------- | :-------------------------- |
| Nick Craig-Wood | @ncw | overall project health |
| Stefan Breunig | @breunigs | |
| Ishuah Kariuki | @ishuah | |
| Remus Bunduc | @remusb | cache backend |
| Fabian Möller | @B4dM4n | |
| Alex Chen | @Cnly | onedrive backend |
| Sandeep Ummadi | @sandeepkru | azureblob backend |
| Sebastian Bünger | @buengese | jottacloud & yandex backends |
* Nick Craig-Wood @ncw
* Stefan Breunig @breunigs
* Ishuah Kariuki @ishuah
* Remus Bunduc @remusb - cache subsystem maintainer
* Fabian Möller @B4dM4n
**This is a work-in-progress draft**
@@ -61,7 +56,7 @@ Close tickets as soon as you can - make sure they are tagged with a release. Po
Try to process pull requests promptly!
Merging pull requests on GitHub itself works quite well nowadays so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
Merging pull requests on Github itself works quite well nowadays so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
After merging the commit, in your local master branch, do `git pull` then run `bin/update-authors.py` to update the authors file then `git push`.

File diff suppressed because it is too large

8948
MANUAL.md

File diff suppressed because it is too large

7625
MANUAL.txt

File diff suppressed because it is too large

117
Makefile

@@ -1,9 +1,5 @@
SHELL = bash
BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(shell git rev-parse --abbrev-ref HEAD))
LAST_TAG := $(shell git describe --tags --abbrev=0)
ifeq ($(BRANCH),$(LAST_TAG))
BRANCH := master
endif
TAG_BRANCH := -$(BRANCH)
BRANCH_PATH := branch/
ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
@@ -11,12 +7,12 @@ ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
BRANCH_PATH :=
endif
TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
ifneq ($(TAG),$(LAST_TAG))
TAG := $(TAG)-beta
endif
LAST_TAG := $(shell git describe --tags --abbrev=0)
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
GO_VERSION := $(shell go version)
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
# Run full tests if go >= go1.9
FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 9)')
BETA_PATH := $(BRANCH_PATH)$(TAG)
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
BETA_UPLOAD_ROOT := memstore:beta-rclone-org
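The version arithmetic here is easy to miss inside the perl one-liner: NEW_TAG strips the leading "v" from LAST_TAG, adds 0.01, and re-formats it (the two sides of the diff differ only in the sprintf format, "v%.2f.0" versus "v%.2f"). A minimal Go sketch of the "v%.2f" variant, for illustration only:

    package main

    import "fmt"

    // nextTag mimics the Makefile's NEW_TAG perl one-liner ("v%.2f"
    // variant): strip the leading "v", add 0.01, re-format.
    func nextTag(lastTag string) string {
        var v float64
        fmt.Sscanf(lastTag, "v%f", &v)
        return fmt.Sprintf("v%.2f", v+0.01)
    }

    func main() {
        fmt.Println(nextTag("v1.42")) // v1.43
    }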
@@ -40,6 +36,7 @@ vars:
@echo LAST_TAG="'$(LAST_TAG)'"
@echo NEW_TAG="'$(NEW_TAG)'"
@echo GO_VERSION="'$(GO_VERSION)'"
@echo FULL_TESTS="'$(FULL_TESTS)'"
@echo BETA_URL="'$(BETA_URL)'"
version:
@@ -47,29 +44,47 @@ version:
# Full suite of integration tests
test: rclone
go install --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/ncw/rclone/fstest/test_all
-test_all 2>&1 | tee test_all.log
@echo "Written logs in test_all.log"
go install github.com/ncw/rclone/fstest/test_all
-go test -v -count 1 $(BUILDTAGS) $(GO_FILES) 2>&1 | tee test.log
-test_all github.com/ncw/rclone/fs/operations github.com/ncw/rclone/fs/sync 2>&1 | tee fs/test_all.log
@echo "Written logs in test.log and fs/test_all.log"
# Quick test
quicktest:
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) $(GO_FILES)
racequicktest:
ifdef FULL_TESTS
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race $(GO_FILES)
endif
# Do source code quality checks
check: rclone
@# we still run go vet for -printfuncs which golangci-lint doesn't do yet
@# see: https://github.com/golangci/golangci-lint/issues/204
@echo "-- START CODE QUALITY REPORT -------------------------------"
@go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
@golangci-lint run ./...
@echo "-- END CODE QUALITY REPORT ---------------------------------"
ifdef FULL_TESTS
go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
errcheck $(BUILDTAGS) ./...
find . -name \*.go | grep -v /vendor/ | xargs goimports -d | grep . ; test $$? -eq 1
go list ./... | xargs -n1 golint | grep -E -v '(StorageUrl|CdnUrl)' ; test $$? -eq 1
else
@echo Skipping source quality tests as version of go too old
endif
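The -printfuncs argument is what lets go vet treat rclone's logging helpers like fmt.Printf and verify their format strings against their arguments. A minimal sketch with a stand-in helper (Debugf here is a hypothetical local function, not rclone's fs.Debugf):

    package main

    import "fmt"

    // Debugf is a printf-style logging helper of the kind listed in
    // -printfuncs; vet then checks its format string and arguments
    // exactly as it does for fmt.Printf.
    func Debugf(o interface{}, format string, args ...interface{}) {
        fmt.Printf("DEBUG %v: "+format+"\n", append([]interface{}{o}, args...)...)
    }

    func main() {
        Debugf("object", "copied %d bytes", 42) // OK
        // Debugf("object", "copied %d bytes")  // vet -printfuncs=Debugf would flag the missing argument
    }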
gometalinter_install:
go get -u github.com/alecthomas/gometalinter
gometalinter --install --update
# We aren't using gometalinter as the default linter yet because
# 1. it doesn't support build tags: https://github.com/alecthomas/gometalinter/issues/275
# 2. can't get -printfuncs working with the vet linter
gometalinter:
gometalinter ./...
# Get the build dependencies
build_dep:
go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
ifdef FULL_TESTS
go get -u github.com/kisielk/errcheck
go get -u golang.org/x/tools/cmd/goimports
go get -u github.com/golang/lint/golint
go get -u github.com/tools/godep
endif
# Get the release dependencies
release_dep:
@@ -78,16 +93,15 @@ release_dep:
# Update dependencies
update:
GO111MODULE=on go get -u ./...
GO111MODULE=on go mod tidy
GO111MODULE=on go mod vendor
go get -u github.com/golang/dep/cmd/dep
dep ensure -update -v
doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
doc: rclone.1 MANUAL.html MANUAL.txt
rclone.1: MANUAL.md
pandoc -s --from markdown --to man MANUAL.md -o rclone.1
MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs
MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs
./bin/make_manual.py
MANUAL.html: MANUAL.md
@@ -97,10 +111,7 @@ MANUAL.txt: MANUAL.md
pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt
commanddocs: rclone
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/commands/
backenddocs: rclone bin/make_backend_docs.py
./bin/make_backend_docs.py
rclone gendocs docs/content/commands/
rcdocs: rclone
bin/make_rc_docs.sh
@@ -135,8 +146,8 @@ check_sign:
cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c
upload:
rclone -P copy build/ memstore:downloads-rclone-org/$(TAG)
rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "memstore:downloads-rclone-org/$(TAG)/$$i" "memstore:downloads-rclone-org/$$j"'
rclone -v copy --exclude '*current*' build/ memstore:downloads-rclone-org/$(TAG)
rclone -v copy --include '*current*' --include version.txt build/ memstore:downloads-rclone-org
upload_github:
./bin/upload-github $(TAG)
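The bash pattern match in the upload target rewrites each versioned artifact name to a "-current-" alias before copying it to the download root. A Go sketch of the same mapping (illustrative; the Makefile does this with BASH_REMATCH):

    package main

    import (
        "fmt"
        "regexp"
    )

    // versionSeg captures the same groups as [[ $i =~ (.*)(-v[0-9\.]+-)(.*) ]].
    var versionSeg = regexp.MustCompile(`(.*)(-v[0-9.]+-)(.*)`)

    // currentName replaces the version segment with "-current-".
    func currentName(name string) string {
        if m := versionSeg.FindStringSubmatch(name); m != nil {
            return m[1] + "-current-" + m[3]
        }
        return name
    }

    func main() {
        fmt.Println(currentName("rclone-v1.42-linux-amd64.zip"))
        // rclone-current-linux-amd64.zip
    }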
@@ -145,15 +156,19 @@ cross: doc
go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)
beta:
go run bin/cross-compile.go $(BUILDTAGS) $(TAG)
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
@echo Beta release ready at https://pub.rclone.org/$(TAG)/
go run bin/cross-compile.go $(BUILDTAGS) $(TAG)β
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)β
@echo Beta release ready at https://pub.rclone.org/$(TAG)%CE%B2/
log_since_last_release:
git log $(LAST_TAG)..
compile_all:
go run bin/cross-compile.go -parallel 8 -compile-only $(BUILDTAGS) $(TAG)
ifdef FULL_TESTS
go run bin/cross-compile.go -parallel 8 -compile-only $(BUILDTAGS) $(TAG)β
else
@echo Skipping compile all as version of go too old
endif
appveyor_upload:
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
@@ -162,38 +177,29 @@ ifndef BRANCH_PATH
endif
@echo Beta release ready at $(BETA_URL)
circleci_upload:
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
ifndef BRANCH_PATH
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
endif
@echo Beta release ready at $(BETA_URL)/testbuilds
BUILD_FLAGS := -exclude "^(windows|darwin)/"
ifeq ($(TRAVIS_OS_NAME),osx)
BUILD_FLAGS := -include "^darwin/" -cgo
endif
ifeq ($(TRAVIS_OS_NAME),windows)
# BUILD_FLAGS := -include "^windows/" -cgo
# 386 doesn't build yet
BUILD_FLAGS := -include "^windows/amd64" -cgo
endif
travis_beta:
ifeq ($(TRAVIS_OS_NAME),linux)
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar.gz'
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
endif
git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) -parallel 8 $(BUILDTAGS) $(TAG)
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) -parallel 8 $(BUILDTAGS) $(TAG)β
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
endif
@echo Beta release ready at $(BETA_URL)
# Fetch the binary builds from travis and appveyor
fetch_binaries:
rclone -P sync $(BETA_UPLOAD) build/
# Fetch the windows builds from appveyor
fetch_windows:
rclone -v copy --include 'rclone-v*-windows-*.zip' $(BETA_UPLOAD) build/
-#cp -av build/rclone-v*-windows-386.zip build/rclone-current-windows-386.zip
-#cp -av build/rclone-v*-windows-amd64.zip build/rclone-current-windows-amd64.zip
md5sum build/rclone-*-windows-*.zip | sort
serve: website
cd docs && hugo server -v -w
@@ -204,10 +210,10 @@ tag: doc
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
echo -n "$(NEW_TAG)" > docs/layouts/partials/version.html
git tag -s -m "Version $(NEW_TAG)" $(NEW_TAG)
bin/make_changelog.py $(LAST_TAG) $(NEW_TAG) > docs/content/changelog.md.new
mv docs/content/changelog.md.new docs/content/changelog.md
@echo "Edit the new changelog in docs/content/changelog.md"
@echo "Then commit all the changes"
@echo " * $(NEW_TAG) -" `date -I` >> docs/content/changelog.md
@git log $(LAST_TAG)..$(NEW_TAG) --oneline >> docs/content/changelog.md
@echo "Then commit the changes"
@echo git commit -m \"Version $(NEW_TAG)\" -a -v
@echo "And finally run make retag before make cross etc"
@@ -220,3 +226,4 @@ startdev:
winzip:
zip -9 rclone-$(TAG).zip rclone.exe

106
README.md

@@ -2,11 +2,10 @@
[Website](https://rclone.org) |
[Documentation](https://rclone.org/docs/) |
[Download](https://rclone.org/downloads/) |
[Contributing](CONTRIBUTING.md) |
[Changelog](https://rclone.org/changelog/) |
[Installation](https://rclone.org/install/) |
[Forum](https://forum.rclone.org/) |
[Forum](https://forum.rclone.org/)
[G+](https://google.com/+RcloneOrg)
[![Build Status](https://travis-ci.org/ncw/rclone.svg?branch=master)](https://travis-ci.org/ncw/rclone)
@@ -14,84 +13,49 @@
[![CircleCI](https://circleci.com/gh/ncw/rclone/tree/master.svg?style=svg)](https://circleci.com/gh/ncw/rclone/tree/master)
[![GoDoc](https://godoc.org/github.com/ncw/rclone?status.svg)](https://godoc.org/github.com/ncw/rclone)
# Rclone
Rclone is a command line program to sync files and directories to and from
Rclone *("rsync for cloud storage")* is a command line program to sync files and directories to and from different cloud storage providers.
* Amazon Drive
* Amazon S3 / Dreamhost / Ceph / Minio / Wasabi
* Backblaze B2
* Box
* Dropbox
* FTP
* Google Cloud Storage
* Google Drive
* HTTP
* Hubic
* Mega
* Microsoft Azure Blob Storage
* Microsoft OneDrive
* OpenDrive
* Openstack Swift / Rackspace cloud files / Memset Memstore / OVH / Oracle Cloud Storage
* pCloud
* QingStor
* SFTP
* Webdav / Owncloud / Nextcloud
* Yandex Disk
* The local filesystem
## Storage providers
Features
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
* Box [:page_facing_up:](https://rclone.org/box/)
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
* OVH [:page_facing_up:](https://rclone.org/swift/)
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* put.io [:page_facing_up:](https://rclone.org/webdav/#put-io)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
* The local filesystem [:page_facing_up:](https://rclone.org/local/)
Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
## Features
* MD5/SHA-1 hashes checked at all times for file integrity
* MD5/SHA1 hashes checked at all times for file integrity
* Timestamps preserved on files
* Partial syncs supported on a whole file basis
* [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
* Can sync to and from network, e.g. two different cloud accounts
* Optional encryption ([Crypt](https://rclone.org/crypt/))
* Optional cache ([Cache](https://rclone.org/cache/))
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
* Copy mode to just copy new/changed files
* Sync (one way) mode to make a directory identical
* Check mode to check for file hash equality
* Can sync to and from network, eg two different cloud accounts
* Optional encryption (Crypt)
* Optional FUSE mount
## Installation & documentation
See the home page for installation, usage, documentation, changelog
and configuration walkthroughs.
Please see the [rclone website](https://rclone.org/) for:
* [Installation](https://rclone.org/install/)
* [Documentation & configuration](https://rclone.org/docs/)
* [Changelog](https://rclone.org/changelog/)
* [FAQ](https://rclone.org/faq/)
* [Storage providers](https://rclone.org/overview/)
* [Forum](https://forum.rclone.org/)
* ...and more
## Downloads
* https://rclone.org/downloads/
* https://rclone.org/
License
-------
This is free software under the terms of the MIT license (check the
[COPYING file](/COPYING) included in this package).
COPYING file included in this package).

RELEASE.md

@@ -13,9 +13,14 @@ Making a release
* git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX"
* make retag
* make release_dep
* # Set the GOPATH for a current stable go compiler
* make cross
* git checkout docs/content/commands # to undo date changes in commands
* git push --tags origin master
* # Wait for the appveyor and travis builds to complete then...
* make fetch_binaries
* git push --tags origin master:stable # update the stable branch for packager.io
* # Wait for the appveyor and travis builds to complete then fetch the windows binaries from appveyor
* make fetch_windows
* make tarball
* make sign_upload
* make check_sign
@@ -26,45 +31,11 @@ Making a release
* # announce with forum post, twitter post, G+ post
Early in the next release cycle update the vendored dependencies
* Review any pinned packages in go.mod and remove if possible
* Review any pinned packages in Gopkg.toml and remove if possible
* make update
* git status
* git add new files
* carry forward any patches to vendor stuff
* git commit -a -v
If `make update` fails with errors like this:
```
# github.com/cpuguy83/go-md2man/md2man
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:11:16: undefined: blackfriday.EXTENSION_NO_INTRA_EMPHASIS
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:12:16: undefined: blackfriday.EXTENSION_TABLES
```
Can be fixed with
* GO111MODULE=on go get -u github.com/russross/blackfriday@v1.5.2
* GO111MODULE=on go mod tidy
* GO111MODULE=on go mod vendor
Making a point release. If rclone needs a point release due to some
horrendous bug, then
* git branch v1.XX v1.XX-fixes
* git cherry-pick any fixes
* Test (see above)
* make NEW_TAG=v1.XX.1 tag
* edit docs/content/changelog.md
* make TAG=v1.43.1 doc
* git commit -a -v -m "Version v1.XX.1"
* git tag -d v1.XX.1
* git tag -s -m "Version v1.XX.1" v1.XX.1
* git push --tags -u origin v1.XX-fixes
* make BRANCH_PATH= TAG=v1.43.1 fetch_binaries
* make TAG=v1.43.1 tarball
* make TAG=v1.43.1 sign_upload
* make TAG=v1.43.1 check_sign
* make TAG=v1.43.1 upload
* make TAG=v1.43.1 upload_website
* make TAG=v1.43.1 upload_github
* NB this overwrites the current beta so after the release, rebuild the last travis build
* Announce!
Make the version number be just in a file?

backend/alias/alias.go

@@ -2,12 +2,12 @@ package alias
import (
"errors"
"path"
"path/filepath"
"strings"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fspath"
"github.com/ncw/rclone/fs/config"
)
// Register with Fs
@@ -17,38 +17,29 @@ func init() {
Description: "Alias for an existing remote",
NewFs: NewFs,
Options: []fs.Option{{
Name: "remote",
Help: "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
Required: true,
Name: "remote",
Help: "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
}},
}
fs.Register(fsi)
}
// Options defines the configuration for this backend
type Options struct {
Remote string `config:"remote"`
}
// NewFs constructs an Fs from the path.
// NewFs contstructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if opt.Remote == "" {
func NewFs(name, root string) (fs.Fs, error) {
remote := config.FileGet(name, "remote")
if remote == "" {
return nil, errors.New("alias can't point to an empty remote - check the value of the remote setting")
}
if strings.HasPrefix(opt.Remote, name+":") {
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
}
fsInfo, configName, fsPath, config, err := fs.ConfigFs(opt.Remote)
fsInfo, configName, fsPath, err := fs.ParseRemote(remote)
if err != nil {
return nil, err
}
return fsInfo.NewFs(configName, fspath.JoinRootPath(fsPath, root), config)
root = filepath.ToSlash(root)
return fsInfo.NewFs(configName, path.Join(fsPath, root))
}
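Both sides of this hunk end the same way: the root requested by the user is joined onto the alias target's path (the removed side via fspath.JoinRootPath, the restored side via filepath.ToSlash plus path.Join). A self-contained sketch of the restored side's join, using only the standard library:

    package main

    import (
        "fmt"
        "path"
        "path/filepath"
    )

    // join reproduces the path handling above: normalise the OS-specific
    // root to forward slashes, then join it onto the alias target's path.
    func join(fsPath, root string) string {
        return path.Join(fsPath, filepath.ToSlash(root))
    }

    func main() {
        // e.g. an alias pointing at "drive:backup" opened with root "photos/2018"
        fmt.Println(join("backup", "photos/2018")) // backup/photos/2018
    }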

backend/alias/alias_internal_test.go

@@ -80,7 +80,7 @@ func TestNewFS(t *testing.T) {
wantEntry := test.entries[i]
require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
require.Equal(t, wantEntry.size, gotEntry.Size(), what)
require.Equal(t, wantEntry.size, int64(gotEntry.Size()), what)
_, isDir := gotEntry.(fs.Directory)
require.Equal(t, wantEntry.isDir, isDir, what)
}

backend/all/all.go

@@ -15,8 +15,6 @@ import (
_ "github.com/ncw/rclone/backend/googlecloudstorage"
_ "github.com/ncw/rclone/backend/http"
_ "github.com/ncw/rclone/backend/hubic"
_ "github.com/ncw/rclone/backend/jottacloud"
_ "github.com/ncw/rclone/backend/koofr"
_ "github.com/ncw/rclone/backend/local"
_ "github.com/ncw/rclone/backend/mega"
_ "github.com/ncw/rclone/backend/onedrive"
@@ -26,7 +24,6 @@ import (
_ "github.com/ncw/rclone/backend/s3"
_ "github.com/ncw/rclone/backend/sftp"
_ "github.com/ncw/rclone/backend/swift"
_ "github.com/ncw/rclone/backend/union"
_ "github.com/ncw/rclone/backend/webdav"
_ "github.com/ncw/rclone/backend/yandex"
)

backend/amazonclouddrive/amazonclouddrive.go

@@ -21,11 +21,10 @@ import (
"strings"
"time"
acd "github.com/ncw/go-acd"
"github.com/ncw/go-acd"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
@@ -38,17 +37,19 @@ import (
)
const (
folderKind = "FOLDER"
fileKind = "FILE"
statusAvailable = "AVAILABLE"
timeFormat = time.RFC3339 // 2014-03-07T22:31:12.173Z
minSleep = 20 * time.Millisecond
warnFileSize = 50000 << 20 // Display warning for files larger than this size
defaultTempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink
folderKind = "FOLDER"
fileKind = "FILE"
statusAvailable = "AVAILABLE"
timeFormat = time.RFC3339 // 2014-03-07T22:31:12.173Z
minSleep = 20 * time.Millisecond
warnFileSize = 50000 << 20 // Display warning for files larger than this size
)
// Globals
var (
// Flags
tempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink
uploadWaitPerGB = flags.DurationP("acd-upload-wait-per-gb", "", 180*time.Second, "Additional time per GB to wait after a failed complete upload to see if it appears.")
// Description of how to auth for this app
acdConfig = &oauth2.Config{
Scopes: []string{"clouddrive:read_all", "clouddrive:write"},
@@ -66,96 +67,40 @@ var (
func init() {
fs.Register(&fs.RegInfo{
Name: "amazon cloud drive",
Prefix: "acd",
Description: "Amazon Drive",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
err := oauthutil.Config("amazon cloud drive", name, m, acdConfig)
Config: func(name string) {
err := oauthutil.Config("amazon cloud drive", name, acdConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Amazon Application Client ID.",
Required: true,
Name: config.ConfigClientID,
Help: "Amazon Application Client Id - required.",
}, {
Name: config.ConfigClientSecret,
Help: "Amazon Application Client Secret.",
Required: true,
Name: config.ConfigClientSecret,
Help: "Amazon Application Client Secret - required.",
}, {
Name: config.ConfigAuthURL,
Help: "Auth server URL.\nLeave blank to use Amazon's.",
Advanced: true,
Name: config.ConfigAuthURL,
Help: "Auth server URL - leave blank to use Amazon's.",
}, {
Name: config.ConfigTokenURL,
Help: "Token server url.\nleave blank to use Amazon's.",
Advanced: true,
}, {
Name: "checkpoint",
Help: "Checkpoint for internal polling (debug).",
Hide: fs.OptionHideBoth,
Advanced: true,
}, {
Name: "upload_wait_per_gb",
Help: `Additional time per GB to wait after a failed complete upload to see if it appears.
Sometimes Amazon Drive gives an error when a file has been fully
uploaded but the file appears anyway after a little while. This
happens sometimes for files over 1GB in size and nearly every time for
files bigger than 10GB. This parameter controls the time rclone waits
for the file to appear.
The default value for this parameter is 3 minutes per GB, so by
default it will wait 3 minutes for every GB uploaded to see if the
file appears.
You can disable this feature by setting it to 0. This may cause
conflict errors as rclone retries the failed upload but the file will
most likely appear correctly eventually.
These values were determined empirically by observing lots of uploads
of big files for a range of file sizes.
Upload with the "-v" flag to see more info about what rclone is doing
in this situation.`,
Default: fs.Duration(180 * time.Second),
Advanced: true,
}, {
Name: "templink_threshold",
Help: `Files >= this size will be downloaded via their tempLink.
Files this size or more will be downloaded via their "tempLink". This
is to work around a problem with Amazon Drive which blocks downloads
of files bigger than about 10GB. The default for this is 9GB which
shouldn't need to be changed.
To download files above this threshold, rclone requests a "tempLink"
which downloads the file through a temporary URL directly from the
underlying S3 storage.`,
Default: defaultTempLinkThreshold,
Advanced: true,
Name: config.ConfigTokenURL,
Help: "Token server url - leave blank to use Amazon's.",
}},
})
}
// Options defines the configuration for this backend
type Options struct {
Checkpoint string `config:"checkpoint"`
UploadWaitPerGB fs.Duration `config:"upload_wait_per_gb"`
TempLinkThreshold fs.SizeSuffix `config:"templink_threshold"`
flags.VarP(&tempLinkThreshold, "acd-templink-threshold", "", "Files >= this size will be downloaded via their tempLink.")
}
// Fs represents a remote acd server
type Fs struct {
name string // name of this remote
features *fs.Features // optional features
opt Options // options for this Fs
c *acd.Client // the connection to the acd server
noAuthClient *http.Client // unauthenticated http client
root string // the path we are working on
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
pacer *pacer.Pacer // pacer for API calls
trueRootID string // ID of true root directory
tokenRenewer *oauthutil.Renew // renew the token on expiry
}
@@ -246,13 +191,7 @@ func filterRequest(req *http.Request) {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
func NewFs(name, root string) (fs.Fs, error) {
root = parsePath(root)
baseClient := fshttp.NewClient(fs.Config)
if do, ok := baseClient.Transport.(interface {
@@ -262,18 +201,17 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
} else {
fs.Debugf(name+":", "Couldn't add request filter - large file downloads will fail")
}
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, acdConfig, baseClient)
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, acdConfig, baseClient)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Amazon Drive")
log.Fatalf("Failed to configure Amazon Drive: %v", err)
}
c := acd.NewClient(oAuthClient)
f := &Fs{
name: name,
root: root,
opt: *opt,
c: c,
pacer: fs.NewPacer(pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
noAuthClient: fshttp.NewClient(fs.Config),
}
f.features = (&fs.Features{
@@ -312,16 +250,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
tempF := *f
tempF.dirCache = dircache.New(newRoot, f.trueRootID, &tempF)
tempF.root = newRoot
newF := *f
newF.dirCache = dircache.New(newRoot, f.trueRootID, &newF)
newF.root = newRoot
// Make new Fs which is the parent
err = tempF.dirCache.FindRoot(false)
err = newF.dirCache.FindRoot(false)
if err != nil {
// No root so return old f
return f, nil
}
_, err := tempF.newObjectWithInfo(remote, nil)
_, err := newF.newObjectWithInfo(remote, nil)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
@@ -329,13 +267,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
return nil, err
}
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/ncw/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
return &newF, fs.ErrorIsFile
}
return f, nil
}
@@ -594,13 +527,13 @@ func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, i
}
// Don't wait for uploads - assume they will appear later
if f.opt.UploadWaitPerGB <= 0 {
if *uploadWaitPerGB <= 0 {
fs.Debugf(src, "Upload error detected but waiting disabled: %v (%q)", inErr, httpStatus)
return false, inInfo, inErr
}
// Time we should wait for the upload
uploadWaitPerByte := float64(f.opt.UploadWaitPerGB) / 1024 / 1024 / 1024
uploadWaitPerByte := float64(*uploadWaitPerGB) / 1024 / 1024 / 1024
timeToWait := time.Duration(uploadWaitPerByte * float64(src.Size()))
const sleepTime = 5 * time.Second // sleep between tries
@@ -1082,7 +1015,7 @@ func (o *Object) Storable() bool {
// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
bigObject := o.Size() >= int64(o.fs.opt.TempLinkThreshold)
bigObject := o.Size() >= int64(tempLinkThreshold)
if bigObject {
fs.Debugf(o, "Downloading large object via tempLink")
}
@@ -1274,38 +1207,24 @@ func (o *Object) MimeType() string {
// Automatically restarts itself in case of unexpected behaviour of the remote.
//
// Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
checkpoint := f.opt.Checkpoint
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
checkpoint := config.FileGet(f.name, "checkpoint")
quit := make(chan bool)
go func() {
var ticker *time.Ticker
var tickerC <-chan time.Time
for {
checkpoint = f.changeNotifyRunner(notifyFunc, checkpoint)
if err := config.SetValueAndSave(f.name, "checkpoint", checkpoint); err != nil {
fs.Debugf(f, "Unable to save checkpoint: %v", err)
}
select {
case pollInterval, ok := <-pollIntervalChan:
if !ok {
if ticker != nil {
ticker.Stop()
}
return
}
if pollInterval == 0 {
if ticker != nil {
ticker.Stop()
ticker, tickerC = nil, nil
}
} else {
ticker = time.NewTicker(pollInterval)
tickerC = ticker.C
}
case <-tickerC:
checkpoint = f.changeNotifyRunner(notifyFunc, checkpoint)
if err := config.SetValueAndSave(f.name, "checkpoint", checkpoint); err != nil {
fs.Debugf(f, "Unable to save checkpoint: %v", err)
}
case <-quit:
return
case <-time.After(pollInterval):
}
}
}()
return quit
}
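A minimal, hypothetical caller of the new channel-driven ChangeNotify, showing how polling is started, paused and stopped:

	pollIntervalChan := make(chan time.Duration)
	go f.ChangeNotify(func(path string, entryType fs.EntryType) {
		fs.Infof(nil, "change detected at %q (%v)", path, entryType)
	}, pollIntervalChan)
	pollIntervalChan <- time.Minute // start polling once a minute
	pollIntervalChan <- 0           // a zero interval pauses the ticker
	close(pollIntervalChan)         // closing the channel stops the notifier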
func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoint string) string {

File diff suppressed because it is too large

View File

@@ -1,18 +0,0 @@
// +build !plan9,!solaris,go1.8
package azureblob
import (
"testing"
"github.com/stretchr/testify/assert"
)
func (f *Fs) InternalTest(t *testing.T) {
// Check first feature flags are set on this
// remote
enabled := f.Features().SetTier
assert.True(t, enabled)
enabled = f.Features().GetTier
assert.True(t, enabled)
}

View File

@@ -1,37 +1,17 @@
// Test AzureBlob filesystem interface
// +build !plan9,!solaris,go1.8
package azureblob
package azureblob_test
import (
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/backend/azureblob"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MaxChunkSize: maxChunkSize,
},
RemoteName: "TestAzureBlob:",
NilObject: (*azureblob.Object)(nil),
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
)

View File

@@ -1,6 +0,0 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build plan9 solaris !go1.8
package azureblob

View File

@@ -17,12 +17,12 @@ type Error struct {
Message string `json:"message"` // A human-readable message, in English, saying what went wrong.
}
// Error satisfies the error interface
// Error statisfies the error interface
func (e *Error) Error() string {
return fmt.Sprintf("%s (%d %s)", e.Message, e.Status, e.Code)
}
// Fatal satisfies the Fatal interface
// Fatal statisfies the Fatal interface
//
// It indicates which errors should be treated as fatal
func (e *Error) Fatal() bool {
@@ -31,6 +31,11 @@ func (e *Error) Fatal() bool {
var _ fserrors.Fataler = (*Error)(nil)
// Account describes a B2 account
type Account struct {
ID string `json:"accountId"` // The identifier for the account.
}
// Bucket describes a B2 bucket
type Bucket struct {
ID string `json:"bucketId"`
@@ -69,7 +74,7 @@ const versionFormat = "-v2006-01-02-150405.000"
func (t Timestamp) AddVersion(remote string) string {
ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)]
s := time.Time(t).Format(versionFormat)
s := (time.Time)(t).Format(versionFormat)
// Replace the '.' with a '-'
s = strings.Replace(s, ".", "-", -1)
return base + s + ext
@@ -100,22 +105,22 @@ func RemoveVersion(remote string) (t Timestamp, newRemote string) {
return Timestamp(newT), base[:versionStart] + ext
}
// IsZero returns true if the timestamp is uninitialized
// IsZero returns true if the timestamp is unitialised
func (t Timestamp) IsZero() bool {
return time.Time(t).IsZero()
return (time.Time)(t).IsZero()
}
// Equal compares two timestamps
//
// If either are !IsZero then it returns false
func (t Timestamp) Equal(s Timestamp) bool {
if time.Time(t).IsZero() {
if (time.Time)(t).IsZero() {
return false
}
if time.Time(s).IsZero() {
if (time.Time)(s).IsZero() {
return false
}
return time.Time(t).Equal(time.Time(s))
return (time.Time)(t).Equal((time.Time)(s))
}
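For example (hypothetical timestamp), the scheme above round-trips a remote name:

	t := api.Timestamp(time.Date(2018, 7, 11, 14, 56, 17, 0, time.UTC))
	versioned := t.AddVersion("file.txt")    // "file-v2018-07-11-145617-000.txt"
	t2, name := api.RemoveVersion(versioned) // t2 == t, name == "file.txt"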
// File is info about a file
@@ -132,27 +137,10 @@ type File struct {
// AuthorizeAccountResponse is as returned from the b2_authorize_account call
type AuthorizeAccountResponse struct {
AbsoluteMinimumPartSize int `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
AccountID string `json:"accountId"` // The identifier for the account.
Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
NamePrefix interface{} `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
} `json:"allowed"`
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files.
MinimumPartSize int `json:"minimumPartSize"` // DEPRECATED: This field will always have the same value as recommendedPartSize. Use recommendedPartSize instead.
RecommendedPartSize int `json:"recommendedPartSize"` // The recommended size for each part of a large file. We recommend using this part size for optimal upload performance.
}
// ListBucketsRequest is parameters for b2_list_buckets call
type ListBucketsRequest struct {
AccountID string `json:"accountId"` // The identifier for the account.
BucketID string `json:"bucketId,omitempty"` // When specified, the result will be a list containing just this bucket.
BucketName string `json:"bucketName,omitempty"` // When specified, the result will be a list containing just this bucket.
BucketTypes []string `json:"bucketTypes,omitempty"` // If present, B2 will use it as a filter for bucket types returned in the list buckets response.
AccountID string `json:"accountId"` // The identifier for the account.
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files.
}
// ListBucketsResponse is as returned from the b2_list_buckets call

View File

@@ -22,8 +22,8 @@ import (
"github.com/ncw/rclone/backend/b2/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
@@ -34,27 +34,30 @@ import (
)
const (
defaultEndpoint = "https://api.backblazeb2.com"
headerPrefix = "x-bz-info-" // lower case as that is what the server returns
timeKey = "src_last_modified_millis"
timeHeader = headerPrefix + timeKey
sha1Key = "large_file_sha1"
sha1Header = "X-Bz-Content-Sha1"
sha1InfoHeader = headerPrefix + sha1Key
testModeHeader = "X-Bz-Test-Mode"
retryAfterHeader = "Retry-After"
minSleep = 10 * time.Millisecond
maxSleep = 5 * time.Minute
decayConstant = 1 // bigger for slower decay, exponential
maxParts = 10000
maxVersions = 100 // maximum number of versions we search in --b2-versions mode
minChunkSize = 5 * fs.MebiByte
defaultChunkSize = 96 * fs.MebiByte
defaultUploadCutoff = 200 * fs.MebiByte
defaultEndpoint = "https://api.backblazeb2.com"
headerPrefix = "x-bz-info-" // lower case as that is what the server returns
timeKey = "src_last_modified_millis"
timeHeader = headerPrefix + timeKey
sha1Key = "large_file_sha1"
sha1Header = "X-Bz-Content-Sha1"
sha1InfoHeader = headerPrefix + sha1Key
testModeHeader = "X-Bz-Test-Mode"
retryAfterHeader = "Retry-After"
minSleep = 10 * time.Millisecond
maxSleep = 5 * time.Minute
decayConstant = 1 // bigger for slower decay, exponential
maxParts = 10000
maxVersions = 100 // maximum number of versions we search in --b2-versions mode
)
// Globals
var (
minChunkSize = fs.SizeSuffix(5E6)
chunkSize = fs.SizeSuffix(96 * 1024 * 1024)
uploadCutoff = fs.SizeSuffix(200E6)
b2TestMode = flags.StringP("b2-test-mode", "", "", "A flag string for X-Bz-Test-Mode header.")
b2Versions = flags.BoolP("b2-versions", "", false, "Include old versions in directory listings.")
b2HardDelete = flags.BoolP("b2-hard-delete", "", false, "Permanently delete files on remote removal, otherwise hide files.")
errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
)
@@ -65,98 +68,29 @@ func init() {
Description: "Backblaze B2",
NewFs: NewFs,
Options: []fs.Option{{
Name: "account",
Help: "Account ID or Application Key ID",
Required: true,
Name: "account",
Help: "Account ID",
}, {
Name: "key",
Help: "Application Key",
Required: true,
Name: "key",
Help: "Application Key",
}, {
Name: "endpoint",
Help: "Endpoint for the service.\nLeave blank normally.",
Advanced: true,
}, {
Name: "test_mode",
Help: `A flag string for X-Bz-Test-Mode header for debugging.
This is for debugging purposes only. Setting it to one of the strings
below will cause b2 to return specific errors:
* "fail_some_uploads"
* "expire_some_account_authorization_tokens"
* "force_cap_exceeded"
These will be set in the "X-Bz-Test-Mode" header which is documented
in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration_checklist.html).`,
Default: "",
Hide: fs.OptionHideConfigurator,
Advanced: true,
}, {
Name: "versions",
Help: "Include old versions in directory listings.\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
Default: false,
Advanced: true,
}, {
Name: "hard_delete",
Help: "Permanently delete files on remote removal, otherwise hide files.",
Default: false,
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload.
Files above this size will be uploaded in chunks of "--b2-chunk-size".
This value should be set no larger than 4.657GiB (== 5GB).`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Upload chunk size. Must fit in memory.
When uploading large files, chunk the file into this size. Note that
these chunks are buffered in memory and there might be a maximum of
"--transfers" chunks in progress at once. 5,000,000 Bytes is the
minimum size.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Disable checksums for large (> upload cutoff) files`,
Default: false,
Advanced: true,
}, {
Name: "download_url",
Help: `Custom endpoint for downloads.
This is usually set to a Cloudflare CDN URL as Backblaze offers
free egress for data downloaded through the Cloudflare network.
Leave blank if you want to use the endpoint provided by Backblaze.`,
Advanced: true,
}},
Name: "endpoint",
Help: "Endpoint for the service - leave blank normally.",
},
},
})
}
// Options defines the configuration for this backend
type Options struct {
Account string `config:"account"`
Key string `config:"key"`
Endpoint string `config:"endpoint"`
TestMode string `config:"test_mode"`
Versions bool `config:"versions"`
HardDelete bool `config:"hard_delete"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DisableCheckSum bool `config:"disable_checksum"`
DownloadURL string `config:"download_url"`
flags.VarP(&uploadCutoff, "b2-upload-cutoff", "", "Cutoff for switching to chunked upload")
flags.VarP(&chunkSize, "b2-chunk-size", "", "Upload chunk size. Must fit in memory.")
}
// Fs represents a remote b2 server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed config options
features *fs.Features // optional features
account string // account name
key string // auth key
endpoint string // name of the starting api endpoint
srv *rest.Client // the connection to the b2 server
bucket string // the bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucket OK
@@ -167,7 +101,7 @@ type Fs struct {
uploadMu sync.Mutex // lock for upload variable
uploads []*api.GetUploadURLResponse // result of get upload URL calls
authMu sync.Mutex // lock for authorizing the account
pacer *fs.Pacer // To pace and retry the API calls
pacer *pacer.Pacer // To pace and retry the API calls
bufferTokens chan []byte // control concurrency of multipart uploads
}
@@ -211,7 +145,7 @@ func (f *Fs) Features() *fs.Features {
}
// Pattern to match a b2 path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
// parsePath parses a b2 'url'
func parsePath(path string) (bucket, directory string, err error) {
@@ -251,7 +185,13 @@ func (f *Fs) shouldRetryNoReauth(resp *http.Response, err error) (bool, error) {
fs.Errorf(f, "Malformed %s header %q: %v", retryAfterHeader, retryAfterString, err)
}
}
return true, pacer.RetryAfterError(err, time.Duration(retryAfter)*time.Second)
retryAfterDuration := time.Duration(retryAfter) * time.Second
if f.pacer.GetSleep() < retryAfterDuration {
fs.Debugf(f, "Setting sleep to %v after error: %v", retryAfterDuration, err)
// We set 1/2 the value here because the pacer will double it immediately
f.pacer.SetSleep(retryAfterDuration / 2)
}
return true, err
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
@@ -291,73 +231,37 @@ func errorHandler(resp *http.Response) error {
return errResponse
}
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
// NewFs contstructs an Fs from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
if uploadCutoff < chunkSize {
return nil, errors.Errorf("b2: upload cutoff (%v) must be greater than or equal to chunk size (%v)", uploadCutoff, chunkSize)
}
return nil
}
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
f.fillBufferTokens() // reset the buffer tokens
}
return
}
func checkUploadCutoff(opt *Options, cs fs.SizeSuffix) error {
if cs < opt.ChunkSize {
return errors.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
}
return nil
}
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadCutoff(&f.opt, cs)
if err == nil {
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
}
return
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
err = checkUploadCutoff(opt, opt.UploadCutoff)
if err != nil {
return nil, errors.Wrap(err, "b2: upload cutoff")
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "b2: chunk size")
if chunkSize < minChunkSize {
return nil, errors.Errorf("b2: chunk size can't be less than %v - was %v", minChunkSize, chunkSize)
}
bucket, directory, err := parsePath(root)
if err != nil {
return nil, err
}
if opt.Account == "" {
account := config.FileGet(name, "account")
if account == "" {
return nil, errors.New("account not found")
}
if opt.Key == "" {
key := config.FileGet(name, "key")
if key == "" {
return nil, errors.New("key not found")
}
if opt.Endpoint == "" {
opt.Endpoint = defaultEndpoint
}
endpoint := config.FileGet(name, "endpoint", defaultEndpoint)
f := &Fs{
name: name,
opt: *opt,
bucket: bucket,
root: directory,
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
name: name,
bucket: bucket,
root: directory,
account: account,
key: key,
endpoint: endpoint,
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
bufferTokens: make(chan []byte, fs.Config.Transfers),
}
f.features = (&fs.Features{
ReadMimeType: true,
@@ -365,28 +269,19 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
BucketBased: true,
}).Fill(f)
// Set the test flag if required
if opt.TestMode != "" {
testMode := strings.TrimSpace(opt.TestMode)
if *b2TestMode != "" {
testMode := strings.TrimSpace(*b2TestMode)
f.srv.SetHeader(testModeHeader, testMode)
fs.Debugf(f, "Setting test header \"%s: %s\"", testModeHeader, testMode)
}
f.fillBufferTokens()
// Fill up the buffer tokens
for i := 0; i < fs.Config.Transfers; i++ {
f.bufferTokens <- nil
}
err = f.authorizeAccount()
if err != nil {
return nil, errors.Wrap(err, "failed to authorize account")
}
// If this is a key limited to a single bucket, it must exist already
if f.bucket != "" && f.info.Allowed.BucketID != "" {
allowedBucket := f.info.Allowed.BucketName
if allowedBucket == "" {
return nil, errors.New("bucket that application key is restricted to no longer exists")
}
if allowedBucket != f.bucket {
return nil, errors.Errorf("you must use bucket %q with this application key", allowedBucket)
}
f.markBucketOK()
f.setBucketID(f.info.Allowed.BucketID)
}
if f.root != "" {
f.root += "/"
// Check to see if the (bucket,directory) is actually an existing file
@@ -421,9 +316,9 @@ func (f *Fs) authorizeAccount() error {
opts := rest.Opts{
Method: "GET",
Path: "/b2api/v1/b2_authorize_account",
RootURL: f.opt.Endpoint,
UserName: f.opt.Account,
Password: f.opt.Key,
RootURL: f.endpoint,
UserName: f.account,
Password: f.key,
ExtraHeaders: map[string]string{"Authorization": ""}, // unset the Authorization for this request
}
err := f.pacer.Call(func() (bool, error) {
@@ -485,19 +380,11 @@ func (f *Fs) clearUploadURL() {
f.uploadMu.Unlock()
}
// Fill up (or reset) the buffer tokens
func (f *Fs) fillBufferTokens() {
f.bufferTokens = make(chan []byte, fs.Config.Transfers)
for i := 0; i < fs.Config.Transfers; i++ {
f.bufferTokens <- nil
}
}
// getUploadBlock gets a block from the pool of size chunkSize
func (f *Fs) getUploadBlock() []byte {
buf := <-f.bufferTokens
if buf == nil {
buf = make([]byte, f.opt.ChunkSize)
buf = make([]byte, chunkSize)
}
// fs.Debugf(f, "Getting upload block %p", buf)
return buf
@@ -506,7 +393,7 @@ func (f *Fs) getUploadBlock() []byte {
// putUploadBlock returns a block to the pool of size chunkSize
func (f *Fs) putUploadBlock(buf []byte) {
buf = buf[:cap(buf)]
if len(buf) != int(f.opt.ChunkSize) {
if len(buf) != int(chunkSize) {
panic("bad blocksize returned to pool")
}
// fs.Debugf(f, "Returning upload block %p", buf)
@@ -676,7 +563,7 @@ func (f *Fs) markBucketOK() {
// listDir lists a single directory
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
last := ""
err = f.list(dir, false, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
err = f.list(dir, false, "", 0, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory, &last)
if err != nil {
return err
@@ -748,7 +635,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
}
list := walk.NewListRHelper(callback)
last := ""
err = f.list(dir, true, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
err = f.list(dir, true, "", 0, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory, &last)
if err != nil {
return err
@@ -768,11 +655,7 @@ type listBucketFn func(*api.Bucket) error
// listBucketsToFn lists the buckets to the function supplied
func (f *Fs) listBucketsToFn(fn listBucketFn) error {
var account = api.ListBucketsRequest{
AccountID: f.info.AccountID,
BucketID: f.info.Allowed.BucketID,
}
var account = api.Account{ID: f.info.AccountID}
var response api.ListBucketsResponse
opts := rest.Opts{
Method: "POST",
@@ -952,13 +835,6 @@ func (f *Fs) hide(Name string) error {
return f.shouldRetry(resp, err)
})
if err != nil {
if apiErr, ok := err.(*api.Error); ok {
if apiErr.Code == "already_hidden" {
// sometimes eventual consistency causes this, so
// ignore this error since it is harmless
return nil
}
}
return errors.Wrapf(err, "failed to hide %q", Name)
}
return nil
@@ -1003,12 +879,6 @@ func (f *Fs) purge(oldOnly bool) error {
errReturn = err
}
}
var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
if time.Since(time.Time(timestamp)).Hours() > 24 {
return true
}
return false
}
// Delete Config.Transfers in parallel
toBeDeleted := make(chan *api.File, fs.Config.Transfers)
@@ -1032,9 +902,6 @@ func (f *Fs) purge(oldOnly bool) error {
if object.Action == "hide" {
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
toBeDeleted <- object
} else if object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
toBeDeleted <- object
} else {
fs.Debugf(remote, "Not deleting current version (id %q) %q", object.ID, object.Action)
}
@@ -1168,12 +1035,12 @@ func (o *Object) readMetaData() (err error) {
maxSearched := 1
var timestamp api.Timestamp
baseRemote := o.remote
if o.fs.opt.Versions {
if *b2Versions {
timestamp, baseRemote = api.RemoveVersion(baseRemote)
maxSearched = maxVersions
}
var info *api.File
err = o.fs.list("", true, baseRemote, maxSearched, o.fs.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
err = o.fs.list("", true, baseRemote, maxSearched, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
if isDirectory {
return nil
}
@@ -1306,17 +1173,9 @@ var _ io.ReadCloser = &openFile{}
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: o.fs.info.DownloadURL,
Options: options,
}
// Use downloadUrl from backblaze if downloadUrl is not set
// otherwise use the custom downloadUrl
if o.fs.opt.DownloadURL == "" {
opts.RootURL = o.fs.info.DownloadURL
} else {
opts.RootURL = o.fs.opt.DownloadURL
}
// Download by id if set otherwise by name
if o.id != "" {
opts.Path += "/b2api/v1/b2_download_file_by_id?fileId=" + urlEncode(o.id)
@@ -1395,7 +1254,7 @@ func urlEncode(in string) string {
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
if o.fs.opt.Versions {
if *b2Versions {
return errNotWithVersions
}
err = o.fs.Mkdir("")
@@ -1430,7 +1289,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
} else {
return err
}
} else if size > int64(o.fs.opt.UploadCutoff) {
} else if size > int64(uploadCutoff) {
up, err := o.fs.newLargeUpload(o, in, src)
if err != nil {
return err
@@ -1477,7 +1336,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
// Content-Type b2/x-auto to automatically set the stored Content-Type
// post upload. In the case where a file extension is absent or the
// lookup fails, the Content-Type is set to application/octet-stream. The
// Content-Type mappings can be pursued here.
// Content-Type mappings can be purused here.
//
// X-Bz-Content-Sha1
// required
@@ -1524,6 +1383,11 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
},
ContentLength: &size,
}
// for go1.8 (see release notes) we must nil the Body if we want a
// "Content-Length: 0" header which b2 requires for all files.
if size == 0 {
opts.Body = nil
}
var response api.FileInfo
// Don't retry, return a retry error instead
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
@@ -1544,10 +1408,10 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
// Remove an object
func (o *Object) Remove() error {
if o.fs.opt.Versions {
if *b2Versions {
return errNotWithVersions
}
if o.fs.opt.HardDelete {
if *b2HardDelete {
return o.fs.deleteByID(o.id, o.fs.root+o.remote)
}
return o.fs.hide(o.fs.root + o.remote)

View File

@@ -1,10 +1,10 @@
// Test B2 filesystem interface
package b2
package b2_test
import (
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/backend/b2"
"github.com/ncw/rclone/fstest/fstests"
)
@@ -12,23 +12,6 @@ import (
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestB2:",
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
NeedMultipleChunks: true,
},
NilObject: (*b2.Object)(nil),
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
)
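These hooks let the integration tests shrink the chunk size at runtime; a sketch of the intended use (hypothetical value):

	old, err := f.SetUploadChunkSize(minChunkSize)
	if err == nil {
		defer func() { _, _ = f.SetUploadChunkSize(old) }()
	}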

View File

@@ -86,10 +86,10 @@ func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *lar
parts := int64(0)
sha1SliceSize := int64(maxParts)
if size == -1 {
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", fs.SizeSuffix(chunkSize), fs.SizeSuffix(maxParts*chunkSize))
} else {
parts = size / int64(o.fs.opt.ChunkSize)
if size%int64(o.fs.opt.ChunkSize) != 0 {
parts = size / int64(chunkSize)
if size%int64(chunkSize) != 0 {
parts++
}
if parts > maxParts {
@@ -116,10 +116,8 @@ func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *lar
},
}
// Set the SHA1 if known
if !o.fs.opt.DisableCheckSum {
if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" {
request.Info[sha1Key] = calculatedSha1
}
if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" {
request.Info[sha1Key] = calculatedSha1
}
var response api.StartLargeFileResponse
err = f.pacer.Call(func() (bool, error) {
@@ -411,8 +409,8 @@ outer:
}
reqSize := remaining
if reqSize >= int64(up.f.opt.ChunkSize) {
reqSize = int64(up.f.opt.ChunkSize)
if reqSize >= int64(chunkSize) {
reqSize = int64(chunkSize)
}
// Get a block of memory
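The streaming limit logged above is simply maxParts times the chunk size; for example, with the 96 MiB default:

	// 10,000 parts * 96 MiB = 937.5 GiB is the largest possible streamed upload.
	maxStreamed := int64(maxParts) * int64(96<<20) // 1006632960000 bytes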

View File

@@ -45,7 +45,7 @@ type Error struct {
RequestID string `json:"request_id"`
}
// Error returns a string for the error and satisfies the error interface
// Error returns a string for the error and statistifes the error interface
func (e *Error) Error() string {
out := fmt.Sprintf("Error %q (%d)", e.Code, e.Status)
if e.Message != "" {
@@ -57,11 +57,11 @@ func (e *Error) Error() string {
return out
}
// Check Error satisfies the error interface
// Check Error statisfies the error interface
var _ error = (*Error)(nil)
// ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link"
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status"
// Types of things in Item
const (
@@ -86,10 +86,6 @@ type Item struct {
ContentCreatedAt Time `json:"content_created_at"`
ContentModifiedAt Time `json:"content_modified_at"`
ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
SharedLink struct {
URL string `json:"url,omitempty"`
Access string `json:"access,omitempty"`
} `json:"shared_link"`
}
// ModTime returns the modification time of the item
@@ -149,14 +145,6 @@ type CopyFile struct {
Parent Parent `json:"parent"`
}
// CreateSharedLink is the request for Public Link
type CreateSharedLink struct {
SharedLink struct {
URL string `json:"url,omitempty"`
Access string `json:"access,omitempty"`
} `json:"shared_link"`
}
// UploadSessionRequest is used in Create Upload Session
type UploadSessionRequest struct {
FolderID string `json:"folder_id,omitempty"` // don't pass for update
@@ -184,8 +172,8 @@ type UploadSessionResponse struct {
// Part defines the return from upload part call which are passed to commit upload also
type Part struct {
PartID string `json:"part_id"`
Offset int64 `json:"offset"`
Size int64 `json:"size"`
Offset int `json:"offset"`
Size int `json:"size"`
Sha1 string `json:"sha1"`
}

View File

@@ -23,8 +23,7 @@ import (
"github.com/ncw/rclone/backend/box/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
@@ -47,7 +46,6 @@ const (
uploadURL = "https://upload.box.com/api/2.0"
listChunks = 1000 // chunk size to read directory listings
minUploadCutoff = 50000000 // upload cutoff can be no lower than this
defaultUploadCutoff = 50 * 1024 * 1024
)
// Globals
@@ -63,6 +61,7 @@ var (
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
}
uploadCutoff = fs.SizeSuffix(50 * 1024 * 1024)
)
// Register with Fs
@@ -71,47 +70,31 @@ func init() {
Name: "box",
Description: "Box",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
err := oauthutil.Config("box", name, m, oauthConfig)
Config: func(name string) {
err := oauthutil.Config("box", name, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Box App Client Id.\nLeave blank normally.",
Help: "Box App Client Id - leave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Box App Client Secret\nLeave blank normally.",
}, {
Name: "upload_cutoff",
Help: "Cutoff for switching to multipart upload (>= 50MB).",
Default: fs.SizeSuffix(defaultUploadCutoff),
Advanced: true,
}, {
Name: "commit_retries",
Help: "Max number of times to try committing a multipart file.",
Default: 100,
Advanced: true,
Help: "Box App Client Secret - leave blank normally.",
}},
})
}
// Options defines the configuration for this backend
type Options struct {
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CommitRetries int `config:"commit_retries"`
flags.VarP(&uploadCutoff, "box-upload-cutoff", "", "Cutoff for switching to multipart upload")
}
// Fs represents a remote box
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the one drive server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
pacer *pacer.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
uploadToken *pacer.TokenDispenser // control concurrency
}
@@ -126,7 +109,6 @@ type Object struct {
size int64 // size of the object
modTime time.Time // modification time of the object
id string // ID of the object
publicLink string // Public Link for the object
sha1 string // SHA-1 of the object content
}
@@ -171,13 +153,13 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
authRetry := false
authRety := false
if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
authRetry = true
authRety = true
fs.Debugf(nil, "Should retry: %v", err)
}
return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
return authRety || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// substitute reserved characters for box
@@ -237,30 +219,22 @@ func errorHandler(resp *http.Response) error {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if opt.UploadCutoff < minUploadCutoff {
return nil, errors.Errorf("box: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff))
func NewFs(name, root string) (fs.Fs, error) {
if uploadCutoff < minUploadCutoff {
return nil, errors.Errorf("box: upload cutoff (%v) must be greater than equal to %v", uploadCutoff, fs.SizeSuffix(minUploadCutoff))
}
root = parsePath(root)
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
oAuthClient, ts, err := oauthutil.NewClient(name, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Box")
log.Fatalf("Failed to configure Box: %v", err)
}
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
}
f.features = (&fs.Features{
@@ -283,16 +257,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
tempF := *f
tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
tempF.root = newRoot
newF := *f
newF.dirCache = dircache.New(newRoot, rootID, &newF)
newF.root = newRoot
// Make new Fs which is the parent
err = tempF.dirCache.FindRoot(false)
err = newF.dirCache.FindRoot(false)
if err != nil {
// No root so return old f
return f, nil
}
_, err := tempF.newObjectWithInfo(remote, nil)
_, err := newF.newObjectWithInfo(remote, nil)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
@@ -300,14 +274,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
return nil, err
}
f.features.Fill(&tempF)
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/ncw/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
return &newF, fs.ErrorIsFile
}
return f, nil
}
@@ -530,10 +498,10 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
exisitingObj, err := f.newObjectWithInfo(src.Remote(), nil)
switch err {
case nil:
return existingObj, existingObj.Update(in, src, options...)
return exisitingObj, exisitingObj.Update(in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(in, src)
@@ -681,7 +649,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
Parameters: fieldsValue(),
}
replacedLeaf := replaceReservedChars(leaf)
copyFile := api.CopyFile{
copy := api.CopyFile{
Name: replacedLeaf,
Parent: api.Parent{
ID: directoryID,
@@ -690,7 +658,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
var resp *http.Response
var info *api.Item
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, &copyFile, &info)
resp, err = f.srv.CallJSON(&opts, &copy, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -851,46 +819,6 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
return nil
}
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(remote string) (string, error) {
id, err := f.dirCache.FindDir(remote, false)
var opts rest.Opts
if err == nil {
fs.Debugf(f, "attempting to share directory '%s'", remote)
opts = rest.Opts{
Method: "PUT",
Path: "/folders/" + id,
Parameters: fieldsValue(),
}
} else {
fs.Debugf(f, "attempting to share single file '%s'", remote)
o, err := f.NewObject(remote)
if err != nil {
return "", err
}
if o.(*Object).publicLink != "" {
return o.(*Object).publicLink, nil
}
opts = rest.Opts{
Method: "PUT",
Path: "/files/" + o.(*Object).id,
Parameters: fieldsValue(),
}
}
shareLink := api.CreateSharedLink{}
var info api.Item
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, &shareLink, &info)
return shouldRetry(resp, err)
})
return info.SharedLink.URL, err
}
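A hypothetical caller of the new PublicLink method, which as shown above handles both files and directories:

	link, err := f.PublicLink("docs/report.pdf")
	if err == nil {
		fmt.Println("anyone with this link can read it:", link)
	}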
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
@@ -955,7 +883,6 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
o.sha1 = info.SHA1
o.modTime = info.ModTime()
o.id = info.ID
o.publicLink = info.SharedLink.URL
return nil
}
@@ -1062,8 +989,8 @@ func (o *Object) upload(in io.Reader, leaf, directoryID string, modTime time.Tim
var resp *http.Response
var result api.FolderItems
opts := rest.Opts{
Method: "POST",
Body: in,
Method: "POST",
Body: in,
MultipartMetadataName: "attributes",
MultipartContentName: "contents",
MultipartFileName: upload.Name,
@@ -1108,7 +1035,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
}
// Upload with simple or multipart
if size <= int64(o.fs.opt.UploadCutoff) {
if size <= int64(uploadCutoff) {
err = o.upload(in, leaf, directoryID, modTime)
} else {
err = o.uploadMultipart(in, leaf, directoryID, size, modTime)
@@ -1135,7 +1062,6 @@ var (
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)

View File

@@ -96,9 +96,7 @@ func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.T
request.Attributes.ContentCreatedAt = api.Time(modTime)
var body []byte
var resp *http.Response
// For discussion of this value see:
// https://github.com/ncw/rclone/issues/2054
maxTries := o.fs.opt.CommitRetries
maxTries := fs.Config.LowLevelRetries
const defaultDelay = 10
var tries int
outer:
@@ -211,8 +209,8 @@ outer:
}
reqSize := remaining
if reqSize >= chunkSize {
reqSize = chunkSize
if reqSize >= int64(chunkSize) {
reqSize = int64(chunkSize)
}
// Make a block of memory

backend/cache/cache.go (799 lines changed)

File diff suppressed because it is too large

View File

@@ -4,9 +4,6 @@ package cache_test
import (
"bytes"
"encoding/base64"
goflag "flag"
"fmt"
"io"
"io/ioutil"
"log"
@@ -15,26 +12,34 @@ import (
"path"
"path/filepath"
"runtime"
"runtime/debug"
"strconv"
"strings"
"testing"
"time"
"github.com/pkg/errors"
"encoding/base64"
goflag "flag"
"fmt"
"runtime/debug"
"encoding/json"
"net/http"
"github.com/ncw/rclone/backend/cache"
"github.com/ncw/rclone/backend/crypt"
_ "github.com/ncw/rclone/backend/drive"
"github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/object"
"github.com/ncw/rclone/fs/rc"
"github.com/ncw/rclone/fs/rc/rcflags"
"github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/vfs"
"github.com/ncw/rclone/vfs/vfsflags"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
flag "github.com/spf13/pflag"
"github.com/stretchr/testify/require"
)
@@ -135,7 +140,7 @@ func TestInternalVfsCache(t *testing.T) {
vfsflags.Opt.CacheMode = vfs.CacheModeWrites
id := "tiuufo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"cache-writes": "true", "cache-info-age": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir("test")
@@ -387,10 +392,10 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
// write the object
o := runInstance.writeObjectBytes(t, cfs.UnWrap(), "data.bin", testData)
require.Equal(t, o.Size(), testSize)
require.Equal(t, o.Size(), int64(testSize))
time.Sleep(time.Second * 3)
checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false)
checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, int64(testSize), false)
require.NoError(t, err)
require.Equal(t, int64(len(checkSample)), o.Size())
@@ -690,11 +695,11 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
}
func TestInternalChangeSeenAfterRc(t *testing.T) {
cacheExpire := rc.Calls.Get("cache/expire")
assert.NotNil(t, cacheExpire)
rcflags.Opt.Enabled = true
rc.Start(&rcflags.Opt)
id := fmt.Sprintf("ticsarc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"rc": "true"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
if !runInstance.useMount {
@@ -724,9 +729,13 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
require.NoError(t, err)
require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
// Call the rc function
m, err := cacheExpire.Fn(rc.Params{"remote": "data.bin"})
m := make(map[string]string)
res, err := http.Post(fmt.Sprintf("http://localhost:5572/cache/expire?remote=%s", "data.bin"), "application/json; charset=utf-8", strings.NewReader(""))
require.NoError(t, err)
defer func() {
_ = res.Body.Close()
}()
_ = json.NewDecoder(res.Body).Decode(&m)
require.Contains(t, m, "status")
require.Contains(t, m, "message")
require.Equal(t, "ok", m["status"])
@@ -736,21 +745,23 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
co, err = rootFs.NewObject("data.bin")
require.NoError(t, err)
require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix())
_, err = runInstance.list(t, rootFs, "")
require.NoError(t, err)
li1, err := runInstance.list(t, rootFs, "")
// create some rand test data
testData2 := randStringBytes(int(chunkSize))
runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test2"), testData2)
// list should have 1 item only
li1, err := runInstance.list(t, rootFs, "")
require.NoError(t, err)
li1, err = runInstance.list(t, rootFs, "")
require.Len(t, li1, 1)
// Call the rc function
m, err = cacheExpire.Fn(rc.Params{"remote": "/"})
m = make(map[string]string)
res2, err := http.Post("http://localhost:5572/cache/expire?remote=/", "application/json; charset=utf-8", strings.NewReader(""))
require.NoError(t, err)
defer func() {
_ = res2.Body.Close()
}()
_ = json.NewDecoder(res2.Body).Decode(&m)
require.Contains(t, m, "status")
require.Contains(t, m, "message")
require.Equal(t, "ok", m["status"])
@@ -758,13 +769,12 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
// list should have 2 items now
li2, err := runInstance.list(t, rootFs, "")
require.NoError(t, err)
require.Len(t, li2, 2)
}
func TestInternalCacheWrites(t *testing.T) {
id := "ticw"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"cache-writes": "true"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs)
@@ -783,7 +793,7 @@ func TestInternalCacheWrites(t *testing.T) {
func TestInternalMaxChunkSizeRespected(t *testing.T) {
id := fmt.Sprintf("timcsr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"cache-workers": "1"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs)
@@ -858,7 +868,7 @@ func TestInternalBug2117(t *testing.T) {
id := fmt.Sprintf("tib2117%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil,
map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
map[string]string{"cache-info-age": "72h", "cache-chunk-clean-interval": "15m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
if runInstance.rootIsCrypt {
@@ -908,7 +918,10 @@ func TestInternalBug2117(t *testing.T) {
// run holds the remotes for a test run
type run struct {
okDiff time.Duration
runDefaultCfgMap configmap.Simple
allCfgMap map[string]string
allFlagMap map[string]string
runDefaultCfgMap map[string]string
runDefaultFlagMap map[string]string
mntDir string
tmpUploadDir string
useMount bool
@@ -932,16 +945,38 @@ func newRun() *run {
isMounted: false,
}
// Read in all the defaults for all the options
fsInfo, err := fs.Find("cache")
if err != nil {
panic(fmt.Sprintf("Couldn't find cache remote: %v", err))
r.allCfgMap = map[string]string{
"plex_url": "",
"plex_username": "",
"plex_password": "",
"chunk_size": cache.DefCacheChunkSize,
"info_age": cache.DefCacheInfoAge,
"chunk_total_size": cache.DefCacheTotalChunkSize,
}
r.runDefaultCfgMap = configmap.Simple{}
for _, option := range fsInfo.Options {
r.runDefaultCfgMap.Set(option.Name, fmt.Sprint(option.Default))
r.allFlagMap = map[string]string{
"cache-db-path": filepath.Join(config.CacheDir, "cache-backend"),
"cache-chunk-path": filepath.Join(config.CacheDir, "cache-backend"),
"cache-db-purge": "true",
"cache-chunk-size": cache.DefCacheChunkSize,
"cache-total-chunk-size": cache.DefCacheTotalChunkSize,
"cache-chunk-clean-interval": cache.DefCacheChunkCleanInterval,
"cache-info-age": cache.DefCacheInfoAge,
"cache-read-retries": strconv.Itoa(cache.DefCacheReadRetries),
"cache-workers": strconv.Itoa(cache.DefCacheTotalWorkers),
"cache-chunk-no-memory": "false",
"cache-rps": strconv.Itoa(cache.DefCacheRps),
"cache-writes": "false",
"cache-tmp-upload-path": "",
"cache-tmp-wait-time": cache.DefCacheTmpWaitTime,
}
r.runDefaultCfgMap = make(map[string]string)
for key, value := range r.allCfgMap {
r.runDefaultCfgMap[key] = value
}
r.runDefaultFlagMap = make(map[string]string)
for key, value := range r.allFlagMap {
r.runDefaultFlagMap[key] = value
}
if mountDir == "" {
if runtime.GOOS != "windows" {
r.mntDir, err = ioutil.TempDir("", "rclonecache-mount")
@@ -1051,22 +1086,28 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true})
require.NoError(t, err)
fs.Config.LowLevelRetries = 1
m := configmap.Simple{}
for k, v := range r.runDefaultCfgMap {
m.Set(k, v)
if c, ok := cfg[k]; ok {
config.FileSet(cacheRemote, k, c)
} else {
config.FileSet(cacheRemote, k, v)
}
}
for k, v := range flags {
m.Set(k, v)
for k, v := range r.runDefaultFlagMap {
if c, ok := flags[k]; ok {
_ = flag.Set(k, c)
} else {
_ = flag.Set(k, v)
}
}
fs.Config.LowLevelRetries = 1
// Instantiate root
if purge {
boltDb.PurgeTempUploads()
_ = os.RemoveAll(path.Join(runInstance.tmpUploadDir, id))
}
f, err := cache.NewFs(remote, id, m)
f, err := fs.NewFs(remote + ":" + id)
require.NoError(t, err)
cfs, err := r.getCacheFs(f)
require.NoError(t, err)
@@ -1116,6 +1157,9 @@ func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
}
r.tempFiles = nil
debug.FreeOSMemory()
for k, v := range r.runDefaultFlagMap {
_ = flag.Set(k, v)
}
}
func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
@@ -1495,8 +1539,7 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
var err error
if r.useMount {
var f *os.File
f, err = os.OpenFile(path.Join(runInstance.mntDir, src), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
f, err := os.OpenFile(path.Join(runInstance.mntDir, src), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return err
}
@@ -1506,8 +1549,7 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
}()
_, err = f.WriteString(data + append)
} else {
var obj1 fs.Object
obj1, err = rootFs.NewObject(src)
obj1, err := rootFs.NewObject(src)
if err != nil {
return err
}
@@ -1639,13 +1681,15 @@ func (r *run) getCacheFs(f fs.Fs) (*cache.Fs, error) {
cfs, ok := f.(*cache.Fs)
if ok {
return cfs, nil
}
if f.Features().UnWrap != nil {
cfs, ok := f.Features().UnWrap().(*cache.Fs)
if ok {
return cfs, nil
} else {
if f.Features().UnWrap != nil {
cfs, ok := f.Features().UnWrap().(*cache.Fs)
if ok {
return cfs, nil
}
}
}
return nil, errors.New("didn't found a cache fs")
}

View File

@@ -3,7 +3,6 @@
package cache_test
import (
"fmt"
"math/rand"
"os"
"path"
@@ -11,6 +10,8 @@ import (
"testing"
"time"
"fmt"
"github.com/ncw/rclone/backend/cache"
_ "github.com/ncw/rclone/backend/drive"
"github.com/ncw/rclone/fs"
@@ -21,7 +22,7 @@ func TestInternalUploadTempDirCreated(t *testing.T) {
id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)})
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id)})
defer runInstance.cleanupFs(t, rootFs, boltDb)
_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
@@ -62,7 +63,7 @@ func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"})
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "0s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
@@ -72,7 +73,7 @@ func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"})
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
@@ -82,7 +83,7 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {
id := fmt.Sprintf("tiumef%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "3s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir("one")
@@ -162,7 +163,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir("test")
@@ -212,7 +213,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
id := "tiutfo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads()
@@ -342,7 +343,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
id := "tiuufo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads()

View File

@@ -3,15 +3,16 @@
package cache
import (
"path"
"time"
"path"
"github.com/ncw/rclone/fs"
)
// Directory is a generic dir that stores basic information about it
type Directory struct {
Directory fs.Directory `json:"-"` // can be nil
fs.Directory `json:"-"`
CacheFs *Fs `json:"-"` // cache fs
Name string `json:"name"` // name of the directory
@@ -124,14 +125,6 @@ func (d *Directory) Items() int64 {
return d.CacheItems
}
// ID returns the ID of the cached directory if known
func (d *Directory) ID() string {
if d.Directory == nil {
return ""
}
return d.Directory.ID()
}
var (
_ fs.Directory = (*Directory)(nil)
)
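
The directory.go hunk swaps a plain "Directory fs.Directory" field for an embedded one and, on the other side, carries a nil-safe ID method. A self-contained sketch of that nil-safe wrapper pattern, with a minimal stand-in interface in place of the real fs.Directory:

package main

import "fmt"

// IDer stands in for the part of fs.Directory used here.
type IDer interface {
	ID() string
}

// Dir wraps an optional inner directory; inner can be nil when the
// entry was rebuilt from the cache rather than listed from the remote.
type Dir struct {
	inner IDer // can be nil
}

// ID is nil-safe, mirroring the Directory.ID method in the hunk above:
// with no wrapped directory it reports an empty ID instead of panicking.
func (d *Dir) ID() string {
	if d.inner == nil {
		return ""
	}
	return d.inner.ID()
}

func main() {
	var d Dir
	fmt.Printf("id=%q\n", d.ID()) // id=""
}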


@@ -5,11 +5,12 @@ package cache
import (
"fmt"
"io"
"sync"
"time"
"path"
"runtime"
"strings"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/operations"
@@ -48,13 +49,12 @@ type Handle struct {
offset int64
seenOffsets map[int64]bool
mu sync.Mutex
workersWg sync.WaitGroup
confirmReading chan bool
workers int
maxWorkerID int
UseMemory bool
closed bool
reading bool
UseMemory bool
workers []*worker
closed bool
reading bool
}
// NewObjectHandle returns a new Handle for an existing Object
@@ -65,14 +65,14 @@ func NewObjectHandle(o *Object, cfs *Fs) *Handle {
offset: 0,
preloadOffset: -1, // -1 to trigger the first preload
UseMemory: !cfs.opt.ChunkNoMemory,
UseMemory: cfs.chunkMemory,
reading: false,
}
r.seenOffsets = make(map[int64]bool)
r.memory = NewMemory(-1)
// create a larger buffer to queue up requests
r.preloadQueue = make(chan int64, r.cfs.opt.TotalWorkers*10)
r.preloadQueue = make(chan int64, r.cfs.totalWorkers*10)
r.confirmReading = make(chan bool)
r.startReadWorkers()
return r
@@ -95,10 +95,10 @@ func (r *Handle) String() string {
// startReadWorkers will start the worker pool
func (r *Handle) startReadWorkers() {
if r.workers > 0 {
if r.hasAtLeastOneWorker() {
return
}
totalWorkers := r.cacheFs().opt.TotalWorkers
totalWorkers := r.cacheFs().totalWorkers
if r.cacheFs().plexConnector.isConfigured() {
if !r.cacheFs().plexConnector.isConnected() {
@@ -117,27 +117,26 @@ func (r *Handle) startReadWorkers() {
// scaleOutWorkers will increase the worker pool count by the provided amount
func (r *Handle) scaleWorkers(desired int) {
current := r.workers
current := len(r.workers)
if current == desired {
return
}
if current > desired {
// scale in gracefully
for r.workers > desired {
for i := 0; i < current-desired; i++ {
r.preloadQueue <- -1
r.workers--
}
} else {
// scale out
for r.workers < desired {
for i := 0; i < desired-current; i++ {
w := &worker{
r: r,
id: r.maxWorkerID,
ch: r.preloadQueue,
id: current + i,
}
r.workersWg.Add(1)
r.workers++
r.maxWorkerID++
go w.run()
r.workers = append(r.workers, w)
}
}
// ignore first scale out from 0
@@ -149,7 +148,7 @@ func (r *Handle) scaleWorkers(desired int) {
func (r *Handle) confirmExternalReading() {
// if we have a max value of workers
// then we skip this step
if r.workers > 1 ||
if len(r.workers) > 1 ||
!r.cacheFs().plexConnector.isConfigured() {
return
}
@@ -157,7 +156,7 @@ func (r *Handle) confirmExternalReading() {
return
}
fs.Infof(r, "confirmed reading by external reader")
r.scaleWorkers(r.cacheFs().opt.TotalWorkers)
r.scaleWorkers(r.cacheFs().totalMaxWorkers)
}
// queueOffset will send an offset to the workers if it's different from the last one
@@ -179,8 +178,8 @@ func (r *Handle) queueOffset(offset int64) {
}
}
for i := 0; i < r.workers; i++ {
o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
for i := 0; i < len(r.workers); i++ {
o := r.preloadOffset + r.cacheFs().chunkSize*int64(i)
if o < 0 || o >= r.cachedObject.Size() {
continue
}
@@ -194,6 +193,16 @@ func (r *Handle) queueOffset(offset int64) {
}
}
func (r *Handle) hasAtLeastOneWorker() bool {
oneWorker := false
for i := 0; i < len(r.workers); i++ {
if r.workers[i].isRunning() {
oneWorker = true
}
}
return oneWorker
}
// getChunk is called by the FS to retrieve a specific chunk of known start and size from where it can find it
// it can be from transient or persistent cache
// it will also build the chunk from the cache's specific chunk boundaries and build the final desired chunk in a buffer
@@ -202,7 +211,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
var err error
// we calculate the modulus of the requested offset with the size of a chunk
offset := chunkStart % int64(r.cacheFs().opt.ChunkSize)
offset := chunkStart % r.cacheFs().chunkSize
// we align the start offset of the first chunk to a likely chunk in the storage
chunkStart = chunkStart - offset
@@ -219,7 +228,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
if !found {
// we're gonna give the workers a chance to pickup the chunk
// and retry a couple of times
for i := 0; i < r.cacheFs().opt.ReadRetries*8; i++ {
for i := 0; i < r.cacheFs().readRetries*8; i++ {
data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
if err == nil {
found = true
@@ -234,7 +243,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
// not found in ram or
// the worker didn't managed to download the chunk in time so we abort and close the stream
if err != nil || len(data) == 0 || !found {
if r.workers == 0 {
if !r.hasAtLeastOneWorker() {
fs.Errorf(r, "out of workers")
return nil, io.ErrUnexpectedEOF
}
@@ -246,7 +255,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
if offset > 0 {
if offset > int64(len(data)) {
fs.Errorf(r, "unexpected conditions during reading. current position: %v, current chunk position: %v, current chunk size: %v, offset: %v, chunk size: %v, file size: %v",
r.offset, chunkStart, len(data), offset, r.cacheFs().opt.ChunkSize, r.cachedObject.Size())
r.offset, chunkStart, len(data), offset, r.cacheFs().chunkSize, r.cachedObject.Size())
return nil, io.ErrUnexpectedEOF
}
data = data[int(offset):]
@@ -295,7 +304,14 @@ func (r *Handle) Close() error {
close(r.preloadQueue)
r.closed = true
// wait for workers to complete their jobs before returning
r.workersWg.Wait()
waitCount := 3
for i := 0; i < len(r.workers); i++ {
waitIdx := 0
for r.workers[i].isRunning() && waitIdx < waitCount {
time.Sleep(time.Second)
waitIdx++
}
}
r.memory.db.Flush()
fs.Debugf(r, "cache reader closed %v", r.offset)
@@ -322,9 +338,9 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
err = errors.Errorf("cache: unimplemented seek whence %v", whence)
}
chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
if chunkStart >= int64(r.cacheFs().opt.ChunkSize) {
chunkStart = chunkStart - int64(r.cacheFs().opt.ChunkSize)
chunkStart := r.offset - (r.offset % r.cacheFs().chunkSize)
if chunkStart >= r.cacheFs().chunkSize {
chunkStart = chunkStart - r.cacheFs().chunkSize
}
r.queueOffset(chunkStart)
@@ -332,9 +348,12 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
}
type worker struct {
r *Handle
rc io.ReadCloser
id int
r *Handle
ch <-chan int64
rc io.ReadCloser
id int
running bool
mu sync.Mutex
}
// String is a representation of this worker
@@ -379,19 +398,33 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
})
}
func (w *worker) isRunning() bool {
w.mu.Lock()
defer w.mu.Unlock()
return w.running
}
func (w *worker) setRunning(f bool) {
w.mu.Lock()
defer w.mu.Unlock()
w.running = f
}
// run is the main loop for the worker which receives offsets to preload
func (w *worker) run() {
var err error
var data []byte
defer w.setRunning(false)
defer func() {
if w.rc != nil {
_ = w.rc.Close()
w.setRunning(false)
}
w.r.workersWg.Done()
}()
for {
chunkStart, open := <-w.r.preloadQueue
chunkStart, open := <-w.ch
w.setRunning(true)
if chunkStart < 0 || !open {
break
}
@@ -418,7 +451,7 @@ func (w *worker) run() {
}
}
chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)
chunkEnd := chunkStart + w.r.cacheFs().chunkSize
// TODO: Remove this comment if it proves to be reliable for #1896
//if chunkEnd > w.r.cachedObject.Size() {
// chunkEnd = w.r.cachedObject.Size()
@@ -433,7 +466,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
var data []byte
// stop retries
if retry >= w.r.cacheFs().opt.ReadRetries {
if retry >= w.r.cacheFs().readRetries {
return
}
// back-off between retries
@@ -579,7 +612,7 @@ func (b *backgroundWriter) run() {
return
}
absPath, err := b.fs.cache.getPendingUpload(b.fs.Root(), time.Duration(b.fs.opt.TempWaitTime))
absPath, err := b.fs.cache.getPendingUpload(b.fs.Root(), b.fs.tempWriteWait)
if err != nil || absPath == "" || !b.fs.isRootInPath(absPath) {
time.Sleep(time.Second)
continue
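
The handle.go hunks contrast two worker pool designs: counting workers and retiring them by sending a -1 sentinel on the shared preload queue (with a WaitGroup for shutdown), versus keeping a worker slice with polled running flags. A runnable sketch of the sentinel-based scale in/out pattern, reduced to its essentials:

package main

import (
	"fmt"
	"sync"
)

// pool sketches the scaling pattern above: workers read offsets from a
// shared queue, and a sentinel (-1) asks exactly one worker to exit, so
// scaling in is just sending sentinels.
type pool struct {
	queue   chan int64
	wg      sync.WaitGroup
	workers int
}

func (p *pool) scale(desired int) {
	for p.workers > desired { // scale in gracefully
		p.queue <- -1
		p.workers--
	}
	for p.workers < desired { // scale out
		p.wg.Add(1)
		id := p.workers
		go func() {
			defer p.wg.Done()
			for off := range p.queue {
				if off < 0 {
					return // sentinel: retire this worker
				}
				_ = id // a real worker would fetch the chunk at off
			}
		}()
		p.workers++
	}
}

func main() {
	p := &pool{queue: make(chan int64, 16)}
	p.scale(4)
	p.queue <- 0 // queue one offset
	p.scale(0)   // drain the pool with sentinels
	p.wg.Wait()
	fmt.Println("all workers exited")
}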


@@ -44,7 +44,7 @@ func NewObject(f *Fs, remote string) *Object {
cacheType := objectInCache
parentFs := f.UnWrap()
if f.opt.TempWritePath != "" {
if f.tempWritePath != "" {
_, err := f.cache.SearchPendingUpload(fullRemote)
if err == nil { // queued for upload
cacheType = objectPendingUpload
@@ -75,7 +75,7 @@ func ObjectFromOriginal(f *Fs, o fs.Object) *Object {
cacheType := objectInCache
parentFs := f.UnWrap()
if f.opt.TempWritePath != "" {
if f.tempWritePath != "" {
_, err := f.cache.SearchPendingUpload(fullRemote)
if err == nil { // queued for upload
cacheType = objectPendingUpload
@@ -153,7 +153,7 @@ func (o *Object) Storable() bool {
// 2. is not pending a notification from the wrapped fs
func (o *Object) refresh() error {
isNotified := o.CacheFs.isNotifiedRemote(o.Remote())
isExpired := time.Now().After(o.CacheTs.Add(time.Duration(o.CacheFs.opt.InfoAge)))
isExpired := time.Now().After(o.CacheTs.Add(o.CacheFs.fileAge))
if !isExpired && !isNotified {
return nil
}
@@ -208,17 +208,11 @@ func (o *Object) SetModTime(t time.Time) error {
// Open is used to request a specific part of the file using fs.RangeOption
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
var err error
if o.Object == nil {
err = o.refreshFromSource(true)
} else {
err = o.refresh()
}
if err != nil {
if err := o.refreshFromSource(true); err != nil {
return nil, err
}
var err error
cacheReader := NewObjectHandle(o, o.CacheFs)
var offset, limit int64 = 0, -1
for _, option := range options {
@@ -243,7 +237,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
return err
}
// pause background uploads if active
if o.CacheFs.opt.TempWritePath != "" {
if o.CacheFs.tempWritePath != "" {
o.CacheFs.backgroundRunner.pause()
defer o.CacheFs.backgroundRunner.play()
// don't allow started uploads
@@ -280,7 +274,7 @@ func (o *Object) Remove() error {
return err
}
// pause background uploads if active
if o.CacheFs.opt.TempWritePath != "" {
if o.CacheFs.tempWritePath != "" {
o.CacheFs.backgroundRunner.pause()
defer o.CacheFs.backgroundRunner.play()
// don't allow started uploads
@@ -359,13 +353,6 @@ func (o *Object) tempFileStartedUpload() bool {
return started
}
// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *Object) UnWrap() fs.Object {
return o.Object
}
var (
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)
_ fs.Object = (*Object)(nil)
)
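
The object.go hunk carries, on one side of the diff, an UnWrap method plus the fs.ObjectUnWrapper assertion. A self-contained sketch of the unwrap-chain idea with stand-in types, showing how a caller can walk through stacked wrapper objects:

package main

import "fmt"

// Object stands in for fs.Object; UnWrapper mirrors fs.ObjectUnWrapper.
type Object interface{ Remote() string }

type UnWrapper interface {
	UnWrap() Object
}

type base struct{ name string }

func (b base) Remote() string { return b.name }

// wrapped decorates another Object, as the cache backend's Object does.
type wrapped struct{ Object }

func (w wrapped) UnWrap() Object { return w.Object }

// unwrapAll follows UnWrap until it reaches an object that is not a
// wrapper, which is how layered backends reach the underlying object.
func unwrapAll(o Object) Object {
	for {
		u, ok := o.(UnWrapper)
		if !ok {
			return o
		}
		o = u.UnWrap()
	}
}

func main() {
	o := wrapped{wrapped{base{"file.txt"}}}
	fmt.Println(unwrapAll(o).Remote()) // file.txt
}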

backend/cache/plex.go

@@ -3,19 +3,21 @@
package cache
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"
"sync"
"time"
"sync"
"bytes"
"io/ioutil"
"github.com/ncw/rclone/fs"
cache "github.com/patrickmn/go-cache"
"github.com/ncw/rclone/fs/config"
"github.com/patrickmn/go-cache"
"golang.org/x/net/websocket"
)
@@ -53,17 +55,15 @@ type plexConnector struct {
username string
password string
token string
insecure bool
f *Fs
mu sync.Mutex
running bool
runningMu sync.Mutex
stateCache *cache.Cache
saveToken func(string)
}
// newPlexConnector connects to a Plex server and generates a token
func newPlexConnector(f *Fs, plexURL, username, password string, insecure bool, saveToken func(string)) (*plexConnector, error) {
func newPlexConnector(f *Fs, plexURL, username, password string) (*plexConnector, error) {
u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
if err != nil {
return nil, err
@@ -75,16 +75,14 @@ func newPlexConnector(f *Fs, plexURL, username, password string, insecure bool,
username: username,
password: password,
token: "",
insecure: insecure,
stateCache: cache.New(time.Hour, time.Minute),
saveToken: saveToken,
}
return pc, nil
}
// newPlexConnector connects to a Plex server and generates a token
func newPlexConnectorWithToken(f *Fs, plexURL, token string, insecure bool) (*plexConnector, error) {
func newPlexConnectorWithToken(f *Fs, plexURL, token string) (*plexConnector, error) {
u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
if err != nil {
return nil, err
@@ -94,7 +92,6 @@ func newPlexConnectorWithToken(f *Fs, plexURL, token string, insecure bool) (*pl
f: f,
url: u,
token: token,
insecure: insecure,
stateCache: cache.New(time.Hour, time.Minute),
}
pc.listenWebsocket()
@@ -109,26 +106,14 @@ func (p *plexConnector) closeWebsocket() {
p.running = false
}
func (p *plexConnector) websocketDial() (*websocket.Conn, error) {
u := strings.TrimRight(strings.Replace(strings.Replace(
p.url.String(), "http://", "ws://", 1), "https://", "wss://", 1), "/")
url := fmt.Sprintf(defPlexNotificationURL, u, p.token)
config, err := websocket.NewConfig(url, "http://localhost")
if err != nil {
return nil, err
}
if p.insecure {
config.TlsConfig = &tls.Config{InsecureSkipVerify: true}
}
return websocket.DialConfig(config)
}
func (p *plexConnector) listenWebsocket() {
p.runningMu.Lock()
defer p.runningMu.Unlock()
conn, err := p.websocketDial()
u := strings.Replace(p.url.String(), "http://", "ws://", 1)
u = strings.Replace(u, "https://", "wss://", 1)
conn, err := websocket.Dial(fmt.Sprintf(defPlexNotificationURL, strings.TrimRight(u, "/"), p.token),
"", "http://localhost")
if err != nil {
fs.Errorf("plex", "%v", err)
return
@@ -224,9 +209,8 @@ func (p *plexConnector) authenticate() error {
}
p.token = token
if p.token != "" {
if p.saveToken != nil {
p.saveToken(p.token)
}
config.FileSet(p.f.Name(), "plex_token", p.token)
config.SaveConfig()
fs.Infof(p.f.Name(), "Connected to Plex server: %v", p.url.String())
}
p.listenWebsocket()
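
The plex.go hunk splits websocket dialling into a websocketDial helper so an insecure flag can inject a TLS config that skips certificate verification. A sketch under stated assumptions: the notification URL format below is illustrative, since defPlexNotificationURL is not shown in the diff.

package plexdial

import (
	"crypto/tls"
	"fmt"
	"strings"

	"golang.org/x/net/websocket"
)

// notificationURL is illustrative only; the real format string lives in
// defPlexNotificationURL.
const notificationURL = "%s/:/websockets/notifications?X-Plex-Token=%s"

// dial restates the websocketDial pattern: rewrite the http(s) URL to
// ws(s), build a config, and only then opt in to skipping TLS
// verification for self-signed Plex servers.
func dial(plexURL, token string, insecure bool) (*websocket.Conn, error) {
	u := strings.TrimRight(strings.Replace(strings.Replace(
		plexURL, "http://", "ws://", 1), "https://", "wss://", 1), "/")
	config, err := websocket.NewConfig(fmt.Sprintf(notificationURL, u, token), "http://localhost")
	if err != nil {
		return nil, err
	}
	if insecure {
		config.TlsConfig = &tls.Config{InsecureSkipVerify: true}
	}
	return websocket.DialConfig(config)
}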


@@ -8,7 +8,7 @@ import (
"time"
"github.com/ncw/rclone/fs"
cache "github.com/patrickmn/go-cache"
"github.com/patrickmn/go-cache"
"github.com/pkg/errors"
)


@@ -3,17 +3,20 @@
package cache
import (
"time"
"bytes"
"encoding/binary"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"strconv"
"strings"
"sync"
"time"
"io/ioutil"
"fmt"
bolt "github.com/coreos/bbolt"
"github.com/ncw/rclone/fs"
@@ -31,8 +34,7 @@ const (
// Features flags for this storage type
type Features struct {
PurgeDb bool // purge the db before starting
DbWaitTime time.Duration // time to wait for DB to be available
PurgeDb bool // purge the db before starting
}
var boltMap = make(map[string]*Persistent)
@@ -120,7 +122,7 @@ func (b *Persistent) connect() error {
if err != nil {
return errors.Wrapf(err, "failed to create a data directory %q", b.dataPath)
}
b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: b.features.DbWaitTime})
b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: *cacheDbWaitTime})
if err != nil {
return errors.Wrapf(err, "failed to open a cache connection to %q", b.dbPath)
}
@@ -340,7 +342,7 @@ func (b *Persistent) RemoveDir(fp string) error {
// ExpireDir will flush a CachedDirectory and all its objects from the objects
// chunks will remain as they are
func (b *Persistent) ExpireDir(cd *Directory) error {
t := time.Now().Add(time.Duration(-cd.CacheFs.opt.InfoAge))
t := time.Now().Add(cd.CacheFs.fileAge * -1)
cd.CacheTs = &t
// expire all parents
@@ -398,7 +400,7 @@ func (b *Persistent) AddObject(cachedObject *Object) error {
if err != nil {
return errors.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
}
err = bucket.Put([]byte(cachedObject.Name), encoded)
err = bucket.Put([]byte(cachedObject.Name), []byte(encoded))
if err != nil {
return errors.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
}
@@ -427,7 +429,7 @@ func (b *Persistent) RemoveObject(fp string) error {
// ExpireObject will flush an Object and all its data if desired
func (b *Persistent) ExpireObject(co *Object, withData bool) error {
co.CacheTs = time.Now().Add(time.Duration(-co.CacheFs.opt.InfoAge))
co.CacheTs = time.Now().Add(co.CacheFs.fileAge * -1)
err := b.AddObject(co)
if withData {
_ = os.RemoveAll(path.Join(b.dataPath, co.abs()))
@@ -809,7 +811,7 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
if err != nil {
return errors.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
}
err = bucket.Put([]byte(destPath), encoded)
err = bucket.Put([]byte(destPath), []byte(encoded))
if err != nil {
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
}
@@ -1023,7 +1025,7 @@ func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
}
var queuedEntries []fs.Object
err = walk.ListR(cacheFs.tempFs, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
err = walk.Walk(cacheFs.tempFs, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
for _, o := range entries {
if oo, ok := o.(fs.Object); ok {
queuedEntries = append(queuedEntries, oo)
@@ -1049,7 +1051,7 @@ func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
if err != nil {
return errors.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
}
err = bucket.Put([]byte(destPath), encoded)
err = bucket.Put([]byte(destPath), []byte(encoded))
if err != nil {
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
}
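
These storage_persistent.go hunks cover two details: opening the bolt file with a Timeout so a second process fails fast on the file lock rather than blocking, and storing JSON where the []byte(encoded) conversion is redundant because json.Marshal already returns []byte. A condensed sketch of both:

package cachestore

import (
	"encoding/json"
	"time"

	bolt "github.com/coreos/bbolt"
)

// putJSON opens the bolt file with a lock timeout, then stores a
// JSON-encoded value under key in bucketName.
func putJSON(dbPath, bucketName, key string, v interface{}) error {
	db, err := bolt.Open(dbPath, 0644, &bolt.Options{Timeout: time.Second})
	if err != nil {
		return err
	}
	defer func() { _ = db.Close() }()

	encoded, err := json.Marshal(v)
	if err != nil {
		return err
	}
	return db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte(bucketName))
		if err != nil {
			return err
		}
		// Put takes []byte; json.Marshal already returns []byte, so no
		// conversion is needed, as the hunk above points out.
		return b.Put([]byte(key), encoded)
	})
}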


@@ -17,9 +17,11 @@ import (
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/pkg/errors"
"github.com/rfjakob/eme"
"golang.org/x/crypto/nacl/secretbox"
"golang.org/x/crypto/scrypt"
"github.com/rfjakob/eme"
)
// Constants
@@ -41,7 +43,6 @@ var (
ErrorBadDecryptControlChar = errors.New("bad decryption - contains control chars")
ErrorNotAMultipleOfBlocksize = errors.New("not a multiple of blocksize")
ErrorTooShortAfterDecode = errors.New("too short after base32 decode")
ErrorTooLongAfterDecode = errors.New("too long after base32 decode")
ErrorEncryptedFileTooShort = errors.New("file is too short to be encrypted")
ErrorEncryptedFileBadHeader = errors.New("file has truncated block header")
ErrorEncryptedBadMagic = errors.New("not an encrypted file - bad magic string")
@@ -285,9 +286,6 @@ func (c *cipher) decryptSegment(ciphertext string) (string, error) {
// not possible if decodeFilename() working correctly
return "", ErrorTooShortAfterDecode
}
if len(rawCiphertext) > 2048 {
return "", ErrorTooLongAfterDecode
}
paddedPlaintext := eme.Transform(c.block, c.nameTweak[:], rawCiphertext, eme.DirectionDecrypt)
plaintext, err := pkcs7.Unpad(nameCipherBlockSize, paddedPlaintext)
if err != nil {
@@ -463,7 +461,7 @@ func (c *cipher) deobfuscateSegment(ciphertext string) (string, error) {
if int(newRune) < base {
newRune += 256
}
_, _ = result.WriteRune(newRune)
_, _ = result.WriteRune(rune(newRune))
default:
_, _ = result.WriteRune(runeValue)
@@ -748,7 +746,7 @@ func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
if !bytes.Equal(readBuf[:fileMagicSize], fileMagicBytes) {
return nil, fh.finishAndClose(ErrorEncryptedBadMagic)
}
// retrieve the nonce
// retreive the nonce
fh.nonce.fromBuf(readBuf[fileMagicSize:])
fh.initialNonce = fh.nonce
return fh, nil
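
The cipher.go hunk adds an upper bound (2048 bytes) on the decoded length of an encrypted name segment, alongside the existing too-short check. A rough sketch of such a guard; it uses the stdlib base32 codec for brevity, whereas rclone's crypt backend uses its own lower-case unpadded encoding:

package cryptname

import (
	"encoding/base32"
	"errors"
	"strings"
)

var (
	errTooShort = errors.New("too short after base32 decode")
	errTooLong  = errors.New("too long after base32 decode")
)

// decodeSegment decodes one file name segment and rejects degenerate or
// oversized inputs before they reach EME decryption, mirroring the
// bounds check in the hunk above. The 2048 byte ceiling matches the
// constant in the diff.
func decodeSegment(ciphertext string) ([]byte, error) {
	raw, err := base32.HexEncoding.WithPadding(base32.NoPadding).
		DecodeString(strings.ToUpper(ciphertext))
	if err != nil {
		return nil, err
	}
	if len(raw) == 0 {
		return nil, errTooShort
	}
	if len(raw) > 2048 {
		return nil, errTooLong
	}
	return raw, nil
}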


@@ -24,7 +24,7 @@ func TestNewNameEncryptionMode(t *testing.T) {
{"off", NameEncryptionOff, ""},
{"standard", NameEncryptionStandard, ""},
{"obfuscate", NameEncryptionObfuscated, ""},
{"potato", NameEncryptionOff, "Unknown file name encryption mode \"potato\""},
{"potato", NameEncryptionMode(0), "Unknown file name encryption mode \"potato\""},
} {
actual, actualErr := NewNameEncryptionMode(test.in)
assert.Equal(t, actual, test.expected)
@@ -194,10 +194,6 @@ func TestEncryptSegment(t *testing.T) {
func TestDecryptSegment(t *testing.T) {
// We've tested the forwards above, now concentrate on the errors
longName := make([]byte, 3328)
for i := range longName {
longName[i] = 'a'
}
c, _ := newCipher(NameEncryptionStandard, "", "", true)
for _, test := range []struct {
in string
@@ -205,7 +201,6 @@ func TestDecryptSegment(t *testing.T) {
}{
{"64=", ErrorBadBase32Encoding},
{"!", base32.CorruptInputError(0)},
{string(longName), ErrorTooLongAfterDecode},
{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},


@@ -4,20 +4,25 @@ package crypt
import (
"fmt"
"io"
"path"
"strconv"
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fspath"
"github.com/ncw/rclone/fs/hash"
"github.com/pkg/errors"
)
// Globals
var (
// Flags
cryptShowMapping = flags.BoolP("crypt-show-mapping", "", false, "For all files listed show how the names encrypt.")
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -25,13 +30,11 @@ func init() {
Description: "Encrypt/Decrypt a remote",
NewFs: NewFs,
Options: []fs.Option{{
Name: "remote",
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
Required: true,
Name: "remote",
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
}, {
Name: "filename_encryption",
Help: "How to encrypt the filenames.",
Default: "standard",
Name: "filename_encryption",
Help: "How to encrypt the filenames.",
Examples: []fs.OptionExample{
{
Value: "off",
@@ -45,9 +48,8 @@ func init() {
},
},
}, {
Name: "directory_name_encryption",
Help: "Option to either encrypt directory names or leave them intact.",
Default: true,
Name: "directory_name_encryption",
Help: "Option to either encrypt directory names or leave them intact.",
Examples: []fs.OptionExample{
{
Value: "true",
@@ -66,98 +68,68 @@ func init() {
Name: "password2",
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
IsPassword: true,
}, {
Name: "show_mapping",
Help: `For all files listed show how the names encrypt.
If this flag is set then for each file that the remote is asked to
list, it will log (at level INFO) a line stating the decrypted file
name and the encrypted file name.
This is so you can work out which encrypted names are which decrypted
names just in case you need to do something with the encrypted file
names, or for debugging purposes.`,
Default: false,
Hide: fs.OptionHideConfigurator,
Advanced: true,
Optional: true,
}},
})
}
// newCipherForConfig constructs a Cipher for the given config name
func newCipherForConfig(opt *Options) (Cipher, error) {
mode, err := NewNameEncryptionMode(opt.FilenameEncryption)
// NewCipher constructs a Cipher for the given config name
func NewCipher(name string) (Cipher, error) {
mode, err := NewNameEncryptionMode(config.FileGet(name, "filename_encryption", "standard"))
if err != nil {
return nil, err
}
if opt.Password == "" {
dirNameEncrypt, err := strconv.ParseBool(config.FileGet(name, "directory_name_encryption", "true"))
if err != nil {
return nil, err
}
password := config.FileGet(name, "password", "")
if password == "" {
return nil, errors.New("password not set in config file")
}
password, err := obscure.Reveal(opt.Password)
password, err = obscure.Reveal(password)
if err != nil {
return nil, errors.Wrap(err, "failed to decrypt password")
}
var salt string
if opt.Password2 != "" {
salt, err = obscure.Reveal(opt.Password2)
salt := config.FileGet(name, "password2", "")
if salt != "" {
salt, err = obscure.Reveal(salt)
if err != nil {
return nil, errors.Wrap(err, "failed to decrypt password2")
}
}
cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption)
cipher, err := newCipher(mode, password, salt, dirNameEncrypt)
if err != nil {
return nil, errors.Wrap(err, "failed to make cipher")
}
return cipher, nil
}
// NewCipher constructs a Cipher for the given config
func NewCipher(m configmap.Mapper) (Cipher, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
// NewFs contstructs an Fs from the path, container:path
func NewFs(name, rpath string) (fs.Fs, error) {
cipher, err := NewCipher(name)
if err != nil {
return nil, err
}
return newCipherForConfig(opt)
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
cipher, err := newCipherForConfig(opt)
if err != nil {
return nil, err
}
remote := opt.Remote
remote := config.FileGet(name, "remote")
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
}
wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
}
// Look for a file first
remotePath := fspath.JoinRootPath(wPath, cipher.EncryptFileName(rpath))
wrappedFs, err := wInfo.NewFs(wName, remotePath, wConfig)
remotePath := path.Join(remote, cipher.EncryptFileName(rpath))
wrappedFs, err := fs.NewFs(remotePath)
// if that didn't produce a file, look for a directory
if err != fs.ErrorIsFile {
remotePath = fspath.JoinRootPath(wPath, cipher.EncryptDirName(rpath))
wrappedFs, err = wInfo.NewFs(wName, remotePath, wConfig)
remotePath = path.Join(remote, cipher.EncryptDirName(rpath))
wrappedFs, err = fs.NewFs(remotePath)
}
if err != fs.ErrorIsFile && err != nil {
return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", wName, remotePath)
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remotePath)
}
f := &Fs{
Fs: wrappedFs,
name: name,
root: rpath,
opt: *opt,
cipher: cipher,
}
// the features here are ones we could support, and they are
@@ -173,7 +145,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
doChangeNotify := wrappedFs.Features().ChangeNotify
if doChangeNotify != nil {
f.features.ChangeNotify = func(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
f.features.ChangeNotify = func(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
decrypted, err := f.DecryptFileName(path)
if err != nil {
@@ -182,29 +154,18 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
}
notifyFunc(decrypted, entryType)
}
doChangeNotify(wrappedNotifyFunc, pollInterval)
return doChangeNotify(wrappedNotifyFunc, pollInterval)
}
}
return f, err
}
// Options defines the configuration for this backend
type Options struct {
Remote string `config:"remote"`
FilenameEncryption string `config:"filename_encryption"`
DirectoryNameEncryption bool `config:"directory_name_encryption"`
Password string `config:"password"`
Password2 string `config:"password2"`
ShowMapping bool `config:"show_mapping"`
}
// Fs represents a wrapped fs.Fs
type Fs struct {
fs.Fs
name string
root string
opt Options
features *fs.Features // optional features
cipher Cipher
}
@@ -237,7 +198,7 @@ func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
fs.Debugf(remote, "Skipping undecryptable file name: %v", err)
return
}
if f.opt.ShowMapping {
if *cryptShowMapping {
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
}
*entries = append(*entries, f.newObject(obj))
@@ -251,7 +212,7 @@ func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) {
fs.Debugf(remote, "Skipping undecryptable dir name: %v", err)
return
}
if f.opt.ShowMapping {
if *cryptShowMapping {
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
}
*entries = append(*entries, f.newDir(dir))
@@ -344,13 +305,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
if err != nil {
return nil, err
}
// unwrap the accounting
var wrap accounting.WrapFn
wrappedIn, wrap = accounting.UnWrap(wrappedIn)
// add the hasher
wrappedIn = io.TeeReader(wrappedIn, hasher)
// wrap the accounting back on
wrappedIn = wrap(wrappedIn)
}
// Transfer the data
@@ -555,7 +510,7 @@ func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
}
// ComputeHash takes the nonce from o, and encrypts the contents of
// src with it, and calculates the hash given by HashType on the fly
// src with it, and calcuates the hash given by HashType on the fly
//
// Note that we break lots of encapsulation in this function.
func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
@@ -723,15 +678,15 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
// newDir returns a dir with the Name decrypted
func (f *Fs) newDir(dir fs.Directory) fs.Directory {
newDir := fs.NewDirCopy(dir)
new := fs.NewDirCopy(dir)
remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote)
if err != nil {
fs.Debugf(remote, "Undecryptable dir name: %v", err)
} else {
newDir.SetRemote(decryptedRemote)
new.SetRemote(decryptedRemote)
}
return newDir
return new
}
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source
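
The dominant change in the crypt.go hunks is moving from config.FileGet lookups and package-level flags to an Options struct decoded with configstruct from a configmap.Mapper. A minimal sketch of that pattern, assuming the configmap.Simple map helper:

package main

import (
	"fmt"
	"log"

	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
)

// Options mirrors the idiom in the diff: fields tagged with the config
// key replace scattered config.FileGet calls.
type Options struct {
	Remote      string `config:"remote"`
	ShowMapping bool   `config:"show_mapping"`
}

func main() {
	m := configmap.Simple{
		"remote":       "myremote:path",
		"show_mapping": "true",
	}
	opt := new(Options)
	if err := configstruct.Set(m, opt); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", *opt) // {Remote:myremote:path ShowMapping:true}
}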


@@ -7,30 +7,13 @@ import (
"testing"
"github.com/ncw/rclone/backend/crypt"
_ "github.com/ncw/rclone/backend/drive" // for integration tests
_ "github.com/ncw/rclone/backend/local"
_ "github.com/ncw/rclone/backend/swift" // for integration tests
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
t.Skip("Skipping as -remote not set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*crypt.Object)(nil),
})
}
// TestStandard runs integration tests against the remote
func TestStandard(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
name := "TestCrypt"
fstests.Run(t, &fstests.Opt{
@@ -47,9 +30,6 @@ func TestStandard(t *testing.T) {
// TestOff runs integration tests against the remote
func TestOff(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-off")
name := "TestCrypt2"
fstests.Run(t, &fstests.Opt{
@@ -66,9 +46,6 @@ func TestOff(t *testing.T) {
// TestObfuscate runs integration tests against the remote
func TestObfuscate(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
name := "TestCrypt3"
fstests.Run(t, &fstests.Opt{
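
The crypt test hunks gate each run on the -remote flag (fstest.RemoteName in the real code): generic integration tests run only when -remote is supplied, while the fixed TestCrypt* remotes run only when it is not, so the two modes are mutually exclusive. A sketch of the gating idiom in a plain _test.go file:

package gate

import (
	"flag"
	"testing"
)

var remoteName = flag.String("remote", "", "remote to run integration tests against")

// TestIntegration skips unless -remote was given on the go test command
// line, the same pattern the hunk above adds.
func TestIntegration(t *testing.T) {
	if *remoteName == "" {
		t.Skip("Skipping as -remote not set")
	}
	// fstests.Run(t, &fstests.Opt{RemoteName: *remoteName, ...}) would go here
}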

File diff suppressed because it is too large


@@ -1,82 +1,63 @@
// +build go1.9
package drive
import (
"bytes"
"encoding/json"
"io"
"io/ioutil"
"mime"
"path/filepath"
"strings"
"testing"
_ "github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/operations"
"github.com/ncw/rclone/fstest/fstests"
"google.golang.org/api/drive/v3"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/api/drive/v3"
)
func TestDriveScopes(t *testing.T) {
for _, test := range []struct {
in string
want []string
wantFlag bool
}{
{"", []string{
"https://www.googleapis.com/auth/drive",
}, false},
{" drive.file , drive.readonly", []string{
"https://www.googleapis.com/auth/drive.file",
"https://www.googleapis.com/auth/drive.readonly",
}, false},
{" drive.file , drive.appfolder", []string{
"https://www.googleapis.com/auth/drive.file",
"https://www.googleapis.com/auth/drive.appfolder",
}, true},
} {
got := driveScopes(test.in)
assert.Equal(t, test.want, got, test.in)
gotFlag := driveScopesContainsAppFolder(got)
assert.Equal(t, test.wantFlag, gotFlag, test.in)
}
}
const exampleExportFormats = `{
"application/vnd.google-apps.document": [
"application/rtf",
"application/vnd.oasis.opendocument.text",
"text/html",
"application/pdf",
"application/epub+zip",
"application/zip",
"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
"text/plain"
],
"application/vnd.google-apps.spreadsheet": [
"application/x-vnd.oasis.opendocument.spreadsheet",
"text/tab-separated-values",
"application/pdf",
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"text/csv",
"application/zip",
"application/vnd.oasis.opendocument.spreadsheet"
],
"application/vnd.google-apps.jam": [
"application/pdf"
],
"application/vnd.google-apps.script": [
"application/vnd.google-apps.script+json"
],
"application/vnd.google-apps.presentation": [
"application/vnd.oasis.opendocument.presentation",
"application/pdf",
"application/vnd.openxmlformats-officedocument.presentationml.presentation",
"text/plain"
],
"application/vnd.google-apps.form": [
"application/zip"
],
"application/vnd.google-apps.drawing": [
"image/svg+xml",
"image/png",
"application/pdf",
"image/jpeg"
]
}`
/*
var additionalMimeTypes = map[string]string{
"application/vnd.ms-excel.sheet.macroenabled.12": ".xlsm",
"application/vnd.ms-excel.template.macroenabled.12": ".xltm",
"application/vnd.ms-powerpoint.presentation.macroenabled.12": ".pptm",
"application/vnd.ms-powerpoint.slideshow.macroenabled.12": ".ppsm",
"application/vnd.ms-powerpoint.template.macroenabled.12": ".potm",
"application/vnd.ms-powerpoint": ".ppt",
"application/vnd.ms-word.document.macroenabled.12": ".docm",
"application/vnd.ms-word.template.macroenabled.12": ".dotm",
"application/vnd.openxmlformats-officedocument.presentationml.template": ".potx",
"application/vnd.openxmlformats-officedocument.spreadsheetml.template": ".xltx",
"application/vnd.openxmlformats-officedocument.wordprocessingml.template": ".dotx",
"application/vnd.sun.xml.writer": ".sxw",
"text/richtext": ".rtf",
}
*/
var exportFormats map[string][]string
// Load the example export formats into exportFormats for testing
func TestInternalLoadExampleFormats(t *testing.T) {
fetchFormatsOnce.Do(func() {})
buf, err := ioutil.ReadFile(filepath.FromSlash("test/about.json"))
var about struct {
ExportFormats map[string][]string `json:"exportFormats,omitempty"`
ImportFormats map[string][]string `json:"importFormats,omitempty"`
}
require.NoError(t, err)
require.NoError(t, json.Unmarshal(buf, &about))
_exportFormats = fixMimeTypeMap(about.ExportFormats)
_importFormats = fixMimeTypeMap(about.ImportFormats)
func TestInternalLoadExampleExportFormats(t *testing.T) {
assert.NoError(t, json.Unmarshal([]byte(exampleExportFormats), &exportFormats))
}
func TestInternalParseExtensions(t *testing.T) {
@@ -85,204 +66,47 @@ func TestInternalParseExtensions(t *testing.T) {
want []string
wantErr error
}{
{"doc", []string{".doc"}, nil},
{" docx ,XLSX, pptx,svg", []string{".docx", ".xlsx", ".pptx", ".svg"}, nil},
{"docx,svg,Docx", []string{".docx", ".svg"}, nil},
{"docx,potato,docx", []string{".docx"}, errors.New(`couldn't find MIME type for extension ".potato"`)},
{"doc", []string{"doc"}, nil},
{" docx ,XLSX, pptx,svg", []string{"docx", "xlsx", "pptx", "svg"}, nil},
{"docx,svg,Docx", []string{"docx", "svg"}, nil},
{"docx,potato,docx", []string{"docx"}, errors.New(`couldn't find mime type for extension "potato"`)},
} {
extensions, _, gotErr := parseExtensions(test.in)
f := new(Fs)
gotErr := f.parseExtensions(test.in)
if test.wantErr == nil {
assert.NoError(t, gotErr)
} else {
assert.EqualError(t, gotErr, test.wantErr.Error())
}
assert.Equal(t, test.want, extensions)
assert.Equal(t, test.want, f.extensions)
}
// Test it is appending
extensions, _, gotErr := parseExtensions("docx,svg", "docx,svg,xlsx")
assert.NoError(t, gotErr)
assert.Equal(t, []string{".docx", ".svg", ".xlsx"}, extensions)
f := new(Fs)
assert.Nil(t, f.parseExtensions("docx,svg"))
assert.Nil(t, f.parseExtensions("docx,svg,xlsx"))
assert.Equal(t, []string{"docx", "svg", "xlsx"}, f.extensions)
}
func TestInternalFindExportFormat(t *testing.T) {
item := &drive.File{
Name: "file",
MimeType: "application/vnd.google-apps.document",
}
item := new(drive.File)
item.MimeType = "application/vnd.google-apps.document"
for _, test := range []struct {
extensions []string
wantExtension string
wantMimeType string
}{
{[]string{}, "", ""},
{[]string{".pdf"}, ".pdf", "application/pdf"},
{[]string{".pdf", ".rtf", ".xls"}, ".pdf", "application/pdf"},
{[]string{".xls", ".rtf", ".pdf"}, ".rtf", "application/rtf"},
{[]string{".xls", ".csv", ".svg"}, "", ""},
{[]string{"pdf"}, "pdf", "application/pdf"},
{[]string{"pdf", "rtf", "xls"}, "pdf", "application/pdf"},
{[]string{"xls", "rtf", "pdf"}, "rtf", "application/rtf"},
{[]string{"xls", "csv", "svg"}, "", ""},
} {
f := new(Fs)
f.exportExtensions = test.extensions
gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(item)
f.extensions = test.extensions
gotExtension, gotMimeType := f.findExportFormat("file", exportFormats[item.MimeType])
assert.Equal(t, test.wantExtension, gotExtension)
if test.wantExtension != "" {
assert.Equal(t, item.Name+gotExtension, gotFilename)
} else {
assert.Equal(t, "", gotFilename)
}
assert.Equal(t, test.wantMimeType, gotMimeType)
assert.Equal(t, true, gotIsDocument)
}
}
func TestMimeTypesToExtension(t *testing.T) {
for mimeType, extension := range _mimeTypeToExtension {
extensions, err := mime.ExtensionsByType(mimeType)
assert.NoError(t, err)
assert.Contains(t, extensions, extension)
}
}
func TestExtensionToMimeType(t *testing.T) {
for mimeType, extension := range _mimeTypeToExtension {
gotMimeType := mime.TypeByExtension(extension)
mediatype, _, err := mime.ParseMediaType(gotMimeType)
assert.NoError(t, err)
assert.Equal(t, mimeType, mediatype)
}
}
func TestExtensionsForExportFormats(t *testing.T) {
if _exportFormats == nil {
t.Error("exportFormats == nil")
}
for fromMT, toMTs := range _exportFormats {
for _, toMT := range toMTs {
if !isInternalMimeType(toMT) {
extensions, err := mime.ExtensionsByType(toMT)
assert.NoError(t, err, "invalid MIME type %q", toMT)
assert.NotEmpty(t, extensions, "No extension found for %q (from: %q)", fromMT, toMT)
}
}
}
}
func TestExtensionsForImportFormats(t *testing.T) {
t.Skip()
if _importFormats == nil {
t.Error("_importFormats == nil")
}
for fromMT := range _importFormats {
if !isInternalMimeType(fromMT) {
extensions, err := mime.ExtensionsByType(fromMT)
assert.NoError(t, err, "invalid MIME type %q", fromMT)
assert.NotEmpty(t, extensions, "No extension found for %q", fromMT)
}
}
}
func (f *Fs) InternalTestDocumentImport(t *testing.T) {
oldAllow := f.opt.AllowImportNameChange
f.opt.AllowImportNameChange = true
defer func() {
f.opt.AllowImportNameChange = oldAllow
}()
testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
require.NoError(t, err)
testFilesFs, err := fs.NewFs(testFilesPath)
require.NoError(t, err)
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
require.NoError(t, err)
err = operations.CopyFile(f, testFilesFs, "example2.doc", "example2.doc")
require.NoError(t, err)
}
func (f *Fs) InternalTestDocumentUpdate(t *testing.T) {
testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
require.NoError(t, err)
testFilesFs, err := fs.NewFs(testFilesPath)
require.NoError(t, err)
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
require.NoError(t, err)
err = operations.CopyFile(f, testFilesFs, "example2.xlsx", "example1.ods")
require.NoError(t, err)
}
func (f *Fs) InternalTestDocumentExport(t *testing.T) {
var buf bytes.Buffer
var err error
f.exportExtensions, _, err = parseExtensions("txt")
require.NoError(t, err)
obj, err := f.NewObject("example2.txt")
require.NoError(t, err)
rc, err := obj.Open()
require.NoError(t, err)
defer func() { require.NoError(t, rc.Close()) }()
_, err = io.Copy(&buf, rc)
require.NoError(t, err)
text := buf.String()
for _, excerpt := range []string{
"Lorem ipsum dolor sit amet, consectetur",
"porta at ultrices in, consectetur at augue.",
} {
require.Contains(t, text, excerpt)
}
}
func (f *Fs) InternalTestDocumentLink(t *testing.T) {
var buf bytes.Buffer
var err error
f.exportExtensions, _, err = parseExtensions("link.html")
require.NoError(t, err)
obj, err := f.NewObject("example2.link.html")
require.NoError(t, err)
rc, err := obj.Open()
require.NoError(t, err)
defer func() { require.NoError(t, rc.Close()) }()
_, err = io.Copy(&buf, rc)
require.NoError(t, err)
text := buf.String()
require.True(t, strings.HasPrefix(text, "<html>"))
require.True(t, strings.HasSuffix(text, "</html>\n"))
for _, excerpt := range []string{
`<meta http-equiv="refresh"`,
`Loading <a href="`,
} {
require.Contains(t, text, excerpt)
}
}
func (f *Fs) InternalTest(t *testing.T) {
// These tests all depend on each other so run them as nested tests
t.Run("DocumentImport", func(t *testing.T) {
f.InternalTestDocumentImport(t)
t.Run("DocumentUpdate", func(t *testing.T) {
f.InternalTestDocumentUpdate(t)
t.Run("DocumentExport", func(t *testing.T) {
f.InternalTestDocumentExport(t)
t.Run("DocumentLink", func(t *testing.T) {
f.InternalTestDocumentLink(t)
})
})
})
})
}
var _ fstests.InternalTester = (*Fs)(nil)
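
These drive tests exercise parseExtensions, which on one side of the diff normalises extensions to a leading dot and validates them against known MIME types. A stand-alone sketch of that behaviour using the stdlib mime package; the real code also registers extra Office MIME types, which this omits:

package main

import (
	"fmt"
	"mime"
	"strings"
)

// parseExtensions splits a comma separated list, normalises each entry
// to a lower-case, dot-prefixed extension, drops duplicates, and fails
// on extensions with no known MIME type.
func parseExtensions(list string) ([]string, error) {
	var out []string
	seen := map[string]bool{}
	for _, ext := range strings.Split(list, ",") {
		ext = strings.ToLower(strings.TrimSpace(ext))
		if ext == "" {
			continue
		}
		if !strings.HasPrefix(ext, ".") {
			ext = "." + ext
		}
		if mime.TypeByExtension(ext) == "" {
			return out, fmt.Errorf("couldn't find MIME type for extension %q", ext)
		}
		if !seen[ext] {
			out = append(out, ext)
			seen[ext] = true
		}
	}
	return out, nil
}

func main() {
	fmt.Println(parseExtensions(" pdf ,SVG, pdf")) // [.pdf .svg] <nil>
	fmt.Println(parseExtensions("pdf,potato"))     // [.pdf] couldn't find MIME type for extension ".potato"
}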


@@ -1,13 +1,10 @@
// Test Drive filesystem interface
// +build go1.9
package drive
package drive_test
import (
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/backend/drive"
"github.com/ncw/rclone/fstest/fstests"
)
@@ -15,23 +12,6 @@ import (
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestDrive:",
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
CeilChunkSize: fstests.NextPowerOfTwo,
},
NilObject: (*drive.Object)(nil),
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
)
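
The drive and dropbox test hunks use compile-time assertions such as var _ fstests.SetUploadChunkSizer = (*Fs)(nil) so the test harness can discover tunable backends with a type assertion. A self-contained sketch of the pattern with local stand-in types:

package main

import "fmt"

type SizeSuffix int64

// SetUploadChunkSizer stands in for the fstests interface: backends
// that support tuning expose a setter returning the previous value.
type SetUploadChunkSizer interface {
	SetUploadChunkSize(SizeSuffix) (SizeSuffix, error)
}

type Fs struct{ chunkSize SizeSuffix }

func (f *Fs) SetUploadChunkSize(cs SizeSuffix) (SizeSuffix, error) {
	old := f.chunkSize
	f.chunkSize = cs
	return old, nil
}

// Compile-time check, as in the diff: fails to build if *Fs ever stops
// satisfying the interface.
var _ SetUploadChunkSizer = (*Fs)(nil)

func main() {
	f := &Fs{chunkSize: 48}
	old, _ := f.SetUploadChunkSize(64)
	fmt.Println(old, f.chunkSize) // 48 64
}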


@@ -1,6 +0,0 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build !go1.9
package drive


@@ -1,178 +0,0 @@
{
"importFormats": {
"text/tab-separated-values": [
"application/vnd.google-apps.spreadsheet"
],
"application/x-vnd.oasis.opendocument.presentation": [
"application/vnd.google-apps.presentation"
],
"image/jpeg": [
"application/vnd.google-apps.document"
],
"image/bmp": [
"application/vnd.google-apps.document"
],
"image/gif": [
"application/vnd.google-apps.document"
],
"application/vnd.ms-excel.sheet.macroenabled.12": [
"application/vnd.google-apps.spreadsheet"
],
"application/vnd.openxmlformats-officedocument.wordprocessingml.template": [
"application/vnd.google-apps.document"
],
"application/vnd.ms-powerpoint.presentation.macroenabled.12": [
"application/vnd.google-apps.presentation"
],
"application/vnd.ms-word.template.macroenabled.12": [
"application/vnd.google-apps.document"
],
"application/vnd.openxmlformats-officedocument.wordprocessingml.document": [
"application/vnd.google-apps.document"
],
"image/pjpeg": [
"application/vnd.google-apps.document"
],
"application/vnd.google-apps.script+text/plain": [
"application/vnd.google-apps.script"
],
"application/vnd.ms-excel": [
"application/vnd.google-apps.spreadsheet"
],
"application/vnd.sun.xml.writer": [
"application/vnd.google-apps.document"
],
"application/vnd.ms-word.document.macroenabled.12": [
"application/vnd.google-apps.document"
],
"application/vnd.ms-powerpoint.slideshow.macroenabled.12": [
"application/vnd.google-apps.presentation"
],
"text/rtf": [
"application/vnd.google-apps.document"
],
"text/plain": [
"application/vnd.google-apps.document"
],
"application/vnd.oasis.opendocument.spreadsheet": [
"application/vnd.google-apps.spreadsheet"
],
"application/x-vnd.oasis.opendocument.spreadsheet": [
"application/vnd.google-apps.spreadsheet"
],
"image/png": [
"application/vnd.google-apps.document"
],
"application/x-vnd.oasis.opendocument.text": [
"application/vnd.google-apps.document"
],
"application/msword": [
"application/vnd.google-apps.document"
],
"application/pdf": [
"application/vnd.google-apps.document"
],
"application/json": [
"application/vnd.google-apps.script"
],
"application/x-msmetafile": [
"application/vnd.google-apps.drawing"
],
"application/vnd.openxmlformats-officedocument.spreadsheetml.template": [
"application/vnd.google-apps.spreadsheet"
],
"application/vnd.ms-powerpoint": [
"application/vnd.google-apps.presentation"
],
"application/vnd.ms-excel.template.macroenabled.12": [
"application/vnd.google-apps.spreadsheet"
],
"image/x-bmp": [
"application/vnd.google-apps.document"
],
"application/rtf": [
"application/vnd.google-apps.document"
],
"application/vnd.openxmlformats-officedocument.presentationml.template": [
"application/vnd.google-apps.presentation"
],
"image/x-png": [
"application/vnd.google-apps.document"
],
"text/html": [
"application/vnd.google-apps.document"
],
"application/vnd.oasis.opendocument.text": [
"application/vnd.google-apps.document"
],
"application/vnd.openxmlformats-officedocument.presentationml.presentation": [
"application/vnd.google-apps.presentation"
],
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": [
"application/vnd.google-apps.spreadsheet"
],
"application/vnd.google-apps.script+json": [
"application/vnd.google-apps.script"
],
"application/vnd.openxmlformats-officedocument.presentationml.slideshow": [
"application/vnd.google-apps.presentation"
],
"application/vnd.ms-powerpoint.template.macroenabled.12": [
"application/vnd.google-apps.presentation"
],
"text/csv": [
"application/vnd.google-apps.spreadsheet"
],
"application/vnd.oasis.opendocument.presentation": [
"application/vnd.google-apps.presentation"
],
"image/jpg": [
"application/vnd.google-apps.document"
],
"text/richtext": [
"application/vnd.google-apps.document"
]
},
"exportFormats": {
"application/vnd.google-apps.document": [
"application/rtf",
"application/vnd.oasis.opendocument.text",
"text/html",
"application/pdf",
"application/epub+zip",
"application/zip",
"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
"text/plain"
],
"application/vnd.google-apps.spreadsheet": [
"application/x-vnd.oasis.opendocument.spreadsheet",
"text/tab-separated-values",
"application/pdf",
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"text/csv",
"application/zip",
"application/vnd.oasis.opendocument.spreadsheet"
],
"application/vnd.google-apps.jam": [
"application/pdf"
],
"application/vnd.google-apps.script": [
"application/vnd.google-apps.script+json"
],
"application/vnd.google-apps.presentation": [
"application/vnd.oasis.opendocument.presentation",
"application/pdf",
"application/vnd.openxmlformats-officedocument.presentationml.presentation",
"text/plain"
],
"application/vnd.google-apps.form": [
"application/zip"
],
"application/vnd.google-apps.drawing": [
"image/svg+xml",
"image/png",
"application/pdf",
"image/jpeg"
]
}
}


@@ -8,8 +8,6 @@
//
// This contains code adapted from google.golang.org/api (C) the GO AUTHORS
// +build go1.9
package drive
import (
@@ -52,16 +50,15 @@ type resumableUpload struct {
}
// Upload the io.Reader in of size bytes with contentType and info
func (f *Fs) Upload(in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) {
params := url.Values{
"alt": {"json"},
"uploadType": {"resumable"},
"fields": {partialFields},
}
func (f *Fs) Upload(in io.Reader, size int64, contentType string, fileID string, info *drive.File, remote string) (*drive.File, error) {
params := make(url.Values)
params.Set("alt", "json")
params.Set("uploadType", "resumable")
params.Set("fields", partialFields)
if f.isTeamDrive {
params.Set("supportsTeamDrives", "true")
}
if f.opt.KeepRevisionForever {
if *driveKeepRevisionForever {
params.Set("keepRevisionForever", "true")
}
urls := "https://www.googleapis.com/upload/drive/v3/files"
@@ -185,7 +182,7 @@ func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunk
// been 200 OK.
//
// So parse the response out of the body. We aren't expecting
// any other 2xx codes, so we parse it unconditionally on
// any other 2xx codes, so we parse it unconditionaly on
// StatusCode
if err = json.NewDecoder(res.Body).Decode(&rx.ret); err != nil {
return 598, err
@@ -200,11 +197,11 @@ func (rx *resumableUpload) Upload() (*drive.File, error) {
start := int64(0)
var StatusCode int
var err error
buf := make([]byte, int(rx.f.opt.ChunkSize))
buf := make([]byte, int(chunkSize))
for start < rx.ContentLength {
reqSize := rx.ContentLength - start
if reqSize >= int64(rx.f.opt.ChunkSize) {
reqSize = int64(rx.f.opt.ChunkSize)
if reqSize >= int64(chunkSize) {
reqSize = int64(chunkSize)
}
chunk := readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
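
The resumable upload hunk streams the file in ChunkSize pieces, reusing one buffer and allowing a short final piece. A runnable sketch of just the chunking loop; the transfer itself is reduced to a comment:

package main

import (
	"fmt"
	"io"
	"strings"
)

// uploadChunks reuses one chunkSize buffer for every piece and allows a
// short final piece, as in the loop above. The real code wraps each
// piece in a repeatable reader and POSTs it to the resumable upload
// URL; here we only count bytes.
func uploadChunks(src io.Reader, contentLength, chunkSize int64) (sent int64, err error) {
	buf := make([]byte, int(chunkSize))
	for start := int64(0); start < contentLength; {
		reqSize := contentLength - start
		if reqSize >= chunkSize {
			reqSize = chunkSize
		}
		n, err := io.ReadFull(src, buf[:reqSize])
		if err != nil {
			return sent, err
		}
		// transfer buf[:n] as the byte range [start, start+reqSize) here
		start += int64(n)
		sent += int64(n)
	}
	return sent, nil
}

func main() {
	n, err := uploadChunks(strings.NewReader(strings.Repeat("x", 10)), 10, 4)
	fmt.Println(n, err) // 10 <nil>
}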


@@ -31,16 +31,13 @@ import (
"time"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
@@ -58,6 +55,24 @@ const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
var (
// Description of how to auth for this app
dropboxConfig = &oauth2.Config{
Scopes: []string{},
// Endpoint: oauth2.Endpoint{
// AuthURL: "https://www.dropbox.com/1/oauth2/authorize",
// TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
// },
Endpoint: dropbox.OAuthEndpoint(""),
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
// A regexp matching path names for files Dropbox ignores
// See https://www.dropbox.com/en/help/145 - Ignored files
ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)
// Upload chunk size - setting too small makes uploads slow.
// Chunks are buffered into memory for retries.
//
@@ -81,26 +96,8 @@ const (
// Choose 48MB which is 91% of Maximum speed. rclone by
// default does 4 transfers so this should use 4*48MB = 192MB
// by default.
defaultChunkSize = 48 * fs.MebiByte
maxChunkSize = 150 * fs.MebiByte
)
var (
// Description of how to auth for this app
dropboxConfig = &oauth2.Config{
Scopes: []string{},
// Endpoint: oauth2.Endpoint{
// AuthURL: "https://www.dropbox.com/1/oauth2/authorize",
// TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
// },
Endpoint: dropbox.OAuthEndpoint(""),
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
// A regexp matching path names for files Dropbox ignores
// See https://www.dropbox.com/en/help/145 - Ignored files
ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)
uploadChunkSize = fs.SizeSuffix(48 * 1024 * 1024)
maxUploadChunkSize = fs.SizeSuffix(150 * 1024 * 1024)
)
// Register with Fs
@@ -109,58 +106,34 @@ func init() {
Name: "dropbox",
Description: "Dropbox",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
err := oauthutil.ConfigNoOffline("dropbox", name, m, dropboxConfig)
Config: func(name string) {
err := oauthutil.ConfigNoOffline("dropbox", name, dropboxConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Dropbox App Client Id\nLeave blank normally.",
Help: "Dropbox App Client Id - leave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Dropbox App Client Secret\nLeave blank normally.",
}, {
Name: "chunk_size",
Help: fmt.Sprintf(`Upload chunk size. (< %v).
Any files larger than this will be uploaded in chunks of this size.
Note that chunks are buffered in memory (one at a time) so rclone can
deal with retries. Setting this larger will increase the speed
slightly (at most 10%% for 128MB in tests) at the cost of using more
memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "impersonate",
Help: "Impersonate this user when using a business account.",
Default: "",
Advanced: true,
Help: "Dropbox App Client Secret - leave blank normally.",
}},
})
}
// Options defines the configuration for this backend
type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
flags.VarP(&uploadChunkSize, "dropbox-chunk-size", "", fmt.Sprintf("Upload chunk size. Max %v.", maxUploadChunkSize))
}
// Fs represents a remote dropbox server
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv files.Client // the connection to the dropbox server
sharing sharing.Client // as above, but for generating sharing links
users users.Client // as above, but for accessing user information
team team.Client // for the Teams API
slashRoot string // root with "/" prefix, lowercase
slashRootSlash string // root with "/" prefix and postfix, lowercase
pacer *fs.Pacer // To pace the API calls
pacer *pacer.Pacer // To pace the API calls
ns string // The namespace we are using or "" for none
}
@@ -204,59 +177,23 @@ func shouldRetry(err error) (bool, error) {
return false, err
}
baseErrString := errors.Cause(err).Error()
// handle any official Retry-After header from Dropbox's SDK first
switch e := err.(type) {
case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 {
fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
}
return true, err
}
// Keep old behavior for backward compatibility
if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") || baseErrString == "" {
// FIXME there is probably a better way of doing this!
if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") {
return true, err
}
return fserrors.ShouldRetry(err), err
}
func checkUploadChunkSize(cs fs.SizeSuffix) error {
const minChunkSize = fs.Byte
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
if cs > maxChunkSize {
return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
}
return nil
}
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
}
return
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "dropbox: chunk size")
// NewFs contstructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
if uploadChunkSize > maxUploadChunkSize {
return nil, errors.Errorf("chunk size too big, must be < %v", maxUploadChunkSize)
}
// Convert the old token if it exists. The old token was just
// just a string, the new one is a JSON blob
oldToken, ok := m.Get(config.ConfigToken)
oldToken = strings.TrimSpace(oldToken)
if ok && oldToken != "" && oldToken[0] != '{' {
oldToken := strings.TrimSpace(config.FileGet(name, config.ConfigToken))
if oldToken != "" && oldToken[0] != '{' {
fs.Infof(name, "Converting token to new format")
newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
err := config.SetValueAndSave(name, config.ConfigToken, newToken)
@@ -265,44 +202,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
}
oAuthClient, _, err := oauthutil.NewClient(name, m, dropboxConfig)
oAuthClient, _, err := oauthutil.NewClient(name, dropboxConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure dropbox")
}
f := &Fs{
name: name,
opt: *opt,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}
config := dropbox.Config{
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
Client: oAuthClient, // maybe???
HeaderGenerator: f.headerGenerator,
}
// NOTE: needs to be created pre-impersonation so we can look up the impersonated user
f.team = team.New(config)
if opt.Impersonate != "" {
user := team.UserSelectorArg{
Email: opt.Impersonate,
}
user.Tag = "email"
members := []*team.UserSelectorArg{&user}
args := team.NewMembersGetInfoArgs(members)
memberIds, err := f.team.MembersGetInfo(args)
if err != nil {
return nil, errors.Wrapf(err, "invalid dropbox team member: %q", opt.Impersonate)
}
config.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
}
f.srv = files.New(config)
f.sharing = sharing.New(config)
f.users = users.New(config)
@@ -998,7 +911,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
// unknown (i.e. -1) or smaller than uploadChunkSize, the method incurs an
// avoidable request to the Dropbox API that does not carry payload.
func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size int64) (entry *files.FileMetadata, err error) {
chunkSize := int64(o.fs.opt.ChunkSize)
chunkSize := int64(uploadChunkSize)
chunks := 0
if size != -1 {
chunks = int(size/chunkSize) + 1
@@ -1113,7 +1026,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
size := src.Size()
var err error
var entry *files.FileMetadata
if size > int64(o.fs.opt.ChunkSize) || size == -1 {
if size > int64(uploadChunkSize) || size == -1 {
entry, err = o.uploadChunked(in, commitInfo, size)
} else {
err = o.fs.pacer.CallNoRetry(func() (bool, error) {

View File

@@ -1,10 +1,10 @@
// Test Dropbox filesystem interface
package dropbox
package dropbox_test
import (
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/backend/dropbox"
"github.com/ncw/rclone/fstest/fstests"
)
@@ -12,15 +12,6 @@ import (
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestDropbox:",
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MaxChunkSize: maxChunkSize,
},
NilObject: (*dropbox.Object)(nil),
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)

View File

@@ -4,18 +4,18 @@ package ftp
import (
"io"
"net/textproto"
"net/url"
"os"
"path"
"strings"
"sync"
"time"
"github.com/jlaffaye/ftp"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
)
@@ -30,46 +30,33 @@ func init() {
{
Name: "host",
Help: "FTP host to connect to",
Required: true,
Optional: false,
Examples: []fs.OptionExample{{
Value: "ftp.example.com",
Help: "Connect to ftp.example.com",
}},
}, {
Name: "user",
Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
Name: "user",
Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
Optional: true,
}, {
Name: "port",
Help: "FTP port, leave blank to use default (21)",
Name: "port",
Help: "FTP port, leave blank to use default (21) ",
Optional: true,
}, {
Name: "pass",
Help: "FTP password",
IsPassword: true,
Required: true,
}, {
Name: "concurrency",
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited",
Default: 0,
Advanced: true,
Optional: false,
},
},
})
}
// Options defines the configuration for this backend
type Options struct {
Host string `config:"host"`
User string `config:"user"`
Pass string `config:"pass"`
Port string `config:"port"`
Concurrency int `config:"concurrency"`
}
// Fs represents a remote FTP server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
url string
user string
@@ -77,7 +64,6 @@ type Fs struct {
dialAddr string
poolMu sync.Mutex
pool []*ftp.ServerConn
tokens *pacer.TokenDispenser
}
// Object describes an FTP file
@@ -136,9 +122,6 @@ func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
// Get an FTP connection from the pool, or open a new one
func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
if f.opt.Concurrency > 0 {
f.tokens.Get()
}
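// The token taken here is handed back in putFtpConnection, so at most
// opt.Concurrency connections are in use at any one time.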
f.poolMu.Lock()
if len(f.pool) > 0 {
c = f.pool[0]
@@ -158,9 +141,6 @@ func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
// if err is not nil then it checks the connection is alive using a
// NOOP request
func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
if f.opt.Concurrency > 0 {
defer f.tokens.Put()
}
c := *pc
*pc = nil
if err != nil {
@@ -180,39 +160,56 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
f.poolMu.Unlock()
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
// NewFs contstructs an Fs from the path, container:path
func NewFs(name, root string) (ff fs.Fs, err error) {
// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
// Parse config into Options struct
opt := new(Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
// FIXME Convert the old scheme used for the first beta - remove after release
if ftpURL := config.FileGet(name, "url"); ftpURL != "" {
fs.Infof(name, "Converting old configuration")
u, err := url.Parse(ftpURL)
if err != nil {
return nil, errors.Wrapf(err, "Failed to parse old url %q", ftpURL)
}
parts := strings.Split(u.Host, ":")
config.FileSet(name, "host", parts[0])
if len(parts) > 1 {
config.FileSet(name, "port", parts[1])
}
config.FileSet(name, "host", u.Host)
config.FileSet(name, "user", config.FileGet(name, "username"))
config.FileSet(name, "pass", config.FileGet(name, "password"))
config.FileDeleteKey(name, "username")
config.FileDeleteKey(name, "password")
config.FileDeleteKey(name, "url")
config.SaveConfig()
if u.Path != "" && u.Path != "/" {
fs.Errorf(name, "Path %q in FTP URL no longer supported - put it on the end of the remote %s:%s", u.Path, name, u.Path)
}
}
pass, err := obscure.Reveal(opt.Pass)
host := config.FileGet(name, "host")
user := config.FileGet(name, "user")
pass := config.FileGet(name, "pass")
port := config.FileGet(name, "port")
pass, err = obscure.Reveal(pass)
if err != nil {
return nil, errors.Wrap(err, "NewFS decrypt password")
}
user := opt.User
if user == "" {
user = os.Getenv("USER")
}
port := opt.Port
if port == "" {
port = "21"
}
dialAddr := opt.Host + ":" + port
dialAddr := host + ":" + port
u := "ftp://" + path.Join(dialAddr+"/", root)
f := &Fs{
name: name,
root: root,
opt: *opt,
url: u,
user: user,
pass: pass,
dialAddr: dialAddr,
tokens: pacer.NewTokenDispenser(opt.Concurrency),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
@@ -661,21 +658,7 @@ func (f *ftpReadCloser) Read(p []byte) (n int, err error) {
// Close the FTP reader and return the connection to the pool
func (f *ftpReadCloser) Close() error {
var err error
errchan := make(chan error, 1)
go func() {
errchan <- f.rc.Close()
}()
// Wait for Close for up to 60 seconds
timer := time.NewTimer(60 * time.Second)
select {
case err = <-errchan:
timer.Stop()
case <-timer.C:
// if timer fired assume no error but connection dead
fs.Errorf(f.f, "Timeout when waiting for connection Close")
return nil
}
err := f.rc.Close()
// if errors while reading or closing, dump the connection
if err != nil || f.err != nil {
_ = f.c.Quit()
@@ -733,11 +716,6 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
path := path.Join(o.fs.root, o.remote)
// remove the file if upload failed
remove := func() {
// Give the FTP server a chance to get its internal state in order after the error.
// The error may have been local in which case we closed the connection. The server
// may still be dealing with it for a moment. A sleep isn't ideal but I haven't been
// able to think of a better method to find out if the server has finished - ncw
time.Sleep(1 * time.Second)
removeErr := o.Remove()
if removeErr != nil {
fs.Debugf(o, "Failed to remove: %v", removeErr)
@@ -751,7 +729,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
}
err = c.Stor(path, in)
if err != nil {
_ = c.Quit() // toss this connection to avoid sync errors
_ = c.Quit()
remove()
return errors.Wrap(err, "update stor")
}

View File

@@ -1,7 +1,4 @@
// Package googlecloudstorage provides an interface to Google Cloud Storage
// +build go1.9
package googlecloudstorage
/*
@@ -16,7 +13,6 @@ FIXME Patch/Delete/Get isn't working with files with spaces in - giving 404 erro
*/
import (
"context"
"encoding/base64"
"encoding/hex"
"fmt"
@@ -33,8 +29,7 @@ import (
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
@@ -46,8 +41,6 @@ import (
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/googleapi"
// NOTE: This API is deprecated
storage "google.golang.org/api/storage/v1"
)
@@ -62,6 +55,8 @@ const (
)
var (
gcsLocation = flags.StringP("gcs-location", "", "", "Default location for buckets (us|eu|asia|us-central1|us-east1|us-east4|us-west1|asia-east1|asia-northeast1|asia-southeast1|australia-southeast1|europe-west1|europe-west2).")
gcsStorageClass = flags.StringP("gcs-storage-class", "", "", "Default storage class for buckets (MULTI_REGIONAL|REGIONAL|STANDARD|NEARLINE|COLDLINE|DURABLE_REDUCED_AVAILABILITY).")
// Description of how to auth for this app
storageConfig = &oauth2.Config{
Scopes: []string{storage.DevstorageFullControlScope},
@@ -76,36 +71,29 @@ var (
func init() {
fs.Register(&fs.RegInfo{
Name: "google cloud storage",
Prefix: "gcs",
Description: "Google Cloud Storage (this is not Google Drive)",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
saFile, _ := m.Get("service_account_file")
saCreds, _ := m.Get("service_account_credentials")
if saFile != "" || saCreds != "" {
Config: func(name string) {
if config.FileGet(name, "service_account_file") != "" {
return
}
err := oauthutil.Config("google cloud storage", name, m, storageConfig)
err := oauthutil.Config("google cloud storage", name, storageConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Google Application Client Id\nLeave blank normally.",
Help: "Google Application Client Id - leave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Google Application Client Secret\nLeave blank normally.",
Help: "Google Application Client Secret - leave blank normally.",
}, {
Name: "project_number",
Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
Help: "Project number optional - needed only for list/create/delete buckets - see your developer console.",
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
}, {
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideBoth,
Help: "Service Account Credentials JSON file path - needed only if you want use SA instead of interactive login.",
}, {
Name: "object_acl",
Help: "Access Control List for new objects.",
@@ -147,22 +135,6 @@ func init() {
Value: "publicReadWrite",
Help: "Project team owners get OWNER access, and all Users get WRITER access.",
}},
}, {
Name: "bucket_policy_only",
Help: `Access checks should use bucket-level IAM policies.
If you want to upload objects to a bucket with Bucket Policy Only set
then you will need to set this.
When it is set, rclone:
- ignores ACLs set on buckets
- ignores ACLs set on objects
- creates buckets with Bucket Policy Only set
Docs: https://cloud.google.com/storage/docs/bucket-policy-only
`,
Default: false,
}, {
Name: "location",
Help: "Location for the newly created buckets.",
@@ -181,36 +153,21 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
}, {
Value: "asia-east1",
Help: "Taiwan.",
}, {
Value: "asia-east2",
Help: "Hong Kong.",
}, {
Value: "asia-northeast1",
Help: "Tokyo.",
}, {
Value: "asia-south1",
Help: "Mumbai.",
}, {
Value: "asia-southeast1",
Help: "Singapore.",
}, {
Value: "australia-southeast1",
Help: "Sydney.",
}, {
Value: "europe-north1",
Help: "Finland.",
}, {
Value: "europe-west1",
Help: "Belgium.",
}, {
Value: "europe-west2",
Help: "London.",
}, {
Value: "europe-west3",
Help: "Frankfurt.",
}, {
Value: "europe-west4",
Help: "Netherlands.",
}, {
Value: "us-central1",
Help: "Iowa.",
@@ -223,9 +180,6 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
}, {
Value: "us-west1",
Help: "Oregon.",
}, {
Value: "us-west2",
Help: "California.",
}},
}, {
Name: "storage_class",
@@ -253,30 +207,22 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
})
}
// Options defines the configuration for this backend
type Options struct {
ProjectNumber string `config:"project_number"`
ServiceAccountFile string `config:"service_account_file"`
ServiceAccountCredentials string `config:"service_account_credentials"`
ObjectACL string `config:"object_acl"`
BucketACL string `config:"bucket_acl"`
BucketPolicyOnly bool `config:"bucket_policy_only"`
Location string `config:"location"`
StorageClass string `config:"storage_class"`
}
// Fs represents a remote storage server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
bucket string // the bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucket OK
bucketOK bool // true if we have created the bucket
pacer *fs.Pacer // To pace the API calls
name string // name of this remote
root string // the path we are working on if any
features *fs.Features // optional features
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
bucket string // the bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucket OK
bucketOK bool // true if we have created the bucket
projectNumber string // used for finding buckets
objectACL string // used when creating new objects
bucketACL string // used when creating new buckets
location string // location of new buckets
storageClass string // storage class of new buckets
pacer *pacer.Pacer // To pace the API calls
}
// Object describes a storage object
@@ -320,7 +266,7 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// shouldRetry determines whether a given err rates being retried
// shouldRetry determines whehter a given err rates being retried
func shouldRetry(err error) (again bool, errOut error) {
again = false
if err != nil {
@@ -368,44 +314,30 @@ func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// NewFs contstructs an Fs from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
var oAuthClient *http.Client
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if opt.ObjectACL == "" {
opt.ObjectACL = "private"
}
if opt.BucketACL == "" {
opt.BucketACL = "private"
}
var err error
// try loading service account credentials from env variable, then from a file
if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
serviceAccountCreds := []byte(config.FileGet(name, "service_account_credentials"))
serviceAccountPath := config.FileGet(name, "service_account_file")
if len(serviceAccountCreds) == 0 && serviceAccountPath != "" {
loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(serviceAccountPath))
if err != nil {
return nil, errors.Wrap(err, "error opening service account credentials file")
}
opt.ServiceAccountCredentials = string(loadedCreds)
serviceAccountCreds = loadedCreds
}
if opt.ServiceAccountCredentials != "" {
oAuthClient, err = getServiceAccountClient([]byte(opt.ServiceAccountCredentials))
if len(serviceAccountCreds) > 0 {
oAuthClient, err = getServiceAccountClient(serviceAccountCreds)
if err != nil {
return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
}
} else {
oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
oAuthClient, _, err = oauthutil.NewClient(name, storageConfig)
if err != nil {
ctx := context.Background()
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
}
return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
}
}
@@ -415,17 +347,33 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
f := &Fs{
name: name,
bucket: bucket,
root: directory,
opt: *opt,
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
name: name,
bucket: bucket,
root: directory,
projectNumber: config.FileGet(name, "project_number"),
objectACL: config.FileGet(name, "object_acl"),
bucketACL: config.FileGet(name, "bucket_acl"),
location: config.FileGet(name, "location"),
storageClass: config.FileGet(name, "storage_class"),
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer),
}
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
}).Fill(f)
if f.objectACL == "" {
f.objectACL = "private"
}
if f.bucketACL == "" {
f.bucketACL = "private"
}
if *gcsLocation != "" {
f.location = *gcsLocation
}
if *gcsStorageClass != "" {
f.storageClass = *gcsStorageClass
}
// Create a new authorized Drive client.
f.client = oAuthClient
@@ -602,10 +550,10 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
if dir != "" {
return nil, fs.ErrorListBucketRequired
}
if f.opt.ProjectNumber == "" {
if f.projectNumber == "" {
return nil, errors.New("can't list buckets without project number")
}
listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks)
listBuckets := f.svc.Buckets.List(f.projectNumber).MaxResults(listChunks)
for {
var buckets *storage.Buckets
err = f.pacer.Call(func() (bool, error) {
@@ -724,28 +672,17 @@ func (f *Fs) Mkdir(dir string) (err error) {
return errors.Wrap(err, "failed to get bucket")
}
if f.opt.ProjectNumber == "" {
if f.projectNumber == "" {
return errors.New("can't make bucket without project number")
}
bucket := storage.Bucket{
Name: f.bucket,
Location: f.opt.Location,
StorageClass: f.opt.StorageClass,
}
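// With Bucket Policy Only, access is controlled by bucket-level IAM
// policies rather than ACLs, so the flag is set via the bucket's IAM
// configuration and the predefined ACL is skipped on insert below.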
if f.opt.BucketPolicyOnly {
bucket.IamConfiguration = &storage.BucketIamConfiguration{
BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
Enabled: true,
},
}
Location: f.location,
StorageClass: f.storageClass,
}
err = f.pacer.Call(func() (bool, error) {
insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket)
if !f.opt.BucketPolicyOnly {
insertBucket.PredefinedAcl(f.opt.BucketACL)
}
_, err = insertBucket.Do()
_, err = f.svc.Buckets.Insert(f.projectNumber, &bucket).PredefinedAcl(f.bucketACL).Do()
return shouldRetry(err)
})
if err == nil {
@@ -1011,11 +948,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
}
var newObject *storage.Object
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
insertObject := o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
if !o.fs.opt.BucketPolicyOnly {
insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = insertObject.Do()
newObject, err = o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.objectACL).Do()
return shouldRetry(err)
})
if err != nil {

View File

@@ -1,7 +1,4 @@
// Test GoogleCloudStorage filesystem interface
// +build go1.9
package googlecloudstorage_test
import (

View File

@@ -1,6 +0,0 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build !go1.9
package googlecloudstorage

View File

@@ -6,7 +6,6 @@ package http
import (
"io"
"mime"
"net/http"
"net/url"
"path"
@@ -15,8 +14,7 @@ import (
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/rest"
@@ -37,47 +35,21 @@ func init() {
Options: []fs.Option{{
Name: "url",
Help: "URL of http host to connect to",
Required: true,
Optional: false,
Examples: []fs.OptionExample{{
Value: "https://example.com",
Help: "Connect to example.com",
}, {
Value: "https://user:pass@example.com",
Help: "Connect to example.com using a username and password",
}},
}, {
Name: "no_slash",
Help: `Set this if the site doesn't end directories with /
Use this if your target website does not use / on the end of
directories.
A / on the end of a path is how rclone normally tells the difference
between files and directories. If this flag is set, then rclone will
treat all files with Content-Type: text/html as directories and read
URLs from them rather than downloading them.
Note that this may cause rclone to confuse genuine HTML files with
directories.`,
Default: false,
Advanced: true,
}},
}
fs.Register(fsi)
}
// Options defines the configuration for this backend
type Options struct {
Endpoint string `config:"url"`
NoSlash bool `config:"no_slash"`
}
// Fs stores the interface to the remote HTTP files
type Fs struct {
name string
root string
features *fs.Features // optional features
opt Options // options for this backend
endpoint *url.URL
endpointURL string // endpoint as a string
httpClient *http.Client
@@ -106,20 +78,14 @@ func statusError(res *http.Response, err error) error {
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if !strings.HasSuffix(opt.Endpoint, "/") {
opt.Endpoint += "/"
func NewFs(name, root string) (fs.Fs, error) {
endpoint := config.FileGet(name, "url")
if !strings.HasSuffix(endpoint, "/") {
endpoint += "/"
}
// Parse the endpoint and stick the root onto it
base, err := url.Parse(opt.Endpoint)
base, err := url.Parse(endpoint)
if err != nil {
return nil, err
}
@@ -164,7 +130,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f := &Fs{
name: name,
root: root,
opt: *opt,
httpClient: client,
endpoint: u,
endpointURL: u.String(),
@@ -214,7 +179,7 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
}
err := o.stat()
if err != nil {
return nil, err
return nil, errors.Wrap(err, "Stat failed")
}
return o, nil
}
@@ -269,7 +234,7 @@ func parseName(base *url.URL, name string) (string, error) {
}
// calculate the name relative to the base
name = u.Path[len(base.Path):]
// mustn't be empty
// musn't be empty
if name == "" {
return "", errNameIsEmpty
}
@@ -288,20 +253,14 @@ func parse(base *url.URL, in io.Reader) (names []string, err error) {
if err != nil {
return nil, err
}
var (
walk func(*html.Node)
seen = make(map[string]struct{})
)
var walk func(*html.Node)
walk = func(n *html.Node) {
if n.Type == html.ElementNode && n.Data == "a" {
for _, a := range n.Attr {
if a.Key == "href" {
name, err := parseName(base, a.Val)
if err == nil {
if _, found := seen[name]; !found {
names = append(names, name)
seen[name] = struct{}{}
}
names = append(names, name)
}
break
}
@@ -326,16 +285,14 @@ func (f *Fs) readDir(dir string) (names []string, err error) {
return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
}
res, err := f.httpClient.Get(URL)
if err == nil {
defer fs.CheckClose(res.Body, &err)
if res.StatusCode == http.StatusNotFound {
return nil, fs.ErrorDirNotFound
}
if err == nil && res.StatusCode == http.StatusNotFound {
return nil, fs.ErrorDirNotFound
}
err = statusError(res, err)
if err != nil {
return nil, errors.Wrap(err, "failed to readDir")
}
defer fs.CheckClose(res.Body, &err)
contentType := strings.SplitN(res.Header.Get("Content-Type"), ";", 2)[0]
switch contentType {
@@ -379,16 +336,11 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
fs: f,
remote: remote,
}
switch err = file.stat(); err {
case nil:
entries = append(entries, file)
case fs.ErrorNotAFile:
// ...found a directory not a file
dir := fs.NewDir(remote, timeUnset)
entries = append(entries, dir)
default:
if err = file.stat(); err != nil {
fs.Debugf(remote, "skipping because of error: %v", err)
continue
}
entries = append(entries, file)
}
}
return entries, nil
@@ -450,9 +402,6 @@ func (o *Object) url() string {
func (o *Object) stat() error {
url := o.url()
res, err := o.fs.httpClient.Head(url)
if err == nil && res.StatusCode == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
err = statusError(res, err)
if err != nil {
return errors.Wrap(err, "failed to stat")
@@ -464,16 +413,6 @@ func (o *Object) stat() error {
o.size = parseInt64(res.Header.Get("Content-Length"), -1)
o.modTime = t
o.contentType = res.Header.Get("Content-Type")
// If NoSlash is set then check ContentType to see if it is a directory
if o.fs.opt.NoSlash {
mediaType, _, err := mime.ParseMediaType(o.contentType)
if err != nil {
return errors.Wrapf(err, "failed to parse Content-Type: %q", o.contentType)
}
if mediaType == "text/html" {
return fs.ErrorNotAFile
}
}
return nil
}

View File

@@ -16,7 +16,6 @@ import (
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/lib/rest"
"github.com/stretchr/testify/assert"
@@ -30,7 +29,7 @@ var (
)
// prepareServer prepares the test server and returns a function to tidy it up afterwards
func prepareServer(t *testing.T) (configmap.Simple, func()) {
func prepareServer(t *testing.T) func() {
// file server for test/files
fileServer := http.FileServer(http.Dir(filesPath))
@@ -42,30 +41,25 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
// fs.Config.LogLevel = fs.LogLevelDebug
// fs.Config.DumpHeaders = true
// fs.Config.DumpBodies = true
// config.FileSet(remoteName, "type", "http")
// config.FileSet(remoteName, "url", ts.URL)
m := configmap.Simple{
"type": "http",
"url": ts.URL,
}
config.FileSet(remoteName, "type", "http")
config.FileSet(remoteName, "url", ts.URL)
// return a function to tidy up
return m, ts.Close
return ts.Close
}
// prepare the test server and return a function to tidy it up afterwards
func prepare(t *testing.T) (fs.Fs, func()) {
m, tidy := prepareServer(t)
tidy := prepareServer(t)
// Instantiate it
f, err := NewFs(remoteName, "", m)
f, err := NewFs(remoteName, "")
require.NoError(t, err)
return f, tidy
}
func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
func testListRoot(t *testing.T, f fs.Fs) {
entries, err := f.List("")
require.NoError(t, err)
@@ -93,29 +87,15 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
e = entries[3]
assert.Equal(t, "two.html", e.Remote())
if noSlash {
assert.Equal(t, int64(-1), e.Size())
_, ok = e.(fs.Directory)
assert.True(t, ok)
} else {
assert.Equal(t, int64(41), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
}
assert.Equal(t, int64(7), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
}
func TestListRoot(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
testListRoot(t, f, false)
}
func TestListRootNoSlash(t *testing.T) {
f, tidy := prepare(t)
f.(*Fs).opt.NoSlash = true
defer tidy()
testListRoot(t, f, true)
testListRoot(t, f)
}
func TestListSubDir(t *testing.T) {
@@ -158,11 +138,6 @@ func TestNewObject(t *testing.T) {
dt, ok := fstest.CheckTimeEqualWithPrecision(tObj, tFile, time.Second)
assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))
// check object not found
o, err = f.NewObject("not found.txt")
assert.Nil(t, o)
assert.Equal(t, fs.ErrorObjectNotFound, err)
}
func TestOpen(t *testing.T) {
@@ -202,20 +177,20 @@ func TestMimeType(t *testing.T) {
}
func TestIsAFileRoot(t *testing.T) {
m, tidy := prepareServer(t)
tidy := prepareServer(t)
defer tidy()
f, err := NewFs(remoteName, "one%.txt", m)
f, err := NewFs(remoteName, "one%.txt")
assert.Equal(t, err, fs.ErrorIsFile)
testListRoot(t, f, false)
testListRoot(t, f)
}
func TestIsAFileSubDir(t *testing.T) {
m, tidy := prepareServer(t)
tidy := prepareServer(t)
defer tidy()
f, err := NewFs(remoteName, "three/underthree.txt", m)
f, err := NewFs(remoteName, "three/underthree.txt")
assert.Equal(t, err, fs.ErrorIsFile)
entries, err := f.List("")

View File

@@ -1 +1 @@
<a href="two.html/file.txt">file.txt</a>
potato

View File

@@ -2,9 +2,7 @@ package hubic
import (
"net/http"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/swift"
)
@@ -23,17 +21,12 @@ func newAuth(f *Fs) *auth {
// Request constructs a http.Request for authentication
//
// returns nil for not needed
func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
const retries = 10
for try := 1; try <= retries; try++ {
err = a.f.getCredentials()
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
fs.Debugf(a.f, "retrying auth request %d/%d: %v", try, retries, err)
func (a *auth) Request(*swift.Connection) (*http.Request, error) {
err := a.f.getCredentials()
if err != nil {
return nil, err
}
return nil, err
return nil, nil
}
// Response parses the result of an http request

View File

@@ -9,17 +9,13 @@ package hubic
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"strings"
"time"
"github.com/ncw/rclone/backend/swift"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/lib/oauthutil"
@@ -56,19 +52,19 @@ func init() {
Name: "hubic",
Description: "Hubic",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
err := oauthutil.Config("hubic", name, m, oauthConfig)
Config: func(name string) {
err := oauthutil.Config("hubic", name, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: append([]fs.Option{{
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Hubic Client Id\nLeave blank normally.",
Help: "Hubic Client Id - leave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Hubic Client Secret\nLeave blank normally.",
}}, swift.SharedOptions...),
Help: "Hubic Client Secret - leave blank normally.",
}},
})
}
@@ -126,9 +122,7 @@ func (f *Fs) getCredentials() (err error) {
}
defer fs.CheckClose(resp.Body, &err)
if resp.StatusCode < 200 || resp.StatusCode > 299 {
body, _ := ioutil.ReadAll(resp.Body)
bodyStr := strings.TrimSpace(strings.Replace(string(body), "\n", " ", -1))
return errors.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
return errors.Errorf("failed to get credentials: %s", resp.Status)
}
decoder := json.NewDecoder(resp.Body)
var result credentials
@@ -151,8 +145,8 @@ func (f *Fs) getCredentials() (err error) {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
client, _, err := oauthutil.NewClient(name, m, oauthConfig)
func NewFs(name, root string) (fs.Fs, error) {
client, _, err := oauthutil.NewClient(name, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Hubic")
}
@@ -173,15 +167,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.Wrap(err, "error authenticating swift connection")
}
// Parse config into swift.Options struct
opt := new(swift.Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// Make inner swift Fs from the connection
swiftFs, err := swift.NewFsWithConnection(opt, name, root, c, true)
swiftFs, err := swift.NewFsWithConnection(name, root, c, true)
if err != nil && err != fs.ErrorIsFile {
return nil, err
}

View File

@@ -1,316 +0,0 @@
package api
import (
"encoding/xml"
"fmt"
"time"
"github.com/pkg/errors"
)
const (
// default time format for almost all request and responses
timeFormat = "2006-01-02-T15:04:05Z0700"
// the API server seems to use a different format
apiTimeFormat = "2006-01-02T15:04:05Z07:00"
)
// Time represents time values in the Jottacloud API. It uses a custom RFC3339-like format.
type Time time.Time
// UnmarshalXML turns XML into a Time
func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
var v string
if err := d.DecodeElement(&v, &start); err != nil {
return err
}
if v == "" {
*t = Time(time.Time{})
return nil
}
newTime, err := time.Parse(timeFormat, v)
if err == nil {
*t = Time(newTime)
}
return err
}
// MarshalXML turns a Time into XML
func (t *Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
return e.EncodeElement(t.String(), start)
}
// String returns the Time string in Jottacloud format
func (t Time) String() string { return time.Time(t).Format(timeFormat) }
// APIString returns Time string in Jottacloud API format
func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }
// Flag is a hacky type for checking if an attribute is present
type Flag bool
// UnmarshalXMLAttr sets Flag to true if the attribute is present
func (f *Flag) UnmarshalXMLAttr(attr xml.Attr) error {
*f = true
return nil
}
// MarshalXMLAttr : Do not use
func (f *Flag) MarshalXMLAttr(name xml.Name) (xml.Attr, error) {
attr := xml.Attr{
Name: name,
Value: "false",
}
return attr, errors.New("unimplemented")
}
// TokenJSON is the struct representing the HTTP response from OAuth2
// providers returning a token in JSON form.
type TokenJSON struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
RefreshToken string `json:"refresh_token"`
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
}
/*
GET http://www.jottacloud.com/JFS/<account>
<user time="2018-07-18-T21:39:10Z" host="dn-132">
<username>12qh1wsht8cssxdtwl15rqh9</username>
<account-type>free</account-type>
<locked>false</locked>
<capacity>5368709120</capacity>
<max-devices>-1</max-devices>
<max-mobile-devices>-1</max-mobile-devices>
<usage>0</usage>
<read-locked>false</read-locked>
<write-locked>false</write-locked>
<quota-write-locked>false</quota-write-locked>
<enable-sync>true</enable-sync>
<enable-foldershare>true</enable-foldershare>
<devices>
<device>
<name xml:space="preserve">Jotta</name>
<display_name xml:space="preserve">Jotta</display_name>
<type>JOTTA</type>
<sid>5c458d01-9eaf-4f23-8d3c-2486fd9704d8</sid>
<size>0</size>
<modified>2018-07-15-T22:04:59Z</modified>
</device>
</devices>
</user>
*/
// AccountInfo represents a Jottacloud account
type AccountInfo struct {
Username string `xml:"username"`
AccountType string `xml:"account-type"`
Locked bool `xml:"locked"`
Capacity int64 `xml:"capacity"`
MaxDevices int `xml:"max-devices"`
MaxMobileDevices int `xml:"max-mobile-devices"`
Usage int64 `xml:"usage"`
ReadLocked bool `xml:"read-locked"`
WriteLocked bool `xml:"write-locked"`
QuotaWriteLocked bool `xml:"quota-write-locked"`
EnableSync bool `xml:"enable-sync"`
EnableFolderShare bool `xml:"enable-foldershare"`
Devices []JottaDevice `xml:"devices>device"`
}
/*
GET http://www.jottacloud.com/JFS/<account>/<device>
<device time="2018-07-23-T20:21:50Z" host="dn-158">
<name xml:space="preserve">Jotta</name>
<display_name xml:space="preserve">Jotta</display_name>
<type>JOTTA</type>
<sid>5c458d01-9eaf-4f23-8d3c-2486fd9704d8</sid>
<size>0</size>
<modified>2018-07-15-T22:04:59Z</modified>
<user>12qh1wsht8cssxdtwl15rqh9</user>
<mountPoints>
<mountPoint>
<name xml:space="preserve">Archive</name>
<size>0</size>
<modified>2018-07-15-T22:04:59Z</modified>
</mountPoint>
<mountPoint>
<name xml:space="preserve">Shared</name>
<size>0</size>
<modified></modified>
</mountPoint>
<mountPoint>
<name xml:space="preserve">Sync</name>
<size>0</size>
<modified></modified>
</mountPoint>
</mountPoints>
<metadata first="" max="" total="3" num_mountpoints="3"/>
</device>
*/
// JottaDevice represents a Jottacloud Device
type JottaDevice struct {
Name string `xml:"name"`
DisplayName string `xml:"display_name"`
Type string `xml:"type"`
Sid string `xml:"sid"`
Size int64 `xml:"size"`
User string `xml:"user"`
MountPoints []JottaMountPoint `xml:"mountPoints>mountPoint"`
}
/*
GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>
<mountPoint time="2018-07-24-T20:35:02Z" host="dn-157">
<name xml:space="preserve">Sync</name>
<path xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta</path>
<abspath xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta</abspath>
<size>0</size>
<modified></modified>
<device>Jotta</device>
<user>12qh1wsht8cssxdtwl15rqh9</user>
<folders>
<folder name="test"/>
</folders>
<metadata first="" max="" total="1" num_folders="1" num_files="0"/>
</mountPoint>
*/
// JottaMountPoint represents a Jottacloud mountpoint
type JottaMountPoint struct {
Name string `xml:"name"`
Size int64 `xml:"size"`
Device string `xml:"device"`
Folders []JottaFolder `xml:"folders>folder"`
Files []JottaFile `xml:"files>file"`
}
/*
GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>/<folder>
<folder name="test" time="2018-07-24-T20:41:37Z" host="dn-158">
<path xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta/Sync</path>
<abspath xml:space="preserve">/12qh1wsht8cssxdtwl15rqh9/Jotta/Sync</abspath>
<folders>
<folder name="t2"/>
</folders>
<files>
<file name="block.csv" uuid="f6553cd4-1135-48fe-8e6a-bb9565c50ef2">
<currentRevision>
<number>1</number>
<state>COMPLETED</state>
<created>2018-07-05-T15:08:02Z</created>
<modified>2018-07-05-T15:08:02Z</modified>
<mime>application/octet-stream</mime>
<size>30827730</size>
<md5>1e8a7b728ab678048df00075c9507158</md5>
<updated>2018-07-24-T20:41:10Z</updated>
</currentRevision>
</file>
</files>
<metadata first="" max="" total="2" num_folders="1" num_files="1"/>
</folder>
*/
// JottaFolder represents a Jottacloud folder
type JottaFolder struct {
XMLName xml.Name
Name string `xml:"name,attr"`
Deleted Flag `xml:"deleted,attr"`
Path string `xml:"path"`
CreatedAt Time `xml:"created"`
ModifiedAt Time `xml:"modified"`
Updated Time `xml:"updated"`
Folders []JottaFolder `xml:"folders>folder"`
Files []JottaFile `xml:"files>file"`
}
/*
GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>/.../<file>
<file name="block.csv" uuid="f6553cd4-1135-48fe-8e6a-bb9565c50ef2">
<currentRevision>
<number>1</number>
<state>COMPLETED</state>
<created>2018-07-05-T15:08:02Z</created>
<modified>2018-07-05-T15:08:02Z</modified>
<mime>application/octet-stream</mime>
<size>30827730</size>
<md5>1e8a7b728ab678048df00075c9507158</md5>
<updated>2018-07-24-T20:41:10Z</updated>
</currentRevision>
</file>
*/
// JottaFile represents a Jottacloud file
type JottaFile struct {
XMLName xml.Name
Name string `xml:"name,attr"`
Deleted Flag `xml:"deleted,attr"`
PublicSharePath string `xml:"publicSharePath"`
State string `xml:"currentRevision>state"`
CreatedAt Time `xml:"currentRevision>created"`
ModifiedAt Time `xml:"currentRevision>modified"`
Updated Time `xml:"currentRevision>updated"`
Size int64 `xml:"currentRevision>size"`
MimeType string `xml:"currentRevision>mime"`
MD5 string `xml:"currentRevision>md5"`
}
// Error is a custom Error for wrapping Jottacloud error responses
type Error struct {
StatusCode int `xml:"code"`
Message string `xml:"message"`
Reason string `xml:"reason"`
Cause string `xml:"cause"`
}
// Error returns a string for the error and satisfies the error interface
func (e *Error) Error() string {
out := fmt.Sprintf("error %d", e.StatusCode)
if e.Message != "" {
out += ": " + e.Message
}
if e.Reason != "" {
out += fmt.Sprintf(" (%+v)", e.Reason)
}
return out
}
// AllocateFileRequest to prepare an upload to Jottacloud
type AllocateFileRequest struct {
Bytes int64 `json:"bytes"`
Created string `json:"created"`
Md5 string `json:"md5"`
Modified string `json:"modified"`
Path string `json:"path"`
}
// AllocateFileResponse for upload requests
type AllocateFileResponse struct {
Name string `json:"name"`
Path string `json:"path"`
State string `json:"state"`
UploadID string `json:"upload_id"`
UploadURL string `json:"upload_url"`
Bytes int64 `json:"bytes"`
ResumePos int64 `json:"resume_pos"`
}
// UploadResponse after an upload
type UploadResponse struct {
Name string `json:"name"`
Path string `json:"path"`
Kind string `json:"kind"`
ContentID string `json:"content_id"`
Bytes int64 `json:"bytes"`
Md5 string `json:"md5"`
Created int64 `json:"created"`
Modified int64 `json:"modified"`
Deleted interface{} `json:"deleted"`
Mime string `json:"mime"`
}

View File

@@ -1,29 +0,0 @@
package api
import (
"encoding/xml"
"testing"
"time"
)
func TestMountpointEmptyModificationTime(t *testing.T) {
mountpoint := `
<mountPoint time="2018-08-12-T09:58:24Z" host="dn-157">
<name xml:space="preserve">Sync</name>
<path xml:space="preserve">/foo/Jotta</path>
<abspath xml:space="preserve">/foo/Jotta</abspath>
<size>0</size>
<modified></modified>
<device>Jotta</device>
<user>foo</user>
<metadata first="" max="" total="0" num_folders="0" num_files="0"/>
</mountPoint>
`
var jf JottaFolder
if err := xml.Unmarshal([]byte(mountpoint), &jf); err != nil {
t.Fatal(err)
}
if !time.Time(jf.ModifiedAt).IsZero() {
t.Errorf("got non-zero time, want zero")
}
}

File diff suppressed because it is too large

View File

@@ -1,42 +0,0 @@
package jottacloud
import (
"crypto/md5"
"fmt"
"io"
"testing"
"github.com/ncw/rclone/lib/readers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestReadMD5(t *testing.T) {
// Check readMD5 for different size and threshold
for _, size := range []int64{0, 1024, 10 * 1024, 100 * 1024} {
t.Run(fmt.Sprintf("%d", size), func(t *testing.T) {
hasher := md5.New()
n, err := io.Copy(hasher, readers.NewPatternReader(size))
require.NoError(t, err)
assert.Equal(t, n, size)
wantMD5 := fmt.Sprintf("%x", hasher.Sum(nil))
for _, threshold := range []int64{512, 1024, 10 * 1024, 20 * 1024} {
t.Run(fmt.Sprintf("%d", threshold), func(t *testing.T) {
in := readers.NewPatternReader(size)
gotMD5, out, cleanup, err := readMD5(in, size, threshold)
defer cleanup()
require.NoError(t, err)
assert.Equal(t, wantMD5, gotMD5)
// check md5hash of out
hasher := md5.New()
n, err := io.Copy(hasher, out)
require.NoError(t, err)
assert.Equal(t, n, size)
outMD5 := fmt.Sprintf("%x", hasher.Sum(nil))
assert.Equal(t, wantMD5, outMD5)
})
}
})
}
}

View File

@@ -1,17 +0,0 @@
// Test Jottacloud filesystem interface
package jottacloud_test
import (
"testing"
"github.com/ncw/rclone/backend/jottacloud"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestJottacloud:",
NilObject: (*jottacloud.Object)(nil),
})
}

View File

@@ -1,77 +0,0 @@
/*
Translate file names for JottaCloud, adapted from OneDrive
The following characters are JottaCloud reserved characters, and can't
be used in JottaCloud folder and file names.
jottacloud = "/" / "\" / "*" / "<" / ">" / "?" / "!" / "&" / ":" / ";" / "|" / "#" / "%" / """ / "'" / "." / "~"
*/
package jottacloud
import (
"regexp"
"strings"
)
// charMap holds replacements for characters
//
// Jottacloud has a restricted set of characters compared to other cloud
// storage systems, so we map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
charMap = map[rune]rune{
'\\': '', // FULLWIDTH REVERSE SOLIDUS
'*': '', // FULLWIDTH ASTERISK
'<': '', // FULLWIDTH LESS-THAN SIGN
'>': '', // FULLWIDTH GREATER-THAN SIGN
'?': '', // FULLWIDTH QUESTION MARK
':': '', // FULLWIDTH COLON
';': '', // FULLWIDTH SEMICOLON
'|': '', // FULLWIDTH VERTICAL LINE
'"': '', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
' ': '␠', // SYMBOL FOR SPACE
}
invCharMap map[rune]rune
fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
fixEndingWithSpace = regexp.MustCompile(` (/|$)`)
)
func init() {
// Create inverse charMap
invCharMap = make(map[rune]rune, len(charMap))
for k, v := range charMap {
invCharMap[v] = k
}
}
// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
// Filenames can't start with space
in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
// Filenames can't end with space
in = fixEndingWithSpace.ReplaceAllString(in, string(charMap[' '])+"$1")
return strings.Map(func(c rune) rune {
if replacement, ok := charMap[c]; ok && c != ' ' {
return replacement
}
return c
}, in)
}
// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
return strings.Map(func(c rune) rune {
if replacement, ok := invCharMap[c]; ok {
return replacement
}
return c
}, in)
}

View File

@@ -1,28 +0,0 @@
package jottacloud
import "testing"
func TestReplace(t *testing.T) {
for _, test := range []struct {
in string
out string
}{
{"", ""},
{"abc 123", "abc 123"},
{`\*<>?:;|"`, ``},
{`\*<>?:;|"\*<>?:;|"`, ``},
{" leading space", "␠leading space"},
{"trailing space ", "trailing space␠"},
{" leading space/ leading space/ leading space", "␠leading space/␠leading space/␠leading space"},
{"trailing space /trailing space /trailing space ", "trailing space␠/trailing space␠/trailing space␠"},
} {
got := replaceReservedChars(test.in)
if got != test.out {
t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
}
got2 := restoreReservedChars(got)
if got2 != test.in {
t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
}
}
}

View File

@@ -1,589 +0,0 @@
package koofr
import (
"encoding/base64"
"errors"
"fmt"
"io"
"net/http"
"path"
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/hash"
httpclient "github.com/koofr/go-httpclient"
koofrclient "github.com/koofr/go-koofrclient"
)
// Register Fs with rclone
func init() {
fs.Register(&fs.RegInfo{
Name: "koofr",
Description: "Koofr",
NewFs: NewFs,
Options: []fs.Option{
{
Name: "endpoint",
Help: "The Koofr API endpoint to use",
Default: "https://app.koofr.net",
Required: true,
Advanced: true,
}, {
Name: "mountid",
Help: "Mount ID of the mount to use. If omitted, the primary mount is used.",
Required: false,
Default: "",
Advanced: true,
}, {
Name: "user",
Help: "Your Koofr user name",
Required: true,
}, {
Name: "password",
Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)",
IsPassword: true,
Required: true,
},
},
})
}
// Options represents the configuration of the Koofr backend
type Options struct {
Endpoint string `config:"endpoint"`
MountID string `config:"mountid"`
User string `config:"user"`
Password string `config:"password"`
}
// An Fs is a representation of a remote Koofr Fs
type Fs struct {
name string
mountID string
root string
opt Options
features *fs.Features
client *koofrclient.KoofrClient
}
// An Object on the remote Koofr Fs
type Object struct {
fs *Fs
remote string
info koofrclient.FileInfo
}
func base(pth string) string {
rv := path.Base(pth)
if rv == "" || rv == "." {
rv = "/"
}
return rv
}
func dir(pth string) string {
rv := path.Dir(pth)
if rv == "" || rv == "." {
rv = "/"
}
return rv
}
// String returns a string representation of the remote Object
func (o *Object) String() string {
return o.remote
}
// Remote returns the remote path of the Object, relative to Fs root
func (o *Object) Remote() string {
return o.remote
}
// ModTime returns the modification time of the Object
func (o *Object) ModTime() time.Time {
return time.Unix(o.info.Modified/1000, (o.info.Modified%1000)*1000*1000)
}
// Size returns the size of the Object in bytes
func (o *Object) Size() int64 {
return o.info.Size
}
// Fs returns a reference to the Koofr Fs containing the Object
func (o *Object) Fs() fs.Info {
return o.fs
}
// Hash returns an MD5 hash of the Object
func (o *Object) Hash(typ hash.Type) (string, error) {
if typ == hash.MD5 {
return o.info.Hash, nil
}
return "", nil
}
// fullPath returns the full path of the remote Object (including the Fs root)
func (o *Object) fullPath() string {
return o.fs.fullPath(o.remote)
}
// Storable returns true if the Object is storable
func (o *Object) Storable() bool {
return true
}
// SetModTime is not supported
func (o *Object) SetModTime(mtime time.Time) error {
return nil
}
// Open opens the Object for reading
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
var sOff, eOff int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
sOff = x.Offset
case *fs.RangeOption:
sOff = x.Start
eOff = x.End
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
if sOff == 0 && eOff < 0 {
return o.fs.client.FilesGet(o.fs.mountID, o.fullPath())
}
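// A negative start offset means the range asked for the trailing eOff
// bytes of the file, so rebase it to absolute offsets before building
// the FileSpan below.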
if sOff < 0 {
sOff = o.Size() - eOff
eOff = o.Size()
}
if eOff > o.Size() {
eOff = o.Size()
}
span := &koofrclient.FileSpan{
Start: sOff,
End: eOff,
}
return o.fs.client.FilesGetRange(o.fs.mountID, o.fullPath(), span)
}
// Update updates the Object contents
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
putopts := &koofrclient.PutFilter{
ForceOverwrite: true,
NoRename: true,
IgnoreNonExisting: true,
}
fullPath := o.fullPath()
dirPath := dir(fullPath)
name := base(fullPath)
err := o.fs.mkdir(dirPath)
if err != nil {
return err
}
info, err := o.fs.client.FilesPutOptions(o.fs.mountID, dirPath, name, in, putopts)
if err != nil {
return err
}
o.info = *info
return nil
}
// Remove deletes the remote Object
func (o *Object) Remove() error {
return o.fs.client.FilesDelete(o.fs.mountID, o.fullPath())
}
// Name returns the name of the Fs
func (f *Fs) Name() string {
return f.name
}
// Root returns the root path of the Fs
func (f *Fs) Root() string {
return f.root
}
// String returns a string representation of the Fs
func (f *Fs) String() string {
return "koofr:" + f.mountID + ":" + f.root
}
// Features returns the optional features supported by this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Precision denotes that setting modification times is not supported
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Hashes returns the set of hashes provided by the Fs
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// fullPath constructs a full, absolute path from an Fs root relative path.
func (f *Fs) fullPath(part string) string {
return path.Join("/", f.root, part)
}
// NewFs constructs a new filesystem given a root path and configuration options
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
opt := new(Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
pass, err := obscure.Reveal(opt.Password)
if err != nil {
return nil, err
}
client := koofrclient.NewKoofrClient(opt.Endpoint, false)
basicAuth := fmt.Sprintf("Basic %s",
base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass)))
client.HTTPClient.Headers.Set("Authorization", basicAuth)
mounts, err := client.Mounts()
if err != nil {
return nil, err
}
f := &Fs{
name: name,
root: root,
opt: *opt,
client: client,
}
f.features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: false,
BucketBased: false,
CanHaveEmptyDirectories: true,
}).Fill(f)
for _, m := range mounts {
if opt.MountID != "" {
if m.Id == opt.MountID {
f.mountID = m.Id
break
}
} else if m.IsPrimary {
f.mountID = m.Id
break
}
}
if f.mountID == "" {
if opt.MountID == "" {
return nil, errors.New("Failed to find primary mount")
}
return nil, errors.New("Failed to find mount " + opt.MountID)
}
rootFile, err := f.client.FilesInfo(f.mountID, "/"+f.root)
if err == nil && rootFile.Type != "dir" {
f.root = dir(f.root)
err = fs.ErrorIsFile
} else {
err = nil
}
return f, err
}
// List returns a list of items in a directory
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
if err != nil {
return nil, translateErrorsDir(err)
}
entries = make([]fs.DirEntry, len(files))
for i, file := range files {
if file.Type == "dir" {
entries[i] = fs.NewDir(path.Join(dir, file.Name), time.Unix(0, 0))
} else {
entries[i] = &Object{
fs: f,
info: file,
remote: path.Join(dir, file.Name),
}
}
}
return entries, nil
}
// NewObject creates a new remote Object for a given remote path
func (f *Fs) NewObject(remote string) (obj fs.Object, err error) {
info, err := f.client.FilesInfo(f.mountID, f.fullPath(remote))
if err != nil {
return nil, translateErrorsObject(err)
}
if info.Type == "dir" {
return nil, fs.ErrorNotAFile
}
return &Object{
fs: f,
info: info,
remote: remote,
}, nil
}
// Put updates a remote Object
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (obj fs.Object, err error) {
putopts := &koofrclient.PutFilter{
ForceOverwrite: true,
NoRename: true,
IgnoreNonExisting: true,
}
fullPath := f.fullPath(src.Remote())
dirPath := dir(fullPath)
name := base(fullPath)
err = f.mkdir(dirPath)
if err != nil {
return nil, err
}
info, err := f.client.FilesPutOptions(f.mountID, dirPath, name, in, putopts)
if err != nil {
return nil, translateErrorsObject(err)
}
return &Object{
fs: f,
info: *info,
remote: src.Remote(),
}, nil
}
// PutStream updates a remote Object with a stream of unknown size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...)
}
// isBadRequest is a predicate which holds true iff the error returned was
// HTTP status 400
func isBadRequest(err error) bool {
switch err := err.(type) {
case httpclient.InvalidStatusError:
if err.Got == http.StatusBadRequest {
return true
}
}
return false
}
// translateErrorsDir translates koofr errors to rclone errors (for a dir
// operation)
func translateErrorsDir(err error) error {
switch err := err.(type) {
case httpclient.InvalidStatusError:
if err.Got == http.StatusNotFound {
return fs.ErrorDirNotFound
}
}
return err
}
// translateErrorsObject translates Koofr errors to rclone errors (for an object operation)
func translateErrorsObject(err error) error {
switch err := err.(type) {
case httpclient.InvalidStatusError:
if err.Got == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
}
return err
}
// mkdir creates a directory at the given remote path. Creates ancestors if
// necessary
func (f *Fs) mkdir(fullPath string) error {
if fullPath == "/" {
return nil
}
info, err := f.client.FilesInfo(f.mountID, fullPath)
if err == nil && info.Type == "dir" {
return nil
}
err = translateErrorsDir(err)
if err != nil && err != fs.ErrorDirNotFound {
return err
}
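// Not there yet: walk the path from the root, creating each missing
// segment in turn.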
dirs := strings.Split(fullPath, "/")
parent := "/"
for _, part := range dirs {
if part == "" {
continue
}
info, err = f.client.FilesInfo(f.mountID, path.Join(parent, part))
if err != nil || info.Type != "dir" {
err = translateErrorsDir(err)
if err != nil && err != fs.ErrorDirNotFound {
return err
}
err = f.client.FilesNewFolder(f.mountID, parent, part)
if err != nil && !isBadRequest(err) {
return err
}
}
parent = path.Join(parent, part)
}
return nil
}
// Mkdir creates a directory at the given remote path. Creates ancestors if
// necessary
func (f *Fs) Mkdir(dir string) error {
fullPath := f.fullPath(dir)
return f.mkdir(fullPath)
}
// Rmdir removes an (empty) directory at the given remote path
func (f *Fs) Rmdir(dir string) error {
files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
if err != nil {
return translateErrorsDir(err)
}
if len(files) > 0 {
return fs.ErrorDirectoryNotEmpty
}
err = f.client.FilesDelete(f.mountID, f.fullPath(dir))
if err != nil {
return translateErrorsDir(err)
}
return nil
}
// Copy copies a remote Object to the given path
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
dstFullPath := f.fullPath(remote)
dstDir := dir(dstFullPath)
err := f.mkdir(dstDir)
if err != nil {
return nil, fs.ErrorCantCopy
}
err = f.client.FilesCopy((src.(*Object)).fs.mountID,
(src.(*Object)).fs.fullPath((src.(*Object)).remote),
f.mountID, dstFullPath)
if err != nil {
return nil, fs.ErrorCantCopy
}
return f.NewObject(remote)
}
// Move moves a remote Object to the given path
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj := src.(*Object)
dstFullPath := f.fullPath(remote)
dstDir := dir(dstFullPath)
err := f.mkdir(dstDir)
if err != nil {
return nil, fs.ErrorCantMove
}
err = f.client.FilesMove(srcObj.fs.mountID,
srcObj.fs.fullPath(srcObj.remote), f.mountID, dstFullPath)
if err != nil {
return nil, fs.ErrorCantMove
}
return f.NewObject(remote)
}
// DirMove moves a remote directory to the given path
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs := src.(*Fs)
srcFullPath := srcFs.fullPath(srcRemote)
dstFullPath := f.fullPath(dstRemote)
if srcFs.mountID == f.mountID && srcFullPath == dstFullPath {
return fs.ErrorDirExists
}
dstDir := dir(dstFullPath)
err := f.mkdir(dstDir)
if err != nil {
return fs.ErrorCantDirMove
}
err = f.client.FilesMove(srcFs.mountID, srcFullPath, f.mountID, dstFullPath)
if err != nil {
return fs.ErrorCantDirMove
}
return nil
}
// About reports space usage (with 1 MB precision)
func (f *Fs) About() (*fs.Usage, error) {
mount, err := f.client.MountsDetails(f.mountID)
if err != nil {
return nil, err
}
return &fs.Usage{
Total: fs.NewUsageValue(mount.SpaceTotal * 1024 * 1024),
Used: fs.NewUsageValue(mount.SpaceUsed * 1024 * 1024),
Trashed: nil,
Other: nil,
Free: fs.NewUsageValue((mount.SpaceTotal - mount.SpaceUsed) * 1024 * 1024),
Objects: nil,
}, nil
}
// Purge purges the complete Fs
func (f *Fs) Purge() error {
err := translateErrorsDir(f.client.FilesDelete(f.mountID, f.fullPath("")))
return err
}
// linkCreate is a Koofr API request for creating a public link
type linkCreate struct {
Path string `json:"path"`
}
// link is a Koofr API response to creating a public link
type link struct {
ID string `json:"id"`
Name string `json:"name"`
Path string `json:"path"`
Counter int64 `json:"counter"`
URL string `json:"url"`
ShortURL string `json:"shortUrl"`
Hash string `json:"hash"`
Host string `json:"host"`
HasPassword bool `json:"hasPassword"`
Password string `json:"password"`
ValidFrom int64 `json:"validFrom"`
ValidTo int64 `json:"validTo"`
PasswordRequired bool `json:"passwordRequired"`
}
// createLink makes a Koofr API call to create a public link
func createLink(c *koofrclient.KoofrClient, mountID string, path string) (*link, error) {
linkCreate := linkCreate{
Path: path,
}
linkData := link{}
request := httpclient.RequestData{
Method: "POST",
Path: "/api/v2/mounts/" + mountID + "/links",
ExpectedStatus: []int{http.StatusOK, http.StatusCreated},
ReqEncoding: httpclient.EncodingJSON,
ReqValue: linkCreate,
RespEncoding: httpclient.EncodingJSON,
RespValue: &linkData,
}
_, err := c.Request(&request)
if err != nil {
return nil, err
}
return &linkData, nil
}
// PublicLink creates a public link to the remote path
func (f *Fs) PublicLink(remote string) (string, error) {
linkData, err := createLink(f.client, f.mountID, f.fullPath(remote))
if err != nil {
return "", translateErrorsDir(err)
}
return linkData.ShortURL, nil
}

@@ -1,14 +0,0 @@
package koofr_test
import (
"testing"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestKoofr:",
})
}

@@ -16,7 +16,7 @@ func (f *Fs) About() (*fs.Usage, error) {
if err != nil {
return nil, errors.Wrap(err, "failed to read disk usage")
}
bs := int64(s.Bsize) // nolint: unconvert
bs := int64(s.Bsize)
usage := &fs.Usage{
Total: fs.NewUsageValue(bs * int64(s.Blocks)), // quota of bytes that can be used
Used: fs.NewUsageValue(bs * int64(s.Blocks-s.Bfree)), // bytes in use

@@ -1,20 +0,0 @@
// +build windows plan9
package local
import (
"time"
)
const haveLChtimes = false
// lChtimes changes the access and modification times of the named
// link, similar to the Unix utime() or utimes() functions.
//
// The underlying filesystem may truncate or round the values to a
// less precise time unit.
// If there is an error, it will be of type *PathError.
func lChtimes(name string, atime time.Time, mtime time.Time) error {
// Does nothing
return nil
}

@@ -1,28 +0,0 @@
// +build !windows,!plan9
package local
import (
"os"
"time"
"golang.org/x/sys/unix"
)
const haveLChtimes = true
// lChtimes changes the access and modification times of the named
// link, similar to the Unix utime() or utimes() functions.
//
// The underlying filesystem may truncate or round the values to a
// less precise time unit.
// If there is an error, it will be of type *PathError.
func lChtimes(name string, atime time.Time, mtime time.Time) error {
var utimes [2]unix.Timespec
utimes[0] = unix.NsecToTimespec(atime.UnixNano())
utimes[1] = unix.NsecToTimespec(mtime.UnixNano())
if e := unix.UtimesNanoAt(unix.AT_FDCWD, name, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); e != nil {
return &os.PathError{Op: "lchtimes", Path: name, Err: e}
}
return nil
}
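A hypothetical usage sketch, stamping the link itself rather than its target and handling the *os.PathError as with os.Chtimes:

	when := time.Date(2002, 2, 3, 4, 5, 10, 0, time.UTC) // hypothetical time
	if err := lChtimes("symlink.txt", when, when); err != nil {
		fmt.Println("lchtimes:", err) // e.g. the link does not exist
	}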

@@ -2,7 +2,6 @@
package local
import (
"bytes"
"fmt"
"io"
"io/ioutil"
@@ -17,19 +16,23 @@ import (
"unicode/utf8"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/file"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
"google.golang.org/appengine/log"
)
var (
followSymlinks = flags.BoolP("copy-links", "L", false, "Follow symlinks and copy the pointed to item.")
skipSymlinks = flags.BoolP("skip-links", "", false, "Don't warn about skipped symlinks.")
noUTFNorm = flags.BoolP("local-no-unicode-normalization", "", false, "Don't apply unicode normalization to paths and filenames")
noCheckUpdated = flags.BoolP("local-no-check-updated", "", false, "Don't check to see if the files change during upload")
)
// Constants
const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
const linkSuffix = ".rclonelink" // The suffix added to a translated symbolic link
// Register with Fs
func init() {
@@ -38,90 +41,29 @@ func init() {
Description: "Local Disk",
NewFs: NewFs,
Options: []fs.Option{{
Name: "nounc",
Help: "Disable UNC (long path names) conversion on Windows",
Name: "nounc",
Help: "Disable UNC (long path names) conversion on Windows",
Optional: true,
Examples: []fs.OptionExample{{
Value: "true",
Help: "Disables long file names",
}},
}, {
Name: "copy_links",
Help: "Follow symlinks and copy the pointed to item.",
Default: false,
NoPrefix: true,
ShortOpt: "L",
Advanced: true,
}, {
Name: "links",
Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension",
Default: false,
NoPrefix: true,
ShortOpt: "l",
Advanced: true,
}, {
Name: "skip_links",
Help: `Don't warn about skipped symlinks.
This flag disables warning messages on skipped symlinks or junction
points, as you explicitly acknowledge that they should be skipped.`,
Default: false,
NoPrefix: true,
Advanced: true,
}, {
Name: "no_unicode_normalization",
Help: `Don't apply unicode normalization to paths and filenames (Deprecated)
This flag is deprecated now. Rclone no longer normalizes unicode file
names, but it compares them with unicode normalization in the sync
routine instead.`,
Default: false,
Advanced: true,
}, {
Name: "no_check_updated",
Help: `Don't check to see if the files change during upload
Normally rclone checks the size and modification time of files as they
are being uploaded and aborts with a message which starts "can't copy
- source file is being updated" if the file changes during upload.
However on some file systems this modification time check may fail (eg
[Glusterfs #2206](https://github.com/ncw/rclone/issues/2206)) so this
check can be disabled with this flag.`,
Default: false,
Advanced: true,
}, {
Name: "one_file_system",
Help: "Don't cross filesystem boundaries (unix/macOS only).",
Default: false,
NoPrefix: true,
ShortOpt: "x",
Advanced: true,
}},
}
fs.Register(fsi)
}
// Options defines the configuration for this backend
type Options struct {
FollowSymlinks bool `config:"copy_links"`
TranslateSymlinks bool `config:"links"`
SkipSymlinks bool `config:"skip_links"`
NoUTFNorm bool `config:"no_unicode_normalization"`
NoCheckUpdated bool `config:"no_check_updated"`
NoUNC bool `config:"nounc"`
OneFileSystem bool `config:"one_file_system"`
}
// Fs represents a local filesystem rooted at root
type Fs struct {
name string // the name of the remote
root string // The root directory (OS path)
opt Options // parsed config options
features *fs.Features // optional features
dev uint64 // device number of root node
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
wmu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string
nounc bool // Skip UNC conversion on Windows
// do os.Lstat or os.Stat
lstat func(name string) (os.FileInfo, error)
dirNames *mapper // directory name mapping
@@ -130,40 +72,30 @@ type Fs struct {
// Object represents a local filesystem object
type Object struct {
fs *Fs // The Fs this object is part of
remote string // The remote path - properly UTF-8 encoded - for rclone
path string // The local path - may not be properly UTF-8 encoded - for OS
size int64 // file metadata - always present
mode os.FileMode
modTime time.Time
hashes map[hash.Type]string // Hashes
translatedLink bool // Is this object a translated link
fs *Fs // The Fs this object is part of
remote string // The remote path - properly UTF-8 encoded - for rclone
path string // The local path - may not be properly UTF-8 encoded - for OS
size int64 // file metadata - always present
mode os.FileMode
modTime time.Time
hashes map[hash.Type]string // Hashes
}
// ------------------------------------------------------------
var errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
// NewFs constructs an Fs from the path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if opt.TranslateSymlinks && opt.FollowSymlinks {
return nil, errLinksAndCopyLinks
}
if opt.NoUTFNorm {
fs.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
func NewFs(name, root string) (fs.Fs, error) {
var err error
if *noUTFNorm {
log.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
}
nounc := config.FileGet(name, "nounc")
f := &Fs{
name: name,
opt: *opt,
warned: make(map[string]struct{}),
nounc: nounc == "true",
dev: devUnset,
lstat: os.Lstat,
dirNames: newMapper(),
@@ -173,38 +105,24 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
CaseInsensitive: f.caseInsensitive(),
CanHaveEmptyDirectories: true,
}).Fill(f)
if opt.FollowSymlinks {
if *followSymlinks {
f.lstat = os.Stat
}
// Check to see if this points to a file
fi, err := f.lstat(f.root)
if err == nil {
f.dev = readDevice(fi, f.opt.OneFileSystem)
f.dev = readDevice(fi)
}
if err == nil && f.isRegular(fi.Mode()) {
if err == nil && fi.Mode().IsRegular() {
// It is a file, so use the parent as the root
f.root = filepath.Dir(f.root)
f.root, _ = getDirFile(f.root)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
// Determine whether a file is a 'regular' file.
// Symlinks count as regular files only if the TranslateSymlinks
// option is in effect.
func (f *Fs) isRegular(mode os.FileMode) bool {
if !f.opt.TranslateSymlinks {
return mode.IsRegular()
}
// fi.Mode().IsRegular() tests that all mode bits are zero
// Since symlinks are accepted, test that all other bits are zero,
// except the symlink bit
return mode&os.ModeType&^os.ModeSymlink == 0
}
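To make the mask arithmetic concrete, a sketch of the three interesting cases: clearing the symlink bit from the type bits leaves zero for plain files and symlinks but not for directories:

	fileMode := os.FileMode(0644)
	linkMode := os.ModeSymlink | 0777
	dirMode := os.ModeDir | 0755
	fmt.Println(fileMode&os.ModeType&^os.ModeSymlink == 0) // true  - plain file
	fmt.Println(linkMode&os.ModeType&^os.ModeSymlink == 0) // true  - symlink accepted
	fmt.Println(dirMode&os.ModeType&^os.ModeSymlink == 0)  // false - directory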
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
@@ -225,48 +143,28 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// caseInsensitive returns whether the remote is case insensitive or not
// caseInsenstive returns whether the remote is case insensitive or not
func (f *Fs) caseInsensitive() bool {
// FIXME not entirely accurate since you can have case
// sensitive Fses on darwin and case insensitive Fses on linux.
// sensitive Fses on darwin and case insenstive Fses on linux.
// Should probably check but that would involve creating a
// file in the remote to be most accurate which probably isn't
// desirable.
return runtime.GOOS == "windows" || runtime.GOOS == "darwin"
}
// translateLink checks whether the remote is a translated link
// and returns a new path with the suffix removed as needed.
// It also reports whether the remote was a translated link at all
//
// for regular files, dstPath is returned unchanged
func translateLink(remote, dstPath string) (newDstPath string, isTranslatedLink bool) {
isTranslatedLink = strings.HasSuffix(remote, linkSuffix)
newDstPath = strings.TrimSuffix(dstPath, linkSuffix)
return newDstPath, isTranslatedLink
}
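For example, with hypothetical paths:

	newPath, isLink := translateLink("link.txt"+linkSuffix, "/data/link.txt"+linkSuffix)
	// newPath == "/data/link.txt", isLink == true
	newPath, isLink = translateLink("plain.txt", "/data/plain.txt")
	// newPath == "/data/plain.txt", isLink == false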
// newObject makes a half completed Object
//
// if dstPath is empty then it is made from remote
func (f *Fs) newObject(remote, dstPath string) *Object {
translatedLink := false
if dstPath == "" {
dstPath = f.cleanPath(filepath.Join(f.root, remote))
}
remote = f.cleanRemote(remote)
if f.opt.TranslateSymlinks {
// Possibly receive a new name for dstPath
dstPath, translatedLink = translateLink(remote, dstPath)
}
return &Object{
fs: f,
remote: remote,
path: dstPath,
translatedLink: translatedLink,
fs: f,
remote: remote,
path: dstPath,
}
}
@@ -288,11 +186,6 @@ func (f *Fs) newObjectWithInfo(remote, dstPath string, info os.FileInfo) (fs.Obj
}
return nil, err
}
// Handle the odd case where a symlink was specified by name without the link suffix
if o.fs.opt.TranslateSymlinks && o.mode&os.ModeSymlink != 0 && !o.translatedLink {
return nil, fs.ErrorObjectNotFound
}
}
if o.mode.IsDir() {
return nil, errors.Wrapf(fs.ErrorNotAFile, "%q", remote)
@@ -316,7 +209,6 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
dir = f.dirNames.Load(dir)
fsDirPath := f.cleanPath(filepath.Join(f.root, dir))
remote := f.cleanRemote(dir)
@@ -351,15 +243,8 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
newRemote := path.Join(remote, name)
newPath := filepath.Join(fsDirPath, name)
// Follow symlinks if required
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
if *followSymlinks && (mode&os.ModeSymlink) != 0 {
fi, err = os.Stat(newPath)
if os.IsNotExist(err) {
// Skip bad symlinks
err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
fs.Errorf(newRemote, "Listing error: %v", err)
accounting.Stats.Error(err)
continue
}
if err != nil {
return nil, err
}
@@ -368,15 +253,11 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
if fi.IsDir() {
// Ignore directories which are symlinks. These are junction points under windows which
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi) {
d := fs.NewDir(f.dirNames.Save(newRemote, f.cleanRemote(newRemote)), fi.ModTime())
entries = append(entries, d)
}
} else {
// Check whether this link should be translated
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
newRemote += linkSuffix
}
fso, err := f.newObjectWithInfo(newRemote, newPath, fi)
if err != nil {
return nil, err
@@ -476,7 +357,7 @@ func (f *Fs) Mkdir(dir string) error {
if err != nil {
return err
}
f.dev = readDevice(fi, f.opt.OneFileSystem)
f.dev = readDevice(fi)
}
return nil
}
@@ -590,7 +471,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
// OK
} else if err != nil {
return nil, err
} else if !dstObj.fs.isRegular(dstObj.mode) {
} else if !dstObj.mode.IsRegular() {
// It isn't a file
return nil, errors.New("can't move file onto non-file")
}
@@ -649,7 +530,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
}
// Create parent of destination
dstParentPath := filepath.Dir(dstPath)
dstParentPath, _ := getDirFile(dstPath)
err = os.MkdirAll(dstParentPath, 0777)
if err != nil {
return err
@@ -712,13 +593,7 @@ func (o *Object) Hash(r hash.Type) (string, error) {
o.fs.objectHashesMu.Unlock()
if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil {
var in io.ReadCloser
if !o.translatedLink {
in, err = file.Open(o.path)
} else {
in, err = o.openTranslatedLink(0, -1)
}
in, err := os.Open(o.path)
if err != nil {
return "", errors.Wrap(err, "hash: failed to open")
}
@@ -749,12 +624,7 @@ func (o *Object) ModTime() time.Time {
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) error {
var err error
if o.translatedLink {
err = lChtimes(o.path, modTime, modTime)
} else {
err = os.Chtimes(o.path, modTime, modTime)
}
err := os.Chtimes(o.path, modTime, modTime)
if err != nil {
return err
}
@@ -772,8 +642,8 @@ func (o *Object) Storable() bool {
}
}
mode := o.mode
if mode&os.ModeSymlink != 0 && !o.fs.opt.TranslateSymlinks {
if !o.fs.opt.SkipSymlinks {
if mode&os.ModeSymlink != 0 {
if !*skipSymlinks {
fs.Logf(o, "Can't follow symlink without -L/--copy-links")
}
return false
@@ -798,7 +668,7 @@ type localOpenFile struct {
// Read bytes from the object - see io.Reader
func (file *localOpenFile) Read(p []byte) (n int, err error) {
if !file.o.fs.opt.NoCheckUpdated {
if !*noCheckUpdated {
// Check if file has the same size and modTime
fi, err := file.fd.Stat()
if err != nil {
@@ -833,16 +703,6 @@ func (file *localOpenFile) Close() (err error) {
return err
}
// Returns a ReadCloser() object that contains the contents of a symbolic link
func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err error) {
// Read the link and return its destination as the contents of the object
linkdst, err := os.Readlink(o.path)
if err != nil {
return nil, err
}
return readers.NewLimitedReadCloser(ioutil.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
}
// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
var offset, limit int64 = 0, -1
@@ -862,12 +722,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
}
}
// Handle a translated link
if o.translatedLink {
return o.openTranslatedLink(offset, limit)
}
fd, err := file.Open(o.path)
fd, err := os.Open(o.path)
if err != nil {
return
}
@@ -894,23 +749,12 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
// mkdirAll makes all the directories needed to store the object
func (o *Object) mkdirAll() error {
dir := filepath.Dir(o.path)
dir, _ := getDirFile(o.path)
return os.MkdirAll(dir, 0777)
}
type nopWriterCloser struct {
*bytes.Buffer
}
func (nwc nopWriterCloser) Close() error {
// noop
return nil
}
// Update the object from in with modTime and size
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
var out io.WriteCloser
hashes := hash.Supported
for _, option := range options {
switch x := option.(type) {
@@ -924,23 +768,9 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
return err
}
var symlinkData bytes.Buffer
// If the object is a regular file, create it.
// If it is a translated link, just read in the contents, and
// then create a symlink
if !o.translatedLink {
f, err := file.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return err
}
// Pre-allocate the file for performance reasons
err = preAllocate(src.Size(), f)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
}
out = f
} else {
out = nopWriterCloser{&symlinkData}
out, err := os.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return err
}
// Calculate the hash of the object we are reading as we go along
@@ -955,26 +785,6 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
if err == nil {
err = closeErr
}
if o.translatedLink {
if err == nil {
// Remove any current symlink or file, if one exists
if _, err := os.Lstat(o.path); err == nil {
if removeErr := os.Remove(o.path); removeErr != nil {
fs.Errorf(o, "Failed to remove previous file: %v", removeErr)
return removeErr
}
}
// Use the contents of the copied object to create a symlink
err = os.Symlink(symlinkData.String(), o.path)
}
// only continue if symlink creation succeeded
if err != nil {
return err
}
}
if err != nil {
fs.Logf(o, "Removing partially written file on error: %v", err)
if removeErr := os.Remove(o.path); removeErr != nil {
@@ -1027,6 +837,17 @@ func (o *Object) Remove() error {
return remove(o.path)
}
// Return the directory and file from an OS path. Assumes
// os.PathSeparator is used.
func getDirFile(s string) (string, string) {
i := strings.LastIndex(s, string(os.PathSeparator))
dir, file := s[:i], s[i+1:]
if dir == "" {
dir = string(os.PathSeparator)
}
return dir, file
}
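For example, on a system whose os.PathSeparator is '/':

	dir, file := getDirFile("/home/user/notes.txt") // dir == "/home/user", file == "notes.txt"
	dir, file = getDirFile("/rootfile")             // dir == "/", file == "rootfile"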
// cleanPathFragment cleans an OS path fragment which is part of a
// bigger path and not necessarily absolute
func cleanPathFragment(s string) string {
@@ -1057,7 +878,7 @@ func (f *Fs) cleanPath(s string) string {
s = s2
}
}
if !f.opt.NoUNC {
if !f.nounc {
// Convert to UNC
s = uncPath(s)
}

@@ -1,19 +1,13 @@
package local
import (
"io/ioutil"
"os"
"path"
"path/filepath"
"runtime"
"testing"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/lib/file"
"github.com/ncw/rclone/lib/readers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -44,17 +38,14 @@ func TestUpdatingCheck(t *testing.T) {
filePath := "sub dir/local test"
r.WriteFile(filePath, "content", time.Now())
fd, err := file.Open(path.Join(r.LocalName, filePath))
fd, err := os.Open(path.Join(r.LocalName, filePath))
if err != nil {
t.Fatalf("failed opening file %q: %v", filePath, err)
}
defer func() {
require.NoError(t, fd.Close())
}()
fi, err := fd.Stat()
require.NoError(t, err)
o := &Object{size: fi.Size(), modTime: fi.ModTime(), fs: &Fs{}}
o := &Object{size: fi.Size(), modTime: fi.ModTime()}
wrappedFd := readers.NewLimitedReadCloser(fd, -1)
hash, err := hash.NewMultiHasherTypes(hash.Supported)
require.NoError(t, err)
@@ -74,115 +65,14 @@ func TestUpdatingCheck(t *testing.T) {
require.Errorf(t, err, "can't copy - source file is being updated")
// turn the checking off and try again
in.o.fs.opt.NoCheckUpdated = true
*noCheckUpdated = true
defer func() {
*noCheckUpdated = false
}()
r.WriteFile(filePath, "content updated", time.Now())
_, err = in.Read(buf)
require.NoError(t, err)
}
func TestSymlink(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
f := r.Flocal.(*Fs)
dir := f.root
// Write a file
modTime1 := fstest.Time("2001-02-03T04:05:10.123123123Z")
file1 := r.WriteFile("file.txt", "hello", modTime1)
// Write a symlink
modTime2 := fstest.Time("2002-02-03T04:05:10.123123123Z")
symlinkPath := filepath.Join(dir, "symlink.txt")
require.NoError(t, os.Symlink("file.txt", symlinkPath))
require.NoError(t, lChtimes(symlinkPath, modTime2, modTime2))
// Object viewed as symlink
file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
if runtime.GOOS == "windows" {
file2.Size = 0 // symlinks are 0 length under Windows
}
// Object viewed as destination
file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
// Check with no symlink flags
fstest.CheckItems(t, r.Flocal, file1)
fstest.CheckItems(t, r.Fremote)
// Set fs into "-L" mode
f.opt.FollowSymlinks = true
f.opt.TranslateSymlinks = false
f.lstat = os.Stat
fstest.CheckItems(t, r.Flocal, file1, file2d)
fstest.CheckItems(t, r.Fremote)
// Set fs into "-l" mode
f.opt.FollowSymlinks = false
f.opt.TranslateSymlinks = true
f.lstat = os.Lstat
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2}, nil, fs.ModTimeNotSupported)
if haveLChtimes {
fstest.CheckItems(t, r.Flocal, file1, file2)
}
// Create a symlink
modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
file3 := r.WriteObjectTo(r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
if runtime.GOOS == "windows" {
file3.Size = 0 // symlinks are 0 length under Windows
}
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
if haveLChtimes {
fstest.CheckItems(t, r.Flocal, file1, file2, file3)
}
// Check it got the correct contents
symlinkPath = filepath.Join(dir, "symlink2.txt")
fi, err := os.Lstat(symlinkPath)
require.NoError(t, err)
assert.False(t, fi.Mode().IsRegular())
linkText, err := os.Readlink(symlinkPath)
require.NoError(t, err)
assert.Equal(t, "file.txt", linkText)
// Check that NewObject gets the correct object
o, err := r.Flocal.NewObject("symlink2.txt" + linkSuffix)
require.NoError(t, err)
assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
if runtime.GOOS != "windows" {
assert.Equal(t, int64(8), o.Size())
}
// Check that NewObject doesn't see the non suffixed version
_, err = r.Flocal.NewObject("symlink2.txt")
require.Equal(t, fs.ErrorObjectNotFound, err)
// Check reading the object
in, err := o.Open()
require.NoError(t, err)
contents, err := ioutil.ReadAll(in)
require.NoError(t, err)
require.Equal(t, "file.txt", string(contents))
require.NoError(t, in.Close())
// Check reading the object with range
in, err = o.Open(&fs.RangeOption{Start: 2, End: 5})
require.NoError(t, err)
contents, err = ioutil.ReadAll(in)
require.NoError(t, err)
require.Equal(t, "file.txt"[2:5+1], string(contents))
require.NoError(t, in.Close())
}
func TestSymlinkError(t *testing.T) {
m := configmap.Simple{
"links": "true",
"copy_links": "true",
}
_, err := NewFs("local", "/", m)
assert.Equal(t, errLinksAndCopyLinks, err)
}

@@ -1,10 +0,0 @@
//+build !windows,!linux
package local
import "os"
// preAllocate the file for performance reasons
func preAllocate(size int64, out *os.File) error {
return nil
}

@@ -1,22 +0,0 @@
//+build linux
package local
import (
"os"
"golang.org/x/sys/unix"
)
// preAllocate the file for performance reasons
func preAllocate(size int64, out *os.File) error {
if size <= 0 {
return nil
}
err := unix.Fallocate(int(out.Fd()), unix.FALLOC_FL_KEEP_SIZE, 0, size)
// FIXME could be doing something here
// if err == unix.ENOSPC {
// log.Printf("No space")
// }
return err
}
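A hypothetical usage sketch, reserving space ahead of a large sequential write and treating failure as non-fatal, as the caller in the backend does:

	if out, err := os.Create("/tmp/big.bin"); err == nil { // hypothetical destination
		if err := preAllocate(1<<30, out); err != nil {
			fmt.Printf("failed to pre-allocate: %v\n", err) // carry on regardless
		}
		// ... write up to 1 GiB sequentially, then close out
	}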

@@ -1,79 +0,0 @@
//+build windows
package local
import (
"os"
"syscall"
"unsafe"
"github.com/pkg/errors"
"golang.org/x/sys/windows"
)
var (
ntdll = windows.NewLazySystemDLL("ntdll.dll")
ntQueryVolumeInformationFile = ntdll.NewProc("NtQueryVolumeInformationFile")
ntSetInformationFile = ntdll.NewProc("NtSetInformationFile")
)
type fileAllocationInformation struct {
AllocationSize uint64
}
type fileFsSizeInformation struct {
TotalAllocationUnits uint64
AvailableAllocationUnits uint64
SectorsPerAllocationUnit uint32
BytesPerSector uint32
}
type ioStatusBlock struct {
Status, Information uintptr
}
// preAllocate the file for performance reasons
func preAllocate(size int64, out *os.File) error {
if size <= 0 {
return nil
}
var (
iosb ioStatusBlock
fsSizeInfo fileFsSizeInformation
allocInfo fileAllocationInformation
)
// Query info about the block sizes on the file system
_, _, e1 := ntQueryVolumeInformationFile.Call(
uintptr(out.Fd()),
uintptr(unsafe.Pointer(&iosb)),
uintptr(unsafe.Pointer(&fsSizeInfo)),
uintptr(unsafe.Sizeof(fsSizeInfo)),
uintptr(3), // FileFsSizeInformation
)
if e1 != nil && e1 != syscall.Errno(0) {
return errors.Wrap(e1, "preAllocate NtQueryVolumeInformationFile failed")
}
// Calculate the allocation size
clusterSize := uint64(fsSizeInfo.BytesPerSector) * uint64(fsSizeInfo.SectorsPerAllocationUnit)
if clusterSize <= 0 {
return errors.Errorf("preAllocate clusterSize %d <= 0", clusterSize)
}
allocInfo.AllocationSize = (1 + uint64(size-1)/clusterSize) * clusterSize
// Ask for the allocation
_, _, e1 = ntSetInformationFile.Call(
uintptr(out.Fd()),
uintptr(unsafe.Pointer(&iosb)),
uintptr(unsafe.Pointer(&allocInfo)),
uintptr(unsafe.Sizeof(allocInfo)),
uintptr(19), // FileAllocationInformation
)
if e1 != nil && e1 != syscall.Errno(0) {
return errors.Wrap(e1, "preAllocate NtSetInformationFile failed")
}
return nil
}
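The allocation size above is simply the file size rounded up to a whole number of clusters. A sketch of that arithmetic in isolation (valid for size > 0, which the early return guarantees):

	func roundUpToCluster(size, clusterSize uint64) uint64 {
		return (1 + (size-1)/clusterSize) * clusterSize
	}

	// roundUpToCluster(1, 4096)    == 4096
	// roundUpToCluster(4096, 4096) == 4096
	// roundUpToCluster(4097, 4096) == 8192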

@@ -8,6 +8,6 @@ import "os"
// readDevice turns a valid os.FileInfo into a device number,
// returning devUnset if it fails.
func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 {
func readDevice(fi os.FileInfo) uint64 {
return devUnset
}

@@ -9,12 +9,17 @@ import (
"syscall"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/flags"
)
var (
oneFileSystem = flags.BoolP("one-file-system", "x", false, "Don't cross filesystem boundaries.")
)
// readDevice turns a valid os.FileInfo into a device number,
// returning devUnset if it fails.
func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 {
if !oneFileSystem {
func readDevice(fi os.FileInfo) uint64 {
if !*oneFileSystem {
return devUnset
}
statT, ok := fi.Sys().(*syscall.Stat_t)
@@ -22,5 +27,5 @@ func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 {
fs.Debugf(fi.Name(), "Type assertion fi.Sys().(*syscall.Stat_t) failed from: %#v", fi.Sys())
return devUnset
}
return uint64(statT.Dev) // nolint: unconvert
return uint64(statT.Dev)
}

@@ -24,8 +24,8 @@ import (
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
@@ -39,10 +39,12 @@ const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
eventWaitTime = 500 * time.Millisecond
decayConstant = 2 // bigger for slower decay, exponential
decayConstant = 2 // bigger for slower decay, exponential
useTrash = true // FIXME make configurable - rclone global
)
var (
megaDebug = flags.BoolP("mega-debug", "", false, "If set then output more debug from mega.")
megaCacheMu sync.Mutex // mutex for the below
megaCache = map[string]*mega.Mega{} // cache logged in Mega's by user
)
@@ -56,49 +58,23 @@ func init() {
Options: []fs.Option{{
Name: "user",
Help: "User name",
Required: true,
Optional: true,
}, {
Name: "pass",
Help: "Password.",
Required: true,
Optional: true,
IsPassword: true,
}, {
Name: "debug",
Help: `Output more debug from Mega.
If this flag is set (along with -vv) it will print further debugging
information from the mega backend.`,
Default: false,
Advanced: true,
}, {
Name: "hard_delete",
Help: `Delete files permanently rather than putting them into the trash.
Normally the mega backend will put all deletions into the trash rather
than permanently deleting them. If you specify this then rclone will
permanently delete objects instead.`,
Default: false,
Advanced: true,
}},
})
}
// Options defines the configuration for this backend
type Options struct {
User string `config:"user"`
Pass string `config:"pass"`
Debug bool `config:"debug"`
HardDelete bool `config:"hard_delete"`
}
// Fs represents a remote mega
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed config options
features *fs.Features // optional features
srv *mega.Mega // the connection to the server
pacer *fs.Pacer // pacer for API calls
pacer *pacer.Pacer // pacer for API calls
rootNodeMu sync.Mutex // mutex for _rootNode
_rootNode *mega.Node // root node - call findRoot to use this
mkdirMu sync.Mutex // used to serialize calls to mkdir / rmdir
@@ -169,16 +145,12 @@ func (f *Fs) readMetaDataForPath(remote string) (info *mega.Node, err error) {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if opt.Pass != "" {
func NewFs(name, root string) (fs.Fs, error) {
user := config.FileGet(name, "user")
pass := config.FileGet(name, "pass")
if pass != "" {
var err error
opt.Pass, err = obscure.Reveal(opt.Pass)
pass, err = obscure.Reveal(pass)
if err != nil {
return nil, errors.Wrap(err, "couldn't decrypt password")
}
@@ -191,33 +163,32 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// them up between different remotes.
megaCacheMu.Lock()
defer megaCacheMu.Unlock()
srv := megaCache[opt.User]
srv := megaCache[user]
if srv == nil {
srv = mega.New().SetClient(fshttp.NewClient(fs.Config))
srv.SetRetries(fs.Config.LowLevelRetries) // let mega do the low level retries
srv.SetLogger(func(format string, v ...interface{}) {
fs.Infof("*go-mega*", format, v...)
})
if opt.Debug {
if *megaDebug {
srv.SetDebugger(func(format string, v ...interface{}) {
fs.Debugf("*go-mega*", format, v...)
})
}
err := srv.Login(opt.User, opt.Pass)
err := srv.Login(user, pass)
if err != nil {
return nil, errors.Wrap(err, "couldn't login")
}
megaCache[opt.User] = srv
megaCache[user] = srv
}
root = parsePath(root)
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: srv,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}
f.features = (&fs.Features{
DuplicateFiles: true,
@@ -225,7 +196,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}).Fill(f)
// Find the root node and check if it is a file or not
_, err = f.findRoot(false)
_, err := f.findRoot(false)
switch err {
case nil:
// root node found and is a directory
@@ -497,7 +468,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the dirNode, object, leaf and error
// Returns the dirNode, obect, leaf and error
//
// Used to create new objects
func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, dirNode *mega.Node, leaf string, err error) {
@@ -523,10 +494,10 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
exisitingObj, err := f.newObjectWithInfo(src.Remote(), nil)
switch err {
case nil:
return existingObj, existingObj.Update(in, src, options...)
return exisitingObj, exisitingObj.Update(in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(in, src)
@@ -569,7 +540,7 @@ func (f *Fs) Mkdir(dir string) error {
// deleteNode removes a file or directory, observing useTrash
func (f *Fs) deleteNode(node *mega.Node) (err error) {
err = f.pacer.Call(func() (bool, error) {
err = f.srv.Delete(node, f.opt.HardDelete)
err = f.srv.Delete(node, !useTrash)
return shouldRetry(err)
})
return err
@@ -847,14 +818,14 @@ func (f *Fs) MergeDirs(dirs []fs.Directory) error {
return shouldRetry(err)
})
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.GetName(), srcDir)
return errors.Wrapf(err, "MergDirs move failed on %q in %v", info.GetName(), srcDir)
}
}
// rmdir (into trash) the now empty source directory
fs.Infof(srcDir, "removing empty directory")
err = f.deleteNode(srcDirNode)
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
return errors.Wrapf(err, "MergDirs move failed to rmdir %q", srcDir)
}
}
return nil
@@ -1076,9 +1047,6 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
size := src.Size()
if size < 0 {
return errors.New("mega backend can't upload a file of unknown length")
}
//modTime := src.ModTime()
remote := o.Remote()
@@ -1129,7 +1097,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
return errors.Wrap(err, "failed to finish upload")
}
// If the upload succeeded and the original object existed, then delete it
// If the upload succeded and the original object existed, then delete it
if o.info != nil {
err = o.fs.deleteNode(o.info)
if err != nil {

@@ -2,16 +2,10 @@
package api
import (
"strings"
"time"
)
import "time"
const (
timeFormat = `"` + time.RFC3339 + `"`
// PackageTypeOneNote is the package type value for OneNote files
PackageTypeOneNote = "oneNote"
)
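Because the layout string carries its own double quotes, a Timestamp formats directly as a JSON string. A sketch, assuming the type wraps time.Time as the UnmarshalJSON further down suggests:

	t := time.Date(2018, 7, 11, 14, 56, 17, 0, time.UTC)
	fmt.Println(t.Format(timeFormat)) // prints "2018-07-11T14:56:17Z" including the quotes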
// Error is returned from one drive when things go wrong
@@ -25,7 +19,7 @@ type Error struct {
} `json:"error"`
}
// Error returns a string for the error and satisfies the error interface
// Error returns a string for the error and statistifes the error interface
func (e *Error) Error() string {
out := e.ErrorInfo.Code
if e.ErrorInfo.InnerError.Code != "" {
@@ -35,7 +29,7 @@ func (e *Error) Error() string {
return out
}
// Check Error satisfies the error interface
// Check Error statisfies the error interface
var _ error = (*Error)(nil)
// Identity represents an identity of an actor. For example, an actor
@@ -94,27 +88,9 @@ func (t *Timestamp) UnmarshalJSON(data []byte) error {
// ItemReference groups data needed to reference a OneDrive item
// across the service into a single structure.
type ItemReference struct {
DriveID string `json:"driveId"` // Unique identifier for the Drive that contains the item. Read-only.
ID string `json:"id"` // Unique identifier for the item. Read/Write.
Path string `json:"path"` // Path that used to navigate to the item. Read/Write.
DriveType string `json:"driveType"` // Type of the drive, Read-Only
}
// RemoteItemFacet groups data needed to reference a OneDrive remote item
type RemoteItemFacet struct {
ID string `json:"id"` // The unique identifier of the item within the remote Drive. Read-only.
Name string `json:"name"` // The name of the item (filename and extension). Read-write.
CreatedBy IdentitySet `json:"createdBy"` // Identity of the user, device, and application which created the item. Read-only.
LastModifiedBy IdentitySet `json:"lastModifiedBy"` // Identity of the user, device, and application which last modified the item. Read-only.
CreatedDateTime Timestamp `json:"createdDateTime"` // Date and time of item creation. Read-only.
LastModifiedDateTime Timestamp `json:"lastModifiedDateTime"` // Date and time the item was last modified. Read-only.
Folder *FolderFacet `json:"folder"` // Folder metadata, if the item is a folder. Read-only.
File *FileFacet `json:"file"` // File metadata, if the item is a file. Read-only.
Package *PackageFacet `json:"package"` // If present, indicates that this item is a package instead of a folder or file. Packages are treated like files in some contexts and folders in others. Read-only.
FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo"` // File system information on client. Read-write.
ParentReference *ItemReference `json:"parentReference"` // Parent information, if the item has a parent. Read-write.
Size int64 `json:"size"` // Size of the item in bytes. Read-only.
WebURL string `json:"webUrl"` // URL that displays the resource in the browser. Read-only.
DriveID string `json:"driveId"` // Unique identifier for the Drive that contains the item. Read-only.
ID string `json:"id"` // Unique identifier for the item. Read/Write.
Path string `json:"path"` // Path that used to navigate to the item. Read/Write.
}
// FolderFacet groups folder-related data on OneDrive into a single structure
@@ -151,13 +127,6 @@ type FileSystemInfoFacet struct {
type DeletedFacet struct {
}
// PackageFacet indicates that a DriveItem is the top level item
// in a "package" or a collection of items that should be treated as a collection instead of individual items.
// `oneNote` is the only currently defined value.
type PackageFacet struct {
Type string `json:"type"`
}
// Item represents metadata for an item in OneDrive
type Item struct {
ID string `json:"id"` // The unique identifier of the item within the Drive. Read-only.
@@ -174,14 +143,12 @@ type Item struct {
Description string `json:"description"` // Provide a user-visible description of the item. Read-write.
Folder *FolderFacet `json:"folder"` // Folder metadata, if the item is a folder. Read-only.
File *FileFacet `json:"file"` // File metadata, if the item is a file. Read-only.
RemoteItem *RemoteItemFacet `json:"remoteItem"` // Remote Item metadata, if the item is a remote shared item. Read-only.
FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo"` // File system information on client. Read-write.
// Image *ImageFacet `json:"image"` // Image metadata, if the item is an image. Read-only.
// Photo *PhotoFacet `json:"photo"` // Photo metadata, if the item is a photo. Read-only.
// Audio *AudioFacet `json:"audio"` // Audio metadata, if the item is an audio file. Read-only.
// Video *VideoFacet `json:"video"` // Video metadata, if the item is a video. Read-only.
// Location *LocationFacet `json:"location"` // Location metadata, if the item has location data. Read-only.
Package *PackageFacet `json:"package"` // If present, indicates that this item is a package instead of a folder or file. Packages are treated like files in some contexts and folders in others. Read-only.
Deleted *DeletedFacet `json:"deleted"` // Information about the deleted state of the item. Read-only.
}
@@ -250,28 +217,6 @@ type MoveItemRequest struct {
FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo,omitempty"` // File system information on client. Read-write.
}
//CreateShareLinkRequest is the request to create a sharing link
//Always Type:view and Scope:anonymous for public sharing
type CreateShareLinkRequest struct {
Type string `json:"type"` //Link type in View, Edit or Embed
Scope string `json:"scope,omitempty"` //Optional. Scope in anonymous, organization
}
//CreateShareLinkResponse is the response from CreateShareLinkRequest
type CreateShareLinkResponse struct {
ID string `json:"id"`
Roles []string `json:"roles"`
Link struct {
Type string `json:"type"`
Scope string `json:"scope"`
WebURL string `json:"webUrl"`
Application struct {
ID string `json:"id"`
DisplayName string `json:"displayName"`
} `json:"application"`
} `json:"link"`
}
// AsyncOperationStatus provides information on the status of an asynchronous job's progress.
//
// The following API calls return AsyncOperationStatus resources:
@@ -279,134 +224,7 @@ type CreateShareLinkResponse struct {
// Copy Item
// Upload From URL
type AsyncOperationStatus struct {
Operation string `json:"operation"` // The type of job being run.
PercentageComplete float64 `json:"percentageComplete"` // A float value between 0 and 100 that indicates the percentage complete.
Status string `json:"status"` // A string value that maps to an enumeration of possible values about the status of the job. "notStarted | inProgress | completed | updating | failed | deletePending | deleteFailed | waiting"
}
// GetID returns a normalized ID of the item
// If DriveID is known it will be prefixed to the ID with # separator
// Can be parsed using onedrive.parseNormalizedID(normalizedID)
func (i *Item) GetID() string {
if i.IsRemote() && i.RemoteItem.ID != "" {
return i.RemoteItem.ParentReference.DriveID + "#" + i.RemoteItem.ID
} else if i.ParentReference != nil && strings.Index(i.ID, "#") == -1 {
return i.ParentReference.DriveID + "#" + i.ID
}
return i.ID
}
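The inverse is a plain split on '#'; a sketch of what a parseNormalizedID helper would presumably do (the helper itself is not shown in this hunk):

	normalizedID := "b!driveABC#01ITEMXYZ" // hypothetical
	if i := strings.IndexByte(normalizedID, '#'); i >= 0 {
		driveID, itemID := normalizedID[:i], normalizedID[i+1:]
		fmt.Println(driveID, itemID) // b!driveABC 01ITEMXYZ
	}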
// GetDriveID returns a normalized ParentReference of the item
func (i *Item) GetDriveID() string {
return i.GetParentReference().DriveID
}
// GetName returns a normalized Name of the item
func (i *Item) GetName() string {
if i.IsRemote() && i.RemoteItem.Name != "" {
return i.RemoteItem.Name
}
return i.Name
}
// GetFolder returns a normalized Folder of the item
func (i *Item) GetFolder() *FolderFacet {
if i.IsRemote() && i.RemoteItem.Folder != nil {
return i.RemoteItem.Folder
}
return i.Folder
}
// GetPackage returns a normalized Package of the item
func (i *Item) GetPackage() *PackageFacet {
if i.IsRemote() && i.RemoteItem.Package != nil {
return i.RemoteItem.Package
}
return i.Package
}
// GetPackageType returns the package type of the item if available,
// otherwise ""
func (i *Item) GetPackageType() string {
pack := i.GetPackage()
if pack == nil {
return ""
}
return pack.Type
}
// GetFile returns a normalized File of the item
func (i *Item) GetFile() *FileFacet {
if i.IsRemote() && i.RemoteItem.File != nil {
return i.RemoteItem.File
}
return i.File
}
// GetFileSystemInfo returns a normalized FileSystemInfo of the item
func (i *Item) GetFileSystemInfo() *FileSystemInfoFacet {
if i.IsRemote() && i.RemoteItem.FileSystemInfo != nil {
return i.RemoteItem.FileSystemInfo
}
return i.FileSystemInfo
}
// GetSize returns a normalized Size of the item
func (i *Item) GetSize() int64 {
if i.IsRemote() && i.RemoteItem.Size != 0 {
return i.RemoteItem.Size
}
return i.Size
}
// GetWebURL returns a normalized WebURL of the item
func (i *Item) GetWebURL() string {
if i.IsRemote() && i.RemoteItem.WebURL != "" {
return i.RemoteItem.WebURL
}
return i.WebURL
}
// GetCreatedBy returns a normalized CreatedBy of the item
func (i *Item) GetCreatedBy() IdentitySet {
if i.IsRemote() && i.RemoteItem.CreatedBy != (IdentitySet{}) {
return i.RemoteItem.CreatedBy
}
return i.CreatedBy
}
// GetLastModifiedBy returns a normalized LastModifiedBy of the item
func (i *Item) GetLastModifiedBy() IdentitySet {
if i.IsRemote() && i.RemoteItem.LastModifiedBy != (IdentitySet{}) {
return i.RemoteItem.LastModifiedBy
}
return i.LastModifiedBy
}
// GetCreatedDateTime returns a normalized CreatedDateTime of the item
func (i *Item) GetCreatedDateTime() Timestamp {
if i.IsRemote() && i.RemoteItem.CreatedDateTime != (Timestamp{}) {
return i.RemoteItem.CreatedDateTime
}
return i.CreatedDateTime
}
// GetLastModifiedDateTime returns a normalized LastModifiedDateTime of the item
func (i *Item) GetLastModifiedDateTime() Timestamp {
if i.IsRemote() && i.RemoteItem.LastModifiedDateTime != (Timestamp{}) {
return i.RemoteItem.LastModifiedDateTime
}
return i.LastModifiedDateTime
}
// GetParentReference returns a normalized ParentReference of the item
func (i *Item) GetParentReference() *ItemReference {
if i.IsRemote() && i.ParentReference == nil {
return i.RemoteItem.ParentReference
}
return i.ParentReference
}
// IsRemote checks if item is a remote item
func (i *Item) IsRemote() bool {
return i.RemoteItem != nil
}

File diff suppressed because it is too large

@@ -1,10 +1,10 @@
// Test OneDrive filesystem interface
package onedrive
package onedrive_test
import (
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/backend/onedrive"
"github.com/ncw/rclone/fstest/fstests"
)
@@ -12,15 +12,6 @@ import (
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestOneDrive:",
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
CeilChunkSize: fstests.NextMultipleOf(chunkSizeMultiple),
},
NilObject: (*onedrive.Object)(nil),
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)

@@ -140,7 +140,7 @@ func TestQuickXorHashByBlock(t *testing.T) {
got := h.Sum(nil)
want, err := base64.StdEncoding.DecodeString(test.out)
require.NoError(t, err, what)
assert.Equal(t, want, got, test.size, what)
assert.Equal(t, want, got[:], test.size, what)
}
}
}

@@ -6,22 +6,19 @@ import (
"io"
"mime/multipart"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/dircache"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/readers"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
)
@@ -40,32 +37,25 @@ func init() {
Description: "OpenDrive",
NewFs: NewFs,
Options: []fs.Option{{
Name: "username",
Help: "Username",
Required: true,
Name: "username",
Help: "Username",
}, {
Name: "password",
Help: "Password.",
IsPassword: true,
Required: true,
}},
})
}
// Options defines the configuration for this backend
type Options struct {
UserName string `config:"username"`
Password string `config:"password"`
}
// Fs represents a remote server
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
username string // account name
password string // auth key0
srv *rest.Client // the connection to the server
pacer *fs.Pacer // To pace and retry the API calls
pacer *pacer.Pacer // To pace and retry the API calls
session UserSessionInfo // contains the session data
dirCache *dircache.DirCache // Map of directory path to directory id
}
@@ -119,32 +109,28 @@ func (f *Fs) DirCacheFlush() {
f.dirCache.ResetRoot()
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// NewFs contstructs an Fs from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
root = parsePath(root)
if opt.UserName == "" {
username := config.FileGet(name, "username")
if username == "" {
return nil, errors.New("username not found")
}
opt.Password, err = obscure.Reveal(opt.Password)
password, err := obscure.Reveal(config.FileGet(name, "password"))
if err != nil {
return nil, errors.New("password could not revealed")
return nil, errors.New("password coudl not revealed")
}
if opt.Password == "" {
if password == "" {
return nil, errors.New("password not found")
}
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
name: name,
username: username,
password: password,
root: root,
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}
f.dirCache = dircache.New(root, "0", f)
@@ -155,7 +141,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// get sessionID
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
account := Account{Username: opt.UserName, Password: opt.Password}
account := Account{Username: username, Password: password}
opts := rest.Opts{
Method: "POST",
@@ -179,17 +165,17 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
tempF := *f
tempF.dirCache = dircache.New(newRoot, "0", &tempF)
tempF.root = newRoot
newF := *f
newF.dirCache = dircache.New(newRoot, "0", &newF)
newF.root = newRoot
// Make new Fs which is the parent
err = tempF.dirCache.FindRoot(false)
err = newF.dirCache.FindRoot(false)
if err != nil {
// No root so return old f
return f, nil
}
_, err := tempF.newObjectWithInfo(remote, nil)
_, err := newF.newObjectWithInfo(remote, nil)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
@@ -197,13 +183,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
return nil, err
}
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/ncw/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
return &newF, fs.ErrorIsFile
}
return f, nil
}
@@ -287,6 +268,9 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
return err
}
f.dirCache.FlushDir(dir)
if err != nil {
return err
}
return nil
}
@@ -782,7 +766,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
remote := path.Join(dir, folder.Name)
// cache the directory ID for later lookups
f.dirCache.Put(remote, folder.FolderID)
d := fs.NewDir(remote, time.Unix(folder.DateModified, 0)).SetID(folder.FolderID)
d := fs.NewDir(remote, time.Unix(int64(folder.DateModified), 0)).SetID(folder.FolderID)
d.SetItems(int64(folder.ChildFolders))
entries = append(entries, d)
}
@@ -929,9 +913,8 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
// resp.Body.Close()
// fs.Debugf(nil, "PostOpen: %#v", openResponse)
// 10 MB chunks size
// 1 MB chunks size
chunkSize := int64(1024 * 1024 * 10)
buf := make([]byte, int(chunkSize))
chunkOffset := int64(0)
remainingBytes := size
chunkCounter := 0
@@ -944,19 +927,14 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
remainingBytes -= currentChunkSize
fs.Debugf(o, "Uploading chunk %d, size=%d, remain=%d", chunkCounter, currentChunkSize, remainingBytes)
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, currentChunkSize)
err = o.fs.pacer.Call(func() (bool, error) {
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, err
}
var formBody bytes.Buffer
w := multipart.NewWriter(&formBody)
fw, err := w.CreateFormFile("file_data", o.remote)
if err != nil {
return false, err
}
if _, err = io.Copy(fw, chunk); err != nil {
if _, err = io.CopyN(fw, in, currentChunkSize); err != nil {
return false, err
}
// Add session_id
@@ -1087,7 +1065,7 @@ func (o *Object) readMetaData() (err error) {
err = o.fs.pacer.Call(func() (bool, error) {
opts := rest.Opts{
Method: "GET",
Path: "/folder/itembyname.json/" + o.fs.session.SessionID + "/" + directoryID + "?name=" + url.QueryEscape(replaceReservedChars(leaf)),
Path: "/folder/itembyname.json/" + o.fs.session.SessionID + "/" + directoryID + "?name=" + rest.URLPathEscape(replaceReservedChars(leaf)),
}
resp, err = o.fs.srv.CallJSON(&opts, nil, &folderList)
return o.fs.shouldRetry(resp, err)

View File

@@ -13,7 +13,7 @@ type Error struct {
} `json:"error"`
}
// Error satisfies the error interface
// Error statisfies the error interface
func (e *Error) Error() string {
return fmt.Sprintf("%s (Error %d)", e.Info.Message, e.Info.Code)
}

@@ -41,7 +41,7 @@ type Error struct {
ErrorString string `json:"error"`
}
// Error returns a string for the error and satisfies the error interface
// Error returns a string for the error and statistifes the error interface
func (e *Error) Error() string {
return fmt.Sprintf("pcloud error: %s (%d)", e.ErrorString, e.Result)
}
@@ -58,7 +58,7 @@ func (e *Error) Update(err error) error {
return e
}
// Check Error satisfies the error interface
// Check Error statisfies the error interface
var _ error = (*Error)(nil)
// Item describes a folder or a file as returned by Get Folder Items and others
@@ -161,6 +161,7 @@ type UserInfo struct {
PublicLinkQuota int64 `json:"publiclinkquota"`
Email string `json:"email"`
UserID int `json:"userid"`
Result int `json:"result"`
Quota int64 `json:"quota"`
TrashRevretentionDays int `json:"trashrevretentiondays"`
Premium bool `json:"premium"`

@@ -23,8 +23,6 @@ import (
"github.com/ncw/rclone/backend/pcloud/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
@@ -67,35 +65,30 @@ func init() {
Name: "pcloud",
Description: "Pcloud",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
err := oauthutil.Config("pcloud", name, m, oauthConfig)
Config: func(name string) {
err := oauthutil.Config("pcloud", name, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Pcloud App Client Id\nLeave blank normally.",
Help: "Pcloud App Client Id - leave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Pcloud App Client Secret\nLeave blank normally.",
Help: "Pcloud App Client Secret - leave blank normally.",
}},
})
}
// Options defines the configuration for this backend
type Options struct {
}
// Fs represents a remote pcloud
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
pacer *pacer.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
}
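The pacer field change in this struct tracks an API migration visible later in the hunk: the builder style pacer.New().SetMinSleep(...).SetMaxSleep(...) became fs.NewPacer(pacer.NewDefault(pacer.MinSleep(...), ...)) with functional options. The contract the backend relies on is unchanged either way: Call runs a closure and retries while it reports (true, err). A toy pacer showing just that contract (constant sleep; rclone's real pacer also adapts the sleep via the decay constant):

package main

import (
    "fmt"
    "time"
)

// Pacer retries an operation with a fixed sleep between attempts.
type Pacer struct {
    sleep    time.Duration
    maxTries int
}

// Call invokes fn until it returns retry == false or maxTries is hit.
func (p *Pacer) Call(fn func() (retry bool, err error)) error {
    var err error
    for try := 1; try <= p.maxTries; try++ {
        var retry bool
        retry, err = fn()
        if !retry {
            return err
        }
        time.Sleep(p.sleep)
    }
    return err
}

func main() {
    p := &Pacer{sleep: 10 * time.Millisecond, maxTries: 3}
    calls := 0
    err := p.Call(func() (bool, error) {
        calls++
        if calls < 3 {
            return true, fmt.Errorf("transient failure %d", calls)
        }
        return false, nil
    })
    fmt.Println("calls:", calls, "err:", err)
}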
@@ -236,25 +229,18 @@ func errorHandler(resp *http.Response) error {
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
func NewFs(name, root string) (fs.Fs, error) {
root = parsePath(root)
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
oAuthClient, ts, err := oauthutil.NewClient(name, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Pcloud")
log.Fatalf("Failed to configure Pcloud: %v", err)
}
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
}
f.features = (&fs.Features{
CaseInsensitive: false,
@@ -276,16 +262,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
tempF := *f
tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
tempF.root = newRoot
newF := *f
newF.dirCache = dircache.New(newRoot, rootID, &newF)
newF.root = newRoot
// Make new Fs which is the parent
err = tempF.dirCache.FindRoot(false)
err = newF.dirCache.FindRoot(false)
if err != nil {
// No root so return old f
return f, nil
}
_, err := tempF.newObjectWithInfo(remote, nil)
_, err := newF.newObjectWithInfo(remote, nil)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
@@ -293,13 +279,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
return nil, err
}
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/ncw/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
return &newF, fs.ErrorIsFile
}
return f, nil
}
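The NewFs rewrite above is this backend's move to the configmap/configstruct scheme: options arrive as a string mapper and configstruct.Set decodes them into an Options struct driven by config:"..." struct tags (visible on the qingstor Options further down in this diff). A stripped-down sketch of that tag-driven decoding, handling only string fields where the real helper also converts ints, bools, SizeSuffix values and so on:

package main

import (
    "fmt"
    "reflect"
)

type Options struct {
    Endpoint string `config:"endpoint"`
    Zone     string `config:"zone"`
}

// set copies values from m into the config-tagged string fields of opt.
func set(m map[string]string, opt interface{}) {
    v := reflect.ValueOf(opt).Elem()
    t := v.Type()
    for i := 0; i < t.NumField(); i++ {
        key := t.Field(i).Tag.Get("config")
        if val, ok := m[key]; ok && v.Field(i).Kind() == reflect.String {
            v.Field(i).SetString(val)
        }
    }
}

func main() {
    opt := new(Options)
    set(map[string]string{"endpoint": "https://qingstor.com:443", "zone": "pek3a"}, opt)
    fmt.Printf("%+v\n", *opt)
}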
@@ -385,7 +366,7 @@ func fileIDtoNumber(fileID string) string {
if len(fileID) > 0 && fileID[0] == 'f' {
return fileID[1:]
}
fs.Debugf(nil, "Invalid file id %q", fileID)
fs.Debugf(nil, "Invalid filee id %q", fileID)
return fileID
}
@@ -1112,12 +1093,6 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
return shouldRetry(resp, err)
})
if err != nil {
// sometimes pcloud leaves a half complete file on
// error, so delete it if it exists
delObj, delErr := o.fs.NewObject(o.remote)
if delErr == nil && delObj != nil {
_ = delObj.Remove()
}
return err
}
if len(result.Items) != 1 {


@@ -17,8 +17,7 @@ import (
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
@@ -35,91 +34,57 @@ func init() {
Description: "QingCloud Object Storage",
NewFs: NewFs,
Options: []fs.Option{{
Name: "env_auth",
Help: "Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.",
Default: false,
Examples: []fs.OptionExample{{
Value: "false",
Help: "Enter QingStor credentials in the next step",
}, {
Value: "true",
Help: "Get QingStor credentials from the environment (env vars or IAM)",
}},
Name: "env_auth",
Help: "Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.",
Examples: []fs.OptionExample{
{
Value: "false",
Help: "Enter QingStor credentials in the next step",
}, {
Value: "true",
Help: "Get QingStor credentials from the environment (env vars or IAM)",
},
},
}, {
Name: "access_key_id",
Help: "QingStor Access Key ID\nLeave blank for anonymous access or runtime credentials.",
Help: "QingStor Access Key ID - leave blank for anonymous access or runtime credentials.",
}, {
Name: "secret_access_key",
Help: "QingStor Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
Help: "QingStor Secret Access Key (password) - leave blank for anonymous access or runtime credentials.",
}, {
Name: "endpoint",
Help: "Enter a endpoint URL to connection QingStor API.\nLeave blank will use the default value \"https://qingstor.com:443\"",
}, {
Name: "zone",
Help: "Zone to connect to.\nDefault is \"pek3a\".",
Examples: []fs.OptionExample{{
Value: "pek3a",
Help: "The Beijing (China) Three Zone\nNeeds location constraint pek3a.",
}, {
Value: "sh1a",
Help: "The Shanghai (China) First Zone\nNeeds location constraint sh1a.",
}, {
Value: "gd2a",
Help: "The Guangdong (China) Second Zone\nNeeds location constraint gd2a.",
}},
Help: "Choose or Enter a zone to connect. Default is \"pek3a\".",
Examples: []fs.OptionExample{
{
Value: "pek3a",
Help: "The Beijing (China) Three Zone\nNeeds location constraint pek3a.",
},
{
Value: "sh1a",
Help: "The Shanghai (China) First Zone\nNeeds location constraint sh1a.",
},
{
Value: "gd2a",
Help: "The Guangdong (China) Second Zone\nNeeds location constraint gd2a.",
},
},
}, {
Name: "connection_retries",
Help: "Number of connection retries.",
Default: 3,
Advanced: true,
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload
Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5GB.`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Chunk size to use for uploading.
When uploading files larger than upload_cutoff they will be uploaded
as multipart uploads using this chunk size.
Note that "--qingstor-upload-concurrency" chunks of this size are buffered
in memory per transfer.
If you are transferring large files over high speed links and you have
enough memory, then increasing this will speed up the transfers.`,
Default: minChunkSize,
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.
This is the number of chunks of the same file that are uploaded
concurrently.
NB if you set this to > 1 then the checksums of multpart uploads
become corrupted (the uploads themselves are not corrupted though).
If you are uploading small numbers of large file over high speed link
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
Default: 1,
Advanced: true,
Name: "connection_retries",
Help: "Number of connnection retry.\nLeave blank will use the default value \"3\".",
}},
})
}
// Constants
const (
listLimitSize = 1000 // Number of items to read at once
maxSizeForCopy = 1024 * 1024 * 1024 * 5 // The maximum size of object we can COPY
minChunkSize = fs.SizeSuffix(minMultiPartSize)
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
listLimitSize = 1000 // Number of items to read at once
maxSizeForCopy = 1024 * 1024 * 1024 * 5 // The maximum size of object we can COPY
)
// Globals
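The option-table rewrite above introduces typed defaults and three advanced knobs (upload_cutoff, chunk_size, upload_concurrency) alongside the new SizeSuffix constants (a 200 MiB default cutoff and a 5 GiB ceiling). For orientation, a remote using these options might look roughly like this in rclone.conf; the values are purely illustrative, and the default chunk_size (minChunkSize) is not shown in this hunk:

[myqingstor]
type = qingstor
env_auth = false
access_key_id = AKIAEXAMPLE
secret_access_key = exampleSecret
zone = pek3a
endpoint = https://qingstor.com:443
connection_retries = 3
upload_cutoff = 200M
chunk_size = 16M
upload_concurrency = 2

On the command line the same options surface as --qingstor-* flags, e.g. the --qingstor-upload-concurrency mentioned in the chunk_size help text above.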
@@ -130,31 +95,17 @@ func timestampToTime(tp int64) time.Time {
return tm.UTC()
}
// Options defines the configuration for this backend
type Options struct {
EnvAuth bool `config:"env_auth"`
AccessKeyID string `config:"access_key_id"`
SecretAccessKey string `config:"secret_access_key"`
Endpoint string `config:"endpoint"`
Zone string `config:"zone"`
ConnectionRetries int `config:"connection_retries"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
}
// Fs represents a remote qingstor server
type Fs struct {
name string // The name of the remote
root string // The root is a subdir, is a special object
opt Options // parsed options
features *fs.Features // optional features
svc *qs.Service // The connection to the qingstor server
zone string // The zone we are working on
bucket string // The bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucketOK and bucketDeleted
bucketOK bool // true if we have created the bucket
bucketDeleted bool // true if we have deleted the bucket
root string // The root is a subdir, is a special object
features *fs.Features // optional features
svc *qs.Service // The connection to the qingstor server
}
// Object describes a qingstor object
@@ -175,12 +126,10 @@ type Object struct {
// ------------------------------------------------------------
// Pattern to match a qingstor path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
// parseParse parses a qingstor 'url'
func qsParsePath(path string) (bucket, key string, err error) {
// Pattern to match a qingstor path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = errors.Errorf("Couldn't parse bucket out of qingstor path %q", path)
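Both sides of the qsParsePath hunk rely on the same idea: a regexp splits the first path segment (the bucket) from the rest (the key), with the newer package-level pattern also tolerating leading slashes. A runnable check of what that pattern captures:

package main

import (
    "fmt"
    "regexp"
)

// Same pattern as the package-level matcher in the hunk above.
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)

func main() {
    for _, p := range []string{"bucket/dir/file.txt", "/bucket", "bucket"} {
        parts := matcher.FindStringSubmatch(p)
        fmt.Printf("path=%q bucket=%q key=%q\n", p, parts[1], parts[2])
    }
}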
@@ -216,12 +165,12 @@ func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) {
}
// qsConnection makes a connection to qingstor
func qsServiceConnection(opt *Options) (*qs.Service, error) {
accessKeyID := opt.AccessKeyID
secretAccessKey := opt.SecretAccessKey
func qsServiceConnection(name string) (*qs.Service, error) {
accessKeyID := config.FileGet(name, "access_key_id")
secretAccessKey := config.FileGet(name, "secret_access_key")
switch {
case opt.EnvAuth:
case config.FileGetBool(name, "env_auth", false):
// No need for empty checks if "env_auth" is true
case accessKeyID == "" && secretAccessKey == "":
// if no access key/secret and iam is explicitly disabled then fall back to anon interaction
@@ -235,7 +184,7 @@ func qsServiceConnection(opt *Options) (*qs.Service, error) {
host := "qingstor.com"
port := 443
endpoint := opt.Endpoint
endpoint := config.FileGet(name, "endpoint", "")
if endpoint != "" {
_protocol, _host, _port, err := qsParseEndpoint(endpoint)
@@ -255,87 +204,48 @@ func qsServiceConnection(opt *Options) (*qs.Service, error) {
}
cf, err := qsConfig.NewDefault()
if err != nil {
return nil, err
connectionRetries := 3
retries := config.FileGet(name, "connection_retries", "")
if retries != "" {
connectionRetries, _ = strconv.Atoi(retries)
}
cf, err := qsConfig.NewDefault()
cf.AccessKeyID = accessKeyID
cf.SecretAccessKey = secretAccessKey
cf.Protocol = protocol
cf.Host = host
cf.Port = port
cf.ConnectionRetries = opt.ConnectionRetries
cf.ConnectionRetries = connectionRetries
cf.Connection = fshttp.NewClient(fs.Config)
return qs.Init(cf)
}
svc, _ := qs.Init(cf)
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
}
return
}
func checkUploadCutoff(cs fs.SizeSuffix) error {
if cs > maxUploadCutoff {
return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
}
return nil
}
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadCutoff(cs)
if err == nil {
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
}
return
return svc, err
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "qingstor: chunk size")
}
err = checkUploadCutoff(opt.UploadCutoff)
if err != nil {
return nil, errors.Wrap(err, "qingstor: upload cutoff")
}
func NewFs(name, root string) (fs.Fs, error) {
bucket, key, err := qsParsePath(root)
if err != nil {
return nil, err
}
svc, err := qsServiceConnection(opt)
svc, err := qsServiceConnection(name)
if err != nil {
return nil, err
}
if opt.Zone == "" {
opt.Zone = "pek3a"
zone := config.FileGet(name, "zone")
if zone == "" {
zone = "pek3a"
}
f := &Fs{
name: name,
zone: zone,
root: key,
opt: *opt,
svc: svc,
zone: opt.Zone,
bucket: bucket,
svc: svc,
}
f.features = (&fs.Features{
ReadMimeType: true,
@@ -348,7 +258,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f.root += "/"
}
//Check to see if the object exists
bucketInit, err := svc.Bucket(bucket, opt.Zone)
bucketInit, err := svc.Bucket(bucket, zone)
if err != nil {
return nil, err
}
@@ -449,7 +359,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
}
_, err = bucketInit.PutObject(key, &req)
if err != nil {
fs.Debugf(f, "Copy Failed, API Error: %v", err)
fs.Debugf(f, "Copied Faild, API Error: %v", err)
return nil, err
}
return f.NewObject(remote)
@@ -756,7 +666,7 @@ func (f *Fs) Mkdir(dir string) error {
}
switch *statistics.Status {
case "deleted":
fs.Debugf(f, "Wait for qingstor sync bucket status, retries: %d", retries)
fs.Debugf(f, "Wiat for qingstor sync bucket status, retries: %d", retries)
time.Sleep(time.Second * 1)
retries++
continue
@@ -875,7 +785,7 @@ func (o *Object) readMetaData() (err error) {
fs.Debugf(o, "Read metadata of key: %s", key)
resp, err := bucketInit.HeadObject(key, &qs.HeadObjectInput{})
if err != nil {
fs.Debugf(o, "Read metadata failed, API Error: %v", err)
fs.Debugf(o, "Read metadata faild, API Error: %v", err)
if e, ok := err.(*qsErr.QingStorError); ok {
if e.StatusCode == http.StatusNotFound {
return fs.ErrorObjectNotFound
@@ -994,24 +904,16 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
mimeType := fs.MimeType(src)
req := uploadInput{
body: in,
qsSvc: o.fs.svc,
bucket: o.fs.bucket,
zone: o.fs.zone,
key: key,
mimeType: mimeType,
partSize: int64(o.fs.opt.ChunkSize),
concurrency: o.fs.opt.UploadConcurrency,
body: in,
qsSvc: o.fs.svc,
bucket: o.fs.bucket,
zone: o.fs.zone,
key: key,
mimeType: mimeType,
}
uploader := newUploader(&req)
size := src.Size()
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
if multipart {
err = uploader.upload()
} else {
err = uploader.singlePartUpload(in, size)
}
err = uploader.upload()
if err != nil {
return err
}
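The reworked Update above picks the upload strategy from the source size: a negative size (unknown length, e.g. a streamed source) or anything at or above upload_cutoff takes the multipart path, while smaller objects go up as a single PUT. A worked check of that predicate using the 200 MiB default from this diff:

package main

import "fmt"

func main() {
    const uploadCutoff = int64(200 * 1024 * 1024) // 200 MiB default from the hunk above

    for _, size := range []int64{-1, 50 * 1024 * 1024, 200 * 1024 * 1024, 5 * 1024 * 1024 * 1024} {
        multipart := size < 0 || size >= uploadCutoff
        fmt.Printf("size=%d multipart=%v\n", size, multipart)
    }
}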


@@ -2,12 +2,12 @@
// +build !plan9
package qingstor
package qingstor_test
import (
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/backend/qingstor"
"github.com/ncw/rclone/fstest/fstests"
)
@@ -15,19 +15,6 @@ import (
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestQingStor:",
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
},
NilObject: (*qingstor.Object)(nil),
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
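This test hunk moves TestIntegration from the external package qingstor_test into package qingstor so the shared fstests harness can tune unexported settings, with exported SetUploadChunkSize/SetUploadCutoff shims as the bridge. The general idiom, sketched on a toy type (all names here are illustrative):

package main

import "fmt"

// Fs stands in for a backend with an unexported option; an external
// _test package could not reach chunkSize directly, which is why the
// hunk above switches to an internal test package and adds shims.
type Fs struct {
    chunkSize int64
}

// SetUploadChunkSize swaps in a new value and returns the old one so
// a test can restore it afterwards, mirroring the shim above.
func (f *Fs) SetUploadChunkSize(cs int64) (old int64) {
    old, f.chunkSize = f.chunkSize, cs
    return old
}

func main() {
    f := &Fs{chunkSize: 16 << 20}
    old := f.SetUploadChunkSize(1 << 20)
    fmt.Println("old:", old, "new:", f.chunkSize)
}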


@@ -143,7 +143,7 @@ func (u *uploader) init() {
// Try to adjust partSize if it is too small and account for
// integer division truncation.
if u.totalSize/u.cfg.partSize >= u.cfg.partSize {
if u.totalSize/u.cfg.partSize >= int64(u.cfg.partSize) {
// Add one to the part size to account for remainders
// during the size calculation. e.g odd number of bytes.
u.cfg.partSize = (u.totalSize / int64(u.cfg.maxUploadParts)) + 1
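Judging by the assignment it protects, the guard above is meant to keep the part count under maxUploadParts by growing the part size, with the +1 absorbing integer-division truncation for odd byte counts; note that both sides of the one-line change compare the part count against partSize rather than maxUploadParts, which looks like a pre-existing quirk. A worked example of the intended sizing math under hypothetical limits:

package main

import "fmt"

func main() {
    const (
        maxUploadParts = int64(10000)   // hypothetical cap on part count
        defaultPart    = int64(4 << 20) // hypothetical 4 MiB starting part size
    )
    totalSize := int64(100) << 30 // a 100 GiB upload

    partSize := defaultPart
    if totalSize/partSize >= maxUploadParts {
        // Grow the part size so the part count fits under the cap;
        // +1 absorbs integer-division truncation for odd byte counts.
        partSize = totalSize/maxUploadParts + 1
    }
    // ceiling division gives the actual number of parts needed
    fmt.Println("parts:", (totalSize+partSize-1)/partSize, "partSize:", partSize)
}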
@@ -152,18 +152,18 @@ func (u *uploader) init() {
}
// singlePartUpload upload a single object that contentLength less than "defaultUploadPartSize"
func (u *uploader) singlePartUpload(buf io.Reader, size int64) error {
func (u *uploader) singlePartUpload(buf io.ReadSeeker) error {
bucketInit, _ := u.bucketInit()
req := qs.PutObjectInput{
ContentLength: &size,
ContentLength: &u.readerPos,
ContentType: &u.cfg.mimeType,
Body: buf,
}
_, err := bucketInit.PutObject(u.cfg.key, &req)
if err == nil {
fs.Debugf(u, "Upload single object finished")
fs.Debugf(u, "Upload single objcet finished")
}
return err
}
@@ -179,13 +179,13 @@ func (u *uploader) upload() error {
// Do one read to determine if we have more than one part
reader, _, err := u.nextReader()
if err == io.EOF { // single part
fs.Debugf(u, "Uploading as single part object to QingStor")
return u.singlePartUpload(reader, u.readerPos)
fs.Debugf(u, "Tried to upload a singile object to QingStor")
return u.singlePartUpload(reader)
} else if err != nil {
return errors.Errorf("read upload data failed: %s", err)
}
fs.Debugf(u, "Uploading as multi-part object to QingStor")
fs.Debugf(u, "Treied to upload a multi-part object to QingStor")
mu := multiUploader{uploader: u}
return mu.multiPartUpload(reader)
}
@@ -261,7 +261,7 @@ func (mu *multiUploader) initiate() error {
req := qs.InitiateMultipartUploadInput{
ContentType: &mu.cfg.mimeType,
}
fs.Debugf(mu, "Initiating a multi-part upload")
fs.Debugf(mu, "Tried to initiate a multi-part upload")
rsp, err := bucketInit.InitiateMultipartUpload(mu.cfg.key, &req)
if err == nil {
mu.uploadID = rsp.UploadID
@@ -279,12 +279,12 @@ func (mu *multiUploader) send(c chunk) error {
ContentLength: &c.size,
Body: c.buffer,
}
fs.Debugf(mu, "Uploading a part to QingStor with partNumber %d and partSize %d", c.partNumber, c.size)
fs.Debugf(mu, "Tried to upload a part to QingStor that partNumber %d and partSize %d", c.partNumber, c.size)
_, err := bucketInit.UploadMultipart(mu.cfg.key, &req)
if err != nil {
return err
}
fs.Debugf(mu, "Done uploading part partNumber %d and partSize %d", c.partNumber, c.size)
fs.Debugf(mu, "Upload part finished that partNumber %d and partSize %d", c.partNumber, c.size)
mu.mtx.Lock()
defer mu.mtx.Unlock()
@@ -304,7 +304,7 @@ func (mu *multiUploader) list() error {
req := qs.ListMultipartInput{
UploadID: mu.uploadID,
}
fs.Debugf(mu, "Reading multi-part details")
fs.Debugf(mu, "Tried to list a multi-part")
rsp, err := bucketInit.ListMultipart(mu.cfg.key, &req)
if err == nil {
mu.objectParts = rsp.ObjectParts
@@ -331,7 +331,7 @@ func (mu *multiUploader) complete() error {
ObjectParts: mu.objectParts,
ETag: &md5String,
}
fs.Debugf(mu, "Completing multi-part object")
fs.Debugf(mu, "Tried to complete a multi-part")
_, err = bucketInit.CompleteMultipartUpload(mu.cfg.key, &req)
if err == nil {
fs.Debugf(mu, "Complete multi-part finished")
@@ -348,7 +348,7 @@ func (mu *multiUploader) abort() error {
req := qs.AbortMultipartUploadInput{
UploadID: uploadID,
}
fs.Debugf(mu, "Aborting multi-part object %q", *uploadID)
fs.Debugf(mu, "Tried to abort a multi-part")
_, err = bucketInit.AbortMultipartUpload(mu.cfg.key, &req)
}
@@ -392,14 +392,6 @@ func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) error {
var nextChunkLen int
reader, nextChunkLen, err = mu.nextReader()
if err != nil && err != io.EOF {
// empty ch
go func() {
for range ch {
}
}()
// Wait for all goroutines finish
close(ch)
mu.wg.Wait()
return err
}
if nextChunkLen == 0 && partNumber > 0 {
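The interesting lines in this final hunk are the failure shutdown for the concurrent multipart uploader: when nextReader errors mid-upload, the code drains the chunk channel so no sender can block, closes it, and waits for in-flight workers before returning the error. A compact, runnable sketch of that drain-close-wait pattern:

package main

import (
    "errors"
    "fmt"
    "sync"
)

func main() {
    ch := make(chan int)
    var wg sync.WaitGroup

    // workers consume chunks until the channel is closed
    for i := 0; i < 3; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            for c := range ch {
                fmt.Println("uploaded chunk", c)
            }
        }()
    }

    var err error
    for part := 1; part <= 10; part++ {
        if part == 4 {
            err = errors.New("read failed") // simulated nextReader error
            break
        }
        ch <- part
    }

    if err != nil {
        // drain anything a sender might still push (none in this toy,
        // but it mirrors the defensive empty-the-channel goroutine)
        go func() {
            for range ch {
            }
        }()
    }
    close(ch)
    wg.Wait() // wait for all goroutines to finish before returning err
    fmt.Println("done, err:", err)
}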

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff