mirror of https://github.com/rclone/rclone.git synced 2026-01-06 10:33:34 +00:00

Compare commits

1 commit
v1.38 ... v1.02

Author: Nick Craig-Wood
Commit: 094cc27621 (Version 1.02)
Date: 2014-07-19 13:12:20 +01:00
7269 changed files with 4915 additions and 4637300 deletions

appveyor.yml

@@ -1,46 +0,0 @@
version: "{build}"
os: Windows Server 2012 R2
clone_folder: c:\gopath\src\github.com\ncw\rclone
environment:
GOPATH: C:\gopath
CPATH: C:\Program Files (x86)\WinFsp\inc\fuse
ORIGPATH: '%PATH%'
NOCCPATH: C:\MinGW\bin;%GOPATH%\bin;%PATH%
PATHCC64: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin;%NOCCPATH%
PATHCC32: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%NOCCPATH%
PATH: '%PATHCC64%'
RCLONE_CONFIG_PASS:
secure: HbzxSy9zQ8NYWN9NNPf6ALQO9Q0mwRNqwehsLcOEHy0=
install:
- choco install winfsp -y
- choco install zip -y
- copy c:\MinGW\bin\mingw32-make.exe c:\MinGW\bin\make.exe
build_script:
- echo %PATH%
- echo %GOPATH%
- go version
- go env
- go install
- go build
- make log_since_last_release > %TEMP%\git-log.txt
- make version > %TEMP%\version
- set /p RCLONE_VERSION=<%TEMP%\version
- set PATH=%PATHCC32%
- go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/386" -cgo -tags cmount %RCLONE_VERSION%
- set PATH=%PATHCC64%
- go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/amd64" -cgo -no-clean -tags cmount %RCLONE_VERSION%
test_script:
- make GOTAGS=cmount quicktest
artifacts:
- path: rclone.exe
- path: build/*-v*.zip
deploy_script:
- IF "%APPVEYOR_REPO_BRANCH%" == "master" make upload_beta

.circleci/config.yml

@@ -1,31 +0,0 @@
version: 2
jobs:
build:
machine: true
working_directory: ~/.go_workspace/src/github.com/ncw/rclone
steps:
- checkout
- run:
name: Cross-compile rclone
command: |
docker pull billziss/xgo-cgofuse
go get -v github.com/karalabe/xgo
xgo \
--image=billziss/xgo-cgofuse \
--targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
-tags cmount \
.
- run:
name: Prepare artifacts
command: |
mkdir -p /tmp/rclone.dist
cp -R rclone-* /tmp/rclone.dist
- store_artifacts:
path: /tmp/rclone.dist

.gitignore

@@ -1,5 +1,9 @@
*~
_junk/
rclone
rclonetest/rclonetest
build
docs/public
README.html
README.txt
rclone.1


@@ -1,2 +0,0 @@
default_dependencies: false
cli: rclone

.travis.yml

@@ -1,43 +1,11 @@
language: go
sudo: false
osx_image: xcode7.3
os:
- linux
go:
- 1.6.4
- 1.7.6
- 1.8.3
- 1.9
- tip
install:
- git fetch --unshallow --tags
- make vars
- make build_dep
- 1.1.2
- 1.2.2
- 1.3
- tip
script:
- make check
- make quicktest
- make compile_all
env:
global:
- GOTAGS=cmount
matrix:
secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
addons:
apt:
packages:
- fuse
- libfuse-dev
matrix:
allow_failures:
- go: tip
include:
- os: osx
go: 1.9
env: GOTAGS=""
deploy:
provider: script
script: make travis_beta
on:
branch: master
go: 1.9
condition: "`uname` == 'Linux'"
- go get ./...
- go test -v ./...

CONTRIBUTING.md

@@ -1,276 +0,0 @@
# Contributing to rclone #
This is a short guide on how to contribute things to rclone.
## Reporting a bug ##
If you've just got a question or aren't sure if you've found a bug
then please use the [rclone forum](https://forum.rclone.org/) instead
of filing an issue.
When filing an issue, please include the following information if
possible as well as a description of the problem. Make sure you test
with the [latest beta of rclone](https://beta.rclone.org/):
* Rclone version (eg output from `rclone -V`)
* Which OS you are using and how many bits (eg Windows 7, 64 bit)
* The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
* A log of the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
* if the log contains secrets then edit the file with a text editor first to obscure them
## Submitting a pull request ##
If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via Github.
If it is a big feature then make an issue first so it can be discussed.
You'll need a Go environment set up with GOPATH set. See [the Go
getting started docs](https://golang.org/doc/install) for more info.
First in your web browser press the fork button on [rclone's Github
page](https://github.com/ncw/rclone).
Now in your terminal
go get github.com/ncw/rclone
cd $GOPATH/src/github.com/ncw/rclone
git remote rename origin upstream
git remote add origin git@github.com:YOURUSER/rclone.git
Make a branch to add your new feature
git checkout -b my-new-feature
And get hacking.
When ready - run the unit tests for the code you changed
go test -v
Note that you may need to make a test remote, eg `TestSwift` for some
of the unit tests.
Note the top level Makefile targets
* make check
* make test
Both of these will be run by Travis when you make a pull request but
you can do this yourself locally too. These require some extra go
packages which you can install with
* make build_dep
Make sure you
* Add documentation for a new feature (see below for where)
* Add unit tests for a new feature
* squash commits down to one per feature
* rebase to master `git rebase master`
When you are done with that
git push origin my-new-feature
Go to the Github website and click [Create pull
request](https://help.github.com/articles/creating-a-pull-request/).
Your patch will get reviewed and you might get asked to fix some stuff.
If so, then make the changes in the same branch, squash the commits,
rebase it to master then push it to Github with `--force`.
## Testing ##
rclone's tests are run from the go testing framework, so at the top
level you can run this to run all the tests.
go test -v ./...
rclone contains a mixture of unit tests and integration tests.
Because it is difficult (and in some respects pointless) to test cloud
storage systems by mocking all their interfaces, rclone unit tests can
run against any of the backends. This is done by making specially
named remotes in the default config file.
If you wanted to test changes in the `drive` backend, then you would
need to make a remote called `TestDrive`.
You can then run the unit tests in the drive directory. These tests
are skipped if `TestDrive:` isn't defined.
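For example, a minimal `TestDrive` entry in the default config file might look like this (illustrative only - the exact keys depend on the backend, and the entry is easiest to create with `rclone config`):

    [TestDrive]
    type = drive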
cd drive
go test -v
You can then run the integration tests which tests all of rclone's
operations. Normally these get run against the local filing system,
but they can be run against any of the remotes.
cd ../fs
go test -v -remote TestDrive:
go test -v -remote TestDrive: -subdir
If you want to run all the integration tests against all the remotes,
then run in that directory
go run test_all.go
## Writing Documentation ##
If you are adding a new feature then please update the documentation.
If you add a new flag, then if it is a general flag, document it in
`docs/content/docs.md` - the flags there are supposed to be in
alphabetical order. If it is a remote specific flag, then document it
in `docs/content/remote.md`.
The only documentation you need to edit are the `docs/content/*.md`
files. The MANUAL.*, rclone.1, web site etc are all auto generated
from those during the release process. See the `make doc` and `make
website` targets in the Makefile if you are interested in how. You
don't need to run these when adding a feature.
Documentation for rclone sub commands is with their code, eg
`cmd/ls/ls.go`.
## Making a release ##
There are separate instructions for making a release in the RELEASE.md
file.
## Commit messages ##
Please make the first line of your commit message a summary of the
change, and prefix it with the directory of the change followed by a
colon. The changelog gets made by looking at just these first lines
so make it good!
If you have more to say about the commit, then enter a blank line and
carry on the description. Remember to say why the change was needed -
the commit itself shows what was changed.
If the change fixes an issue then write `Fixes #1234` in the commit
message. This can be on the subject line if it will fit. If you
don't want to close the associated issue just put `#1234` and the
change will get linked into the issue.
Here is an example of a short commit message:
```
drive: add team drive support - fixes #885
```
And here is an example of a longer one:
```
mount: fix hang on errored upload
In certain circumstances if an upload failed then the mount could hang
indefinitely. This was fixed by closing the read pipe after the Put
completed. This will cause the write side to return a pipe closed
error fixing the hang.
Fixes #1498
```
## Adding a dependency ##
rclone uses the [dep](https://github.com/golang/dep) tool to manage
its dependencies. All code that rclone needs for building is stored
in the `vendor` directory for perfectly reproducible builds.
The `vendor` directory is entirely managed by the `dep` tool.
To add a new dependency
dep ensure github.com/pkg/errors
You can add constraints on that package (see the `dep` documentation),
but don't unless you really need to.
Please check in the changes generated by dep, including the `vendor`
directory, `Gopkg.toml` and `Gopkg.lock`, in a single commit
separate from any other code changes. Watch out for new files in
`vendor`.
## Updating a dependency ##
If you need to update a dependency then run
dep ensure -update github.com/pkg/errors
Check in the changes in a single commit as above.
## Updating all the dependencies ##
In order to update all the dependencies then run `make update`. This
just runs `dep ensure -update`. Check in the changes in a single
commit as above.
This should be done early in the release cycle to pick up new versions
of packages in time for them to get some testing.
## Updating a backend ##
If you update a backend then please run the unit tests and the
integration tests for that backend.
Assuming the backend is called `remote`, create a config entry
called `TestRemote` for the tests to use.
Now `cd remote` and run `go test -v` to run the unit tests.
Then `cd fs` and run `go test -v -remote TestRemote:` to run the
integration tests.
The next section goes into more detail about the tests.
## Writing a new backend ##
Choose a name. The docs here will use `remote` as an example.
Note that in rclone terminology a file system backend is called a
remote or an fs.
Research
* Look at the interfaces defined in `fs/fs.go`
* Study one or more of the existing remotes
Getting going
* Create `remote/remote.go` (copy this from a similar remote; a minimal illustrative sketch follows this list)
* onedrive is a good one to start from if you have a directory based remote
* b2 is a good one to start from if you have a bucket based remote
* Add your remote to the imports in `fs/all/all.go`
* HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
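As promised above, here is a minimal, self-contained Go sketch of the overall shape - an illustration only, not rclone's real interfaces (those live in `fs/fs.go` and are much richer):

```
// Illustrative sketch only - see fs/fs.go for the real interfaces.
package main

import (
	"errors"
	"fmt"
	"time"
)

// Fs is a stand-in for the filesystem interface a backend implements.
type Fs interface {
	Name() string            // name of the remote, eg "TestRemote"
	List() ([]Object, error) // list the objects in the remote
}

// Object is a stand-in for a single file on the remote.
type Object interface {
	Remote() string     // path of the object on the remote
	Size() int64        // size in bytes
	ModTime() time.Time // modification time
}

// remoteFs would normally hold an HTTP client or SDK session.
type remoteFs struct{ name string }

// NewFs is the kind of constructor a backend provides - the real one
// reads credentials from the named config entry.
func NewFs(name, root string) (Fs, error) {
	return &remoteFs{name: name}, nil
}

func (f *remoteFs) Name() string { return f.name }

func (f *remoteFs) List() ([]Object, error) {
	return nil, errors.New("not implemented")
}

func main() {
	f, err := NewFs("TestRemote", "")
	if err != nil {
		return
	}
	fmt.Println(f.Name())
}
```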
Unit tests
* Create a config entry called `TestRemote` for the unit tests to use
* Add your fs to the end of `fstest/fstests/gen_tests.go`
* generate `remote/remote_test.go` unit tests with `cd fstest/fstests; go generate`
* Make sure all tests pass with `go test -v`
Integration tests
* Add your fs to `fs/test_all.go`
* Make sure integration tests pass with
* `cd fs`
* `go test -v -remote TestRemote:`
* If you are making a bucket based remote, then check with this also
* `go test -v -remote TestRemote: -subdir`
* And if your remote defines `ListR` this also
* `go test -v -remote TestRemote: -fast-list`
Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in alphabetical order but with the local file system last.
* `README.md` - main Github page
* `docs/content/remote.md` - main docs page
* `docs/content/overview.md` - overview docs
* `docs/content/docs.md` - list of remotes in config section
* `docs/content/about.md` - front page of rclone.org
* `docs/layouts/chrome/navbar.html` - add it to the website navigation
* `bin/make_manual.py` - add the page to the `docs` constant
* `cmd/cmd.go` - the main help for rclone

Gopkg.lock (generated)

@@ -1,297 +0,0 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
branch = "master"
name = "bazil.org/fuse"
packages = [".","fs","fuseutil"]
revision = "371fbbdaa8987b715bdd21d6adc4c9b20155f748"
[[projects]]
name = "cloud.google.com/go"
packages = ["compute/metadata"]
revision = "a5913b3f7deecba45e98ff33cefbac4fd204ddd7"
version = "v0.10.0"
[[projects]]
name = "github.com/Azure/azure-sdk-for-go"
packages = ["storage"]
revision = "5b6066bbd213e47c49a5fa2be2b29529bbcb6704"
version = "v10.1.1-beta"
[[projects]]
name = "github.com/Azure/go-autorest"
packages = ["autorest","autorest/adal","autorest/azure","autorest/date"]
revision = "10791a4516e77c53ab10f198f144804cca3d5b43"
version = "v8.1.0"
[[projects]]
branch = "master"
name = "github.com/Unknwon/goconfig"
packages = ["."]
revision = "87a46d97951ee1ea20ed3b24c25646a79e87ba5d"
[[projects]]
branch = "master"
name = "github.com/VividCortex/ewma"
packages = ["."]
revision = "4cc8cc5a2a44f01d31b303a7280e20e00a6eafdb"
[[projects]]
branch = "master"
name = "github.com/a8m/tree"
packages = ["."]
revision = "fb478f41c87d959e328f2eac0c1b40f17a2f3e00"
[[projects]]
branch = "master"
name = "github.com/aws/aws-sdk-go"
packages = ["aws","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/stscreds","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","internal/shareddefaults","private/protocol","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/s3","service/s3/s3iface","service/s3/s3manager","service/sts"]
revision = "bd544a64249f735bcf394ab0c4fa8bec9b31f221"
[[projects]]
name = "github.com/billziss-gh/cgofuse"
packages = ["fuse"]
revision = "35bcf037030dcadcd247618c75c00c6cd17482d7"
version = "v1.0.2"
[[projects]]
name = "github.com/cpuguy83/go-md2man"
packages = ["md2man"]
revision = "a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa"
version = "v1.0.6"
[[projects]]
name = "github.com/davecgh/go-spew"
packages = ["spew"]
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"
[[projects]]
name = "github.com/dgrijalva/jwt-go"
packages = ["."]
revision = "d2709f9f1f31ebcda9651b03077758c1f3a0018c"
version = "v3.0.0"
[[projects]]
branch = "master"
name = "github.com/dropbox/dropbox-sdk-go-unofficial"
packages = ["dropbox","dropbox/async","dropbox/file_properties","dropbox/files"]
revision = "98997935f6b3ff4f4fa46275abaa23b02537178d"
[[projects]]
name = "github.com/go-ini/ini"
packages = ["."]
revision = "d3de07a94d22b4a0972deb4b96d790c2c0ce8333"
version = "v1.28.0"
[[projects]]
branch = "master"
name = "github.com/golang/protobuf"
packages = ["proto"]
revision = "0a4f71a498b7c4812f64969510bcb4eca251e33a"
[[projects]]
branch = "master"
name = "github.com/google/go-querystring"
packages = ["query"]
revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a"
[[projects]]
branch = "master"
name = "github.com/inconshreveable/mousetrap"
packages = ["."]
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
[[projects]]
branch = "master"
name = "github.com/jlaffaye/ftp"
packages = ["."]
revision = "769512c448b98e9efa243279a7e281248332aa98"
[[projects]]
name = "github.com/jmespath/go-jmespath"
packages = ["."]
revision = "3433f3ea46d9f8019119e7dd41274e112a2359a9"
version = "0.2.2"
[[projects]]
branch = "master"
name = "github.com/kr/fs"
packages = ["."]
revision = "2788f0dbd16903de03cb8186e5c7d97b69ad387b"
[[projects]]
name = "github.com/mattn/go-runewidth"
packages = ["."]
revision = "9e777a8366cce605130a531d2cd6363d07ad7317"
version = "v0.0.2"
[[projects]]
branch = "master"
name = "github.com/ncw/go-acd"
packages = ["."]
revision = "96a49aad3fc3889629f2eceb004927386884bd92"
[[projects]]
branch = "master"
name = "github.com/ncw/swift"
packages = ["."]
revision = "5068c3506cf003c630c94b92a64e978115394f26"
[[projects]]
branch = "master"
name = "github.com/nsf/termbox-go"
packages = ["."]
revision = "4ed959e0540971545eddb8c75514973d670cf739"
[[projects]]
name = "github.com/pengsrc/go-shared"
packages = ["check","convert","json","yaml"]
revision = "454950d6a0782c34427d4f29b46c6bf447256f20"
version = "v0.0.8"
[[projects]]
branch = "master"
name = "github.com/pkg/errors"
packages = ["."]
revision = "c605e284fe17294bda444b34710735b29d1a9d90"
[[projects]]
branch = "master"
name = "github.com/pkg/sftp"
packages = ["."]
revision = "4f3e725e885c021085d2fb8a9cc26e30ea1a992f"
[[projects]]
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "github.com/rfjakob/eme"
packages = ["."]
revision = "da627cc50b6fb2eb623eaffe91fb29d7eddfd06a"
[[projects]]
name = "github.com/russross/blackfriday"
packages = ["."]
revision = "0b647d0506a698cca42caca173e55559b12a69f2"
version = "v1.4"
[[projects]]
name = "github.com/satori/uuid"
packages = ["."]
revision = "879c5887cd475cd7864858769793b2ceb0d44feb"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/shurcooL/sanitized_anchor_name"
packages = ["."]
revision = "541ff5ee47f1dddf6a5281af78307d921524bcb5"
[[projects]]
name = "github.com/sirupsen/logrus"
packages = ["."]
revision = "a3f95b5c423586578a4e099b11a46c2479628cac"
version = "1.0.2"
[[projects]]
branch = "master"
name = "github.com/skratchdot/open-golang"
packages = ["open"]
revision = "75fb7ed4208cf72d323d7d02fd1a5964a7a9073c"
[[projects]]
branch = "master"
name = "github.com/spf13/cobra"
packages = [".","doc"]
revision = "2df9a531813370438a4d79bfc33e21f58063ed87"
[[projects]]
branch = "master"
name = "github.com/spf13/pflag"
packages = ["."]
revision = "7aff26db30c1be810f9de5038ec5ef96ac41fd7c"
[[projects]]
branch = "master"
name = "github.com/stretchr/testify"
packages = ["assert","require"]
revision = "05e8a0eda380579888eb53c394909df027f06991"
[[projects]]
branch = "master"
name = "github.com/xanzy/ssh-agent"
packages = ["."]
revision = "ba9c9e33906f58169366275e3450db66139a31a9"
[[projects]]
branch = "master"
name = "github.com/yunify/qingstor-sdk-go"
packages = [".","config","logger","request","request/builder","request/data","request/errors","request/signer","request/unpacker","service","utils"]
revision = "4749bc5ebe4857b353e55b49ebe91eed9a7f6cda"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = ["curve25519","ed25519","ed25519/internal/edwards25519","nacl/secretbox","pbkdf2","poly1305","salsa20/salsa","scrypt","ssh","ssh/agent","ssh/terminal"]
revision = "6914964337150723782436d56b3f21610a74ce7b"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = ["context","context/ctxhttp","html","html/atom"]
revision = "ab5485076ff3407ad2d02db054635913f017b0ed"
[[projects]]
branch = "master"
name = "golang.org/x/oauth2"
packages = [".","google","internal","jws","jwt"]
revision = "b53b38ad8a6435bd399ea76d0fa74f23149cca4e"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = ["unix"]
revision = "02a66801d979d706a4b445e24e03916eeaa2a404"
[[projects]]
branch = "master"
name = "golang.org/x/text"
packages = ["internal/gen","internal/triegen","internal/ucd","transform","unicode/cldr","unicode/norm"]
revision = "836efe42bb4aa16aaa17b9c155d8813d336ed720"
[[projects]]
branch = "master"
name = "golang.org/x/time"
packages = ["rate"]
revision = "8be79e1e0910c292df4e79c241bb7e8f7e725959"
[[projects]]
branch = "master"
name = "google.golang.org/api"
packages = ["drive/v2","gensupport","googleapi","googleapi/internal/uritemplates","storage/v1"]
revision = "295e4bb0ade057ae2cfb9876ab0b54635dbfcea4"
[[projects]]
name = "google.golang.org/appengine"
packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","log","urlfetch"]
revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
version = "v1.0.0"
[[projects]]
branch = "v2"
name = "gopkg.in/yaml.v2"
packages = ["."]
revision = "25c4ec802a7d637f88d584ab26798e94ad14c13b"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "119fcfb60b243cc6c75c9ebd02ddc913a1492a128cb7bdecb7ef180a0fc5be44"
solver-name = "gps-cdcl"
solver-version = 1

Gopkg.toml

@@ -1,138 +0,0 @@
## Gopkg.toml example (these lines may be deleted)
## "required" lists a set of packages (not projects) that must be included in
## Gopkg.lock. This list is merged with the set of packages imported by the current
## project. Use it when your project needs a package it doesn't explicitly import -
## including "main" packages.
# required = ["github.com/user/thing/cmd/thing"]
## "ignored" lists a set of packages (not projects) that are ignored when
## dep statically analyzes source code. Ignored packages can be in this project,
## or in a dependency.
# ignored = ["github.com/user/project/badpkg"]
## Dependencies define constraints on dependent projects. They are respected by
## dep whether coming from the Gopkg.toml of the current project or a dependency.
# [[constraint]]
## Required: the root import path of the project being constrained.
# name = "github.com/user/project"
#
## Recommended: the version constraint to enforce for the project.
## Only one of "branch", "version" or "revision" can be specified.
# version = "1.0.0"
# branch = "master"
# revision = "abc123"
#
## Optional: an alternate location (URL or import path) for the project's source.
# source = "https://github.com/myfork/package.git"
## Overrides have the same structure as [[constraint]], but supersede all
## [[constraint]] declarations from all projects. Only the current project's
## [[override]] are applied.
##
## Overrides are a sledgehammer. Use them only as a last resort.
# [[override]]
## Required: the root import path of the project being constrained.
# name = "github.com/user/project"
#
## Optional: specifying a version constraint override will cause all other
## constraints on this project to be ignored; only the overridden constraint
## need be satisfied.
## Again, only one of "branch", "version" or "revision" can be specified.
# version = "1.0.0"
# branch = "master"
# revision = "abc123"
#
## Optional: specifying an alternate source location as an override will
## enforce that the alternate location is used for that project, regardless of
## what source location any dependent projects specify.
# source = "https://github.com/myfork/package.git"
[[constraint]]
branch = "master"
name = "bazil.org/fuse"
[[constraint]]
branch = "master"
name = "github.com/Unknwon/goconfig"
[[constraint]]
branch = "master"
name = "github.com/VividCortex/ewma"
[[constraint]]
branch = "master"
name = "github.com/aws/aws-sdk-go"
[[constraint]]
branch = "master"
name = "github.com/ncw/go-acd"
[[constraint]]
branch = "master"
name = "github.com/ncw/swift"
[[constraint]]
branch = "master"
name = "github.com/pkg/errors"
[[constraint]]
branch = "master"
name = "github.com/pkg/sftp"
[[constraint]]
branch = "master"
name = "github.com/rfjakob/eme"
[[constraint]]
branch = "master"
name = "github.com/skratchdot/open-golang"
[[constraint]]
branch = "master"
name = "github.com/spf13/cobra"
[[constraint]]
branch = "master"
name = "github.com/spf13/pflag"
[[constraint]]
branch = "master"
name = "github.com/stacktic/dropbox"
[[constraint]]
branch = "master"
name = "github.com/stretchr/testify"
[[constraint]]
branch = "master"
name = "golang.org/x/crypto"
[[constraint]]
branch = "master"
name = "golang.org/x/net"
[[constraint]]
branch = "master"
name = "golang.org/x/oauth2"
[[constraint]]
branch = "master"
name = "golang.org/x/sys"
[[constraint]]
branch = "master"
name = "golang.org/x/text"
[[constraint]]
branch = "master"
name = "google.golang.org/api"
[[constraint]]
branch = "master"
name = "github.com/dropbox/dropbox-sdk-go-unofficial"
[[constraint]]
branch = "master"
name = "github.com/yunify/qingstor-sdk-go"

.github/ISSUE_TEMPLATE.md

@@ -1,17 +0,0 @@
When filing an issue, please include the following information if possible as well as a description of the problem. Make sure you test with the latest beta of rclone.
https://beta.rclone.org/
https://rclone.org/downloads/
If you've just got a question or aren't sure if you've found a bug then please use the [rclone forum](https://forum.rclone.org/) instead of filing an issue.
> What is your rclone version (eg output from `rclone -V`)
> Which OS you are using and how many bits (eg Windows 7, 64 bit)
> Which cloud storage system are you using? (eg Google Drive)
> The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
> A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)

MAINTAINERS.md

@@ -1,60 +0,0 @@
# Maintainers guide for rclone #
Current active maintainers of rclone are
* Nick Craig-Wood
* Stefan Breunig
**This is a work-in-progress draft**
This is a guide for how to be an rclone maintainer.
## Triaging Tickets ##
***FIXME*** this section needs some work!
When a ticket comes in it should be triaged. This means it should be classified into a bug or an enhancement or a request for support.
Quite a lot of tickets need a bit of back and forth to determine whether it is a valid ticket.
If it turns out to be a bug or an enhancement it should be tagged as such, with the appropriate other tags. Don't forget the "quickie" tag to give new contributors something easy to do to get going.
When a ticket is tagged it should be added to a milestone, either the next release, the one after, Soon or Unplanned. Bugs can be added to the "Known Bugs" milestone if they aren't planned to be fixed or need to wait for something (eg the next go release).
***FIXME*** I don't think I've quite got the milestone thing sorted yet. I was wondering about classifying them into priority, or what?
Tickets [with no milestone](https://github.com/ncw/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.
## Closing Tickets ##
Close tickets as soon as you can - make sure they are tagged with a release. Post a link to a beta in the ticket with the fix in, asking for feedback.
## Pull requests ##
Try to process pull requests promptly!
Merging pull requests on Github itself works quite well nowadays, so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
After merging the commit, in your local master branch, do `git pull` then run `bin/update-authors.py` to update the authors file then `git push`.
Sometimes pull requests need to be left open for a while - this is especially true of contributions of new backends which take a long time to get right.
## Merges ##
If you are merging a branch locally then do `git merge --ff-only branch-name` to avoid a merge commit. You'll need to rebase the branch if it doesn't merge cleanly.
## Release cycle ##
Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer if there is something big to merge that didn't stabilize properly or for personal reasons.
High impact regressions should be fixed before the next release.
Near the start of the release cycle the dependencies should be updated with `make update` to give time for bugs to surface.
Towards the end of the release cycle try not to merge anything too big, to let things settle down.
Follow the instructions in RELEASE.md for making the release. Note that the testing part is the most time consuming, often needing several rounds of test and fix, depending on exactly how many new features rclone has gained.
## TODO ##
I should probably make a mailing list for maintainers or at least an rclone-dev list, and I should probably make a dev@rclone.org to register with cloud providers.

MANUAL.html: diff suppressed because it is too large

MANUAL.md: diff suppressed because it is too large

MANUAL.txt: diff suppressed because it is too large

Makefile

@@ -1,176 +1,59 @@
SHELL = /bin/bash
TAG := $(shell echo `git describe --abbrev=8 --tags`-`git rev-parse --abbrev-ref HEAD` | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/; s/-\(HEAD\|master\)$$//')
TAG := $(shell git describe --tags)
LAST_TAG := $(shell git describe --tags --abbrev=0)
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
GO_VERSION := $(shell go version)
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
GO_LATEST := $(findstring go1.9,$(GO_VERSION))
BETA_URL := https://beta.rclone.org/$(TAG)/
# Pass in GOTAGS=xyz on the make command line to set build tags
ifdef GOTAGS
BUILDTAGS=-tags "$(GOTAGS)"
endif
.PHONY: rclone vars version
rclone: *.go */*.go
@go version
go build
rclone:
touch fs/version.go
go install -v --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
cp -av `go env GOPATH`/bin/rclone .
doc: rclone.1 README.html README.txt
vars:
@echo SHELL="'$(SHELL)'"
@echo TAG="'$(TAG)'"
@echo LAST_TAG="'$(LAST_TAG)'"
@echo NEW_TAG="'$(NEW_TAG)'"
@echo GO_VERSION="'$(GO_VERSION)'"
@echo GO_LATEST="'$(GO_LATEST)'"
@echo BETA_URL="'$(BETA_URL)'"
rclone.1: README.md
pandoc -s --from markdown --to man README.md -o rclone.1
version:
@echo '$(TAG)'
README.html: README.md
pandoc -s --from markdown_github --to html README.md -o README.html
# Full suite of integration tests
test: rclone
-go test $(BUILDTAGS) $(GO_FILES) 2>&1 | tee test.log
-cd fs && go run $(BUILDTAGS) test_all.go 2>&1 | tee test_all.log
@echo "Written logs in test.log and fs/test_all.log"
# Quick test
quicktest:
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) $(GO_FILES)
ifdef GO_LATEST
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race $(GO_FILES)
endif
# Do source code quality checks
check: rclone
ifdef GO_LATEST
go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
errcheck $(BUILDTAGS) $(GO_FILES)
find . -name \*.go | grep -v /vendor/ | xargs goimports -d | grep . ; test $$? -eq 1
go list ./... | grep -v /vendor/ | xargs -n1 golint | grep -E -v '(StorageUrl|CdnUrl)' ; test $$? -eq 1
else
@echo Skipping tests as not on Go stable
endif
# Get the build dependencies
build_dep:
ifdef GO_LATEST
go get -u github.com/kisielk/errcheck
go get -u golang.org/x/tools/cmd/goimports
go get -u github.com/golang/lint/golint
go get -u github.com/inconshreveable/mousetrap
go get -u github.com/tools/godep
endif
# Update dependencies
update:
go get -u github.com/golang/dep/cmd/dep
dep ensure -update -v
doc: rclone.1 MANUAL.html MANUAL.txt
rclone.1: MANUAL.md
pandoc -s --from markdown --to man MANUAL.md -o rclone.1
MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs
./bin/make_manual.py
MANUAL.html: MANUAL.md
pandoc -s --from markdown --to html MANUAL.md -o MANUAL.html
MANUAL.txt: MANUAL.md
pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt
commanddocs: rclone
rclone gendocs docs/content/commands/
README.txt: README.md
pandoc -s --from markdown_github --to plain README.md -o README.txt
install: rclone
install -d ${DESTDIR}/usr/bin
install -t ${DESTDIR}/usr/bin ${GOPATH}/bin/rclone
install -t ${DESTDIR}/usr/bin rclone
clean:
go clean ./...
find . -name \*~ | xargs -r rm -f
rm -rf build docs/public
rm -f rclone rclonetest/rclonetest
rm -f rclone rclonetest/rclonetest rclone.1 README.html README.txt
website:
cd docs && hugo
upload_website: website
rclone -v sync docs/public memstore:www-rclone-org
./rclone -v sync docs/public memstore:www-rclone-org
upload:
rclone -v copy build/ memstore:downloads-rclone-org
upload_github:
./bin/upload-github $(TAG)
./rclone -v copy build/ memstore:downloads-rclone-org
cross: doc
go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)
./cross-compile $(TAG)
beta:
go run bin/cross-compile.go $(BUILDTAGS) $(TAG)β
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)β
@echo Beta release ready at https://pub.rclone.org/$(TAG)%CE%B2/
log_since_last_release:
git log $(LAST_TAG)..
upload_beta:
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ memstore:beta-rclone-org/$(TAG)
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' build/ memstore:beta-rclone-org
@echo Beta release ready at $(BETA_URL)
compile_all:
ifdef GO_LATEST
go run bin/cross-compile.go -parallel 8 -compile-only $(BUILDTAGS) $(TAG)β
else
@echo Skipping compile all as not on Go stable
endif
travis_beta:
git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt -exclude "^windows/" -parallel 8 $(BUILDTAGS) $(TAG)β
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ memstore:beta-rclone-org/$(TAG)
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' build/ memstore:beta-rclone-org
@echo Beta release ready at $(BETA_URL)
# Fetch the windows builds from appveyor
fetch_windows:
rclone -v copy --include 'rclone-v*-windows-*.zip' memstore:beta-rclone-org/$(TAG) build/
-#cp -av build/rclone-v*-windows-386.zip build/rclone-current-windows-386.zip
-#cp -av build/rclone-v*-windows-amd64.zip build/rclone-current-windows-amd64.zip
md5sum build/rclone-*-windows-*.zip | sort
serve: website
serve:
cd docs && hugo server -v -w
tag: doc
tag:
@echo "Old tag is $(LAST_TAG)"
@echo "New tag is $(NEW_TAG)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
echo -n "$(NEW_TAG)" > docs/layouts/shortcodes/version.html
echo -e "package fs\n const Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
perl -lpe 's/VERSION/${NEW_TAG}/g; s/DATE/'`date -I`'/g;' docs/content/downloads.md.in > docs/content/downloads.md
git tag $(NEW_TAG)
@echo "Edit the new changelog in docs/content/changelog.md"
@echo " * $(NEW_TAG) -" `date -I` >> docs/content/changelog.md
@git log $(LAST_TAG)..$(NEW_TAG) --oneline >> docs/content/changelog.md
@echo "Add this to changelog in README.md"
@echo " * $(NEW_TAG) -" `date -I`
@git log $(LAST_TAG)..$(NEW_TAG) --oneline
@echo "Then commit the changes"
@echo git commit -m \"Version $(NEW_TAG)\" -a -v
@echo git commit -m "Version $(NEW_TAG)" -a -v
@echo "And finally run make retag before make cross etc"
retag:
git tag -f $(LAST_TAG)
startdev:
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(LAST_TAG)-DEV\"\n" | gofmt > fs/version.go
git commit -m "Start $(LAST_TAG)-DEV development" fs/version.go
gen_tests:
cd fstest/fstests && go generate
winzip:
zip -9 rclone-$(TAG).zip rclone.exe

README.md

@@ -1,57 +1,325 @@
[![Logo](https://rclone.org/img/rclone-120x120.png)](https://rclone.org/)
% rclone(1) User Manual
% Nick Craig-Wood
% Jul 7, 2014
[Website](https://rclone.org) |
[Documentation](https://rclone.org/docs/) |
[Contributing](CONTRIBUTING.md) |
[Changelog](https://rclone.org/changelog/) |
[Installation](https://rclone.org/install/) |
[Forum](https://forum.rclone.org/)
[G+](https://google.com/+RcloneOrg)
Rclone
======
[![Build Status](https://travis-ci.org/ncw/rclone.svg?branch=master)](https://travis-ci.org/ncw/rclone)
[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/ncw/rclone?branch=master&passingText=windows%20-%20ok&svg=true)](https://ci.appveyor.com/project/ncw/rclone)
[![CircleCI](https://circleci.com/gh/ncw/rclone/tree/master.svg?style=svg)](https://circleci.com/gh/ncw/rclone/tree/master)
[![GoDoc](https://godoc.org/github.com/ncw/rclone?status.svg)](https://godoc.org/github.com/ncw/rclone)
[![Logo](http://rclone.org/img/rclone-120x120.png)](http://rclone.org/)
Rclone is a command line program to sync files and directories to and from
* Amazon Drive
* Amazon S3 / Dreamhost / Ceph / Minio / Wasabi
* Backblaze B2
* Box
* Dropbox
* FTP
* Google Cloud Storage
* Google Drive
* HTTP
* Hubic
* Microsoft Azure Blob Storage
* Microsoft OneDrive
* Openstack Swift / Rackspace cloud files / Memset Memstore / OVH / Oracle Cloud Storage
* QingStor
* SFTP
* Yandex Disk
* Amazon S3
* Openstack Swift / Rackspace cloud files / Memset Memstore
* Dropbox
* Google Cloud Storage
* The local filesystem
Features
* MD5/SHA1 hashes checked at all times for file integrity
* MD5SUMs checked at all times for file integrity
* Timestamps preserved on files
* Partial syncs supported on a whole file basis
* Copy mode to just copy new/changed files
* Sync (one way) mode to make a directory identical
* Check mode to check for file hash equality
* Can sync to and from network, eg two different cloud accounts
* Optional encryption (Crypt)
* Optional FUSE mount
* Sync mode to make a directory identical
* Check mode to check all MD5SUMs
* Can sync to and from network, eg two different Drive accounts
See the home page for installation, usage, documentation, changelog
and configuration walkthroughs.
See the Home page for more documentation and configuration walkthroughs.
* https://rclone.org/
* http://rclone.org/
Install
-------
Rclone is a Go program and comes as a single binary file.
Download the binary for your OS from
* http://rclone.org/downloads/
Or alternatively if you have Go installed use
go install github.com/ncw/rclone
and this will build the binary in `$GOPATH/bin`.
Configure
---------
First you'll need to configure rclone. As the object storage systems
have quite complicated authentication these are kept in a config file
`.rclone.conf` in your home directory by default. (You can use the
`--config` option to choose a different config file.)
The easiest way to make the config is to run rclone with the config
option, eg
rclone config
Usage
-----
Rclone syncs a directory tree from local to remote.
Its basic syntax is
Syntax: [options] subcommand <parameters> <parameters...>
See below for how to specify the source and destination paths.
Subcommands
-----------
rclone copy source:path dest:path
Copy the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
MD5SUM. Doesn't delete files from the destination.
rclone sync source:path dest:path
Sync the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
MD5SUM. Deletes any files that exist in source that don't
exist in destination. Since this can cause data loss, test
first with the `--dry-run` flag. (An illustrative sketch of this unchanged-file test appears after this list.)
rclone ls [remote:path]
List all the objects in the path with sizes.
rclone lsl [remote:path]
List all the objects in the path with sizes and timestamps.
rclone lsd [remote:path]
List all directories/objects/buckets in the path.
rclone mkdir remote:path
Make the path if it doesn't already exist
rclone rmdir remote:path
Remove the path. Note that you can't remove a path with
objects in it, use purge for that.
rclone purge remote:path
Remove the path and all of its contents.
rclone check source:path dest:path
Checks the files in the source and destination match. It
compares sizes and MD5SUMs and prints a report of files which
don't match. It doesn't alter the source or destination.
rclone md5sum remote:path
Produces an md5sum file for all the objects in the path. This is in
the same format as the standard md5sum tool produces.
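To make the "unchanged file" test under `copy` and `sync` concrete, here is an illustrative Go sketch (not rclone's actual code - `needTransfer` and its parameters are invented for this example):

```
package main

import (
	"fmt"
	"time"
)

// needTransfer reports whether a file should be copied: if the
// modification times match (within window) the file is assumed
// unchanged, otherwise the MD5SUMs decide. Illustrative only.
func needTransfer(srcTime, dstTime time.Time, srcMD5, dstMD5 string, window time.Duration) bool {
	dt := srcTime.Sub(dstTime)
	if dt < 0 {
		dt = -dt
	}
	if dt <= window {
		return false // times match - assume unchanged
	}
	return srcMD5 != dstMD5 // times differ - fall back to MD5SUM
}

func main() {
	now := time.Now()
	fmt.Println(needTransfer(now, now, "abc", "abc", time.Nanosecond)) // false
}
```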
General options:
```
--checkers=8: Number of checkers to run in parallel.
--config="~/.rclone.conf": Config file.
-n, --dry-run=false: Do a trial run with no permanent changes
--modify-window=1ns: Max time diff to be considered the same
-q, --quiet=false: Print as little stuff as possible
--stats=1m0s: Interval to print stats
--transfers=4: Number of file transfers to run in parallel.
-v, --verbose=false: Print lots more stuff
```
Developer options:
```
--cpuprofile="": Write cpu profile to file
```
Local Filesystem
----------------
Paths are specified as normal filesystem paths, so
rclone sync /home/source /tmp/destination
Will sync `/home/source` to `/tmp/destination`
Swift / Rackspace cloudfiles / Memset Memstore
----------------------------------------------
Paths are specified as remote:container (or remote: for the `lsd`
command.) You may put subdirectories in too, eg
`remote:container/path/to/dir`.
So to copy a local directory to a swift container called backup:
rclone sync /home/source swift:backup
The modified time is stored as metadata on the object as
`X-Object-Meta-Mtime` as a floating point number of seconds since the epoch.
This is a de facto standard (used in the official python-swiftclient
amongst others) for storing the modification time (as read using
os.Stat) for an object.
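As an illustration of that metadata value, the fragment below formats a time the same way (a sketch, not rclone's code - `swiftMtime` is invented here):

```
package main

import (
	"fmt"
	"time"
)

// swiftMtime renders t in the X-Object-Meta-Mtime style: floating
// point seconds since the Unix epoch, eg "1405767140.000000001".
func swiftMtime(t time.Time) string {
	const nsPerSec = 1000000000
	ns := t.UnixNano()
	return fmt.Sprintf("%d.%09d", ns/nsPerSec, ns%nsPerSec)
}

func main() {
	fmt.Println(swiftMtime(time.Now()))
}
```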
Amazon S3
---------
Paths are specified as remote:bucket. You may put subdirectories in
too, eg `remote:bucket/path/to/dir`.
So to copy a local directory to an s3 container called backup
rclone sync /home/source s3:backup
The modified time is stored as metadata on the object as
`X-Amz-Meta-Mtime` as a floating point number of seconds since the epoch.
Google drive
------------
Paths are specified as remote:path. Drive paths may be as deep as required.
The initial setup for drive involves getting a token from Google drive
which you need to do in your browser. `rclone config` walks you
through it.
To copy a local directory to a drive directory called backup
rclone copy /home/source remote:backup
Google drive stores modification times accurate to 1 ms natively.
Dropbox
-------
Paths are specified as remote:path. Dropbox paths may be as deep as required.
The initial setup for dropbox involves getting a token from Dropbox
which you need to do in your browser. `rclone config` walks you
through it.
To copy a local directory to a dropbox directory called backup
rclone copy /home/source dropbox:backup
Md5sums and timestamps in RFC3339 format accurate to 1ns are stored in
a Dropbox datastore called "rclone". Dropbox datastores are limited
to 100,000 rows so this is the maximum number of files rclone can
manage on Dropbox.
Google Cloud Storage
--------------------
Paths are specified as remote:path. Google Cloud Storage paths may be
as deep as required.
The initial setup for Google Cloud Storage involves getting a token
from Google which you need to do in your browser. `rclone config`
walks you through it.
To copy a local directory to a google cloud storage directory called backup
rclone copy /home/source remote:backup
Google Cloud Storage stores md5sums natively and rclone stores
modification times as metadata on the object, under the "mtime" key in
RFC3339 format accurate to 1ns.
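For illustration, Go's standard library produces exactly this kind of value (a sketch, not rclone's code):

```
package main

import (
	"fmt"
	"time"
)

func main() {
	// RFC3339 with up to nanosecond precision, as stored under the
	// "mtime" key, eg "2014-07-19T13:12:20.123456789+01:00".
	// (time.RFC3339Nano trims trailing zeros from the fraction.)
	fmt.Println(time.Now().Format(time.RFC3339Nano))
}
```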
Single file copies
------------------
Rclone can copy single files
rclone src:path/to/file dst:path/dir
Or
rclone src:path/to/file dst:path/to/file
Note that you can't rename the file if you are copying from one file to another.
License
-------
This is free software under the terms of the MIT license (check the
COPYING file included in this package).
Bugs
----
* Drive: Sometimes get: Failed to copy: Upload failed: googleapi: Error 403: Rate Limit Exceeded
* quota is 100.0 requests/second/user
* Empty directories left behind with Local and Drive
* eg purging a local directory with subdirectories doesn't work
Changelog
---------
* v1.02 - 2014-07-19
* Implement Dropbox remote
* Implement Google Cloud Storage remote
* Verify Md5sums and Sizes after copies
* Remove times from "ls" command - lists sizes only
* Add "lsl" - lists times and sizes
* Add "md5sum" command
* v1.01 - 2014-07-04
* drive: fix transfer of big files using up lots of memory
* v1.00 - 2014-07-03
* drive: fix whole second dates
* v0.99 - 2014-06-26
* Fix --dry-run not working
* Make compatible with go 1.1
* v0.98 - 2014-05-30
* s3: Treat missing Content-Length as 0 for some ceph installations
* rclonetest: add file with a space in
* v0.97 - 2014-05-05
* Implement copying of single files
* s3 & swift: support paths inside containers/buckets
* v0.96 - 2014-04-24
* drive: Fix multiple files of same name being created
* drive: Use o.Update and fs.Put to optimise transfers
* Add version number, -V and --version
* v0.95 - 2014-03-28
* rclone.org: website, docs and graphics
* drive: fix path parsing
* v0.94 - 2014-03-27
* Change remote format one last time
* GNU style flags
* v0.93 - 2014-03-16
* drive: store token in config file
* cross compile other versions
* set strict permissions on config file
* v0.92 - 2014-03-15
* Config fixes and --config option
* v0.91 - 2014-03-15
* Make config file
* v0.90 - 2013-06-27
* Project named rclone
* v0.00 - 2012-11-18
* Project started
Contact and support
-------------------
The project website is at:
* https://github.com/ncw/rclone
There you can file bug reports, ask for help or send pull requests.
Authors
-------
* Nick Craig-Wood <nick@craig-wood.com>
Contributors
------------
* Your name goes here!

RELEASE.md

@@ -1,36 +0,0 @@
Extra required software for making a release
* [github-release](https://github.com/aktau/github-release) for uploading packages
* pandoc for making the html and man pages
Making a release
* git status - make sure everything is checked in
* Check travis & appveyor builds are green
* make check
* make test
* make tag
* edit docs/content/changelog.md
* make doc
* git status - to check for new man pages - git add them
* # Update version number in snapcraft.yml
* git commit -a -v -m "Version v1.XX"
* make retag
* # Set the GOPATH for a current stable go compiler
* make cross
* git push --tags origin master
* git push --tags origin master:stable # update the stable branch for packager.io
* # Wait for the appveyor and travis builds to complete then fetch the windows binaries from appveyor
* make fetch_windows
* make upload
* make upload_website
* make upload_github
* make startdev
* # announce with forum post, twitter post, G+ post
Early in the next release cycle update the vendored dependencies
* make update
* git status
* git add new files
* carry forward any patches to vendor stuff
* git commit -a -v
Make the version number be just in a file?

amazonclouddrive/amazonclouddrive.go: diff suppressed because it is too large

amazonclouddrive/amazonclouddrive_test.go

@@ -1,73 +0,0 @@
// Test AmazonCloudDrive filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package amazonclouddrive_test
import (
"testing"
"github.com/ncw/rclone/amazonclouddrive"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
func TestSetup(t *testing.T) {
fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil))
fstests.RemoteName = "TestAmazonCloudDrive:"
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsDirChangeNotify(t *testing.T) { fstests.TestFsDirChangeNotify(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

azureblob/azureblob.go: diff suppressed because it is too large

azureblob/azureblob_test.go

@@ -1,76 +0,0 @@
// Test AzureBlob filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
// +build go1.7
package azureblob_test
import (
"testing"
"github.com/ncw/rclone/azureblob"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
func TestSetup(t *testing.T) {
fstests.NilObject = fs.Object((*azureblob.Object)(nil))
fstests.RemoteName = "TestAzureBlob:"
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsDirChangeNotify(t *testing.T) { fstests.TestFsDirChangeNotify(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

azureblob/azureblob_unsupported.go

@@ -1,6 +0,0 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build !go1.7
package azureblob

b2/api/types.go

@@ -1,301 +0,0 @@
package api
import (
"fmt"
"path"
"strconv"
"strings"
"time"
"github.com/ncw/rclone/fs"
)
// Error describes a B2 error response
type Error struct {
Status int `json:"status"` // The numeric HTTP status code. Always matches the status in the HTTP response.
Code string `json:"code"` // A single-identifier code that identifies the error.
Message string `json:"message"` // A human-readable message, in English, saying what went wrong.
}
// Error satisfies the error interface
func (e *Error) Error() string {
return fmt.Sprintf("%s (%d %s)", e.Message, e.Status, e.Code)
}
// Fatal satisfies the Fatal interface
//
// It indicates which errors should be treated as fatal
func (e *Error) Fatal() bool {
return e.Status == 403 // 403 errors shouldn't be retried
}
var _ fs.Fataler = (*Error)(nil)
// Account describes a B2 account
type Account struct {
ID string `json:"accountId"` // The identifier for the account.
}
// Bucket describes a B2 bucket
type Bucket struct {
ID string `json:"bucketId"`
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
}
// Timestamp is a UTC time when this file was uploaded. It is a base
// 10 number of milliseconds since midnight, January 1, 1970 UTC. This
// fits in a 64 bit integer such as the type "long" in the programming
// language Java. It is intended to be compatible with Java's time
// long. For example, it can be passed directly into the java call
// Date.setTime(long time).
type Timestamp time.Time
// MarshalJSON turns a Timestamp into JSON (in UTC)
func (t *Timestamp) MarshalJSON() (out []byte, err error) {
timestamp := (*time.Time)(t).UTC().UnixNano()
return []byte(strconv.FormatInt(timestamp/1E6, 10)), nil
}
// UnmarshalJSON turns JSON into a Timestamp
func (t *Timestamp) UnmarshalJSON(data []byte) error {
timestamp, err := strconv.ParseInt(string(data), 10, 64)
if err != nil {
return err
}
*t = Timestamp(time.Unix(timestamp/1E3, (timestamp%1E3)*1E6).UTC())
return nil
}
const versionFormat = "-v2006-01-02-150405.000"
// AddVersion adds the timestamp as a version string into the filename passed in.
func (t Timestamp) AddVersion(remote string) string {
ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)]
s := (time.Time)(t).Format(versionFormat)
// Replace the '.' with a '-'
s = strings.Replace(s, ".", "-", -1)
return base + s + ext
}
// RemoveVersion removes the timestamp from a filename as a version string.
//
// It returns the new file name and a timestamp, or the old filename
// and a zero timestamp.
func RemoveVersion(remote string) (t Timestamp, newRemote string) {
newRemote = remote
ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)]
if len(base) < len(versionFormat) {
return
}
versionStart := len(base) - len(versionFormat)
// Check it ends in -xxx
if base[len(base)-4] != '-' {
return
}
// Replace with .xxx for parsing
base = base[:len(base)-4] + "." + base[len(base)-3:]
newT, err := time.Parse(versionFormat, base[versionStart:])
if err != nil {
return
}
return Timestamp(newT), base[:versionStart] + ext
}
// IsZero returns true if the timestamp is uninitialised
func (t Timestamp) IsZero() bool {
return (time.Time)(t).IsZero()
}
// Equal compares two timestamps
//
// If either is zero then it returns false
func (t Timestamp) Equal(s Timestamp) bool {
if (time.Time)(t).IsZero() {
return false
}
if (time.Time)(s).IsZero() {
return false
}
return (time.Time)(t).Equal((time.Time)(s))
}
// File is info about a file
type File struct {
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
Action string `json:"action"` // Either "upload" or "hide". "upload" means a file that was uploaded to B2 Cloud Storage. "hide" means a file version marking the file as hidden, so that it will not show up in b2_list_file_names. The result of b2_list_file_names will contain only "upload". The result of b2_list_file_versions may have both.
Size int64 `json:"size"` // The number of bytes in the file.
UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file.
ContentType string `json:"contentType"` // The MIME type of the file.
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
}
// AuthorizeAccountResponse is as returned from the b2_authorize_account call
type AuthorizeAccountResponse struct {
AccountID string `json:"accountId"` // The identifier for the account.
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files.
}
// ListBucketsResponse is as returned from the b2_list_buckets call
type ListBucketsResponse struct {
Buckets []Bucket `json:"buckets"`
}
// ListFileNamesRequest is as passed to b2_list_file_names or b2_list_file_versions
type ListFileNamesRequest struct {
BucketID string `json:"bucketId"` // required - The bucket to look for file names in.
StartFileName string `json:"startFileName,omitempty"` // optional - The first file name to return. If there is a file with this name, it will be returned in the list. If not, the first file name after this name will be returned.
MaxFileCount int `json:"maxFileCount,omitempty"` // optional - The maximum number of files to return from this call. The default value is 100, and the maximum allowed is 1000.
StartFileID string `json:"startFileId,omitempty"` // optional - What to pass in to startFileId for the next search to continue where this one left off.
Prefix string `json:"prefix,omitempty"` // optional - Files returned will be limited to those with the given prefix. Defaults to the empty string, which matches all files.
Delimiter string `json:"delimiter,omitempty"` // Files returned will be limited to those within the top folder, or any one subfolder. Defaults to NULL. Folder names will also be returned. The delimiter character will be used to "break" file names into folders.
}
// ListFileNamesResponse is as received from b2_list_file_names or b2_list_file_versions
type ListFileNamesResponse struct {
Files []File `json:"files"` // An array of objects, each one describing one file.
NextFileName *string `json:"nextFileName"` // What to pass in to startFileName for the next search to continue where this one left off, or null if there are no more files.
NextFileID *string `json:"nextFileId"` // What to pass in to startFileId for the next search to continue where this one left off, or null if there are no more files.
}
// GetUploadURLRequest is passed to b2_get_upload_url
type GetUploadURLRequest struct {
BucketID string `json:"bucketId"` // The ID of the bucket that you want to upload to.
}
// GetUploadURLResponse is received from b2_get_upload_url
type GetUploadURLResponse struct {
BucketID string `json:"bucketId"` // The unique ID of the bucket.
UploadURL string `json:"uploadUrl"` // The URL that can be used to upload files to this bucket, see b2_upload_file.
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_file.
}
// FileInfo is received from b2_upload_file, b2_get_file_info and b2_finish_large_file
type FileInfo struct {
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
Action string `json:"action"` // Either "upload" or "hide". "upload" means a file that was uploaded to B2 Cloud Storage. "hide" means a file version marking the file as hidden, so that it will not show up in b2_list_file_names. The result of b2_list_file_names will contain only "upload". The result of b2_list_file_versions may have both.
AccountID string `json:"accountId"` // Your account ID.
BucketID string `json:"bucketId"` // The bucket that the file is in.
Size int64 `json:"contentLength"` // The number of bytes stored in the file.
UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file.
ContentType string `json:"contentType"` // The MIME type of the file.
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
}
// CreateBucketRequest is used to create a bucket
type CreateBucketRequest struct {
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
}
// DeleteBucketRequest is used to delete a bucket
type DeleteBucketRequest struct {
ID string `json:"bucketId"`
AccountID string `json:"accountId"`
}
// DeleteFileRequest is used to delete a file version
type DeleteFileRequest struct {
ID string `json:"fileId"` // The ID of the file, as returned by b2_upload_file, b2_list_file_names, or b2_list_file_versions.
Name string `json:"fileName"` // The name of this file.
}
// HideFileRequest is used to hide a file
type HideFileRequest struct {
BucketID string `json:"bucketId"` // The bucket containing the file to hide.
Name string `json:"fileName"` // The name of the file to hide.
}
// GetFileInfoRequest is used to return a FileInfo struct with b2_get_file_info
type GetFileInfoRequest struct {
ID string `json:"fileId"` // The ID of the file, as returned by b2_upload_file, b2_list_file_names, or b2_list_file_versions.
}
// StartLargeFileRequest (b2_start_large_file) Prepares for uploading the parts of a large file.
//
// If the original source of the file being uploaded has a last
// modified time concept, Backblaze recommends using
// src_last_modified_millis as the name, and a string holding the base
10 number of milliseconds since midnight, January 1, 1970
// UTC. This fits in a 64 bit integer such as the type "long" in the
// programming language Java. It is intended to be compatible with
// Java's time long. For example, it can be passed directly into the
// Java call Date.setTime(long time).
//
// If the caller knows the SHA1 of the entire large file being
// uploaded, Backblaze recommends using large_file_sha1 as the name,
// and a 40 byte hex string representing the SHA1.
//
// Example: { "src_last_modified_millis" : "1452802803026", "large_file_sha1" : "a3195dc1e7b46a2ff5da4b3c179175b75671e80d", "color": "blue" }
type StartLargeFileRequest struct {
BucketID string `json:"bucketId"` // The ID of the bucket that the file will go in.
Name string `json:"fileName"` // The name of the file. See Files for requirements on file names.
ContentType string `json:"contentType"` // The MIME type of the content of the file, which will be returned in the Content-Type header when downloading the file. Use the Content-Type b2/x-auto to automatically set the stored Content-Type post upload. In the case where a file extension is absent or the lookup fails, the Content-Type is set to application/octet-stream.
Info map[string]string `json:"fileInfo"` // A JSON object holding the name/value pairs for the custom file info.
}
// StartLargeFileResponse is the response to StartLargeFileRequest
type StartLargeFileResponse struct {
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
AccountID string `json:"accountId"` // The identifier for the account.
BucketID string `json:"bucketId"` // The unique ID of the bucket.
ContentType string `json:"contentType"` // The MIME type of the file.
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
}
// GetUploadPartURLRequest is passed to b2_get_upload_part_url
type GetUploadPartURLRequest struct {
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
}
// GetUploadPartURLResponse is received from b2_get_upload_part_url
type GetUploadPartURLResponse struct {
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
UploadURL string `json:"uploadUrl"` // The URL that can be used to upload files to this bucket, see b2_upload_part.
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_part.
}
// UploadPartResponse is the response to b2_upload_part
type UploadPartResponse struct {
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
Size int64 `json:"contentLength"` // The number of bytes stored in the file.
SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file.
}
// FinishLargeFileRequest is passed to b2_finish_large_file
//
// The response is a FileInfo object (with extra AccountID and BucketID fields which we ignore).
//
// Large files do not have a SHA1 checksum. The value will always be "none".
type FinishLargeFileRequest struct {
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
SHA1s []string `json:"partSha1Array"` // A JSON array of hex SHA1 checksums of the parts of the large file. This is a double-check that the right parts were uploaded in the right order, and that none were missed. Note that the part numbers start at 1, and the SHA1 of the part 1 is the first string in the array, at index 0.
}
// CancelLargeFileRequest is passed to b2_cancel_large_file
//
// The response is a CancelLargeFileResponse
type CancelLargeFileRequest struct {
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
}
// CancelLargeFileResponse is the response to CancelLargeFileRequest
type CancelLargeFileResponse struct {
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
Name string `json:"fileName"` // The name of this file.
AccountID string `json:"accountId"` // The identifier for the account.
BucketID string `json:"bucketId"` // The unique ID of the bucket.
}
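
As a quick illustration of the version-name scheme above — a minimal sketch, with the expected strings taken from the api tests further down this diff (the import path assumes the package layout shown here):

package main

import (
	"fmt"
	"time"

	"github.com/ncw/rclone/b2/api"
)

func main() {
	t := api.Timestamp(time.Date(2001, 2, 3, 4, 5, 6, 123000000, time.UTC))
	versioned := t.AddVersion("potato.txt")
	fmt.Println(versioned) // potato-v2001-02-03-040506-123.txt
	ts, name := api.RemoveVersion(versioned)
	fmt.Println(name, (time.Time)(ts).Equal((time.Time)(t))) // potato.txt true
}

Note how the millisecond part of the timestamp survives the round trip: the version suffix keeps the time to millisecond precision, matching the millisecond resolution of the Timestamp JSON encoding.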

View File

@@ -1,87 +0,0 @@
package api_test
import (
"testing"
"time"
"github.com/ncw/rclone/b2/api"
"github.com/ncw/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
emptyT api.Timestamp
t0 = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123456789Z"))
t0r = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123000000Z"))
t1 = api.Timestamp(fstest.Time("2001-02-03T04:05:06.123000000Z"))
)
func TestTimestampMarshalJSON(t *testing.T) {
resB, err := t0.MarshalJSON()
res := string(resB)
require.NoError(t, err)
assert.Equal(t, "3661123", res)
resB, err = t1.MarshalJSON()
res = string(resB)
require.NoError(t, err)
assert.Equal(t, "981173106123", res)
}
func TestTimestampUnmarshalJSON(t *testing.T) {
var tActual api.Timestamp
err := tActual.UnmarshalJSON([]byte("981173106123"))
require.NoError(t, err)
assert.Equal(t, (time.Time)(t1), (time.Time)(tActual))
}
func TestTimestampAddVersion(t *testing.T) {
for _, test := range []struct {
t api.Timestamp
in string
expected string
}{
{t0, "potato.txt", "potato-v1970-01-01-010101-123.txt"},
{t1, "potato", "potato-v2001-02-03-040506-123"},
{t1, "", "-v2001-02-03-040506-123"},
} {
actual := test.t.AddVersion(test.in)
assert.Equal(t, test.expected, actual, test.in)
}
}
func TestTimestampRemoveVersion(t *testing.T) {
for _, test := range []struct {
in string
expectedT api.Timestamp
expectedRemote string
}{
{"potato.txt", emptyT, "potato.txt"},
{"potato-v1970-01-01-010101-123.txt", t0r, "potato.txt"},
{"potato-v2001-02-03-040506-123", t1, "potato"},
{"-v2001-02-03-040506-123", t1, ""},
{"potato-v2A01-02-03-040506-123", emptyT, "potato-v2A01-02-03-040506-123"},
{"potato-v2001-02-03-040506=123", emptyT, "potato-v2001-02-03-040506=123"},
} {
actualT, actualRemote := api.RemoveVersion(test.in)
assert.Equal(t, test.expectedT, actualT, test.in)
assert.Equal(t, test.expectedRemote, actualRemote, test.in)
}
}
func TestTimestampIsZero(t *testing.T) {
assert.True(t, emptyT.IsZero())
assert.False(t, t0.IsZero())
assert.False(t, t1.IsZero())
}
func TestTimestampEqual(t *testing.T) {
assert.False(t, emptyT.Equal(emptyT))
assert.False(t, t0.Equal(emptyT))
assert.False(t, emptyT.Equal(t0))
assert.False(t, t0.Equal(t1))
assert.False(t, t1.Equal(t0))
assert.True(t, t0.Equal(t0))
assert.True(t, t1.Equal(t1))
}

b2/b2.go (1414 lines)

File diff suppressed because it is too large

View File

@@ -1,170 +0,0 @@
package b2
import (
"testing"
"time"
"github.com/ncw/rclone/fstest"
)
// Test b2 string encoding
// https://www.backblaze.com/b2/docs/string_encoding.html
var encodeTest = []struct {
fullyEncoded string
minimallyEncoded string
plainText string
}{
{fullyEncoded: "%20", minimallyEncoded: "+", plainText: " "},
{fullyEncoded: "%21", minimallyEncoded: "!", plainText: "!"},
{fullyEncoded: "%22", minimallyEncoded: "%22", plainText: "\""},
{fullyEncoded: "%23", minimallyEncoded: "%23", plainText: "#"},
{fullyEncoded: "%24", minimallyEncoded: "$", plainText: "$"},
{fullyEncoded: "%25", minimallyEncoded: "%25", plainText: "%"},
{fullyEncoded: "%26", minimallyEncoded: "%26", plainText: "&"},
{fullyEncoded: "%27", minimallyEncoded: "'", plainText: "'"},
{fullyEncoded: "%28", minimallyEncoded: "(", plainText: "("},
{fullyEncoded: "%29", minimallyEncoded: ")", plainText: ")"},
{fullyEncoded: "%2A", minimallyEncoded: "*", plainText: "*"},
{fullyEncoded: "%2B", minimallyEncoded: "%2B", plainText: "+"},
{fullyEncoded: "%2C", minimallyEncoded: "%2C", plainText: ","},
{fullyEncoded: "%2D", minimallyEncoded: "-", plainText: "-"},
{fullyEncoded: "%2E", minimallyEncoded: ".", plainText: "."},
{fullyEncoded: "%2F", minimallyEncoded: "/", plainText: "/"},
{fullyEncoded: "%30", minimallyEncoded: "0", plainText: "0"},
{fullyEncoded: "%31", minimallyEncoded: "1", plainText: "1"},
{fullyEncoded: "%32", minimallyEncoded: "2", plainText: "2"},
{fullyEncoded: "%33", minimallyEncoded: "3", plainText: "3"},
{fullyEncoded: "%34", minimallyEncoded: "4", plainText: "4"},
{fullyEncoded: "%35", minimallyEncoded: "5", plainText: "5"},
{fullyEncoded: "%36", minimallyEncoded: "6", plainText: "6"},
{fullyEncoded: "%37", minimallyEncoded: "7", plainText: "7"},
{fullyEncoded: "%38", minimallyEncoded: "8", plainText: "8"},
{fullyEncoded: "%39", minimallyEncoded: "9", plainText: "9"},
{fullyEncoded: "%3A", minimallyEncoded: ":", plainText: ":"},
{fullyEncoded: "%3B", minimallyEncoded: ";", plainText: ";"},
{fullyEncoded: "%3C", minimallyEncoded: "%3C", plainText: "<"},
{fullyEncoded: "%3D", minimallyEncoded: "=", plainText: "="},
{fullyEncoded: "%3E", minimallyEncoded: "%3E", plainText: ">"},
{fullyEncoded: "%3F", minimallyEncoded: "%3F", plainText: "?"},
{fullyEncoded: "%40", minimallyEncoded: "@", plainText: "@"},
{fullyEncoded: "%41", minimallyEncoded: "A", plainText: "A"},
{fullyEncoded: "%42", minimallyEncoded: "B", plainText: "B"},
{fullyEncoded: "%43", minimallyEncoded: "C", plainText: "C"},
{fullyEncoded: "%44", minimallyEncoded: "D", plainText: "D"},
{fullyEncoded: "%45", minimallyEncoded: "E", plainText: "E"},
{fullyEncoded: "%46", minimallyEncoded: "F", plainText: "F"},
{fullyEncoded: "%47", minimallyEncoded: "G", plainText: "G"},
{fullyEncoded: "%48", minimallyEncoded: "H", plainText: "H"},
{fullyEncoded: "%49", minimallyEncoded: "I", plainText: "I"},
{fullyEncoded: "%4A", minimallyEncoded: "J", plainText: "J"},
{fullyEncoded: "%4B", minimallyEncoded: "K", plainText: "K"},
{fullyEncoded: "%4C", minimallyEncoded: "L", plainText: "L"},
{fullyEncoded: "%4D", minimallyEncoded: "M", plainText: "M"},
{fullyEncoded: "%4E", minimallyEncoded: "N", plainText: "N"},
{fullyEncoded: "%4F", minimallyEncoded: "O", plainText: "O"},
{fullyEncoded: "%50", minimallyEncoded: "P", plainText: "P"},
{fullyEncoded: "%51", minimallyEncoded: "Q", plainText: "Q"},
{fullyEncoded: "%52", minimallyEncoded: "R", plainText: "R"},
{fullyEncoded: "%53", minimallyEncoded: "S", plainText: "S"},
{fullyEncoded: "%54", minimallyEncoded: "T", plainText: "T"},
{fullyEncoded: "%55", minimallyEncoded: "U", plainText: "U"},
{fullyEncoded: "%56", minimallyEncoded: "V", plainText: "V"},
{fullyEncoded: "%57", minimallyEncoded: "W", plainText: "W"},
{fullyEncoded: "%58", minimallyEncoded: "X", plainText: "X"},
{fullyEncoded: "%59", minimallyEncoded: "Y", plainText: "Y"},
{fullyEncoded: "%5A", minimallyEncoded: "Z", plainText: "Z"},
{fullyEncoded: "%5B", minimallyEncoded: "%5B", plainText: "["},
{fullyEncoded: "%5C", minimallyEncoded: "%5C", plainText: "\\"},
{fullyEncoded: "%5D", minimallyEncoded: "%5D", plainText: "]"},
{fullyEncoded: "%5E", minimallyEncoded: "%5E", plainText: "^"},
{fullyEncoded: "%5F", minimallyEncoded: "_", plainText: "_"},
{fullyEncoded: "%60", minimallyEncoded: "%60", plainText: "`"},
{fullyEncoded: "%61", minimallyEncoded: "a", plainText: "a"},
{fullyEncoded: "%62", minimallyEncoded: "b", plainText: "b"},
{fullyEncoded: "%63", minimallyEncoded: "c", plainText: "c"},
{fullyEncoded: "%64", minimallyEncoded: "d", plainText: "d"},
{fullyEncoded: "%65", minimallyEncoded: "e", plainText: "e"},
{fullyEncoded: "%66", minimallyEncoded: "f", plainText: "f"},
{fullyEncoded: "%67", minimallyEncoded: "g", plainText: "g"},
{fullyEncoded: "%68", minimallyEncoded: "h", plainText: "h"},
{fullyEncoded: "%69", minimallyEncoded: "i", plainText: "i"},
{fullyEncoded: "%6A", minimallyEncoded: "j", plainText: "j"},
{fullyEncoded: "%6B", minimallyEncoded: "k", plainText: "k"},
{fullyEncoded: "%6C", minimallyEncoded: "l", plainText: "l"},
{fullyEncoded: "%6D", minimallyEncoded: "m", plainText: "m"},
{fullyEncoded: "%6E", minimallyEncoded: "n", plainText: "n"},
{fullyEncoded: "%6F", minimallyEncoded: "o", plainText: "o"},
{fullyEncoded: "%70", minimallyEncoded: "p", plainText: "p"},
{fullyEncoded: "%71", minimallyEncoded: "q", plainText: "q"},
{fullyEncoded: "%72", minimallyEncoded: "r", plainText: "r"},
{fullyEncoded: "%73", minimallyEncoded: "s", plainText: "s"},
{fullyEncoded: "%74", minimallyEncoded: "t", plainText: "t"},
{fullyEncoded: "%75", minimallyEncoded: "u", plainText: "u"},
{fullyEncoded: "%76", minimallyEncoded: "v", plainText: "v"},
{fullyEncoded: "%77", minimallyEncoded: "w", plainText: "w"},
{fullyEncoded: "%78", minimallyEncoded: "x", plainText: "x"},
{fullyEncoded: "%79", minimallyEncoded: "y", plainText: "y"},
{fullyEncoded: "%7A", minimallyEncoded: "z", plainText: "z"},
{fullyEncoded: "%7B", minimallyEncoded: "%7B", plainText: "{"},
{fullyEncoded: "%7C", minimallyEncoded: "%7C", plainText: "|"},
{fullyEncoded: "%7D", minimallyEncoded: "%7D", plainText: "}"},
{fullyEncoded: "%7E", minimallyEncoded: "~", plainText: "~"},
{fullyEncoded: "%7F", minimallyEncoded: "%7F", plainText: "\u007f"},
{fullyEncoded: "%E8%87%AA%E7%94%B1", minimallyEncoded: "%E8%87%AA%E7%94%B1", plainText: "自由"},
{fullyEncoded: "%F0%90%90%80", minimallyEncoded: "%F0%90%90%80", plainText: "𐐀"},
}
func TestUrlEncode(t *testing.T) {
for _, test := range encodeTest {
got := urlEncode(test.plainText)
if got != test.minimallyEncoded && got != test.fullyEncoded {
t.Errorf("urlEncode(%q) got %q wanted %q or %q", test.plainText, got, test.minimallyEncoded, test.fullyEncoded)
}
}
}
func TestTimeString(t *testing.T) {
for _, test := range []struct {
in time.Time
want string
}{
{fstest.Time("1970-01-01T00:00:00.000000000Z"), "0"},
{fstest.Time("2001-02-03T04:05:10.123123123Z"), "981173110123"},
{fstest.Time("2001-02-03T05:05:10.123123123+01:00"), "981173110123"},
} {
got := timeString(test.in)
if test.want != got {
t.Errorf("%v: want %v got %v", test.in, test.want, got)
}
}
}
func TestParseTimeString(t *testing.T) {
for _, test := range []struct {
in string
want time.Time
wantError string
}{
{"0", fstest.Time("1970-01-01T00:00:00.000000000Z"), ""},
{"981173110123", fstest.Time("2001-02-03T04:05:10.123000000Z"), ""},
{"", time.Time{}, ""},
{"potato", time.Time{}, `strconv.ParseInt: parsing "potato": invalid syntax`},
} {
o := Object{}
err := o.parseTimeString(test.in)
got := o.modTime
var gotError string
if err != nil {
gotError = err.Error()
}
if test.want != got {
t.Errorf("%v: want %v got %v", test.in, test.want, got)
}
if test.wantError != gotError {
t.Errorf("%v: want error %v got error %v", test.in, test.wantError, gotError)
}
}
}
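
A minimal sketch of an encoder consistent with the Backblaze table above — an illustration only, not necessarily the backend's actual urlEncode — keeps the characters the table leaves bare and percent-encodes every other byte:

package main

import (
	"bytes"
	"fmt"
)

// urlEncodeSketch percent-encodes every byte except ASCII letters, digits and
// the punctuation the table above leaves unencoded.
func urlEncodeSketch(s string) string {
	const keep = "._-/~!$'()*;=:@"
	var buf bytes.Buffer
	for i := 0; i < len(s); i++ {
		c := s[i]
		if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' ||
			bytes.IndexByte([]byte(keep), c) >= 0 {
			buf.WriteByte(c)
		} else {
			fmt.Fprintf(&buf, "%%%02X", c)
		}
	}
	return buf.String()
}

func main() {
	fmt.Println(urlEncodeSketch("a b+c/自由")) // a%20b%2Bc/%E8%87%AA%E7%94%B1
}

Multi-byte UTF-8 sequences fall out naturally: each byte is percent-encoded individually, which is exactly what the last two rows of the table require.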

View File

@@ -1,73 +0,0 @@
// Test B2 filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package b2_test
import (
"testing"
"github.com/ncw/rclone/b2"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
func TestSetup(t *testing.T) {
fstests.NilObject = fs.Object((*b2.Object)(nil))
fstests.RemoteName = "TestB2:"
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsDirChangeNotify(t *testing.T) { fstests.TestFsDirChangeNotify(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

View File

@@ -1,425 +0,0 @@
// Upload large files for b2
//
// Docs - https://www.backblaze.com/b2/docs/large_files.html
package b2
import (
"bytes"
"crypto/sha1"
"encoding/hex"
"fmt"
"hash"
"io"
"strings"
"sync"
"github.com/ncw/rclone/b2/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/rest"
"github.com/pkg/errors"
)
type hashAppendingReader struct {
h hash.Hash
in io.Reader
hexSum string
hexReader io.Reader
}
// Read returns all bytes from the original reader, then the hex sum
// of what was read so far, then EOF.
func (har *hashAppendingReader) Read(b []byte) (int, error) {
if har.hexReader == nil {
n, err := har.in.Read(b)
if err == io.EOF {
har.in = nil // allow GC
err = nil // allow reading hexSum before EOF
har.hexSum = hex.EncodeToString(har.h.Sum(nil))
har.hexReader = strings.NewReader(har.hexSum)
}
return n, err
}
return har.hexReader.Read(b)
}
// AdditionalLength returns how many bytes the appended hex sum will take up.
func (har *hashAppendingReader) AdditionalLength() int {
return hex.EncodedLen(har.h.Size())
}
// HexSum returns the hash sum as hex. It's only available after the original
// reader has EOF'd. It's an empty string before that.
func (har *hashAppendingReader) HexSum() string {
return har.hexSum
}
// newHashAppendingReader takes a Reader and a Hash and will append the hex sum
// after the original reader reaches EOF. The increased size depends on the
// given hash, which may be queried through AdditionalLength()
func newHashAppendingReader(in io.Reader, h hash.Hash) *hashAppendingReader {
withHash := io.TeeReader(in, h)
return &hashAppendingReader{h: h, in: withHash}
}
// largeUpload is used to control the upload of large files which need chunking
type largeUpload struct {
f *Fs // parent Fs
o *Object // object being uploaded
in io.Reader // read the data from here
id string // ID of the file being uploaded
size int64 // total size
parts int64 // calculated number of parts, if known
sha1s []string // slice of SHA1s for each part
uploadMu sync.Mutex // lock for upload variable
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
}
// newLargeUpload starts an upload of object o from in with metadata in src
func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
remote := o.remote
size := src.Size()
parts := int64(0)
sha1SliceSize := int64(maxParts)
if size == -1 {
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", fs.SizeSuffix(chunkSize), fs.SizeSuffix(maxParts*chunkSize))
} else {
parts = size / int64(chunkSize)
if size%int64(chunkSize) != 0 {
parts++
}
if parts > maxParts {
return nil, errors.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
}
sha1SliceSize = parts
}
modTime := src.ModTime()
opts := rest.Opts{
Method: "POST",
Path: "/b2_start_large_file",
}
bucketID, err := f.getBucketID()
if err != nil {
return nil, err
}
var request = api.StartLargeFileRequest{
BucketID: bucketID,
Name: o.fs.root + remote,
ContentType: fs.MimeType(src),
Info: map[string]string{
timeKey: timeString(modTime),
},
}
// Set the SHA1 if known
if calculatedSha1, err := src.Hash(fs.HashSHA1); err == nil && calculatedSha1 != "" {
request.Info[sha1Key] = calculatedSha1
}
var response api.StartLargeFileResponse
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(&opts, &request, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
up = &largeUpload{
f: f,
o: o,
in: in,
id: response.ID,
size: size,
parts: parts,
sha1s: make([]string, sha1SliceSize),
}
return up, nil
}
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
//
// This should be returned with returnUploadURL when finished
func (up *largeUpload) getUploadURL() (upload *api.GetUploadPartURLResponse, err error) {
up.uploadMu.Lock()
defer up.uploadMu.Unlock()
if len(up.uploads) == 0 {
opts := rest.Opts{
Method: "POST",
Path: "/b2_get_upload_part_url",
}
var request = api.GetUploadPartURLRequest{
ID: up.id,
}
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(&opts, &request, &upload)
return up.f.shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get upload URL")
}
} else {
upload, up.uploads = up.uploads[0], up.uploads[1:]
}
return upload, nil
}
// returnUploadURL returns the UploadURL to the cache
func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
if upload == nil {
return
}
up.uploadMu.Lock()
up.uploads = append(up.uploads, upload)
up.uploadMu.Unlock()
}
// clearUploadURL clears the current UploadURL and the AuthorizationToken
func (up *largeUpload) clearUploadURL() {
up.uploadMu.Lock()
up.uploads = nil
up.uploadMu.Unlock()
}
// Transfer a chunk
func (up *largeUpload) transferChunk(part int64, body []byte) error {
in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
size := int64(len(body)) + int64(in.AdditionalLength())
err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
// Get upload URL
upload, err := up.getUploadURL()
if err != nil {
return false, err
}
// Authorization
//
// An upload authorization token, from b2_get_upload_part_url.
//
// X-Bz-Part-Number
//
// A number from 1 to 10000. The parts uploaded for one file
// must have contiguous numbers, starting with 1.
//
// Content-Length
//
// The number of bytes in the file being uploaded. Note that
// this header is required; you cannot leave it out and just
// use chunked encoding. The minimum size of every part but
// the last one is 100MB.
//
// X-Bz-Content-Sha1
//
// The SHA1 checksum of this part of the file. B2 will
// check this when the part is uploaded, to make sure that the
// data arrived correctly. The same SHA1 checksum must be
// passed to b2_finish_large_file.
opts := rest.Opts{
Method: "POST",
RootURL: upload.UploadURL,
Body: fs.AccountPart(up.o, in),
ExtraHeaders: map[string]string{
"Authorization": upload.AuthorizationToken,
"X-Bz-Part-Number": fmt.Sprintf("%d", part),
sha1Header: "hex_digits_at_end",
},
ContentLength: &size,
}
var response api.UploadPartResponse
resp, err := up.f.srv.CallJSON(&opts, nil, &response)
retry, err := up.f.shouldRetry(resp, err)
// On retryable error clear PartUploadURL
if retry {
fs.Debugf(up.o, "Clearing part upload URL because of error: %v", err)
upload = nil
}
up.returnUploadURL(upload)
return retry, err
})
if err != nil {
fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
} else {
fs.Debugf(up.o, "Done sending chunk %d", part)
}
up.sha1s[part-1] = in.HexSum()
return err
}
// finish closes off the large upload
func (up *largeUpload) finish() error {
fs.Debugf(up.o, "Finishing large file upload with %d parts", up.parts)
opts := rest.Opts{
Method: "POST",
Path: "/b2_finish_large_file",
}
var request = api.FinishLargeFileRequest{
ID: up.id,
SHA1s: up.sha1s,
}
var response api.FileInfo
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(&opts, &request, &response)
return up.f.shouldRetry(resp, err)
})
if err != nil {
return err
}
return up.o.decodeMetaDataFileInfo(&response)
}
// cancel aborts the large upload
func (up *largeUpload) cancel() error {
opts := rest.Opts{
Method: "POST",
Path: "/b2_cancel_large_file",
}
var request = api.CancelLargeFileRequest{
ID: up.id,
}
var response api.CancelLargeFileResponse
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(&opts, &request, &response)
return up.f.shouldRetry(resp, err)
})
return err
}
func (up *largeUpload) managedTransferChunk(wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
wg.Add(1)
go func(part int64, buf []byte) {
defer wg.Done()
defer up.f.putUploadBlock(buf)
err := up.transferChunk(part, buf)
if err != nil {
select {
case errs <- err:
default:
}
}
}(part, buf)
}
func (up *largeUpload) finishOrCancelOnError(err error, errs chan error) error {
if err == nil {
select {
case err = <-errs:
default:
}
}
if err != nil {
fs.Debugf(up.o, "Cancelling large file upload due to error: %v", err)
cancelErr := up.cancel()
if cancelErr != nil {
fs.Errorf(up.o, "Failed to cancel large file upload: %v", cancelErr)
}
return err
}
return up.finish()
}
// Stream uploads the chunks from the input, starting with a required initial
// chunk. Assumes the file size is unknown and will upload until the input
// reaches EOF.
func (up *largeUpload) Stream(initialUploadBlock []byte) (err error) {
fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
errs := make(chan error, 1)
hasMoreParts := true
var wg sync.WaitGroup
fs.AccountByPart(up.o) // Cancel whole file accounting before reading
// Transfer initial chunk
up.size = int64(len(initialUploadBlock))
up.managedTransferChunk(&wg, errs, 1, initialUploadBlock)
outer:
for part := int64(2); hasMoreParts; part++ {
// Check any errors
select {
case err = <-errs:
break outer
default:
}
// Get a block of memory
buf := up.f.getUploadBlock()
// Read the chunk
n, err := io.ReadFull(up.in, buf)
if err == io.ErrUnexpectedEOF {
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
buf = buf[:n]
hasMoreParts = false
err = nil
} else if err == io.EOF {
fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
up.f.putUploadBlock(buf)
hasMoreParts = false
err = nil
break outer
} else if err != nil {
// other kinds of errors indicate failure
up.f.putUploadBlock(buf)
break outer
}
// Keep stats up to date
up.parts = part
up.size += int64(n)
if part > maxParts {
err = errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
break outer
}
// Transfer the chunk
up.managedTransferChunk(&wg, errs, part, buf)
}
wg.Wait()
up.sha1s = up.sha1s[:up.parts]
return up.finishOrCancelOnError(err, errs)
}
// Upload uploads the chunks from the input
func (up *largeUpload) Upload() error {
fs.Debugf(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
remaining := up.size
errs := make(chan error, 1)
var wg sync.WaitGroup
var err error
fs.AccountByPart(up.o) // Cancel whole file accounting before reading
outer:
for part := int64(1); part <= up.parts; part++ {
// Check any errors
select {
case err = <-errs:
break outer
default:
}
reqSize := remaining
if reqSize >= int64(chunkSize) {
reqSize = int64(chunkSize)
}
// Get a block of memory
buf := up.f.getUploadBlock()[:reqSize]
// Read the chunk
_, err = io.ReadFull(up.in, buf)
if err != nil {
up.f.putUploadBlock(buf)
break outer
}
// Transfer the chunk
up.managedTransferChunk(&wg, errs, part, buf)
remaining -= reqSize
}
wg.Wait()
return up.finishOrCancelOnError(err, errs)
}
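
To make the "hex_digits_at_end" trick concrete: the hashAppendingReader at the top of this file yields the payload followed by the hex SHA-1 of that payload, which is exactly what transferChunk sends as the part body. A hedged sketch, usable from within package b2 (assuming imports of crypto/sha1, io/ioutil and strings):

in := newHashAppendingReader(strings.NewReader("hello"), sha1.New())
data, _ := ioutil.ReadAll(in)
fmt.Printf("%s\n", data)
// prints the payload with the 40 hex digits of SHA-1("hello") appended:
// helloaaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d

This is why transferChunk sets ContentLength to len(body) plus AdditionalLength(): B2 strips the trailing checksum off itself when the upload declares hex_digits_at_end.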

View File

@@ -1,202 +0,0 @@
// +build ignore
// Cross compile rclone - in go because I hate bash ;-)
package main
import (
"flag"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
"time"
)
var (
// Flags
debug = flag.Bool("d", false, "Print commands instead of running them.")
parallel = flag.Int("parallel", runtime.NumCPU(), "Number of commands to run in parallel.")
copyAs = flag.String("release", "", "Make copies of the releases with this name")
gitLog = flag.String("git-log", "", "git log to include as well")
include = flag.String("include", "^.*$", "os/arch regexp to include")
exclude = flag.String("exclude", "^$", "os/arch regexp to exclude")
cgo = flag.Bool("cgo", false, "Use cgo for the build")
noClean = flag.Bool("no-clean", false, "Don't clean the build directory before running.")
tags = flag.String("tags", "", "Space separated list of build tags")
compileOnly = flag.Bool("compile-only", false, "Just build the binary, not the zip.")
)
// GOOS/GOARCH pairs we build for
var osarches = []string{
"windows/386",
"windows/amd64",
"darwin/386",
"darwin/amd64",
"linux/386",
"linux/amd64",
"linux/arm",
"linux/arm64",
"linux/mips",
"linux/mipsle",
"freebsd/386",
"freebsd/amd64",
"freebsd/arm",
"netbsd/386",
"netbsd/amd64",
"netbsd/arm",
"openbsd/386",
"openbsd/amd64",
"plan9/386",
"plan9/amd64",
"solaris/amd64",
}
// Special environment flags for a given arch
var archFlags = map[string][]string{
"386": {"GO386=387"},
}
// runEnv - run a shell command with env
func runEnv(args, env []string) {
if *debug {
args = append([]string{"echo"}, args...)
}
cmd := exec.Command(args[0], args[1:]...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if env != nil {
cmd.Env = append(os.Environ(), env...)
}
if *debug {
log.Printf("args = %v, env = %v\n", args, cmd.Env)
}
err := cmd.Run()
if err != nil {
log.Fatalf("Failed to run %v: %v", args, err)
}
}
// run a shell command
func run(args ...string) {
runEnv(args, nil)
}
// build the binary in dir
func compileArch(version, goos, goarch, dir string) {
log.Printf("Compiling %s/%s", goos, goarch)
output := filepath.Join(dir, "rclone")
if goos == "windows" {
output += ".exe"
}
err := os.MkdirAll(dir, 0777)
if err != nil {
log.Fatalf("Failed to mkdir: %v", err)
}
args := []string{
"go", "build",
"--ldflags", "-s -X github.com/ncw/rclone/fs.Version=" + version,
"-i",
"-o", output,
"-tags", *tags,
"..",
}
env := []string{
"GOOS=" + goos,
"GOARCH=" + goarch,
}
if !*cgo {
env = append(env, "CGO_ENABLED=0")
} else {
env = append(env, "CGO_ENABLED=1")
}
if flags, ok := archFlags[goarch]; ok {
env = append(env, flags...)
}
runEnv(args, env)
if !*compileOnly {
// Now build the zip
run("cp", "-a", "../MANUAL.txt", filepath.Join(dir, "README.txt"))
run("cp", "-a", "../MANUAL.html", filepath.Join(dir, "README.html"))
run("cp", "-a", "../rclone.1", dir)
if *gitLog != "" {
run("cp", "-a", *gitLog, dir)
}
zip := dir + ".zip"
run("zip", "-r9", zip, dir)
if *copyAs != "" {
copyAsZip := strings.Replace(zip, "-"+version, "-"+*copyAs, 1)
run("ln", zip, copyAsZip)
}
run("rm", "-rf", dir)
}
log.Printf("Done compiling %s/%s", goos, goarch)
}
func compile(version string) {
start := time.Now()
wg := new(sync.WaitGroup)
run := make(chan func(), *parallel)
for i := 0; i < *parallel; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for f := range run {
f()
}
}()
}
includeRe, err := regexp.Compile(*include)
if err != nil {
log.Fatalf("Bad -include regexp: %v", err)
}
excludeRe, err := regexp.Compile(*exclude)
if err != nil {
log.Fatalf("Bad -exclude regexp: %v", err)
}
compiled := 0
for _, osarch := range osarches {
if excludeRe.MatchString(osarch) || !includeRe.MatchString(osarch) {
continue
}
parts := strings.Split(osarch, "/")
if len(parts) != 2 {
log.Fatalf("Bad osarch %q", osarch)
}
goos, goarch := parts[0], parts[1]
userGoos := goos
if goos == "darwin" {
userGoos = "osx"
}
dir := filepath.Join("rclone-" + version + "-" + userGoos + "-" + goarch)
run <- func() {
compileArch(version, goos, goarch, dir)
}
compiled++
}
close(run)
wg.Wait()
log.Printf("Compiled %d arches in %v", compiled, time.Since(start))
}
func main() {
flag.Parse()
args := flag.Args()
if len(args) != 1 {
log.Fatalf("Syntax: %s <version>", os.Args[0])
}
version := args[0]
if !*noClean {
run("rm", "-rf", "build")
run("mkdir", "build")
}
err := os.Chdir("build")
if err != nil {
log.Fatalf("Couldn't cd into build dir: %v", err)
}
compile(version)
}
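
A typical invocation, as a hedged example (the path to the script and the version string are placeholders — the script takes exactly one version argument):

go run bin/cross-compile.go -include "^linux/amd64" -tags cmount <version>

The -include and -exclude regexps are matched against the os/arch pairs in osarches above, so for example "^windows" would select both Windows builds.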

View File

@@ -1,59 +0,0 @@
#!/usr/bin/python
"""
This is a tool to decrypt file names in rclone logs.
Pass two files in, the first should be a crypt mapping generated by
rclone ls --crypt-show-mapping remote:path
The second should be a log file that you want the paths decrypted in.
Note that if the crypt mappings file is large it can take some time to
run.
"""
import re
import sys
# Crypt line
match_crypt = re.compile(r'NOTICE: (.*?): Encrypts to "(.*?)"$')
def read_crypt_map(mapping_file):
"""
Read the crypt mapping file in, creating a dictionary of substitutions
"""
mapping = {}
with open(mapping_file) as fd:
for line in fd:
match = match_crypt.search(line)
if match:
plaintext, ciphertext = match.groups()
plaintexts = plaintext.split("/")
ciphertexts = ciphertext.split("/")
for plain, cipher in zip(plaintexts, ciphertexts):
mapping[cipher] = plain
return mapping
def map_log_file(crypt_map, log_file):
"""
Substitute the crypt_map in the log file.
This uses a straightforward O(N**2) algorithm. I tried using
regexps to speed it up but it made it slower!
"""
with open(log_file) as fd:
for line in fd:
for cipher, plain in crypt_map.iteritems():
line = line.replace(cipher, plain)
sys.stdout.write(line)
def main():
if len(sys.argv) < 3:
print "Syntax: %s <crypt-mapping-file> <log-file>" % sys.argv[0]
raise SystemExit(1)
mapping_file, log_file = sys.argv[1:]
crypt_map = read_crypt_map(mapping_file)
map_log_file(crypt_map, log_file)
if __name__ == "__main__":
main()
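
Hedged usage example (the script and file names here are illustrative): generate the mapping with the rclone command quoted in the docstring — the NOTICE lines normally appear on rclone's log output, i.e. stderr — then pass both files to the script:

rclone ls --crypt-show-mapping remote:path 2> mapping.txt
python decrypt_names.py mapping.txt rclone.log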

View File

@@ -1,146 +0,0 @@
#!/usr/bin/python
"""
Make single page versions of the documentation for release and
conversion into man pages etc.
"""
import os
import re
from datetime import datetime
docpath = "docs/content"
outfile = "MANUAL.md"
# Order to add docs segments to make outfile
docs = [
"about.md",
"install.md",
"docs.md",
"remote_setup.md",
"filtering.md",
"overview.md",
# Keep these alphabetical by full name
"amazonclouddrive.md",
"s3.md",
"b2.md",
"box.md",
"crypt.md",
"dropbox.md",
"ftp.md",
"googlecloudstorage.md",
"drive.md",
"http.md",
"hubic.md",
"azureblob.md",
"onedrive.md",
"qingstor.md",
"swift.md",
"sftp.md",
"yandex.md",
"local.md",
"changelog.md",
"bugs.md",
"faq.md",
"licence.md",
"authors.md",
"contact.md",
]
# Order to put the commands in - any not on here will be in sorted order
commands_order = [
"rclone_config.md",
"rclone_copy.md",
"rclone_sync.md",
"rclone_move.md",
"rclone_delete.md",
"rclone_purge.md",
"rclone_mkdir.md",
"rclone_rmdir.md",
"rclone_check.md",
"rclone_ls.md",
"rclone_lsd.md",
"rclone_lsl.md",
"rclone_md5sum.md",
"rclone_sha1sum.md",
"rclone_size.md",
"rclone_version.md",
"rclone_cleanup.md",
"rclone_dedupe.md",
]
# Docs which aren't made into outfile
ignore_docs = [
"downloads.md",
"privacy.md",
"donate.md",
]
def read_doc(doc):
"""Read file as a string"""
path = os.path.join(docpath, doc)
with open(path) as fd:
contents = fd.read()
parts = contents.split("---\n", 2)
if len(parts) != 3:
raise ValueError("Couldn't find --- markers: found %d parts" % len(parts))
contents = parts[2].strip()+"\n\n"
# Remove icons
contents = re.sub(r'<i class="fa.*?</i>\s*', "", contents)
# Make [...](/links/) absolute
contents = re.sub(r'\((\/.*?\/)\)', r"(https://rclone.org\1)", contents)
# Interpret provider shortcode
# {{< provider name="Amazon S3" home="https://aws.amazon.com/s3/" config="/s3/" >}}
contents = re.sub(r'\{\{<\s+provider.*?name="(.*?)".*?>\}\}', r"\1", contents)
return contents
def check_docs(docpath):
"""Check all the docs are in docpath"""
files = set(f for f in os.listdir(docpath) if f.endswith(".md"))
files -= set(ignore_docs)
docs_set = set(docs)
if files == docs_set:
return
print "Files on disk but not in docs variable: %s" % ", ".join(files - docs_set)
print "Files in docs variable but not on disk: %s" % ", ".join(docs_set - files)
raise ValueError("Missing files")
def read_command(command):
doc = read_doc("commands/"+command)
doc = re.sub(r"### Options inherited from parent commands.*$", "", doc, 0, re.S)
doc = doc.strip()+"\n"
return doc
def read_commands(docpath):
"""Reads the commands an makes them into a single page"""
files = set(f for f in os.listdir(docpath + "/commands") if f.endswith(".md"))
docs = []
for command in commands_order:
docs.append(read_command(command))
files.remove(command)
for command in sorted(files):
if command != "rclone.md":
docs.append(read_command(command))
return "\n".join(docs)
def main():
check_docs(docpath)
command_docs = read_commands(docpath)
with open(outfile, "w") as out:
out.write("""\
%% rclone(1) User Manual
%% Nick Craig-Wood
%% %s
""" % datetime.now().strftime("%b %d, %Y"))
for doc in docs:
contents = read_doc(doc)
# Substitute the commands into doc.md
if doc == "docs.md":
contents = re.sub(r"The main rclone commands.*?for the full list.", command_docs, contents, 0, re.S)
out.write(contents)
print "Written '%s'" % outfile
if __name__ == "__main__":
main()

View File

@@ -1,146 +0,0 @@
// +build ignore
// Build a directory structure with the required number of files in it
//
// Run with go run make_test_files.go [flag] <directory>
package main
import (
cryptrand "crypto/rand"
"flag"
"io"
"log"
"math/rand"
"os"
"path/filepath"
)
var (
// Flags
numberOfFiles = flag.Int("n", 1000, "Number of files to create")
averageFilesPerDirectory = flag.Int("files-per-directory", 10, "Average number of files per directory")
maxDepth = flag.Int("max-depth", 10, "Maximum depth of directory hierarchy")
minFileSize = flag.Int64("min-size", 0, "Minimum size of file to create")
maxFileSize = flag.Int64("max-size", 100, "Maximum size of files to create")
minFileNameLength = flag.Int("min-name-length", 4, "Minimum length of file names to create")
maxFileNameLength = flag.Int("max-name-length", 12, "Maximum length of file names to create")
directoriesToCreate int
totalDirectories int
fileNames = map[string]struct{}{} // keep a note of which file name we've used already
)
// randomString creates a random string for test purposes
func randomString(n int) string {
const (
vowel = "aeiou"
consonant = "bcdfghjklmnpqrstvwxyz"
digit = "0123456789"
)
pattern := []string{consonant, vowel, consonant, vowel, consonant, vowel, consonant, digit}
out := make([]byte, n)
p := 0
for i := range out {
source := pattern[p]
p = (p + 1) % len(pattern)
out[i] = source[rand.Intn(len(source))]
}
return string(out)
}
// fileName creates a unique random file or directory name
func fileName() (name string) {
for {
length := rand.Intn(*maxFileNameLength-*minFileNameLength) + *minFileNameLength
name = randomString(length)
if _, found := fileNames[name]; !found {
break
}
}
fileNames[name] = struct{}{}
return name
}
// dir is a directory in the directory hierarchy being built up
type dir struct {
name string
depth int
children []*dir
parent *dir
}
// Create a random directory hierarchy under d
func (d *dir) createDirectories() {
for totalDirectories < directoriesToCreate {
newDir := &dir{
name: fileName(),
depth: d.depth + 1,
parent: d,
}
d.children = append(d.children, newDir)
totalDirectories++
switch rand.Intn(4) {
case 0:
if d.depth < *maxDepth {
newDir.createDirectories()
}
case 1:
return
}
}
return
}
// list the directory hierarchy
func (d *dir) list(path string, output []string) []string {
dirPath := path + "/" + d.name
output = append(output, dirPath)
for _, subDir := range d.children {
output = subDir.list(dirPath, output)
}
return output
}
// writeFile writes a random file at dir/name
func writeFile(dir, name string) {
err := os.MkdirAll(dir, 0777)
if err != nil {
log.Fatalf("Failed to make directory %q: %v", dir, err)
}
path := filepath.Join(dir, name)
fd, err := os.Create(path)
if err != nil {
log.Fatalf("Failed to open file %q: %v", path, err)
}
size := rand.Int63n(*maxFileSize-*minFileSize) + *minFileSize
_, err = io.CopyN(fd, cryptrand.Reader, size)
if err != nil {
log.Fatalf("Failed to write %v bytes to file %q: %v", size, path, err)
}
err = fd.Close()
if err != nil {
log.Fatalf("Failed to close file %q: %v", path, err)
}
}
func main() {
flag.Parse()
args := flag.Args()
if len(args) != 1 {
log.Fatalf("Require 1 directory argument")
}
outputDirectory := args[0]
log.Printf("Output dir %q", outputDirectory)
directoriesToCreate = *numberOfFiles / *averageFilesPerDirectory
log.Printf("directoriesToCreate %v", directoriesToCreate)
root := &dir{name: outputDirectory, depth: 1}
for totalDirectories < directoriesToCreate {
root.createDirectories()
}
dirs := root.list("", []string{})
for i := 0; i < *numberOfFiles; i++ {
dir := dirs[rand.Intn(len(dirs))]
writeFile(dir, fileName())
}
}
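
For example (flag values illustrative):

go run make_test_files.go -n 500 -files-per-directory 5 -max-depth 3 /tmp/test-tree

This aims for 500/5 = 100 directories and scatters 500 randomly named, randomly sized files among them.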

View File

@@ -1,4 +0,0 @@
# Encrypted rclone configuration File
RCLONE_ENCRYPT_V0:
XIkAr3p+y+zai82cHFH8UoW1y1XTe6dpTzo/g4uSwqI2pfsnSSJ4JbAsRZ9nGVpx3NzROKEewlusVHNokiA4/nD4NbT+2DJrpMLg/OtLREICfuRk3tVWPKLGsmA+TLKU+IfQMO4LfrrCe2DF/lW0qA5Xu16E0Vn++jNhbwW2oB+JTkaGka8Ae3CyisM/3NUGnCOG/yb5wLH7ybUstNYPHsNFCiU1brFXQ4DNIbUFMmca+5S44vrOWvhp9QijQXlG7/JjwrkqbB/LK2gMJPTuhY2OW+4tRw1IoCXbWmwJXv5xmhPqanW92A==

View File

@@ -1,46 +0,0 @@
#!/usr/bin/env python
"""
Update the authors.md file with the authors from the git log
"""
import re
import subprocess
AUTHORS = "docs/content/authors.md"
IGNORE = [ "nick@raig-wood.com" ]
def load():
"""
returns a set of emails already in authors.md
"""
with open(AUTHORS) as fd:
authors = fd.read()
emails = set(re.findall(r"<(.*?)>", authors))
emails.update(IGNORE)
return emails
def add_email(name, email):
"""
adds the email passed in to the end of authors.md
"""
print "Adding %s <%s>" % (name, email)
with open(AUTHORS, "a+") as fd:
print >>fd, " * %s <%s>" % (name, email)
subprocess.check_call(["git", "commit", "-m", "Add %s to contributors" % name, AUTHORS])
def main():
out = subprocess.check_output(["git", "log", '--reverse', '--format=%an|%ae', "master"])
previous = load()
for line in out.split("\n"):
line = line.strip()
if line == "":
continue
name, email = line.split("|")
if email in previous:
continue
previous.add(email)
add_email(name, email)
if __name__ == "__main__":
main()

View File

@@ -1,50 +0,0 @@
#!/bin/bash
#
# Upload a release
#
# Needs github-release from https://github.com/aktau/github-release
set -e
REPO="rclone"
if [ "$1" == "" ]; then
echo "Syntax: $0 Version"
exit 1
fi
VERSION="$1"
if [ "$GITHUB_USER" == "" ]; then
echo 1>&2 "Need GITHUB_USER environment variable"
exit 1
fi
if [ "$GITHUB_TOKEN" == "" ]; then
echo 1>&2 "Need GITHUB_TOKEN environment variable"
exit 1
fi
echo "Making release ${VERSION}"
github-release release \
--repo ${REPO} \
--tag ${VERSION} \
--name "rclone" \
--description "Rclone - rsync for cloud storage. Sync files to and from many cloud storage providers."
for build in `ls build | grep -v current`; do
echo "Uploading ${build}"
base="${build%.*}"
parts=(${base//-/ })
os=${parts[3]}
arch=${parts[4]}
github-release upload \
--repo ${REPO} \
--tag ${VERSION} \
--name "${build}" \
--file build/${build}
done
github-release info \
--repo ${REPO} \
--tag ${VERSION}
echo "Done"

View File

@@ -1,5 +0,0 @@
@echo off
echo Setting environment variables for mingw+WinFsp compile
set GOPATH=X:\go
set PATH=C:\Program Files\mingw-w64\i686-7.1.0-win32-dwarf-rt_v5-rev0\mingw32\bin;%PATH%
set CPATH=C:\Program Files\WinFsp\inc\fuse

View File

@@ -1,192 +0,0 @@
// Package api has type definitions for box
//
// Converted from the API docs with help from https://mholt.github.io/json-to-go/
package api
import (
"encoding/json"
"fmt"
"time"
)
const (
// 2017-05-03T07:26:10-07:00
timeFormat = `"` + time.RFC3339 + `"`
)
// Time represents date and time information for the
// box API, using RFC3339
type Time time.Time
// MarshalJSON turns a Time into JSON (in UTC)
func (t *Time) MarshalJSON() (out []byte, err error) {
timeString := (*time.Time)(t).Format(timeFormat)
return []byte(timeString), nil
}
// UnmarshalJSON turns JSON into a Time
func (t *Time) UnmarshalJSON(data []byte) error {
newT, err := time.Parse(timeFormat, string(data))
if err != nil {
return err
}
*t = Time(newT)
return nil
}
// Error is returned from box when things go wrong
type Error struct {
Type string `json:"type"`
Status int `json:"status"`
Code string `json:"code"`
ContextInfo json.RawMessage
HelpURL string `json:"help_url"`
Message string `json:"message"`
RequestID string `json:"request_id"`
}
// Error returns a string for the error and satisfies the error interface
func (e *Error) Error() string {
out := fmt.Sprintf("Error %q (%d)", e.Code, e.Status)
if e.Message != "" {
out += ": " + e.Message
}
if e.ContextInfo != nil {
out += fmt.Sprintf(" (%+v)", e.ContextInfo)
}
return out
}
// Check Error satisfies the error interface
var _ error = (*Error)(nil)
// ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status"
// Types of things in Item
const (
ItemTypeFolder = "folder"
ItemTypeFile = "file"
ItemStatusActive = "active"
ItemStatusTrashed = "trashed"
ItemStatusDeleted = "deleted"
)
// Item describes a folder or a file as returned by Get Folder Items and others
type Item struct {
Type string `json:"type"`
ID string `json:"id"`
SequenceID string `json:"sequence_id"`
Etag string `json:"etag"`
SHA1 string `json:"sha1"`
Name string `json:"name"`
Size int64 `json:"size"`
CreatedAt Time `json:"created_at"`
ModifiedAt Time `json:"modified_at"`
ContentCreatedAt Time `json:"content_created_at"`
ContentModifiedAt Time `json:"content_modified_at"`
ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
}
// ModTime returns the modification time of the item
func (i *Item) ModTime() (t time.Time) {
t = time.Time(i.ContentModifiedAt)
if t.IsZero() {
t = time.Time(i.ModifiedAt)
}
return t
}
// FolderItems is returned from the GetFolderItems call
type FolderItems struct {
TotalCount int `json:"total_count"`
Entries []Item `json:"entries"`
Offset int `json:"offset"`
Limit int `json:"limit"`
Order []struct {
By string `json:"by"`
Direction string `json:"direction"`
} `json:"order"`
}
// Parent defines the ID of the parent directory
type Parent struct {
ID string `json:"id"`
}
// CreateFolder is the request for Create Folder
type CreateFolder struct {
Name string `json:"name"`
Parent Parent `json:"parent"`
}
// UploadFile is the request for Upload File
type UploadFile struct {
Name string `json:"name"`
Parent Parent `json:"parent"`
ContentCreatedAt Time `json:"content_created_at"`
ContentModifiedAt Time `json:"content_modified_at"`
}
// UpdateFileModTime is used in Update File Info
type UpdateFileModTime struct {
ContentModifiedAt Time `json:"content_modified_at"`
}
// UpdateFileMove is the request for Upload File to change name and parent
type UpdateFileMove struct {
Name string `json:"name"`
Parent Parent `json:"parent"`
}
// CopyFile is the request for Copy File
type CopyFile struct {
Name string `json:"name"`
Parent Parent `json:"parent"`
}
// UploadSessionRequest is used in Create Upload Session
type UploadSessionRequest struct {
FolderID string `json:"folder_id,omitempty"` // don't pass for update
FileSize int64 `json:"file_size"`
FileName string `json:"file_name,omitempty"` // optional for update
}
// UploadSessionResponse is returned from Create Upload Session
type UploadSessionResponse struct {
TotalParts int `json:"total_parts"`
PartSize int64 `json:"part_size"`
SessionEndpoints struct {
ListParts string `json:"list_parts"`
Commit string `json:"commit"`
UploadPart string `json:"upload_part"`
Status string `json:"status"`
Abort string `json:"abort"`
} `json:"session_endpoints"`
SessionExpiresAt Time `json:"session_expires_at"`
ID string `json:"id"`
Type string `json:"type"`
NumPartsProcessed int `json:"num_parts_processed"`
}
// Part defines the return from the upload part call, which is also passed to commit upload
type Part struct {
PartID string `json:"part_id"`
Offset int `json:"offset"`
Size int `json:"size"`
Sha1 string `json:"sha1"`
}
// UploadPartResponse is returned from the upload part call
type UploadPartResponse struct {
Part Part `json:"part"`
}
// CommitUpload is used in the Commit Upload call
type CommitUpload struct {
Parts []Part `json:"parts"`
Attributes struct {
ContentCreatedAt Time `json:"content_created_at"`
ContentModifiedAt Time `json:"content_modified_at"`
} `json:"attributes"`
}
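Aside: a minimal round-trip of the Time marshalling at the top of this file. It assumes timeFormat is RFC 3339 with the surrounding JSON quotes baked into the format string, which is why MarshalJSON can return the formatted bytes directly; the constant below is an illustrative stand-in, not a quote of the package.

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Assumed stand-in for the package's timeFormat: RFC 3339 with the
// JSON quotes baked in, so Format/Parse handle the quoting themselves.
const timeFormat = `"` + time.RFC3339 + `"`

// Time mirrors api.Time and its marshallers above.
type Time time.Time

func (t *Time) MarshalJSON() ([]byte, error) {
	return []byte((*time.Time)(t).Format(timeFormat)), nil
}

func (t *Time) UnmarshalJSON(data []byte) error {
	newT, err := time.Parse(timeFormat, string(data))
	if err != nil {
		return err
	}
	*t = Time(newT)
	return nil
}

func main() {
	orig := Time(time.Date(2017, 9, 30, 12, 0, 0, 0, time.UTC))
	out, _ := json.Marshal(&orig)
	fmt.Println(string(out)) // "2017-09-30T12:00:00Z"

	var back Time
	_ = json.Unmarshal(out, &back)
	fmt.Println(time.Time(back).Equal(time.Time(orig))) // true
}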

1067
box/box.go

File diff suppressed because it is too large

View File

@@ -1,73 +0,0 @@
// Test Box filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package box_test
import (
"testing"
"github.com/ncw/rclone/box"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
)
func TestSetup(t *testing.T) {
fstests.NilObject = fs.Object((*box.Object)(nil))
fstests.RemoteName = "TestBox:"
}
// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsDirChangeNotify(t *testing.T) { fstests.TestFsDirChangeNotify(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }

View File

@@ -1,270 +0,0 @@
// multipart upload for box
package box
import (
"bytes"
"crypto/sha1"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"strconv"
"sync"
"time"
"github.com/ncw/rclone/box/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/rest"
"github.com/pkg/errors"
)
// createUploadSession creates an upload session for the object
func (o *Object) createUploadSession(leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/files/upload_sessions",
RootURL: uploadURL,
}
request := api.UploadSessionRequest{
FileSize: size,
}
// If the object has an ID then it already exists, so create a new version
if o.id != "" {
opts.Path = "/files/" + o.id + "/upload_sessions"
} else {
opts.Path = "/files/upload_sessions"
request.FolderID = directoryID
request.FileName = replaceReservedChars(leaf)
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(&opts, &request, &response)
return shouldRetry(resp, err)
})
return
}
// sha1Digest produces a digest using sha1 as per RFC3230
func sha1Digest(digest []byte) string {
return "sha=" + base64.StdEncoding.EncodeToString(digest)
}
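A quick sanity check of the digest format, as a self-contained sketch (the helper is copied locally so the example runs on its own):

package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

// sha1Digest mirrors the helper above: RFC 3230 wants "sha=" followed
// by the base64 of the raw SHA-1 bytes (not their hex form).
func sha1Digest(digest []byte) string {
	return "sha=" + base64.StdEncoding.EncodeToString(digest)
}

func main() {
	sum := sha1.Sum([]byte("hello"))
	fmt.Println(sha1Digest(sum[:])) // sha=qvTGHdzF6KLavt4PO0gs2a6pQ00=
}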
// uploadPart uploads a part in an upload session
func (o *Object) uploadPart(SessionID string, offset, totalSize int64, chunk []byte) (response *api.UploadPartResponse, err error) {
chunkSize := int64(len(chunk))
in := bytes.NewReader(chunk)
sha1sum := sha1.Sum(chunk)
opts := rest.Opts{
Method: "PUT",
Path: "/files/upload_sessions/" + SessionID,
RootURL: uploadURL,
ContentType: "application/octet-stream",
ContentLength: &chunkSize,
ContentRange: fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, totalSize),
ExtraHeaders: map[string]string{
"Digest": sha1Digest(sha1sum[:]),
},
Body: in,
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
_, _ = in.Seek(0, 0)
resp, err = o.fs.srv.CallJSON(&opts, nil, &response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
return response, nil
}
// commitUpload finishes an upload session
func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/files/upload_sessions/" + SessionID + "/commit",
RootURL: uploadURL,
ExtraHeaders: map[string]string{
"Digest": sha1Digest(sha1sum),
},
}
request := api.CommitUpload{
Parts: parts,
}
request.Attributes.ContentModifiedAt = api.Time(modTime)
request.Attributes.ContentCreatedAt = api.Time(modTime)
var body []byte
var resp *http.Response
maxTries := fs.Config.LowLevelRetries
const defaultDelay = 10
var tries int
outer:
for tries = 0; tries < maxTries; tries++ {
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(&opts, &request, nil)
if err != nil {
return shouldRetry(resp, err)
}
body, err = rest.ReadBody(resp)
return shouldRetry(resp, err)
})
delay := defaultDelay
why := "unknown"
if err != nil {
// Sometimes we get 400 Error with
// parts_mismatch immediately after uploading
// the last part. Ignore this error and wait.
if boxErr, ok := err.(*api.Error); ok && boxErr.Code == "parts_mismatch" {
why = err.Error()
} else {
return nil, err
}
} else {
switch resp.StatusCode {
case http.StatusOK, http.StatusCreated:
break outer
case http.StatusAccepted:
why = "not ready yet"
delayString := resp.Header.Get("Retry-After")
if delayString != "" {
delay, err = strconv.Atoi(delayString)
if err != nil {
fs.Debugf(o, "Couldn't decode Retry-After header %q: %v", delayString, err)
delay = defaultDelay
}
}
default:
return nil, errors.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
}
}
fs.Debugf(o, "commit multipart upload failed %d/%d - trying again in %d seconds (%s)", tries+1, maxTries, delay, why)
time.Sleep(time.Duration(delay) * time.Second)
}
if tries >= maxTries {
return nil, errors.New("too many tries to commit multipart upload - increase --low-level-retries")
}
err = json.Unmarshal(body, &result)
if err != nil {
return nil, errors.Wrapf(err, "couldn't decode commit response: %q", body)
}
return result, nil
}
// abortUpload cancels an upload session
func (o *Object) abortUpload(SessionID string) (err error) {
opts := rest.Opts{
Method: "DELETE",
Path: "/files/upload_sessions/" + SessionID,
RootURL: uploadURL,
NoResponse: true,
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
return err
}
// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
// Create upload session
session, err := o.createUploadSession(leaf, directoryID, size)
if err != nil {
return errors.Wrap(err, "multipart upload create session failed")
}
chunkSize := session.PartSize
fs.Debugf(o, "Multipart upload session started for %d parts of size %v", session.TotalParts, fs.SizeSuffix(chunkSize))
// Cancel the session if something went wrong
defer func() {
if err != nil {
fs.Debugf(o, "Cancelling multipart upload: %v", err)
cancelErr := o.abortUpload(session.ID)
if cancelErr != nil {
fs.Logf(o, "Failed to cancel multipart upload: %v", err)
}
}
}()
// Upload the chunks
remaining := size
position := int64(0)
parts := make([]api.Part, session.TotalParts)
hash := sha1.New()
errs := make(chan error, 1)
var wg sync.WaitGroup
outer:
for part := 0; part < session.TotalParts; part++ {
// Check any errors
select {
case err = <-errs:
break outer
default:
}
reqSize := remaining
if reqSize >= int64(chunkSize) {
reqSize = int64(chunkSize)
}
// Make a block of memory
buf := make([]byte, reqSize)
// Read the chunk
_, err = io.ReadFull(in, buf)
if err != nil {
err = errors.Wrap(err, "multipart upload failed to read source")
break outer
}
// Make the global hash (must be done sequentially)
_, _ = hash.Write(buf)
// Transfer the chunk
wg.Add(1)
o.fs.uploadToken.Get()
go func(part int, position int64) {
defer wg.Done()
defer o.fs.uploadToken.Put()
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
partResponse, err := o.uploadPart(session.ID, position, size, buf)
if err != nil {
err = errors.Wrap(err, "multipart upload failed to upload part")
select {
case errs <- err:
default:
}
return
}
parts[part] = partResponse.Part
}(part, position)
// ready for next block
remaining -= chunkSize
position += chunkSize
}
wg.Wait()
if err == nil {
select {
case err = <-errs:
default:
}
}
if err != nil {
return err
}
// Finalise the upload session
result, err := o.commitUpload(session.ID, parts, modTime, hash.Sum(nil))
if err != nil {
return errors.Wrap(err, "multipart upload failed to finalize")
}
if result.TotalCount != 1 || len(result.Entries) != 1 {
return errors.Errorf("multipart upload failed %v - not sure why", o)
}
return o.setMetaData(&result.Entries[0])
}
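The per-part goroutines above are throttled by o.fs.uploadToken, a Get/Put token pool. A minimal sketch of that pattern built on a buffered channel; the tokens type and its methods are illustrative, not the rclone API:

package main

import (
	"fmt"
	"sync"
)

// tokens is a hypothetical counting semaphore in the style of the
// upload token pool: Get blocks once all slots are taken, Put frees one.
type tokens chan struct{}

func newTokens(n int) tokens { return make(tokens, n) }
func (t tokens) Get()        { t <- struct{}{} }
func (t tokens) Put()        { <-t }

func main() {
	const parts = 8
	tok := newTokens(3) // at most 3 uploads in flight
	var wg sync.WaitGroup
	for part := 0; part < parts; part++ {
		wg.Add(1)
		tok.Get() // acquired before spawning, released by the worker
		go func(part int) {
			defer wg.Done()
			defer tok.Put()
			fmt.Println("uploading part", part) // order is nondeterministic
		}(part)
	}
	wg.Wait()
}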

View File

@@ -1,46 +0,0 @@
// Package all imports all the commands
package all
import (
// Active commands
_ "github.com/ncw/rclone/cmd"
_ "github.com/ncw/rclone/cmd/authorize"
_ "github.com/ncw/rclone/cmd/cat"
_ "github.com/ncw/rclone/cmd/check"
_ "github.com/ncw/rclone/cmd/cleanup"
_ "github.com/ncw/rclone/cmd/cmount"
_ "github.com/ncw/rclone/cmd/config"
_ "github.com/ncw/rclone/cmd/copy"
_ "github.com/ncw/rclone/cmd/copyto"
_ "github.com/ncw/rclone/cmd/cryptcheck"
_ "github.com/ncw/rclone/cmd/cryptdecode"
_ "github.com/ncw/rclone/cmd/dbhashsum"
_ "github.com/ncw/rclone/cmd/dedupe"
_ "github.com/ncw/rclone/cmd/delete"
_ "github.com/ncw/rclone/cmd/genautocomplete"
_ "github.com/ncw/rclone/cmd/gendocs"
_ "github.com/ncw/rclone/cmd/info"
_ "github.com/ncw/rclone/cmd/listremotes"
_ "github.com/ncw/rclone/cmd/ls"
_ "github.com/ncw/rclone/cmd/ls2"
_ "github.com/ncw/rclone/cmd/lsd"
_ "github.com/ncw/rclone/cmd/lsjson"
_ "github.com/ncw/rclone/cmd/lsl"
_ "github.com/ncw/rclone/cmd/md5sum"
_ "github.com/ncw/rclone/cmd/memtest"
_ "github.com/ncw/rclone/cmd/mkdir"
_ "github.com/ncw/rclone/cmd/mount"
_ "github.com/ncw/rclone/cmd/move"
_ "github.com/ncw/rclone/cmd/moveto"
_ "github.com/ncw/rclone/cmd/ncdu"
_ "github.com/ncw/rclone/cmd/obscure"
_ "github.com/ncw/rclone/cmd/purge"
_ "github.com/ncw/rclone/cmd/rcat"
_ "github.com/ncw/rclone/cmd/rmdir"
_ "github.com/ncw/rclone/cmd/rmdirs"
_ "github.com/ncw/rclone/cmd/sha1sum"
_ "github.com/ncw/rclone/cmd/size"
_ "github.com/ncw/rclone/cmd/sync"
_ "github.com/ncw/rclone/cmd/tree"
_ "github.com/ncw/rclone/cmd/version"
)
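These blank imports do their work via init: each command package registers itself with the root command when imported. A minimal sketch of that registration pattern (register and registry are illustrative stand-ins for cmd.Root.AddCommand):

package main

import "fmt"

// registry stands in for the root command's list of subcommands.
var registry []string

func register(name string) { registry = append(registry, name) }

// Each command package would carry an init like these.
func init() { register("copy") }
func init() { register("sync") }

func main() {
	fmt.Println(registry) // [copy sync]
}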

View File

@@ -1,45 +0,0 @@
package cmd
// Atexit handling
import (
"os"
"os/signal"
"sync"
"github.com/ncw/rclone/fs"
)
var (
atExitFns []func()
atExitOnce sync.Once
atExitRegisterOnce sync.Once
)
// AtExit registers a function to be added on exit
func AtExit(fn func()) {
atExitFns = append(atExitFns, fn)
// Run AtExit handlers on SIGINT or SIGTERM so everything gets
// tidied up properly
atExitRegisterOnce.Do(func() {
go func() {
ch := make(chan os.Signal, 1)
signal.Notify(ch, os.Interrupt) // syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT
sig := <-ch
fs.Infof(nil, "Signal received: %s", sig)
runAtExitFunctions()
fs.Infof(nil, "Exiting...")
os.Exit(0)
}()
})
}
// Runs all the AtExit functions if they haven't been run already
func runAtExitFunctions() {
atExitOnce.Do(func() {
for _, fn := range atExitFns {
fn()
}
})
}
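A reduced sketch of why sync.Once matters here: the handlers can be reached from both the normal exit path and the signal goroutine, and must still run exactly once:

package main

import (
	"fmt"
	"sync"
)

var (
	atExitFns  []func()
	atExitOnce sync.Once
)

func AtExit(fn func()) { atExitFns = append(atExitFns, fn) }

// runAtExitFunctions is safe to call from several paths; sync.Once
// collapses the calls so each handler runs a single time.
func runAtExitFunctions() {
	atExitOnce.Do(func() {
		for _, fn := range atExitFns {
			fn()
		}
	})
}

func main() {
	AtExit(func() { fmt.Println("cleanup ran") })
	runAtExitFunctions() // runs the handler
	runAtExitFunctions() // no-op: already run
}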

View File

@@ -1,24 +0,0 @@
package authorize
import (
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefintion)
}
var commandDefintion = &cobra.Command{
Use: "authorize",
Short: `Remote authorization.`,
Long: `
Remote authorization. Used to authorize a remote or headless
rclone from a machine with a browser - use as instructed by
rclone config.`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 3, command, args)
fs.Authorize(args)
},
}

View File

@@ -1,80 +0,0 @@
package cat
import (
"io"
"io/ioutil"
"log"
"os"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs"
"github.com/spf13/cobra"
)
// Globals
var (
head = int64(0)
tail = int64(0)
offset = int64(0)
count = int64(-1)
discard = false
)
func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().Int64VarP(&head, "head", "", head, "Only print the first N characters.")
commandDefintion.Flags().Int64VarP(&tail, "tail", "", tail, "Only print the last N characters.")
commandDefintion.Flags().Int64VarP(&offset, "offset", "", offset, "Start printing at offset N (or from end if -ve).")
commandDefintion.Flags().Int64VarP(&count, "count", "", count, "Only print N characters.")
commandDefintion.Flags().BoolVarP(&discard, "discard", "", discard, "Discard the output instead of printing.")
}
var commandDefintion = &cobra.Command{
Use: "cat remote:path",
Short: `Concatenates any files and sends them to stdout.`,
Long: `
rclone cat sends any files to standard output.
You can use it like this to output a single file
rclone cat remote:path/to/file
Or like this to output any file in dir or subdirectories.
rclone cat remote:path/to/dir
Or like this to output any .txt files in dir or subdirectories.
rclone --include "*.txt" cat remote:path/to/dir
Use the --head flag to print characters only at the start, --tail for
the end and --offset and --count to print a section in the middle.
Note that if offset is negative it will count from the end, so
--offset -1 --count 1 is equivalent to --tail 1.
`,
Run: func(command *cobra.Command, args []string) {
usedOffset := offset != 0 || count >= 0
usedHead := head > 0
usedTail := tail > 0
if usedHead && usedTail || usedHead && usedOffset || usedTail && usedOffset {
log.Fatalf("Can only use one of --head, --tail or --offset with --count")
}
if head > 0 {
offset = 0
count = head
}
if tail > 0 {
offset = -tail
count = -1
}
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
var w io.Writer = os.Stdout
if discard {
w = ioutil.Discard
}
cmd.Run(false, false, command, func() error {
return fs.Cat(fsrc, w, offset, count)
})
},
}
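The flag handling above folds --head and --tail into the single (offset, count) pair passed to fs.Cat, where a negative offset counts from the end and a negative count means read to the end. A standalone sketch of that normalisation:

package main

import "fmt"

// normalise maps the head/tail flags onto (offset, count): negative
// offset counts from the end, count < 0 means read to the end.
func normalise(head, tail, offset, count int64) (int64, int64) {
	if head > 0 {
		return 0, head
	}
	if tail > 0 {
		return -tail, -1
	}
	return offset, count
}

func main() {
	fmt.Println(normalise(16, 0, 0, -1)) // 0 16  (first 16 bytes)
	fmt.Println(normalise(0, 1, 0, -1))  // -1 -1 (reads the last byte, same effect as --tail 1)
}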

View File

@@ -1,45 +0,0 @@
package check
import (
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs"
"github.com/spf13/cobra"
)
// Globals
var (
download = false
)
func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&download, "download", "", download, "Check by downloading rather than with hash.")
}
var commandDefintion = &cobra.Command{
Use: "check source:path dest:path",
Short: `Checks the files in the source and destination match.`,
Long: `
Checks the files in the source and destination match. It compares
sizes and hashes (MD5 or SHA1) and logs a report of files which don't
match. It doesn't alter the source or destination.
If you supply the --size-only flag, it will only compare the sizes not
the hashes as well. Use this for a quick check.
If you supply the --download flag, it will download the data from
both remotes and check them against each other on the fly. This can
be useful for remotes that don't support hashes or if you really want
to check all the data.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)
fsrc, fdst := cmd.NewFsSrcDst(args)
cmd.Run(false, false, command, func() error {
if download {
return fs.CheckDownload(fdst, fsrc)
}
return fs.Check(fdst, fsrc)
})
},
}

View File

@@ -1,27 +0,0 @@
package cleanup
import (
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefintion)
}
var commandDefintion = &cobra.Command{
Use: "cleanup remote:path",
Short: `Clean up the remote if possible`,
Long: `
Clean up the remote if possible. Empty the trash or delete old file
versions. Not supported by all remotes.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
cmd.Run(true, false, command, func() error {
return fs.CleanUp(fsrc)
})
},
}

View File

@@ -1,375 +0,0 @@
// Package cmd implements the rclone command
//
// It is in a sub package so its internals can be re-used elsewhere
package cmd
// FIXME only attach the remote flags when using a remote???
// would probably mean bringing all the flags in to here? Or define some flagsets in fs...
import (
"fmt"
"log"
"os"
"path"
"regexp"
"runtime"
"runtime/pprof"
"time"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/ncw/rclone/fs"
)
// Globals
var (
// Flags
cpuProfile = fs.StringP("cpuprofile", "", "", "Write cpu profile to file")
memProfile = fs.StringP("memprofile", "", "", "Write memory profile to file")
statsInterval = fs.DurationP("stats", "", time.Minute*1, "Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable)")
dataRateUnit = fs.StringP("stats-unit", "", "bytes", "Show data rate in stats as either 'bits' or 'bytes'/s")
version bool
retries = fs.IntP("retries", "", 3, "Retry operations this many times if they fail")
)
// Root is the main rclone command
var Root = &cobra.Command{
Use: "rclone",
Short: "Sync files and directories to and from local and remote object stores - " + fs.Version,
Long: `
Rclone is a command line program to sync files and directories to and
from various cloud storage systems and file transfer services, such as:
* Amazon Drive
* Amazon S3
* Backblaze B2
* Box
* Dropbox
* FTP
* Google Cloud Storage
* Google Drive
* HTTP
* Hubic
* Microsoft Azure Blob Storage
* Microsoft OneDrive
* Openstack Swift / Rackspace cloud files / Memset Memstore
* QingStor
* SFTP
* Yandex Disk
* The local filesystem
Features
* MD5/SHA1 hashes checked at all times for file integrity
* Timestamps preserved on files
* Partial syncs supported on a whole file basis
* Copy mode to just copy new/changed files
* Sync (one way) mode to make a directory identical
* Check mode to check for file hash equality
* Can sync to and from network, eg two different cloud accounts
See the home page for installation, usage, documentation, changelog
and configuration walkthroughs.
* https://rclone.org/
`,
PersistentPostRun: func(cmd *cobra.Command, args []string) {
fs.Debugf("rclone", "Version %q finishing with parameters %q", fs.Version, os.Args)
runAtExitFunctions()
},
}
// runRoot implements the main rclone command with no subcommands
func runRoot(cmd *cobra.Command, args []string) {
if version {
ShowVersion()
os.Exit(0)
} else {
_ = Root.Usage()
fmt.Fprintf(os.Stderr, "Command not found.\n")
os.Exit(1)
}
}
func init() {
Root.Run = runRoot
Root.Flags().BoolVarP(&version, "version", "V", false, "Print the version number")
cobra.OnInitialize(initConfig)
}
// ShowVersion prints the version to stdout
func ShowVersion() {
fmt.Printf("rclone %s\n", fs.Version)
fmt.Printf("- os/arch: %s/%s\n", runtime.GOOS, runtime.GOARCH)
fmt.Printf("- go version: %s\n", runtime.Version())
}
// newFsFile creates a dst Fs from a name but may point to a file.
//
// It returns a string with the file name if it points to a file
func newFsFile(remote string) (fs.Fs, string) {
fsInfo, configName, fsPath, err := fs.ParseRemote(remote)
if err != nil {
fs.Stats.Error()
log.Fatalf("Failed to create file system for %q: %v", remote, err)
}
f, err := fsInfo.NewFs(configName, fsPath)
switch err {
case fs.ErrorIsFile:
return f, path.Base(fsPath)
case nil:
return f, ""
default:
fs.Stats.Error()
log.Fatalf("Failed to create file system for %q: %v", remote, err)
}
return nil, ""
}
// newFsSrc creates a src Fs from a name
//
// It returns a string with the file name if limiting to one file
//
// This can point to a file
func newFsSrc(remote string) (fs.Fs, string) {
f, fileName := newFsFile(remote)
if fileName != "" {
if !fs.Config.Filter.InActive() {
fs.Stats.Error()
log.Fatalf("Can't limit to single files when using filters: %v", remote)
}
// Limit transfers to this file
err := fs.Config.Filter.AddFile(fileName)
if err != nil {
fs.Stats.Error()
log.Fatalf("Failed to limit to single file %q: %v", remote, err)
}
// Set --no-traverse as only one file
fs.Config.NoTraverse = true
}
return f, fileName
}
// newFsDst creates a dst Fs from a name
//
// This must point to a directory
func newFsDst(remote string) fs.Fs {
f, err := fs.NewFs(remote)
if err != nil {
fs.Stats.Error()
log.Fatalf("Failed to create file system for %q: %v", remote, err)
}
return f
}
// NewFsSrcDst creates a new src and dst fs from the arguments
func NewFsSrcDst(args []string) (fs.Fs, fs.Fs) {
fsrc, _ := newFsSrc(args[0])
fdst := newFsDst(args[1])
fs.CalculateModifyWindow(fdst, fsrc)
return fsrc, fdst
}
// NewFsSrcDstFiles creates a new src and dst fs from the arguments
// If src is a file then srcFileName and dstFileName will be non-empty
func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs, dstFileName string) {
fsrc, srcFileName = newFsSrc(args[0])
// If copying a file...
dstRemote := args[1]
if srcFileName != "" {
dstRemote, dstFileName = fs.RemoteSplit(dstRemote)
if dstRemote == "" {
dstRemote = "."
}
if dstFileName == "" {
log.Fatalf("%q is a directory", args[1])
}
}
fdst = newFsDst(dstRemote)
fs.CalculateModifyWindow(fdst, fsrc)
return
}
// NewFsSrc creates a new src fs from the arguments
func NewFsSrc(args []string) fs.Fs {
fsrc, _ := newFsSrc(args[0])
fs.CalculateModifyWindow(fsrc)
return fsrc
}
// NewFsDst creates a new dst fs from the arguments
//
// Dst fs-es can't point to single files
func NewFsDst(args []string) fs.Fs {
fdst := newFsDst(args[0])
fs.CalculateModifyWindow(fdst)
return fdst
}
// NewFsDstFile creates a new dst fs with a destination file name from the arguments
func NewFsDstFile(args []string) (fdst fs.Fs, dstFileName string) {
dstRemote, dstFileName := fs.RemoteSplit(args[0])
if dstRemote == "" {
dstRemote = "."
}
if dstFileName == "" {
log.Fatalf("%q is a directory", args[0])
}
fdst = newFsDst(dstRemote)
fs.CalculateModifyWindow(fdst)
return
}
// ShowStats returns true if the user added a `--stats` flag to the command line.
//
// This is called by Run to override the default value of the
// showStats passed in.
func ShowStats() bool {
statsIntervalFlag := pflag.Lookup("stats")
return statsIntervalFlag != nil && statsIntervalFlag.Changed
}
// Run the function with stats and retries if required
func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
var err error
var stopStats chan struct{}
if !showStats && ShowStats() {
showStats = true
}
if showStats {
stopStats = StartStats()
}
for try := 1; try <= *retries; try++ {
err = f()
if !Retry || (err == nil && !fs.Stats.Errored()) {
if try > 1 {
fs.Errorf(nil, "Attempt %d/%d succeeded", try, *retries)
}
break
}
if fs.IsFatalError(err) {
fs.Errorf(nil, "Fatal error received - not attempting retries")
break
}
if fs.IsNoRetryError(err) {
fs.Errorf(nil, "Can't retry this error - not attempting retries")
break
}
if err != nil {
fs.Errorf(nil, "Attempt %d/%d failed with %d errors and: %v", try, *retries, fs.Stats.GetErrors(), err)
} else {
fs.Errorf(nil, "Attempt %d/%d failed with %d errors", try, *retries, fs.Stats.GetErrors())
}
if try < *retries {
fs.Stats.ResetErrors()
}
}
if showStats {
close(stopStats)
}
if err != nil {
log.Fatalf("Failed to %s: %v", cmd.Name(), err)
}
if showStats && (fs.Stats.Errored() || *statsInterval > 0) {
fs.Stats.Log()
}
fs.Debugf(nil, "Go routines at exit %d\n", runtime.NumGoroutine())
if fs.Stats.Errored() {
os.Exit(1)
}
}
// CheckArgs checks there are enough arguments and prints a message if not
func CheckArgs(MinArgs, MaxArgs int, cmd *cobra.Command, args []string) {
if len(args) < MinArgs {
_ = cmd.Usage()
fmt.Fprintf(os.Stderr, "Command %s needs %d arguments mininum\n", cmd.Name(), MinArgs)
os.Exit(1)
} else if len(args) > MaxArgs {
_ = cmd.Usage()
fmt.Fprintf(os.Stderr, "Command %s needs %d arguments maximum\n", cmd.Name(), MaxArgs)
os.Exit(1)
}
}
// StartStats prints the stats every statsInterval
//
// It returns a channel which should be closed to stop the stats.
func StartStats() chan struct{} {
stopStats := make(chan struct{})
if *statsInterval > 0 {
go func() {
ticker := time.NewTicker(*statsInterval)
for {
select {
case <-ticker.C:
fs.Stats.Log()
case <-stopStats:
ticker.Stop()
return
}
}
}()
}
return stopStats
}
// initConfig is run by cobra after initialising the flags
func initConfig() {
// Start the logger
fs.InitLogging()
// Load the rest of the config now we have started the logger
fs.LoadConfig()
// Write the args for debug purposes
fs.Debugf("rclone", "Version %q starting with parameters %q", fs.Version, os.Args)
// Setup CPU profiling if desired
if *cpuProfile != "" {
fs.Infof(nil, "Creating CPU profile %q\n", *cpuProfile)
f, err := os.Create(*cpuProfile)
if err != nil {
fs.Stats.Error()
log.Fatal(err)
}
err = pprof.StartCPUProfile(f)
if err != nil {
fs.Stats.Error()
log.Fatal(err)
}
AtExit(func() {
pprof.StopCPUProfile()
})
}
// Setup memory profiling if desired
if *memProfile != "" {
AtExit(func() {
fs.Infof(nil, "Saving Memory profile %q\n", *memProfile)
f, err := os.Create(*memProfile)
if err != nil {
fs.Stats.Error()
log.Fatal(err)
}
err = pprof.WriteHeapProfile(f)
if err != nil {
fs.Stats.Error()
log.Fatal(err)
}
err = f.Close()
if err != nil {
fs.Stats.Error()
log.Fatal(err)
}
})
}
if m, _ := regexp.MatchString("^(bits|bytes)$", *dataRateUnit); !m {
fs.Errorf(nil, "Invalid unit passed to --stats-unit. Defaulting to bytes.")
fs.Config.DataRateUnit = "bytes"
} else {
fs.Config.DataRateUnit = *dataRateUnit
}
}
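Run's retry loop is the part worth isolating: retry up to the limit, but stop immediately on success or on an error classed as fatal. A reduced sketch, with errFatal standing in for what fs.IsFatalError would match:

package main

import (
	"errors"
	"fmt"
)

// errFatal stands in for errors fs.IsFatalError would match.
var errFatal = errors.New("fatal")

// runWithRetries mirrors the shape of Run above.
func runWithRetries(retries int, f func() error) error {
	var err error
	for try := 1; try <= retries; try++ {
		err = f()
		if err == nil {
			if try > 1 {
				fmt.Printf("attempt %d/%d succeeded\n", try, retries)
			}
			return nil
		}
		if err == errFatal {
			fmt.Println("fatal error received - not attempting retries")
			return err
		}
		fmt.Printf("attempt %d/%d failed: %v\n", try, retries, err)
	}
	return err
}

func main() {
	calls := 0
	err := runWithRetries(3, func() error {
		calls++
		if calls < 3 {
			return errors.New("transient")
		}
		return nil
	})
	fmt.Println("err:", err, "calls:", calls) // err: <nil> calls: 3
}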

View File

@@ -1,663 +0,0 @@
// +build cmount
// +build cgo
// +build linux darwin freebsd windows
package cmount
import (
"os"
"path"
"runtime"
"sync"
"time"
"github.com/billziss-gh/cgofuse/fuse"
"github.com/ncw/rclone/cmd/mountlib"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
const fhUnset = ^uint64(0)
// FS represents the top level filing system
type FS struct {
FS *mountlib.FS
f fs.Fs
openDirs *openFiles
openFilesWr *openFiles
openFilesRd *openFiles
ready chan (struct{})
}
// NewFS makes a new FS
func NewFS(f fs.Fs) *FS {
fsys := &FS{
FS: mountlib.NewFS(f),
f: f,
openDirs: newOpenFiles(0x01),
openFilesWr: newOpenFiles(0x02),
openFilesRd: newOpenFiles(0x03),
ready: make(chan (struct{})),
}
return fsys
}
type openFiles struct {
mu sync.Mutex
mark uint8
nodes []mountlib.Noder
}
func newOpenFiles(mark uint8) *openFiles {
return &openFiles{
mark: mark,
}
}
// Open a node returning a file handle
func (of *openFiles) Open(node mountlib.Noder) (fh uint64) {
of.mu.Lock()
defer of.mu.Unlock()
var i int
var oldNode mountlib.Noder
for i, oldNode = range of.nodes {
if oldNode == nil {
of.nodes[i] = node
goto found
}
}
of.nodes = append(of.nodes, node)
i = len(of.nodes) - 1
found:
return uint64((i << 8) | int(of.mark))
}
// InRange to see if this fh could be one of ours
func (of *openFiles) InRange(fh uint64) bool {
return uint8(fh) == of.mark
}
// get the node for fh, call with the lock held
func (of *openFiles) get(fh uint64) (i int, node mountlib.Noder, errc int) {
receivedMark := uint8(fh)
if receivedMark != of.mark {
fs.Debugf(nil, "Bad file handle: bad mark 0x%X != 0x%X: 0x%X", receivedMark, of.mark, fh)
return i, nil, -fuse.EBADF
}
i64 := fh >> 8
if i64 >= uint64(len(of.nodes)) {
fs.Debugf(nil, "Bad file handle: too big: 0x%X", fh)
return i, nil, -fuse.EBADF
}
i = int(i64)
node = of.nodes[i]
if node == nil {
fs.Debugf(nil, "Bad file handle: nil node: 0x%X", fh)
return i, nil, -fuse.EBADF
}
return i, node, 0
}
// Get the node for the file handle
func (of *openFiles) Get(fh uint64) (node mountlib.Noder, errc int) {
of.mu.Lock()
_, node, errc = of.get(fh)
of.mu.Unlock()
return
}
// Close the node
func (of *openFiles) Close(fh uint64) (errc int) {
of.mu.Lock()
i, _, errc := of.get(fh)
if errc == 0 {
of.nodes[i] = nil
}
of.mu.Unlock()
return
}
// lookup a Node given a path
func (fsys *FS) lookupNode(path string) (node mountlib.Node, errc int) {
node, err := fsys.FS.Lookup(path)
return node, translateError(err)
}
// lookup a Dir given a path
func (fsys *FS) lookupDir(path string) (dir *mountlib.Dir, errc int) {
node, errc := fsys.lookupNode(path)
if errc != 0 {
return nil, errc
}
dir, ok := node.(*mountlib.Dir)
if !ok {
return nil, -fuse.ENOTDIR
}
return dir, 0
}
// lookup a parent Dir given a path returning the dir and the leaf
func (fsys *FS) lookupParentDir(filePath string) (leaf string, dir *mountlib.Dir, errc int) {
parentDir, leaf := path.Split(filePath)
dir, errc = fsys.lookupDir(parentDir)
return leaf, dir, errc
}
// lookup a File given a path
func (fsys *FS) lookupFile(path string) (file *mountlib.File, errc int) {
node, errc := fsys.lookupNode(path)
if errc != 0 {
return nil, errc
}
file, ok := node.(*mountlib.File)
if !ok {
return nil, -fuse.EISDIR
}
return file, 0
}
// Get the underlying openFile handle from the file handle
func (fsys *FS) getOpenFilesFromFh(fh uint64) (of *openFiles, errc int) {
switch {
case fsys.openFilesRd.InRange(fh):
return fsys.openFilesRd, 0
case fsys.openFilesWr.InRange(fh):
return fsys.openFilesWr, 0
case fsys.openDirs.InRange(fh):
return fsys.openDirs, 0
}
return nil, -fuse.EBADF
}
// Get the underlying handle from the file handle
func (fsys *FS) getHandleFromFh(fh uint64) (handle mountlib.Noder, errc int) {
of, errc := fsys.getOpenFilesFromFh(fh)
if errc != 0 {
return nil, errc
}
return of.Get(fh)
}
// get a node from the path or from the fh if not fhUnset
func (fsys *FS) getNode(path string, fh uint64) (node mountlib.Node, errc int) {
if fh == fhUnset {
node, errc = fsys.lookupNode(path)
} else {
var n mountlib.Noder
n, errc = fsys.getHandleFromFh(fh)
if errc == 0 {
node = n.Node()
}
}
return
}
// stat fills up the stat block for Node
func (fsys *FS) stat(node mountlib.Node, stat *fuse.Stat_t) (errc int) {
var Size uint64
var Blocks uint64
var modTime time.Time
var Mode os.FileMode
switch x := node.(type) {
case *mountlib.Dir:
modTime = x.ModTime()
Mode = mountlib.DirPerms | fuse.S_IFDIR
case *mountlib.File:
var err error
modTime, Size, Blocks, err = x.Attr(mountlib.NoModTime)
if err != nil {
return translateError(err)
}
Mode = mountlib.FilePerms | fuse.S_IFREG
}
//stat.Dev = 1
stat.Ino = node.Inode() // FIXME do we need to set the inode number?
stat.Mode = uint32(Mode)
stat.Nlink = 1
stat.Uid = mountlib.UID
stat.Gid = mountlib.GID
//stat.Rdev
stat.Size = int64(Size)
t := fuse.NewTimespec(modTime)
stat.Atim = t
stat.Mtim = t
stat.Ctim = t
stat.Blksize = 512
stat.Blocks = int64(Blocks)
stat.Birthtim = t
// fs.Debugf(nil, "stat = %+v", *stat)
return 0
}
// Init is called after the filesystem is ready
func (fsys *FS) Init() {
defer fs.Trace(fsys.f, "")("")
close(fsys.ready)
}
// Destroy is called when it is unmounted (note that depending on how
// the file system is terminated the file system may not receive the
// Destroy call).
func (fsys *FS) Destroy() {
defer fs.Trace(fsys.f, "")("")
}
// Getattr reads the attributes for path
func (fsys *FS) Getattr(path string, stat *fuse.Stat_t, fh uint64) (errc int) {
defer fs.Trace(path, "fh=0x%X", fh)("errc=%v", &errc)
node, errc := fsys.getNode(path, fh)
if errc == 0 {
errc = fsys.stat(node, stat)
}
return
}
// Opendir opens path as a directory
func (fsys *FS) Opendir(path string) (errc int, fh uint64) {
defer fs.Trace(path, "")("errc=%d, fh=0x%X", &errc, &fh)
dir, errc := fsys.lookupDir(path)
if errc == 0 {
fh = fsys.openDirs.Open(dir)
} else {
fh = fhUnset
}
return
}
// Readdir reads the directory at dirPath
func (fsys *FS) Readdir(dirPath string,
fill func(name string, stat *fuse.Stat_t, ofst int64) bool,
ofst int64,
fh uint64) (errc int) {
itemsRead := -1
defer fs.Trace(dirPath, "ofst=%d, fh=0x%X", ofst, fh)("items=%d, errc=%d", &itemsRead, &errc)
node, errc := fsys.openDirs.Get(fh)
if errc != 0 {
return errc
}
dir, ok := node.(*mountlib.Dir)
if !ok {
return -fuse.ENOTDIR
}
items, err := dir.ReadDirAll()
if err != nil {
return translateError(err)
}
// Optionally, create a struct stat that describes the file as
// for getattr (but FUSE only looks at st_ino and the
// file-type bits of st_mode).
//
// FIXME If you call host.SetCapReaddirPlus() then WinFsp will
// use the full stat information - a useful optimization on
// Windows.
//
// NB we are using the first mode for readdir: The readdir
// implementation ignores the offset parameter, and passes
// zero to the filler function's offset. The filler function
// will not return '1' (unless an error happens), so the whole
// directory is read in a single readdir operation.
fill(".", nil, 0)
fill("..", nil, 0)
for _, item := range items {
name := path.Base(item.Obj.Remote())
fill(name, nil, 0)
}
itemsRead = len(items)
return 0
}
// Releasedir finished reading the directory
func (fsys *FS) Releasedir(path string, fh uint64) (errc int) {
defer fs.Trace(path, "fh=0x%X", fh)("errc=%d", &errc)
return fsys.openDirs.Close(fh)
}
// Statfs reads overall stats on the filesystem
func (fsys *FS) Statfs(path string, stat *fuse.Statfs_t) (errc int) {
defer fs.Trace(path, "")("stat=%+v, errc=%d", stat, &errc)
const blockSize = 4096
fsBlocks := uint64(1 << 50)
if runtime.GOOS == "windows" {
fsBlocks = (1 << 43) - 1
}
stat.Blocks = fsBlocks // Total data blocks in file system.
stat.Bfree = fsBlocks // Free blocks in file system.
stat.Bavail = fsBlocks // Free blocks in file system if you're not root.
stat.Files = 1E9 // Total files in file system.
stat.Ffree = 1E9 // Free files in file system.
stat.Bsize = blockSize // Block size
stat.Namemax = 255 // Maximum file name length?
stat.Frsize = blockSize // Fragment size, smallest addressable data size in the file system.
return 0
}
// Open opens a file
func (fsys *FS) Open(path string, flags int) (errc int, fh uint64) {
defer fs.Trace(path, "flags=0x%X", flags)("errc=%d, fh=0x%X", &errc, &fh)
file, errc := fsys.lookupFile(path)
if errc != 0 {
return errc, fhUnset
}
rdwrMode := flags & fuse.O_ACCMODE
var err error
var handle mountlib.Noder
switch {
case rdwrMode == fuse.O_RDONLY:
handle, err = file.OpenRead()
if err != nil {
return translateError(err), fhUnset
}
return 0, fsys.openFilesRd.Open(handle)
case rdwrMode == fuse.O_WRONLY || (rdwrMode == fuse.O_RDWR && (flags&fuse.O_TRUNC) != 0):
handle, err = file.OpenWrite()
if err != nil {
return translateError(err), fhUnset
}
return 0, fsys.openFilesWr.Open(handle)
case rdwrMode == fuse.O_RDWR:
fs.Errorf(path, "Can't open for Read and Write")
return -fuse.EPERM, fhUnset
}
fs.Errorf(path, "Can't figure out how to open with flags: 0x%X", flags)
return -fuse.EPERM, fhUnset
}
// Create creates and opens a file.
func (fsys *FS) Create(filePath string, flags int, mode uint32) (errc int, fh uint64) {
defer fs.Trace(filePath, "flags=0x%X, mode=0%o", flags, mode)("errc=%d, fh=0x%X", &errc, &fh)
leaf, parentDir, errc := fsys.lookupParentDir(filePath)
if errc != 0 {
return errc, fhUnset
}
_, handle, err := parentDir.Create(leaf)
if err != nil {
return translateError(err), fhUnset
}
return 0, fsys.openFilesWr.Open(handle)
}
// Truncate truncates a file to size
func (fsys *FS) Truncate(path string, size int64, fh uint64) (errc int) {
defer fs.Trace(path, "size=%d, fh=0x%X", size, fh)("errc=%d", &errc)
node, errc := fsys.getNode(path, fh)
if errc != 0 {
return errc
}
file, ok := node.(*mountlib.File)
if !ok {
return -fuse.EIO
}
// Read the size so far
_, currentSize, _, err := file.Attr(true)
if err != nil {
return translateError(err)
}
fs.Debugf(path, "truncate to %d, currentSize %d", size, currentSize)
if int64(currentSize) != size {
fs.Errorf(path, "Can't truncate files")
return -fuse.EPERM
}
return 0
}
func (fsys *FS) Read(path string, buff []byte, ofst int64, fh uint64) (n int) {
defer fs.Trace(path, "ofst=%d, fh=0x%X", ofst, fh)("n=%d", &n)
// FIXME detect seek
handle, errc := fsys.openFilesRd.Get(fh)
if errc != 0 {
return errc
}
rfh, ok := handle.(*mountlib.ReadFileHandle)
if !ok {
// Can only read from read file handle
return -fuse.EIO
}
data, err := rfh.Read(int64(len(buff)), ofst)
if err != nil {
return translateError(err)
}
n = copy(buff, data)
return n
}
func (fsys *FS) Write(path string, buff []byte, ofst int64, fh uint64) (n int) {
defer fs.Trace(path, "ofst=%d, fh=0x%X", ofst, fh)("n=%d", &n)
// FIXME detect seek
handle, errc := fsys.openFilesWr.Get(fh)
if errc != 0 {
return errc
}
wfh, ok := handle.(*mountlib.WriteFileHandle)
if !ok {
// Can only write to write file handle
return -fuse.EIO
}
// FIXME made Write return int and Read take int since must fit in RAM
n64, err := wfh.Write(buff, ofst)
if err != nil {
return translateError(err)
}
return int(n64)
}
// Flush flushes an open file descriptor or path
func (fsys *FS) Flush(path string, fh uint64) (errc int) {
defer fs.Trace(path, "fh=0x%X", fh)("errc=%d", &errc)
handle, errc := fsys.getHandleFromFh(fh)
if errc != 0 {
return errc
}
var err error
switch x := handle.(type) {
case *mountlib.ReadFileHandle:
err = x.Flush()
case *mountlib.WriteFileHandle:
err = x.Flush()
default:
return -fuse.EIO
}
return translateError(err)
}
// Release closes the file if still open
func (fsys *FS) Release(path string, fh uint64) (errc int) {
defer fs.Trace(path, "fh=0x%X", fh)("errc=%d", &errc)
of, errc := fsys.getOpenFilesFromFh(fh)
if errc != 0 {
return errc
}
handle, errc := of.Get(fh)
if errc != 0 {
return errc
}
_ = of.Close(fh)
var err error
switch x := handle.(type) {
case *mountlib.ReadFileHandle:
err = x.Release()
case *mountlib.WriteFileHandle:
err = x.Release()
default:
return -fuse.EIO
}
return translateError(err)
}
// Unlink removes a file.
func (fsys *FS) Unlink(filePath string) (errc int) {
defer fs.Trace(filePath, "")("errc=%d", &errc)
leaf, parentDir, errc := fsys.lookupParentDir(filePath)
if errc != 0 {
return errc
}
return translateError(parentDir.Remove(leaf))
}
// Mkdir creates a directory.
func (fsys *FS) Mkdir(dirPath string, mode uint32) (errc int) {
defer fs.Trace(dirPath, "mode=0%o", mode)("errc=%d", &errc)
leaf, parentDir, errc := fsys.lookupParentDir(dirPath)
if errc != 0 {
return errc
}
_, err := parentDir.Mkdir(leaf)
return translateError(err)
}
// Rmdir removes a directory
func (fsys *FS) Rmdir(dirPath string) (errc int) {
defer fs.Trace(dirPath, "")("errc=%d", &errc)
leaf, parentDir, errc := fsys.lookupParentDir(dirPath)
if errc != 0 {
return errc
}
return translateError(parentDir.Remove(leaf))
}
// Rename renames a file.
func (fsys *FS) Rename(oldPath string, newPath string) (errc int) {
defer fs.Trace(oldPath, "newPath=%q", newPath)("errc=%d", &errc)
oldLeaf, oldParentDir, errc := fsys.lookupParentDir(oldPath)
if errc != 0 {
return errc
}
newLeaf, newParentDir, errc := fsys.lookupParentDir(newPath)
if errc != 0 {
return errc
}
return translateError(oldParentDir.Rename(oldLeaf, newLeaf, newParentDir))
}
// Utimens changes the access and modification times of a file.
func (fsys *FS) Utimens(path string, tmsp []fuse.Timespec) (errc int) {
defer fs.Trace(path, "tmsp=%+v", tmsp)("errc=%d", &errc)
node, errc := fsys.lookupNode(path)
if errc != 0 {
return errc
}
var t time.Time
if tmsp == nil || len(tmsp) < 2 {
t = time.Now()
} else {
t = tmsp[1].Time()
}
var err error
switch x := node.(type) {
case *mountlib.Dir:
err = x.SetModTime(t)
case *mountlib.File:
err = x.SetModTime(t)
}
return translateError(err)
}
// Mknod creates a file node.
func (fsys *FS) Mknod(path string, mode uint32, dev uint64) (errc int) {
defer fs.Trace(path, "mode=0x%X, dev=0x%X", mode, dev)("errc=%d", &errc)
return -fuse.ENOSYS
}
// Fsync synchronizes file contents.
func (fsys *FS) Fsync(path string, datasync bool, fh uint64) (errc int) {
defer fs.Trace(path, "datasync=%v, fh=0x%X", datasync, fh)("errc=%d", &errc)
// This is a no-op for rclone
return 0
}
// Link creates a hard link to a file.
func (fsys *FS) Link(oldpath string, newpath string) (errc int) {
defer fs.Trace(oldpath, "newpath=%q", newpath)("errc=%d", &errc)
return -fuse.ENOSYS
}
// Symlink creates a symbolic link.
func (fsys *FS) Symlink(target string, newpath string) (errc int) {
defer fs.Trace(target, "newpath=%q", newpath)("errc=%d", &errc)
return -fuse.ENOSYS
}
// Readlink reads the target of a symbolic link.
func (fsys *FS) Readlink(path string) (errc int, linkPath string) {
defer fs.Trace(path, "")("linkPath=%q, errc=%d", &linkPath, &errc)
return -fuse.ENOSYS, ""
}
// Chmod changes the permission bits of a file.
func (fsys *FS) Chmod(path string, mode uint32) (errc int) {
defer fs.Trace(path, "mode=0%o", mode)("errc=%d", &errc)
// This is a no-op for rclone
return 0
}
// Chown changes the owner and group of a file.
func (fsys *FS) Chown(path string, uid uint32, gid uint32) (errc int) {
defer fs.Trace(path, "uid=%d, gid=%d", uid, gid)("errc=%d", &errc)
// This is a no-op for rclone
return 0
}
// Access checks file access permissions.
func (fsys *FS) Access(path string, mask uint32) (errc int) {
defer fs.Trace(path, "mask=0%o", mask)("errc=%d", &errc)
// This is a no-op for rclone
return 0
}
// Fsyncdir synchronizes directory contents.
func (fsys *FS) Fsyncdir(path string, datasync bool, fh uint64) (errc int) {
defer fs.Trace(path, "datasync=%v, fh=0x%X", datasync, fh)("errc=%d", &errc)
// This is a no-op for rclone
return 0
}
// Setxattr sets extended attributes.
func (fsys *FS) Setxattr(path string, name string, value []byte, flags int) (errc int) {
return -fuse.ENOSYS
}
// Getxattr gets extended attributes.
func (fsys *FS) Getxattr(path string, name string) (errc int, value []byte) {
return -fuse.ENOSYS, nil
}
// Removexattr removes extended attributes.
func (fsys *FS) Removexattr(path string, name string) (errc int) {
return -fuse.ENOSYS
}
// Listxattr lists extended attributes.
func (fsys *FS) Listxattr(path string, fill func(name string) bool) (errc int) {
return -fuse.ENOSYS
}
// Translate errors from mountlib
func translateError(err error) (errc int) {
if err == nil {
return 0
}
cause := errors.Cause(err)
if mErr, ok := cause.(mountlib.Error); ok {
switch mErr {
case mountlib.OK:
return 0
case mountlib.ENOENT:
return -fuse.ENOENT
case mountlib.ENOTEMPTY:
return -fuse.ENOTEMPTY
case mountlib.EEXIST:
return -fuse.EEXIST
case mountlib.ESPIPE:
return -fuse.ESPIPE
case mountlib.EBADF:
return -fuse.EBADF
case mountlib.EROFS:
return -fuse.EROFS
}
}
fs.Errorf(nil, "IO error: %v", err)
return -fuse.EIO
}
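The openFiles table above packs a slot index and a one-byte mark into each uint64 handle ((i << 8) | mark), so the low byte alone routes a handle back to the right table. A minimal sketch of that encoding:

package main

import "fmt"

const markRd = 0x03 // the read table's mark in the listing above

// pack and unpack mirror the handle scheme: low byte is the table
// mark, the remaining bits are the slot index.
func pack(i int, mark uint8) uint64 { return uint64(i<<8) | uint64(mark) }

func unpack(fh uint64) (int, uint8) { return int(fh >> 8), uint8(fh) }

func main() {
	fh := pack(42, markRd)
	slot, mark := unpack(fh)
	fmt.Printf("fh=0x%X slot=%d mark=0x%X\n", fh, slot, mark) // fh=0x2A03 slot=42 mark=0x3
}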

View File

@@ -1,203 +0,0 @@
// Package cmount implements a FUSE mounting system for rclone remotes.
//
// This uses the cgo based cgofuse library
// +build cmount
// +build cgo
// +build linux darwin freebsd windows
package cmount
import (
"fmt"
"os"
"os/signal"
"runtime"
"syscall"
"time"
"github.com/billziss-gh/cgofuse/fuse"
"github.com/ncw/rclone/cmd/mountlib"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
func init() {
name := "cmount"
if runtime.GOOS == "windows" {
name = "mount"
}
mountlib.NewMountCommand(name, Mount)
}
// mountOptions configures the options from the command line flags
func mountOptions(device string, mountpoint string) (options []string) {
// Options
options = []string{
"-o", "fsname=" + device,
"-o", "subtype=rclone",
"-o", fmt.Sprintf("max_readahead=%d", mountlib.MaxReadAhead),
}
if mountlib.DebugFUSE {
options = append(options, "-o", "debug")
}
// OSX options
if runtime.GOOS == "darwin" {
options = append(options, "-o", "volname="+device)
options = append(options, "-o", "noappledouble")
options = append(options, "-o", "noapplexattr")
}
// Windows options
if runtime.GOOS == "windows" {
// These cause WinFsp to use the current user
options = append(options, "-o", "uid=-1")
options = append(options, "-o", "gid=-1")
options = append(options, "--FileSystemName=rclone")
}
if mountlib.AllowNonEmpty {
options = append(options, "-o", "nonempty")
}
if mountlib.AllowOther {
options = append(options, "-o", "allow_other")
}
if mountlib.AllowRoot {
options = append(options, "-o", "allow_root")
}
if mountlib.DefaultPermissions {
options = append(options, "-o", "default_permissions")
}
if mountlib.ReadOnly {
options = append(options, "-o", "ro")
}
if mountlib.WritebackCache {
// FIXME? options = append(options, "-o", WritebackCache())
}
for _, option := range *mountlib.ExtraOptions {
options = append(options, "-o", option)
}
for _, option := range *mountlib.ExtraFlags {
options = append(options, option)
}
return options
}
// mount the file system
//
// The mount point will be ready when this returns.
//
// returns an error, and an error channel for the serve process to
// report an error when fusermount is called.
func mount(f fs.Fs, mountpoint string) (*mountlib.FS, <-chan error, func() error, error) {
fs.Debugf(f, "Mounting on %q", mountpoint)
// Check the mountpoint - on Windows the mountpoint mustn't exist before the mount
if runtime.GOOS != "windows" {
fi, err := os.Stat(mountpoint)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "mountpoint")
}
if !fi.IsDir() {
return nil, nil, nil, errors.New("mountpoint is not a directory")
}
}
// Create underlying FS
fsys := NewFS(f)
host := fuse.NewFileSystemHost(fsys)
// Create options
options := mountOptions(f.Name()+":"+f.Root(), mountpoint)
fs.Debugf(f, "Mounting with options: %q", options)
// Serve the mount point in the background returning error to errChan
errChan := make(chan error, 1)
go func() {
var err error
ok := host.Mount(mountpoint, options)
if !ok {
err = errors.New("mount failed")
fs.Errorf(f, "Mount failed")
}
errChan <- err
}()
// unmount
unmount := func() error {
fs.Debugf(nil, "Calling host.Unmount")
if host.Unmount() {
fs.Debugf(nil, "host.Unmount succeeded")
return nil
}
fs.Debugf(nil, "host.Unmount failed")
return errors.New("host unmount failed")
}
// Wait for the filesystem to become ready, checking the file
// system didn't blow up before starting
select {
case err := <-errChan:
err = errors.Wrap(err, "mount stopped before calling Init")
return nil, nil, nil, err
case <-fsys.ready:
}
// Wait for the mount point to be available on Windows
// On Windows the Init signal comes slightly before the mount is ready
if runtime.GOOS == "windows" {
const totalWait = 10 * time.Second
const individualWait = 10 * time.Millisecond
for i := 0; i < int(totalWait/individualWait); i++ {
_, err := os.Stat(mountpoint)
if err == nil {
goto found
}
time.Sleep(individualWait)
}
fs.Errorf(nil, "mountpoint %q didn't became available after %v - continuing anyway", mountpoint, totalWait)
found:
}
return fsys.FS, errChan, unmount, nil
}
// Mount mounts the remote at mountpoint.
//
// If noModTime is set then the mount doesn't read or write the
// modification times of files (which can speed things up).
func Mount(f fs.Fs, mountpoint string) error {
// Mount it
FS, errChan, _, err := mount(f, mountpoint)
if err != nil {
return errors.Wrap(err, "failed to mount FUSE fs")
}
// Note cgofuse unmounts the fs on SIGINT etc
sigHup := make(chan os.Signal, 1)
signal.Notify(sigHup, syscall.SIGHUP)
waitloop:
for {
select {
// umount triggered outside the app
case err = <-errChan:
break waitloop
// user sent SIGHUP to clear the cache
case <-sigHup:
root, err := FS.Root()
if err != nil {
fs.Errorf(f, "Error reading root: %v", err)
} else {
root.ForgetAll()
}
}
}
if err != nil {
return errors.Wrap(err, "failed to umount FUSE fs")
}
return nil
}
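The start-up handshake in mount is a common Go shape: run the blocking serve call in a goroutine, then select between its error channel and a readiness channel so a crash before Init is reported cleanly. A reduced sketch with stand-ins for host.Mount and fsys.ready:

package main

import (
	"errors"
	"fmt"
	"time"
)

func main() {
	errChan := make(chan error, 1)
	ready := make(chan struct{})

	go func() {
		close(ready) // stand-in for Init being called
		time.Sleep(10 * time.Millisecond)
		errChan <- errors.New("unmounted") // stand-in for serve returning
	}()

	select {
	case err := <-errChan:
		fmt.Println("mount stopped before calling Init:", err)
	case <-ready:
		fmt.Println("mount is ready")
	}
	fmt.Println("serve finished:", <-errChan)
}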

View File

@@ -1,42 +0,0 @@
// +build cmount
// +build cgo
// +build linux darwin freebsd windows
package cmount
import (
"runtime"
"testing"
"github.com/ncw/rclone/cmd/mountlib/mounttest"
)
func notWin(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("not running on windows")
}
}
func TestMain(m *testing.M) { mounttest.TestMain(m, mount) }
func TestDirLs(t *testing.T) { mounttest.TestDirLs(t) }
func TestDirCreateAndRemoveDir(t *testing.T) { notWin(t); mounttest.TestDirCreateAndRemoveDir(t) }
func TestDirCreateAndRemoveFile(t *testing.T) { notWin(t); mounttest.TestDirCreateAndRemoveFile(t) }
func TestDirRenameFile(t *testing.T) { notWin(t); mounttest.TestDirRenameFile(t) }
func TestDirRenameEmptyDir(t *testing.T) { notWin(t); mounttest.TestDirRenameEmptyDir(t) }
func TestDirRenameFullDir(t *testing.T) { notWin(t); mounttest.TestDirRenameFullDir(t) }
func TestDirModTime(t *testing.T) { notWin(t); mounttest.TestDirModTime(t) }
func TestDirCacheFlush(t *testing.T) { notWin(t); mounttest.TestDirCacheFlush(t) }
func TestDirCacheFlushOnDirRename(t *testing.T) { notWin(t); mounttest.TestDirCacheFlushOnDirRename(t) }
func TestFileModTime(t *testing.T) { notWin(t); mounttest.TestFileModTime(t) }
func TestFileModTimeWithOpenWriters(t *testing.T) {} // FIXME mounttest.TestFileModTimeWithOpenWriters(t)
func TestMount(t *testing.T) { notWin(t); mounttest.TestMount(t) }
func TestRoot(t *testing.T) { notWin(t); mounttest.TestRoot(t) }
func TestReadByByte(t *testing.T) { notWin(t); mounttest.TestReadByByte(t) }
func TestReadChecksum(t *testing.T) { notWin(t); mounttest.TestReadChecksum(t) }
func TestReadFileDoubleClose(t *testing.T) { notWin(t); mounttest.TestReadFileDoubleClose(t) }
func TestReadSeek(t *testing.T) { notWin(t); mounttest.TestReadSeek(t) }
func TestWriteFileNoWrite(t *testing.T) { notWin(t); mounttest.TestWriteFileNoWrite(t) }
func TestWriteFileWrite(t *testing.T) { notWin(t); mounttest.TestWriteFileWrite(t) }
func TestWriteFileOverwrite(t *testing.T) { notWin(t); mounttest.TestWriteFileOverwrite(t) }
func TestWriteFileDoubleClose(t *testing.T) { notWin(t); mounttest.TestWriteFileDoubleClose(t) }
func TestWriteFileFsync(t *testing.T) { notWin(t); mounttest.TestWriteFileFsync(t) }

View File

@@ -1,6 +0,0 @@
// Build for cmount for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build !linux,!darwin,!freebsd,!windows !cgo !cmount
package cmount

View File

@@ -1,48 +0,0 @@
package config
import (
"fmt"
"os"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefintion)
}
var commandDefintion = &cobra.Command{
Use: "config [function]",
Short: `Enter an interactive configuration session.`,
Long: "`rclone config`" + `
enters an interactive configuration sessions where you can setup
new remotes and manage existing ones. You may also set or remove a password to
protect your configuration.
Additional functions:
* ` + "`rclone config edit`" + ` same as above
* ` + "`rclone config file`" + ` show path of configuration file in use
* ` + "`rclone config show`" + ` print (decrypted) config file
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 1, command, args)
if len(args) == 0 {
fs.EditConfig()
return
}
switch args[0] {
case "edit":
fs.EditConfig()
case "show":
fs.ShowConfig()
case "file":
fs.ShowConfigLocation()
default:
fmt.Fprintf(os.Stderr, "Unknown subcommand %q, %s only supports edit, show and file.\n", args[0], command.Name())
}
},
}

View File

@@ -1,63 +0,0 @@
package copy
import (
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefintion)
}
var commandDefintion = &cobra.Command{
Use: "copy source:path dest:path",
Short: `Copy files from source to dest, skipping already copied`,
Long: `
Copy the source to the destination. Doesn't transfer
unchanged files, testing by size and modification time or
MD5SUM. Doesn't delete files from the destination.
Note that it is always the contents of the directory that is synced,
not the directory so when source:path is a directory, it's the
contents of source:path that are copied, not the directory name and
contents.
If dest:path doesn't exist, it is created and the source:path contents
go there.
For example
rclone copy source:sourcepath dest:destpath
Let's say there are two files in sourcepath
sourcepath/one.txt
sourcepath/two.txt
This copies them to
destpath/one.txt
destpath/two.txt
Not to
destpath/sourcepath/one.txt
destpath/sourcepath/two.txt
If you are familiar with ` + "`rsync`" + `, rclone always works as if you had
written a trailing / - meaning "copy the contents of this directory".
This applies to all commands and whether you are talking about the
source or destination.
See the ` + "`--no-traverse`" + ` option for controlling whether rclone lists
the destination directory or not.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)
fsrc, fdst := cmd.NewFsSrcDst(args)
cmd.Run(true, true, command, func() error {
return fs.CopyDir(fdst, fsrc)
})
},
}

View File

@@ -1,53 +0,0 @@
package copyto
import (
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefintion)
}
var commandDefintion = &cobra.Command{
Use: "copyto source:path dest:path",
Short: `Copy files from source to dest, skipping already copied`,
Long: `
If source:path is a file or directory then it copies it to a file or
directory named dest:path.
This can be used to upload single files to other than their current
name. If the source is a directory then it acts exactly like the copy
command.
So
rclone copyto src dst
where src and dst are rclone paths, either remote:path or
/path/to/local or C:\windows\path\if\on\windows.
This will:
if src is file
copy it to dst, overwriting an existing file if it exists
if src is directory
copy it to dst, overwriting existing files if they exist
see copy command for full details
This doesn't transfer unchanged files, testing by size and
modification time or MD5SUM. It doesn't delete files from the
destination.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)
fsrc, srcFileName, fdst, dstFileName := cmd.NewFsSrcDstFiles(args)
cmd.Run(true, true, command, func() error {
if srcFileName == "" {
return fs.CopyDir(fdst, fsrc)
}
return fs.CopyFile(fdst, fsrc, dstFileName, srcFileName)
})
},
}

View File

@@ -1,101 +0,0 @@
package cryptcheck
import (
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/crypt"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefintion)
}
var commandDefintion = &cobra.Command{
Use: "cryptcheck remote:path cryptedremote:path",
Short: `Cryptcheck checks the integrity of a crypted remote.`,
Long: `
rclone cryptcheck checks a remote against a crypted remote. This is
the equivalent of running rclone check, but able to check the
checksums of the crypted remote.
For it to work the underlying remote of the cryptedremote must support
some kind of checksum.
It works by reading the nonce from each file on the cryptedremote: and
using that to encrypt each file on the remote:. It then checks the
checksum of the underlying file on the cryptedremote: against the
checksum of the file it has just encrypted.
Use it like this
rclone cryptcheck /path/to/files encryptedremote:path
You can use it like this also, but that will involve downloading all
the files in remote:path.
rclone cryptcheck remote:path encryptedremote:path
After it has run it will log the status of the encryptedremote:.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)
fsrc, fdst := cmd.NewFsSrcDst(args)
cmd.Run(false, true, command, func() error {
return cryptCheck(fdst, fsrc)
})
},
}
// cryptCheck checks the integrity of a crypted remote
func cryptCheck(fdst, fsrc fs.Fs) error {
// Check to see fcrypt is a crypt
fcrypt, ok := fdst.(*crypt.Fs)
if !ok {
return errors.Errorf("%s:%s is not a crypt remote", fdst.Name(), fdst.Root())
}
// Find a hash to use
funderlying := fcrypt.UnWrap()
hashType := funderlying.Hashes().GetOne()
if hashType == fs.HashNone {
return errors.Errorf("%s:%s does not support any hashes", funderlying.Name(), funderlying.Root())
}
fs.Infof(nil, "Using %v for hash comparisons", hashType)
// checkIdentical checks to see if dst and src are identical
//
// it returns true if differences were found
// it also returns whether it couldn't be hashed
checkIdentical := func(dst, src fs.Object) (differ bool, noHash bool) {
cryptDst := dst.(*crypt.Object)
underlyingDst := cryptDst.UnWrap()
underlyingHash, err := underlyingDst.Hash(hashType)
if err != nil {
fs.Stats.Error()
fs.Errorf(dst, "Error reading hash from underlying %v: %v", underlyingDst, err)
return true, false
}
if underlyingHash == "" {
return false, true
}
cryptHash, err := fcrypt.ComputeHash(cryptDst, src, hashType)
if err != nil {
fs.Stats.Error()
fs.Errorf(dst, "Error computing hash: %v", err)
return true, false
}
if cryptHash == "" {
return false, true
}
if cryptHash != underlyingHash {
fs.Stats.Error()
fs.Errorf(src, "hashes differ (%s:%s) %q vs (%s:%s) %q", fdst.Name(), fdst.Root(), cryptHash, fsrc.Name(), fsrc.Root(), underlyingHash)
return true, false
}
fs.Debugf(src, "OK")
return false, false
}
return fs.CheckFn(fcrypt, fsrc, checkIdentical)
}
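
The checkIdentical closure above returns a (differ, noHash) pair rather than an error, so the caller can count unhashable files separately from genuine mismatches. A minimal standalone sketch of that shape using only the standard library (MD5 over two readers; none of the rclone types are involved, and the sample inputs are made up):

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

// hashReader returns the hex MD5 of everything read from r.
func hashReader(r io.Reader) (string, error) {
	h := md5.New()
	if _, err := io.Copy(h, r); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

// checkIdentical mirrors the (differ, noHash) return shape used above.
// A real remote might legitimately have no hash; this sketch always can.
func checkIdentical(dst, src io.Reader) (differ bool, noHash bool) {
	dstHash, err := hashReader(dst)
	if err != nil {
		return true, false // treat a hashing error as a difference
	}
	srcHash, err := hashReader(src)
	if err != nil {
		return true, false
	}
	return dstHash != srcHash, false
}

func main() {
	differ, noHash := checkIdentical(strings.NewReader("one"), strings.NewReader("two"))
	fmt.Println(differ, noHash) // true false
}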

View File

@@ -1,59 +0,0 @@
package cryptdecode
import (
"fmt"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/crypt"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefinition)
}
var commandDefinition = &cobra.Command{
Use: "cryptdecode encryptedremote: encryptedfilename",
Short: `Cryptdecode returns unencrypted file names.`,
Long: `
rclone cryptdecode returns unencrypted file names when provided with
a list of encrypted file names. List limit is 10 items.
Use it like this
rclone cryptdecode encryptedremote: encryptedfilename1 encryptedfilename2
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 11, command, args)
fsrc := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {
return cryptDecode(fsrc, args[1:])
})
},
}
// cryptDecode returns the unencrypted file name
func cryptDecode(fsrc fs.Fs, args []string) error {
// Check if fsrc is a crypt
fcrypt, ok := fsrc.(*crypt.Fs)
if !ok {
return errors.Errorf("%s:%s is not a crypt remote", fsrc.Name(), fsrc.Root())
}
output := ""
for _, encryptedFileName := range args {
fileName, err := fcrypt.DecryptFileName(encryptedFileName)
if err != nil {
output += fmt.Sprintln(encryptedFileName, "\t", "Failed to decrypt")
} else {
output += fmt.Sprintln(encryptedFileName, "\t", fileName)
}
}
fmt.Print(output)
return nil
}

View File

@@ -1,31 +0,0 @@
package dbhashsum
import (
"os"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefintion)
}
var commandDefintion = &cobra.Command{
Use: "dbhashsum remote:path",
Short: `Produces a Dropbox hash file for all the objects in the path.`,
Long: `
Produces a Dropbox hash file for all the objects in the path. The
hashes are calculated according to [Dropbox content hash
rules](https://www.dropbox.com/developers/reference/content-hash).
The output is in the same format as md5sum and sha1sum.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {
return fs.DropboxHashSum(fsrc, os.Stdout)
})
},
}

View File

@@ -1,117 +0,0 @@
package dedupe
import (
"log"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs"
"github.com/spf13/cobra"
)
var (
dedupeMode = fs.DeduplicateInteractive
)
func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().VarP(&dedupeMode, "dedupe-mode", "", "Dedupe mode interactive|skip|first|newest|oldest|rename.")
}
var commandDefintion = &cobra.Command{
Use: "dedupe [mode] remote:path",
Short: `Interactively find duplicate files and delete/rename them.`,
Long: `
By default ` + "`" + `dedupe` + "`" + ` interactively finds duplicate files and offers to
delete all but one or rename them to be different. Only useful with
Google Drive which can have duplicate file names.
In the first pass it will merge directories with the same name. It
will do this iteratively until all the identical directories have been
merged.
The ` + "`" + `dedupe` + "`" + ` command will delete all but one of any identical (same
md5sum) files it finds without confirmation. This means that for most
duplicated files the ` + "`" + `dedupe` + "`" + ` command will not be interactive. You
can use ` + "`" + `--dry-run` + "`" + ` to see what would happen without doing anything.
Here is an example run.
Before - with duplicates
$ rclone lsl drive:dupes
6048320 2016-03-05 16:23:16.798000000 one.txt
6048320 2016-03-05 16:23:11.775000000 one.txt
564374 2016-03-05 16:23:06.731000000 one.txt
6048320 2016-03-05 16:18:26.092000000 one.txt
6048320 2016-03-05 16:22:46.185000000 two.txt
1744073 2016-03-05 16:22:38.104000000 two.txt
564374 2016-03-05 16:22:52.118000000 two.txt
Now the ` + "`" + `dedupe` + "`" + ` session
$ rclone dedupe drive:dupes
2016/03/05 16:24:37 Google drive root 'dupes': Looking for duplicates using interactive mode.
one.txt: Found 4 duplicates - deleting identical copies
one.txt: Deleting 2/3 identical duplicates (md5sum "1eedaa9fe86fd4b8632e2ac549403b36")
one.txt: 2 duplicates remain
1: 6048320 bytes, 2016-03-05 16:23:16.798000000, md5sum 1eedaa9fe86fd4b8632e2ac549403b36
2: 564374 bytes, 2016-03-05 16:23:06.731000000, md5sum 7594e7dc9fc28f727c42ee3e0749de81
s) Skip and do nothing
k) Keep just one (choose which in next step)
r) Rename all to be different (by changing file.jpg to file-1.jpg)
s/k/r> k
Enter the number of the file to keep> 1
one.txt: Deleted 1 extra copies
two.txt: Found 3 duplicates - deleting identical copies
two.txt: 3 duplicates remain
1: 564374 bytes, 2016-03-05 16:22:52.118000000, md5sum 7594e7dc9fc28f727c42ee3e0749de81
2: 6048320 bytes, 2016-03-05 16:22:46.185000000, md5sum 1eedaa9fe86fd4b8632e2ac549403b36
3: 1744073 bytes, 2016-03-05 16:22:38.104000000, md5sum 851957f7fb6f0bc4ce76be966d336802
s) Skip and do nothing
k) Keep just one (choose which in next step)
r) Rename all to be different (by changing file.jpg to file-1.jpg)
s/k/r> r
two-1.txt: renamed from: two.txt
two-2.txt: renamed from: two.txt
two-3.txt: renamed from: two.txt
The result being
$ rclone lsl drive:dupes
6048320 2016-03-05 16:23:16.798000000 one.txt
564374 2016-03-05 16:22:52.118000000 two-1.txt
6048320 2016-03-05 16:22:46.185000000 two-2.txt
1744073 2016-03-05 16:22:38.104000000 two-3.txt
Dedupe can be run non-interactively using the ` + "`" + `--dedupe-mode` + "`" + ` flag or by using an extra parameter with the same value
* ` + "`" + `--dedupe-mode interactive` + "`" + ` - interactive as above.
* ` + "`" + `--dedupe-mode skip` + "`" + ` - removes identical files then skips anything left.
* ` + "`" + `--dedupe-mode first` + "`" + ` - removes identical files then keeps the first one.
* ` + "`" + `--dedupe-mode newest` + "`" + ` - removes identical files then keeps the newest one.
* ` + "`" + `--dedupe-mode oldest` + "`" + ` - removes identical files then keeps the oldest one.
* ` + "`" + `--dedupe-mode rename` + "`" + ` - removes identical files then renames the rest to be different.
For example to rename all the identically named photos in your Google Photos directory, do
rclone dedupe --dedupe-mode rename "drive:Google Photos"
Or
rclone dedupe rename "drive:Google Photos"
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 2, command, args)
if len(args) > 1 {
err := dedupeMode.Set(args[0])
if err != nil {
log.Fatal(err)
}
args = args[1:]
}
fdst := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {
return fs.Deduplicate(fdst, dedupeMode)
})
},
}

View File

@@ -1,41 +0,0 @@
package delete
import (
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefintion)
}
var commandDefintion = &cobra.Command{
Use: "delete remote:path",
Short: `Remove the contents of path.`,
Long: `
Remove the contents of path. Unlike ` + "`" + `purge` + "`" + ` it obeys include/exclude
filters so can be used to selectively delete files.
Eg delete all files bigger than 100MBytes
Check what would be deleted first (use either)
rclone --min-size 100M lsl remote:path
rclone --dry-run --min-size 100M delete remote:path
Then delete
rclone --min-size 100M delete remote:path
That reads "delete everything with a minimum size of 100 MB", hence
delete all files bigger than 100MBytes.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
cmd.Run(true, false, command, func() error {
return fs.Delete(fsrc)
})
},
}

View File

@@ -1,19 +0,0 @@
package genautocomplete
import (
"github.com/ncw/rclone/cmd"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(completionDefinition)
}
var completionDefinition = &cobra.Command{
Use: "genautocomplete [shell]",
Short: `Output completion script for a given shell.`,
Long: `
Generates a shell completion script for rclone.
Run with --help to list the supported shells.
`,
}

View File

@@ -1,44 +0,0 @@
package genautocomplete
import (
"log"
"github.com/ncw/rclone/cmd"
"github.com/spf13/cobra"
)
func init() {
completionDefinition.AddCommand(bashCommandDefinition)
}
var bashCommandDefinition = &cobra.Command{
Use: "bash [output_file]",
Short: `Output bash completion script for rclone.`,
Long: `
Generates a bash shell autocompletion script for rclone.
This writes to /etc/bash_completion.d/rclone by default so will
probably need to be run with sudo or as root, eg
sudo rclone genautocomplete bash
Logout and login again to use the autocompletion scripts, or source
them directly
. /etc/bash_completion
If you supply a command line argument the script will be written
there.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 1, command, args)
out := "/etc/bash_completion.d/rclone"
if len(args) > 0 {
out = args[0]
}
err := cmd.Root.GenBashCompletionFile(out)
if err != nil {
log.Fatal(err)
}
},
}

View File

@@ -1,35 +0,0 @@
package genautocomplete
import (
"io/ioutil"
"os"
"testing"
"github.com/stretchr/testify/assert"
)
func TestCompletionBash(t *testing.T) {
tempFile, err := ioutil.TempFile("", "completion_bash")
assert.NoError(t, err)
defer func() { _ = tempFile.Close() }()
defer func() { _ = os.Remove(tempFile.Name()) }()
bashCommandDefinition.Run(bashCommandDefinition, []string{tempFile.Name()})
bs, err := ioutil.ReadFile(tempFile.Name())
assert.NoError(t, err)
assert.NotEmpty(t, string(bs))
}
func TestCompletionZsh(t *testing.T) {
tempFile, err := ioutil.TempFile("", "completion_zsh")
assert.NoError(t, err)
defer func() { _ = tempFile.Close() }()
defer func() { _ = os.Remove(tempFile.Name()) }()
zshCommandDefinition.Run(zshCommandDefinition, []string{tempFile.Name()})
bs, err := ioutil.ReadFile(tempFile.Name())
assert.NoError(t, err)
assert.NotEmpty(t, string(bs))
}

View File

@@ -1,50 +0,0 @@
package genautocomplete
import (
"log"
"os"
"github.com/ncw/rclone/cmd"
"github.com/spf13/cobra"
)
func init() {
completionDefinition.AddCommand(zshCommandDefinition)
}
var zshCommandDefinition = &cobra.Command{
Use: "zsh [output_file]",
Short: `Output zsh completion script for rclone.`,
Long: `
Generates a zsh autocompletion script for rclone.
This writes to /usr/share/zsh/vendor-completions/_rclone by default so will
probably need to be run with sudo or as root, eg
sudo rclone genautocomplete zsh
Logout and login again to use the autocompletion scripts, or source
them directly
autoload -U compinit && compinit
If you supply a command line argument the script will be written
there.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 1, command, args)
out := "/usr/share/zsh/vendor-completions/_rclone"
if len(args) > 0 {
out = args[0]
}
outFile, err := os.Create(out)
if err != nil {
log.Fatal(err)
}
defer func() { _ = outFile.Close() }()
err = cmd.Root.GenZshCompletion(outFile)
if err != nil {
log.Fatal(err)
}
},
}

View File

@@ -1,55 +0,0 @@
package gendocs
import (
"fmt"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/ncw/rclone/cmd"
"github.com/spf13/cobra"
"github.com/spf13/cobra/doc"
)
func init() {
cmd.Root.AddCommand(commandDefintion)
}
const gendocFrontmatterTemplate = `---
date: %s
title: "%s"
slug: %s
url: %s
---
`
var commandDefintion = &cobra.Command{
Use: "gendocs output_directory",
Short: `Output markdown docs for rclone to the directory supplied.`,
Long: `
This produces markdown docs for the rclone commands to the directory
supplied. These are in a format suitable for hugo to render into the
rclone.org website.`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 1, command, args)
out := args[0]
err := os.MkdirAll(out, 0777)
if err != nil {
return err
}
now := time.Now().Format(time.RFC3339)
prepender := func(filename string) string {
name := filepath.Base(filename)
base := strings.TrimSuffix(name, path.Ext(name))
url := "/commands/" + strings.ToLower(base) + "/"
return fmt.Sprintf(gendocFrontmatterTemplate, now, strings.Replace(base, "_", " ", -1), base, url)
}
linkHandler := func(name string) string {
base := strings.TrimSuffix(name, path.Ext(name))
return "/commands/" + strings.ToLower(base) + "/"
}
return doc.GenMarkdownTreeCustom(cmd.Root, out, prepender, linkHandler)
},
}
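
The prepender and linkHandler above turn each generated file name into Hugo front matter and a site-relative URL. A standalone sketch of the same transformation (the input file name is invented for the example):

package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	name := "rclone_copy.md" // hypothetical generated doc file
	base := strings.TrimSuffix(name, path.Ext(name))
	url := "/commands/" + strings.ToLower(base) + "/"
	// Same fields as the gendocFrontmatterTemplate, minus the date
	fmt.Printf("---\ntitle: %q\nslug: %s\nurl: %s\n---\n",
		strings.Replace(base, "_", " ", -1), base, url)
}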

View File

@@ -1,18 +0,0 @@
#!/bin/bash
exec rclone --check-normalization=true --check-control=true --check-length=true info \
/tmp/testInfo \
TestAmazonCloudDrive:testInfo \
TestB2:testInfo \
TestCryptDrive:testInfo \
TestCryptSwift:testInfo \
TestDrive:testInfo \
TestDropbox:testInfo \
TestGoogleCloudStorage:rclone-testinfo \
TestOneDrive:testInfo \
TestS3:rclone-testinfo \
TestSftp:testInfo \
TestSwift:testInfo \
TestYandex:testInfo \
TestFTP:testInfo
# TestHubic:testInfo \

View File

@@ -1,267 +0,0 @@
package info
// FIXME once translations are implemented we will need a no-escape
// option for Put so we can make these tests work again
import (
"bytes"
"fmt"
"io"
"sort"
"strings"
"sync"
"time"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
var (
checkNormalization bool
checkControl bool
checkLength bool
checkStreaming bool
)
func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&checkNormalization, "check-normalization", "", true, "Check UTF-8 Normalization.")
commandDefintion.Flags().BoolVarP(&checkControl, "check-control", "", true, "Check control characters.")
commandDefintion.Flags().BoolVarP(&checkLength, "check-length", "", true, "Check max filename length.")
commandDefintion.Flags().BoolVarP(&checkStreaming, "check-streaming", "", true, "Check uploads with indeterminate file size.")
}
var commandDefintion = &cobra.Command{
Use: "info [remote:path]+",
Short: `Discovers file name or other limitations for paths.`,
Long: `rclone info discovers what filenames and upload methods can be used
with the paths passed in, and how long file names can be. It can take some
time. It will write test files into the remote:path passed in. It outputs
a bit of Go code for each one.
`,
Hidden: true,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1E6, command, args)
for i := range args {
f := cmd.NewFsDst(args[i : i+1])
cmd.Run(false, false, command, func() error {
return readInfo(f)
})
}
},
}
type results struct {
f fs.Fs
mu sync.Mutex
charNeedsEscaping map[rune]bool
maxFileLength int
canWriteUnnormalized bool
canReadUnnormalized bool
canReadRenormalized bool
canStream bool
}
func newResults(f fs.Fs) *results {
return &results{
f: f,
charNeedsEscaping: make(map[rune]bool),
}
}
// Print the results to stdout
func (r *results) Print() {
fmt.Printf("// %s\n", r.f.Name())
if checkControl {
escape := []string{}
for c, needsEscape := range r.charNeedsEscaping {
if needsEscape {
escape = append(escape, fmt.Sprintf("0x%02X", c))
}
}
sort.Strings(escape)
fmt.Printf("charNeedsEscaping = []byte{\n")
fmt.Printf("\t%s\n", strings.Join(escape, ", "))
fmt.Printf("}\n")
}
if checkLength {
fmt.Printf("maxFileLength = %d\n", r.maxFileLength)
}
if checkNormalization {
fmt.Printf("canWriteUnnormalized = %v\n", r.canWriteUnnormalized)
fmt.Printf("canReadUnnormalized = %v\n", r.canReadUnnormalized)
fmt.Printf("canReadRenormalized = %v\n", r.canReadRenormalized)
}
if checkStreaming {
fmt.Printf("canStream = %v\n", r.canStream)
}
}
// writeFile writes a file with some random contents
func (r *results) writeFile(path string) (fs.Object, error) {
contents := fstest.RandomString(50)
src := fs.NewStaticObjectInfo(path, time.Now(), int64(len(contents)), true, nil, r.f)
return r.f.Put(bytes.NewBufferString(contents), src)
}
// check whether normalization is enforced and check whether it is
// done on the files anyway
func (r *results) checkUTF8Normalization() {
unnormalized := "Héroique"
normalized := "Héroique"
_, err := r.writeFile(unnormalized)
if err != nil {
r.canWriteUnnormalized = false
return
}
r.canWriteUnnormalized = true
_, err = r.f.NewObject(unnormalized)
if err == nil {
r.canReadUnnormalized = true
}
_, err = r.f.NewObject(normalized)
if err == nil {
r.canReadRenormalized = true
}
}
// check we can write file with the rune passed in
func (r *results) checkChar(c rune) {
fs.Infof(r.f, "Writing file 0x%02X", c)
path := fmt.Sprintf("0x%02X-%c-", c, c)
_, err := r.writeFile(path)
escape := false
if err != nil {
fs.Infof(r.f, "Couldn't write file 0x%02X", c)
escape = true
} else {
fs.Infof(r.f, "OK writing file 0x%02X", c)
}
r.mu.Lock()
r.charNeedsEscaping[c] = escape
r.mu.Unlock()
}
// check we can write a file with the control chars
func (r *results) checkControls() {
fs.Infof(r.f, "Trying to create control character file names")
// Concurrency control
tokens := make(chan struct{}, fs.Config.Checkers)
for i := 0; i < fs.Config.Checkers; i++ {
tokens <- struct{}{}
}
var wg sync.WaitGroup
for i := rune(0); i < 128; i++ {
if i == 0 || i == '/' {
// We're not even going to check NULL or /
r.charNeedsEscaping[i] = true
continue
}
wg.Add(1)
c := i
go func() {
defer wg.Done()
token := <-tokens
r.checkChar(c)
tokens <- token
}()
}
wg.Wait()
fs.Infof(r.f, "Done trying to create control character file names")
}
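
checkControls bounds its concurrency with a channel of tokens sized to fs.Config.Checkers: a goroutine takes a token before working and hands it back afterwards. A minimal standalone sketch of that pattern (the worker and job counts here are arbitrary):

package main

import (
	"fmt"
	"sync"
)

func main() {
	const workers = 4
	tokens := make(chan struct{}, workers)
	for i := 0; i < workers; i++ {
		tokens <- struct{}{} // fill the pool
	}
	var wg sync.WaitGroup
	for job := 0; job < 16; job++ {
		wg.Add(1)
		go func(job int) {
			defer wg.Done()
			token := <-tokens // block until a slot is free
			fmt.Println("running job", job)
			tokens <- token // hand the slot back
		}(job)
	}
	wg.Wait()
}
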
// find the max file name size we can use
func (r *results) findMaxLength() {
const maxLen = 16 * 1024
name := make([]byte, maxLen)
for i := range name {
name[i] = 'a'
}
// Find the first size of filename we can't write
i := sort.Search(len(name), func(i int) (fail bool) {
defer func() {
if err := recover(); err != nil {
fs.Infof(r.f, "Couldn't write file with name length %d: %v", i, err)
fail = true
}
}()
path := string(name[:i])
_, err := r.writeFile(path)
if err != nil {
fs.Infof(r.f, "Couldn't write file with name length %d: %v", i, err)
return true
}
fs.Infof(r.f, "Wrote file with name length %d", i)
return false
})
r.maxFileLength = i - 1
fs.Infof(r.f, "Max file length is %d", r.maxFileLength)
}
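
findMaxLength uses sort.Search to binary-search for the first file name length that fails, then subtracts one. Here is a standalone illustration of the same trick with an invented 255-byte limit standing in for the remote:

package main

import (
	"fmt"
	"sort"
)

func main() {
	const fakeLimit = 255 // pretend the remote rejects names longer than this
	const maxLen = 16 * 1024
	// sort.Search returns the smallest i for which the predicate is true,
	// i.e. the first failing length.
	i := sort.Search(maxLen, func(i int) bool {
		return i > fakeLimit
	})
	fmt.Println("max file name length:", i-1) // 255
}
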
func (r *results) checkStreaming() {
putter := r.f.Put
if r.f.Features().PutStream != nil {
fs.Infof(r.f, "Given remote has specialized streaming function. Using that to test streaming.")
putter = r.f.Features().PutStream
}
contents := "thinking of test strings is hard"
buf := bytes.NewBufferString(contents)
hashIn := fs.NewMultiHasher()
in := io.TeeReader(buf, hashIn)
objIn := fs.NewStaticObjectInfo("checkStreamingTest", time.Now(), -1, true, nil, r.f)
objR, err := putter(in, objIn)
if err != nil {
fs.Infof(r.f, "Streamed file failed to upload (%v)", err)
r.canStream = false
return
}
hashes := hashIn.Sums()
types := objR.Fs().Hashes().Array()
for _, hash := range types {
sum, err := objR.Hash(hash)
if err != nil {
fs.Infof(r.f, "Streamed file failed when getting hash %v (%v)", hash, err)
r.canStream = false
return
}
if !fs.HashEquals(hashes[hash], sum) {
fs.Infof(r.f, "Streamed file has incorrect hash %v: expecting %q got %q", hash, hashes[hash], sum)
r.canStream = false
return
}
}
if int64(len(contents)) != objR.Size() {
fs.Infof(r.f, "Streamed file has incorrect file size: expecting %d got %d", len(contents), objR.Size())
r.canStream = false
return
}
r.canStream = true
}
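
checkStreaming hashes the data as it streams past by wrapping the input in an io.TeeReader feeding a hasher, so nothing is buffered twice. A standalone sketch of that trick with a stdlib SHA-1 hasher and a discarded copy standing in for the upload:

package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	buf := strings.NewReader("thinking of test strings is hard")
	h := sha1.New()
	in := io.TeeReader(buf, h)            // everything read from in also goes to h
	n, err := io.Copy(ioutil.Discard, in) // stands in for the streamed upload
	if err != nil {
		fmt.Println("copy failed:", err)
		return
	}
	fmt.Printf("streamed %d bytes, sha1 %s\n", n, hex.EncodeToString(h.Sum(nil)))
}
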
func readInfo(f fs.Fs) error {
err := f.Mkdir("")
if err != nil {
return errors.Wrap(err, "couldn't mkdir")
}
r := newResults(f)
if checkControl {
r.checkControls()
}
if checkLength {
r.findMaxLength()
}
if checkNormalization {
r.checkUTF8Normalization()
}
if checkStreaming {
r.checkStreaming()
}
r.Print()
return nil
}

View File

@@ -1,49 +0,0 @@
package ls
import (
"fmt"
"sort"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs"
"github.com/spf13/cobra"
)
// Globals
var (
listLong bool
)
func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&listLong, "long", "l", listLong, "Show the type as well as names.")
}
var commandDefintion = &cobra.Command{
Use: "listremotes",
Short: `List all the remotes in the config file.`,
Long: `
rclone listremotes lists all the available remotes from the config file.
When used with the -l flag it lists the types too.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 0, command, args)
remotes := fs.ConfigFileSections()
sort.Strings(remotes)
maxlen := 1
for _, remote := range remotes {
if len(remote) > maxlen {
maxlen = len(remote)
}
}
for _, remote := range remotes {
if listLong {
remoteType := fs.ConfigFileGet(remote, "type", "UNKNOWN")
fmt.Printf("%-*s %s\n", maxlen+1, remote+":", remoteType)
} else {
fmt.Printf("%s:\n", remote)
}
}
},
}
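
The listing above pads the remote name column with fmt's %-*s verb, passing the computed width as an argument so the types line up. A standalone sketch with made-up remote names:

package main

import "fmt"

func main() {
	remotes := []struct{ name, typ string }{
		{name: "drive", typ: "drive"},
		{name: "s3-backup", typ: "s3"},
	}
	maxlen := 1
	for _, r := range remotes {
		if len(r.name) > maxlen {
			maxlen = len(r.name)
		}
	}
	for _, r := range remotes {
		// the * consumes maxlen+1 as the field width, left-justified
		fmt.Printf("%-*s %s\n", maxlen+1, r.name+":", r.typ)
	}
}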

View File

@@ -1,25 +0,0 @@
package ls
import (
"os"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefintion)
}
var commandDefintion = &cobra.Command{
Use: "ls remote:path",
Short: `List all the objects in the path with size and path.`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {
return fs.List(fsrc, os.Stdout)
})
},
}

View File

@@ -1,46 +0,0 @@
package ls2
import (
"fmt"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs"
"github.com/spf13/cobra"
)
var (
recurse bool
)
func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&recurse, "recursive", "R", false, "Recurse into the listing.")
}
var commandDefintion = &cobra.Command{
Use: "ls2 remote:path",
Short: `List directories and objects in the path.`,
Hidden: true,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {
return fs.Walk(fsrc, "", false, fs.ConfigMaxDepth(recurse), func(path string, entries fs.DirEntries, err error) error {
if err != nil {
fs.Stats.Error()
fs.Errorf(path, "error listing: %v", err)
return nil
}
for _, entry := range entries {
_, isDir := entry.(fs.Directory)
if isDir {
fmt.Println(entry.Remote() + "/")
} else {
fmt.Println(entry.Remote())
}
}
return nil
})
})
},
}
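
fs.Walk is rclone's remote-aware analogue of filepath.Walk; the loop above distinguishes directories from objects with a type switch and prints directories with a trailing slash. The same shape against the local filesystem, using only the standard library:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	err := filepath.Walk(".", func(p string, info os.FileInfo, err error) error {
		if err != nil {
			// log and keep walking, as the listing above does
			fmt.Fprintf(os.Stderr, "error listing %s: %v\n", p, err)
			return nil
		}
		if info.IsDir() {
			fmt.Println(p + "/")
		} else {
			fmt.Println(p)
		}
		return nil
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}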

View File

@@ -1,25 +0,0 @@
package lsd
import (
"os"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefintion)
}
var commandDefintion = &cobra.Command{
Use: "lsd remote:path",
Short: `List all directories/containers/buckets in the path.`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {
return fs.ListDir(fsrc, os.Stdout)
})
},
}

View File

@@ -1,147 +0,0 @@
package lsjson
import (
"encoding/json"
"fmt"
"os"
"path"
"time"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
var (
recurse bool
showHash bool
noModTime bool
)
func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&recurse, "recursive", "R", false, "Recurse into the listing.")
commandDefintion.Flags().BoolVarP(&showHash, "hash", "", false, "Include hashes in the output (may take longer).")
commandDefintion.Flags().BoolVarP(&noModTime, "no-modtime", "", false, "Don't read the modification time (can speed things up).")
}
// lsJSON in the struct which gets marshalled for each line
type lsJSON struct {
Path string
Name string
Size int64
ModTime Timestamp //`json:",omitempty"`
IsDir bool
Hashes map[string]string `json:",omitempty"`
}
// Timestamp is a time in RFC3339 format with nanosecond precision
type Timestamp time.Time
// MarshalJSON turns a Timestamp into JSON
func (t Timestamp) MarshalJSON() (out []byte, err error) {
tt := time.Time(t)
if tt.IsZero() {
return []byte(`""`), nil
}
return []byte(`"` + tt.Format(time.RFC3339Nano) + `"`), nil
}
var commandDefintion = &cobra.Command{
Use: "lsjson remote:path",
Short: `List directories and objects in the path in JSON format.`,
Long: `List directories and objects in the path in JSON format.
The output is an array of Items, where each Item looks like this
{
"Hashes" : {
"SHA-1" : "f572d396fae9206628714fb2ce00f72e94f2258f",
"MD5" : "b1946ac92492d2347c6235b4d2611184",
"DropboxHash" : "ecb65bb98f9d905b70458986c39fcbad7715e5f2fcc3b1f07767d7c83e2438cc"
},
"IsDir" : false,
"ModTime" : "2017-05-31T16:15:57.034468261+01:00",
"Name" : "file.txt",
"Path" : "full/path/goes/here/file.txt",
"Size" : 6
}
If --hash is not specified then the Hashes property won't be emitted.
If --no-modtime is specified then ModTime will be blank.
The time is in RFC3339 format with nanosecond precision.
The whole output can be processed as a JSON blob, or alternatively it
can be processed line by line, as each item is written on its own line.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {
fmt.Println("[")
first := true
err := fs.Walk(fsrc, "", false, fs.ConfigMaxDepth(recurse), func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
fs.Stats.Error()
fs.Errorf(dirPath, "error listing: %v", err)
return nil
}
for _, entry := range entries {
item := lsJSON{
Path: entry.Remote(),
Name: path.Base(entry.Remote()),
Size: entry.Size(),
}
if !noModTime {
item.ModTime = Timestamp(entry.ModTime())
}
switch x := entry.(type) {
case fs.Directory:
item.IsDir = true
case fs.Object:
item.IsDir = false
if showHash {
item.Hashes = make(map[string]string)
for _, hashType := range x.Fs().Hashes().Array() {
hash, err := x.Hash(hashType)
if err != nil {
fs.Errorf(x, "Failed to read hash: %v", err)
} else if hash != "" {
item.Hashes[hashType.String()] = hash
}
}
}
default:
fs.Errorf(nil, "Unknown type %T in listing", entry)
}
out, err := json.Marshal(item)
if err != nil {
return errors.Wrap(err, "failed to marshal list object")
}
if first {
first = false
} else {
fmt.Print(",\n")
}
_, err = os.Stdout.Write(out)
if err != nil {
return errors.Wrap(err, "failed to write to output")
}
}
return nil
})
if err != nil {
return errors.Wrap(err, "error listing JSON")
}
if !first {
fmt.Println()
}
fmt.Println("]")
return nil
})
},
}
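
The Timestamp type above exists so that zero times marshal to an empty string rather than "0001-01-01T00:00:00Z". A standalone check of that behaviour (the sample date is arbitrary):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type Timestamp time.Time

func (t Timestamp) MarshalJSON() ([]byte, error) {
	tt := time.Time(t)
	if tt.IsZero() {
		return []byte(`""`), nil
	}
	return []byte(`"` + tt.Format(time.RFC3339Nano) + `"`), nil
}

func main() {
	var zero Timestamp
	now := Timestamp(time.Date(2017, time.May, 31, 16, 15, 57, 34468261, time.UTC))
	for _, t := range []Timestamp{zero, now} {
		out, err := json.Marshal(t)
		if err != nil {
			fmt.Println("marshal failed:", err)
			return
		}
		fmt.Println(string(out)) // "" then the RFC3339Nano form
	}
}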

View File

@@ -1,25 +0,0 @@
package lsl
import (
"os"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefintion)
}
var commandDefintion = &cobra.Command{
Use: "lsl remote:path",
Short: `List all the objects in the path with modification time, size and path.`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {
return fs.ListLong(fsrc, os.Stdout)
})
},
}

View File

@@ -1,29 +0,0 @@
package md5sum
import (
"os"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefintion)
}
var commandDefintion = &cobra.Command{
Use: "md5sum remote:path",
Short: `Produces an md5sum file for all the objects in the path.`,
Long: `
Produces an md5sum file for all the objects in the path. This
is in the same format as the standard md5sum tool produces.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {
return fs.Md5sum(fsrc, os.Stdout)
})
},
}

View File

@@ -1,49 +0,0 @@
package memtest
import (
"runtime"
"sync"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefintion)
}
var commandDefintion = &cobra.Command{
Use: "memtest remote:path",
Short: `Load all the objects at remote:path and report memory stats.`,
Hidden: true,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {
objects, _, err := fs.Count(fsrc)
if err != nil {
return err
}
objs := make([]fs.Object, 0, objects)
var before, after runtime.MemStats
runtime.GC()
runtime.ReadMemStats(&before)
var mu sync.Mutex
err = fs.ListFn(fsrc, func(o fs.Object) {
mu.Lock()
objs = append(objs, o)
mu.Unlock()
})
if err != nil {
return err
}
runtime.GC()
runtime.ReadMemStats(&after)
usedMemory := after.Alloc - before.Alloc
fs.Logf(nil, "%d objects took %d bytes, %.1f bytes/object", len(objs), usedMemory, float64(usedMemory)/float64(len(objs)))
fs.Logf(nil, "System memory changed from %d to %d bytes, a change of %d bytes", before.Sys, after.Sys, after.Sys-before.Sys)
return nil
})
},
}
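
memtest brackets the allocation with a garbage collection and two ReadMemStats snapshots, then diffs Alloc. A standalone sketch measuring an invented slice-of-slices workload the same way:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	var before, after runtime.MemStats
	runtime.GC()
	runtime.ReadMemStats(&before)
	objs := make([][]byte, 0, 100000)
	for i := 0; i < 100000; i++ {
		objs = append(objs, make([]byte, 100))
	}
	runtime.GC() // objs is still referenced, so it survives the GC
	runtime.ReadMemStats(&after)
	used := after.Alloc - before.Alloc
	fmt.Printf("%d objects took %d bytes, %.1f bytes/object\n",
		len(objs), used, float64(used)/float64(len(objs)))
}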

View File

@@ -1,23 +0,0 @@
package mkdir
import (
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefintion)
}
var commandDefintion = &cobra.Command{
Use: "mkdir remote:path",
Short: `Make the path if it doesn't already exist.`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fdst := cmd.NewFsDst(args)
cmd.Run(true, false, command, func() error {
return fs.Mkdir(fdst, "")
})
},
}

View File

@@ -1,204 +0,0 @@
// +build linux darwin freebsd
package mount
import (
"os"
"path"
"time"
"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
"github.com/ncw/rclone/cmd/mountlib"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
// DirEntry describes the contents of a directory entry
//
// It can be a file or a directory
//
// node may be nil, but o may not
type DirEntry struct {
o fs.DirEntry
node fusefs.Node
}
// Dir represents a directory entry
type Dir struct {
*mountlib.Dir
// f fs.Fs
// path string
// modTime time.Time
// mu sync.RWMutex // protects the following
// read time.Time // time directory entry last read
// items map[string]*DirEntry
}
// Check interface satisfied
var _ fusefs.Node = (*Dir)(nil)
// Attr updates the attributes of a directory
func (d *Dir) Attr(ctx context.Context, a *fuse.Attr) (err error) {
defer fs.Trace(d, "")("attr=%+v, err=%v", a, &err)
a.Gid = mountlib.GID
a.Uid = mountlib.UID
a.Mode = os.ModeDir | mountlib.DirPerms
modTime := d.ModTime()
a.Atime = modTime
a.Mtime = modTime
a.Ctime = modTime
a.Crtime = modTime
// FIXME include Valid so get some caching?
// FIXME fs.Debugf(d.path, "Dir.Attr %+v", a)
return nil
}
// Check interface satisfied
var _ fusefs.NodeSetattrer = (*Dir)(nil)
// Setattr handles attribute changes from FUSE. Currently supports ModTime only.
func (d *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) (err error) {
defer fs.Trace(d, "stat=%+v", req)("err=%v", &err)
if mountlib.NoModTime {
return nil
}
if req.Valid.MtimeNow() {
err = d.SetModTime(time.Now())
} else if req.Valid.Mtime() {
err = d.SetModTime(req.Mtime)
}
return translateError(err)
}
// Check interface satisfied
var _ fusefs.NodeRequestLookuper = (*Dir)(nil)
// Lookup looks up a specific entry in the receiver.
//
// Lookup should return a Node corresponding to the entry. If the
// name does not exist in the directory, Lookup should return ENOENT.
//
// Lookup need not handle the names "." and "..".
func (d *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fusefs.Node, err error) {
defer fs.Trace(d, "name=%q", req.Name)("node=%+v, err=%v", &node, &err)
mnode, err := d.Dir.Lookup(req.Name)
if err != nil {
return nil, translateError(err)
}
switch x := mnode.(type) {
case *mountlib.File:
return &File{x}, nil
case *mountlib.Dir:
return &Dir{x}, nil
}
panic("bad type")
}
// Check interface satisfied
var _ fusefs.HandleReadDirAller = (*Dir)(nil)
// ReadDirAll reads the contents of the directory
func (d *Dir) ReadDirAll(ctx context.Context) (dirents []fuse.Dirent, err error) {
itemsRead := -1
defer fs.Trace(d, "")("item=%d, err=%v", &itemsRead, &err)
items, err := d.Dir.ReadDirAll()
if err != nil {
return nil, translateError(err)
}
for _, item := range items {
var dirent fuse.Dirent
switch x := item.Obj.(type) {
case fs.Object:
dirent = fuse.Dirent{
// Inode FIXME ???
Type: fuse.DT_File,
Name: path.Base(x.Remote()),
}
case fs.Directory:
dirent = fuse.Dirent{
// Inode FIXME ???
Type: fuse.DT_Dir,
Name: path.Base(x.Remote()),
}
default:
return nil, errors.Errorf("unknown type %T", item)
}
dirents = append(dirents, dirent)
}
itemsRead = len(dirents)
return dirents, nil
}
var _ fusefs.NodeCreater = (*Dir)(nil)
// Create makes a new file
func (d *Dir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (node fusefs.Node, handle fusefs.Handle, err error) {
defer fs.Trace(d, "name=%q", req.Name)("node=%v, handle=%v, err=%v", &node, &handle, &err)
file, fh, err := d.Dir.Create(req.Name)
if err != nil {
return nil, nil, translateError(err)
}
return &File{file}, &WriteFileHandle{fh}, err
}
var _ fusefs.NodeMkdirer = (*Dir)(nil)
// Mkdir creates a new directory
func (d *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (node fusefs.Node, err error) {
defer fs.Trace(d, "name=%q", req.Name)("node=%+v, err=%v", &node, &err)
dir, err := d.Dir.Mkdir(req.Name)
if err != nil {
return nil, translateError(err)
}
return &Dir{dir}, nil
}
var _ fusefs.NodeRemover = (*Dir)(nil)
// Remove removes the entry with the given name from
// the receiver, which must be a directory. The entry to be removed
// may correspond to a file (unlink) or to a directory (rmdir).
func (d *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) (err error) {
defer fs.Trace(d, "name=%q", req.Name)("err=%v", &err)
err = d.Dir.Remove(req.Name)
if err != nil {
return translateError(err)
}
return nil
}
// Check interface satisfied
var _ fusefs.NodeRenamer = (*Dir)(nil)
// Rename the file
func (d *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fusefs.Node) (err error) {
defer fs.Trace(d, "oldName=%q, newName=%q, newDir=%+v", req.OldName, req.NewName, newDir)("err=%v", &err)
destDir, ok := newDir.(*Dir)
if !ok {
return errors.Errorf("Unknown Dir type %T", newDir)
}
err = d.Dir.Rename(req.OldName, req.NewName, destDir.Dir)
if err != nil {
return translateError(err)
}
return nil
}
// Check interface satisfied
var _ fusefs.NodeFsyncer = (*Dir)(nil)
// Fsync the directory
func (d *Dir) Fsync(ctx context.Context, req *fuse.FsyncRequest) (err error) {
defer fs.Trace(d, "")("err=%v", &err)
err = d.Dir.Fsync()
if err != nil {
return translateError(err)
}
return nil
}

View File

@@ -1,118 +0,0 @@
// +build linux darwin freebsd
package mount
import (
"time"
"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
"github.com/ncw/rclone/cmd/mountlib"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
// File represents a file
type File struct {
*mountlib.File
// size int64 // size of file - read and written with atomic int64 - must be 64 bit aligned
// d *Dir // parent directory - read only
// mu sync.RWMutex // protects the following
// o fs.Object // NB o may be nil if file is being written
// writers int // number of writers for this file
// pendingModTime time.Time // will be applied once o becomes available, i.e. after file was written
}
// Check interface satisfied
var _ fusefs.Node = (*File)(nil)
// Attr fills out the attributes for the file
func (f *File) Attr(ctx context.Context, a *fuse.Attr) (err error) {
defer fs.Trace(f, "")("a=%+v, err=%v", a, &err)
modTime, Size, Blocks, err := f.File.Attr(mountlib.NoModTime)
if err != nil {
return translateError(err)
}
a.Gid = mountlib.GID
a.Uid = mountlib.UID
a.Mode = mountlib.FilePerms
a.Size = Size
a.Atime = modTime
a.Mtime = modTime
a.Ctime = modTime
a.Crtime = modTime
a.Blocks = Blocks
return nil
}
// Check interface satisfied
var _ fusefs.NodeSetattrer = (*File)(nil)
// Setattr handles attribute changes from FUSE. Currently supports ModTime only.
func (f *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) (err error) {
defer fs.Trace(f, "a=%+v", req)("err=%v", &err)
if mountlib.NoModTime {
return nil
}
if req.Valid.MtimeNow() {
err = f.File.SetModTime(time.Now())
} else if req.Valid.Mtime() {
err = f.File.SetModTime(req.Mtime)
}
return translateError(err)
}
// Check interface satisfied
var _ fusefs.NodeOpener = (*File)(nil)
// Open the file for read or write
func (f *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fh fusefs.Handle, err error) {
defer fs.Trace(f, "flags=%v", req.Flags)("fh=%v, err=%v", &fh, &err)
switch {
case req.Flags.IsReadOnly():
if mountlib.NoSeek {
resp.Flags |= fuse.OpenNonSeekable
}
var rfh *mountlib.ReadFileHandle
rfh, err = f.File.OpenRead()
fh = &ReadFileHandle{rfh}
case req.Flags.IsWriteOnly() || (req.Flags.IsReadWrite() && (req.Flags&fuse.OpenTruncate) != 0):
resp.Flags |= fuse.OpenNonSeekable
var wfh *mountlib.WriteFileHandle
wfh, err = f.File.OpenWrite()
fh = &WriteFileHandle{wfh}
case req.Flags.IsReadWrite():
err = errors.New("can't open for read and write simultaneously")
default:
err = errors.Errorf("can't figure out how to open with flags %v", req.Flags)
}
/*
// File was opened in append-only mode, all writes will go to end
// of file. OS X does not provide this information.
OpenAppend OpenFlags = syscall.O_APPEND
OpenCreate OpenFlags = syscall.O_CREAT
OpenDirectory OpenFlags = syscall.O_DIRECTORY
OpenExclusive OpenFlags = syscall.O_EXCL
OpenNonblock OpenFlags = syscall.O_NONBLOCK
OpenSync OpenFlags = syscall.O_SYNC
OpenTruncate OpenFlags = syscall.O_TRUNC
*/
if err != nil {
return nil, translateError(err)
}
return fh, nil
}
// Check interface satisfied
var _ fusefs.NodeFsyncer = (*File)(nil)
// Fsync the file
//
// Note that we don't do anything except return OK
func (f *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) (err error) {
defer fs.Trace(f, "")("err=%v", &err)
return nil
}

View File

@@ -1,91 +0,0 @@
// FUSE main Fs
// +build linux darwin freebsd
package mount
import (
"syscall"
"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
"github.com/ncw/rclone/cmd/mountlib"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
// FS represents the top level filing system
type FS struct {
*mountlib.FS
f fs.Fs
}
// Check interface satisfied
var _ fusefs.FS = (*FS)(nil)
// NewFS makes a new FS
func NewFS(f fs.Fs) *FS {
fsys := &FS{
FS: mountlib.NewFS(f),
f: f,
}
return fsys
}
// Root returns the root node
func (f *FS) Root() (node fusefs.Node, err error) {
defer fs.Trace("", "")("node=%+v, err=%v", &node, &err)
root, err := f.FS.Root()
if err != nil {
return nil, translateError(err)
}
return &Dir{root}, nil
}
// Check interface satisfied
var _ fusefs.FSStatfser = (*FS)(nil)
// Statfs is called to obtain file system metadata.
// It should write that data to resp.
func (f *FS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.StatfsResponse) (err error) {
defer fs.Trace("", "")("stat=%+v, err=%v", resp, &err)
const blockSize = 4096
const fsBlocks = (1 << 50) / blockSize
resp.Blocks = fsBlocks // Total data blocks in file system.
resp.Bfree = fsBlocks // Free blocks in file system.
resp.Bavail = fsBlocks // Free blocks in file system if you're not root.
resp.Files = 1E9 // Total files in file system.
resp.Ffree = 1E9 // Free files in file system.
resp.Bsize = blockSize // Block size
resp.Namelen = 255 // Maximum file name length?
resp.Frsize = blockSize // Fragment size, smallest addressable data size in the file system.
return nil
}
// Translate errors from mountlib
func translateError(err error) error {
if err == nil {
return nil
}
cause := errors.Cause(err)
if mErr, ok := cause.(mountlib.Error); ok {
switch mErr {
case mountlib.OK:
return nil
case mountlib.ENOENT:
return fuse.ENOENT
case mountlib.ENOTEMPTY:
return fuse.Errno(syscall.ENOTEMPTY)
case mountlib.EEXIST:
return fuse.EEXIST
case mountlib.ESPIPE:
return fuse.Errno(syscall.ESPIPE)
case mountlib.EBADF:
return fuse.Errno(syscall.EBADF)
case mountlib.EROFS:
return fuse.Errno(syscall.EROFS)
}
}
return err
}
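
translateError unwraps with errors.Cause before switching on the sentinel, so errors that have been wrapped with context still map correctly. A minimal standalone sketch of the pattern (the sentinel and the "ENOENT" target are made up for the example; this is not the mountlib API):

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

var errNotFound = errors.New("not found") // hypothetical sentinel

// translate maps known sentinels into another error domain,
// unwrapping any context added with errors.Wrap first.
func translate(err error) error {
	if err == nil {
		return nil
	}
	if errors.Cause(err) == errNotFound {
		return fmt.Errorf("ENOENT")
	}
	return err
}

func main() {
	wrapped := errors.Wrap(errNotFound, "reading dir")
	fmt.Println(translate(wrapped)) // ENOENT
}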

View File

@@ -1,153 +0,0 @@
// Package mount implements a FUSE mounting system for rclone remotes.
// +build linux darwin freebsd
package mount
import (
"os"
"os/signal"
"syscall"
"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
"github.com/ncw/rclone/cmd/mountlib"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
func init() {
mountlib.NewMountCommand("mount", Mount)
}
// mountOptions configures the options from the command line flags
func mountOptions(device string) (options []fuse.MountOption) {
options = []fuse.MountOption{
fuse.MaxReadahead(uint32(mountlib.MaxReadAhead)),
fuse.Subtype("rclone"),
fuse.FSName(device), fuse.VolumeName(device),
fuse.NoAppleDouble(),
fuse.NoAppleXattr(),
// Options from benchmarking in the fuse module
//fuse.MaxReadahead(64 * 1024 * 1024),
//fuse.AsyncRead(), - FIXME this causes
// ReadFileHandle.Read error: read /home/files/ISOs/xubuntu-15.10-desktop-amd64.iso: bad file descriptor
// which is probably related to errors people are having
//fuse.WritebackCache(),
}
if mountlib.AllowNonEmpty {
options = append(options, fuse.AllowNonEmptyMount())
}
if mountlib.AllowOther {
options = append(options, fuse.AllowOther())
}
if mountlib.AllowRoot {
options = append(options, fuse.AllowRoot())
}
if mountlib.DefaultPermissions {
options = append(options, fuse.DefaultPermissions())
}
if mountlib.ReadOnly {
options = append(options, fuse.ReadOnly())
}
if mountlib.WritebackCache {
options = append(options, fuse.WritebackCache())
}
if len(*mountlib.ExtraOptions) > 0 {
fs.Errorf(nil, "-o/--option not supported with this FUSE backend")
}
if len(*mountlib.ExtraFlags) > 0 {
fs.Errorf(nil, "--fuse-flag not supported with this FUSE backend")
}
return options
}
// mount the file system
//
// The mount point will be ready when this returns.
//
// returns an error, and an error channel for the serve process to
// report an error when fusermount is called.
func mount(f fs.Fs, mountpoint string) (*mountlib.FS, <-chan error, func() error, error) {
fs.Debugf(f, "Mounting on %q", mountpoint)
c, err := fuse.Mount(mountpoint, mountOptions(f.Name()+":"+f.Root())...)
if err != nil {
return nil, nil, nil, err
}
filesys := NewFS(f)
server := fusefs.New(c, nil)
// Serve the mount point in the background returning error to errChan
errChan := make(chan error, 1)
go func() {
err := server.Serve(filesys)
closeErr := c.Close()
if err == nil {
err = closeErr
}
errChan <- err
}()
// check if the mount process has an error to report
<-c.Ready
if err := c.MountError; err != nil {
return nil, nil, nil, err
}
unmount := func() error {
return fuse.Unmount(mountpoint)
}
return filesys.FS, errChan, unmount, nil
}
// Mount mounts the remote at mountpoint.
//
// If noModTime is set then the mount will not read or set modification times.
func Mount(f fs.Fs, mountpoint string) error {
if mountlib.DebugFUSE {
fuse.Debug = func(msg interface{}) {
fs.Debugf("fuse", "%v", msg)
}
}
// Mount it
FS, errChan, unmount, err := mount(f, mountpoint)
if err != nil {
return errors.Wrap(err, "failed to mount FUSE fs")
}
sigInt := make(chan os.Signal, 1)
signal.Notify(sigInt, syscall.SIGINT, syscall.SIGTERM)
sigHup := make(chan os.Signal, 1)
signal.Notify(sigHup, syscall.SIGHUP)
waitloop:
for {
select {
// umount triggered outside the app
case err = <-errChan:
break waitloop
// Program abort: umount
case <-sigInt:
err = unmount()
break waitloop
// user sent SIGHUP to clear the cache
case <-sigHup:
root, err := FS.Root()
if err != nil {
fs.Errorf(f, "Error reading root: %v", err)
} else {
root.ForgetAll()
}
}
}
if err != nil {
return errors.Wrap(err, "failed to umount FUSE fs")
}
return nil
}
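
Mount's waitloop select is the heart of the lifecycle: it exits when the serve goroutine reports an error or on SIGINT/SIGTERM, but handles SIGHUP in place without leaving the loop. A standalone sketch with a timer standing in for the serve goroutine (unix-only, because of the signal names):

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	errChan := make(chan error, 1)
	go func() { // stands in for the FUSE serve goroutine
		time.Sleep(2 * time.Second)
		errChan <- nil
	}()
	sigInt := make(chan os.Signal, 1)
	signal.Notify(sigInt, syscall.SIGINT, syscall.SIGTERM)
	sigHup := make(chan os.Signal, 1)
	signal.Notify(sigHup, syscall.SIGHUP)
waitloop:
	for {
		select {
		case err := <-errChan:
			fmt.Println("serve finished:", err)
			break waitloop
		case <-sigInt:
			fmt.Println("interrupted, unmounting")
			break waitloop
		case <-sigHup:
			fmt.Println("SIGHUP: would flush the directory cache")
		}
	}
}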

View File

@@ -1,33 +0,0 @@
// +build linux darwin freebsd
package mount
import (
"testing"
"github.com/ncw/rclone/cmd/mountlib/mounttest"
)
func TestMain(m *testing.M) { mounttest.TestMain(m, mount) }
func TestDirLs(t *testing.T) { mounttest.TestDirLs(t) }
func TestDirCreateAndRemoveDir(t *testing.T) { mounttest.TestDirCreateAndRemoveDir(t) }
func TestDirCreateAndRemoveFile(t *testing.T) { mounttest.TestDirCreateAndRemoveFile(t) }
func TestDirRenameFile(t *testing.T) { mounttest.TestDirRenameFile(t) }
func TestDirRenameEmptyDir(t *testing.T) { mounttest.TestDirRenameEmptyDir(t) }
func TestDirRenameFullDir(t *testing.T) { mounttest.TestDirRenameFullDir(t) }
func TestDirModTime(t *testing.T) { mounttest.TestDirModTime(t) }
func TestDirCacheFlush(t *testing.T) { mounttest.TestDirCacheFlush(t) }
func TestDirCacheFlushOnDirRename(t *testing.T) { mounttest.TestDirCacheFlushOnDirRename(t) }
func TestFileModTime(t *testing.T) { mounttest.TestFileModTime(t) }
func TestFileModTimeWithOpenWriters(t *testing.T) { mounttest.TestFileModTimeWithOpenWriters(t) }
func TestMount(t *testing.T) { mounttest.TestMount(t) }
func TestRoot(t *testing.T) { mounttest.TestRoot(t) }
func TestReadByByte(t *testing.T) { mounttest.TestReadByByte(t) }
func TestReadChecksum(t *testing.T) { mounttest.TestReadChecksum(t) }
func TestReadFileDoubleClose(t *testing.T) { mounttest.TestReadFileDoubleClose(t) }
func TestReadSeek(t *testing.T) { mounttest.TestReadSeek(t) }
func TestWriteFileNoWrite(t *testing.T) { mounttest.TestWriteFileNoWrite(t) }
func TestWriteFileWrite(t *testing.T) { mounttest.TestWriteFileWrite(t) }
func TestWriteFileOverwrite(t *testing.T) { mounttest.TestWriteFileOverwrite(t) }
func TestWriteFileDoubleClose(t *testing.T) { mounttest.TestWriteFileDoubleClose(t) }
func TestWriteFileFsync(t *testing.T) { mounttest.TestWriteFileFsync(t) }

View File

@@ -1,6 +0,0 @@
// Build stub for mount on unsupported platforms to stop go complaining
// about "no buildable Go source files"
// +build !linux,!darwin,!freebsd
package mount

View File

@@ -1,63 +0,0 @@
// +build linux darwin freebsd
package mount
import (
"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
"github.com/ncw/rclone/cmd/mountlib"
"github.com/ncw/rclone/fs"
"golang.org/x/net/context"
)
// ReadFileHandle is an open for read file handle on a File
type ReadFileHandle struct {
*mountlib.ReadFileHandle
// mu sync.Mutex
// closed bool // set if handle has been closed
// r *fs.Account
// o fs.Object
// readCalled bool // set if read has been called
// offset int64
}
// Check interface satisfied
var _ fusefs.Handle = (*ReadFileHandle)(nil)
// Check interface satisfied
var _ fusefs.HandleReader = (*ReadFileHandle)(nil)
// Read from the file handle
func (fh *ReadFileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) (err error) {
dataRead := -1
defer fs.Trace(fh, "len=%d, offset=%d", req.Size, req.Offset)("read=%d, err=%v", &dataRead, &err)
data, err := fh.ReadFileHandle.Read(int64(req.Size), req.Offset)
if err != nil {
return translateError(err)
}
resp.Data = data
dataRead = len(data)
return nil
}
// Check interface satisfied
var _ fusefs.HandleFlusher = (*ReadFileHandle)(nil)
// Flush is called each time the file or directory is closed.
// Because there can be multiple file descriptors referring to a
// single opened file, Flush can be called multiple times.
func (fh *ReadFileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) (err error) {
defer fs.Trace(fh, "")("err=%v", &err)
return translateError(fh.ReadFileHandle.Flush())
}
var _ fusefs.HandleReleaser = (*ReadFileHandle)(nil)
// Release is called when we are finished with the file handle
//
// It isn't called directly from userspace so the error is ignored by
// the kernel
func (fh *ReadFileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) (err error) {
defer fs.Trace(fh, "")("err=%v", &err)
return translateError(fh.ReadFileHandle.Release())
}

View File

@@ -1,72 +0,0 @@
// +build ignore
// Read blocks out of a single file to time the seeking code
package main
import (
"flag"
"io"
"log"
"math/rand"
"os"
"time"
)
var (
// Flags
iterations = flag.Int("n", 25, "Iterations to try")
maxBlockSize = flag.Int("b", 1024*1024, "Max block size to read")
randSeed = flag.Int64("seed", 1, "Seed for the random number generator")
)
func randomSeekTest(size int64, in *os.File, name string) {
start := rand.Int63n(size)
blockSize := rand.Intn(*maxBlockSize)
if int64(blockSize) > size-start {
blockSize = int(size - start)
}
log.Printf("Reading %d from %d", blockSize, start)
_, err := in.Seek(start, 0)
if err != nil {
log.Fatalf("Seek failed on %q: %v", name, err)
}
buf := make([]byte, blockSize)
_, err = io.ReadFull(in, buf)
if err != nil {
log.Fatalf("Read failed on %q: %v", name, err)
}
}
func main() {
flag.Parse()
args := flag.Args()
if len(args) != 1 {
log.Fatalf("Require 1 file as argument")
}
rand.Seed(*randSeed)
name := args[0]
in, err := os.Open(name)
if err != nil {
log.Fatalf("Couldn't open %q: %v", name, err)
}
fi, err := in.Stat()
if err != nil {
log.Fatalf("Couldn't stat %q: %v", name, err)
}
start := time.Now()
for i := 0; i < *iterations; i++ {
randomSeekTest(fi.Size(), in, name)
}
dt := time.Since(start)
log.Printf("That took %v for %d iterations, %v per iteration", dt, *iterations, dt/time.Duration(*iterations))
err = in.Close()
if err != nil {
log.Fatalf("Error closing %q: %v", name, err)
}
}

View File

@@ -1,115 +0,0 @@
// +build ignore
// Read two files with lots of seeking to stress test the seek code
package main
import (
"bytes"
"flag"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
"time"
)
var (
// Flags
iterations = flag.Int("n", 1E6, "Iterations to try")
maxBlockSize = flag.Int("b", 1024*1024, "Max block size to read")
)
func init() {
rand.Seed(time.Now().UnixNano())
}
func randomSeekTest(size int64, in1, in2 *os.File, file1, file2 string) {
start := rand.Int63n(size)
blockSize := rand.Intn(*maxBlockSize)
if int64(blockSize) > size-start {
blockSize = int(size - start)
}
log.Printf("Reading %d from %d", blockSize, start)
_, err := in1.Seek(start, 0)
if err != nil {
log.Fatalf("Seek failed on %q: %v", file1, err)
}
_, err = in2.Seek(start, 0)
if err != nil {
log.Fatalf("Seek failed on %q: %v", file2, err)
}
buf1 := make([]byte, blockSize)
n1, err := io.ReadFull(in1, buf1)
if err != nil {
log.Fatalf("Read failed on %q: %v", file1, err)
}
buf2 := make([]byte, blockSize)
n2, err := io.ReadFull(in2, buf2)
if err != nil {
log.Fatalf("Read failed on %q: %v", file2, err)
}
if n1 != n2 {
log.Fatalf("Read different lengths %d (%q) != %d (%q)", n1, file1, n2, file2)
}
if !bytes.Equal(buf1, buf2) {
log.Printf("Dumping different blocks")
err = ioutil.WriteFile("/tmp/z1", buf1, 0777)
if err != nil {
log.Fatalf("Failed to write /tmp/z1: %v", err)
}
err = ioutil.WriteFile("/tmp/z2", buf2, 0777)
if err != nil {
log.Fatalf("Failed to write /tmp/z2: %v", err)
}
log.Fatalf("Read different contents - saved in /tmp/z1 and /tmp/z2")
}
}
func main() {
flag.Parse()
args := flag.Args()
if len(args) != 2 {
log.Fatalf("Require 2 files as argument")
}
file1, file2 := args[0], args[1]
in1, err := os.Open(file1)
if err != nil {
log.Fatalf("Couldn't open %q: %v", file1, err)
}
in2, err := os.Open(file2)
if err != nil {
log.Fatalf("Couldn't open %q: %v", file2, err)
}
fi1, err := in1.Stat()
if err != nil {
log.Fatalf("Couldn't stat %q: %v", file1, err)
}
fi2, err := in2.Stat()
if err != nil {
log.Fatalf("Couldn't stat %q: %v", file2, err)
}
if fi1.Size() != fi2.Size() {
log.Fatalf("Files not the same size")
}
for i := 0; i < *iterations; i++ {
randomSeekTest(fi1.Size(), in1, in2, file1, file2)
}
err = in1.Close()
if err != nil {
log.Fatalf("Error closing %q: %v", file1, err)
}
err = in2.Close()
if err != nil {
log.Fatalf("Error closing %q: %v", file2, err)
}
}

View File

@@ -1,129 +0,0 @@
// +build ignore
// Read lots of files with lots of simultaneous seeking to stress test the seek code
package main
import (
"flag"
"io"
"log"
"math/rand"
"os"
"path/filepath"
"sort"
"sync"
"time"
)
var (
// Flags
iterations = flag.Int("n", 1E6, "Iterations to try")
maxBlockSize = flag.Int("b", 1024*1024, "Max block size to read")
simultaneous = flag.Int("transfers", 16, "Number of simultaneous files to open")
seeksPerFile = flag.Int("seeks", 8, "Seeks per file")
mask = flag.Int64("mask", 0, "mask for seek, eg 0x7fff")
)
func init() {
rand.Seed(time.Now().UnixNano())
}
func seekTest(n int, file string) {
in, err := os.Open(file)
if err != nil {
log.Fatalf("Couldn't open %q: %v", file, err)
}
fi, err := in.Stat()
if err != nil {
log.Fatalf("Couldn't stat %q: %v", file, err)
}
size := fi.Size()
// FIXME make sure we try start and end
maxBlockSize := *maxBlockSize
if int64(maxBlockSize) > size {
maxBlockSize = int(size)
}
for i := 0; i < n; i++ {
start := rand.Int63n(size)
if *mask != 0 {
start &^= *mask
}
blockSize := rand.Intn(maxBlockSize)
beyondEnd := false
switch rand.Intn(10) {
case 0:
start = 0
case 1:
start = size - int64(blockSize)
case 2:
// seek beyond the end
start = size + int64(blockSize)
beyondEnd = true
default:
}
if !beyondEnd && int64(blockSize) > size-start {
blockSize = int(size - start)
}
log.Printf("%s: Reading %d from %d", file, blockSize, start)
_, err = in.Seek(start, 0)
if err != nil {
log.Fatalf("Seek failed on %q: %v", file, err)
}
buf := make([]byte, blockSize)
n, err := io.ReadFull(in, buf)
if beyondEnd && err == io.EOF {
// OK
} else if err != nil {
log.Fatalf("Read failed on %q: %v (%d)", file, err, n)
}
}
err = in.Close()
if err != nil {
log.Fatalf("Error closing %q: %v", file, err)
}
}
// Find all the files in dir
func findFiles(dir string) (files []string) {
filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if info.Mode().IsRegular() && info.Size() > 0 {
files = append(files, path)
}
return nil
})
sort.Strings(files)
return files
}
func main() {
flag.Parse()
args := flag.Args()
if len(args) != 1 {
log.Fatalf("Require a directory as argument")
}
dir := args[0]
files := findFiles(dir)
jobs := make(chan string, *simultaneous)
var wg sync.WaitGroup
wg.Add(*simultaneous)
for i := 0; i < *simultaneous; i++ {
go func() {
defer wg.Done()
for file := range jobs {
seekTest(*seeksPerFile, file)
}
}()
}
for i := 0; i < *iterations; i++ {
i := rand.Intn(len(files))
jobs <- files[i]
//jobs <- files[i]
}
close(jobs)
wg.Wait()
}

View File

@@ -1,68 +0,0 @@
// +build linux darwin freebsd
package mount
import (
"errors"
"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
"github.com/ncw/rclone/cmd/mountlib"
"github.com/ncw/rclone/fs"
"golang.org/x/net/context"
)
var errClosedFileHandle = errors.New("Attempt to use closed file handle")
// WriteFileHandle is an open for write handle on a File
type WriteFileHandle struct {
*mountlib.WriteFileHandle
}
// Check interface satisfied
var _ fusefs.Handle = (*WriteFileHandle)(nil)
// Check interface satisfied
var _ fusefs.HandleWriter = (*WriteFileHandle)(nil)
// Write data to the file handle
func (fh *WriteFileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) (err error) {
defer fs.Trace(fh, "len=%d, offset=%d", len(req.Data), req.Offset)("written=%d, err=%v", &resp.Size, &err)
n, err := fh.WriteFileHandle.Write(req.Data, req.Offset)
if err != nil {
return translateError(err)
}
resp.Size = int(n)
return nil
}
// Flush is called on each close() of a file descriptor. So if a
// filesystem wants to return write errors in close() and the file has
// cached dirty data, this is a good place to write back data and
// return any errors. Since many applications ignore close() errors
// this is not always useful.
//
// NOTE: The flush() method may be called more than once for each
// open(). This happens if more than one file descriptor refers to an
// opened file due to dup(), dup2() or fork() calls. It is not
// possible to determine if a flush is final, so each flush should be
// treated equally. Multiple write-flush sequences are relatively
// rare, so this shouldn't be a problem.
//
// Filesystems shouldn't assume that flush will always be called after
// some writes, or that if will be called at all.
func (fh *WriteFileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) (err error) {
defer fs.Trace(fh, "")("err=%v", &err)
return translateError(fh.WriteFileHandle.Flush())
}
var _ fusefs.HandleReleaser = (*WriteFileHandle)(nil)
// Release is called when we are finished with the file handle
//
// It isn't called directly from userspace so the error is ignored by
// the kernel
func (fh *WriteFileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) (err error) {
defer fs.Trace(fh, "")("err=%v", &err)
return translateError(fh.WriteFileHandle.Release())
}

View File

@@ -1,60 +0,0 @@
package mountlib
import (
"time"
"github.com/ncw/rclone/fs"
)
// info to create a new object
type createInfo struct {
f fs.Fs
remote string
}
func newCreateInfo(f fs.Fs, remote string) *createInfo {
return &createInfo{
f: f,
remote: remote,
}
}
// Fs returns read only access to the Fs that this object is part of
func (ci *createInfo) Fs() fs.Info {
return ci.f
}
// String returns the remote path
func (ci *createInfo) String() string {
return ci.remote
}
// Remote returns the remote path
func (ci *createInfo) Remote() string {
return ci.remote
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (ci *createInfo) Hash(fs.HashType) (string, error) {
return "", fs.ErrHashUnsupported
}
// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (ci *createInfo) ModTime() time.Time {
return time.Now()
}
// Size returns the size of the file
func (ci *createInfo) Size() int64 {
// FIXME this means this won't work with all remotes...
return 0
}
// Storable says whether this object can be stored
func (ci *createInfo) Storable() bool {
return true
}
var _ fs.ObjectInfo = (*createInfo)(nil)
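A hedged sketch of how an fs.ObjectInfo like createInfo is consumed: it supplies the source metadata when uploading a stream whose final length is unknown. This is an assumption for illustration - the helper below is hypothetical and the exact Put signature is not part of this diff:
// Hypothetical helper (would live in package mountlib, with fs and io
// imported): upload a stream under a remote path using createInfo as
// the source metadata. Size() returns 0, which is why remotes that
// must know the length up front don't work with mount writes.
func uploadNew(f fs.Fs, remote string, in io.Reader) (fs.Object, error) {
	src := newCreateInfo(f, remote)
	return f.Put(in, src) // Put call shape approximate for this era
}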

View File

@@ -1,486 +0,0 @@
package mountlib
import (
"path"
"strings"
"sync"
"time"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
// DirEntry describes the contents of a directory entry
//
// It can be a file or a directory
//
// node may be nil, but o may not
type DirEntry struct {
Obj fs.DirEntry
Node Node
}
// Dir represents a directory entry
type Dir struct {
fsys *FS
inode uint64 // inode number
f fs.Fs
path string
modTime time.Time
mu sync.Mutex // protects the following
read time.Time // time directory entry last read
items map[string]*DirEntry // NB can be nil when directory not read yet
}
func newDir(fsys *FS, f fs.Fs, fsDir fs.Directory) *Dir {
return &Dir{
fsys: fsys,
f: f,
path: fsDir.Remote(),
modTime: fsDir.ModTime(),
inode: NewInode(),
}
}
// String converts it to printable
func (d *Dir) String() string {
if d == nil {
return "<nil *Dir>"
}
return d.path + "/"
}
// IsFile returns false for Dir - satisfies Node interface
func (d *Dir) IsFile() bool {
return false
}
// Inode returns the inode number - satisfies Node interface
func (d *Dir) Inode() uint64 {
return d.inode
}
// Node returns the Node associated with this - satisfies Noder interface
func (d *Dir) Node() Node {
return d
}
// ForgetAll ensures the directory and all its children are purged
// from the cache.
func (d *Dir) ForgetAll() {
d.ForgetPath("")
}
// ForgetPath clears the cache for itself and all subdirectories if
// they match the given path. The path is specified relative from the
// directory it is called from.
// It is not possible to traverse the directory tree upwards, i.e.
// you cannot clear the cache for the Dir's ancestors or siblings.
func (d *Dir) ForgetPath(relativePath string) {
absPath := path.Join(d.path, relativePath)
if absPath == "." {
absPath = ""
}
d.walk(absPath, func(dir *Dir) {
fs.Debugf(dir.path, "forgetting directory cache")
dir.read = time.Time{}
dir.items = nil
})
}
// walk runs a function on all directories whose path matches
// the given absolute one. It will be called on a directory's
// children first. It will not apply the function to parent
// nodes, regardless of the given path.
func (d *Dir) walk(absPath string, fun func(*Dir)) {
d.mu.Lock()
defer d.mu.Unlock()
if d.items != nil {
for _, entry := range d.items {
if dir, ok := entry.Node.(*Dir); ok {
dir.walk(absPath, fun)
}
}
}
if d.path == absPath || absPath == "" || strings.HasPrefix(d.path, absPath+"/") {
fun(d)
}
}
// rename should be called after the directory is renamed
//
// Reset the directory to new state, discarding all the objects and
// reading everything again
func (d *Dir) rename(newParent *Dir, fsDir fs.Directory) {
d.ForgetAll()
d.path = fsDir.Remote()
d.modTime = fsDir.ModTime()
d.read = time.Time{}
}
// addObject adds a new object or directory to the directory
//
// note that we add new objects rather than updating old ones
func (d *Dir) addObject(o fs.DirEntry, node Node) *DirEntry {
item := &DirEntry{
Obj: o,
Node: node,
}
d.mu.Lock()
if d.items != nil {
d.items[path.Base(o.Remote())] = item
}
d.mu.Unlock()
return item
}
// delObject removes an object from the directory
func (d *Dir) delObject(leaf string) {
d.mu.Lock()
if d.items != nil {
delete(d.items, leaf)
}
d.mu.Unlock()
}
// reads the directory and sets d.items - must be called with the lock held
func (d *Dir) _readDir() error {
when := time.Now()
if d.read.IsZero() || d.items == nil {
// fs.Debugf(d.path, "Reading directory")
} else {
age := when.Sub(d.read)
if age < d.fsys.dirCacheTime {
return nil
}
fs.Debugf(d.path, "Re-reading directory (%v old)", age)
}
entries, err := fs.ListDirSorted(d.f, false, d.path)
if err == fs.ErrorDirNotFound {
// We treat directory not found as empty because we
// create directories on the fly
} else if err != nil {
return err
}
// NB when we re-read a directory after its cache has expired
// we drop the old files which should lead to correct
// behaviour but may not be very efficient.
// Keep a note of the previous contents of the directory
oldItems := d.items
// Cache the items by name
d.items = make(map[string]*DirEntry, len(entries))
for _, entry := range entries {
switch item := entry.(type) {
case fs.Object:
obj := item
name := path.Base(obj.Remote())
d.items[name] = &DirEntry{
Obj: obj,
Node: nil,
}
case fs.Directory:
dir := item
name := path.Base(dir.Remote())
// Use old dir value if it exists
if oldItems != nil {
if oldItem, ok := oldItems[name]; ok {
if _, ok := oldItem.Obj.(fs.Directory); ok {
d.items[name] = oldItem
continue
}
}
}
d.items[name] = &DirEntry{
Obj: dir,
Node: nil,
}
default:
err = errors.Errorf("unknown type %T", item)
fs.Errorf(d.path, "readDir error: %v", err)
return err
}
}
d.read = when
return nil
}
// lookup a single item in the directory
//
// returns ENOENT if not found.
func (d *Dir) lookup(leaf string) (*DirEntry, error) {
d.mu.Lock()
defer d.mu.Unlock()
err := d._readDir()
if err != nil {
return nil, err
}
item, ok := d.items[leaf]
if !ok {
return nil, ENOENT
}
return item, nil
}
// Check to see if a directory is empty
func (d *Dir) isEmpty() (bool, error) {
d.mu.Lock()
defer d.mu.Unlock()
err := d._readDir()
if err != nil {
return false, err
}
return len(d.items) == 0, nil
}
// ModTime returns the modification time of the directory
func (d *Dir) ModTime() time.Time {
// fs.Debugf(d.path, "Dir.ModTime %v", d.modTime)
return d.modTime
}
// SetModTime sets the modTime for this dir
func (d *Dir) SetModTime(modTime time.Time) error {
if d.fsys.readOnly {
return EROFS
}
d.mu.Lock()
defer d.mu.Unlock()
d.modTime = modTime
return nil
}
// lookupNode calls lookup then makes sure the node is not nil in the DirEntry
func (d *Dir) lookupNode(leaf string) (item *DirEntry, err error) {
item, err = d.lookup(leaf)
if err != nil {
return nil, err
}
if item.Node != nil {
return item, nil
}
var node Node
switch x := item.Obj.(type) {
case fs.Object:
node, err = newFile(d, x, leaf), nil
case fs.Directory:
node, err = newDir(d.fsys, d.f, x), nil
default:
err = errors.Errorf("unknown type %T", item)
}
if err != nil {
return nil, err
}
item = d.addObject(item.Obj, node)
return item, nil
}
// Lookup looks up a specific entry in the receiver.
//
// Lookup should return a Node corresponding to the entry. If the
// name does not exist in the directory, Lookup should return ENOENT.
//
// Lookup need not handle the names "." and "..".
func (d *Dir) Lookup(name string) (node Node, err error) {
path := path.Join(d.path, name)
// fs.Debugf(path, "Dir.Lookup")
item, err := d.lookupNode(name)
if err != nil {
if err != ENOENT {
fs.Errorf(path, "Dir.Lookup error: %v", err)
}
return nil, err
}
// fs.Debugf(path, "Dir.Lookup OK")
return item.Node, nil
}
// ReadDirAll reads the contents of the directory
func (d *Dir) ReadDirAll() (items []*DirEntry, err error) {
// fs.Debugf(d.path, "Dir.ReadDirAll")
d.mu.Lock()
defer d.mu.Unlock()
err = d._readDir()
if err != nil {
fs.Debugf(d.path, "Dir.ReadDirAll error: %v", err)
return nil, err
}
for _, item := range d.items {
items = append(items, item)
}
// fs.Debugf(d.path, "Dir.ReadDirAll OK with %d entries", len(items))
return items, nil
}
// Create makes a new file
func (d *Dir) Create(name string) (*File, *WriteFileHandle, error) {
if d.fsys.readOnly {
return nil, nil, EROFS
}
path := path.Join(d.path, name)
// fs.Debugf(path, "Dir.Create")
src := newCreateInfo(d.f, path)
// This gets added to the directory when the file is written
file := newFile(d, nil, name)
fh, err := newWriteFileHandle(d, file, src)
if err != nil {
fs.Errorf(path, "Dir.Create error: %v", err)
return nil, nil, err
}
// fs.Debugf(path, "Dir.Create OK")
return file, fh, nil
}
// Mkdir creates a new directory
func (d *Dir) Mkdir(name string) (*Dir, error) {
if d.fsys.readOnly {
return nil, EROFS
}
path := path.Join(d.path, name)
// fs.Debugf(path, "Dir.Mkdir")
err := d.f.Mkdir(path)
if err != nil {
fs.Errorf(path, "Dir.Mkdir failed to create directory: %v", err)
return nil, err
}
fsDir := fs.NewDir(path, time.Now())
dir := newDir(d.fsys, d.f, fsDir)
d.addObject(fsDir, dir)
// fs.Debugf(path, "Dir.Mkdir OK")
return dir, nil
}
// Remove removes the entry with the given name from
// the receiver, which must be a directory. The entry to be removed
// may correspond to a file (unlink) or to a directory (rmdir).
func (d *Dir) Remove(name string) error {
if d.fsys.readOnly {
return EROFS
}
path := path.Join(d.path, name)
// fs.Debugf(path, "Dir.Remove")
item, err := d.lookupNode(name)
if err != nil {
fs.Errorf(path, "Dir.Remove error: %v", err)
return err
}
switch x := item.Obj.(type) {
case fs.Object:
err = x.Remove()
if err != nil {
fs.Errorf(path, "Dir.Remove file error: %v", err)
return err
}
case fs.Directory:
// Check directory is empty first
dir := item.Node.(*Dir)
empty, err := dir.isEmpty()
if err != nil {
fs.Errorf(path, "Dir.Remove dir error: %v", err)
return err
}
if !empty {
fs.Errorf(path, "Dir.Remove not empty")
return ENOTEMPTY
}
// remove directory
err = d.f.Rmdir(path)
if err != nil {
fs.Errorf(path, "Dir.Remove failed to remove directory: %v", err)
return err
}
default:
fs.Errorf(path, "Dir.Remove unknown type %T", item)
return errors.Errorf("unknown type %T", item)
}
// Remove the item from the directory listing
d.delObject(name)
// fs.Debugf(path, "Dir.Remove OK")
return nil
}
// Rename the file
func (d *Dir) Rename(oldName, newName string, destDir *Dir) error {
if d.fsys.readOnly {
return EROFS
}
oldPath := path.Join(d.path, oldName)
newPath := path.Join(destDir.path, newName)
// fs.Debugf(oldPath, "Dir.Rename to %q", newPath)
oldItem, err := d.lookupNode(oldName)
if err != nil {
fs.Errorf(oldPath, "Dir.Rename error: %v", err)
return err
}
var newObj fs.DirEntry
oldNode := oldItem.Node
switch x := oldItem.Obj.(type) {
case fs.Object:
oldObject := x
// FIXME: could Copy then Delete if Move not available
// - though care needed if case insensitive...
doMove := d.f.Features().Move
if doMove == nil {
err := errors.Errorf("Fs %q can't rename files (no Move)", d.f)
fs.Errorf(oldPath, "Dir.Rename error: %v", err)
return err
}
newObject, err := doMove(oldObject, newPath)
if err != nil {
fs.Errorf(oldPath, "Dir.Rename error: %v", err)
return err
}
newObj = newObject
// Update the node with the new details
if oldNode != nil {
if oldFile, ok := oldNode.(*File); ok {
fs.Debugf(oldItem.Obj, "Updating file with %v %p", newObject, oldFile)
oldFile.rename(destDir, newObject)
}
}
case fs.Directory:
doDirMove := d.f.Features().DirMove
if doDirMove == nil {
err := errors.Errorf("Fs %q can't rename directories (no DirMove)", d.f)
fs.Errorf(oldPath, "Dir.Rename error: %v", err)
return err
}
srcRemote := x.Remote()
dstRemote := newPath
err = doDirMove(d.f, srcRemote, dstRemote)
if err != nil {
fs.Errorf(oldPath, "Dir.Rename error: %v", err)
return err
}
newDir := fs.NewDirCopy(x).SetRemote(newPath)
newObj = newDir
// Update the node with the new details
if oldNode != nil {
if oldDir, ok := oldNode.(*Dir); ok {
fs.Debugf(oldItem.Obj, "Updating dir with %v %p", newDir, oldDir)
oldDir.rename(destDir, newDir)
}
}
default:
err = errors.Errorf("unknown type %T", oldItem)
fs.Errorf(d.path, "Dir.ReadDirAll error: %v", err)
return err
}
// Show moved - delete from old dir and add to new
d.delObject(oldName)
destDir.addObject(newObj, oldNode)
// fs.Debugf(newPath, "Dir.Rename renamed from %q", oldPath)
return nil
}
// Fsync the directory
//
// Note that we don't do anything except return OK
func (d *Dir) Fsync() error {
return nil
}
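To make the ForgetPath semantics above concrete, a brief hedged usage sketch (d is any *Dir; the paths are illustrative):
// Invalidate the cached listing of "a/b" and everything below it,
// relative to d. Ancestors and siblings of d are left untouched.
d.ForgetPath("a/b")
// Invalidate d itself together with all of its children.
d.ForgetAll()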

View File

@@ -1,39 +0,0 @@
// Cross platform errors
package mountlib
import "fmt"
// Error describes low level errors in a cross platform way
type Error byte
// NB if changing errors translateError in cmd/mount/fs.go, cmd/cmount/fs.go
// Low level errors
const (
OK Error = iota
ENOENT
ENOTEMPTY
EEXIST
ESPIPE
EBADF
EROFS
)
var errorNames = []string{
OK: "Success",
ENOENT: "No such file or directory",
ENOTEMPTY: "Directory not empty",
EEXIST: "File exists",
ESPIPE: "Illegal seek",
EBADF: "Bad file descriptor",
EROFS: "Read only file system",
}
// Error renders the error as a string
func (e Error) Error() string {
if int(e) >= len(errorNames) {
return fmt.Sprintf("Low level error %d", e)
}
return errorNames[e]
}
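The NB comment above points at translateError in the mount backends, which is not part of this diff. For orientation, a hedged sketch of what such a mapping could look like for the bazil.org/fuse backend - an illustrative assumption, not rclone's actual implementation:
// Hypothetical sketch: convert mountlib's cross platform errors into
// errors the bazil FUSE library understands.
func translateError(err error) error {
	switch err {
	case nil, mountlib.OK:
		return nil
	case mountlib.ENOENT:
		return fuse.ENOENT
	case mountlib.EEXIST:
		return fuse.EEXIST
	}
	return err // pass anything unrecognised straight through
}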

View File

@@ -1,220 +0,0 @@
package mountlib
import (
"path"
"sync"
"sync/atomic"
"time"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
// File represents a file
type File struct {
inode uint64 // inode number
size int64 // size of file - read and written with atomic int64 - must be 64 bit aligned
d *Dir // parent directory - read only
mu sync.RWMutex // protects the following
o fs.Object // NB o may be nil if file is being written
leaf string // leaf name of the object
writers int // number of writers for this file
pendingModTime time.Time // will be applied once o becomes available, i.e. after file was written
}
// newFile creates a new File
func newFile(d *Dir, o fs.Object, leaf string) *File {
return &File{
d: d,
o: o,
leaf: leaf,
inode: NewInode(),
}
}
// String converts it to printable
func (f *File) String() string {
if f == nil {
return "<nil *File>"
}
return path.Join(f.d.path, f.leaf)
}
// IsFile returns true for File - satisfies Node interface
func (f *File) IsFile() bool {
return true
}
// Inode returns the inode number - satisfies Node interface
func (f *File) Inode() uint64 {
return f.inode
}
// Node returns the Node associated with this - satisfies Noder interface
func (f *File) Node() Node {
return f
}
// rename should be called to update f.o and f.d after a rename
func (f *File) rename(d *Dir, o fs.Object) {
f.mu.Lock()
f.o = o
f.d = d
f.mu.Unlock()
}
// addWriters increments or decrements the writers
func (f *File) addWriters(n int) {
f.mu.Lock()
f.writers += n
f.mu.Unlock()
}
// Attr fills out the attributes for the file
func (f *File) Attr(noModTime bool) (modTime time.Time, Size, Blocks uint64, err error) {
f.mu.Lock()
defer f.mu.Unlock()
// if o is nil it isn't valid yet or there are writers, so return the size so far
if f.o == nil || f.writers != 0 {
Size = uint64(atomic.LoadInt64(&f.size))
if !noModTime && !f.pendingModTime.IsZero() {
modTime = f.pendingModTime
}
} else {
Size = uint64(f.o.Size())
if !noModTime {
modTime = f.o.ModTime()
}
}
Blocks = (Size + 511) / 512
// fs.Debugf(f.o, "File.Attr modTime=%v, Size=%d, Blocks=%v", modTime, Size, Blocks)
return
}
// SetModTime sets the modtime for the file
func (f *File) SetModTime(modTime time.Time) error {
if f.d.fsys.readOnly {
return EROFS
}
f.mu.Lock()
defer f.mu.Unlock()
f.pendingModTime = modTime
if f.o != nil {
return f.applyPendingModTime()
}
// queue up for later, hoping f.o becomes available
return nil
}
// call with the mutex held
func (f *File) applyPendingModTime() error {
defer func() { f.pendingModTime = time.Time{} }()
if f.pendingModTime.IsZero() {
return nil
}
if f.o == nil {
return errors.New("Cannot apply ModTime, file object is not available")
}
err := f.o.SetModTime(f.pendingModTime)
switch err {
case nil:
fs.Debugf(f.o, "File.applyPendingModTime OK")
case fs.ErrorCantSetModTime, fs.ErrorCantSetModTimeWithoutDelete:
// do nothing, in order to not break "touch somefile" if it exists already
default:
fs.Errorf(f.o, "File.applyPendingModTime error: %v", err)
return err
}
return nil
}
// Update the size while writing
func (f *File) setSize(n int64) {
atomic.StoreInt64(&f.size, n)
}
// Update the object when written
func (f *File) setObject(o fs.Object) {
f.mu.Lock()
defer f.mu.Unlock()
f.o = o
_ = f.applyPendingModTime()
f.d.addObject(o, f)
}
// Wait for f.o to become non-nil for a short time, returning it or an
// error
//
// Call without the mutex held
func (f *File) waitForValidObject() (o fs.Object, err error) {
for i := 0; i < 50; i++ {
f.mu.Lock()
o = f.o
writers := f.writers
f.mu.Unlock()
if o != nil {
return o, nil
}
if writers == 0 {
return nil, errors.New("can't open file - writer failed")
}
time.Sleep(100 * time.Millisecond)
}
return nil, ENOENT
}
// OpenRead open the file for read
func (f *File) OpenRead() (fh *ReadFileHandle, err error) {
// if o is nil it isn't valid yet
o, err := f.waitForValidObject()
if err != nil {
return nil, err
}
// fs.Debugf(o, "File.OpenRead")
fh, err = newReadFileHandle(f, o)
err = errors.Wrap(err, "open for read")
if err != nil {
fs.Errorf(o, "File.OpenRead failed: %v", err)
return nil, err
}
return fh, nil
}
// OpenWrite open the file for write
func (f *File) OpenWrite() (fh *WriteFileHandle, err error) {
if f.d.fsys.readOnly {
return nil, EROFS
}
// if o is nil it isn't valid yet
o, err := f.waitForValidObject()
if err != nil {
return nil, err
}
// fs.Debugf(o, "File.OpenWrite")
src := newCreateInfo(f.d.f, o.Remote())
fh, err = newWriteFileHandle(f.d, f, src)
err = errors.Wrap(err, "open for write")
if err != nil {
fs.Errorf(o, "File.OpenWrite failed: %v", err)
return nil, err
}
return fh, nil
}
// Fsync the file
//
// Note that we don't do anything except return OK
func (f *File) Fsync() error {
return nil
}
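The pending mod time machinery above is easiest to follow as a sequence of calls; a hedged sketch of the order of events while a file is still uploading (f.o == nil):
// Illustrative call order only:
f.SetModTime(t) // f.o is nil, so t is parked in f.pendingModTime
// ... the write handle finishes uploading the object, then calls:
f.setObject(o) // stores o, and applyPendingModTime() pushes t upstream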

View File

@@ -1,139 +0,0 @@
package mountlib
import (
"fmt"
"strings"
"sync/atomic"
"time"
"github.com/ncw/rclone/fs"
)
// Node represents either a *Dir or a *File
type Node interface {
IsFile() bool
Inode() uint64
}
var (
_ Node = (*File)(nil)
_ Node = (*Dir)(nil)
)
// Noder represents something which can return a node
type Noder interface {
fmt.Stringer
Node() Node
}
var (
_ Noder = (*File)(nil)
_ Noder = (*Dir)(nil)
_ Noder = (*ReadFileHandle)(nil)
_ Noder = (*WriteFileHandle)(nil)
)
// FS represents the top level filing system
type FS struct {
f fs.Fs
root *Dir
noSeek bool // don't allow seeking if set
noChecksum bool // don't check checksums if set
readOnly bool // if set FS is read only
dirCacheTime time.Duration // how long to consider directory listing cache valid
}
// NewFS creates a new filing system and root directory
func NewFS(f fs.Fs) *FS {
fsDir := fs.NewDir("", time.Now())
fsys := &FS{
f: f,
}
if NoSeek {
fsys.noSeek = true
}
if NoChecksum {
fsys.noChecksum = true
}
if ReadOnly {
fsys.readOnly = true
}
fsys.dirCacheTime = DirCacheTime
fsys.root = newDir(fsys, f, fsDir)
if PollInterval > 0 {
fsys.PollChanges(PollInterval)
}
return fsys
}
// PollChanges will poll the remote every pollInterval for changes if the remote
// supports it. If a non-polling option is used, the given time interval can be
// ignored
func (fsys *FS) PollChanges(pollInterval time.Duration) *FS {
doDirChangeNotify := fsys.f.Features().DirChangeNotify
if doDirChangeNotify != nil {
doDirChangeNotify(fsys.root.ForgetPath, pollInterval)
}
return fsys
}
// Root returns the root node
func (fsys *FS) Root() (*Dir, error) {
// fs.Debugf(fsys.f, "Root()")
return fsys.root, nil
}
var inodeCount uint64
// NewInode creates a new unique inode number
func NewInode() (inode uint64) {
return atomic.AddUint64(&inodeCount, 1)
}
// Lookup finds the Node by path starting from the root
func (fsys *FS) Lookup(path string) (node Node, err error) {
node = fsys.root
for path != "" {
i := strings.IndexRune(path, '/')
var name string
if i < 0 {
name, path = path, ""
} else {
name, path = path[:i], path[i+1:]
}
if name == "" {
continue
}
dir, ok := node.(*Dir)
if !ok {
// We need to look in a directory, but found a file
return nil, ENOENT
}
node, err = dir.Lookup(name)
if err != nil {
return nil, err
}
}
return
}
// Statfs is called to obtain file system metadata.
// It should write that data to resp.
func (fsys *FS) Statfs() error {
/* FIXME
const blockSize = 4096
const fsBlocks = (1 << 50) / blockSize
resp.Blocks = fsBlocks // Total data blocks in file system.
resp.Bfree = fsBlocks // Free blocks in file system.
resp.Bavail = fsBlocks // Free blocks in file system if you're not root.
resp.Files = 1E9 // Total files in file system.
resp.Ffree = 1E9 // Free files in file system.
resp.Bsize = blockSize // Block size
resp.Namelen = 255 // Maximum file name length?
resp.Frsize = blockSize // Fragment size, smallest addressable data size in the file system.
*/
return nil
}
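A hedged usage sketch for Lookup above (the path is illustrative): it walks the path one component at a time from the root, so a file sitting in the middle of the path yields ENOENT:
node, err := fsys.Lookup("some/dir/file.txt")
if err != nil {
	return err // ENOENT if a component is missing or is a file
}
if node.IsFile() {
	// node is a *File; otherwise it is a *Dir
}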

View File

@@ -1,193 +0,0 @@
package mountlib
// Globals
import (
"log"
"os"
"time"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs"
"github.com/spf13/cobra"
)
// Options set by command line flags
var (
NoModTime = false
NoChecksum = false
DebugFUSE = false
NoSeek = false
DirCacheTime = 5 * 60 * time.Second
PollInterval = time.Minute
// mount options
ReadOnly = false
AllowNonEmpty = false
AllowRoot = false
AllowOther = false
DefaultPermissions = false
WritebackCache = false
MaxReadAhead fs.SizeSuffix = 128 * 1024
Umask = 0
UID = ^uint32(0) // these values instruct WinFSP-FUSE to use the current user
GID = ^uint32(0) // overridden for non-Windows in mount_unix.go
// foreground = false
// default permissions for directories - modified by umask in Mount
DirPerms = os.FileMode(0777)
FilePerms = os.FileMode(0666)
ExtraOptions *[]string
ExtraFlags *[]string
)
// NewMountCommand makes a mount command with the given name and Mount function
func NewMountCommand(commandName string, Mount func(f fs.Fs, mountpoint string) error) *cobra.Command {
var commandDefintion = &cobra.Command{
Use: commandName + " remote:path /path/to/mountpoint",
Short: `Mount the remote as a mountpoint. **EXPERIMENTAL**`,
Long: `
rclone ` + commandName + ` allows Linux, FreeBSD, macOS and Windows to
mount any of Rclone's cloud storage systems as a file system with
FUSE.
This is **EXPERIMENTAL** - use with care.
First set up your remote using ` + "`rclone config`" + `. Check it works with ` + "`rclone ls`" + ` etc.
Start the mount like this
rclone ` + commandName + ` remote:path/to/files /path/to/local/mount
Or on Windows like this where X: is an unused drive letter
rclone ` + commandName + ` remote:path/to/files X:
When the program ends, either via Ctrl+C or receiving a SIGINT or SIGTERM signal,
the mount is automatically stopped.
The umount operation can fail, for example when the mountpoint is busy.
When that happens, it is the user's responsibility to stop the mount manually with
# Linux
fusermount -u /path/to/local/mount
# OS X
umount /path/to/local/mount
### Installing on Windows ###
To run rclone ` + commandName + ` on Windows, you will need to
download and install [WinFsp](http://www.secfs.net/winfsp/).
WinFsp is an [open source](https://github.com/billziss-gh/winfsp)
Windows File System Proxy which makes it easy to write user space file
systems for Windows. It provides a FUSE emulation layer which rclone
uses in combination with
[cgofuse](https://github.com/billziss-gh/cgofuse). Both of these
packages are by Bill Zissimopoulos who was very helpful during the
implementation of rclone ` + commandName + ` for Windows.
#### Windows caveats ####
Note that drives created as Administrator are not visible by other
accounts (including the account that was elevated as
Administrator). So if you start a Windows drive from an Administrative
Command Prompt and then try to access the same drive from Explorer
(which does not run as Administrator), you will not be able to see the
new drive.
The easiest way around this is to start the drive from a normal
command prompt. It is also possible to start a drive from the SYSTEM
account (using [the WinFsp.Launcher
infrastructure](https://github.com/billziss-gh/winfsp/wiki/WinFsp-Service-Architecture))
which creates drives accessible for everyone on the system.
### Limitations ###
This can only write files sequentially, and it can only seek when reading.
This means that many applications won't work with their files on an
rclone mount.
The bucket based remotes (eg Swift, S3, Google Cloud Storage, B2,
Hubic) won't work from the root - you will need to specify a bucket,
or a path within the bucket. So ` + "`swift:`" + ` won't work whereas
` + "`swift:bucket`" + ` will as will ` + "`swift:bucket/path`" + `.
None of these support the concept of directories, so empty
directories will have a tendency to disappear once they fall out of
the directory cache.
Only supported on Linux, FreeBSD, OS X and Windows at the moment.
### rclone ` + commandName + ` vs rclone sync/copy ###
File systems expect things to be 100% reliable, whereas cloud storage
systems are a long way from 100% reliable. The rclone sync/copy
commands cope with this with lots of retries. However rclone ` + commandName + `
can't use retries in the same way without making local copies of the
uploads. This might happen in the future, but for the moment rclone
` + commandName + ` won't do that, so will be less reliable than the rclone command.
### Filters ###
Note that all the rclone filters can be used to select a subset of the
files to be visible in the mount.
### Directory Cache ###
Using the ` + "`--dir-cache-time`" + ` flag, you can set how long a
directory should be considered up to date and not refreshed from the
backend. Changes made locally in the mount may appear immediately or
invalidate the cache. However, changes done on the remote will only
be picked up once the cache expires.
Alternatively, you can send a ` + "`SIGHUP`" + ` signal to rclone for
it to flush all directory caches, regardless of how old they are.
Assuming only one rclone instance is running, you can reset the cache
like this:
kill -SIGHUP $(pidof rclone)
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)
fdst := cmd.NewFsDst(args)
// Mask permissions
DirPerms = 0777 &^ os.FileMode(Umask)
FilePerms = 0666 &^ os.FileMode(Umask)
// Show stats if the user has specifically requested them
if cmd.ShowStats() {
stopStats := cmd.StartStats()
defer close(stopStats)
}
err := Mount(fdst, args[1])
if err != nil {
log.Fatalf("Fatal error: %v", err)
}
},
}
// Register the command
cmd.Root.AddCommand(commandDefinition)
// Add flags
flags := commandDefinition.Flags()
flags.BoolVarP(&NoModTime, "no-modtime", "", NoModTime, "Don't read/write the modification time (can speed things up).")
flags.BoolVarP(&NoChecksum, "no-checksum", "", NoChecksum, "Don't compare checksums on up/download.")
flags.BoolVarP(&DebugFUSE, "debug-fuse", "", DebugFUSE, "Debug the FUSE internals - needs -v.")
flags.BoolVarP(&NoSeek, "no-seek", "", NoSeek, "Don't allow seeking in files.")
flags.DurationVarP(&DirCacheTime, "dir-cache-time", "", DirCacheTime, "Time to cache directory entries for.")
flags.DurationVarP(&PollInterval, "poll-interval", "", PollInterval, "Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable.")
// mount options
flags.BoolVarP(&ReadOnly, "read-only", "", ReadOnly, "Mount read-only.")
flags.BoolVarP(&AllowNonEmpty, "allow-non-empty", "", AllowNonEmpty, "Allow mounting over a non-empty directory.")
flags.BoolVarP(&AllowRoot, "allow-root", "", AllowRoot, "Allow access to root user.")
flags.BoolVarP(&AllowOther, "allow-other", "", AllowOther, "Allow access to other users.")
flags.BoolVarP(&DefaultPermissions, "default-permissions", "", DefaultPermissions, "Makes kernel enforce access control based on the file mode.")
flags.BoolVarP(&WritebackCache, "write-back-cache", "", WritebackCache, "Makes kernel buffer writes before sending them to rclone. Without this, writethrough caching is used.")
flags.VarP(&MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads.")
ExtraOptions = flags.StringArrayP("option", "o", []string{}, "Option for libfuse/WinFsp. Repeat if required.")
ExtraFlags = flags.StringArrayP("fuse-flag", "", []string{}, "Flags or arguments to be passed direct to libfuse/WinFsp. Repeat if required.")
//flags.BoolVarP(&foreground, "foreground", "", foreground, "Do not detach.")
platformFlags(flags)
return commandDefinition
}

View File

@@ -1,11 +0,0 @@
// +build !linux,!darwin,!freebsd
package mountlib
import (
"github.com/spf13/pflag"
)
// add any extra platform specific flags
func platformFlags(flags *pflag.FlagSet) {
}

View File

@@ -1,19 +0,0 @@
// +build linux darwin freebsd
package mountlib
import (
"github.com/spf13/pflag"
"golang.org/x/sys/unix"
)
// add any extra platform specific flags
func platformFlags(flags *pflag.FlagSet) {
flags.IntVarP(&Umask, "umask", "", Umask, "Override the permission bits set by the filesystem.")
Umask = unix.Umask(0) // read the umask
unix.Umask(Umask) // set it back to what it was
UID = uint32(unix.Geteuid())
GID = uint32(unix.Getegid())
flags.Uint32VarP(&UID, "uid", "", UID, "Override the uid field set by the filesystem.")
flags.Uint32VarP(&GID, "gid", "", GID, "Override the gid field set by the filesystem.")
}

View File

@@ -1,229 +0,0 @@
package mounttest
import (
"io/ioutil"
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestDirLs checks directory listing
func TestDirLs(t *testing.T) {
run.skipIfNoFUSE(t)
run.checkDir(t, "")
run.mkdir(t, "a directory")
run.createFile(t, "a file", "hello")
run.checkDir(t, "a directory/|a file 5")
run.rmdir(t, "a directory")
run.rm(t, "a file")
run.checkDir(t, "")
}
// TestDirCreateAndRemoveDir tests creating and removing a directory
func TestDirCreateAndRemoveDir(t *testing.T) {
run.skipIfNoFUSE(t)
run.mkdir(t, "dir")
run.mkdir(t, "dir/subdir")
run.checkDir(t, "dir/|dir/subdir/")
// Check we can't delete a directory with stuff in
err := os.Remove(run.path("dir"))
assert.Error(t, err, "file exists")
// Now delete subdir then dir - should produce no errors
run.rmdir(t, "dir/subdir")
run.checkDir(t, "dir/")
run.rmdir(t, "dir")
run.checkDir(t, "")
}
// TestDirCreateAndRemoveFile tests creating and removing a file
func TestDirCreateAndRemoveFile(t *testing.T) {
run.skipIfNoFUSE(t)
run.mkdir(t, "dir")
run.createFile(t, "dir/file", "potato")
run.checkDir(t, "dir/|dir/file 6")
// Check we can't delete a directory with stuff in
err := os.Remove(run.path("dir"))
assert.Error(t, err, "file exists")
// Now delete file
run.rm(t, "dir/file")
run.checkDir(t, "dir/")
run.rmdir(t, "dir")
run.checkDir(t, "")
}
// TestDirRenameFile tests renaming a file
func TestDirRenameFile(t *testing.T) {
run.skipIfNoFUSE(t)
run.mkdir(t, "dir")
run.createFile(t, "file", "potato")
run.checkDir(t, "dir/|file 6")
err := os.Rename(run.path("file"), run.path("file2"))
require.NoError(t, err)
run.checkDir(t, "dir/|file2 6")
data, err := ioutil.ReadFile(run.path("file2"))
require.NoError(t, err)
assert.Equal(t, "potato", string(data))
err = os.Rename(run.path("file2"), run.path("dir/file3"))
require.NoError(t, err)
run.checkDir(t, "dir/|dir/file3 6")
data, err = ioutil.ReadFile(run.path("dir/file3"))
require.NoError(t, err)
assert.Equal(t, "potato", string(data))
run.rm(t, "dir/file3")
run.rmdir(t, "dir")
run.checkDir(t, "")
}
// TestDirRenameEmptyDir tests renaming an empty directory
func TestDirRenameEmptyDir(t *testing.T) {
run.skipIfNoFUSE(t)
run.mkdir(t, "dir")
run.mkdir(t, "dir1")
run.checkDir(t, "dir/|dir1/")
err := os.Rename(run.path("dir1"), run.path("dir/dir2"))
require.NoError(t, err)
run.checkDir(t, "dir/|dir/dir2/")
err = os.Rename(run.path("dir/dir2"), run.path("dir/dir3"))
require.NoError(t, err)
run.checkDir(t, "dir/|dir/dir3/")
run.rmdir(t, "dir/dir3")
run.rmdir(t, "dir")
run.checkDir(t, "")
}
// TestDirRenameFullDir tests renaming a full directory
func TestDirRenameFullDir(t *testing.T) {
run.skipIfNoFUSE(t)
run.mkdir(t, "dir")
run.mkdir(t, "dir1")
run.createFile(t, "dir1/potato.txt", "maris piper")
run.checkDir(t, "dir/|dir1/|dir1/potato.txt 11")
err := os.Rename(run.path("dir1"), run.path("dir/dir2"))
require.NoError(t, err)
run.checkDir(t, "dir/|dir/dir2/|dir/dir2/potato.txt 11")
err = os.Rename(run.path("dir/dir2"), run.path("dir/dir3"))
require.NoError(t, err)
run.checkDir(t, "dir/|dir/dir3/|dir/dir3/potato.txt 11")
run.rm(t, "dir/dir3/potato.txt")
run.rmdir(t, "dir/dir3")
run.rmdir(t, "dir")
run.checkDir(t, "")
}
// TestDirModTime tests mod times
func TestDirModTime(t *testing.T) {
run.skipIfNoFUSE(t)
run.mkdir(t, "dir")
mtime := time.Date(2012, 11, 18, 17, 32, 31, 0, time.UTC)
err := os.Chtimes(run.path("dir"), mtime, mtime)
require.NoError(t, err)
info, err := os.Stat(run.path("dir"))
require.NoError(t, err)
// avoid errors because of timezone differences
assert.Equal(t, info.ModTime().Unix(), mtime.Unix())
run.rmdir(t, "dir")
}
// TestDirCacheFlush tests flushing the dir cache
func TestDirCacheFlush(t *testing.T) {
run.skipIfNoFUSE(t)
run.checkDir(t, "")
run.mkdir(t, "dir")
run.mkdir(t, "otherdir")
run.createFile(t, "dir/file", "1")
run.createFile(t, "otherdir/file", "1")
dm := newDirMap("otherdir/|otherdir/file 1|dir/|dir/file 1")
localDm := make(dirMap)
run.readLocal(t, localDm, "")
assert.Equal(t, dm, localDm, "expected vs fuse mount")
err := run.fremote.Mkdir("dir/subdir")
require.NoError(t, err)
root, err := run.filesys.Root()
require.NoError(t, err)
// expect newly created "subdir" on remote to not show up
root.ForgetPath("otherdir")
run.readLocal(t, localDm, "")
assert.Equal(t, dm, localDm, "expected vs fuse mount")
root.ForgetPath("dir")
dm = newDirMap("otherdir/|otherdir/file 1|dir/|dir/file 1|dir/subdir/")
run.readLocal(t, localDm, "")
assert.Equal(t, dm, localDm, "expected vs fuse mount")
run.rm(t, "otherdir/file")
run.rmdir(t, "otherdir")
run.rm(t, "dir/file")
run.rmdir(t, "dir/subdir")
run.rmdir(t, "dir")
run.checkDir(t, "")
}
// TestDirCacheFlushOnDirRename tests flushing the dir cache on rename
func TestDirCacheFlushOnDirRename(t *testing.T) {
run.skipIfNoFUSE(t)
run.mkdir(t, "dir")
run.createFile(t, "dir/file", "1")
dm := newDirMap("dir/|dir/file 1")
localDm := make(dirMap)
run.readLocal(t, localDm, "")
assert.Equal(t, dm, localDm, "expected vs fuse mount")
// expect remotely created directory to not show up
err := run.fremote.Mkdir("dir/subdir")
require.NoError(t, err)
run.readLocal(t, localDm, "")
assert.Equal(t, dm, localDm, "expected vs fuse mount")
err = os.Rename(run.path("dir"), run.path("rid"))
require.NoError(t, err)
dm = newDirMap("rid/|rid/subdir/|rid/file 1")
localDm = make(dirMap)
run.readLocal(t, localDm, "")
assert.Equal(t, dm, localDm, "expected vs fuse mount")
run.rm(t, "rid/file")
run.rmdir(t, "rid/subdir")
run.rmdir(t, "rid")
run.checkDir(t, "")
}

View File

@@ -1,57 +0,0 @@
package mounttest
import (
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestFileModTime tests mod times on files
func TestFileModTime(t *testing.T) {
run.skipIfNoFUSE(t)
run.createFile(t, "file", "123")
mtime := time.Date(2012, 11, 18, 17, 32, 31, 0, time.UTC)
err := os.Chtimes(run.path("file"), mtime, mtime)
require.NoError(t, err)
info, err := os.Stat(run.path("file"))
require.NoError(t, err)
// avoid errors because of timezone differences
assert.Equal(t, info.ModTime().Unix(), mtime.Unix())
run.rm(t, "file")
}
// TestFileModTimeWithOpenWriters tests mod time on open files
func TestFileModTimeWithOpenWriters(t *testing.T) {
run.skipIfNoFUSE(t)
mtime := time.Date(2012, 11, 18, 17, 32, 31, 0, time.UTC)
filepath := run.path("cp-archive-test")
f, err := os.Create(filepath)
require.NoError(t, err)
_, err = f.Write([]byte{104, 105})
require.NoError(t, err)
err = os.Chtimes(filepath, mtime, mtime)
require.NoError(t, err)
err = f.Close()
require.NoError(t, err)
info, err := os.Stat(filepath)
require.NoError(t, err)
// avoid errors because of timezone differences
assert.Equal(t, info.ModTime().Unix(), mtime.Unix())
run.rm(t, "cp-archive-test")
}

View File

@@ -1,296 +0,0 @@
// Test suite for rclonefs
package mounttest
import (
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
"runtime"
"strings"
"testing"
"time"
"github.com/ncw/rclone/cmd/mountlib"
"github.com/ncw/rclone/fs"
_ "github.com/ncw/rclone/fs/all" // import all the file systems
"github.com/ncw/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type (
// UnmountFn is called to unmount the file system
UnmountFn func() error
// MountFn is called to mount the file system
MountFn func(f fs.Fs, mountpoint string) (*mountlib.FS, <-chan error, func() error, error)
)
var (
mountFn MountFn
)
// TestMain drives the tests
func TestMain(m *testing.M, fn MountFn) {
mountFn = fn
flag.Parse()
run = newRun()
rc := m.Run()
run.Finalise()
os.Exit(rc)
}
// Run holds the remotes for a test run
type Run struct {
filesys *mountlib.FS
mountPath string
fremote fs.Fs
fremoteName string
cleanRemote func()
umountResult <-chan error
umountFn UnmountFn
skip bool
}
// run holds the master Run data
var run *Run
// newRun initialises the remote mount for testing and returns a run
// object.
//
// r.fremote is an empty remote Fs
//
// Finalise() will tidy them away when done.
func newRun() *Run {
r := &Run{
umountResult: make(chan error, 1),
}
fstest.Initialise()
var err error
r.fremote, r.fremoteName, r.cleanRemote, err = fstest.RandomRemote(*fstest.RemoteName, *fstest.SubDir)
if err != nil {
log.Fatalf("Failed to open remote %q: %v", *fstest.RemoteName, err)
}
err = r.fremote.Mkdir("")
if err != nil {
log.Fatalf("Failed to open mkdir %q: %v", *fstest.RemoteName, err)
}
if runtime.GOOS != "windows" {
r.mountPath, err = ioutil.TempDir("", "rclonefs-mount")
if err != nil {
log.Fatalf("Failed to create mount dir: %v", err)
}
} else {
// Find a free drive letter
drive := ""
for letter := 'E'; letter <= 'Z'; letter++ {
drive = string(letter) + ":"
_, err := os.Stat(drive + "\\")
if os.IsNotExist(err) {
goto found
}
}
log.Fatalf("Couldn't find free drive letter for test")
found:
r.mountPath = drive
}
// Mount it up
r.mount()
return r
}
func (r *Run) mount() {
log.Printf("mount %q %q", r.fremote, r.mountPath)
var err error
r.filesys, r.umountResult, r.umountFn, err = mountFn(r.fremote, r.mountPath)
if err != nil {
log.Printf("mount failed: %v", err)
r.skip = true
}
log.Printf("mount OK")
}
func (r *Run) umount() {
if r.skip {
log.Printf("FUSE not found so skipping umount")
return
}
/*
log.Printf("Calling fusermount -u %q", r.mountPath)
err := exec.Command("fusermount", "-u", r.mountPath).Run()
if err != nil {
log.Printf("fusermount failed: %v", err)
}
*/
log.Printf("Unmounting %q", r.mountPath)
err := r.umountFn()
if err != nil {
log.Printf("signal to umount failed - retrying: %v", err)
time.Sleep(3 * time.Second)
err = r.umountFn()
}
if err != nil {
log.Fatalf("signal to umount failed: %v", err)
}
log.Printf("Waiting for umount")
err = <-r.umountResult
if err != nil {
log.Fatalf("umount failed: %v", err)
}
}
func (r *Run) skipIfNoFUSE(t *testing.T) {
if r.skip {
t.Skip("FUSE not found so skipping test")
}
}
// Finalise cleans the remote and unmounts
func (r *Run) Finalise() {
r.umount()
r.cleanRemote()
err := os.RemoveAll(r.mountPath)
if err != nil {
log.Printf("Failed to clean mountPath %q: %v", r.mountPath, err)
}
}
func (r *Run) path(filepath string) string {
// return windows drive letter root as E:/
if filepath == "" && runtime.GOOS == "windows" {
return run.mountPath + "/"
}
return path.Join(run.mountPath, filepath)
}
type dirMap map[string]struct{}
// Create a dirMap from a string
func newDirMap(dirString string) (dm dirMap) {
dm = make(dirMap)
for _, entry := range strings.Split(dirString, "|") {
if entry != "" {
dm[entry] = struct{}{}
}
}
return dm
}
// Returns a dirMap with only the files in it
func (dm dirMap) filesOnly() dirMap {
newDm := make(dirMap)
for name := range dm {
if !strings.HasSuffix(name, "/") {
newDm[name] = struct{}{}
}
}
return newDm
}
// reads the local tree into dir
func (r *Run) readLocal(t *testing.T, dir dirMap, filepath string) {
realPath := r.path(filepath)
files, err := ioutil.ReadDir(realPath)
require.NoError(t, err)
for _, fi := range files {
name := path.Join(filepath, fi.Name())
if fi.IsDir() {
dir[name+"/"] = struct{}{}
r.readLocal(t, dir, name)
assert.Equal(t, mountlib.DirPerms, fi.Mode().Perm())
} else {
dir[fmt.Sprintf("%s %d", name, fi.Size())] = struct{}{}
assert.Equal(t, mountlib.FilePerms, fi.Mode().Perm())
}
}
}
// reads the remote tree into dir
func (r *Run) readRemote(t *testing.T, dir dirMap, filepath string) {
objs, dirs, err := fs.WalkGetAll(r.fremote, filepath, true, 1)
if err == fs.ErrorDirNotFound {
return
}
require.NoError(t, err)
for _, obj := range objs {
dir[fmt.Sprintf("%s %d", obj.Remote(), obj.Size())] = struct{}{}
}
for _, d := range dirs {
name := d.Remote()
dir[name+"/"] = struct{}{}
r.readRemote(t, dir, name)
}
}
// checkDir checks the local and remote against the string passed in
func (r *Run) checkDir(t *testing.T, dirString string) {
dm := newDirMap(dirString)
localDm := make(dirMap)
r.readLocal(t, localDm, "")
remoteDm := make(dirMap)
r.readRemote(t, remoteDm, "")
// Ignore directories for remote compare
assert.Equal(t, dm.filesOnly(), remoteDm.filesOnly(), "expected vs remote")
assert.Equal(t, dm, localDm, "expected vs fuse mount")
}
func (r *Run) createFile(t *testing.T, filepath string, contents string) {
filepath = r.path(filepath)
err := ioutil.WriteFile(filepath, []byte(contents), 0600)
require.NoError(t, err)
}
func (r *Run) readFile(t *testing.T, filepath string) string {
filepath = r.path(filepath)
result, err := ioutil.ReadFile(filepath)
require.NoError(t, err)
return string(result)
}
func (r *Run) mkdir(t *testing.T, filepath string) {
filepath = r.path(filepath)
err := os.Mkdir(filepath, 0700)
require.NoError(t, err)
}
func (r *Run) rm(t *testing.T, filepath string) {
filepath = r.path(filepath)
err := os.Remove(filepath)
require.NoError(t, err)
}
func (r *Run) rmdir(t *testing.T, filepath string) {
filepath = r.path(filepath)
err := os.Remove(filepath)
require.NoError(t, err)
}
// TestMount checks that the Fs is mounted by seeing if the mountpoint
// is in the mount output
func TestMount(t *testing.T) {
run.skipIfNoFUSE(t)
out, err := exec.Command("mount").Output()
require.NoError(t, err)
assert.Contains(t, string(out), run.mountPath)
}
// TestRoot checks root directory is present and correct
func TestRoot(t *testing.T) {
run.skipIfNoFUSE(t)
fi, err := os.Lstat(run.mountPath)
require.NoError(t, err)
assert.True(t, fi.IsDir())
assert.Equal(t, fi.Mode().Perm(), mountlib.DirPerms)
}
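The dirString format consumed by checkDir above is compact enough to misread, so a hedged example of what newDirMap parses: entries are separated by "|", directories end in "/", and files carry a space and their size:
// Illustrative:
dm := newDirMap("dir/|dir/file 6|top.txt 11")
// dm == dirMap{"dir/": {}, "dir/file 6": {}, "top.txt 11": {}}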

View File

@@ -1,125 +0,0 @@
package mounttest
import (
"io"
"io/ioutil"
"os"
"testing"
"github.com/stretchr/testify/assert"
)
// TestReadByByte reads the file byte by byte, including reading no bytes at all
func TestReadByByte(t *testing.T) {
run.skipIfNoFUSE(t)
var data = []byte("hellohello")
run.createFile(t, "testfile", string(data))
run.checkDir(t, "testfile 10")
for i := 0; i < len(data); i++ {
fd, err := os.Open(run.path("testfile"))
assert.NoError(t, err)
for j := 0; j < i; j++ {
buf := make([]byte, 1)
n, err := io.ReadFull(fd, buf)
assert.NoError(t, err)
assert.Equal(t, 1, n)
assert.Equal(t, buf[0], data[j])
}
err = fd.Close()
assert.NoError(t, err)
}
run.rm(t, "testfile")
}
// TestReadChecksum checks the checksum reading is working
func TestReadChecksum(t *testing.T) {
run.skipIfNoFUSE(t)
// create file big enough so we exceed any single FUSE read
// request
b := make([]rune, 3*128*1024)
for i := range b {
b[i] = 'r'
}
run.createFile(t, "bigfile", string(b))
// The hash comparison would fail in Flush, if we did not
// ensure we read the whole file
fd, err := os.Open(run.path("bigfile"))
assert.NoError(t, err)
buf := make([]byte, 10)
_, err = io.ReadFull(fd, buf)
assert.NoError(t, err)
err = fd.Close()
assert.NoError(t, err)
// The hash comparison would fail, because we only read parts
// of the file
fd, err = os.Open(run.path("bigfile"))
assert.NoError(t, err)
// read at start
_, err = io.ReadFull(fd, buf)
assert.NoError(t, err)
// read at end
_, err = fd.Seek(int64(len(b)-len(buf)), 0)
assert.NoError(t, err)
_, err = io.ReadFull(fd, buf)
assert.NoError(t, err)
// ensure we don't compare hashes
err = fd.Close()
assert.NoError(t, err)
run.rm(t, "bigfile")
}
// TestReadSeek tests seeking
func TestReadSeek(t *testing.T) {
run.skipIfNoFUSE(t)
var data = []byte("helloHELLO")
run.createFile(t, "testfile", string(data))
run.checkDir(t, "testfile 10")
fd, err := os.Open(run.path("testfile"))
assert.NoError(t, err)
// Seek to half way
_, err = fd.Seek(5, 0)
assert.NoError(t, err)
buf, err := ioutil.ReadAll(fd)
assert.NoError(t, err)
assert.Equal(t, buf, []byte("HELLO"))
// Test seeking to the end
_, err = fd.Seek(10, 0)
assert.NoError(t, err)
buf, err = ioutil.ReadAll(fd)
assert.NoError(t, err)
assert.Equal(t, buf, []byte(""))
// Test seeking beyond the end
_, err = fd.Seek(1000000, 0)
assert.NoError(t, err)
buf, err = ioutil.ReadAll(fd)
assert.NoError(t, err)
assert.Equal(t, buf, []byte(""))
// Now back to the start
_, err = fd.Seek(0, 0)
assert.NoError(t, err)
buf, err = ioutil.ReadAll(fd)
assert.NoError(t, err)
assert.Equal(t, buf, []byte("helloHELLO"))
err = fd.Close()
assert.NoError(t, err)
run.rm(t, "testfile")
}

View File

@@ -1,13 +0,0 @@
// +build !linux,!darwin,!freebsd
package mounttest
import (
"runtime"
"testing"
)
// TestReadFileDoubleClose tests double close on read
func TestReadFileDoubleClose(t *testing.T) {
t.Skip("not supported on " + runtime.GOOS)
}

View File

@@ -1,53 +0,0 @@
// +build linux darwin freebsd
package mounttest
import (
"os"
"syscall"
"testing"
"github.com/stretchr/testify/assert"
)
// TestReadFileDoubleClose tests double close on read
func TestReadFileDoubleClose(t *testing.T) {
run.skipIfNoFUSE(t)
run.createFile(t, "testdoubleclose", "hello")
in, err := os.Open(run.path("testdoubleclose"))
assert.NoError(t, err)
fd := in.Fd()
fd1, err := syscall.Dup(int(fd))
assert.NoError(t, err)
fd2, err := syscall.Dup(int(fd))
assert.NoError(t, err)
// close one of the dups - should produce no error
err = syscall.Close(fd1)
assert.NoError(t, err)
// read from the file
buf := make([]byte, 1)
_, err = in.Read(buf)
assert.NoError(t, err)
// close it
err = in.Close()
assert.NoError(t, err)
// read from the other dup - should produce no error as this
// file is now buffered
n, err := syscall.Read(fd2, buf)
assert.NoError(t, err)
assert.Equal(t, 1, n)
// close the dup - should not produce an error
err = syscall.Close(fd2)
assert.NoError(t, err, "input/output error")
run.rm(t, "testdoubleclose")
}

View File

@@ -1,81 +0,0 @@
package mounttest
import (
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestWriteFileNoWrite tests writing a file with no write() calls to it
func TestWriteFileNoWrite(t *testing.T) {
run.skipIfNoFUSE(t)
fd, err := os.Create(run.path("testnowrite"))
assert.NoError(t, err)
err = fd.Close()
assert.NoError(t, err)
// FIXME - wait for the Release on the file
time.Sleep(10 * time.Millisecond)
run.checkDir(t, "testnowrite 0")
run.rm(t, "testnowrite")
}
// FIXMETestWriteOpenFileInDirListing tests that an open file shows up in the directory listing
func FIXMETestWriteOpenFileInDirListing(t *testing.T) {
run.skipIfNoFUSE(t)
fd, err := os.Create(run.path("testnowrite"))
assert.NoError(t, err)
run.checkDir(t, "testnowrite 0")
err = fd.Close()
assert.NoError(t, err)
run.rm(t, "testnowrite")
}
// TestWriteFileWrite tests writing a file and reading it back
func TestWriteFileWrite(t *testing.T) {
run.skipIfNoFUSE(t)
run.createFile(t, "testwrite", "data")
run.checkDir(t, "testwrite 4")
contents := run.readFile(t, "testwrite")
assert.Equal(t, "data", contents)
run.rm(t, "testwrite")
}
// TestWriteFileOverwrite tests overwriting a file
func TestWriteFileOverwrite(t *testing.T) {
run.skipIfNoFUSE(t)
run.createFile(t, "testwrite", "data")
run.checkDir(t, "testwrite 4")
run.createFile(t, "testwrite", "potato")
contents := run.readFile(t, "testwrite")
assert.Equal(t, "potato", contents)
run.rm(t, "testwrite")
}
// TestWriteFileFsync tests Fsync
//
// NB the code for this is in file.go rather than write.go
func TestWriteFileFsync(t *testing.T) {
filepath := run.path("to be synced")
fd, err := os.Create(filepath)
require.NoError(t, err)
_, err = fd.Write([]byte("hello"))
require.NoError(t, err)
err = fd.Sync()
require.NoError(t, err)
err = fd.Close()
require.NoError(t, err)
}

Some files were not shown because too many files have changed in this diff.