Mirror of https://github.com/rclone/rclone.git (synced 2025-12-15 15:53:41 +00:00)

Compare commits

135 Commits (SHA1):

9ed2de3d6e, 4f35fb59c8, 59ba8f28c8, d298b578ab, fabbc035c4, 6530b07cde, f8b7eaec93, 5c226e91c0, 8e3d45d2dc,
a96b522958, fedf81c2b7, 0c6f816a49, dfe771fb0c, bc19e2d84b, 8c4d91cff7, 2fcc18779b, 96cc3e5a0b, cc8fe0630c,
1d9e76bb0f, 337110b7a0, 83733205f6, d8306938a1, 88ea8b305d, e2f4d7b5e3, 8140869767, 6a8de87116, 0da6f24221,
771e60bd07, 40b3c4883f, e320f4a988, 5835f15f21, 67c311233b, 3fcff32524, 472f065ce7, 6c6d7eb770, c646ada3c3,
f55359b3ac, 9d9a17547a, c6dc88766b, 754ce9dec6, bd5f685d0a, c663e24669, 5948764e9e, 539ad44757, 74994a2ec1,
97dced6a0b, e04acb09ce, 87ed7fc932, 90744301d3, bf4879f57f, e22b445cff, 5ab7970e18, e984eeedc4, 968b5a0984,
7af1282375, d9fcc32f70, 870a9fc3b2, 8e3703abeb, ba81277bbe, 88293a4b8a, 981104519e, 1d254a3674, f88d171afd,
ba2091725e, 7c120b8bc5, 5cc5429f99, 09d71239b6, c643e4585e, 873db29391, 81a933ae38, ecb3c7bcef, 80000b904c,
c47c9cd440, b4a0941d4c, c03d6a1ec3, 46d39ebaf7, fe68737268, 2360bf907a, aa093e991e, a5974999eb, 24a6ff54c2,
e89ea3360e, 85f8552c4d, a287e3ced7, 8e4d8d13b8, cf208ad21b, 0faed16899, 8d1c0ad07c, 165e89c266, b4e19cfd62,
20ad96f3cd, d64a37772f, 5fb6f94579, 20535348db, 3d83a265c5, 18a8a61cc5, 1758621a51, 5710247bf6, 78b03929b7,
492362ec7d, 51b24a1dc6, cfdb48c864, 14567952b3, 2b052671e2, 439a126af6, 0fb35f081a, 9ba25c7219, af9c447146,
ee6b39aa6c, 839133c5e1, f4eb48e531, 18439cf2d7, d3c16608e4, 3e27ff1b95, ff91698fb5, c389616657, 442578ca25,
0b51d6221a, 2f9f9afac2, 9711a5d647, cc679aa714, 457ef2c190, 17ffb0855f, 125fc8f1f0, 1660903aa2, b013c58537,
a5b0d88608, 02d50f8c6e, e09ef62d5b, a75bc0703f, 80ecea82e8, 54cd46372a, 282cba20a0, 2479ce2c8e, 9aa4b6bd9b

.gitignore (vendored, 5 changes)

@@ -4,6 +4,7 @@ rclone
rclonetest/rclonetest
build
docs/public
README.html
README.txt
MANUAL.md
MANUAL.html
MANUAL.txt
rclone.1

@@ -1,9 +1,10 @@
language: go
sudo: false

go:
  - 1.1.2
  - 1.2.2
  - 1.3
  - 1.3.3
  - 1.4
  - tip

script:

Makefile (40 changes)

@@ -2,39 +2,46 @@ TAG := $(shell git describe --tags)
LAST_TAG := $(shell git describe --tags --abbrev=0)
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')

rclone: *.go */*.go
rclone:
    @go version
    go build
    go install -v ./...

doc: rclone.1 README.html README.txt
test: rclone
    go test ./...
    cd fs && ./test_all.sh

rclone.1: README.md
    pandoc -s --from markdown --to man README.md -o rclone.1
doc: rclone.1 MANUAL.html MANUAL.txt

README.html: README.md
    pandoc -s --from markdown_github --to html README.md -o README.html
rclone.1: MANUAL.md
    pandoc -s --from markdown --to man MANUAL.md -o rclone.1

README.txt: README.md
    pandoc -s --from markdown_github --to plain README.md -o README.txt
MANUAL.md: make_manual.py docs/content/*.md
    ./make_manual.py

MANUAL.html: MANUAL.md
    pandoc -s --from markdown --to html MANUAL.md -o MANUAL.html

MANUAL.txt: MANUAL.md
    pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt

install: rclone
    install -d ${DESTDIR}/usr/bin
    install -t ${DESTDIR}/usr/bin rclone
    install -t ${DESTDIR}/usr/bin ${GOPATH}/bin/rclone

clean:
    go clean ./...
    find . -name \*~ | xargs -r rm -f
    rm -rf build docs/public
    rm -f rclone rclonetest/rclonetest rclone.1 README.html README.txt
    rm -f rclone rclonetest/rclonetest rclone.1 MANUAL.md MANUAL.html MANUAL.txt

website:
    cd docs && hugo

upload_website: website
    ./rclone -v sync docs/public memstore:www-rclone-org
    rclone -v sync docs/public memstore:www-rclone-org

upload:
    ./rclone -v copy build/ memstore:downloads-rclone-org
    rclone -v copy build/ memstore:downloads-rclone-org

cross: doc
    ./cross-compile $(TAG)

@@ -48,12 +55,15 @@ tag:
    echo -e "package fs\n const Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
    perl -lpe 's/VERSION/${NEW_TAG}/g; s/DATE/'`date -I`'/g;' docs/content/downloads.md.in > docs/content/downloads.md
    git tag $(NEW_TAG)
    @echo "Add this to changelog in README.md"
    @echo "Add this to changelog in docs/content/changelog.md"
    @echo " * $(NEW_TAG) -" `date -I`
    @git log $(LAST_TAG)..$(NEW_TAG) --oneline
    @echo "Then commit the changes"
    @echo git commit -m "Version $(NEW_TAG)" -a -v
    @echo git commit -m \"Version $(NEW_TAG)\" -a -v
    @echo "And finally run make retag before make cross etc"

retag:
    git tag -f $(LAST_TAG)

gen_tests:
    cd fstest/fstests && go generate

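The only non-obvious part of the `tag` target is the `NEW_TAG` expression, which bumps the version by 0.01. A minimal sketch of what it computes, runnable by hand with a hypothetical last tag (inside the Makefile the `$$_` forms escape to perl's `$_`):

```
# Reproduce the Makefile's NEW_TAG computation in a shell.
LAST_TAG=v1.19   # hypothetical last tag, for illustration only
NEW_TAG=$(echo "$LAST_TAG" | perl -lpe 's/v//; $_ += 0.01; $_ = sprintf("v%.2f", $_)')
echo "$NEW_TAG"  # prints v1.20
```
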
README.md (306 changes)

@@ -1,12 +1,13 @@
% rclone(1) User Manual
% Nick Craig-Wood
% Jul 7, 2014

Rclone
======

[](http://rclone.org/)

[Website](http://rclone.org) |
[Documentation](http://rclone.org/docs/) |
[Changelog](http://rclone.org/changelog/) |
[Installation](http://rclone.org/install/) |
[G+](https://google.com/+RcloneOrg)

[](https://travis-ci.org/ncw/rclone) [](https://godoc.org/github.com/ncw/rclone)

Rclone is a command line program to sync files and directories to and from

* Google Drive

@@ -26,300 +27,13 @@ Features
* Check mode to check all MD5SUMs
* Can sync to and from network, eg two different Drive accounts

See the Home page for more documentation and configuration walkthroughs.
See the home page for installation, usage, documentation, changelog
and configuration walkthroughs.

* http://rclone.org/

Install
-------

Rclone is a Go program and comes as a single binary file.

Download the binary for your OS from

* http://rclone.org/downloads/

Or alternatively if you have Go installed use

    go install github.com/ncw/rclone

and this will build the binary in `$GOPATH/bin`.

Configure
---------

First you'll need to configure rclone. As the object storage systems
have quite complicated authentication these are kept in a config file
`.rclone.conf` in your home directory by default. (You can use the
`--config` option to choose a different config file.)

The easiest way to make the config is to run rclone with the config
option, eg

    rclone config

Usage
-----

Rclone syncs a directory tree from local to remote.

Its basic syntax is

    Syntax: [options] subcommand <parameters> <parameters...>

See below for how to specify the source and destination paths.

Subcommands
-----------

    rclone copy source:path dest:path

Copy the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
MD5SUM. Doesn't delete files from the destination.

    rclone sync source:path dest:path

Sync the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
MD5SUM. Deletes any files that exist in source that don't
exist in destination. Since this can cause data loss, test
first with the `--dry-run` flag.

    rclone ls [remote:path]

List all the objects in the path with sizes.

    rclone lsl [remote:path]

List all the objects in the path with sizes and timestamps.

    rclone lsd [remote:path]

List all directories/objects/buckets in the path.

    rclone mkdir remote:path

Make the path if it doesn't already exist.

    rclone rmdir remote:path

Remove the path. Note that you can't remove a path with
objects in it, use purge for that.

    rclone purge remote:path

Remove the path and all of its contents.

    rclone check source:path dest:path

Checks the files in the source and destination match. It
compares sizes and MD5SUMs and prints a report of files which
don't match. It doesn't alter the source or destination.

    rclone md5sum remote:path

Produces an md5sum file for all the objects in the path. This is in
the same format as the standard md5sum tool produces.

General options:

```
--checkers=8: Number of checkers to run in parallel.
--config="~/.rclone.conf": Config file.
-n, --dry-run=false: Do a trial run with no permanent changes
--modify-window=1ns: Max time diff to be considered the same
-q, --quiet=false: Print as little stuff as possible
--stats=1m0s: Interval to print stats
--transfers=4: Number of file transfers to run in parallel.
-v, --verbose=false: Print lots more stuff
```

Developer options:

```
--cpuprofile="": Write cpu profile to file
```

Local Filesystem
----------------

Paths are specified as normal filesystem paths, so

    rclone sync /home/source /tmp/destination

will sync `/home/source` to `/tmp/destination`.

Swift / Rackspace cloudfiles / Memset Memstore
----------------------------------------------

Paths are specified as remote:container (or remote: for the `lsd`
command.) You may put subdirectories in too, eg
`remote:container/path/to/dir`.

So to copy a local directory to a swift container called backup:

    rclone sync /home/source swift:backup

The modified time is stored as metadata on the object as
`X-Object-Meta-Mtime` as floating point since the epoch.

This is a de facto standard (used in the official python-swiftclient
amongst others) for storing the modification time (as read using
os.Stat) for an object.

Amazon S3
---------

Paths are specified as remote:bucket. You may put subdirectories in
too, eg `remote:bucket/path/to/dir`.

So to copy a local directory to an s3 container called backup

    rclone sync /home/source s3:backup

The modified time is stored as metadata on the object as
`X-Amz-Meta-Mtime` as floating point since the epoch.

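The floating point since-the-epoch form is easy to inspect for a local file; a small sketch assuming GNU date (the file name is made up):

```
# Print a file's mtime in the same floating point since-the-epoch form
# that rclone stores as X-Object-Meta-Mtime (swift) or X-Amz-Meta-Mtime (s3).
touch example.txt
date -r example.txt +%s.%N    # eg 1440700000.123456789
```
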
Google drive
------------

Paths are specified as remote:path. Drive paths may be as deep as required.

The initial setup for drive involves getting a token from Google drive
which you need to do in your browser. `rclone config` walks you
through it.

To copy a local directory to a drive directory called backup

    rclone copy /home/source remote:backup

Google drive stores modification times accurate to 1 ms natively.

Dropbox
-------

Paths are specified as remote:path. Dropbox paths may be as deep as required.

The initial setup for dropbox involves getting a token from Dropbox
which you need to do in your browser. `rclone config` walks you
through it.

To copy a local directory to a dropbox directory called backup

    rclone copy /home/source dropbox:backup

Md5sums and timestamps in RFC3339 format accurate to 1ns are stored in
a Dropbox datastore called "rclone". Dropbox datastores are limited
to 100,000 rows so this is the maximum number of files rclone can
manage on Dropbox.

Google Cloud Storage
--------------------

Paths are specified as remote:path. Google Cloud Storage paths may be
as deep as required.

The initial setup for Google Cloud Storage involves getting a token
from Google which you need to do in your browser. `rclone config`
walks you through it.

To copy a local directory to a google cloud storage directory called backup

    rclone copy /home/source remote:backup

Google Cloud Storage stores md5sums natively and rclone stores
modification times as metadata on the object, under the "mtime" key in
RFC3339 format accurate to 1ns.

Single file copies
------------------

Rclone can copy single files

    rclone src:path/to/file dst:path/dir

or

    rclone src:path/to/file dst:path/to/file

Note that you can't rename the file if you are copying from one file to another.

License
-------

This is free software under the terms of the MIT license (check the
COPYING file included in this package).

Bugs
----

* Drive: Sometimes get: Failed to copy: Upload failed: googleapi: Error 403: Rate Limit Exceeded
  * quota is 100.0 requests/second/user
* Empty directories left behind with Local and Drive
  * eg purging a local directory with subdirectories doesn't work

Changelog
---------
* v1.02 - 2014-07-19
  * Implement Dropbox remote
  * Implement Google Cloud Storage remote
  * Verify Md5sums and Sizes after copies
  * Remove times from "ls" command - lists sizes only
  * Add "lsl" - lists times and sizes
  * Add "md5sum" command
* v1.01 - 2014-07-04
  * drive: fix transfer of big files using up lots of memory
* v1.00 - 2014-07-03
  * drive: fix whole second dates
* v0.99 - 2014-06-26
  * Fix --dry-run not working
  * Make compatible with go 1.1
* v0.98 - 2014-05-30
  * s3: Treat missing Content-Length as 0 for some ceph installations
  * rclonetest: add file with a space in
* v0.97 - 2014-05-05
  * Implement copying of single files
  * s3 & swift: support paths inside containers/buckets
* v0.96 - 2014-04-24
  * drive: Fix multiple files of same name being created
  * drive: Use o.Update and fs.Put to optimise transfers
  * Add version number, -V and --version
* v0.95 - 2014-03-28
  * rclone.org: website, docs and graphics
  * drive: fix path parsing
* v0.94 - 2014-03-27
  * Change remote format one last time
  * GNU style flags
* v0.93 - 2014-03-16
  * drive: store token in config file
  * cross compile other versions
  * set strict permissions on config file
* v0.92 - 2014-03-15
  * Config fixes and --config option
* v0.91 - 2014-03-15
  * Make config file
* v0.90 - 2013-06-27
  * Project named rclone
* v0.00 - 2012-11-18
  * Project started

Contact and support
-------------------

The project website is at:

* https://github.com/ncw/rclone

There you can file bug reports, ask for help or send pull requests.

Authors
-------

* Nick Craig-Wood <nick@craig-wood.com>

Contributors
------------

* Your name goes here!

RELEASE.md (new file, 19 lines)

@@ -0,0 +1,19 @@
Required software for making a release
* [github-release](https://github.com/aktau/github-release) for uploading packages
* [gox](https://github.com/mitchellh/gox) for cross compiling
  * Run `gox -build-toolchain`
  * This assumes you have your own source checkout
* pandoc for making the html and man pages

Making a release
* go get -u -f -v ./...
* make test
* make tag
* edit docs/content/changelog.md
* git commit -a -v
* make retag
* # Set the GOPATH for a gox enabled compiler - . ~/bin/go-cross - not required for go >= 1.5
* make cross
* make upload
* make upload_website
* git push --tags origin master

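Run top to bottom, the checklist amounts to something like the following; a minimal sketch assuming the tools listed above are installed and `$EDITOR` is set:

```
#!/bin/sh
# Hypothetical script form of the release checklist above.
set -e
go get -u -f -v ./...
make test
make tag
$EDITOR docs/content/changelog.md
git commit -a -v
make retag
make cross
make upload
make upload_website
git push --tags origin master
```
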
@@ -3,7 +3,7 @@
set -e

# This uses gox from https://github.com/mitchellh/gox
# Make sure you've run gox -build-toolchain
# Make sure you've run gox -build-toolchain - not required for go >= 1.5

if [ "$1" == "" ]; then
    echo "Syntax: $0 Version"
@@ -13,7 +13,9 @@ VERSION="$1"

rm -rf build

gox -output "build/{{.Dir}}-${VERSION}-{{.OS}}-{{.Arch}}/{{.Dir}}"
gox -output "build/{{.Dir}}-${VERSION}-{{.OS}}-{{.Arch}}/{{.Dir}}" -os "darwin linux freebsd openbsd windows freebsd netbsd plan9 solaris"
# Not implemented yet: nacl dragonfly android
# gox -osarch-list for definitive list

mv build/rclone-${VERSION}-darwin-amd64 build/rclone-${VERSION}-osx-amd64
mv build/rclone-${VERSION}-darwin-386 build/rclone-${VERSION}-osx-386
@@ -21,10 +23,12 @@ mv build/rclone-${VERSION}-darwin-386 build/rclone-${VERSION}-osx-386
cd build

for d in `ls`; do
    cp -a ../README.txt $d/
    cp -a ../README.html $d/
    cp -a ../MANUAL.txt $d/README.txt
    cp -a ../MANUAL.html $d/README.html
    cp -a ../rclone.1 $d/
    zip -r9 $d.zip $d
    d_current=${d/-${VERSION}/-current}
    ln $d.zip $d_current.zip
    rm -rf $d
done

docs/content/authors.md (new file, 17 lines)

@@ -0,0 +1,17 @@
---
title: "Authors"
description: "Rclone Authors and Contributors"
date: "2014-06-16"
---

Authors
-------

* Nick Craig-Wood <nick@craig-wood.com>

Contributors
------------

* Alex Couper <amcouper@gmail.com>
* Leonid Shalupov <leonid@shalupov.com>
* Shimon Doodkin <helpmepro1@gmail.com>

docs/content/bugs.md (new file, 31 lines)

@@ -0,0 +1,31 @@
---
title: "Bugs"
description: "Rclone Bugs and Limitations"
date: "2014-06-16"
---

Bugs and Limitations
--------------------

### Empty directories are left behind / not created ###

With remotes that have a concept of directory, eg Local and Drive,
empty directories may be left behind, or not created when one was
expected.

This is because rclone doesn't have a concept of a directory - it only
works on objects. Most of the object storage systems can't actually
store a directory so there is nowhere for rclone to store anything
about directories.

You can work round this to some extent with the `purge` command which
will delete everything under the path, **including** empty directories.

This may be fixed at some point in
[Issue #100](https://github.com/ncw/rclone/issues/100)

### Directory timestamps aren't preserved ###

For the same reason as the above, rclone doesn't have a concept of a
directory - it only works on objects, therefore it can't preserve the
timestamps of directories.

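In practice the difference between the two commands looks like this (the remote path is made up):

```
# rmdir refuses to remove a path which still contains objects;
# purge removes the path and everything under it, empty directories included.
rclone rmdir remote:bucket/dir    # only succeeds if dir is empty
rclone purge remote:bucket/dir    # removes dir and all of its contents
```
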
docs/content/changelog.md (new file, 151 lines)

@@ -0,0 +1,151 @@
---
title: "Documentation"
description: "Rclone Changelog"
date: "2015-08-17"
---

Changelog
---------

* v1.19 - 2015-08-28
  * New features
    * Server side copies for s3/swift/drive/dropbox/gcs
    * Move command - uses server side copies if it can
    * Implement --retries flag - tries 3 times by default
    * Build for plan9/amd64 and solaris/amd64 too
  * Fixes
    * Make a current version download with a fixed URL for scripting
    * Ignore rmdir in limited fs rather than throwing error
  * dropbox
    * Increase chunk size to improve upload speeds massively
    * Issue an error message when trying to upload bad file name
* v1.18 - 2015-08-17
  * drive
    * Add `--drive-use-trash` flag so rclone trashes instead of deletes
    * Add "Forbidden to download" message for files with no downloadURL
  * dropbox
    * Remove datastore
      * This was deprecated and it caused a lot of problems
      * Modification times and MD5SUMs no longer stored
    * Fix uploading files > 2GB
  * s3
    * use official AWS SDK from github.com/aws/aws-sdk-go
    * **NB** will most likely require you to delete and recreate remote
    * enable multipart upload which enables files > 5GB
    * tested with Ceph / RadosGW / S3 emulation
    * many thanks to Sam Liston and Brian Haymore at the [Utah
      Center for High Performance Computing](https://www.chpc.utah.edu/) for a Ceph test account
  * misc
    * Show errors when reading the config file
    * Do not print stats in quiet mode - thanks Leonid Shalupov
    * Add FAQ
    * Fix created directories not obeying umask
    * Linux installation instructions - thanks Shimon Doodkin
* v1.17 - 2015-06-14
  * dropbox: fix case insensitivity issues - thanks Leonid Shalupov
* v1.16 - 2015-06-09
  * Fix uploading big files which was causing timeouts or panics
  * Don't check md5sum after download with --size-only
* v1.15 - 2015-06-06
  * Add --checksum flag to only discard transfers by MD5SUM - thanks Alex Couper
  * Implement --size-only flag to sync on size not checksum & modtime
  * Expand docs and remove duplicated information
  * Document rclone's limitations with directories
  * dropbox: update docs about case insensitivity
* v1.14 - 2015-05-21
  * local: fix encoding of non utf-8 file names - fixes a duplicate file problem
  * drive: docs about rate limiting
  * google cloud storage: Fix compile after API change in "google.golang.org/api/storage/v1"
* v1.13 - 2015-05-10
  * Revise documentation (especially sync)
  * Implement --timeout and --conntimeout
  * s3: ignore etags from multipart uploads which aren't md5sums
* v1.12 - 2015-03-15
  * drive: Use chunked upload for files above a certain size
  * drive: add --drive-chunk-size and --drive-upload-cutoff parameters
  * drive: switch to insert from update when a failed copy deletes the upload
  * core: Log duplicate files if they are detected
* v1.11 - 2015-03-04
  * swift: add region parameter
  * drive: fix crash on failed to update remote mtime
  * In remote paths, change native directory separators to /
  * Add synchronization to ls/lsl/lsd output to stop corruptions
  * Ensure all stats/log messages to go stderr
  * Add --log-file flag to log everything (including panics) to file
  * Make it possible to disable stats printing with --stats=0
  * Implement --bwlimit to limit data transfer bandwidth
* v1.10 - 2015-02-12
  * s3: list an unlimited number of items
  * Fix getting stuck in the configurator
* v1.09 - 2015-02-07
  * windows: Stop drive letters (eg C:) getting mixed up with remotes (eg drive:)
  * local: Fix directory separators on Windows
  * drive: fix rate limit exceeded errors
* v1.08 - 2015-02-04
  * drive: fix subdirectory listing to not list entire drive
  * drive: Fix SetModTime
  * dropbox: adapt code to recent library changes
* v1.07 - 2014-12-23
  * google cloud storage: fix memory leak
* v1.06 - 2014-12-12
  * Fix "Couldn't find home directory" on OSX
  * swift: Add tenant parameter
  * Use new location of Google API packages
* v1.05 - 2014-08-09
  * Improved tests and consequently lots of minor fixes
  * core: Fix race detected by go race detector
  * core: Fixes after running errcheck
  * drive: reset root directory on Rmdir and Purge
  * fs: Document that Purger returns error on empty directory, test and fix
  * google cloud storage: fix ListDir on subdirectory
  * google cloud storage: re-read metadata in SetModTime
  * s3: make reading metadata more reliable to work around eventual consistency problems
  * s3: strip trailing / from ListDir()
  * swift: return directories without / in ListDir
* v1.04 - 2014-07-21
  * google cloud storage: Fix crash on Update
* v1.03 - 2014-07-20
  * swift, s3, dropbox: fix updated files being marked as corrupted
  * Make compile with go 1.1 again
* v1.02 - 2014-07-19
  * Implement Dropbox remote
  * Implement Google Cloud Storage remote
  * Verify Md5sums and Sizes after copies
  * Remove times from "ls" command - lists sizes only
  * Add "lsl" - lists times and sizes
  * Add "md5sum" command
* v1.01 - 2014-07-04
  * drive: fix transfer of big files using up lots of memory
* v1.00 - 2014-07-03
  * drive: fix whole second dates
* v0.99 - 2014-06-26
  * Fix --dry-run not working
  * Make compatible with go 1.1
* v0.98 - 2014-05-30
  * s3: Treat missing Content-Length as 0 for some ceph installations
  * rclonetest: add file with a space in
* v0.97 - 2014-05-05
  * Implement copying of single files
  * s3 & swift: support paths inside containers/buckets
* v0.96 - 2014-04-24
  * drive: Fix multiple files of same name being created
  * drive: Use o.Update and fs.Put to optimise transfers
  * Add version number, -V and --version
* v0.95 - 2014-03-28
  * rclone.org: website, docs and graphics
  * drive: fix path parsing
* v0.94 - 2014-03-27
  * Change remote format one last time
  * GNU style flags
* v0.93 - 2014-03-16
  * drive: store token in config file
  * cross compile other versions
  * set strict permissions on config file
* v0.92 - 2014-03-15
  * Config fixes and --config option
* v0.91 - 2014-03-15
  * Make config file
* v0.90 - 2013-06-27
  * Project named rclone
* v0.00 - 2012-11-18
  * Project started

@@ -5,8 +5,17 @@ date: "2014-04-26"
---

Contact the rclone project
--------------------------

The project website is at:

* https://github.com/ncw/rclone

There you can file bug reports, ask for help or contribute pull
requests.

See also

* [Github project page for source, reporting bugs and pull requests](http://github.com/ncw/rclone)
* <a href="https://google.com/+RcloneOrg" rel="publisher">Google+ page for general comments</a>

Or email [Nick Craig-Wood](mailto:nick@craig-wood.com)

@@ -1,22 +1,9 @@
---
title: "Documentation"
description: "Rclone Documentation"
date: "2014-07-17"
description: "Rclone Usage"
date: "2015-06-06"
---

Install
-------

Rclone is a Go program and comes as a single binary file.

[Download](/downloads/) the relevant binary.

Or alternatively if you have Go installed use

    go get github.com/ncw/rclone

and this will build the binary in `$GOPATH/bin`.

Configure
---------

@@ -30,11 +17,13 @@ option:

    rclone config

See below for detailed instructions for
See the following for detailed instructions for

* [Google drive](/drive/)
* [Amazon S3](/s3/)
* [Swift / Rackspace Cloudfiles / Memset Memstore](/swift/)
* [Dropbox](/dropbox/)
* [Google Cloud Storage](/googlcloudstorage/)
* [Local filesystem](/local/)

Usage
@@ -55,104 +44,247 @@ You can define as many storage paths as you like in the config file.
Subcommands
-----------

rclone copy source:path dest:path
### rclone copy source:path dest:path ###

Copy the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
unchanged files, testing by size and modification time or
MD5SUM. Doesn't delete files from the destination.

rclone sync source:path dest:path
### rclone sync source:path dest:path ###

Sync the source to the destination. Doesn't transfer
unchanged files, testing first by modification time then by
MD5SUM. Deletes any files that exist in source that don't
exist in destination. Since this can cause data loss, test
first with the -dry-run flag.
Sync the source to the destination, changing the destination
only. Doesn't transfer unchanged files, testing by size and
modification time or MD5SUM. Destination is updated to match
source, including deleting files if necessary. Since this can
cause data loss, test first with the `--dry-run` flag.

rclone ls [remote:path]
### rclone ls [remote:path] ###

List all the objects in the path with sizes.
List all the objects in the path with size and path.

rclone lsl [remote:path]
### rclone lsd [remote:path] ###

List all the objects in the path with sizes and timestamps.
List all directories/containers/buckets in the path.

rclone lsd [remote:path]
### rclone lsl [remote:path] ###

List all directories/objects/buckets in the path.
List all the objects in the path with modification time,
size and path.

rclone mkdir remote:path
### rclone md5sum [remote:path] ###

Produces an md5sum file for all the objects in the path. This
is in the same format as the standard md5sum tool produces.

### rclone mkdir remote:path ###

Make the path if it doesn't already exist.

rclone rmdir remote:path
### rclone rmdir remote:path ###

Remove the path. Note that you can't remove a path with
objects in it, use purge for that.

rclone purge remote:path
### rclone purge remote:path ###

Remove the path and all of its contents.

rclone check source:path dest:path
### rclone check source:path dest:path ###

Checks the files in the source and destination match. It
compares sizes and MD5SUMs and prints a report of files which
don't match. It doesn't alter the source or destination.

rclone md5sum remote:path
### rclone config ###

Produces an md5sum file for all the objects in the path. This is in
the same format as the standard md5sum tool produces.
General options:
Enter an interactive configuration session.

```
--checkers=8: Number of checkers to run in parallel.
--transfers=4: Number of file transfers to run in parallel.
--config="~/.rclone.conf": Config file.
-n, --dry-run=false: Do a trial run with no permanent changes
--modify-window=1ns: Max time diff to be considered the same
-q, --quiet=false: Print as little stuff as possible
--stats=1m0s: Interval to print stats
-v, --verbose=false: Print lots more stuff
```
### rclone help ###

Developer options:
Prints help on rclone commands and options.

```
--cpuprofile="": Write cpu profile to file
```
Server Side Copy
----------------

License
Drive, S3, Dropbox, Swift and Google Cloud Storage support server side
copy.

This means if you want to copy one folder to another then rclone won't
download all the files and re-upload them; it will instruct the server
to copy them in place.

Eg

    rclone copy s3:oldbucket s3:newbucket

Will copy the contents of `oldbucket` to `newbucket` without
downloading and re-uploading.

Remotes which don't support server side copy (eg local) **will**
download and re-upload in this case.

Server side copies are used with `sync` and `copy` and will be
identified in the log when using the `-v` flag.

Server side copies will only be attempted if the remote names are the
same.

This can be used when scripting to make aged backups efficiently, eg

    rclone sync remote:current-backup remote:previous-backup
    rclone sync /path/to/files remote:current-backup

Options
-------

This is free software under the terms of the MIT license (check the
COPYING file included in this package).
Rclone has a number of options to control its behaviour.

Bugs
----
Options which use TIME use the go time parser. A duration string is a
possibly signed sequence of decimal numbers, each with optional
fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid
time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".

* Doesn't sync individual files yet, only directories.
* Drive: Sometimes get: Failed to copy: Upload failed: googleapi: Error 403: Rate Limit Exceeded
  * quota is 100.0 requests/second/user
* Empty directories left behind with Local and Drive
  * eg purging a local directory with subdirectories doesn't work
Options which use SIZE use kByte by default. However a suffix of `k`
for kBytes, `M` for MBytes and `G` for GBytes may be used. These are
the binary units, eg 2**10, 2**20, 2**30 respectively.

Contact and support
-------------------
### --bwlimit=SIZE ###

The project website is at:
Bandwidth limit in kBytes/s, or use suffix k|M|G. The default is `0`
which means to not limit bandwidth.

* https://github.com/ncw/rclone
For example to limit bandwidth usage to 10 MBytes/s use `--bwlimit 10M`

There you can file bug reports, ask for help or contribute patches.
This only limits the bandwidth of the data transfer, it doesn't limit
the bandwidth of the directory listings etc.

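For example, combining the flag with a sync (paths are made up):

```
# Cap the data-transfer bandwidth at 10 MBytes/s for a long-running sync.
rclone --bwlimit 10M sync /home/source remote:backup
```
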
Authors
-------
### --checkers=N ###

* Nick Craig-Wood <nick@craig-wood.com>
The number of checkers to run in parallel. Checkers do the equality
checking of files during a sync. For some storage systems (eg s3,
swift, dropbox) this can take a significant amount of time so they are
run in parallel.

Contributors
------------
The default is to run 8 checkers in parallel.

* Your name goes here!
### -c, --checksum ###

Normally rclone will look at modification time and size of files to
see if they are equal. If you set this flag then rclone will check
MD5SUM and size to determine if files are equal.

This is very useful when transferring between remotes which store the
MD5SUM on the object which include swift, s3, drive, and google cloud
storage.

Eg `rclone --checksum sync s3:/bucket swift:/bucket` would run much
quicker than without the `--checksum` flag.

When using this flag, rclone won't update mtimes of remote files if
they are incorrect as it would normally.

### --config=CONFIG_FILE ###

Specify the location of the rclone config file. Normally this is in
your home directory as a file called `.rclone.conf`. If you run
`rclone -h` and look at the help for the `--config` option you will
see where the default location is for you. Use this flag to override
the config location, eg `rclone --config=".myconfig" .config`.

### --contimeout=TIME ###

Set the connection timeout. This should be in go time format which
looks like `5s` for 5 seconds, `10m` for 10 minutes, or `3h30m`.

The connection timeout is the amount of time rclone will wait for a
connection to go through to a remote object storage system. It is
`1m` by default.

### -n, --dry-run ###

Do a trial run with no permanent changes. Use this in combination
with the `-v` flag to see what rclone would do without actually doing
it. Useful when setting up the `sync` command.

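For example, to preview a sync before running it for real (paths are made up):

```
# Show what would be transferred or deleted without changing anything.
rclone -v --dry-run sync /home/source remote:backup
```
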
### --log-file=FILE ###

Log all of rclone's output to FILE. This is not active by default.
This can be useful for tracking down problems with syncs in
combination with the `-v` flag.

### --modify-window=TIME ###

When checking whether a file has been modified, this is the maximum
allowed time difference that a file can have and still be considered
equivalent.

The default is `1ns` unless this is overridden by a remote. For
example OS X only stores modification times to the nearest second so
if you are reading and writing to an OS X filing system this will be
`1s` by default.

This command line flag allows you to override that computed default.

### -q, --quiet ###

Normally rclone outputs stats and a completion message. If you set
this flag it will make as little output as possible.

### --size-only ###

Normally rclone will look at modification time and size of files to
see if they are equal. If you set this flag then rclone will check
only the size.

This can be useful transferring files from dropbox which have been
modified by the desktop sync client which doesn't set checksums or
modification times in the same way as rclone.

When using this flag, rclone won't update mtimes of remote files if
they are incorrect as it would normally.

### --stats=TIME ###

Rclone will print stats at regular intervals to show its progress.

This sets the interval.

The default is `1m`. Use 0 to disable.

### --timeout=TIME ###

This sets the IO idle timeout. If a transfer has started but then
becomes idle for this long it is considered broken and disconnected.

The default is `5m`. Set to 0 to disable.

### --transfers=N ###

The number of file transfers to run in parallel. It can sometimes be
useful to set this to a smaller number if the remote is giving a lot
of timeouts or bigger if you have lots of bandwidth and a fast remote.

The default is to run 4 file transfers in parallel.

### -v, --verbose ###

If you set this flag, rclone will become very verbose telling you
about every file it considers and transfers.

Very useful for debugging.

### -V, --version ###

Prints the version number.

Developer options
-----------------

These options are useful when developing or debugging rclone. There
are also some more remote specific options which aren't documented
here which are used for testing. These start with the remote name, eg
`--drive-test-option`.

### --cpuprofile=FILE ###

Write cpu profile to file. This can be analysed with `go tool pprof`.

@@ -2,34 +2,73 @@
title: "Rclone downloads"
description: "Download rclone binaries for your OS."
type: page
date: "2014-07-19"
date: "2015-08-28"
---

Rclone Download v1.02
Rclone Download v1.19
=====================

* Windows
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.02-windows-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.02-windows-amd64.zip)
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.19-windows-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.19-windows-amd64.zip)
* OSX
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.02-osx-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.02-osx-amd64.zip)
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.19-osx-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.19-osx-amd64.zip)
* Linux
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.02-linux-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.02-linux-amd64.zip)
  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.02-linux-arm.zip)
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.19-linux-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.19-linux-amd64.zip)
  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.19-linux-arm.zip)
* FreeBSD
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.02-freebsd-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.02-freebsd-amd64.zip)
  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.02-freebsd-arm.zip)
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.19-freebsd-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.19-freebsd-amd64.zip)
  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.19-freebsd-arm.zip)
* NetBSD
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.02-netbsd-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.02-netbsd-amd64.zip)
  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.02-netbsd-arm.zip)
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.19-netbsd-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.19-netbsd-amd64.zip)
  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.19-netbsd-arm.zip)
* OpenBSD
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.02-openbsd-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.02-openbsd-amd64.zip)
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.19-openbsd-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.19-openbsd-amd64.zip)
* Plan 9
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.02-plan9-386.zip)
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.19-plan9-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.19-plan9-amd64.zip)
* Solaris
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.19-solaris-amd64.zip)

Downloads for scripting
=======================

If you would like to download the current version (maybe from a
script) from a URL which doesn't change then you can use these links.

* Windows
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-windows-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-windows-amd64.zip)
* OSX
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-osx-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-osx-amd64.zip)
* Linux
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-linux-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-linux-amd64.zip)
  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-current-linux-arm.zip)
* FreeBSD
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-freebsd-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-freebsd-amd64.zip)
  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-current-freebsd-arm.zip)
* NetBSD
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-netbsd-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-netbsd-amd64.zip)
  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-current-netbsd-arm.zip)
* OpenBSD
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-openbsd-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-openbsd-amd64.zip)
* Plan 9
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-plan9-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-plan9-amd64.zip)
* Solaris
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-solaris-amd64.zip)

Older Downloads
===============

Older downloads can be found [here](http://downloads.rclone.org/)

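The fixed "current" URLs make scripted installs straightforward; a minimal sketch for linux-amd64 using one of the links above:

```
# Fetch and unpack the always-current build from a script.
curl -O http://downloads.rclone.org/rclone-current-linux-amd64.zip
unzip rclone-current-linux-amd64.zip
```
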
@@ -31,5 +31,44 @@ Rclone Download VERSION
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-VERSION-openbsd-amd64.zip)
* Plan 9
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-VERSION-plan9-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-VERSION-plan9-amd64.zip)
* Solaris
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-VERSION-solaris-amd64.zip)

Downloads for scripting
=======================

If you would like to download the current version (maybe from a
script) from a URL which doesn't change then you can use these links.

* Windows
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-windows-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-windows-amd64.zip)
* OSX
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-osx-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-osx-amd64.zip)
* Linux
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-linux-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-linux-amd64.zip)
  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-current-linux-arm.zip)
* FreeBSD
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-freebsd-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-freebsd-amd64.zip)
  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-current-freebsd-arm.zip)
* NetBSD
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-netbsd-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-netbsd-amd64.zip)
  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-current-netbsd-arm.zip)
* OpenBSD
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-openbsd-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-openbsd-amd64.zip)
* Plan 9
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-plan9-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-plan9-amd64.zip)
* Solaris
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-solaris-amd64.zip)

Older Downloads
===============

Older downloads can be found [here](http://downloads.rclone.org/)

@@ -1,7 +1,7 @@
---
title: "Google drive"
description: "Rclone docs for Google drive"
date: "2014-04-26"
date: "2015-05-10"
---

<i class="fa fa-google"></i> Google Drive
@@ -69,7 +69,31 @@ To copy a local directory to a drive directory called backup

    rclone copy /home/source remote:backup

Modified time
-------------
### Modified time ###

Google drive stores modification times accurate to 1 ms.

### Revisions ###

Google drive stores revisions of files. When you upload a change to
an existing file to google drive using rclone it will create a new
revision of that file.

Revisions follow the standard google policy which at time of writing
was

* They are deleted after 30 days or 100 revisions (whatever comes first).
* They do not count towards a user storage quota.

### Deleting files ###

By default rclone will delete files permanently when requested. If
sending them to the trash is required instead then use the
`--drive-use-trash` flag.

### Limitations ###

Drive has quite a lot of rate limiting. This causes rclone to be
limited to transferring about 2 files per second only. Individual
files may be transferred much faster at 100s of MBytes/s but lots of
small files can take a long time.

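For example, to have deletions go to the Drive trash rather than being permanent (paths are made up):

```
# Deleted files end up in the Drive trash instead of being removed outright.
rclone --drive-use-trash sync /home/source remote:backup
```
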
@@ -71,10 +71,18 @@ To copy a local directory to a dropbox directory called backup

    rclone copy /home/source remote:backup

Modified time
-------------
### Modified time and MD5SUMs ###

Md5sums and timestamps in RFC3339 format accurate to 1ns are stored in
a Dropbox datastore called "rclone". Dropbox datastores are limited
to 100,000 rows so this is the maximum number of files rclone can
manage on Dropbox.
Dropbox doesn't have the capability of storing modification times or
MD5SUMs so syncs will effectively have the `--size-only` flag set.

### Limitations ###

Note that Dropbox is case insensitive so you can't have a file called
"Hello.doc" and one called "hello.doc".

There are some file names such as `thumbs.db` which Dropbox can't
store. There is a full list of them in the ["Ignored Files" section
of this document](https://www.dropbox.com/en/help/145). Rclone will
issue an error message `File name disallowed - not uploading` if it
attempts to upload one of those file names, but the sync won't fail.

docs/content/faq.md (new file, 75 lines)

@@ -0,0 +1,75 @@
---
title: "FAQ"
description: "Rclone Frequently Asked Questions"
date: "2015-08-27"
---

Frequently Asked Questions
--------------------------

### Do all cloud storage systems support all rclone commands ###

Yes they do. All the rclone commands (eg `sync`, `copy` etc) will
work on all the remote storage systems.

### Can I copy the config from one machine to another ###

Sure! Rclone stores all of its config in a single file. If you want
to find this file, the simplest way is to run `rclone -h` and look at
the help for the `--config` flag which will tell you where it is. Eg,

```
$ rclone -h
Sync files and directories to and from local and remote object stores - v1.18.
[snip]
Options:
--bwlimit=0: Bandwidth limit in kBytes/s, or use suffix k|M|G
--checkers=8: Number of checkers to run in parallel.
-c, --checksum=false: Skip based on checksum & size, not mod-time & size
--config="/home/user/.rclone.conf": Config file.
[snip]
```

So in this config the config file can be found in
`/home/user/.rclone.conf`.

Just copy that to the equivalent place in the destination (run `rclone
-h` above again on the destination machine if not sure).

### Can rclone sync directly from drive to s3 ###

Rclone can sync between two remote cloud storage systems just fine.

Note that it effectively downloads the file and uploads it again, so
the node running rclone would need to have lots of bandwidth.

The syncs would be incremental (on a file by file basis).

Eg

    rclone sync drive:Folder s3:bucket

### Using rclone from multiple locations at the same time ###

You can use rclone from multiple places at the same time if you choose
a different subdirectory for the output, eg

```
Server A> rclone sync /tmp/whatever remote:ServerA
Server B> rclone sync /tmp/whatever remote:ServerB
```

If you sync to the same directory then you should use rclone copy
otherwise the two rclones may delete each other's files, eg

```
Server A> rclone copy /tmp/whatever remote:Backup
Server B> rclone copy /tmp/whatever remote:Backup
```

The file names you upload from Server A and Server B should be
different in this case, otherwise some file systems (eg Drive) may
make duplicates.

@@ -109,8 +109,7 @@ files in the bucket.

    rclone sync /home/local/directory remote:bucket

Modified time
-------------
### Modified time ###

Google Cloud Storage stores md5sums natively and rclone stores
modification times as metadata on the object, under the "mtime" key in

docs/content/install.md (new file, 39 lines)

@@ -0,0 +1,39 @@
---
title: "Install"
description: "Rclone Installation"
date: "2015-06-12"
---

Install
-------

Rclone is a Go program and comes as a single binary file.

[Download](/downloads/) the relevant binary.

Or alternatively if you have Go installed use

    go get github.com/ncw/rclone

and this will build the binary in `$GOPATH/bin`. If you have built
rclone before then you will want to update its dependencies first with
this (remove `-f` if using go < 1.4)

    go get -u -v -f github.com/ncw/rclone/...

See the [Usage section](/docs/) of the docs for how to use rclone, or
run `rclone -h`.

Linux binary download and install example
-----------------------------------------

    unzip rclone-v1.17-linux-amd64.zip
    cd rclone-v1.17-linux-amd64
    # copy binary file
    sudo cp rclone /usr/sbin/
    sudo chown root:root /usr/sbin/rclone
    sudo chmod 755 /usr/sbin/rclone
    # install manpage
    sudo mkdir -p /usr/local/share/man/man1
    sudo cp rclone.1 /usr/local/share/man/man1/
    sudo mandb

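Either way, a quick sanity check afterwards (the flag is documented above under `-V, --version`):

```
# Confirm the binary is found and report its version.
rclone --version
```
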
docs/content/licence.md (new file, 34 lines)

@@ -0,0 +1,34 @@
---
title: "Licence"
description: "Rclone Licence"
date: "2015-06-06"
---

License
-------

This is free software under the terms of the MIT license (check the
COPYING file included with the source code).

```
Copyright (C) 2012 by Nick Craig-Wood http://www.craig-wood.com/nick/

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
```

@@ -16,10 +16,23 @@ Will sync `/home/source` to `/tmp/destination`

These can be configured into the config file for consistency's sake,
but it is probably easier not to.

Modified time
-------------
### Modified time ###

Rclone reads and writes the modified time with an accuracy determined by
the OS. Typically this is 1 ns on Linux, 10 ns on Windows and 1 second
on OS X.
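
To make the source of these limits concrete, here is a minimal sketch
in plain Go (illustrative only, not rclone's own code) of reading a
file's modification time; `file.txt` is a hypothetical path:

```go
package main

import (
	"fmt"
	"log"
	"os"
)

func main() {
	// os.Stat reports the modification time with whatever precision
	// the OS and filesystem provide - e.g. nanoseconds on Linux,
	// coarser granularity elsewhere.
	info, err := os.Stat("file.txt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(info.ModTime())
}
```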

### Filenames ###

Filenames are expected to be encoded in UTF-8 on disk. This is the
normal case for Windows and OS X. There is a bit more uncertainty in
the Linux world, but new distributions will have UTF-8 encoded file
names.

If an invalid (non-UTF-8) filename is read, the invalid characters will
be replaced with the Unicode replacement character, '�'. `rclone`
will emit a debug message in this case (use `-v` to see), eg

```
Local file system at .: Replacing invalid UTF-8 characters in "gro\xdf"
```
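
The replacement is straightforward to reproduce in plain Go. This is an
illustrative sketch of the idea rather than rclone's actual
implementation; ranging over a string decodes each invalid byte as
`utf8.RuneError`, which writes back as '�':

```go
package main

import (
	"bytes"
	"fmt"
	"unicode/utf8"
)

// replaceInvalidUTF8 substitutes the Unicode replacement character
// for any bytes that do not form valid UTF-8.
func replaceInvalidUTF8(s string) string {
	if utf8.ValidString(s) {
		return s
	}
	var buf bytes.Buffer
	for _, r := range s { // invalid bytes decode as utf8.RuneError
		buf.WriteRune(r)
	}
	return buf.String()
}

func main() {
	fmt.Println(replaceInvalidUTF8("gro\xdf")) // prints gro�
}
```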
65	docs/content/privacy.md	Normal file
@@ -0,0 +1,65 @@
---
title: "Privacy Policy"
description: "Rclone Privacy Policy"
date: "2015-08-19"
---

# Rclone Privacy Policy #

## What is this Privacy Policy for? ##

This privacy policy is for this website http://rclone.org and governs the privacy of its users who choose to use it.

The policy sets out the different areas where user privacy is concerned and outlines the obligations & requirements of the users, the website and website owners. Furthermore the way this website processes, stores and protects user data and information will also be detailed within this policy.

## The Website ##

This website and its owners take a proactive approach to user privacy and ensure the necessary steps are taken to protect the privacy of its users throughout their visiting experience. This website complies with all UK national laws and requirements for user privacy.

## Use of Cookies ##

This website uses cookies to improve the user's experience while visiting the website. Where applicable this website uses a cookie control system allowing the user on their first visit to the website to allow or disallow the use of cookies on their computer / device. This complies with recent legislation requirements for websites to obtain explicit consent from users before leaving behind or reading files such as cookies on a user's computer / device.

Cookies are small files saved to the user's computer's hard drive that track, save and store information about the user's interactions and usage of the website. This allows the website, through its server, to provide the users with a tailored experience within this website.

Users are advised that if they wish to deny the use and saving of cookies from this website on to their computer's hard drive they should take the necessary steps within their web browser's security settings to block all cookies from this website and its external serving vendors.

This website uses tracking software to monitor its visitors to better understand how they use it. This software is provided by Google Analytics which uses cookies to track visitor usage. The software will save a cookie to your computer's hard drive in order to track and monitor your engagement and usage of the website, but will not store, save or collect personal information. You can read [Google's privacy policy here](http://www.google.com/privacy.html) for further information.

Other cookies may be stored to your computer's hard drive by external vendors when this website uses referral programs, sponsored links or adverts. Such cookies are used for conversion and referral tracking and typically expire after 30 days, though some may take longer. No personal information is stored, saved or collected.

## Contact & Communication ##

Users contacting this website and/or its owners do so at their own discretion and provide any such personal details requested at their own risk. Your personal information is kept private and stored securely until a time it is no longer required or has no use, as detailed in the Data Protection Act 1998.

This website and its owners use any information submitted to provide you with further information about the products / services they offer or to assist you in answering any questions or queries you may have submitted.

## External Links ##

Although this website only looks to include quality, safe and relevant external links, users are advised to adopt a policy of caution before clicking any external web links mentioned throughout this website.

The owners of this website cannot guarantee or verify the contents of any externally linked website despite their best efforts. Users should therefore note they click on external links at their own risk and this website and its owners cannot be held liable for any damages or implications caused by visiting any external links mentioned.

## Adverts and Sponsored Links ##

This website may contain sponsored links and adverts. These will typically be served through our advertising partners, who may have detailed privacy policies relating directly to the adverts they serve.

Clicking on any such adverts will send you to the advertiser's website through a referral program which may use cookies and will track the number of referrals sent from this website. This may include the use of cookies which may in turn be saved on your computer's hard drive. Users should therefore note they click on sponsored external links at their own risk and this website and its owners cannot be held liable for any damages or implications caused by visiting any external links mentioned.

## Social Media Platforms ##

Communication, engagement and actions taken through external social media platforms that this website and its owners participate on are subject to the terms and conditions as well as the privacy policies held with each social media platform respectively.

Users are advised to use social media platforms wisely and communicate / engage upon them with due care and caution in regard to their own privacy and personal details. Neither this website nor its owners will ever ask for personal or sensitive information through social media platforms; users wishing to discuss sensitive details are encouraged to make contact through primary communication channels such as email.

This website may use social sharing buttons which help share web content directly from web pages to the social media platform in question. Users are advised before using such social sharing buttons that they do so at their own discretion and note that the social media platform may track and save your request to share a web page respectively through your social media platform account.

## Resources & Further Information ##

* [Data Protection Act 1998](http://www.legislation.gov.uk/ukpga/1998/29/contents)
* [Privacy and Electronic Communications Regulations 2003](http://www.legislation.gov.uk/uksi/2003/2426/contents/made)
* [Privacy and Electronic Communications Regulations 2003 - The Guide](https://ico.org.uk/for-organisations/guide-to-pecr/)
* [Twitter Privacy Policy](http://twitter.com/privacy)
* [Facebook Privacy Policy](http://www.facebook.com/about/privacy/)
* [Google Privacy Policy](http://www.google.com/privacy.html)
* [Sample Website Privacy Policy](http://www.jamieking.co.uk/resources/free_sample_privacy_policy.html)
@@ -27,41 +27,55 @@ Choose a number from below
1) swift
2) s3
3) local
4) drive
4) google cloud storage
5) dropbox
6) drive
type> 2
AWS Access Key ID.
access_key_id> accesskey
AWS Secret Access Key (password).
secret_access_key> secretaccesskey
Endpoint for S3 API.
Region to connect to.
Choose a number from below, or type in your own value
 * The default endpoint - a good choice if you are unsure.
 * US Region, Northern Virginia or Pacific Northwest.
 * Leave location constraint empty.
1) https://s3.amazonaws.com/
 * US Region, Northern Virginia only.
 * Leave location constraint empty.
2) https://s3-external-1.amazonaws.com
1) us-east-1
 * US West (Oregon) Region
 * Needs location constraint us-west-2.
2) us-west-2
[snip]
 * South America (Sao Paulo) Region
 * Needs location constraint sa-east-1.
9) https://s3-sa-east-1.amazonaws.com
endpoint> 1
Location constraint - must be set to match the Endpoint.
9) sa-east-1
 * If using an S3 clone that only understands v2 signatures - eg Ceph - set this and make sure you set the endpoint.
10) other-v2-signature
 * If using an S3 clone that understands v4 signatures set this and make sure you set the endpoint.
11) other-v4-signature
region> 1
Endpoint for S3 API.
Leave blank if using AWS to use the default endpoint for the region.
Specify if using an S3 clone such as Ceph.
endpoint>
Location constraint - must be set to match the Region. Used when creating buckets only.
Choose a number from below, or type in your own value
 * Empty for US Region, Northern Virginia or Pacific Northwest.
1)
 * US West (Oregon) Region.
2) us-west-2
 * US West (Northern California) Region.
3) us-west-1
 * EU (Ireland) Region.
4) eu-west-1
[snip]
 * South America (Sao Paulo) Region.
9) sa-east-1
location_constraint> 1
Remote config
--------------------
[remote]
access_key_id = accesskey
secret_access_key = secretaccesskey
endpoint = https://s3.amazonaws.com/
region = us-east-1
endpoint =
location_constraint =
--------------------
y) Yes this is OK
@@ -100,8 +114,60 @@ files in the bucket.

    rclone sync /home/local/directory remote:bucket

Modified time
-------------
### Modified time ###

The modified time is stored as metadata on the object as
`X-Amz-Meta-Mtime` as floating point since the epoch accurate to 1 ns.
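
As a rough illustration of the value described above, the following Go
sketch formats a time as seconds since the epoch with nanosecond
precision; the metadata key name comes from the docs, but the exact
formatting rclone uses is an assumption here:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	modTime := time.Now()
	// Seconds since the epoch as a decimal string with nanosecond
	// precision, e.g. "1434523532.123456789".
	mtime := fmt.Sprintf("%d.%09d", modTime.Unix(), modTime.Nanosecond())
	metadata := map[string]string{"X-Amz-Meta-Mtime": mtime}
	fmt.Println(metadata)
}
```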

### Multipart uploads ###

rclone supports multipart uploads with S3 which means that it can
upload files bigger than 5GB. Note that files uploaded with multipart
upload don't have an MD5SUM.

### Buckets and Regions ###

With Amazon S3 you can list buckets (`rclone lsd`) using any region,
but you can only access the content of a bucket from the region it was
created in. If you attempt to access a bucket from the wrong region,
you will get an error, `incorrect region, the bucket is not in 'XXX'
region`.

### Ceph ###

Ceph is an object storage system which presents an Amazon S3 interface.

To use rclone with Ceph, you need to set the following parameters in
the config.

```
access_key_id = Whatever
secret_access_key = Whatever
endpoint = https://ceph.endpoint.goes.here/
region = other-v2-signature
```

Note also that Ceph sometimes puts `/` in the passwords it gives
users. If you read the secret access key using the command line tools
you will get a JSON blob with the `/` escaped as `\/`. Make sure you
only write `/` in the secret access key.

Eg the dump from Ceph looks something like this (irrelevant keys
removed).

```
{
    "user_id": "xxx",
    "display_name": "xxxx",
    "keys": [
        {
            "user": "xxx",
            "access_key": "xxxxxx",
            "secret_key": "xxxxxx\/xxxx"
        }
    ],
}
```

Because this is a JSON dump, it encodes the `/` as `\/`, so if you
use the secret key as `xxxxxx/xxxx` it will work fine.
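
A useful property here is that a JSON decoder undoes the `\/` escaping
automatically. This Go sketch (using a trimmed, valid-JSON version of
the dump above) shows the decoded key coming out with a plain `/`:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

func main() {
	// Trimmed-down version of the Ceph dump (made valid JSON for the demo).
	data := []byte(`{"keys": [{"secret_key": "xxxxxx\/xxxx"}]}`)
	var creds struct {
		Keys []struct {
			SecretKey string `json:"secret_key"`
		} `json:"keys"`
	}
	if err := json.Unmarshal(data, &creds); err != nil {
		log.Fatal(err)
	}
	// The decoder unescapes "\/" to "/", giving the key to put in the config.
	fmt.Println(creds.Keys[0].SecretKey) // prints xxxxxx/xxxx
}
```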

@@ -52,12 +52,15 @@ Choose a number from below, or type in your own value
 * Memset Memstore UK v2
5) https://auth.storage.memset.com/v2.0
auth> 1
Tenant name - optional
tenant>
Remote config
--------------------
[remote]
user = user_name
key = password_or_api_key
auth = https://auth.api.rackspacecloud.com/v1.0
tenant =
--------------------
y) Yes this is OK
e) Edit this remote
@@ -84,8 +87,7 @@ excess files in the container.

    rclone sync /home/local/directory remote:container

Modified time
-------------
### Modified time ###

The modified time is stored as metadata on the object as
`X-Object-Meta-Mtime` as floating point since the epoch accurate to 1

@@ -3,6 +3,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="description" content="{{ .Description }}">
<meta name="author" content="Nick Craig-Wood">
<link rel="shortcut icon" type="image/png" href="/img/rclone-16x16.png"/>
<script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),

@@ -12,8 +12,19 @@
<div class="collapse navbar-collapse navbar-ex1-collapse">
  <ul class="nav navbar-nav">
    <li><a href="/downloads/"><i class="fa fa-cloud-download"></i> Downloads</a></li>
    <li><a href="/docs/"><i class="fa fa-book"></i> Docs</a></li>
    <li><a href="/contact/"><i class="fa fa-envelope"></i> Contact</a></li>
    <li class="dropdown">
      <a href="#" class="dropdown-toggle" data-toggle="dropdown"><b class="caret"></b> Docs</a>
      <ul class="dropdown-menu">
        <li><a href="/install/"><i class="fa fa-book"></i> Installation</a></li>
        <li><a href="/docs/"><i class="fa fa-book"></i> Usage</a></li>
        <li><a href="/changelog/"><i class="fa fa-book"></i> Changelog</a></li>
        <li><a href="/bugs/"><i class="fa fa-book"></i> Bugs</a></li>
        <li><a href="/faq/"><i class="fa fa-book"></i> FAQ</a></li>
        <li><a href="/licence/"><i class="fa fa-book"></i> Licence</a></li>
        <li><a href="/authors/"><i class="fa fa-book"></i> Authors</a></li>
        <li><a href="/privacy/"><i class="fa fa-book"></i> Privacy Policy</a></li>
      </ul>
    </li>
    <li class="dropdown">
      <a href="#" class="dropdown-toggle" data-toggle="dropdown"><b class="caret"></b> Storage Systems</a>
      <ul class="dropdown-menu">
@@ -25,6 +36,7 @@
        <li><a href="/local/"><i class="fa fa-file"></i> Local</a></li>
      </ul>
    </li>
    <li><a href="/contact/"><i class="fa fa-envelope"></i> Contact</a></li>
  </ul>
</div>
</div>

BIN	docs/static/img/rclone-16x16.png	vendored	Normal file
Binary file not shown. (Size: 1019 B)

444	drive/drive.go
@@ -1,14 +1,6 @@
// Drive interface
package drive

// Gets this quite often
// Failed to set mtime: googleapi: Error 403: Rate Limit Exceeded

// FIXME list containers equivalent should list directories?

// FIXME list directory should list to channel for concurrency not
// append to array

// FIXME need to deal with some corner cases
// * multiple files with the same name
// * files can be in multiple directories
@@ -18,16 +10,14 @@ package drive
import (
	"fmt"
	"io"
	"log"
	"mime"
	"net/http"
	"os"
	"path"
	"strings"
	"sync"
	"time"

	"code.google.com/p/google-api-go-client/drive/v2"
	"google.golang.org/api/drive/v2"
	"google.golang.org/api/googleapi"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/googleauth"
	"github.com/ogier/pflag"
@@ -38,14 +28,22 @@ const (
	rcloneClientId     = "202264815644.apps.googleusercontent.com"
	rcloneClientSecret = "X4Z3ca8xfWDb1Voo-F9a7ZxJ"
	driveFolderType    = "application/vnd.google-apps.folder"
	RFC3339In          = time.RFC3339
	RFC3339Out         = "2006-01-02T15:04:05.000000000Z07:00"
	timeFormatIn       = time.RFC3339
	timeFormatOut      = "2006-01-02T15:04:05.000000000Z07:00"
	minSleep           = 10 * time.Millisecond
	maxSleep           = 2 * time.Second
	decayConstant      = 2 // bigger for slower decay, exponential
)

// Globals
var (
	// Flags
	driveFullList = pflag.BoolP("drive-full-list", "", true, "Use a full listing for directory list. More data but usually quicker.")
	driveUseTrash = pflag.BoolP("drive-use-trash", "", false, "Send files to the trash instead of deleting permanently.")
	// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
	// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
	chunkSize         = fs.SizeSuffix(256 * 1024)
	driveUploadCutoff = chunkSize
	// Description of how to auth for this app
	driveAuth = &googleauth.Auth{
		Scope: "https://www.googleapis.com/auth/drive",
@@ -70,10 +68,13 @@ func init() {
			Help: "Google Application Client Secret - leave blank to use rclone's.",
		}},
	})
	pflag.VarP(&driveUploadCutoff, "drive-upload-cutoff", "", "Cutoff for switching to chunked upload")
	pflag.VarP(&chunkSize, "drive-chunk-size", "", "Upload chunk size. Must be a power of 2 >= 256k.")
}

// FsDrive represents a remote drive server
type FsDrive struct {
	name   string         // name of this remote
	svc    *drive.Service // the connection to the drive server
	root   string         // the path we are working on
	client *http.Client   // authorized client
@@ -81,8 +82,10 @@ type FsDrive struct {
	rootId       string        // Id of the root directory
	foundRoot    bool          // Whether we have found the root or not
	findRootLock sync.Mutex    // Protect findRoot from concurrent use
	dirCache     dirCache      // Map of directory path to directory id
	dirCache     *dirCache     // Map of directory path to directory id
	findDirLock  sync.Mutex    // Protect findDir from concurrent use
	pacer        chan struct{} // To pace the operations
	sleepTime    time.Duration // Time to sleep for each transaction
}

// FsObjectDrive describes a drive object
@@ -104,8 +107,8 @@ type dirCache struct {
}

// Make a new locked map
func newDirCache() dirCache {
	d := dirCache{}
func newDirCache() *dirCache {
	d := &dirCache{}
	d.Flush()
	return d
}
@@ -144,11 +147,107 @@ func (m *dirCache) Flush() {

// ------------------------------------------------------------

// The name of the remote (as passed into NewFs)
func (f *FsDrive) Name() string {
	return f.name
}

// String converts this FsDrive to a string
func (f *FsDrive) String() string {
	return fmt.Sprintf("Google drive root '%s'", f.root)
}

// Start a call to the drive API
//
// This must be called as a pair with endCall
//
// This waits for the pacer token
func (f *FsDrive) beginCall() {
	// pacer starts with a token in and whenever we take one out
	// XXX ms later we put another in. We could do this with a
	// Ticker more accurately, but then we'd have to work out how
	// not to run it when it wasn't needed
	<-f.pacer

	// Restart the timer
	go func(t time.Duration) {
		// fs.Debug(f, "New sleep for %v at %v", t, time.Now())
		time.Sleep(t)
		f.pacer <- struct{}{}
	}(f.sleepTime)
}

// End a call to the drive API
//
// Refresh the pace given an error that was returned. It returns a
// boolean as to whether the operation should be retried.
//
// See https://developers.google.com/drive/web/handle-errors
// http://stackoverflow.com/questions/18529524/403-rate-limit-after-only-1-insert-per-second
func (f *FsDrive) endCall(err error) bool {
	again := false
	oldSleepTime := f.sleepTime
	if err == nil {
		f.sleepTime = (f.sleepTime<<decayConstant - f.sleepTime) >> decayConstant
		if f.sleepTime < minSleep {
			f.sleepTime = minSleep
		}
		if f.sleepTime != oldSleepTime {
			fs.Debug(f, "Reducing sleep to %v", f.sleepTime)
		}
	} else {
		fs.Debug(f, "Error received: %T %#v", err, err)
		// Check for net error Timeout()
		if x, ok := err.(interface {
			Timeout() bool
		}); ok && x.Timeout() {
			again = true
		}
		// Check for net error Temporary()
		if x, ok := err.(interface {
			Temporary() bool
		}); ok && x.Temporary() {
			again = true
		}
		switch gerr := err.(type) {
		case *googleapi.Error:
			if gerr.Code >= 500 && gerr.Code < 600 {
				// All 5xx errors should be retried
				again = true
			} else if len(gerr.Errors) > 0 {
				reason := gerr.Errors[0].Reason
				if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" {
					again = true
				}
			}
		}
	}
	if again {
		f.sleepTime *= 2
		if f.sleepTime > maxSleep {
			f.sleepTime = maxSleep
		}
		if f.sleepTime != oldSleepTime {
			fs.Debug(f, "Rate limited, increasing sleep to %v", f.sleepTime)
		}
	}
	return again
}

// Pace the remote operations to not exceed Google's limits and retry
// on 403 rate limit exceeded
//
// This calls fn, expecting it to place its error in perr
func (f *FsDrive) call(perr *error, fn func()) {
	for {
		f.beginCall()
		fn()
		if !f.endCall(*perr) {
			break
		}
	}
}

// parseDrivePath parses a drive 'path'
func parseDrivePath(path string) (root string, err error) {
	root = strings.Trim(path, "/")
@@ -186,7 +285,10 @@ func (f *FsDrive) listAll(dirId string, title string, directoriesOnly bool, file
	list := f.svc.Files.List().Q(query).MaxResults(1000)
OUTER:
	for {
		files, err := list.Do()
		var files *drive.FileList
		f.call(&err, func() {
			files, err = list.Do()
		})
		if err != nil {
			return false, fmt.Errorf("Couldn't list directory: %s", err)
		}
@@ -204,8 +306,27 @@
	return
}

// Returns true if x is a power of 2 or zero
func isPowerOfTwo(x int64) bool {
	switch {
	case x == 0:
		return true
	case x < 0:
		return false
	default:
		return (x & (x - 1)) == 0
	}
}

// NewFs constructs an FsDrive from the path, container:path
func NewFs(name, path string) (fs.Fs, error) {
	if !isPowerOfTwo(int64(chunkSize)) {
		return nil, fmt.Errorf("drive: chunk size %v isn't a power of two", chunkSize)
	}
	if chunkSize < 256*1024 {
		return nil, fmt.Errorf("drive: chunk size can't be less than 256k - was %v", chunkSize)
	}

	t, err := driveAuth.NewTransport(name)
	if err != nil {
		return nil, err
@@ -217,10 +338,16 @@ func NewFs(name, path string) (fs.Fs, error) {
	}

	f := &FsDrive{
		root:     root,
		dirCache: newDirCache(),
		name:      name,
		root:      root,
		dirCache:  newDirCache(),
		pacer:     make(chan struct{}, 1),
		sleepTime: minSleep,
	}

	// Put the first pacing token in
	f.pacer <- struct{}{}

	// Create a new authorized Drive client.
	f.client = t.Client()
	f.svc, err = drive.New(f.client)
@@ -229,15 +356,15 @@ func NewFs(name, path string) (fs.Fs, error) {
	}

	// Read About so we know the root path
	f.about, err = f.svc.About.Get().Do()
	f.call(&err, func() {
		f.about, err = f.svc.About.Get().Do()
	})
	if err != nil {
		return nil, fmt.Errorf("Couldn't read info about Drive: %s", err)
	}

	// Find the Id of the root directory and the Id of its parent
	f.rootId = f.about.RootFolderId
	// Put the root directory in
	f.dirCache.Put("", f.rootId)
	// Find the Id of the true root and clear everything
	f.resetRoot()
	// Find the current root
	err = f.findRoot(false)
	if err != nil {
@@ -251,7 +378,7 @@ func NewFs(name, path string) (fs.Fs, error) {
		// No root so return old f
		return f, nil
	}
	obj, err := newF.newFsObjectWithInfo(remote, nil)
	obj, err := newF.newFsObjectWithInfoErr(remote, nil)
	if err != nil {
		// File doesn't exist so return old f
		return f, nil
@@ -264,7 +391,7 @@ func NewFs(name, path string) (fs.Fs, error) {
}

// Return an FsObject from a path
func (f *FsDrive) newFsObjectWithInfo(remote string, info *drive.File) (fs.Object, error) {
func (f *FsDrive) newFsObjectWithInfoErr(remote string, info *drive.File) (fs.Object, error) {
	fs := &FsObjectDrive{
		drive:  f,
		remote: remote,
@@ -284,8 +411,8 @@ func (f *FsDrive) newFsObjectWithInfo(remote string, info *drive.File) (fs.Objec
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsDrive) NewFsObjectWithInfo(remote string, info *drive.File) fs.Object {
	fs, _ := f.newFsObjectWithInfo(remote, info)
func (f *FsDrive) newFsObjectWithInfo(remote string, info *drive.File) fs.Object {
	fs, _ := f.newFsObjectWithInfoErr(remote, info)
	// Errors have already been logged
	return fs
}
@@ -294,7 +421,7 @@ func (f *FsDrive) NewFsObjectWithInfo(remote string, info *drive.File) fs.Object
//
// May return nil if an error occurred
func (f *FsDrive) NewFsObject(remote string) fs.Object {
	return f.NewFsObjectWithInfo(remote, nil)
	return f.newFsObjectWithInfo(remote, nil)
}

// Path should be directory path either "" or "path/"
@@ -318,7 +445,7 @@ func (f *FsDrive) listDirRecursive(dirId string, path string, out fs.ObjectsChan
		} else {
			// If item has no MD5 sum it isn't stored on drive, so ignore it
			if item.Md5Checksum != "" {
				if fs := f.NewFsObjectWithInfo(path+item.Title, item); fs != nil {
				if fs := f.newFsObjectWithInfo(path+item.Title, item); fs != nil {
					out <- fs
				}
			}
@@ -368,7 +495,7 @@ func (f *FsDrive) listDirFull(dirId string, path string, out fs.ObjectsChan) err
			// fmt.Printf("file %s %s %s\n", path, item.Title, item.Id)
			// If item has no MD5 sum it isn't stored on drive, so ignore it
			if item.Md5Checksum != "" {
				if fs := f.NewFsObjectWithInfo(path, item); fs != nil {
				if fs := f.newFsObjectWithInfo(path, item); fs != nil {
					out <- fs
				}
			}
@@ -491,15 +618,18 @@ func (f *FsDrive) _findDir(path string, create bool) (pathId string, err error)
	if create {
		// fmt.Println("Making", path)
		// Define the metadata for the directory we are going to create.
		info := &drive.File{
		createInfo := &drive.File{
			Title:       leaf,
			Description: leaf,
			MimeType:    driveFolderType,
			Parents:     []*drive.ParentReference{{Id: pathId}},
		}
		info, err := f.svc.Files.Insert(info).Do()
		var info *drive.File
		f.call(&err, func() {
			info, err = f.svc.Files.Insert(createInfo).Do()
		})
		if err != nil {
			return pathId, fmt.Errorf("Failed to make directory")
			return pathId, fmt.Errorf("Failed to make directory: %v", err)
		}
		pathId = info.Id
	} else {
@@ -537,6 +667,20 @@ func (f *FsDrive) findRoot(create bool) error {
	return nil
}

// Resets the root directory to the absolute root and clears the dirCache
func (f *FsDrive) resetRoot() {
	f.findRootLock.Lock()
	defer f.findRootLock.Unlock()
	f.foundRoot = false
	f.dirCache.Flush()

	// Put the true root in
	f.rootId = f.about.RootFolderId

	// Put the root directory in
	f.dirCache.Put("", f.rootId)
}

// Walk the path returning a channel of FsObjects
func (f *FsDrive) List() fs.ObjectsChan {
	out := make(fs.ObjectsChan, fs.Config.Checkers)
@@ -545,16 +689,16 @@ func (f *FsDrive) List() fs.ObjectsChan {
		err := f.findRoot(false)
		if err != nil {
			fs.Stats.Error()
			log.Printf("Couldn't find root: %s", err)
			fs.ErrorLog(f, "Couldn't find root: %s", err)
		} else {
			if *driveFullList {
			if f.root == "" && *driveFullList {
				err = f.listDirFull(f.rootId, "", out)
			} else {
				err = f.listDirRecursive(f.rootId, "", out)
			}
			if err != nil {
				fs.Stats.Error()
				log.Printf("List failed: %s", err)
				fs.ErrorLog(f, "List failed: %s", err)
			}
		}
	}()
@@ -569,7 +713,7 @@ func (f *FsDrive) ListDir() fs.DirChan {
		err := f.findRoot(false)
		if err != nil {
			fs.Stats.Error()
			log.Printf("Couldn't find root: %s", err)
			fs.ErrorLog(f, "Couldn't find root: %s", err)
		} else {
			_, err := f.listAll(f.rootId, "", true, false, func(item *drive.File) bool {
				dir := &fs.Dir{
@@ -577,41 +721,46 @@
				Bytes: -1,
				Count: -1,
			}
			dir.When, _ = time.Parse(RFC3339In, item.ModifiedDate)
			dir.When, _ = time.Parse(timeFormatIn, item.ModifiedDate)
			out <- dir
			return false
		})
		if err != nil {
			fs.Stats.Error()
			log.Printf("ListDir failed: %s", err)
			fs.ErrorLog(f, "ListDir failed: %s", err)
		}
	}()
	return out
}

// seekWrapper wraps an io.Reader with a basic Seek for
// code.google.com/p/google-api-go-client/googleapi
// to detect the length (see getReaderSize function)
type seekWrapper struct {
	in   io.Reader
	size int64
}

// Read bytes from the object - see io.Reader
func (file *seekWrapper) Read(p []byte) (n int, err error) {
	return file.in.Read(p)
}

// Seek - minimal implementation for Google Drive's length detection
func (file *seekWrapper) Seek(offset int64, whence int) (int64, error) {
	switch whence {
	case os.SEEK_CUR:
		return 0, nil
	case os.SEEK_END:
		return file.size, nil
	}
	return 0, nil
}

// Creates a drive.File info from the parameters passed in and a half
// finished FsObjectDrive which must have setMetaData called on it
//
// Used to create new objects
func (f *FsDrive) createFileInfo(remote string, modTime time.Time, size int64) (*FsObjectDrive, *drive.File, error) {
	// Temporary FsObject under construction
	o := &FsObjectDrive{
		drive:  f,
		remote: remote,
		bytes:  size,
	}

	directory, leaf := splitPath(remote)
	directoryId, err := f.findDir(directory, true)
	if err != nil {
		return nil, nil, fmt.Errorf("Couldn't find or make directory: %s", err)
	}

	// Define the metadata for the file we are going to create.
	createInfo := &drive.File{
		Title:        leaf,
		Description:  leaf,
		Parents:      []*drive.ParentReference{{Id: directoryId}},
		MimeType:     fs.MimeType(o),
		ModifiedDate: modTime.Format(timeFormatOut),
	}
	return o, createInfo, nil
}

// Put the object
@@ -624,36 +773,29 @@ func (file *seekWrapper) Seek(offset int64, whence int) (int64, error) {
//
// The new object may have been created if an error is returned
func (f *FsDrive) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
	// Temporary FsObject under construction
	o := &FsObjectDrive{drive: f, remote: remote}

	directory, leaf := splitPath(o.remote)
	directoryId, err := f.findDir(directory, true)
	o, createInfo, err := f.createFileInfo(remote, modTime, size)
	if err != nil {
		return o, fmt.Errorf("Couldn't find or make directory: %s", err)
		return nil, err
	}

	// Guess the mime type
	mimeType := mime.TypeByExtension(path.Ext(o.remote))
	if mimeType == "" {
		mimeType = "application/octet-stream"
	}
	modifiedDate := modTime.Format(RFC3339Out)

	// Define the metadata for the file we are going to create.
	info := &drive.File{
		Title:        leaf,
		Description:  leaf,
		Parents:      []*drive.ParentReference{{Id: directoryId}},
		MimeType:     mimeType,
		ModifiedDate: modifiedDate,
	}

	// Make the API request to upload metadata and file data.
	in = &seekWrapper{in: in, size: size}
	info, err = f.svc.Files.Insert(info).Media(in).Do()
	if err != nil {
		return o, fmt.Errorf("Upload failed: %s", err)
	var info *drive.File
	if size == 0 || size < int64(driveUploadCutoff) {
		// Make the API request to upload metadata and file data.
		// Don't retry, return a retry error instead
		f.beginCall()
		info, err = f.svc.Files.Insert(createInfo).Media(in).Do()
		if f.endCall(err) {
			return o, fs.RetryErrorf("Upload failed - retry: %s", err)
		}
		if err != nil {
			return o, fmt.Errorf("Upload failed: %s", err)
		}
	} else {
		// Upload the file in chunks
		info, err = f.Upload(in, size, createInfo.MimeType, createInfo, remote)
		if err != nil {
			return o, err
		}
	}
	o.setMetaData(info)
	return o, nil
@@ -672,7 +814,10 @@ func (f *FsDrive) Rmdir() error {
	if err != nil {
		return err
	}
	children, err := f.svc.Children.List(f.rootId).MaxResults(10).Do()
	var children *drive.ChildList
	f.call(&err, func() {
		children, err = f.svc.Children.List(f.rootId).MaxResults(10).Do()
	})
	if err != nil {
		return err
	}
@@ -681,11 +826,18 @@
	}
	// Delete the directory if it isn't the root
	if f.root != "" {
		err = f.svc.Files.Delete(f.rootId).Do()
		f.call(&err, func() {
			if *driveUseTrash {
				_, err = f.svc.Files.Trash(f.rootId).Do()
			} else {
				err = f.svc.Files.Delete(f.rootId).Do()
			}
		})
		if err != nil {
			return err
		}
	}
	f.resetRoot()
	return nil
}

@@ -694,6 +846,39 @@ func (fs *FsDrive) Precision() time.Duration {
	return time.Millisecond
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *FsDrive) Copy(src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*FsObjectDrive)
	if !ok {
		fs.Debug(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}

	o, createInfo, err := f.createFileInfo(remote, srcObj.ModTime(), srcObj.bytes)
	if err != nil {
		return nil, err
	}

	var info *drive.File
	o.drive.call(&err, func() {
		info, err = o.drive.svc.Files.Copy(srcObj.id, createInfo).Do()
	})
	if err != nil {
		return nil, err
	}

	o.setMetaData(info)
	return o, nil
}

// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
@@ -707,8 +892,14 @@ func (f *FsDrive) Purge() error {
	if err != nil {
		return err
	}
	err = f.svc.Files.Delete(f.rootId).Do()
	f.dirCache.Flush()
	f.call(&err, func() {
		if *driveUseTrash {
			_, err = f.svc.Files.Trash(f.rootId).Do()
		} else {
			err = f.svc.Files.Delete(f.rootId).Do()
		}
	})
	f.resetRoot()
	if err != nil {
		return err
	}
@@ -795,7 +986,7 @@ func (o *FsObjectDrive) ModTime() time.Time {
		fs.Log(o, "Failed to read metadata: %s", err)
		return time.Now()
	}
	modTime, err := time.Parse(RFC3339In, o.modifiedDate)
	modTime, err := time.Parse(timeFormatIn, o.modifiedDate)
	if err != nil {
		fs.Log(o, "Failed to read mtime from object: %s", err)
		return time.Now()
@@ -808,19 +999,25 @@ func (o *FsObjectDrive) SetModTime(modTime time.Time) {
	err := o.readMetaData()
	if err != nil {
		fs.Stats.Error()
		fs.Log(o, "Failed to read metadata: %s", err)
		fs.ErrorLog(o, "Failed to read metadata: %s", err)
		return
	}
	// New metadata
	info := &drive.File{
		ModifiedDate: modTime.Format(RFC3339Out),
	updateInfo := &drive.File{
		ModifiedDate: modTime.Format(timeFormatOut),
	}
	// Set modified date
	_, err = o.drive.svc.Files.Update(o.id, info).SetModifiedDate(true).Do()
	var info *drive.File
	o.drive.call(&err, func() {
		info, err = o.drive.svc.Files.Update(o.id, updateInfo).SetModifiedDate(true).Do()
	})
	if err != nil {
		fs.Stats.Error()
		fs.Log(o, "Failed to update remote mtime: %s", err)
		fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
		return
	}
	// Update info from read data
	o.setMetaData(info)
}

// Is this object storable
@@ -830,17 +1027,23 @@ func (o *FsObjectDrive) Storable() bool {

// Open an object for read
func (o *FsObjectDrive) Open() (in io.ReadCloser, err error) {
	if o.url == "" {
		return nil, fmt.Errorf("Forbidden to download - check sharing permission")
	}
	req, err := http.NewRequest("GET", o.url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("User-Agent", fs.UserAgent)
	res, err := o.drive.client.Do(req)
	var res *http.Response
	o.drive.call(&err, func() {
		res, err = o.drive.client.Do(req)
	})
	if err != nil {
		return nil, err
	}
	if res.StatusCode != 200 {
		res.Body.Close()
		_ = res.Body.Close() // ignore error
		return nil, fmt.Errorf("Bad response: %d: %s", res.StatusCode, res.Status)
	}
	return res.Body, nil
@@ -852,16 +1055,30 @@ func (o *FsObjectDrive) Open() (in io.ReadCloser, err error) {
//
// The new object may have been created if an error is returned
func (o *FsObjectDrive) Update(in io.Reader, modTime time.Time, size int64) error {
	info := &drive.File{
	updateInfo := &drive.File{
		Id:           o.id,
		ModifiedDate: modTime.Format(RFC3339Out),
		ModifiedDate: modTime.Format(timeFormatOut),
	}

	// Make the API request to upload metadata and file data.
	in = &seekWrapper{in: in, size: size}
	info, err := o.drive.svc.Files.Update(info.Id, info).SetModifiedDate(true).Media(in).Do()
	if err != nil {
		return fmt.Errorf("Update failed: %s", err)
	var err error
	var info *drive.File
	if size == 0 || size < int64(driveUploadCutoff) {
		// Don't retry, return a retry error instead
		o.drive.beginCall()
		info, err = o.drive.svc.Files.Update(updateInfo.Id, updateInfo).SetModifiedDate(true).Media(in).Do()
		if o.drive.endCall(err) {
			return fs.RetryErrorf("Update failed - retry: %s", err)
		}
		if err != nil {
			return fmt.Errorf("Update failed: %s", err)
		}
	} else {
		// Upload the file in chunks
		info, err = o.drive.Upload(in, size, fs.MimeType(o), updateInfo, o.remote)
		if err != nil {
			return err
		}
	}
	o.setMetaData(info)
	return nil
@@ -869,10 +1086,19 @@ func (o *FsObjectDrive) Update(in io.Reader, modTime time.Time, size int64) erro

// Remove an object
func (o *FsObjectDrive) Remove() error {
	return o.drive.svc.Files.Delete(o.id).Do()
	var err error
	o.drive.call(&err, func() {
		if *driveUseTrash {
			_, err = o.drive.svc.Files.Trash(o.id).Do()
		} else {
			err = o.drive.svc.Files.Delete(o.id).Do()
		}
	})
	return err
}

// Check the interfaces are satisfied
var _ fs.Fs = &FsDrive{}
var _ fs.Purger = &FsDrive{}
var _ fs.Copier = &FsDrive{}
var _ fs.Object = &FsObjectDrive{}
54	drive/drive_test.go	Normal file
@@ -0,0 +1,54 @@
// Test Drive filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package drive_test

import (
	"testing"

	"github.com/ncw/rclone/drive"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
)

func init() {
	fstests.NilObject = fs.Object((*drive.FsObjectDrive)(nil))
	fstests.RemoteName = "TestDrive:"
}

// Generic tests for the Fs
func TestInit(t *testing.T)                 { fstests.TestInit(t) }
func TestFsString(t *testing.T)             { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T)         { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T)      { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T)              { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T)          { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T)       { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T)           { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T)           { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T)       { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T)        { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T)           { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T)          { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T)        { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T)      { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T)               { fstests.TestFsCopy(t) }
func TestFsRmdirFull(t *testing.T)          { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T)          { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T)         { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T)             { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T)         { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T)         { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T)        { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T)     { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T)           { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T)           { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T)         { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T)       { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T)            { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T)    { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T)         { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T)          { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T)             { fstests.TestFinalise(t) }
246	drive/upload.go	Normal file
@@ -0,0 +1,246 @@
// Upload for drive
//
// Docs
// Resumable upload: https://developers.google.com/drive/web/manage-uploads#resumable
// Best practices: https://developers.google.com/drive/web/manage-uploads#best-practices
// Files insert: https://developers.google.com/drive/v2/reference/files/insert
// Files update: https://developers.google.com/drive/v2/reference/files/update
//
// This contains code adapted from google.golang.org/api (C) the GO AUTHORS

package drive

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"regexp"
	"strconv"

	"github.com/ncw/rclone/fs"
	"google.golang.org/api/drive/v2"
	"google.golang.org/api/googleapi"
)

const (
	// statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete.
	statusResumeIncomplete = 308

	// Number of times to try each chunk
	maxTries = 10
)

// resumableUpload is used by the generated APIs to provide resumable uploads.
// It is not used by developers directly.
type resumableUpload struct {
	f      *FsDrive
	remote string
	// URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
	URI string
	// Media is the object being uploaded.
	Media io.Reader
	// MediaType defines the media type, e.g. "image/jpeg".
	MediaType string
	// ContentLength is the full size of the object being uploaded.
	ContentLength int64
	// Return value
	ret *drive.File
}

// Upload the io.Reader in of size bytes with contentType and info
func (f *FsDrive) Upload(in io.Reader, size int64, contentType string, info *drive.File, remote string) (*drive.File, error) {
	fileId := info.Id
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(info)
	if err != nil {
		return nil, err
	}
	params := make(url.Values)
	params.Set("alt", "json")
	params.Set("uploadType", "resumable")
	urls := "https://www.googleapis.com/upload/drive/v2/files"
	method := "POST"
	if fileId != "" {
		params.Set("setModifiedDate", "true")
		urls += "/{fileId}"
		method = "PUT"
	}
	urls += "?" + params.Encode()
	req, _ := http.NewRequest(method, urls, body)
	googleapi.Expand(req.URL, map[string]string{
		"fileId": fileId,
	})
	req.Header.Set("Content-Type", "application/json; charset=UTF-8")
	req.Header.Set("X-Upload-Content-Type", contentType)
	req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
	req.Header.Set("User-Agent", fs.UserAgent)
	var res *http.Response
	f.call(&err, func() {
		res, err = f.client.Do(req)
		if err == nil {
			defer googleapi.CloseBody(res)
			err = googleapi.CheckResponse(res)
		}
	})
	if err != nil {
		return nil, err
	}
	loc := res.Header.Get("Location")
	rx := &resumableUpload{
		f:             f,
		remote:        remote,
		URI:           loc,
		Media:         in,
		MediaType:     contentType,
		ContentLength: size,
	}
	return rx.Upload()
}

// Make an http.Request for the range passed in
func (rx *resumableUpload) makeRequest(start int64, body []byte) *http.Request {
	reqSize := int64(len(body))
	req, _ := http.NewRequest("POST", rx.URI, bytes.NewBuffer(body))
	req.ContentLength = reqSize
	if reqSize != 0 {
		req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
	} else {
		req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", rx.ContentLength))
	}
	req.Header.Set("Content-Type", rx.MediaType)
	req.Header.Set("User-Agent", fs.UserAgent)
	return req
}

// rangeRE matches the transfer status response from the server. $1 is
// the last byte index uploaded.
var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)

// Query drive for the amount transferred so far
//
// If error is nil, then start should be valid
func (rx *resumableUpload) transferStatus() (start int64, err error) {
	req := rx.makeRequest(0, nil)
	res, err := rx.f.client.Do(req)
	if err != nil {
		return 0, err
	}
	defer googleapi.CloseBody(res)
	if res.StatusCode == http.StatusCreated || res.StatusCode == http.StatusOK {
		return rx.ContentLength, nil
	}
	if res.StatusCode != statusResumeIncomplete {
		err = googleapi.CheckResponse(res)
		if err != nil {
			return 0, err
		}
		return 0, fmt.Errorf("unexpected http return code %v", res.StatusCode)
	}
	Range := res.Header.Get("Range")
	if m := rangeRE.FindStringSubmatch(Range); len(m) == 2 {
		start, err = strconv.ParseInt(m[1], 10, 64)
		if err == nil {
			return start, nil
		}
	}
	return 0, fmt.Errorf("unable to parse range %q", Range)
}

// Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
func (rx *resumableUpload) transferChunk(start int64, body []byte) (int, error) {
	req := rx.makeRequest(start, body)
	res, err := rx.f.client.Do(req)
	if err != nil {
		return 599, err
	}
	defer googleapi.CloseBody(res)
	if res.StatusCode == statusResumeIncomplete {
		return res.StatusCode, nil
	}
	err = googleapi.CheckResponse(res)
	if err != nil {
		return res.StatusCode, err
	}

	// When the entire file upload is complete, the server
	// responds with an HTTP 201 Created along with any metadata
	// associated with this resource. If this request had been
	// updating an existing entity rather than creating a new one,
	// the HTTP response code for a completed upload would have
	// been 200 OK.
	//
	// So parse the response out of the body. We aren't expecting
	// any other 2xx codes, so we parse it unconditionally on
	// StatusCode
	if err = json.NewDecoder(res.Body).Decode(&rx.ret); err != nil {
		return 598, err
	}

	return res.StatusCode, nil
}

// Upload uploads the chunks from the input
// It retries each chunk maxTries times (with a pause of uploadPause between attempts).
func (rx *resumableUpload) Upload() (*drive.File, error) {
	start := int64(0)
	buf := make([]byte, chunkSize)
	var StatusCode int
	for start < rx.ContentLength {
		reqSize := rx.ContentLength - start
		if reqSize >= int64(chunkSize) {
			reqSize = int64(chunkSize)
		} else {
			buf = buf[:reqSize]
		}

		// Read the chunk
		_, err := io.ReadFull(rx.Media, buf)
		if err != nil {
			return nil, err
		}

		// Transfer the chunk
		for try := 1; try <= maxTries; try++ {
			fs.Debug(rx.remote, "Sending chunk %d length %d, %d/%d", start, reqSize, try, maxTries)
			rx.f.beginCall()
			StatusCode, err = rx.transferChunk(start, buf)
			rx.f.endCall(err)
			if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
				goto success
			}
			fs.Debug(rx.remote, "Retrying chunk %d/%d, code=%d, err=%v", try, maxTries, StatusCode, err)
		}
		fs.Debug(rx.remote, "Failed to send chunk")
		return nil, fs.RetryErrorf("Chunk upload failed - retry: code=%d, err=%v", StatusCode, err)
	success:

		start += reqSize
	}
	// Resume or retry uploads that fail due to connection interruptions or
	// any 5xx errors, including:
	//
	// 500 Internal Server Error
	// 502 Bad Gateway
	// 503 Service Unavailable
	// 504 Gateway Timeout
	//
	// Use an exponential backoff strategy if any 5xx server error is
	// returned when resuming or retrying upload requests. These errors can
	// occur if a server is getting overloaded. Exponential backoff can help
	// alleviate these kinds of problems during periods of high volume of
	// requests or heavy network traffic. Other kinds of requests should not
	// be handled by exponential backoff but you can still retry a number of
	// them. When retrying these requests, limit the number of times you
	// retry them. For example your code could limit to ten retries or less
	// before reporting an error.
	//
	// Handle 404 Not Found errors when doing resumable uploads by starting
	// the entire upload over from the beginning.
	if rx.ret == nil {
		return nil, fs.RetryErrorf("Incomplete upload - retry, last error %d", StatusCode)
	}
	return rx.ret, nil
}
@@ -6,17 +6,26 @@ Limitations of dropbox

File system is case insensitive

The datastore is limited to 100,000 records which therefore is the
limit of the number of files that rclone can use on dropbox.

FIXME only open datastore if we need it?

FIXME Getting this sometimes
Failed to copy: Upload failed: invalid character '<' looking for beginning of value
This is a JSON decode error - from Update / UploadByChunk
- Caused by 500 error from dropbox
- See https://github.com/stacktic/dropbox/issues/1
- Possibly confusing dropbox with excess concurrency?

FIXME implement timeouts - need to get "github.com/stacktic/dropbox"
and hence "golang.org/x/oauth2" which uses DefaultTransport unless it
is set in the context passed into .Client()

func (db *Dropbox) client() *http.Client {
	return db.config.Client(oauth2.NoContext, db.token)
}

// HTTPClient is the context key to use with golang.org/x/net/context's
// WithValue function to associate an *http.Client value with a context.
var HTTPClient ContextKey

So pass in a context with HTTPClient set...
*/

import (
@@ -24,29 +33,33 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ogier/pflag"
|
||||
"github.com/stacktic/dropbox"
|
||||
)
|
||||
|
||||
// Constants
|
||||
const (
|
||||
rcloneAppKey = "5jcck7diasz0rqy"
|
||||
rcloneAppSecret = "1n9m04y2zx7bf26"
|
||||
uploadChunkSize = 64 * 1024 // chunk size for upload
|
||||
metadataLimit = dropbox.MetadataLimitDefault // max items to fetch at once
|
||||
datastoreName = "rclone"
|
||||
tableName = "metadata"
|
||||
md5sumField = "md5sum"
|
||||
mtimeField = "mtime"
|
||||
maxCommitRetries = 5
|
||||
RFC3339In = time.RFC3339
|
||||
RFC3339Out = "2006-01-02T15:04:05.000000000Z07:00"
|
||||
rcloneAppKey = "5jcck7diasz0rqy"
|
||||
rcloneAppSecret = "1n9m04y2zx7bf26"
|
||||
metadataLimit = dropbox.MetadataLimitDefault // max items to fetch at once
|
||||
)
|
||||
|
||||
var (
|
||||
// A regexp matching path names for files Dropbox ignores
|
||||
// See https://www.dropbox.com/en/help/145 - Ignored files
|
||||
ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)
|
||||
// Upload chunk size - setting too small makes uploads slow.
|
||||
// Chunks aren't buffered into memory though so can set large.
|
||||
uploadChunkSize = fs.SizeSuffix(128 * 1024 * 1024)
|
||||
maxUploadChunkSize = fs.SizeSuffix(150 * 1024 * 1024)
|
||||
)
|

// Register with Fs
@@ -54,7 +67,7 @@ func init() {
	fs.Register(&fs.FsInfo{
		Name:   "dropbox",
		NewFs:  NewFs,
		Config: Config,
		Config: configHelper,
		Options: []fs.Option{{
			Name: "app_key",
			Help: "Dropbox App Key - leave blank to use rclone's.",
@@ -63,10 +76,11 @@ func init() {
			Help: "Dropbox App Secret - leave blank to use rclone's.",
		}},
	})
	pflag.VarP(&uploadChunkSize, "dropbox-chunk-size", "", fmt.Sprintf("Upload chunk size. Max %v.", maxUploadChunkSize))
}

// Configuration helper - called after the user has put in the defaults
func Config(name string) {
func configHelper(name string) {
	// See if we already have a token
	token := fs.ConfigFile.MustValue(name, "token")
	if token != "" {
@@ -97,28 +111,29 @@ func Config(name string) {

// FsDropbox represents a remote dropbox server
type FsDropbox struct {
	db               *dropbox.Dropbox // the connection to the dropbox server
	root             string           // the path we are working on
	slashRoot        string           // root with "/" prefix
	slashRootSlash   string           // root with "/" prefix and postfix
	datastoreManager *dropbox.DatastoreManager
	datastore        *dropbox.Datastore
	table            *dropbox.Table
	datastoreMutex   sync.Mutex // lock this when using the datastore
	datastoreErr     error      // pending errors on the datastore
	name           string           // name of this remote
	db             *dropbox.Dropbox // the connection to the dropbox server
	root           string           // the path we are working on
	slashRoot      string           // root with "/" prefix, lowercase
	slashRootSlash string           // root with "/" prefix and postfix, lowercase
}

// FsObjectDropbox describes a dropbox object
type FsObjectDropbox struct {
	dropbox *FsDropbox // what this object is part of
	remote  string     // The remote path
	md5sum  string     // md5sum of the object
	bytes   int64      // size of the object
	modTime time.Time  // time it was last modified
	dropbox     *FsDropbox // what this object is part of
	remote      string     // The remote path
	bytes       int64      // size of the object
	modTime     time.Time  // time it was last modified
	hasMetadata bool       // metadata is valid
}

// ------------------------------------------------------------

// The name of the remote (as passed into NewFs)
func (f *FsDropbox) Name() string {
	return f.name
}

// String converts this FsDropbox to a string
func (f *FsDropbox) String() string {
	return fmt.Sprintf("Dropbox root '%s'", f.root)
@@ -144,9 +159,13 @@ func newDropbox(name string) *dropbox.Dropbox {

// NewFs constructs an FsDropbox from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
	if uploadChunkSize > maxUploadChunkSize {
		return nil, fmt.Errorf("Chunk size too big, must be < %v", maxUploadChunkSize)
	}
	db := newDropbox(name)
	f := &FsDropbox{
		db: db,
		name: name,
		db:   db,
	}
	f.setRoot(root)

@@ -156,12 +175,6 @@ func NewFs(name, root string) (fs.Fs, error) {
	// Authorize the client
	db.SetAccessToken(token)

	// Make a db to store rclone metadata in
	f.datastoreManager = db.NewDatastoreManager()

	// Open the datastore in the background
	go f.openDataStore()

	// See if the root is actually an object
	entry, err := f.db.Metadata(f.slashRoot, false, false, "", "", metadataLimit)
	if err == nil && !entry.IsDir {
@@ -182,39 +195,19 @@ func NewFs(name, root string) (fs.Fs, error) {
// Sets root in f
func (f *FsDropbox) setRoot(root string) {
	f.root = strings.Trim(root, "/")
	f.slashRoot = "/" + f.root
	lowerCaseRoot := strings.ToLower(f.root)

	f.slashRoot = "/" + lowerCaseRoot
	f.slashRootSlash = f.slashRoot
	if f.root != "" {
	if lowerCaseRoot != "" {
		f.slashRootSlash += "/"
	}
}

// Opens the datastore in f
func (f *FsDropbox) openDataStore() {
	f.datastoreMutex.Lock()
	defer f.datastoreMutex.Unlock()
	fs.Debug(f, "Open rclone datastore")
	// Open the rclone datastore
	var err error
	f.datastore, err = f.datastoreManager.OpenDatastore(datastoreName)
	if err != nil {
		fs.Log(f, "Failed to open datastore: %v", err)
		f.datastoreErr = err
		return
	}

	// Get the table we are using
	f.table, err = f.datastore.GetTable(tableName)
	if err != nil {
		fs.Log(f, "Failed to open datastore table: %v", err)
		f.datastoreErr = err
		return
	}
	fs.Debug(f, "Open rclone datastore finished")
}

// Return an FsObject from a path
func (f *FsDropbox) newFsObjectWithInfo(remote string, info *dropbox.Entry) (fs.Object, error) {
//
// May return nil if an error occurred
func (f *FsDropbox) newFsObjectWithInfo(remote string, info *dropbox.Entry) fs.Object {
	o := &FsObjectDropbox{
		dropbox: f,
		remote:  remote,
@@ -225,49 +218,49 @@ func (f *FsDropbox) newFsObjectWithInfo(remote string, info *dropbox.Entry) (fs.
		err := o.readEntryAndSetMetadata()
		if err != nil {
			// logged already fs.Debug("Failed to read info: %s", err)
			return nil, err
			return nil
		}
	}
	return o, nil
}

// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsDropbox) NewFsObjectWithInfo(remote string, info *dropbox.Entry) fs.Object {
	fs, _ := f.newFsObjectWithInfo(remote, info)
	// Errors have already been logged
	return fs
	return o
}

// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsDropbox) NewFsObject(remote string) fs.Object {
	return f.NewFsObjectWithInfo(remote, nil)
	return f.newFsObjectWithInfo(remote, nil)
}

// Strips the root off entry and returns it
func (f *FsDropbox) stripRoot(entry *dropbox.Entry) string {
	path := entry.Path
	if strings.HasPrefix(path, f.slashRootSlash) {
		path = path[len(f.slashRootSlash):]
// Strips the root off path and returns it
func (f *FsDropbox) stripRoot(path string) *string {
	lowercase := strings.ToLower(path)

	if !strings.HasPrefix(lowercase, f.slashRootSlash) {
		fs.Stats.Error()
		fs.ErrorLog(f, "Path '%s' is not under root '%s'", path, f.slashRootSlash)
		return nil
	}
	return path

	stripped := path[len(f.slashRootSlash):]
	return &stripped
}

// Walk the root returning a channel of FsObjects
func (f *FsDropbox) list(out fs.ObjectsChan) {
	// Track path component case - it can differ for entries coming from the Dropbox API
	// See https://www.dropboxforum.com/hc/communities/public/questions/201665409-Wrong-character-case-of-folder-name-when-calling-listFolder-using-Sync-API?locale=en-us
	// and https://github.com/ncw/rclone/issues/53
	nameTree := NewNameTree()
	cursor := ""
	for {
		deltaPage, err := f.db.Delta(cursor, f.slashRoot)
		if err != nil {
			fs.Stats.Error()
			fs.Log(f, "Couldn't list: %s", err)
			fs.ErrorLog(f, "Couldn't list: %s", err)
			break
		} else {
			if deltaPage.Reset && cursor != "" {
				fs.Log(f, "Unexpected reset during listing - try again")
				fs.ErrorLog(f, "Unexpected reset during listing - try again")
				fs.Stats.Error()
				break
			}
@@ -277,29 +270,58 @@ func (f *FsDropbox) list(out fs.ObjectsChan) {
				entry := deltaEntry.Entry
				if entry == nil {
					// This notifies of a deleted object
					fs.Debug(f, "Deleting metadata for %q", deltaEntry.Path)
					key := metadataKey(deltaEntry.Path) // Path is lowercased
					err := f.deleteMetadata(key)
					if err != nil {
						fs.Debug(f, "Failed to delete metadata for %q", deltaEntry.Path)
						// Don't accumulate Error here
				} else {
					if len(entry.Path) <= 1 || entry.Path[0] != '/' {
						fs.Stats.Error()
						fs.ErrorLog(f, "dropbox API inconsistency: a path should always start with a slash and be at least 2 characters: %s", entry.Path)
						continue
					}

				} else {
					if entry.IsDir {
						// ignore directories
					lastSlashIndex := strings.LastIndex(entry.Path, "/")

					var parentPath string
					if lastSlashIndex == 0 {
						parentPath = ""
					} else {
						path := f.stripRoot(entry)
						out <- f.NewFsObjectWithInfo(path, entry)
						parentPath = entry.Path[1:lastSlashIndex]
					}
					lastComponent := entry.Path[lastSlashIndex+1:]

					if entry.IsDir {
						nameTree.PutCaseCorrectDirectoryName(parentPath, lastComponent)
					} else {
						parentPathCorrectCase := nameTree.GetPathWithCorrectCase(parentPath)
						if parentPathCorrectCase != nil {
							path := f.stripRoot(*parentPathCorrectCase + "/" + lastComponent)
							if path == nil {
								// an error occurred and was logged by stripRoot
								continue
							}

							out <- f.newFsObjectWithInfo(*path, entry)
						} else {
							nameTree.PutFile(parentPath, lastComponent, entry)
						}
					}
				}
			}
			if !deltaPage.HasMore {
				break
			}
			cursor = deltaPage.Cursor
			cursor = deltaPage.Cursor.Cursor
		}
	}

	walkFunc := func(caseCorrectFilePath string, entry *dropbox.Entry) {
		path := f.stripRoot("/" + caseCorrectFilePath)
		if path == nil {
			// an error occurred and was logged by stripRoot
			return
		}

		out <- f.newFsObjectWithInfo(*path, entry)
	}
	nameTree.WalkFiles(f.root, walkFunc)
}

// Walk the path returning a channel of FsObjects
@@ -320,15 +342,21 @@ func (f *FsDropbox) ListDir() fs.DirChan {
	entry, err := f.db.Metadata(f.root, true, false, "", "", metadataLimit)
	if err != nil {
		fs.Stats.Error()
		fs.Log(f, "Couldn't list directories in root: %s", err)
		fs.ErrorLog(f, "Couldn't list directories in root: %s", err)
	} else {
		for i := range entry.Contents {
			entry := &entry.Contents[i]
			if entry.IsDir {
				name := f.stripRoot(entry.Path)
				if name == nil {
					// an error occurred and was logged by stripRoot
					continue
				}

				out <- &fs.Dir{
					Name: f.stripRoot(entry),
					Name:  *name,
					When:  time.Time(entry.ClientMtime),
					Bytes: int64(entry.Bytes),
					Bytes: entry.Bytes,
					Count: -1,
				}
			}
@@ -392,8 +420,37 @@ func (f *FsDropbox) Rmdir() error {
}

// Return the precision
func (fs *FsDropbox) Precision() time.Duration {
	return time.Nanosecond
func (f *FsDropbox) Precision() time.Duration {
	return fs.ModTimeNotSupported
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *FsDropbox) Copy(src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*FsObjectDropbox)
	if !ok {
		fs.Debug(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}

	// Temporary FsObject under construction
	dstObj := &FsObjectDropbox{dropbox: f, remote: remote}

	srcPath := srcObj.remotePath()
	dstPath := dstObj.remotePath()
	entry, err := f.db.Copy(srcPath, dstPath, false)
	if err != nil {
		return nil, fmt.Errorf("Copy failed: %s", err)
	}
	dstObj.setMetadataFromEntry(entry)
	return dstObj, nil
}

// Purge deletes all the files and the container
@@ -402,81 +459,11 @@ func (fs *FsDropbox) Precision() time.Duration {
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *FsDropbox) Purge() error {
	// Delete metadata first
	var wg sync.WaitGroup
	to_be_deleted := f.List()
	wg.Add(fs.Config.Transfers)
	for i := 0; i < fs.Config.Transfers; i++ {
		go func() {
			defer wg.Done()
			for dst := range to_be_deleted {
				o := dst.(*FsObjectDropbox)
				o.deleteMetadata()
			}
		}()
	}
	wg.Wait()

	// Let dropbox delete the filesystem tree
	_, err := f.db.Delete(f.slashRoot)
	return err
}

// Tries the transaction in fn, then calls commit, repeating until the retry limit
//
// Holds the datastore mutex while in progress
func (f *FsDropbox) transaction(fn func() error) error {
	f.datastoreMutex.Lock()
	defer f.datastoreMutex.Unlock()
	if f.datastoreErr != nil {
		return f.datastoreErr
	}
	var err error
	for i := 1; i <= maxCommitRetries; i++ {
		err = fn()
		if err != nil {
			return err
		}

		err = f.datastore.Commit()
		if err == nil {
			break
		}
		fs.Debug(f, "Retrying transaction %d/%d", i, maxCommitRetries)
	}
	if err != nil {
		return fmt.Errorf("Failed to commit metadata changes: %s", err)
	}
	return nil
}

// Deletes the metadata associated with this key
func (f *FsDropbox) deleteMetadata(key string) error {
	return f.transaction(func() error {
		record, err := f.table.Get(key)
		if err != nil {
			return fmt.Errorf("Couldn't get record: %s", err)
		}
		if record == nil {
			return nil
		}
		record.DeleteRecord()
		return nil
	})
}

// Reads the record attached to key
//
// Holds the datastore mutex while in progress
func (f *FsDropbox) readRecord(key string) (*dropbox.Record, error) {
	f.datastoreMutex.Lock()
	defer f.datastoreMutex.Unlock()
	if f.datastoreErr != nil {
		return nil, f.datastoreErr
	}
	return f.table.Get(key)
}

// ------------------------------------------------------------

// Return the parent Fs
@@ -498,33 +485,8 @@ func (o *FsObjectDropbox) Remote() string {
}

// Md5sum returns the Md5sum of an object returning a lowercase hex string
//
// FIXME has to download the file!
func (o *FsObjectDropbox) Md5sum() (string, error) {
	if o.md5sum != "" {
		return o.md5sum, nil
	}
	err := o.readMetaData()
	if err != nil {
		fs.Log(o, "Failed to read metadata: %s", err)
		return "", fmt.Errorf("Failed to read metadata: %s", err)
	}

	// For pre-existing files which have no md5sum, we could read it and set it?
	// in, err := o.Open()
	// if err != nil {
	// 	return "", err
	// }
	// defer in.Close()
	// hash := md5.New()
	// _, err = io.Copy(hash, in)
	// if err != nil {
	// 	return "", err
	// }
	// o.md5sum = fmt.Sprintf("%x", hash.Sum(nil))
	return o.md5sum, nil
	return "", nil
}

// Size returns the size of an object in bytes
@@ -536,8 +498,9 @@ func (o *FsObjectDropbox) Size() int64 {
//
// This isn't a complete set of metadata and has an inaccurate date
func (o *FsObjectDropbox) setMetadataFromEntry(info *dropbox.Entry) {
	o.bytes = int64(info.Bytes)
	o.bytes = info.Bytes
	o.modTime = time.Time(info.ClientMtime)
	o.hasMetadata = true
}

// Reads the entry from dropbox
@@ -573,7 +536,9 @@ func (o *FsObjectDropbox) remotePath() string {
func metadataKey(path string) string {
	// NB File system is case insensitive
	path = strings.ToLower(path)
	return fmt.Sprintf("%x", md5.Sum([]byte(path)))
	hash := md5.New()
	_, _ = hash.Write([]byte(path))
	return fmt.Sprintf("%x", hash.Sum(nil))
}

// Returns the key for the metadata database
@@ -583,58 +548,11 @@ func (o *FsObjectDropbox) metadataKey() string {

// readMetaData gets the info if it hasn't already been fetched
func (o *FsObjectDropbox) readMetaData() (err error) {
	if o.md5sum != "" {
	if o.hasMetadata {
		return nil
	}

	// fs.Debug(o, "Reading metadata from datastore")
	record, err := o.dropbox.readRecord(o.metadataKey())
	if err != nil {
		fs.Debug(o, "Couldn't read metadata: %s", err)
		record = nil
	}

	if record != nil {
		// Read md5sum
		md5sumInterface, ok, err := record.Get(md5sumField)
		if err != nil {
			return err
		}
		if !ok {
			fs.Debug(o, "Couldn't find md5sum in record")
		} else {
			md5sum, ok := md5sumInterface.(string)
			if !ok {
				fs.Debug(o, "md5sum not a string")
			} else {
				o.md5sum = md5sum
			}
		}

		// read mtime
		mtimeInterface, ok, err := record.Get(mtimeField)
		if err != nil {
			return err
		}
		if !ok {
			fs.Debug(o, "Couldn't find mtime in record")
		} else {
			mtime, ok := mtimeInterface.(string)
			if !ok {
				fs.Debug(o, "mtime not a string")
			} else {
				modTime, err := time.Parse(RFC3339In, mtime)
				if err != nil {
					return err
				}
				o.modTime = modTime
			}
		}
	}

	// Last resort
	o.readEntryAndSetMetadata()
	return nil
	return o.readEntryAndSetMetadata()
}

// ModTime returns the modification time of the object
@@ -650,57 +568,12 @@ func (o *FsObjectDropbox) ModTime() time.Time {
	return o.modTime
}

// Sets the modification time of the local fs object into the record
// FIXME if we don't set md5sum what will that do?
func (o *FsObjectDropbox) setModTimeAndMd5sum(modTime time.Time, md5sum string) error {
	key := o.metadataKey()
	// fs.Debug(o, "Writing metadata to datastore")
	return o.dropbox.transaction(func() error {
		record, err := o.dropbox.table.GetOrInsert(key)
		if err != nil {
			return fmt.Errorf("Couldn't read record: %s", err)
		}

		if md5sum != "" {
			err = record.Set(md5sumField, md5sum)
			if err != nil {
				return fmt.Errorf("Couldn't set md5sum record: %s", err)
			}
		}

		if !modTime.IsZero() {
			mtime := modTime.Format(RFC3339Out)
			err := record.Set(mtimeField, mtime)
			if err != nil {
				return fmt.Errorf("Couldn't set mtime record: %s", err)
			}
		}

		return nil
	})
}

// Deletes the metadata associated with this file
//
// It logs any errors
func (o *FsObjectDropbox) deleteMetadata() {
	fs.Debug(o, "Deleting metadata from datastore")
	err := o.dropbox.deleteMetadata(o.metadataKey())
	if err != nil {
		fs.Log(o, "Error deleting metadata: %v", err)
		fs.Stats.Error()
	}
}

// Sets the modification time of the local fs object
//
// Commits the datastore
func (o *FsObjectDropbox) SetModTime(modTime time.Time) {
	err := o.setModTimeAndMd5sum(modTime, "")
	if err != nil {
		fs.Stats.Error()
		fs.Log(o, err.Error())
	}
	// FIXME not implemented
	return
}
// Is this object storable
@@ -720,27 +593,27 @@ func (o *FsObjectDropbox) Open() (in io.ReadCloser, err error) {
//
// The new object may have been created if an error is returned
func (o *FsObjectDropbox) Update(in io.Reader, modTime time.Time, size int64) error {
	// Calculate md5sum as we upload it
	hash := md5.New()
	rc := &readCloser{in: io.TeeReader(in, hash)}
	entry, err := o.dropbox.db.UploadByChunk(rc, uploadChunkSize, o.remotePath(), true, "")
	remote := o.remotePath()
	if ignoredFiles.MatchString(remote) {
		fs.ErrorLog(o, "File name disallowed - not uploading")
		return nil
	}
	entry, err := o.dropbox.db.UploadByChunk(ioutil.NopCloser(in), int(uploadChunkSize), remote, true, "")
	if err != nil {
		return fmt.Errorf("Upload failed: %s", err)
	}
	o.setMetadataFromEntry(entry)

	md5sum := fmt.Sprintf("%x", hash.Sum(nil))
	return o.setModTimeAndMd5sum(modTime, md5sum)
	return nil
}

// Remove an object
func (o *FsObjectDropbox) Remove() error {
	o.deleteMetadata()
	_, err := o.dropbox.db.Delete(o.remotePath())
	return err
}

// Check the interfaces are satisfied
var _ fs.Fs = &FsDropbox{}
var _ fs.Copier = &FsDropbox{}
var _ fs.Purger = &FsDropbox{}
var _ fs.Object = &FsObjectDropbox{}
54 dropbox/dropbox_test.go Normal file
@@ -0,0 +1,54 @@
// Test Dropbox filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package dropbox_test

import (
	"testing"

	"github.com/ncw/rclone/dropbox"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
)

func init() {
	fstests.NilObject = fs.Object((*dropbox.FsObjectDropbox)(nil))
	fstests.RemoteName = "TestDropbox:"
}

// Generic tests for the Fs
func TestInit(t *testing.T)                 { fstests.TestInit(t) }
func TestFsString(t *testing.T)             { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T)         { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T)      { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T)              { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T)          { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T)       { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T)           { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T)           { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T)       { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T)        { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T)           { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T)          { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T)        { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T)      { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T)               { fstests.TestFsCopy(t) }
func TestFsRmdirFull(t *testing.T)          { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T)          { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T)         { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T)             { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T)         { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T)         { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T)        { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T)     { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T)           { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T)           { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T)         { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T)       { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T)            { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T)    { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T)         { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T)          { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T)             { fstests.TestFinalise(t) }
179 dropbox/nametree.go Normal file
@@ -0,0 +1,179 @@
package dropbox

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/ncw/rclone/fs"
	"github.com/stacktic/dropbox"
)

type NameTreeNode struct {
	// Map from lowercase directory name to tree node
	Directories map[string]*NameTreeNode

	// Map from file name (case sensitive) to dropbox entry
	Files map[string]*dropbox.Entry

	// Empty string if the exact case is unknown or this is the root node
	CaseCorrectName string
}

// ------------------------------------------------------------

func newNameTreeNode(caseCorrectName string) *NameTreeNode {
	return &NameTreeNode{
		CaseCorrectName: caseCorrectName,
		Directories:     make(map[string]*NameTreeNode),
		Files:           make(map[string]*dropbox.Entry),
	}
}

func NewNameTree() *NameTreeNode {
	return newNameTreeNode("")
}

func (tree *NameTreeNode) String() string {
	if len(tree.CaseCorrectName) == 0 {
		return "NameTreeNode/<root>"
	}
	return fmt.Sprintf("NameTreeNode/%q", tree.CaseCorrectName)
}

func (tree *NameTreeNode) getTreeNode(path string) *NameTreeNode {
	if len(path) == 0 {
		// no lookup required, just return the root
		return tree
	}

	current := tree
	for _, component := range strings.Split(path, "/") {
		if len(component) == 0 {
			fs.Stats.Error()
			fs.ErrorLog(tree, "getTreeNode: path component is empty (full path %q)", path)
			return nil
		}

		lowercase := strings.ToLower(component)

		lookup := current.Directories[lowercase]
		if lookup == nil {
			lookup = newNameTreeNode("")
			current.Directories[lowercase] = lookup
		}

		current = lookup
	}

	return current
}

func (tree *NameTreeNode) PutCaseCorrectDirectoryName(parentPath string, caseCorrectDirectoryName string) {
	if len(caseCorrectDirectoryName) == 0 {
		fs.Stats.Error()
		fs.ErrorLog(tree, "PutCaseCorrectDirectoryName: empty caseCorrectDirectoryName is not allowed (parentPath: %q)", parentPath)
		return
	}

	node := tree.getTreeNode(parentPath)
	if node == nil {
		return
	}

	lowerCaseDirectoryName := strings.ToLower(caseCorrectDirectoryName)
	directory := node.Directories[lowerCaseDirectoryName]
	if directory == nil {
		directory = newNameTreeNode(caseCorrectDirectoryName)
		node.Directories[lowerCaseDirectoryName] = directory
	} else {
		if len(directory.CaseCorrectName) > 0 {
			fs.Stats.Error()
			fs.ErrorLog(tree, "PutCaseCorrectDirectoryName: directory %q already exists under parent path %q", caseCorrectDirectoryName, parentPath)
			return
		}

		directory.CaseCorrectName = caseCorrectDirectoryName
	}
}

func (tree *NameTreeNode) PutFile(parentPath string, caseCorrectFileName string, dropboxEntry *dropbox.Entry) {
	node := tree.getTreeNode(parentPath)
	if node == nil {
		return
	}

	if node.Files[caseCorrectFileName] != nil {
		fs.Stats.Error()
		fs.ErrorLog(tree, "PutFile: file %q already exists at %q", caseCorrectFileName, parentPath)
		return
	}

	node.Files[caseCorrectFileName] = dropboxEntry
}

func (tree *NameTreeNode) GetPathWithCorrectCase(path string) *string {
	if path == "" {
		empty := ""
		return &empty
	}

	var result bytes.Buffer

	current := tree
	for _, component := range strings.Split(path, "/") {
		if component == "" {
			fs.Stats.Error()
			fs.ErrorLog(tree, "GetPathWithCorrectCase: path component is empty (full path %q)", path)
			return nil
		}

		lowercase := strings.ToLower(component)

		current = current.Directories[lowercase]
		if current == nil || current.CaseCorrectName == "" {
			return nil
		}

		result.WriteString("/")
		result.WriteString(current.CaseCorrectName)
	}

	resultString := result.String()
	return &resultString
}

type NameTreeFileWalkFunc func(caseCorrectFilePath string, entry *dropbox.Entry)

func (tree *NameTreeNode) walkFilesRec(currentPath string, walkFunc NameTreeFileWalkFunc) {
	var prefix string
	if currentPath == "" {
		prefix = ""
	} else {
		prefix = currentPath + "/"
	}

	for name, entry := range tree.Files {
		walkFunc(prefix+name, entry)
	}

	for lowerCaseName, directory := range tree.Directories {
		caseCorrectName := directory.CaseCorrectName
		if caseCorrectName == "" {
			fs.Stats.Error()
			fs.ErrorLog(tree, "WalkFiles: exact name of the directory %q is unknown (parent path: %q)", lowerCaseName, currentPath)
			continue
		}

		directory.walkFilesRec(prefix+caseCorrectName, walkFunc)
	}
}

func (tree *NameTreeNode) WalkFiles(rootPath string, walkFunc NameTreeFileWalkFunc) {
	node := tree.getTreeNode(rootPath)
	if node == nil {
		return
	}

	node.walkFilesRec(rootPath, walkFunc)
}
124 dropbox/nametree_test.go Normal file
@@ -0,0 +1,124 @@
package dropbox_test

import (
	"testing"

	"github.com/ncw/rclone/dropbox"
	"github.com/ncw/rclone/fs"
	dropboxapi "github.com/stacktic/dropbox"
)

func assert(t *testing.T, shouldBeTrue bool, failMessage string) {
	if !shouldBeTrue {
		t.Fatal(failMessage)
	}
}

func TestPutCaseCorrectDirectoryName(t *testing.T) {
	errors := fs.Stats.GetErrors()

	tree := dropbox.NewNameTree()
	tree.PutCaseCorrectDirectoryName("a/b", "C")

	assert(t, tree.CaseCorrectName == "", "Root CaseCorrectName should be empty")

	a := tree.Directories["a"]
	assert(t, a.CaseCorrectName == "", "CaseCorrectName at 'a' should be empty")

	b := a.Directories["b"]
	assert(t, b.CaseCorrectName == "", "CaseCorrectName at 'a/b' should be empty")

	c := b.Directories["c"]
	assert(t, c.CaseCorrectName == "C", "CaseCorrectName at 'a/b/c' should be 'C'")

	assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
}

func TestPutCaseCorrectDirectoryNameEmptyComponent(t *testing.T) {
	errors := fs.Stats.GetErrors()

	tree := dropbox.NewNameTree()
	tree.PutCaseCorrectDirectoryName("/a", "C")
	tree.PutCaseCorrectDirectoryName("b/", "C")
	tree.PutCaseCorrectDirectoryName("a//b", "C")

	assert(t, fs.Stats.GetErrors() == errors+3, "3 errors should be reported")
}

func TestPutCaseCorrectDirectoryNameEmptyParent(t *testing.T) {
	errors := fs.Stats.GetErrors()

	tree := dropbox.NewNameTree()
	tree.PutCaseCorrectDirectoryName("", "C")

	c := tree.Directories["c"]
	assert(t, c.CaseCorrectName == "C", "CaseCorrectName at 'c' should be 'C'")

	assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
}

func TestGetPathWithCorrectCase(t *testing.T) {
	errors := fs.Stats.GetErrors()

	tree := dropbox.NewNameTree()
	tree.PutCaseCorrectDirectoryName("a", "C")
	assert(t, tree.GetPathWithCorrectCase("a/c") == nil, "Path for 'a' should not be available")

	tree.PutCaseCorrectDirectoryName("", "A")
	assert(t, *tree.GetPathWithCorrectCase("a/c") == "/A/C", "Path for 'a/c' should be '/A/C'")

	assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
}

func TestPutAndWalk(t *testing.T) {
	errors := fs.Stats.GetErrors()

	tree := dropbox.NewNameTree()
	tree.PutFile("a", "F", &dropboxapi.Entry{Path: "xxx"})
	tree.PutCaseCorrectDirectoryName("", "A")

	numCalled := 0
	walkFunc := func(caseCorrectFilePath string, entry *dropboxapi.Entry) {
		assert(t, caseCorrectFilePath == "A/F", "caseCorrectFilePath should be A/F, not "+caseCorrectFilePath)
		assert(t, entry.Path == "xxx", "entry.Path should be xxx")
		numCalled++
	}
	tree.WalkFiles("", walkFunc)

	assert(t, numCalled == 1, "walk func should be called only once")

	assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
}

func TestPutAndWalkWithPrefix(t *testing.T) {
	errors := fs.Stats.GetErrors()

	tree := dropbox.NewNameTree()
	tree.PutFile("a", "F", &dropboxapi.Entry{Path: "xxx"})
	tree.PutCaseCorrectDirectoryName("", "A")

	numCalled := 0
	walkFunc := func(caseCorrectFilePath string, entry *dropboxapi.Entry) {
		assert(t, caseCorrectFilePath == "A/F", "caseCorrectFilePath should be A/F, not "+caseCorrectFilePath)
		assert(t, entry.Path == "xxx", "entry.Path should be xxx")
		numCalled++
	}
	tree.WalkFiles("A", walkFunc)

	assert(t, numCalled == 1, "walk func should be called only once")

	assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
}

func TestPutAndWalkIncompleteTree(t *testing.T) {
	errors := fs.Stats.GetErrors()

	tree := dropbox.NewNameTree()
	tree.PutFile("a", "F", &dropboxapi.Entry{Path: "xxx"})

	walkFunc := func(caseCorrectFilePath string, entry *dropboxapi.Entry) {
		t.Fatal("Should not be called")
	}
	tree.WalkFiles("", walkFunc)

	assert(t, fs.Stats.GetErrors() == errors+1, "One error should be reported")
}
@@ -10,13 +10,24 @@ import (
	"strings"
	"sync"
	"time"

	"github.com/tsenart/tb"
)

// Globals
var (
	Stats = NewStats()
	Stats       = NewStats()
	tokenBucket *tb.Bucket
)

// Start the token bucket if necessary
func startTokenBucket() {
	if bwLimit > 0 {
		tokenBucket = tb.NewBucket(int64(bwLimit), 100*time.Millisecond)
		Log(nil, "Starting bandwidth limiter at %vBytes/s", &bwLimit)
	}
}

// StringSet holds a set of strings
type StringSet map[string]bool

@@ -113,6 +124,23 @@ func (s *StatsInfo) GetErrors() int64 {
	return s.errors
}

// ResetCounters sets the counters (bytes, checks, errors, transfers) to 0
func (s *StatsInfo) ResetCounters() {
	s.lock.RLock()
	defer s.lock.RUnlock()
	s.bytes = 0
	s.errors = 0
	s.checks = 0
	s.transfers = 0
}

// ResetErrors sets the errors count to 0
func (s *StatsInfo) ResetErrors() {
	s.lock.RLock()
	defer s.lock.RUnlock()
	s.errors = 0
}

// Errored returns whether there have been any errors
func (s *StatsInfo) Errored() bool {
	s.lock.RLock()
@@ -142,6 +170,13 @@ func (s *StatsInfo) DoneChecking(o Object) {
	s.checks += 1
}

// GetTransfers reads the number of transfers
func (s *StatsInfo) GetTransfers() int64 {
	s.lock.RLock()
	defer s.lock.RUnlock()
	return s.transfers
}

// Transferring adds a transfer into the stats
func (s *StatsInfo) Transferring(o Object) {
	s.lock.Lock()
@@ -159,6 +194,12 @@ func (s *StatsInfo) DoneTransferring(o Object) {

// Account limits and accounts for one transfer
type Account struct {
	// The mutex is to make sure Read() and Close() aren't called
	// concurrently. Unfortunately the persistent connection loop
	// in the http transport calls Read() after Do() returns on
	// CancelRequest, so this race can happen when it apparently
	// shouldn't.
	mu    sync.Mutex
	in    io.ReadCloser
	bytes int64
}
@@ -172,17 +213,25 @@ func NewAccount(in io.ReadCloser) *Account {

// Read bytes from the object - see io.Reader
func (file *Account) Read(p []byte) (n int, err error) {
	file.mu.Lock()
	defer file.mu.Unlock()
	n, err = file.in.Read(p)
	file.bytes += int64(n)
	Stats.Bytes(int64(n))
	if err == io.EOF {
		// FIXME Do something?
	}
	// Limit the transfer speed if required
	if tokenBucket != nil {
		tokenBucket.Wait(int64(n))
	}
	return
}
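
For illustration, the same token-bucket idea can be wrapped around any io.Reader. This sketch assumes only the two github.com/tsenart/tb calls the diff itself uses (NewBucket and Wait); the type name and rate are hypothetical, not rclone code.

package example

import (
	"io"
	"time"

	"github.com/tsenart/tb"
)

// throttledReader waits for tokens (one per byte) after each Read,
// capping throughput at roughly `rate` bytes per second.
type throttledReader struct {
	in     io.Reader
	bucket *tb.Bucket
}

func newThrottledReader(in io.Reader, rate int64) *throttledReader {
	// Refill the bucket every 100ms, as the diff above does.
	return &throttledReader{in: in, bucket: tb.NewBucket(rate, 100*time.Millisecond)}
}

func (r *throttledReader) Read(p []byte) (int, error) {
	n, err := r.in.Read(p)
	r.bucket.Wait(int64(n)) // block until n tokens are available
	return n, err
}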

// Close the object
func (file *Account) Close() error {
	file.mu.Lock()
	defer file.mu.Unlock()
	// FIXME do something?
	return file.in.Close()
}

182 fs/config.go
@@ -6,6 +6,8 @@ import (
	"bufio"
	"fmt"
	"log"
	"math"
	"net/http"
	"os"
	"os/user"
	"path"
@@ -15,6 +17,7 @@ import (
	"time"

	"github.com/Unknwon/goconfig"
	"github.com/mreiferson/go-httpclient"
	"github.com/ogier/pflag"
)

@@ -22,6 +25,8 @@ const (
	configFileName = ".rclone.conf"
)

type SizeSuffix int64

// Global
var (
	// Config file
@@ -33,34 +38,151 @@ var (
	// Global config
	Config = &ConfigInfo{}
	// Flags
	verbose      = pflag.BoolP("verbose", "v", false, "Print lots more stuff")
	quiet        = pflag.BoolP("quiet", "q", false, "Print as little stuff as possible")
	modifyWindow = pflag.DurationP("modify-window", "", time.Nanosecond, "Max time diff to be considered the same")
	checkers     = pflag.IntP("checkers", "", 8, "Number of checkers to run in parallel.")
	transfers    = pflag.IntP("transfers", "", 4, "Number of file transfers to run in parallel.")
	configFile   = pflag.StringP("config", "", ConfigPath, "Config file.")
	dryRun       = pflag.BoolP("dry-run", "n", false, "Do a trial run with no permanent changes")
	verbose        = pflag.BoolP("verbose", "v", false, "Print lots more stuff")
	quiet          = pflag.BoolP("quiet", "q", false, "Print as little stuff as possible")
	modifyWindow   = pflag.DurationP("modify-window", "", time.Nanosecond, "Max time diff to be considered the same")
	checkers       = pflag.IntP("checkers", "", 8, "Number of checkers to run in parallel.")
	transfers      = pflag.IntP("transfers", "", 4, "Number of file transfers to run in parallel.")
	configFile     = pflag.StringP("config", "", ConfigPath, "Config file.")
	checkSum       = pflag.BoolP("checksum", "c", false, "Skip based on checksum & size, not mod-time & size")
	sizeOnly       = pflag.BoolP("size-only", "", false, "Skip based on size only, not mod-time or checksum")
	dryRun         = pflag.BoolP("dry-run", "n", false, "Do a trial run with no permanent changes")
	connectTimeout = pflag.DurationP("contimeout", "", 60*time.Second, "Connect timeout")
	timeout        = pflag.DurationP("timeout", "", 5*60*time.Second, "IO idle timeout")
	bwLimit        SizeSuffix
)

func init() {
	pflag.VarP(&bwLimit, "bwlimit", "", "Bandwidth limit in kBytes/s, or use suffix k|M|G")
}

// Turn SizeSuffix into a string
func (x SizeSuffix) String() string {
	scaled := float64(0)
	suffix := ""
	switch {
	case x == 0:
		return "0"
	case x < 1024*1024:
		scaled = float64(x) / 1024
		suffix = "k"
	case x < 1024*1024*1024:
		scaled = float64(x) / 1024 / 1024
		suffix = "M"
	default:
		scaled = float64(x) / 1024 / 1024 / 1024
		suffix = "G"
	}
	if math.Floor(scaled) == scaled {
		return fmt.Sprintf("%.0f%s", scaled, suffix)
	}
	return fmt.Sprintf("%.3f%s", scaled, suffix)
}

// Set a SizeSuffix
func (x *SizeSuffix) Set(s string) error {
	if len(s) == 0 {
		return fmt.Errorf("Empty string")
	}
	suffix := s[len(s)-1]
	suffixLen := 1
	var multiplier float64
	switch suffix {
	case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.':
		suffixLen = 0
		multiplier = 1 << 10
	case 'k', 'K':
		multiplier = 1 << 10
	case 'm', 'M':
		multiplier = 1 << 20
	case 'g', 'G':
		multiplier = 1 << 30
	default:
		return fmt.Errorf("Bad suffix %q", suffix)
	}
	s = s[:len(s)-suffixLen]
	value, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return err
	}
	if value < 0 {
		return fmt.Errorf("Size can't be negative %q", s)
	}
	value *= multiplier
	*x = SizeSuffix(value)
	return nil
}

// Check it satisfies the interface
var _ pflag.Value = (*SizeSuffix)(nil)
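As a usage sketch (not part of the diff), SizeSuffix can be driven directly with the same parser the --bwlimit flag uses; the import path assumes the package layout shown above.

package main

import (
	"fmt"

	"github.com/ncw/rclone/fs"
)

func main() {
	var limit fs.SizeSuffix
	if err := limit.Set("10M"); err != nil { // same parser used by --bwlimit
		panic(err)
	}
	fmt.Println(int64(limit)) // 10485760
	fmt.Println(limit)        // "10M", via SizeSuffix.String
}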

// Filesystem config options
type ConfigInfo struct {
	Verbose      bool
	Quiet        bool
	DryRun       bool
	ModifyWindow time.Duration
	Checkers     int
	Transfers    int
	Verbose        bool
	Quiet          bool
	DryRun         bool
	CheckSum       bool
	SizeOnly       bool
	ModifyWindow   time.Duration
	Checkers       int
	Transfers      int
	ConnectTimeout time.Duration // Connect timeout
	Timeout        time.Duration // Data channel timeout
}

// Transport returns an http.RoundTripper with the correct timeouts
func (ci *ConfigInfo) Transport() http.RoundTripper {
	return &httpclient.Transport{
		Proxy:               http.ProxyFromEnvironment,
		MaxIdleConnsPerHost: ci.Checkers + ci.Transfers + 1,

		// ConnectTimeout, if non-zero, is the maximum amount of time a dial will wait for
		// a connect to complete.
		ConnectTimeout: ci.ConnectTimeout,

		// ResponseHeaderTimeout, if non-zero, specifies the amount of
		// time to wait for a server's response headers after fully
		// writing the request (including its body, if any). This
		// time does not include the time to read the response body.
		ResponseHeaderTimeout: ci.Timeout,

		// RequestTimeout, if non-zero, specifies the amount of time for the entire
		// request to complete (including all of the above timeouts + entire response body).
		// This should never be less than the sum total of the above two timeouts.
		//RequestTimeout: NOT SET,

		// ReadWriteTimeout, if non-zero, will set a deadline for every Read and
		// Write operation on the request connection.
		ReadWriteTimeout: ci.Timeout,
	}
}

// Client returns an http.Client with the correct timeouts
func (ci *ConfigInfo) Client() *http.Client {
	return &http.Client{
		Transport: ci.Transport(),
	}
}

// Find the config directory
func configHome() string {
	// Find the user's home directory
	usr, err := user.Current()
	if err != nil {
		log.Printf("Couldn't find home directory: %v", err)
		return ""
	if err == nil {
		return usr.HomeDir
	}
	return usr.HomeDir
	// Fall back to reading $HOME - work around user.Current() not
	// working for cross compiled binaries on OSX.
	// https://github.com/golang/go/issues/6376
	home := os.Getenv("HOME")
	if home != "" {
		return home
	}
	log.Printf("Couldn't find home directory or read HOME environment variable.")
	log.Printf("Defaulting to storing config in current directory.")
	log.Printf("Use -config flag to workaround.")
	log.Printf("Error was: %v", err)
	return ""
}

// Loads the config file
@@ -74,6 +196,10 @@ func LoadConfig() {
	Config.Checkers = *checkers
	Config.Transfers = *transfers
	Config.DryRun = *dryRun
	Config.Timeout = *timeout
	Config.ConnectTimeout = *connectTimeout
	Config.CheckSum = *checkSum
	Config.SizeOnly = *sizeOnly

	ConfigPath = *configFile

@@ -81,12 +207,15 @@ func LoadConfig() {
	var err error
	ConfigFile, err = goconfig.LoadConfigFile(ConfigPath)
	if err != nil {
		log.Printf("Failed to load config file %v - using defaults", ConfigPath)
		log.Printf("Failed to load config file %v - using defaults: %v", ConfigPath, err)
		ConfigFile, err = goconfig.LoadConfigFile(os.DevNull)
		if err != nil {
			log.Fatalf("Failed to read null config file: %v", err)
		}
	}

	// Start the token bucket limiter
	startTokenBucket()
}

// Save configuration file.
@@ -320,9 +449,20 @@ func EditConfig() {
		name := ChooseRemote()
		EditRemote(name)
	case 'n':
		fmt.Printf("name> ")
		name := ReadLine()
		NewRemote(name)
	nameLoop:
		for {
			fmt.Printf("name> ")
			name := ReadLine()
			switch {
			case name == "":
				fmt.Printf("Can't use empty name\n")
			case isDriveLetter(name):
				fmt.Printf("Can't use %q as it can be confused with a drive letter\n", name)
			default:
				NewRemote(name)
				break nameLoop
			}
		}
	case 'd':
		name := ChooseRemote()
		DeleteRemote(name)

57 fs/config_test.go Normal file
@@ -0,0 +1,57 @@
package fs

import "testing"

func TestSizeSuffixString(t *testing.T) {
	for _, test := range []struct {
		in   float64
		want string
	}{
		{0, "0"},
		{102, "0.100k"},
		{1024, "1k"},
		{1024 * 1024, "1M"},
		{1024 * 1024 * 1024, "1G"},
		{10 * 1024 * 1024 * 1024, "10G"},
		{10.1 * 1024 * 1024 * 1024, "10.100G"},
	} {
		ss := SizeSuffix(test.in)
		got := ss.String()
		if test.want != got {
			t.Errorf("Want %v got %v", test.want, got)
		}
	}
}

func TestSizeSuffixSet(t *testing.T) {
	for i, test := range []struct {
		in   string
		want int64
		err  bool
	}{
		{"0", 0, false},
		{"0.1k", 102, false},
		{"0.1", 102, false},
		{"1K", 1024, false},
		{"1", 1024, false},
		{"2.5", 1024 * 2.5, false},
		{"1M", 1024 * 1024, false},
		{"1.g", 1024 * 1024 * 1024, false},
		{"10G", 10 * 1024 * 1024 * 1024, false},
		{"", 0, true},
		{"1p", 0, true},
		{"1.p", 0, true},
		{"1p", 0, true},
		{"-1K", 0, true},
	} {
		ss := SizeSuffix(0)
		err := ss.Set(test.in)
		if (err != nil) != test.err {
			t.Errorf("%d: Expecting error %v but got error %v", i, test.err, err)
		}
		got := int64(ss)
		if test.want != got {
			t.Errorf("%d: Want %v got %v", i, test.want, got)
		}
	}
}

12 fs/driveletter.go Normal file
@@ -0,0 +1,12 @@
// +build !windows

package fs

// isDriveLetter returns a bool indicating whether name is a valid
// Windows drive letter
//
// On non-Windows platforms we don't have drive letters, so we always
// return false
func isDriveLetter(name string) bool {
	return false
}

13 fs/driveletter_windows.go Normal file
@@ -0,0 +1,13 @@
// +build windows

package fs

// isDriveLetter returns a bool indicating whether name is a valid
// Windows drive letter
func isDriveLetter(name string) bool {
	if len(name) != 1 {
		return false
	}
	c := name[0]
	return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}

119 fs/fs.go
@@ -1,11 +1,11 @@
// File system interface

// Generic file system interface for rclone object storage systems
package fs

import (
	"fmt"
	"io"
	"log"
	"path/filepath"
	"regexp"
	"time"
)
@@ -14,12 +14,20 @@ import (
const (
	// User agent for Fs which can set it
	UserAgent = "rclone/" + Version
	// Very large precision value to show mod time isn't supported
	ModTimeNotSupported = 100 * 365 * 24 * time.Hour
)

// Globals
var (
	// Filesystem registry
	fsRegistry []*FsInfo
	// Error returned by NewFs if not found in config file
	NotFoundInConfigFile = fmt.Errorf("Didn't find section in config file")
	ErrorCantCopy        = fmt.Errorf("Can't copy object - incompatible remotes")
	ErrorCantMove        = fmt.Errorf("Can't move object - incompatible remotes")
	ErrorCantDirMove     = fmt.Errorf("Can't move directory - incompatible remotes")
	ErrorDirExists       = fmt.Errorf("Can't move directory - destination already exists")
)

// Filesystem info
@@ -59,6 +67,9 @@ func Register(info *FsInfo) {

// A Filesystem, describes the local filesystem and the remote object store
type Fs interface {
	// The name of the remote (as passed into NewFs)
	Name() string

	// String returns a description of the FS
	String() string

@@ -79,9 +90,13 @@ type Fs interface {
	Put(in io.Reader, remote string, modTime time.Time, size int64) (Object, error)

	// Make the directory (container, bucket)
	//
	// Shouldn't return an error if it already exists
	Mkdir() error

	// Remove the directory (container, bucket) if empty
	//
	// Return an error if it doesn't exist or isn't empty
	Rmdir() error

	// Precision of the ModTimes in this Fs
@@ -101,9 +116,11 @@ type Object interface {
	Remote() string

	// Md5sum returns the md5 checksum of the file
	// If no Md5sum is available it returns ""
	Md5sum() (string, error)

	// ModTime returns the modification date of the file
	// It should return a best guess if one isn't available
	ModTime() time.Time

	// SetModTime sets the metadata on the object to set the modification date
@@ -131,9 +148,77 @@ type Purger interface {
	//
	// Implement this if you have a way of deleting all the files
	// quicker than just running Remove() on the result of List()
	//
	// Return an error if it doesn't exist
	Purge() error
}

type Copier interface {
	// Copy src to this remote using server side copy operations.
	//
	// This is stored with the remote path given
	//
	// It returns the destination Object and a possible error
	//
	// Will only be called if src.Fs().Name() == f.Name()
	//
	// If it isn't possible then return fs.ErrorCantCopy
	Copy(src Object, remote string) (Object, error)
}

type Mover interface {
	// Move src to this remote using server side move operations.
	//
	// This is stored with the remote path given
	//
	// It returns the destination Object and a possible error
	//
	// Will only be called if src.Fs().Name() == f.Name()
	//
	// If it isn't possible then return fs.ErrorCantMove
	Move(src Object, remote string) (Object, error)
}

type DirMover interface {
	// Move src to this remote using server side move operations.
	//
	// Will only be called if src.Fs().Name() == f.Name()
	//
	// If it isn't possible then return fs.ErrorCantDirMove
	//
	// If the destination exists then return fs.ErrorDirExists
	DirMove(src Fs) error
}

// An optional interface for an error as to whether the operation should be retried
//
// This should be returned from Update or Put methods as required
type Retry interface {
	error
	Retry() bool
}

// A type of error
type retryError string

// Error interface
func (r retryError) Error() string {
	return string(r)
}

// Retry interface
func (r retryError) Retry() bool {
	return true
}

// Check interface
var _ Retry = retryError("")

// RetryErrorf makes an error which indicates it would like to be retried
func RetryErrorf(format string, a ...interface{}) error {
	return retryError(fmt.Sprintf(format, a...))
}
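
For illustration, a caller of Update or Put could consume the Retry interface like this; the wrapper below is a sketch, not code from this diff, and maxTries is an arbitrary choice.

package example

import "github.com/ncw/rclone/fs"

// updateWithRetries retries an Update/Put-style operation, but only
// when the returned error implements fs.Retry and asks for a retry.
func updateWithRetries(update func() error) error {
	const maxTries = 3
	var err error
	for try := 1; try <= maxTries; try++ {
		err = update()
		if err == nil {
			return nil
		}
		retryErr, ok := err.(fs.Retry)
		if !ok || !retryErr.Retry() {
			return err // permanent failure - don't retry
		}
	}
	return err
}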
||||
|
||||
// A channel of Objects
type ObjectsChan chan Object

@@ -159,9 +244,6 @@ type Dir struct {
// A channel of Dir objects
type DirChan chan *Dir

// Pattern to match a url
var matcher = regexp.MustCompile(`^([\w_-]+):(.*)$`)

// Finds a FsInfo object for the name passed in
//
// Services are looked up in the config file
@@ -174,34 +256,43 @@ func Find(name string) (*FsInfo, error) {
	return nil, fmt.Errorf("Didn't find filing system for %q", name)
}

// Pattern to match an rclone url
var matcher = regexp.MustCompile(`^([\w_-]+):(.*)$`)

// NewFs makes a new Fs object from the path
//
// The path is of the form service://path
// The path is of the form remote:path
//
// Services are looked up in the config file
// Remotes are looked up in the config file. If the remote isn't
// found then NotFoundInConfigFile will be returned.
//
// On Windows avoid single character remote names as they can be mixed
// up with drive letters.
func NewFs(path string) (Fs, error) {
	parts := matcher.FindStringSubmatch(path)
	fsName, configName, fsPath := "local", "local", path
	if parts != nil {
	if parts != nil && !isDriveLetter(parts[1]) {
		configName, fsPath = parts[1], parts[2]
		var err error
		fsName, err = ConfigFile.GetValue(configName, "type")
		if err != nil {
			return nil, fmt.Errorf("Didn't find section in config file for %q", configName)
			return nil, NotFoundInConfigFile
		}
	}
	fs, err := Find(fsName)
	if err != nil {
		return nil, err
	}
	// change native directory separators to / if there are any
	fsPath = filepath.ToSlash(fsPath)
	return fs.NewFs(configName, fsPath)
}

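A standalone sketch of the `remote:path` parsing that NewFs performs with the matcher regexp. This is illustrative, not the diff's code: `isDriveLetter` here is approximated as a length check, whereas rclone only applies that guard so Windows paths like `C:\...` are treated as local paths rather than remotes.

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as the diff: a remote name, a colon, then the path.
var matcher = regexp.MustCompile(`^([\w_-]+):(.*)$`)

// Approximation of the drive-letter guard mentioned in the diff.
func isDriveLetter(s string) bool { return len(s) == 1 }

// parse returns the config section name and the path within the remote,
// defaulting to the local filesystem when nothing matches.
func parse(path string) (configName, fsPath string) {
	configName, fsPath = "local", path
	if parts := matcher.FindStringSubmatch(path); parts != nil && !isDriveLetter(parts[1]) {
		configName, fsPath = parts[1], parts[2]
	}
	return
}

func main() {
	for _, p := range []string{"remote:some/dir", `C:\Users\me`, "plain/local/path"} {
		c, f := parse(p)
		fmt.Printf("%-20q -> config=%q path=%q\n", p, c, f)
	}
}
```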
// Outputs log for object
func OutputLog(o interface{}, text string, args ...interface{}) {
	description := ""
	if x, ok := o.(fmt.Stringer); ok {
		description = x.String() + ": "
	if o != nil {
		description = fmt.Sprintf("%v: ", o)
	}
	out := fmt.Sprintf(text, args...)
	log.Print(description + out)
@@ -221,6 +312,12 @@ func Log(o interface{}, text string, args ...interface{}) {
	}
}

// Write error log output for this Object or Fs
// Unconditionally logs a message regardless of Config.Quiet or Config.Verbose
func ErrorLog(o interface{}, text string, args ...interface{}) {
	OutputLog(o, text, args...)
}

// checkClose is a utility function used to check the return from
// Close in a defer statement.
func checkClose(c io.Closer, err *error) {

@@ -21,6 +21,11 @@ func NewLimited(fs Fs, objects ...Object) Fs {
	return f
}

// The name of the remote (as passed into NewFs)
func (f *Limited) Name() string {
	return f.fs.Name() // return name of underlying remote
}

// String returns a description of the FS
func (f *Limited) String() string {
	return fmt.Sprintf("%s limited to %d objects", f.fs.String(), len(f.objects))
@@ -76,7 +81,8 @@ func (f *Limited) Mkdir() error {

// Remove the directory (container, bucket) if empty
func (f *Limited) Rmdir() error {
	return fmt.Errorf("Can't rmdir in limited fs")
	// Ignore this in a limited fs
	return nil
}

// Precision of the ModTimes in this Fs
@@ -84,5 +90,23 @@ func (f *Limited) Precision() time.Duration {
	return f.fs.Precision()
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Limited) Copy(src Object, remote string) (Object, error) {
	fCopy, ok := f.fs.(Copier)
	if !ok {
		return nil, ErrorCantCopy
	}
	return fCopy.Copy(src, remote)
}

// Check the interfaces are satisfied
var _ Fs = &Limited{}
var _ Copier = &Limited{}

402
fs/operations.go
@@ -4,8 +4,11 @@ package fs

import (
	"fmt"
	"log"
	"io"
	"mime"
	"path"
	"sync"
	"time"
)

// Work out modify window for fses passed in - sets Config.ModifyWindow
@@ -19,47 +22,69 @@ func CalculateModifyWindow(fs ...Fs) {
			if precision > Config.ModifyWindow {
				Config.ModifyWindow = precision
			}
			if precision == ModTimeNotSupported {
				Debug(f, "Modify window not supported")
				return
			}
		}
	}
	Debug(fs[0], "Modify window is %s\n", Config.ModifyWindow)
	Debug(fs[0], "Modify window is %s", Config.ModifyWindow)
}

// Md5sumsEqual checks to see if src == dst, but ignores empty strings
func Md5sumsEqual(src, dst string) bool {
	if src == "" || dst == "" {
		return true
	}
	return src == dst
}

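Md5sumsEqual treats an empty checksum as "unknown" and therefore a match, so backends that cannot compute MD5s never cause spurious mismatch errors. A tiny standalone demo (not from the diff) of the three interesting cases:

```go
package main

import "fmt"

// Same semantics as the Md5sumsEqual added in the diff above.
func Md5sumsEqual(src, dst string) bool {
	if src == "" || dst == "" {
		return true
	}
	return src == dst
}

func main() {
	fmt.Println(Md5sumsEqual("abc", "abc")) // true  - checksums agree
	fmt.Println(Md5sumsEqual("abc", ""))    // true  - dst unknown, cannot disprove equality
	fmt.Println(Md5sumsEqual("abc", "def")) // false - checksums disagree
}
```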
// Check the two files to see if the MD5sums are the same
//
// Returns two bools, the first of which is equality and the second of
// which is true if either of the MD5SUMs were unset.
//
// May return an error which will already have been logged
//
// If an error is returned it will return false
func CheckMd5sums(src, dst Object) (bool, error) {
// If an error is returned it will return equal as false
func CheckMd5sums(src, dst Object) (equal bool, unset bool, err error) {
	srcMd5, err := src.Md5sum()
	if err != nil {
		Stats.Error()
		Log(src, "Failed to calculate src md5: %s", err)
		return false, err
		ErrorLog(src, "Failed to calculate src md5: %s", err)
		return false, false, err
	}
	if srcMd5 == "" {
		return true, true, nil
	}
	dstMd5, err := dst.Md5sum()
	if err != nil {
		Stats.Error()
		Log(dst, "Failed to calculate dst md5: %s", err)
		return false, err
		ErrorLog(dst, "Failed to calculate dst md5: %s", err)
		return false, false, err
	}
	if dstMd5 == "" {
		return true, true, nil
	}
	// Debug("Src MD5 %s", srcMd5)
	// Debug("Dst MD5 %s", obj.Hash)
	return srcMd5 == dstMd5, nil
	return Md5sumsEqual(srcMd5, dstMd5), false, nil
}

// Checks to see if the src and dst objects are equal by looking at
// size, mtime and MD5SUM
//
// If the src and dst size are different then it is considered to be
// not equal.
// not equal. If --size-only is in effect then this is the only check
// that is done.
//
// If the size is the same and the mtime is the same then it is
// considered to be equal. This is the heuristic rsync uses when
// not using --checksum.
// considered to be equal. This check is skipped if using --checksum.
//
// If the size is the same and the mtime is different or unreadable
// and the MD5SUM is the same then the file is considered to be equal.
// In this case the mtime on the dst is updated.
// If the size is the same and mtime is different, unreadable or
// --checksum is set and the MD5SUM is the same then the file is
// considered to be equal. In this case the mtime on the dst is
// updated if --checksum is not set.
//
// Otherwise the file is considered to be not equal including if there
// were errors reading info.
@@ -68,45 +93,75 @@ func Equal(src, dst Object) bool {
		Debug(src, "Sizes differ")
		return false
	}

	// Size the same so check the mtime
	srcModTime := src.ModTime()
	dstModTime := dst.ModTime()
	dt := dstModTime.Sub(srcModTime)
	ModifyWindow := Config.ModifyWindow
	if dt >= ModifyWindow || dt <= -ModifyWindow {
		Debug(src, "Modification times differ by %s: %v, %v", dt, srcModTime, dstModTime)
	} else {
		Debug(src, "Size and modification time the same (differ by %s, within tolerance %s)", dt, ModifyWindow)
	if Config.SizeOnly {
		Debug(src, "Sizes identical")
		return true
	}

	var srcModTime time.Time
	if !Config.CheckSum {
		if Config.ModifyWindow == ModTimeNotSupported {
			Debug(src, "Sizes identical")
			return true
		}
		// Size the same so check the mtime
		srcModTime = src.ModTime()
		dstModTime := dst.ModTime()
		dt := dstModTime.Sub(srcModTime)
		ModifyWindow := Config.ModifyWindow
		if dt >= ModifyWindow || dt <= -ModifyWindow {
			Debug(src, "Modification times differ by %s: %v, %v", dt, srcModTime, dstModTime)
		} else {
			Debug(src, "Size and modification time the same (differ by %s, within tolerance %s)", dt, ModifyWindow)
			return true
		}
	}

	// mtime is unreadable or different but size is the same so
	// check the MD5SUM
	same, _ := CheckMd5sums(src, dst)
	same, md5unset, _ := CheckMd5sums(src, dst)
	if !same {
		Debug(src, "Md5sums differ")
		return false
	}

	// Size and MD5 the same but mtime different so update the
	// mtime of the dst object here
	dst.SetModTime(srcModTime)
	if !Config.CheckSum {
		// Size and MD5 the same but mtime different so update the
		// mtime of the dst object here
		dst.SetModTime(srcModTime)
	}

	Debug(src, "Size and MD5SUM of src and dst objects identical")
	if md5unset {
		Debug(src, "Size of src and dst objects identical")
	} else {
		Debug(src, "Size and MD5SUM of src and dst objects identical")
	}
	return true
}

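The reworked Equal has a strict decision order: size gates everything, `--size-only` stops there, mtime is consulted only when `--checksum` is off, and MD5 is the final arbiter. A condensed, runnable sketch of that order (the boolean parameters are illustrative stand-ins for the real object comparisons):

```go
package main

import "fmt"

// equalSketch condenses the decision order of the new Equal.
func equalSketch(sizeSame, sizeOnly, checkSum, mtimeClose, md5Same bool) bool {
	if !sizeSame {
		return false // sizes differ: never equal
	}
	if sizeOnly {
		return true // --size-only: size match is enough
	}
	if !checkSum && mtimeClose {
		return true // mtime within the modify window
	}
	return md5Same // mtime differs or --checksum: MD5 decides
}

func main() {
	fmt.Println(equalSketch(true, false, false, true, false)) // true: size + mtime match
	fmt.Println(equalSketch(true, false, true, true, false))  // false: --checksum ignores mtime, MD5 differs
	fmt.Println(equalSketch(true, true, false, false, false)) // true: --size-only
}
```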
// Used to remove a failed copy
func removeFailedCopy(dst Object) {
	if dst != nil {
		Debug(dst, "Removing failed copy")
		removeErr := dst.Remove()
		if removeErr != nil {
			Stats.Error()
			Log(dst, "Failed to remove failed copy: %s", removeErr)
		}
// Returns a guess at the mime type from the extension
func MimeType(o Object) string {
	mimeType := mime.TypeByExtension(path.Ext(o.Remote()))
	if mimeType == "" {
		mimeType = "application/octet-stream"
	}
	return mimeType
}

// Used to remove a failed copy
//
// Returns whether the file was successfully removed or not
func removeFailedCopy(dst Object) bool {
	if dst == nil {
		return false
	}
	Debug(dst, "Removing failed copy")
	removeErr := dst.Remove()
	if removeErr != nil {
		Debug(dst, "Failed to remove failed copy: %s", removeErr)
		return false
	}
	return true
}

// Copy src object to dst or f if nil
@@ -115,29 +170,60 @@ func removeFailedCopy(dst Object) {
// call Copy() with dst nil on a pre-existing file then some filing
// systems (eg Drive) may duplicate the file.
func Copy(f Fs, dst, src Object) {
	in0, err := src.Open()
	if err != nil {
		Stats.Error()
		Log(src, "Failed to open: %s", err)
		return
	}
	in := NewAccount(in0) // account the transfer

	var actionTaken string
	if dst != nil {
		actionTaken = "Copied (updated existing)"
		err = dst.Update(in, src.ModTime(), src.Size())
	const maxTries = 10
	tries := 0
	doUpdate := dst != nil
	var err, inErr error
tryAgain:
	// Try server side copy first - if has optional interface and
	// is same underlying remote
	actionTaken := "Copied (server side copy)"
	if fCopy, ok := f.(Copier); ok && src.Fs().Name() == f.Name() {
		var newDst Object
		newDst, err = fCopy.Copy(src, src.Remote())
		if err == nil {
			dst = newDst
		}
	} else {
		actionTaken = "Copied (new)"
		dst, err = f.Put(in, src.Remote(), src.ModTime(), src.Size())
		err = ErrorCantCopy
	}
	// If can't server side copy, do it manually
	if err == ErrorCantCopy {
		var in0 io.ReadCloser
		in0, err = src.Open()
		if err != nil {
			Stats.Error()
			ErrorLog(src, "Failed to open: %s", err)
			return
		}
		in := NewAccount(in0) // account the transfer

		if doUpdate {
			actionTaken = "Copied (updated existing)"
			err = dst.Update(in, src.ModTime(), src.Size())
		} else {
			actionTaken = "Copied (new)"
			dst, err = f.Put(in, src.Remote(), src.ModTime(), src.Size())
		}
		inErr = in.Close()
	}
	// Retry if err returned a retry error
	if r, ok := err.(Retry); ok && r.Retry() && tries < maxTries {
		tries++
		Log(src, "Received error: %v - retrying %d/%d", err, tries, maxTries)
		if removeFailedCopy(dst) {
			// If we removed dst, then nil it out and note we are not updating
			dst = nil
			doUpdate = false
		}
		goto tryAgain
	}
	inErr := in.Close()
	if err == nil {
		err = inErr
	}
	if err != nil {
		Stats.Error()
		Log(src, "Failed to copy: %s", err)
		ErrorLog(src, "Failed to copy: %s", err)
		removeFailedCopy(dst)
		return
	}
@@ -146,27 +232,29 @@ func Copy(f Fs, dst, src Object) {
	if src.Size() != dst.Size() {
		Stats.Error()
		err = fmt.Errorf("Corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size())
		Log(dst, "%s", err)
		ErrorLog(dst, "%s", err)
		removeFailedCopy(dst)
		return
	}

	// Verify md5sums are the same after transfer - ignoring blank md5sums
	srcMd5sum, md5sumErr := src.Md5sum()
	if md5sumErr != nil {
		Stats.Error()
		Log(src, "Failed to read md5sum: %s", md5sumErr)
	} else if srcMd5sum != "" {
		dstMd5sum, md5sumErr := dst.Md5sum()
	if !Config.SizeOnly {
		srcMd5sum, md5sumErr := src.Md5sum()
		if md5sumErr != nil {
			Stats.Error()
			Log(dst, "Failed to read md5sum: %s", md5sumErr)
		} else if dstMd5sum != "" && srcMd5sum != dstMd5sum {
			Stats.Error()
			err = fmt.Errorf("Corrupted on transfer: md5sums differ %q vs %q", srcMd5sum, dstMd5sum)
			Log(dst, "%s", err)
			removeFailedCopy(dst)
			return
			ErrorLog(src, "Failed to read md5sum: %s", md5sumErr)
		} else if srcMd5sum != "" {
			dstMd5sum, md5sumErr := dst.Md5sum()
			if md5sumErr != nil {
				Stats.Error()
				ErrorLog(dst, "Failed to read md5sum: %s", md5sumErr)
			} else if !Md5sumsEqual(srcMd5sum, dstMd5sum) {
				Stats.Error()
				err = fmt.Errorf("Corrupted on transfer: md5sums differ %q vs %q", srcMd5sum, dstMd5sum)
				ErrorLog(dst, "%s", err)
				removeFailedCopy(dst)
				return
			}
		}
	}

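The core pattern in the new Copy is "try the optional Copier interface, fall back to a streaming copy when the backend can't do it server side". A standalone sketch of that shape; Copier and ErrorCantCopy mirror the names in the diff, but the signatures here are simplified and the filesystems are illustrative stubs:

```go
package main

import (
	"errors"
	"fmt"
)

// ErrorCantCopy signals that a server side copy is not possible.
var ErrorCantCopy = errors.New("can't server side copy")

// Copier is the optional interface; signature simplified for the sketch.
type Copier interface {
	Copy(src, remote string) error
}

type serverSideFs struct{}

func (serverSideFs) Copy(src, remote string) error {
	fmt.Printf("server side copy %s -> %s\n", src, remote)
	return nil
}

// copyFile tries the optional interface first, then falls back.
func copyFile(f interface{}, src, remote string) {
	err := ErrorCantCopy
	if fCopy, ok := f.(Copier); ok {
		err = fCopy.Copy(src, remote)
	}
	if err == ErrorCantCopy {
		fmt.Printf("streaming copy %s -> %s\n", src, remote) // manual fallback
	}
}

func main() {
	copyFile(serverSideFs{}, "a.txt", "a.txt") // uses the optional interface
	copyFile(struct{}{}, "b.txt", "b.txt")     // falls back to streaming
}
```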
@@ -207,7 +295,7 @@ func PairChecker(in ObjectPairChan, out ObjectPairChan, wg *sync.WaitGroup) {
}

// Read Objects on in and copy them
func Copier(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
func PairCopier(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
	defer wg.Done()
	for pair := range in {
		src := pair.src
@@ -221,16 +309,43 @@ func Copier(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
	}
}

// Read Objects on in and move them if possible, or copy them if not
func PairMover(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
	defer wg.Done()
	// See if we have Move available
	fdstMover, haveMover := fdst.(Mover)
	for pair := range in {
		src := pair.src
		dst := pair.dst
		Stats.Transferring(src)
		if Config.DryRun {
			Debug(src, "Not moving as --dry-run")
		} else if haveMover {
			// Delete destination if it exists
			if pair.dst != nil {
				err := dst.Remove()
				if err != nil {
					Stats.Error()
					ErrorLog(dst, "Couldn't delete: %s", err)
				}
			}
			fdstMover.Move(src, src.Remote())
			Debug(src, "Moved")
		} else {
			Copy(fdst, pair.dst, src)
		}
		Stats.DoneTransferring(src)
	}
}

// Delete all the files passed in the channel
func DeleteFiles(to_be_deleted ObjectsChan) {
	var wg sync.WaitGroup
	wg.Add(Config.Transfers)
	var fs Fs
	for i := 0; i < Config.Transfers; i++ {
		go func() {
			defer wg.Done()
			for dst := range to_be_deleted {
				fs = dst.Fs()
				if Config.DryRun {
					Debug(dst, "Not deleting as --dry-run")
				} else {
@@ -239,7 +354,7 @@ func DeleteFiles(to_be_deleted ObjectsChan) {
					Stats.DoneChecking(dst)
					if err != nil {
						Stats.Error()
						Log(dst, "Couldn't delete: %s", err)
						ErrorLog(dst, "Couldn't delete: %s", err)
					} else {
						Debug(dst, "Deleted")
					}
@@ -247,15 +362,30 @@ func DeleteFiles(to_be_deleted ObjectsChan) {
			}
		}()
	}

	Log(fs, "Waiting for deletions to finish")
	Log(nil, "Waiting for deletions to finish")
	wg.Wait()
}

// Read a map of Object.Remote to Object for the given Fs
func readFilesMap(fs Fs) map[string]Object {
	files := make(map[string]Object)
	for o := range fs.List() {
		remote := o.Remote()
		if _, ok := files[remote]; !ok {
			files[remote] = o
		} else {
			Log(o, "Duplicate file detected")
		}
	}
	return files
}

// Syncs fsrc into fdst
//
// If Delete is true then it deletes any files in fdst that aren't in fsrc
func Sync(fdst, fsrc Fs, Delete bool) error {
//
// If DoMove is true then files will be moved instead of copied
func syncCopyMove(fdst, fsrc Fs, Delete bool, DoMove bool) error {
	err := fdst.Mkdir()
	if err != nil {
		Stats.Error()
@@ -266,10 +396,7 @@ func Sync(fdst, fsrc Fs, Delete bool) error {

	// Read the destination files first
	// FIXME could do this in parallel and make it use less memory
	delFiles := make(map[string]Object)
	for dst := range fdst.List() {
		delFiles[dst.Remote()] = dst
	}
	delFiles := readFilesMap(fdst)

	// Read source files checking them off against dest files
	to_be_checked := make(ObjectPairChan, Config.Transfers)
@@ -284,7 +411,11 @@ func Sync(fdst, fsrc Fs, Delete bool) error {
	var copierWg sync.WaitGroup
	copierWg.Add(Config.Transfers)
	for i := 0; i < Config.Transfers; i++ {
		go Copier(to_be_uploaded, fdst, &copierWg)
		if DoMove {
			go PairMover(to_be_uploaded, fdst, &copierWg)
		} else {
			go PairCopier(to_be_uploaded, fdst, &copierWg)
		}
	}

	go func() {
@@ -311,7 +442,7 @@ func Sync(fdst, fsrc Fs, Delete bool) error {
	// Delete files if asked
	if Delete {
		if Stats.Errored() {
			Log(fdst, "Not deleting files as there were IO errors")
			ErrorLog(fdst, "Not deleting files as there were IO errors")
			return nil
		}

@@ -328,41 +459,77 @@ func Sync(fdst, fsrc Fs, Delete bool) error {
	return nil
}

// Syncs fsrc into fdst
func Sync(fdst, fsrc Fs) error {
	return syncCopyMove(fdst, fsrc, true, false)
}

// Copies fsrc into fdst
func CopyDir(fdst, fsrc Fs) error {
	return syncCopyMove(fdst, fsrc, false, false)
}

// Moves fsrc into fdst
func MoveDir(fdst, fsrc Fs) error {
	// First attempt to use DirMover
	if fdstDirMover, ok := fdst.(DirMover); ok && fsrc.Name() == fdst.Name() {
		err := fdstDirMover.DirMove(fsrc)
		Debug(fdst, "Using server side directory move")
		switch err {
		case ErrorCantDirMove, ErrorDirExists:
			Debug(fdst, "Server side directory move failed - fallback to copy/delete: %v", err)
		case nil:
			Debug(fdst, "Server side directory move succeeded")
			return nil
		default:
			Stats.Error()
			ErrorLog(fdst, "Server side directory move failed: %v", err)
			return err
		}
	}

	// Now move the files
	err := syncCopyMove(fdst, fsrc, false, true)
	if err != nil || Stats.Errored() {
		ErrorLog(fdst, "Not deleting files as there were IO errors")
		return err
	}
	return Purge(fsrc)
}

// Checks the files in fsrc and fdst according to Size and MD5SUM
func Check(fdst, fsrc Fs) error {
	Log(fdst, "Building file list")

	// Read the destination files first
	// FIXME could do this in parallel and make it use less memory
	dstFiles := make(map[string]Object)
	for dst := range fdst.List() {
		dstFiles[dst.Remote()] = dst
	}
	dstFiles := readFilesMap(fdst)

	// Read the source files checking them against dstFiles
	// FIXME could do this in parallel and make it use less memory
	srcFiles := make(map[string]Object)
	srcFiles := readFilesMap(fsrc)

	// Move all the common files into commonFiles and delete them
	// from srcFiles and dstFiles
	commonFiles := make(map[string][]Object)
	for src := range fsrc.List() {
		remote := src.Remote()
	for remote, src := range srcFiles {
		if dst, ok := dstFiles[remote]; ok {
			commonFiles[remote] = []Object{dst, src}
			delete(srcFiles, remote)
			delete(dstFiles, remote)
		} else {
			srcFiles[remote] = src
		}
	}

	Log(fdst, "%d files not in %v", len(dstFiles), fsrc)
	for _, dst := range dstFiles {
		Stats.Error()
		Log(dst, "File not in %v", fsrc)
		ErrorLog(dst, "File not in %v", fsrc)
	}

	Log(fsrc, "%d files not in %s", len(srcFiles), fdst)
	for _, src := range srcFiles {
		Stats.Error()
		Log(src, "File not in %v", fdst)
		ErrorLog(src, "File not in %v", fdst)
	}

	checks := make(chan []Object, Config.Transfers)
@@ -384,17 +551,17 @@ func Check(fdst, fsrc Fs) error {
			if src.Size() != dst.Size() {
				Stats.DoneChecking(src)
				Stats.Error()
				Log(src, "Sizes differ")
				ErrorLog(src, "Sizes differ")
				continue
			}
			same, err := CheckMd5sums(src, dst)
			same, _, err := CheckMd5sums(src, dst)
			Stats.DoneChecking(src)
			if err != nil {
				continue
			}
			if !same {
				Stats.Error()
				Log(src, "Md5sums differ")
				ErrorLog(src, "Md5sums differ")
			}
			Debug(src, "OK")
		}
@@ -429,14 +596,24 @@ func ListFn(f Fs, fn func(Object)) error {
	return nil
}

// mutex for synchronized output
var outMutex sync.Mutex

// Synchronized fmt.Fprintf
func syncFprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
	outMutex.Lock()
	defer outMutex.Unlock()
	return fmt.Fprintf(w, format, a...)
}

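syncFprintf is why List and friends now take an io.Writer: listings run in parallel, and several goroutines writing to the same stream without a lock could interleave partial lines. A standalone demo (the goroutine bodies are illustrative) of the mutex-guarded writer:

```go
package main

import (
	"fmt"
	"io"
	"os"
	"sync"
)

var outMutex sync.Mutex

// syncFprintf serialises writes so each formatted line comes out whole.
func syncFprintf(w io.Writer, format string, a ...interface{}) (int, error) {
	outMutex.Lock()
	defer outMutex.Unlock()
	return fmt.Fprintf(w, format, a...)
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			// Each goroutine emits one complete line, never fragments.
			syncFprintf(os.Stdout, "%9d listing-from-goroutine-%d\n", n*100, n)
		}(i)
	}
	wg.Wait()
}
```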
// List the Fs to stdout
//
// Shows size and path
//
// Lists in parallel which may get them out of order
func List(f Fs) error {
func List(f Fs, w io.Writer) error {
	return ListFn(f, func(o Object) {
		fmt.Printf("%9d %s\n", o.Size(), o.Remote())
		syncFprintf(w, "%9d %s\n", o.Size(), o.Remote())
	})
}

@@ -445,12 +622,12 @@ func List(f Fs) error {
// Shows size, mod time and path
//
// Lists in parallel which may get them out of order
func ListLong(f Fs) error {
func ListLong(f Fs, w io.Writer) error {
	return ListFn(f, func(o Object) {
		Stats.Checking(o)
		modTime := o.ModTime()
		Stats.DoneChecking(o)
		fmt.Printf("%9d %19s %s\n", o.Size(), modTime.Format("2006-01-02 15:04:05.00000000"), o.Remote())
		syncFprintf(w, "%9d %s %s\n", o.Size(), modTime.Format("2006-01-02 15:04:05.000000000"), o.Remote())
	})
}

@@ -459,23 +636,23 @@ func ListLong(f Fs) error {
// Produces the same output as the md5sum command
//
// Lists in parallel which may get them out of order
func Md5sum(f Fs) error {
func Md5sum(f Fs, w io.Writer) error {
	return ListFn(f, func(o Object) {
		Stats.Checking(o)
		md5sum, err := o.Md5sum()
		Stats.DoneChecking(o)
		if err != nil {
			Debug(o, "Failed to read MD5: %v", err)
			md5sum = "UNKNOWN"
			md5sum = "ERROR"
		}
		fmt.Printf("%32s %s\n", md5sum, o.Remote())
		syncFprintf(w, "%32s %s\n", md5sum, o.Remote())
	})
}

// List the directories/buckets/containers in the Fs to stdout
func ListDir(f Fs) error {
func ListDir(f Fs, w io.Writer) error {
	for dir := range f.ListDir() {
		fmt.Printf("%12d %13s %9d %s\n", dir.Bytes, dir.When.Format("2006-01-02 15:04:05"), dir.Count, dir.Name)
		syncFprintf(w, "%12d %13s %9d %s\n", dir.Bytes, dir.When.Format("2006-01-02 15:04:05"), dir.Count, dir.Name)
	}
	return nil
}
@@ -508,20 +685,21 @@ func Rmdir(f Fs) error {
//
// FIXME doesn't delete local directories
func Purge(f Fs) error {
	var err error
	if purger, ok := f.(Purger); ok {
		if Config.DryRun {
			Debug(f, "Not purging as --dry-run set")
		} else {
			err := purger.Purge()
			if err != nil {
				Stats.Error()
				return err
			}
			err = purger.Purge()
		}
	} else {
		// DeleteFiles and Rmdir observe --dry-run
		DeleteFiles(f.List())
		log.Printf("Deleting path")
		Rmdir(f)
		err = Rmdir(f)
	}
	if err != nil {
		Stats.Error()
		return err
	}
	return nil
}

539
fs/operations_test.go
Normal file
@@ -0,0 +1,539 @@
// Test rclone by doing real transactions to a storage provider to and
// from the local disk

package fs_test

import (
	"bytes"
	"flag"
	"io/ioutil"
	"log"
	"os"
	"path"
	"path/filepath"
	"regexp"
	"strings"
	"testing"
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest"

	// Active file systems
	_ "github.com/ncw/rclone/drive"
	_ "github.com/ncw/rclone/dropbox"
	_ "github.com/ncw/rclone/googlecloudstorage"
	_ "github.com/ncw/rclone/local"
	_ "github.com/ncw/rclone/s3"
	_ "github.com/ncw/rclone/swift"
)

// Globals
var (
	localName, remoteName string
	flocal, fremote       fs.Fs
	RemoteName            = flag.String("remote", "", "Remote to test with, defaults to local filesystem")
	SubDir                = flag.Bool("subdir", false, "Set to test with a sub directory")
	Verbose               = flag.Bool("verbose", false, "Set to enable logging")
	finalise              func()
)

// Write a file
func WriteFile(filePath, content string, t time.Time) {
	// FIXME make directories?
	filePath = path.Join(localName, filePath)
	dirPath := path.Dir(filePath)
	err := os.MkdirAll(dirPath, 0770)
	if err != nil {
		log.Fatalf("Failed to make directories %q: %v", dirPath, err)
	}
	err = ioutil.WriteFile(filePath, []byte(content), 0600)
	if err != nil {
		log.Fatalf("Failed to write file %q: %v", filePath, err)
	}
	err = os.Chtimes(filePath, t, t)
	if err != nil {
		log.Fatalf("Failed to chtimes file %q: %v", filePath, err)
	}
}

var t1 = fstest.Time("2001-02-03T04:05:06.499999999Z")
var t2 = fstest.Time("2011-12-25T12:59:59.123456789Z")
var t3 = fstest.Time("2011-12-30T12:59:59.000000000Z")

func TestInit(t *testing.T) {
	fs.LoadConfig()
	fs.Config.Verbose = *Verbose
	fs.Config.Quiet = !*Verbose
	var err error
	fremote, finalise, err = fstest.RandomRemote(*RemoteName, *SubDir)
	if err != nil {
		t.Fatalf("Failed to open remote %q: %v", *RemoteName, err)
	}
	t.Logf("Testing with remote %v", fremote)

	localName, err = ioutil.TempDir("", "rclone")
	if err != nil {
		t.Fatalf("Failed to create temp dir: %v", err)
	}
	localName = filepath.ToSlash(localName)
	t.Logf("Testing with local %q", localName)
	flocal, err = fs.NewFs(localName)
	if err != nil {
		t.Fatalf("Failed to make %q: %v", remoteName, err)
	}

}
func TestCalculateModifyWindow(t *testing.T) {
	fs.CalculateModifyWindow(fremote, flocal)
	t.Logf("ModifyWindow is %q", fs.Config.ModifyWindow)
}

func TestMkdir(t *testing.T) {
	fstest.TestMkdir(t, fremote)
}

// Check dry run is working
func TestCopyWithDryRun(t *testing.T) {
	WriteFile("sub dir/hello world", "hello world", t1)

	fs.Config.DryRun = true
	err := fs.CopyDir(fremote, flocal)
	fs.Config.DryRun = false
	if err != nil {
		t.Fatalf("Copy failed: %v", err)
	}

	items := []fstest.Item{
		{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
	}

	fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
	fstest.CheckListingWithPrecision(t, fremote, []fstest.Item{}, fs.Config.ModifyWindow)
}

// Now without dry run
func TestCopy(t *testing.T) {
	err := fs.CopyDir(fremote, flocal)
	if err != nil {
		t.Fatalf("Copy failed: %v", err)
	}

	items := []fstest.Item{
		{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
	}

	fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
	fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}

// Test a server side copy if possible, or the backup path if not
func TestServerSideCopy(t *testing.T) {
	fremoteCopy, finaliseCopy, err := fstest.RandomRemote(*RemoteName, *SubDir)
	if err != nil {
		t.Fatalf("Failed to open remote copy %q: %v", *RemoteName, err)
	}
	defer finaliseCopy()
	t.Logf("Server side copy (if possible) %v -> %v", fremote, fremoteCopy)

	err = fs.CopyDir(fremoteCopy, fremote)
	if err != nil {
		t.Fatalf("Server Side Copy failed: %v", err)
	}

	items := []fstest.Item{
		{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
	}

	fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
	fstest.CheckListingWithPrecision(t, fremoteCopy, items, fs.Config.ModifyWindow)
}

func TestLsd(t *testing.T) {
	var buf bytes.Buffer
	err := fs.ListDir(fremote, &buf)
	if err != nil {
		t.Fatalf("ListDir failed: %v", err)
	}
	res := buf.String()
	if !strings.Contains(res, "sub dir\n") {
		t.Fatalf("Result wrong %q", res)
	}
}

// Now delete the local file and download it
func TestCopyAfterDelete(t *testing.T) {
	err := os.Remove(localName + "/sub dir/hello world")
	if err != nil {
		t.Fatalf("Remove failed: %v", err)
	}

	items := []fstest.Item{
		{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
	}
	fstest.CheckListingWithPrecision(t, flocal, []fstest.Item{}, fs.Config.ModifyWindow)
	fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}

func TestCopyRedownload(t *testing.T) {
	err := fs.CopyDir(flocal, fremote)
	if err != nil {
		t.Fatalf("Copy failed: %v", err)
	}

	items := []fstest.Item{
		{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
	}
	fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
	fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)

	// Clean the directory
	cleanTempDir(t)
}

// Create a file and sync it. Change the last modified date and resync.
// If we're only doing sync by size and checksum, we expect nothing to
// be transferred on the second sync.
func TestSyncBasedOnCheckSum(t *testing.T) {
	cleanTempDir(t)
	fs.Config.CheckSum = true
	defer func() { fs.Config.CheckSum = false }()

	WriteFile("check sum", "", t1)
	local_items := []fstest.Item{
		{Path: "check sum", Size: 0, ModTime: t1, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
	}
	fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)

	fs.Stats.ResetCounters()
	err := fs.Sync(fremote, flocal)
	if err != nil {
		t.Fatalf("Initial sync failed: %v", err)
	}

	// We should have transferred exactly one file.
	if fs.Stats.GetTransfers() != 1 {
		t.Fatalf("Sync 1: want 1 transfer, got %d", fs.Stats.GetTransfers())
	}

	remote_items := local_items
	fstest.CheckListingWithPrecision(t, fremote, remote_items, fs.Config.ModifyWindow)

	err = os.Chtimes(localName+"/check sum", t2, t2)
	if err != nil {
		t.Fatalf("Chtimes failed: %v", err)
	}
	local_items = []fstest.Item{
		{Path: "check sum", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
	}
	fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)

	fs.Stats.ResetCounters()
	err = fs.Sync(fremote, flocal)
	if err != nil {
		t.Fatalf("Sync failed: %v", err)
	}

	// We should have transferred no files
	if fs.Stats.GetTransfers() != 0 {
		t.Fatalf("Sync 2: want 0 transfers, got %d", fs.Stats.GetTransfers())
	}

	fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)
	fstest.CheckListingWithPrecision(t, fremote, remote_items, fs.Config.ModifyWindow)

	cleanTempDir(t)
}

// Create a file and sync it. Change the last modified date and the
// file contents but not the size. If we're only doing sync by size
// only, we expect nothing to be transferred on the second sync.
func TestSyncSizeOnly(t *testing.T) {
	cleanTempDir(t)
	fs.Config.SizeOnly = true
	defer func() { fs.Config.SizeOnly = false }()

	WriteFile("sizeonly", "potato", t1)
	local_items := []fstest.Item{
		{Path: "sizeonly", Size: 6, ModTime: t1, Md5sum: "8ee2027983915ec78acc45027d874316"},
	}
	fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)

	fs.Stats.ResetCounters()
	err := fs.Sync(fremote, flocal)
	if err != nil {
		t.Fatalf("Initial sync failed: %v", err)
	}

	// We should have transferred exactly one file.
	if fs.Stats.GetTransfers() != 1 {
		t.Fatalf("Sync 1: want 1 transfer, got %d", fs.Stats.GetTransfers())
	}

	remote_items := local_items
	fstest.CheckListingWithPrecision(t, fremote, remote_items, fs.Config.ModifyWindow)

	// Update mtime, md5sum but not length of file
	WriteFile("sizeonly", "POTATO", t2)
	local_items = []fstest.Item{
		{Path: "sizeonly", Size: 6, ModTime: t2, Md5sum: "8ac6f27a282e4938125482607ccfb55f"},
	}
	fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)

	fs.Stats.ResetCounters()
	err = fs.Sync(fremote, flocal)
	if err != nil {
		t.Fatalf("Sync failed: %v", err)
	}

	// We should have transferred no files
	if fs.Stats.GetTransfers() != 0 {
		t.Fatalf("Sync 2: want 0 transfers, got %d", fs.Stats.GetTransfers())
	}

	fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)
	fstest.CheckListingWithPrecision(t, fremote, remote_items, fs.Config.ModifyWindow)

	cleanTempDir(t)
}

func TestSyncAfterChangingModtimeOnly(t *testing.T) {
	WriteFile("empty space", "", t1)

	err := os.Chtimes(localName+"/empty space", t2, t2)
	if err != nil {
		t.Fatalf("Chtimes failed: %v", err)
	}
	err = fs.Sync(fremote, flocal)
	if err != nil {
		t.Fatalf("Sync failed: %v", err)
	}
	items := []fstest.Item{
		{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
	}
	fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
	fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}

func TestSyncAfterAddingAFile(t *testing.T) {
	WriteFile("potato", "------------------------------------------------------------", t3)
	err := fs.Sync(fremote, flocal)
	if err != nil {
		t.Fatalf("Sync failed: %v", err)
	}
	items := []fstest.Item{
		{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
		{Path: "potato", Size: 60, ModTime: t3, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
	}
	fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
	fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}

func TestSyncAfterChangingFilesSizeOnly(t *testing.T) {
	WriteFile("potato", "smaller but same date", t3)
	err := fs.Sync(fremote, flocal)
	if err != nil {
		t.Fatalf("Sync failed: %v", err)
	}
	items := []fstest.Item{
		{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
		{Path: "potato", Size: 21, ModTime: t3, Md5sum: "100defcf18c42a1e0dc42a789b107cd2"},
	}
	fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
	fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}

// Sync after changing a file's contents, modtime but not length
func TestSyncAfterChangingContentsOnly(t *testing.T) {
	WriteFile("potato", "SMALLER BUT SAME DATE", t2)
	err := fs.Sync(fremote, flocal)
	if err != nil {
		t.Fatalf("Sync failed: %v", err)
	}
	items := []fstest.Item{
		{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
		{Path: "potato", Size: 21, ModTime: t2, Md5sum: "e4cb6955d9106df6263c45fcfc10f163"},
	}
	fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
	fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}

// Sync after removing a file and adding a file --dry-run
func TestSyncAfterRemovingAFileAndAddingAFileDryRun(t *testing.T) {
	WriteFile("potato2", "------------------------------------------------------------", t1)
	err := os.Remove(localName + "/potato")
	if err != nil {
		t.Fatalf("Remove failed: %v", err)
	}
	fs.Config.DryRun = true
	err = fs.Sync(fremote, flocal)
	fs.Config.DryRun = false
	if err != nil {
		t.Fatalf("Sync failed: %v", err)
	}

	before := []fstest.Item{
		{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
		{Path: "potato", Size: 21, ModTime: t2, Md5sum: "e4cb6955d9106df6263c45fcfc10f163"},
	}
	items := []fstest.Item{
		{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
		{Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
	}
	fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
	fstest.CheckListingWithPrecision(t, fremote, before, fs.Config.ModifyWindow)
}

// Sync after removing a file and adding a file
func TestSyncAfterRemovingAFileAndAddingAFile(t *testing.T) {
	err := fs.Sync(fremote, flocal)
	if err != nil {
		t.Fatalf("Sync failed: %v", err)
	}
	items := []fstest.Item{
		{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
		{Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
	}
	fstest.CheckListingWithPrecision(t, flocal, items, fs.Config.ModifyWindow)
	fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}

// Test a server side move if possible, or the backup path if not
func TestServerSideMove(t *testing.T) {
	fremoteMove, finaliseMove, err := fstest.RandomRemote(*RemoteName, *SubDir)
	if err != nil {
		t.Fatalf("Failed to open remote move %q: %v", *RemoteName, err)
	}
	defer finaliseMove()
	t.Logf("Server side move (if possible) %v -> %v", fremote, fremoteMove)

	// Start with a copy
	err = fs.CopyDir(fremoteMove, fremote)
	if err != nil {
		t.Fatalf("Server Side Copy failed: %v", err)
	}

	// Remove one file
	obj := fremoteMove.NewFsObject("potato2")
	if obj == nil {
		t.Fatalf("Failed to find potato2")
	}
	err = obj.Remove()
	if err != nil {
		t.Fatalf("Failed to remove object: %v", err)
	}

	// Do server side move
	err = fs.MoveDir(fremoteMove, fremote)
	if err != nil {
		t.Fatalf("Server Side Move failed: %v", err)
	}

	items := []fstest.Item{
		{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
		{Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
	}

	fstest.CheckListingWithPrecision(t, fremote, items[:0], fs.Config.ModifyWindow)
	fstest.CheckListingWithPrecision(t, fremoteMove, items, fs.Config.ModifyWindow)

	// Move it back again, dst does not exist this time
	err = fs.MoveDir(fremote, fremoteMove)
	if err != nil {
		t.Fatalf("Server Side Move 2 failed: %v", err)
	}

	fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
	fstest.CheckListingWithPrecision(t, fremoteMove, items[:0], fs.Config.ModifyWindow)
}

func TestLs(t *testing.T) {
	var buf bytes.Buffer
	err := fs.List(fremote, &buf)
	if err != nil {
		t.Fatalf("List failed: %v", err)
	}
	res := buf.String()
	if !strings.Contains(res, " 0 empty space\n") {
		t.Errorf("empty space missing: %q", res)
	}
	if !strings.Contains(res, " 60 potato2\n") {
		t.Errorf("potato2 missing: %q", res)
	}
}

func TestLsLong(t *testing.T) {
	var buf bytes.Buffer
	err := fs.ListLong(fremote, &buf)
	if err != nil {
		t.Fatalf("List failed: %v", err)
	}
	res := buf.String()
	lines := strings.Split(strings.Trim(res, "\n"), "\n")
	if len(lines) != 2 {
		t.Fatalf("Wrong number of lines in list: %q", lines)
	}

	timeFormat := "2006-01-02 15:04:05.000000000"
	precision := fremote.Precision()
	checkTime := func(m, filename string, expected time.Time) {
		modTime, err := time.Parse(timeFormat, m)
		if err != nil {
			t.Errorf("Error parsing %q: %v", m, err)
		} else {
			dt, ok := fstest.CheckTimeEqualWithPrecision(expected, modTime, precision)
			if !ok {
				t.Errorf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", filename, dt, precision, modTime, expected, precision)
			}
		}
	}

	m1 := regexp.MustCompile(`(?m)^ 0 (\d{4}-\d\d-\d\d \d\d:\d\d:\d\d\.\d{9}) empty space$`)
	if ms := m1.FindStringSubmatch(res); ms == nil {
		t.Errorf("empty space missing: %q", res)
	} else {
		checkTime(ms[1], "empty space", t2.Local())
	}

	m2 := regexp.MustCompile(`(?m)^ 60 (\d{4}-\d\d-\d\d \d\d:\d\d:\d\d\.\d{9}) potato2$`)
	if ms := m2.FindStringSubmatch(res); ms == nil {
		t.Errorf("potato2 missing: %q", res)
	} else {
		checkTime(ms[1], "potato2", t1.Local())
	}
}

func TestMd5sum(t *testing.T) {
	var buf bytes.Buffer
	err := fs.Md5sum(fremote, &buf)
	if err != nil {
		t.Fatalf("List failed: %v", err)
	}
	res := buf.String()
	if !strings.Contains(res, "d41d8cd98f00b204e9800998ecf8427e empty space\n") &&
		!strings.Contains(res, " empty space\n") {
		t.Errorf("empty space missing: %q", res)
	}
	if !strings.Contains(res, "6548b156ea68a4e003e786df99eee76 potato2\n") &&
		!strings.Contains(res, " potato2\n") {
		t.Errorf("potato2 missing: %q", res)
	}
}

func TestCheck(t *testing.T) {
}

// Clean the temporary directory
func cleanTempDir(t *testing.T) {
	t.Logf("Cleaning temporary directory: %q", localName)
	err := os.RemoveAll(localName)
	if err != nil {
		t.Logf("Failed to remove %q: %v", localName, err)
	}
}

func TestFinalise(t *testing.T) {
	finalise()

	cleanTempDir(t)
}
29
fs/test_all.sh
Executable file
@@ -0,0 +1,29 @@
#!/bin/bash

go install

REMOTES="
TestSwift:
TestS3:
TestDrive:
TestGoogleCloudStorage:
TestDropbox:
"

function test_remote {
    args=$@
    echo "@go test $args"
    go test $args || {
        echo "*** test $args FAILED ***"
        exit 1
    }
}

test_remote
test_remote --subdir
for remote in $REMOTES; do
    test_remote --remote $remote
    test_remote --remote $remote --subdir
done

echo "All OK"
@@ -1,3 +1,3 @@
package fs

const Version = "v1.02"
const Version = "v1.19"

245
fstest/fstest.go
Normal file
@@ -0,0 +1,245 @@
// Utilities for testing the Fs
package fstest

// FIXME put name of test FS in Fs structure

import (
	"io/ioutil"
	"log"
	"math/rand"
	"os"
	"path/filepath"
	"strings"
	"testing"
	"time"

	"github.com/ncw/rclone/fs"
)

// Seed the random number generator
func init() {
	rand.Seed(time.Now().UnixNano())

}

// Represents an item for checking
type Item struct {
	Path    string
	Md5sum  string
	ModTime time.Time
	Size    int64
}

// Checks the times are equal within the precision, returns the delta and a flag
func CheckTimeEqualWithPrecision(t0, t1 time.Time, precision time.Duration) (time.Duration, bool) {
	dt := t0.Sub(t1)
	if dt >= precision || dt <= -precision {
		return dt, false
	}
	return dt, true
}

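CheckTimeEqualWithPrecision is the heart of the new test helpers: two times compare as equal whenever they sit within ±precision of each other, which is what lets the same listing checks work on backends with second-granularity timestamps. A quick standalone demo (the sample times are illustrative):

```go
package main

import (
	"fmt"
	"time"
)

// Same logic as the helper added in the diff above.
func CheckTimeEqualWithPrecision(t0, t1 time.Time, precision time.Duration) (time.Duration, bool) {
	dt := t0.Sub(t1)
	if dt >= precision || dt <= -precision {
		return dt, false
	}
	return dt, true
}

func main() {
	base := time.Date(2001, 2, 3, 4, 5, 6, 0, time.UTC)
	within := base.Add(500 * time.Millisecond)
	outside := base.Add(2 * time.Second)

	dt, ok := CheckTimeEqualWithPrecision(base, within, time.Second)
	fmt.Println(dt, ok) // -500ms true: inside the window
	dt, ok = CheckTimeEqualWithPrecision(base, outside, time.Second)
	fmt.Println(dt, ok) // -2s false: outside the window
}
```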
// check the mod time to the given precision
func (i *Item) CheckModTime(t *testing.T, obj fs.Object, modTime time.Time, precision time.Duration) {
	dt, ok := CheckTimeEqualWithPrecision(modTime, i.ModTime, precision)
	if !ok {
		t.Errorf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", obj.Remote(), dt, precision, modTime, i.ModTime, precision)
	}
}

func (i *Item) Check(t *testing.T, obj fs.Object, precision time.Duration) {
	if obj == nil {
		t.Fatalf("Object is nil")
	}
	// Check attributes
	Md5sum, err := obj.Md5sum()
	if err != nil {
		t.Fatalf("Failed to read md5sum for %q: %v", obj.Remote(), err)
	}
	if !fs.Md5sumsEqual(i.Md5sum, Md5sum) {
		t.Errorf("%s: Md5sum incorrect - expecting %q got %q", obj.Remote(), i.Md5sum, Md5sum)
	}
	if i.Size != obj.Size() {
		t.Errorf("%s: Size incorrect - expecting %d got %d", obj.Remote(), i.Size, obj.Size())
	}
	i.CheckModTime(t, obj, obj.ModTime(), precision)
}

// Represents all items for checking
type Items struct {
	byName map[string]*Item
	items  []Item
}

// Make an Items
func NewItems(items []Item) *Items {
	is := &Items{
		byName: make(map[string]*Item),
		items:  items,
	}
	// Fill up byName
	for i := range items {
		is.byName[items[i].Path] = &items[i]
	}
	return is
}

// Check off an item
func (is *Items) Find(t *testing.T, obj fs.Object, precision time.Duration) {
	i, ok := is.byName[obj.Remote()]
	if !ok {
		t.Errorf("Unexpected file %q", obj.Remote())
		return
	}
	delete(is.byName, obj.Remote())
	i.Check(t, obj, precision)
}

// Check all done
func (is *Items) Done(t *testing.T) {
	if len(is.byName) != 0 {
		for name := range is.byName {
			log.Printf("Not found %q", name)
		}
		t.Errorf("%d objects not found", len(is.byName))
	}
}

// Checks the fs to see if it has the expected contents
func CheckListingWithPrecision(t *testing.T, f fs.Fs, items []Item, precision time.Duration) {
	is := NewItems(items)
	oldErrors := fs.Stats.GetErrors()
	for obj := range f.List() {
		if obj == nil {
			t.Errorf("Unexpected nil in List()")
			continue
		}
		is.Find(t, obj, precision)
	}
	is.Done(t)
	// Don't notice an error when listing an empty directory
	if len(items) == 0 && oldErrors == 0 && fs.Stats.GetErrors() == 1 {
		fs.Stats.ResetErrors()
	}
}

// Checks the fs to see if it has the expected contents
func CheckListing(t *testing.T, f fs.Fs, items []Item) {
	precision := f.Precision()
	CheckListingWithPrecision(t, f, items, precision)
}

// Parse a time string or explode
func Time(timeString string) time.Time {
	t, err := time.Parse(time.RFC3339Nano, timeString)
	if err != nil {
		log.Fatalf("Failed to parse time %q: %v", timeString, err)
	}
	return t
}

// Create a random string
func RandomString(n int) string {
	source := "abcdefghijklmnopqrstuvwxyz0123456789"
	out := make([]byte, n)
	for i := range out {
		out[i] = source[rand.Intn(len(source))]
	}
	return string(out)
}

// Creates a temporary directory name for local remotes
func LocalRemote() (path string, err error) {
	path, err = ioutil.TempDir("", "rclone")
	if err == nil {
		// Now remove the directory
		err = os.Remove(path)
	}
	path = filepath.ToSlash(path)
	return
}

// Make a random bucket or subdirectory name
//
// Returns a random remote name plus the leaf name
func RandomRemoteName(remoteName string) (string, string, error) {
	var err error
	var leafName string

	// Make a directory if remote name is null
	if remoteName == "" {
		remoteName, err = LocalRemote()
		if err != nil {
			return "", "", err
		}
	} else {
		if !strings.HasSuffix(remoteName, ":") {
			remoteName += "/"
		}
		leafName = RandomString(32)
		remoteName += leafName
	}
	return remoteName, leafName, nil
}

// Make a random bucket or subdirectory on the remote
//
// Call the finalise function returned to Purge the fs at the end (and
// the parent if necessary)
func RandomRemote(remoteName string, subdir bool) (fs.Fs, func(), error) {
	var err error
	var parentRemote fs.Fs

	remoteName, _, err = RandomRemoteName(remoteName)
	if err != nil {
		return nil, nil, err
	}

	if subdir {
		parentRemote, err = fs.NewFs(remoteName)
		if err != nil {
			return nil, nil, err
		}
		remoteName += "/" + RandomString(8)
	}

	remote, err := fs.NewFs(remoteName)
	if err != nil {
		return nil, nil, err
	}

	finalise := func() {
		_ = fs.Purge(remote) // ignore error
		if parentRemote != nil {
			err = fs.Purge(parentRemote) // ignore error
			if err != nil {
				log.Printf("Failed to purge %v: %v", parentRemote, err)
			}
		}
	}

	return remote, finalise, nil
}

func TestMkdir(t *testing.T, remote fs.Fs) {
	err := fs.Mkdir(remote)
	if err != nil {
		t.Fatalf("Mkdir failed: %v", err)
	}
	CheckListing(t, remote, []Item{})
}

func TestPurge(t *testing.T, remote fs.Fs) {
	err := fs.Purge(remote)
	if err != nil {
		t.Fatalf("Purge failed: %v", err)
	}
	CheckListing(t, remote, []Item{})
}

func TestRmdir(t *testing.T, remote fs.Fs) {
	err := fs.Rmdir(remote)
	if err != nil {
		t.Fatalf("Rmdir failed: %v", err)
	}
}
486
fstest/fstests/fstests.go
Normal file
@@ -0,0 +1,486 @@
// Generic tests for testing the Fs and Object interfaces
//
// Run go generate to write the tests for the remotes

//go:generate go run gen_tests.go
package fstests

import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
	"flag"
	"io"
	"log"
	"os"
	"strings"
	"testing"
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest"
)

var (
	remote        fs.Fs
	RemoteName    = ""
	subRemoteName = ""
	subRemoteLeaf = ""
	NilObject     fs.Object
	file1         = fstest.Item{
		ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
		Path:    "file name.txt",
	}
	file2 = fstest.Item{
		ModTime: fstest.Time("2001-02-03T04:05:10.123123123Z"),
		Path:    `hello? sausage/êé/Hello, 世界/ " ' @ < > & ?/z.txt`,
	}
)

func init() {
	flag.StringVar(&RemoteName, "remote", "", "Set this to override the default remote name (eg s3:)")
}

func TestInit(t *testing.T) {
	var err error
	fs.LoadConfig()
	fs.Config.Verbose = false
	fs.Config.Quiet = true
	t.Logf("Using remote %q", RemoteName)
	if RemoteName == "" {
		RemoteName, err = fstest.LocalRemote()
		if err != nil {
			log.Fatalf("Failed to create tmp dir: %v", err)
		}
	}
	subRemoteName, subRemoteLeaf, err = fstest.RandomRemoteName(RemoteName)
	if err != nil {
		t.Fatalf("Couldn't make remote name: %v", err)
	}

	remote, err = fs.NewFs(subRemoteName)
	if err == fs.NotFoundInConfigFile {
		log.Printf("Didn't find %q in config file - skipping tests", RemoteName)
		return
	}
	if err != nil {
		t.Fatalf("Couldn't start FS: %v", err)
	}
	fstest.TestMkdir(t, remote)
}

func skipIfNotOk(t *testing.T) {
	if remote == nil {
		t.Skip("FS not configured")
	}
}

// String returns a description of the FS

func TestFsString(t *testing.T) {
	skipIfNotOk(t)
	str := remote.String()
	if str == "" {
		t.Fatal("Bad fs.String()")
	}
}

type TestFile struct {
	ModTime time.Time
	Path    string
	Size    int64
	Md5sum  string
}

func TestFsRmdirEmpty(t *testing.T) {
	skipIfNotOk(t)
	fstest.TestRmdir(t, remote)
}

func TestFsRmdirNotFound(t *testing.T) {
	skipIfNotOk(t)
	err := remote.Rmdir()
	if err == nil {
		t.Fatalf("Expecting error on Rmdir non existent")
	}
}

func TestFsMkdir(t *testing.T) {
	skipIfNotOk(t)
	fstest.TestMkdir(t, remote)
	fstest.TestMkdir(t, remote)
}

func TestFsListEmpty(t *testing.T) {
	skipIfNotOk(t)
	fstest.CheckListing(t, remote, []fstest.Item{})
}

func TestFsListDirEmpty(t *testing.T) {
	skipIfNotOk(t)
	for obj := range remote.ListDir() {
		t.Errorf("Found unexpected item %q", obj.Name)
	}
}

func TestFsNewFsObjectNotFound(t *testing.T) {
	skipIfNotOk(t)
	if remote.NewFsObject("potato") != nil {
		t.Fatal("Didn't expect to find object")
	}
}

func findObject(t *testing.T, Name string) fs.Object {
	obj := remote.NewFsObject(Name)
	if obj == nil {
		t.Fatalf("Object not found: %q", Name)
	}
	return obj
}

func testPut(t *testing.T, file *fstest.Item) {
	buf := bytes.NewBufferString(fstest.RandomString(100))
	hash := md5.New()
	in := io.TeeReader(buf, hash)

	file.Size = int64(buf.Len())
	obj, err := remote.Put(in, file.Path, file.ModTime, file.Size)
	if err != nil {
		t.Fatal("Put error", err)
	}
	file.Md5sum = hex.EncodeToString(hash.Sum(nil))
	file.Check(t, obj, remote.Precision())
	// Re-read the object and check again
	obj = findObject(t, file.Path)
	file.Check(t, obj, remote.Precision())
}

func TestFsPutFile1(t *testing.T) {
	skipIfNotOk(t)
	testPut(t, &file1)
}

func TestFsPutFile2(t *testing.T) {
	skipIfNotOk(t)
	testPut(t, &file2)
}

func TestFsListDirFile2(t *testing.T) {
	skipIfNotOk(t)
	found := false
	for obj := range remote.ListDir() {
		if obj.Name != `hello? sausage` {
			t.Errorf("Found unexpected item %q", obj.Name)
		} else {
			found = true
		}
	}
	if !found {
		t.Errorf("Didn't find %q", `hello? sausage`)
	}
}

func TestFsListDirRoot(t *testing.T) {
	skipIfNotOk(t)
	rootRemote, err := fs.NewFs(RemoteName)
	if err != nil {
		t.Fatalf("Failed to make remote %q: %v", RemoteName, err)
	}
	found := false
	for obj := range rootRemote.ListDir() {
		if obj.Name == subRemoteLeaf {
			found = true
		}
	}
	if !found {
		t.Errorf("Didn't find %q", subRemoteLeaf)
	}
}

func TestFsListRoot(t *testing.T) {
	skipIfNotOk(t)
	rootRemote, err := fs.NewFs(RemoteName)
	if err != nil {
		t.Fatalf("Failed to make remote %q: %v", RemoteName, err)
	}
	// Should either find file1 and file2 or nothing
	found1 := false
	file1 := subRemoteLeaf + "/" + file1.Path
	found2 := false
	file2 := subRemoteLeaf + "/" + file2.Path
	count := 0
	errors := fs.Stats.GetErrors()
	for obj := range rootRemote.List() {
		count++
		if obj.Remote() == file1 {
			found1 = true
		}
		if obj.Remote() == file2 {
			found2 = true
		}
	}
	errors -= fs.Stats.GetErrors()
	if count == 0 {
		if errors == 0 {
			t.Error("Expecting error if count==0")
		}
		return
	}
	if found1 && found2 {
		if errors != 0 {
			t.Error("Not expecting error if found")
		}
		return
	}
	t.Errorf("Didn't find %q (%v) and %q (%v) or no files (count %d)", file1, found1, file2, found2, count)
}

func TestFsListFile1(t *testing.T) {
	skipIfNotOk(t)
	fstest.CheckListing(t, remote, []fstest.Item{file1, file2})
}

func TestFsNewFsObject(t *testing.T) {
	skipIfNotOk(t)
	obj := findObject(t, file1.Path)
	file1.Check(t, obj, remote.Precision())
}

func TestFsListFile1and2(t *testing.T) {
	skipIfNotOk(t)
	fstest.CheckListing(t, remote, []fstest.Item{file1, file2})
}

func TestFsCopy(t *testing.T) {
	skipIfNotOk(t)

	// Check have Copy
	_, ok := remote.(fs.Copier)
	if !ok {
		t.Skip("FS has no Copier interface")
	}

	var file1Copy = file1
	file1Copy.Path += "-copy"

	// do the copy
	src := findObject(t, file1.Path)
	dst, err := remote.(fs.Copier).Copy(src, file1Copy.Path)
	if err != nil {
		t.Errorf("Copy failed: %v", err)
	}

	// check file exists in new listing
	fstest.CheckListing(t, remote, []fstest.Item{file1, file2, file1Copy})

	// Check dst lightly - list above has checked ModTime/Md5sum
|
||||
if dst.Remote() != file1Copy.Path {
|
||||
t.Errorf("object path: want %q got %q", file1Copy.Path, dst.Remote())
|
||||
}
|
||||
|
||||
// Delete copy
|
||||
err = dst.Remove()
|
||||
if err != nil {
|
||||
t.Fatal("Remove copy error", err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestFsRmdirFull(t *testing.T) {
|
||||
skipIfNotOk(t)
|
||||
err := remote.Rmdir()
|
||||
if err == nil {
|
||||
t.Fatalf("Expecting error on RMdir on non empty remote")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFsPrecision(t *testing.T) {
|
||||
skipIfNotOk(t)
|
||||
precision := remote.Precision()
|
||||
if precision == fs.ModTimeNotSupported {
|
||||
return
|
||||
}
|
||||
if precision > time.Second || precision < 0 {
|
||||
t.Fatalf("Precision out of range %v", precision)
|
||||
}
|
||||
// FIXME check expected precision
|
||||
}
|
||||
|
||||
func TestObjectString(t *testing.T) {
|
||||
skipIfNotOk(t)
|
||||
obj := findObject(t, file1.Path)
|
||||
s := obj.String()
|
||||
if s != file1.Path {
|
||||
t.Errorf("String() wrong %v != %v", s, file1.Path)
|
||||
}
|
||||
obj = NilObject
|
||||
s = obj.String()
|
||||
if s != "<nil>" {
|
||||
t.Errorf("String() wrong %v != %v", s, "<nil>")
|
||||
}
|
||||
}
|
||||
|
||||
func TestObjectFs(t *testing.T) {
|
||||
skipIfNotOk(t)
|
||||
obj := findObject(t, file1.Path)
|
||||
if obj.Fs() != remote {
|
||||
t.Errorf("Fs is wrong %v != %v", obj.Fs(), remote)
|
||||
}
|
||||
}
|
||||
|
||||
func TestObjectRemote(t *testing.T) {
|
||||
skipIfNotOk(t)
|
||||
obj := findObject(t, file1.Path)
|
||||
if obj.Remote() != file1.Path {
|
||||
t.Errorf("Remote is wrong %v != %v", obj.Remote(), file1.Path)
|
||||
}
|
||||
}
|
||||
|
||||
func TestObjectMd5sum(t *testing.T) {
|
||||
skipIfNotOk(t)
|
||||
obj := findObject(t, file1.Path)
|
||||
Md5sum, err := obj.Md5sum()
|
||||
if err != nil {
|
||||
t.Errorf("Error in Md5sum: %v", err)
|
||||
}
|
||||
if !fs.Md5sumsEqual(Md5sum, file1.Md5sum) {
|
||||
t.Errorf("Md5sum is wrong %v != %v", Md5sum, file1.Md5sum)
|
||||
}
|
||||
}
|
||||
|
||||
func TestObjectModTime(t *testing.T) {
|
||||
skipIfNotOk(t)
|
||||
obj := findObject(t, file1.Path)
|
||||
file1.CheckModTime(t, obj, obj.ModTime(), remote.Precision())
|
||||
}
|
||||
|
||||
func TestObjectSetModTime(t *testing.T) {
|
||||
skipIfNotOk(t)
|
||||
newModTime := fstest.Time("2011-12-13T14:15:16.999999999Z")
|
||||
obj := findObject(t, file1.Path)
|
||||
obj.SetModTime(newModTime)
|
||||
file1.ModTime = newModTime
|
||||
file1.CheckModTime(t, obj, obj.ModTime(), remote.Precision())
|
||||
// And make a new object and read it from there too
|
||||
TestObjectModTime(t)
|
||||
}
|
||||
|
||||
func TestObjectSize(t *testing.T) {
|
||||
skipIfNotOk(t)
|
||||
obj := findObject(t, file1.Path)
|
||||
if obj.Size() != file1.Size {
|
||||
t.Errorf("Size is wrong %v != %v", obj.Size(), file1.Size)
|
||||
}
|
||||
}
|
||||
|
||||
func TestObjectOpen(t *testing.T) {
|
||||
skipIfNotOk(t)
|
||||
obj := findObject(t, file1.Path)
|
||||
in, err := obj.Open()
|
||||
if err != nil {
|
||||
t.Fatalf("Open() return error: %v", err)
|
||||
}
|
||||
hash := md5.New()
|
||||
n, err := io.Copy(hash, in)
|
||||
if err != nil {
|
||||
t.Fatalf("io.Copy() return error: %v", err)
|
||||
}
|
||||
if n != file1.Size {
|
||||
t.Fatalf("Read wrong number of bytes %d != %d", n, file1.Size)
|
||||
}
|
||||
err = in.Close()
|
||||
if err != nil {
|
||||
t.Fatalf("in.Close() return error: %v", err)
|
||||
}
|
||||
Md5sum := hex.EncodeToString(hash.Sum(nil))
|
||||
if !fs.Md5sumsEqual(Md5sum, file1.Md5sum) {
|
||||
t.Errorf("Md5sum is wrong %v != %v", Md5sum, file1.Md5sum)
|
||||
}
|
||||
}
|
||||
|
||||
func TestObjectUpdate(t *testing.T) {
|
||||
skipIfNotOk(t)
|
||||
buf := bytes.NewBufferString(fstest.RandomString(200))
|
||||
hash := md5.New()
|
||||
in := io.TeeReader(buf, hash)
|
||||
|
||||
file1.Size = int64(buf.Len())
|
||||
obj := findObject(t, file1.Path)
|
||||
err := obj.Update(in, file1.ModTime, file1.Size)
|
||||
if err != nil {
|
||||
t.Fatal("Update error", err)
|
||||
}
|
||||
file1.Md5sum = hex.EncodeToString(hash.Sum(nil))
|
||||
file1.Check(t, obj, remote.Precision())
|
||||
// Re-read the object and check again
|
||||
obj = findObject(t, file1.Path)
|
||||
file1.Check(t, obj, remote.Precision())
|
||||
}
|
||||
|
||||
func TestObjectStorable(t *testing.T) {
|
||||
skipIfNotOk(t)
|
||||
obj := findObject(t, file1.Path)
|
||||
if !obj.Storable() {
|
||||
t.Fatalf("Expecting %v to be storable", obj)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLimitedFs(t *testing.T) {
|
||||
skipIfNotOk(t)
|
||||
remoteName := subRemoteName + "/" + file2.Path
|
||||
file2Copy := file2
|
||||
file2Copy.Path = "z.txt"
|
||||
fileRemote, err := fs.NewFs(remoteName)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to make remote %q: %v", remoteName, err)
|
||||
}
|
||||
fstest.CheckListing(t, fileRemote, []fstest.Item{file2Copy})
|
||||
_, ok := fileRemote.(*fs.Limited)
|
||||
if !ok {
|
||||
t.Errorf("%v is not a fs.Limited", fileRemote)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLimitedFsNotFound(t *testing.T) {
|
||||
skipIfNotOk(t)
|
||||
remoteName := subRemoteName + "/not found.txt"
|
||||
fileRemote, err := fs.NewFs(remoteName)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to make remote %q: %v", remoteName, err)
|
||||
}
|
||||
fstest.CheckListing(t, fileRemote, []fstest.Item{})
|
||||
_, ok := fileRemote.(*fs.Limited)
|
||||
if ok {
|
||||
t.Errorf("%v is is a fs.Limited", fileRemote)
|
||||
}
|
||||
}
|
||||
|
||||
func TestObjectRemove(t *testing.T) {
|
||||
skipIfNotOk(t)
|
||||
obj := findObject(t, file1.Path)
|
||||
err := obj.Remove()
|
||||
if err != nil {
|
||||
t.Fatal("Remove error", err)
|
||||
}
|
||||
fstest.CheckListing(t, remote, []fstest.Item{file2})
|
||||
}
|
||||
|
||||
func TestObjectPurge(t *testing.T) {
|
||||
skipIfNotOk(t)
|
||||
fstest.TestPurge(t, remote)
|
||||
err := fs.Purge(remote)
|
||||
if err == nil {
|
||||
t.Fatal("Expecting error after on second purge")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFinalise(t *testing.T) {
|
||||
skipIfNotOk(t)
|
||||
if strings.HasPrefix(RemoteName, "/") {
|
||||
// Remove temp directory
|
||||
err := os.Remove(RemoteName)
|
||||
if err != nil {
|
||||
log.Printf("Failed to remove %q: %v\n", RemoteName, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
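
The testPut helper above computes the MD5 as a side effect of the upload: io.TeeReader copies everything Put reads into the running hash, so no second pass over the data is needed. A minimal, self-contained sketch of that pattern (the names here are illustrative, not rclone APIs):

package main

import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	buf := bytes.NewBufferString("some file contents")
	hash := md5.New()
	// Everything read from in is also written to hash.
	in := io.TeeReader(buf, hash)
	// Stand-in for remote.Put(in, ...): consume the reader.
	n, _ := io.Copy(ioutil.Discard, in)
	fmt.Printf("uploaded %d bytes, md5 %s\n", n, hex.EncodeToString(hash.Sum(nil)))
}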
143 fstest/fstests/gen_tests.go Normal file
@@ -0,0 +1,143 @@
// +build ignore

// Make the test files from fstests.go
package main

import (
	"bufio"
	"html/template"
	"log"
	"os"
	"os/exec"
	"regexp"
	"strings"
)

// Search fstests.go and return all the test function names
func findTestFunctions() []string {
	fns := []string{}
	matcher := regexp.MustCompile(`^func\s+(Test.*?)\(`)

	in, err := os.Open("fstests.go")
	if err != nil {
		log.Fatalf("Couldn't open fstests.go: %v", err)
	}
	defer in.Close()

	scanner := bufio.NewScanner(in)
	for scanner.Scan() {
		line := scanner.Text()
		matches := matcher.FindStringSubmatch(line)
		if len(matches) > 0 {
			fns = append(fns, matches[1])
		}
	}

	if err := scanner.Err(); err != nil {
		log.Fatalf("Error scanning file: %v", err)
	}
	return fns
}

// Data to substitute
type Data struct {
	Regenerate  string
	FsName      string
	UpperFsName string
	TestName    string
	ObjectName  string
	Fns         []string
}

var testProgram = `
// Test {{ .UpperFsName }} filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: {{ .Regenerate }}
package {{ .FsName }}_test

import (
	"testing"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
	"github.com/ncw/rclone/{{ .FsName }}"
)

func init() {
	fstests.NilObject = fs.Object((*{{ .FsName }}.FsObject{{ .ObjectName }})(nil))
	fstests.RemoteName = "{{ .TestName }}"
}

// Generic tests for the Fs
{{ range $fn := .Fns }}func {{ $fn }}(t *testing.T){ fstests.{{ $fn }}(t) }
{{ end }}
`

// Generate test file piping it through gofmt
func generateTestProgram(t *template.Template, fns []string, Fsname string) {
	fsname := strings.ToLower(Fsname)
	TestName := "Test" + Fsname + ":"
	outfile := "../../" + fsname + "/" + fsname + "_test.go"
	// Find last capitalised group to be object name
	matcher := regexp.MustCompile(`([A-Z][a-z0-9]+)$`)
	matches := matcher.FindStringSubmatch(Fsname)
	if len(matches) == 0 {
		log.Fatalf("Couldn't find object name in %q", Fsname)
	}
	ObjectName := matches[1]

	if fsname == "local" {
		TestName = ""
	}

	data := Data{
		Regenerate:  "make gen_tests",
		FsName:      fsname,
		UpperFsName: Fsname,
		TestName:    TestName,
		ObjectName:  ObjectName,
		Fns:         fns,
	}

	cmd := exec.Command("gofmt")

	log.Printf("Writing %q", outfile)
	out, err := os.Create(outfile)
	if err != nil {
		log.Fatal(err)
	}
	cmd.Stdout = out

	gofmt, err := cmd.StdinPipe()
	if err != nil {
		log.Fatal(err)
	}
	if err = cmd.Start(); err != nil {
		log.Fatal(err)
	}
	if err = t.Execute(gofmt, data); err != nil {
		log.Fatal(err)
	}
	if err = gofmt.Close(); err != nil {
		log.Fatal(err)
	}
	if err = cmd.Wait(); err != nil {
		log.Fatal(err)
	}
	if err = out.Close(); err != nil {
		log.Fatal(err)
	}
}

func main() {
	fns := findTestFunctions()
	t := template.Must(template.New("main").Parse(testProgram))
	generateTestProgram(t, fns, "Local")
	generateTestProgram(t, fns, "Swift")
	generateTestProgram(t, fns, "S3")
	generateTestProgram(t, fns, "Drive")
	generateTestProgram(t, fns, "GoogleCloudStorage")
	generateTestProgram(t, fns, "Dropbox")
	log.Printf("Done")
}
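
generateTestProgram above formats its output by running gofmt as a subprocess: the template is executed straight into the child's stdin while its stdout is pointed at the destination file. A stripped-down sketch of that pipe wiring, assuming only that gofmt is on the PATH (the file name and template below are made up for the example):

package main

import (
	"log"
	"os"
	"os/exec"
	"text/template"
)

var tmpl = template.Must(template.New("t").Parse(
	"package {{ . }}\n\nfunc      Placeholder()   {}\n"))

func main() {
	cmd := exec.Command("gofmt")
	out, err := os.Create("generated.go")
	if err != nil {
		log.Fatal(err)
	}
	cmd.Stdout = out // formatted source lands in the file
	stdin, err := cmd.StdinPipe()
	if err != nil {
		log.Fatal(err)
	}
	if err = cmd.Start(); err != nil {
		log.Fatal(err)
	}
	// Unformatted template output goes into gofmt's stdin.
	if err = tmpl.Execute(stdin, "demo"); err != nil {
		log.Fatal(err)
	}
	if err = stdin.Close(); err != nil {
		log.Fatal(err)
	}
	if err = cmd.Wait(); err != nil {
		log.Fatal(err)
	}
	if err = out.Close(); err != nil {
		log.Fatal(err)
	}
}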
@@ -5,7 +5,6 @@ import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"

	"code.google.com/p/goauth2/oauth"
	"github.com/ncw/rclone/fs"
@@ -82,7 +81,7 @@ func (auth *Auth) newTransport(name string) (*oauth.Transport, error) {

	t := &oauth.Transport{
		Config:    config,
		Transport: http.DefaultTransport,
		Transport: fs.Config.Transport(),
	}

	return t, nil
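
The googleauth change above is a small dependency injection: rather than hard-coding http.DefaultTransport, the OAuth transport now takes whatever fs.Config.Transport() supplies, so globally configured connection behaviour applies to authorised requests too. A generic sketch of the same idea with plain net/http (the 30 second dial timeout is just an example value, not an rclone setting):

package main

import (
	"net"
	"net/http"
	"time"
)

// newClient returns an http.Client with an injected transport so
// connection behaviour is configured in one place.
func newClient(timeout time.Duration) *http.Client {
	transport := &http.Transport{
		Dial: func(network, addr string) (net.Conn, error) {
			return net.DialTimeout(network, addr, timeout)
		},
	}
	return &http.Client{Transport: transport}
}

func main() {
	client := newClient(30 * time.Second)
	resp, err := client.Get("http://example.com/")
	if err == nil {
		resp.Body.Close()
	}
}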
@@ -17,15 +17,14 @@ import (
	"encoding/hex"
	"fmt"
	"io"
	"mime"
	"net/http"
	"path"
	"regexp"
	"strings"
	"time"

	"code.google.com/p/google-api-go-client/googleapi"
	"code.google.com/p/google-api-go-client/storage/v1"
	"google.golang.org/api/googleapi"
	"google.golang.org/api/storage/v1"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/googleauth"
@@ -34,8 +33,8 @@ import (
const (
	rcloneClientId     = "202264815644.apps.googleusercontent.com"
	rcloneClientSecret = "X4Z3ca8xfWDb1Voo-F9a7ZxJ"
	RFC3339In          = time.RFC3339
	RFC3339Out         = "2006-01-02T15:04:05.000000000Z07:00"
	timeFormatIn       = time.RFC3339
	timeFormatOut      = "2006-01-02T15:04:05.000000000Z07:00"
	metaMtime          = "mtime" // key to store mtime under in metadata
	listChunks         = 256     // chunk size to read directory listings
)
@@ -43,7 +42,7 @@ const (
var (
	// Description of how to auth for this app
	storageAuth = &googleauth.Auth{
		Scope:               storage.DevstorageFull_controlScope,
		Scope:               storage.DevstorageFullControlScope,
		DefaultClientId:     rcloneClientId,
		DefaultClientSecret: rcloneClientSecret,
	}
@@ -113,6 +112,7 @@ func init() {

// FsStorage represents a remote storage server
type FsStorage struct {
	name   string           // name of this remote
	svc    *storage.Service // the connection to the storage server
	client *http.Client     // authorized client
	bucket string           // the bucket we are working on
@@ -136,6 +136,11 @@ type FsObjectStorage struct {

// ------------------------------------------------------------

// The name of the remote (as passed into NewFs)
func (f *FsStorage) Name() string {
	return f.name
}

// String converts this FsStorage to a string
func (f *FsStorage) String() string {
	if f.root == "" {
@@ -172,6 +177,7 @@ func NewFs(name, root string) (fs.Fs, error) {
	}

	f := &FsStorage{
		name:          name,
		bucket:        bucket,
		root:          directory,
		projectNumber: fs.ConfigFile.MustValue(name, "project_number"),
@@ -215,7 +221,7 @@ func NewFs(name, root string) (fs.Fs, error) {
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsStorage) NewFsObjectWithInfo(remote string, info *storage.Object) fs.Object {
func (f *FsStorage) newFsObjectWithInfo(remote string, info *storage.Object) fs.Object {
	o := &FsObjectStorage{
		storage: f,
		remote:  remote,
@@ -236,7 +242,7 @@ func (f *FsStorage) NewFsObjectWithInfo(remote string, info *storage.Object) fs.
//
// May return nil if an error occurred
func (f *FsStorage) NewFsObject(remote string) fs.Object {
	return f.NewFsObjectWithInfo(remote, nil)
	return f.newFsObjectWithInfo(remote, nil)
}

// list the objects into the function supplied
@@ -252,19 +258,26 @@ func (f *FsStorage) list(directories bool, fn func(string, *storage.Object)) {
		objects, err := list.Do()
		if err != nil {
			fs.Stats.Error()
			fs.Log(f, "Couldn't read bucket %q: %s", f.bucket, err)
			fs.ErrorLog(f, "Couldn't read bucket %q: %s", f.bucket, err)
			return
		}
		for _, object := range objects.Items {
			if directories && !strings.HasSuffix(object.Name, "/") {
				continue
		if !directories {
			for _, object := range objects.Items {
				if !strings.HasPrefix(object.Name, f.root) {
					fs.Log(f, "Odd name received %q", object.Name)
					continue
				}
				remote := object.Name[rootLength:]
				fn(remote, object)
			}
			if !strings.HasPrefix(object.Name, f.root) {
				fs.Log(f, "Odd name received %q", object.Name)
				continue
		} else {
			var object storage.Object
			for _, prefix := range objects.Prefixes {
				if !strings.HasSuffix(prefix, "/") {
					continue
				}
				fn(prefix[:len(prefix)-1], &object)
			}
			remote := object.Name[rootLength:]
			fn(remote, object)
		}
		if objects.NextPageToken == "" {
			break
@@ -280,13 +293,13 @@ func (f *FsStorage) List() fs.ObjectsChan {
		// Return no objects at top level list
		close(out)
		fs.Stats.Error()
		fs.Log(f, "Can't list objects at root - choose a bucket using lsd")
		fs.ErrorLog(f, "Can't list objects at root - choose a bucket using lsd")
	} else {
		// List the objects
		go func() {
			defer close(out)
			f.list(false, func(remote string, object *storage.Object) {
				if fs := f.NewFsObjectWithInfo(remote, object); fs != nil {
				if fs := f.newFsObjectWithInfo(remote, object); fs != nil {
					out <- fs
				}
			})
@@ -304,7 +317,7 @@ func (f *FsStorage) ListDir() fs.DirChan {
		defer close(out)
		if f.projectNumber == "" {
			fs.Stats.Error()
			fs.Log(f, "Can't list buckets without project number")
			fs.ErrorLog(f, "Can't list buckets without project number")
			return
		}
		listBuckets := f.svc.Buckets.List(f.projectNumber).MaxResults(listChunks)
@@ -312,7 +325,7 @@ func (f *FsStorage) ListDir() fs.DirChan {
			buckets, err := listBuckets.Do()
			if err != nil {
				fs.Stats.Error()
				fs.Log(f, "Couldn't list buckets: %v", err)
				fs.ErrorLog(f, "Couldn't list buckets: %v", err)
				break
			} else {
				for _, bucket := range buckets.Items {
@@ -352,8 +365,8 @@ func (f *FsStorage) ListDir() fs.DirChan {
// The new object may have been created if an error is returned
func (f *FsStorage) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
	// Temporary FsObject under construction
	fs := &FsObjectStorage{storage: f, remote: remote}
	return fs, fs.Update(in, modTime, size)
	o := &FsObjectStorage{storage: f, remote: remote}
	return o, o.Update(in, modTime, size)
}

// Mkdir creates the bucket if it doesn't exist
@@ -388,6 +401,38 @@ func (fs *FsStorage) Precision() time.Duration {
	return time.Nanosecond
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *FsStorage) Copy(src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*FsObjectStorage)
	if !ok {
		fs.Debug(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}

	// Temporary FsObject under construction
	dstObj := &FsObjectStorage{storage: f, remote: remote}

	srcBucket := srcObj.storage.bucket
	srcObject := srcObj.storage.root + srcObj.remote
	dstBucket := f.bucket
	dstObject := f.root + remote
	newObject, err := f.svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, nil).Do()
	if err != nil {
		return nil, err
	}
	// Set the metadata for the new object while we have it
	dstObj.setMetaData(newObject)
	return dstObj, nil
}

// ------------------------------------------------------------

// Return the parent Fs
@@ -434,7 +479,7 @@ func (o *FsObjectStorage) setMetaData(info *storage.Object) {
	// read mtime out of metadata if available
	mtimeString, ok := info.Metadata[metaMtime]
	if ok {
		modTime, err := time.Parse(RFC3339In, mtimeString)
		modTime, err := time.Parse(timeFormatIn, mtimeString)
		if err == nil {
			o.modTime = modTime
			return
@@ -444,7 +489,7 @@ func (o *FsObjectStorage) setMetaData(info *storage.Object) {
	}

	// Fallback to the Updated time
	modTime, err := time.Parse(RFC3339In, info.Updated)
	modTime, err := time.Parse(timeFormatIn, info.Updated)
	if err != nil {
		fs.Log(o, "Bad time decode: %v", err)
	} else {
@@ -484,7 +529,7 @@ func (o *FsObjectStorage) ModTime() time.Time {
// Returns metadata for an object
func metadataFromModTime(modTime time.Time) map[string]string {
	metadata := make(map[string]string, 1)
	metadata[metaMtime] = modTime.Format(RFC3339Out)
	metadata[metaMtime] = modTime.Format(timeFormatOut)
	return metadata
}

@@ -496,11 +541,12 @@ func (o *FsObjectStorage) SetModTime(modTime time.Time) {
		Name:     o.storage.root + o.remote,
		Metadata: metadataFromModTime(modTime),
	}
	_, err := o.storage.svc.Objects.Patch(o.storage.bucket, o.storage.root+o.remote, &object).Do()
	newObject, err := o.storage.svc.Objects.Patch(o.storage.bucket, o.storage.root+o.remote, &object).Do()
	if err != nil {
		fs.Stats.Error()
		fs.Log(o, "Failed to update remote mtime: %s", err)
		fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
	}
	o.setMetaData(newObject)
}

// Is this object storable
@@ -530,7 +576,7 @@ func (o *FsObjectStorage) Open() (in io.ReadCloser, err error) {
		return nil, err
	}
	if res.StatusCode != 200 {
		res.Body.Close()
		_ = res.Body.Close() // ignore error
		return nil, fmt.Errorf("Bad response: %d: %s", res.StatusCode, res.Status)
	}
	return res.Body, nil
@@ -540,24 +586,21 @@ func (o *FsObjectStorage) Open() (in io.ReadCloser, err error) {
//
// The new object may have been created if an error is returned
func (o *FsObjectStorage) Update(in io.Reader, modTime time.Time, size int64) error {
	// Guess the content type
	contentType := mime.TypeByExtension(path.Ext(o.remote))
	if contentType == "" {
		contentType = "application/octet-stream"
	}

	object := storage.Object{
		Bucket:      o.storage.bucket,
		Name:        o.storage.root + o.remote,
		ContentType: contentType,
		ContentType: fs.MimeType(o),
		Size:        uint64(size),
		Updated:     modTime.Format(RFC3339Out), // Doesn't get set
		Updated:     modTime.Format(timeFormatOut), // Doesn't get set
		Metadata:    metadataFromModTime(modTime),
	}
	newObject, err := o.storage.svc.Objects.Insert(o.storage.bucket, &object).Media(in).Name(object.Name).PredefinedAcl(o.storage.objectAcl).Do()
	if err != nil {
		return err
	}
	// Set the metadata for the new object while we have it
	o.setMetaData(newObject)
	return err
	return nil
}

// Remove an object
@@ -567,4 +610,5 @@ func (o *FsObjectStorage) Remove() error {

// Check the interfaces are satisfied
var _ fs.Fs = &FsStorage{}
var _ fs.Copier = &FsStorage{}
var _ fs.Object = &FsObjectStorage{}
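
The Copy method above is what satisfies rclone's optional fs.Copier interface, which the generic TestFsCopy probes with a type assertion. The calling pattern, reduced to its essentials (Object, Fs, Copier and ErrorCantCopy below are local stand-ins mirroring the rclone names, not the real package):

package main

import (
	"errors"
	"fmt"
)

// Minimal stand-ins for the interfaces used in the diff above.
type Object interface{ Remote() string }

type Fs interface {
	Put(src Object, remote string) (Object, error)
}

type Copier interface {
	Copy(src Object, remote string) (Object, error)
}

var ErrorCantCopy = errors.New("can't server-side copy")

// copyObject prefers a server-side copy when the backend offers one,
// falling back to a normal upload otherwise.
func copyObject(f Fs, src Object, remote string) (Object, error) {
	if copier, ok := f.(Copier); ok {
		dst, err := copier.Copy(src, remote)
		if err != ErrorCantCopy {
			return dst, err // copied server side (or failed hard)
		}
	}
	return f.Put(src, remote)
}

type obj string

func (o obj) Remote() string { return string(o) }

type plainFs struct{}

func (plainFs) Put(src Object, remote string) (Object, error) { return obj(remote), nil }

func main() {
	dst, _ := copyObject(plainFs{}, obj("a.txt"), "b.txt")
	fmt.Println("copied to", dst.Remote())
}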
54 googlecloudstorage/googlecloudstorage_test.go Normal file
@@ -0,0 +1,54 @@
// Test GoogleCloudStorage filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package googlecloudstorage_test

import (
	"testing"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
	"github.com/ncw/rclone/googlecloudstorage"
)

func init() {
	fstests.NilObject = fs.Object((*googlecloudstorage.FsObjectStorage)(nil))
	fstests.RemoteName = "TestGoogleCloudStorage:"
}

// Generic tests for the Fs
func TestInit(t *testing.T)                  { fstests.TestInit(t) }
func TestFsString(t *testing.T)              { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T)          { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T)       { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T)               { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T)           { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T)        { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T)            { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T)            { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T)        { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T)         { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T)            { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T)           { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T)         { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T)       { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T)                { fstests.TestFsCopy(t) }
func TestFsRmdirFull(t *testing.T)           { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T)           { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T)          { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T)              { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T)          { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T)          { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T)         { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T)      { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T)            { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T)            { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T)          { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T)        { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T)             { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T)     { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T)          { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T)           { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T)              { fstests.TestFinalise(t) }
104 local/local.go
@@ -1,6 +1,11 @@
// Local filesystem interface
package local

// Note that all rclone paths should be / separated. Anything coming
// from the filepath module will have \ separators on windows so
// should be converted using filepath.ToSlash. Windows is quite happy
// with / separators so there is no need to convert them back.

import (
	"crypto/md5"
	"encoding/hex"
@@ -8,12 +13,12 @@ import (
	"hash"
	"io"
	"io/ioutil"
	"log"
	"os"
	"path"
	"path/filepath"
	"sync"
	"time"
	"unicode/utf8"

	"github.com/ncw/rclone/fs"
)
@@ -28,14 +33,16 @@ func init() {

// FsLocal represents a local filesystem rooted at root
type FsLocal struct {
	root        string        // The root directory
	precisionOk sync.Once     // Whether we need to read the precision
	precision   time.Duration // precision of local filesystem
	name        string              // the name of the remote
	root        string              // The root directory
	precisionOk sync.Once           // Whether we need to read the precision
	precision   time.Duration       // precision of local filesystem
	warned      map[string]struct{} // whether we have warned about this string
}

// FsObjectLocal represents a local filesystem object
type FsObjectLocal struct {
	local  fs.Fs       // The Fs this object is part of
	local  *FsLocal    // The Fs this object is part of
	remote string      // The remote path
	path   string      // The local path
	info   os.FileInfo // Interface for file info (always present)
@@ -46,8 +53,12 @@ type FsObjectLocal struct {

// NewFs constructs an FsLocal from the path
func NewFs(name, root string) (fs.Fs, error) {
	root = path.Clean(root)
	f := &FsLocal{root: root}
	root = filepath.ToSlash(path.Clean(root))
	f := &FsLocal{
		name:   name,
		root:   root,
		warned: make(map[string]struct{}),
	}
	// Check to see if this points to a file
	fi, err := os.Lstat(f.root)
	if err == nil && fi.Mode().IsRegular() {
@@ -61,6 +72,11 @@ func NewFs(name, root string) (fs.Fs, error) {
	return f, nil
}

// The name of the remote (as passed into NewFs)
func (f *FsLocal) Name() string {
	return f.name
}

// String converts this FsLocal to a string
func (f *FsLocal) String() string {
	return fmt.Sprintf("Local file system at %s", f.root)
@@ -69,8 +85,9 @@ func (f *FsLocal) String() string {
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsLocal) NewFsObjectWithInfo(remote string, info os.FileInfo) fs.Object {
	path := filepath.Join(f.root, remote)
func (f *FsLocal) newFsObjectWithInfo(remote string, info os.FileInfo) fs.Object {
	remote = filepath.ToSlash(remote)
	path := path.Join(f.root, remote)
	o := &FsObjectLocal{local: f, remote: remote, path: path}
	if info != nil {
		o.info = info
@@ -88,7 +105,7 @@ func (f *FsLocal) NewFsObjectWithInfo(remote string, info os.FileInfo) fs.Object
//
// May return nil if an error occurred
func (f *FsLocal) NewFsObject(remote string) fs.Object {
	return f.NewFsObjectWithInfo(remote, nil)
	return f.newFsObjectWithInfo(remote, nil)
}

// List the path returning a channel of FsObjects
@@ -100,19 +117,19 @@ func (f *FsLocal) List() fs.ObjectsChan {
		err := filepath.Walk(f.root, func(path string, fi os.FileInfo, err error) error {
			if err != nil {
				fs.Stats.Error()
				log.Printf("Failed to open directory: %s: %s", path, err)
				fs.ErrorLog(f, "Failed to open directory: %s: %s", path, err)
			} else {
				remote, err := filepath.Rel(f.root, path)
				if err != nil {
					fs.Stats.Error()
					log.Printf("Failed to get relative path %s: %s", path, err)
					fs.ErrorLog(f, "Failed to get relative path %s: %s", path, err)
					return nil
				}
				if remote == "." {
					return nil
					// remote = ""
				}
				if fs := f.NewFsObjectWithInfo(remote, fi); fs != nil {
				if fs := f.newFsObjectWithInfo(remote, fi); fs != nil {
					if fs.Storable() {
						out <- fs
					}
@@ -122,13 +139,27 @@ func (f *FsLocal) List() fs.ObjectsChan {
		})
		if err != nil {
			fs.Stats.Error()
			log.Printf("Failed to open directory: %s: %s", f.root, err)
			fs.ErrorLog(f, "Failed to open directory: %s: %s", f.root, err)
		}
		close(out)
	}()
	return out
}

// CleanUtf8 makes string a valid UTF-8 string
//
// Any invalid UTF-8 characters will be replaced with utf8.RuneError
func (f *FsLocal) cleanUtf8(name string) string {
	if utf8.ValidString(name) {
		return name
	}
	if _, ok := f.warned[name]; !ok {
		fs.Debug(f, "Replacing invalid UTF-8 characters in %q", name)
		f.warned[name] = struct{}{}
	}
	return string([]rune(name))
}

// Walk the path returning a channel of FsObjects
func (f *FsLocal) ListDir() fs.DirChan {
	out := make(fs.DirChan, fs.Config.Checkers)
@@ -137,12 +168,12 @@ func (f *FsLocal) ListDir() fs.DirChan {
		items, err := ioutil.ReadDir(f.root)
		if err != nil {
			fs.Stats.Error()
			log.Printf("Couldn't find read directory: %s", err)
			fs.ErrorLog(f, "Couldn't find read directory: %s", err)
		} else {
			for _, item := range items {
				if item.IsDir() {
					dir := &fs.Dir{
						Name:  item.Name(),
						Name:  f.cleanUtf8(item.Name()),
						When:  item.ModTime(),
						Bytes: 0,
						Count: 0,
@@ -152,7 +183,7 @@ func (f *FsLocal) ListDir() fs.DirChan {
					err := filepath.Walk(dirpath, func(path string, fi os.FileInfo, err error) error {
						if err != nil {
							fs.Stats.Error()
							log.Printf("Failed to open directory: %s: %s", path, err)
							fs.ErrorLog(f, "Failed to open directory: %s: %s", path, err)
						} else {
							dir.Count += 1
							dir.Bytes += fi.Size()
@@ -161,7 +192,7 @@ func (f *FsLocal) ListDir() fs.DirChan {
					})
					if err != nil {
						fs.Stats.Error()
						log.Printf("Failed to open directory: %s: %s", dirpath, err)
						fs.ErrorLog(f, "Failed to open directory: %s: %s", dirpath, err)
					}
					out <- dir
				}
@@ -174,7 +205,7 @@ func (f *FsLocal) ListDir() fs.DirChan {

// Puts the FsObject to the local filesystem
func (f *FsLocal) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
	dstPath := filepath.Join(f.root, remote)
	dstPath := path.Join(f.root, remote)
	// Temporary FsObject under construction - info filled in by Update()
	o := &FsObjectLocal{local: f, remote: remote, path: dstPath}
	err := o.Update(in, modTime, size)
@@ -186,7 +217,7 @@ func (f *FsLocal) Put(in io.Reader, remote string, modTime time.Time, size int64
// Mkdir creates the directory if it doesn't exist
func (f *FsLocal) Mkdir() error {
	return os.MkdirAll(f.root, 0770)
	return os.MkdirAll(f.root, 0777)
}

// Rmdir removes the directory
@@ -218,12 +249,15 @@ func (f *FsLocal) readPrecision() (precision time.Duration) {
	}
	path := fd.Name()
	// fmt.Println("Created temp file", path)
	fd.Close()
	err = fd.Close()
	if err != nil {
		return time.Second
	}

	// Delete it on return
	defer func() {
		// fmt.Println("Remove temp file")
		os.Remove(path)
		_ = os.Remove(path) // ignore error
	}()

	// Find the minimum duration we can detect
@@ -259,6 +293,13 @@ func (f *FsLocal) readPrecision() (precision time.Duration) {
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *FsLocal) Purge() error {
	fi, err := os.Lstat(f.root)
	if err != nil {
		return err
	}
	if !fi.Mode().IsDir() {
		return fmt.Errorf("Can't Purge non directory: %q", f.root)
	}
	return os.RemoveAll(f.root)
}

@@ -279,7 +320,7 @@ func (o *FsObjectLocal) String() string {

// Return the remote path
func (o *FsObjectLocal) Remote() string {
	return o.remote
	return o.local.cleanUtf8(o.remote)
}

// Md5sum calculates the Md5sum of a file returning a lowercase hex string
@@ -290,7 +331,7 @@ func (o *FsObjectLocal) Md5sum() (string, error) {
	in, err := os.Open(o.path)
	if err != nil {
		fs.Stats.Error()
		fs.Log(o, "Failed to open: %s", err)
		fs.ErrorLog(o, "Failed to open: %s", err)
		return "", err
	}
	hash := md5.New()
@@ -298,12 +339,12 @@ func (o *FsObjectLocal) Md5sum() (string, error) {
	closeErr := in.Close()
	if err != nil {
		fs.Stats.Error()
		fs.Log(o, "Failed to read: %s", err)
		fs.ErrorLog(o, "Failed to read: %s", err)
		return "", err
	}
	if closeErr != nil {
		fs.Stats.Error()
		fs.Log(o, "Failed to close: %s", closeErr)
		fs.ErrorLog(o, "Failed to close: %s", closeErr)
		return "", closeErr
	}
	o.md5sum = hex.EncodeToString(hash.Sum(nil))
@@ -325,6 +366,13 @@ func (o *FsObjectLocal) SetModTime(modTime time.Time) {
	err := os.Chtimes(o.path, modTime, modTime)
	if err != nil {
		fs.Debug(o, "Failed to set mtime on file: %s", err)
		return
	}
	// Re-read metadata
	err = o.lstat()
	if err != nil {
		fs.Debug(o, "Failed to stat: %s", err)
		return
	}
}

@@ -335,7 +383,7 @@ func (o *FsObjectLocal) Storable() bool {
		fs.Debug(o, "Can't transfer non file/directory")
		return false
	} else if mode&os.ModeDir != 0 {
		fs.Debug(o, "FIXME Skipping directory")
		// fs.Debug(o, "Skipping directory")
		return false
	}
	return true
@@ -388,7 +436,7 @@ func (o *FsObjectLocal) Open() (in io.ReadCloser, err error) {
// Update the object from in with modTime and size
func (o *FsObjectLocal) Update(in io.Reader, modTime time.Time, size int64) error {
	dir := path.Dir(o.path)
	err := os.MkdirAll(dir, 0770)
	err := os.MkdirAll(dir, 0777)
	if err != nil {
		return err
	}
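
cleanUtf8 above relies on a Go language guarantee: converting a string to []rune turns every byte sequence that is not valid UTF-8 into utf8.RuneError (U+FFFD), so the round trip string([]rune(name)) always yields a valid string. A tiny self-contained demonstration:

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	name := "file\xffname"               // \xff is not valid UTF-8
	fmt.Println(utf8.ValidString(name)) // false
	cleaned := string([]rune(name))     // invalid bytes become U+FFFD
	fmt.Println(utf8.ValidString(cleaned), cleaned)
}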
54 local/local_test.go Normal file
@@ -0,0 +1,54 @@
// Test Local filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package local_test

import (
	"testing"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
	"github.com/ncw/rclone/local"
)

func init() {
	fstests.NilObject = fs.Object((*local.FsObjectLocal)(nil))
	fstests.RemoteName = ""
}

// Generic tests for the Fs
func TestInit(t *testing.T)                  { fstests.TestInit(t) }
func TestFsString(t *testing.T)              { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T)          { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T)       { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T)               { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T)           { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T)        { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T)            { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T)            { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T)        { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T)         { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T)            { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T)           { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T)         { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T)       { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T)                { fstests.TestFsCopy(t) }
func TestFsRmdirFull(t *testing.T)           { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T)           { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T)          { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T)              { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T)          { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T)          { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T)         { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T)      { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T)            { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T)            { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T)          { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T)        { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T)             { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T)     { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T)          { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T)           { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T)              { fstests.TestFinalise(t) }
79 make_manual.py Executable file
@@ -0,0 +1,79 @@
#!/usr/bin/python
"""
Make single page versions of the documentation for release and
conversion into man pages etc.
"""

import os
import re
from datetime import datetime

docpath = "docs/content"
outfile = "MANUAL.md"

# Order to add docs segments to make outfile
docs = [
    "about.md",
    "install.md",
    "docs.md",
    "drive.md",
    "s3.md",
    "swift.md",
    "dropbox.md",
    "googlecloudstorage.md",
    "local.md",
    "changelog.md",
    "bugs.md",
    "faq.md",
    "licence.md",
    "authors.md",
    "contact.md",
]

# Docs which aren't made into outfile
ignore_docs = [
    "downloads.md",
    "privacy.md",
]

def read_doc(doc):
    """Read file as a string"""
    path = os.path.join(docpath, doc)
    with open(path) as fd:
        contents = fd.read()
    parts = contents.split("---\n", 2)
    if len(parts) != 3:
        raise ValueError("Couldn't find --- markers: found %d parts" % len(parts))
    contents = parts[2].strip()+"\n\n"
    # Remove icons
    contents = re.sub(r'<i class="fa.*?</i>\s*', "", contents)
    # Make [...](/links/) absolute
    contents = re.sub(r'\((\/.*?\/)\)', r"(http://rclone.org\1)", contents)
    return contents

def check_docs(docpath):
    """Check all the docs are in docpath"""
    files = set(f for f in os.listdir(docpath) if f.endswith(".md"))
    files -= set(ignore_docs)
    docs_set = set(docs)
    if files == docs_set:
        return
    print "Files on disk but not in docs variable: %s" % ", ".join(files - docs_set)
    print "Files in docs variable but not on disk: %s" % ", ".join(docs_set - files)
    raise ValueError("Missing files")

def main():
    check_docs(docpath)
    with open(outfile, "w") as out:
        out.write("""\
%% rclone(1) User Manual
%% Nick Craig-Wood
%% %s

""" % datetime.now().strftime("%b %d, %Y"))
        for doc in docs:
            out.write(read_doc(doc))
    print "Written '%s'" % outfile

if __name__ == "__main__":
    main()
62 notes.txt
@@ -1,3 +1,26 @@
Perhaps make Md5sum() and Modtime() optional. Define the zero values
"" and 0. Make it so we can support remotes which can't do those.

Fix the docs
  * factor the README.md into the docs directory
  * create it as part of make by assembling other parts
  * write long docs about each flag

Change lsd command so it doesn't show -1
  * Make sure all Fses show -1 for objects Zero for dates etc
  * Make test?

Put the TestRemote names into the Fs description
Make test_all.sh use the TestRemote name automatically

Run errcheck and go vet in the make file
.. Also race detector?
.. go tool vet -shadow

Get rid of Storable?

Write developer manual

Todo
  * FIXME: More -dry-run checks for object transfer
  * Might be quicker to check md5sums first? for swift <-> swift certainly, and maybe for small files
@@ -5,24 +28,23 @@ Todo
  * if object.PseudoDirectory {
  * fmt.Printf("%9s %19s %s\n", "Directory", "-", fs.Remote())
  * Make Account wrapper
    * limit bandwidth for a pool of all individual connections
    * do timeouts by setting a limit, seeing whether io has happened
      and resetting it if it has
    * make Account do progress meter
  * Make logging controllable with flags (mostly done)
  * -timeout: Make all timeouts be settable with command line parameters
  * Windows paths? Do we need to translate / and \?
  * Make a fs.Errorf and count errors and log them at a different level
  * Add max object size to fs metadata - 5GB for swift, infinite for local, ? for s3
    * tie into -max-size flag
  * FIXME Make NewFs to return err.IsAnObject so can put the LimitedFs
    creation in common code? Or try for as much as possible?
  * FIXME Account all the transactions (ls etc) using a different
    Roundtripper wrapper which wraps the transactions?
  * FIXME write tests for local file system
  * FIXME implement tests for single file operations in rclonetest
  * Need to make directory objects otherwise can't upload an empty directory
    * Or could upload empty directories only?

More rsync features
  * include
  * exclude
  * max size
  * -c, --checksum   skip based on checksum, not mod-time & size

Ideas for flags
  * --retries N flag which would make rclone retry a sync until successful or it tried N times.

Ideas
  * could do encryption - put IV into metadata?
@@ -35,26 +57,6 @@ Ideas
  * control times sync (which is slow with some remotes) with -a --archive flag?
  * Copy a glob pattern - could do with LimitedFs

s3
  * Can maybe set last modified?
  * https://forums.aws.amazon.com/message.jspa?messageID=214062
  * Otherwise can set metadata
  * Returns etag and last modified in bucket list

Bugs
  * Non verbose - not sure number transferred got counted up? CHECK
  * When doing copy it recurses the whole of the destination FS which isn't necessary

Making a release
  * go install -v ./...
  * go test ./...
  * rclonetest/test.sh
  * make tag
  * edit README.md
  * git commit fs/version.go README.md docs/content/downloads.md
  * make retag
  * . ~/bin/go-cross
  * make cross
  * make upload
  * make upload_website
  * git push --tags origin master
186 rclone.go
@@ -28,18 +28,21 @@ import (
var (
	// Flags
	cpuprofile    = pflag.StringP("cpuprofile", "", "", "Write cpu profile to file")
	statsInterval = pflag.DurationP("stats", "", time.Minute*1, "Interval to print stats")
	statsInterval = pflag.DurationP("stats", "", time.Minute*1, "Interval to print stats (0 to disable)")
	version       = pflag.BoolP("version", "V", false, "Print the version number")
	logFile       = pflag.StringP("log-file", "", "", "Log everything to this file")
	retries       = pflag.IntP("retries", "", 3, "Retry operations this many times if they fail")
)

type Command struct {
	Name     string
	Help     string
	ArgsHelp string
	Run      func(fdst, fsrc fs.Fs)
	Run      func(fdst, fsrc fs.Fs) error
	MinArgs  int
	MaxArgs  int
	NoStats  bool
	Retry    bool
}

// checkArgs checks there are enough arguments and prints a message if not
@@ -58,149 +61,141 @@ func (cmd *Command) checkArgs(args []string) {
var Commands = []Command{
	{
		Name:     "copy",
		ArgsHelp: "source://path dest://path",
		ArgsHelp: "source:path dest:path",
		Help: `
        Copy the source to the destination.  Doesn't transfer
        unchanged files, testing first by modification time then by
        unchanged files, testing by size and modification time or
        MD5SUM.  Doesn't delete files from the destination.`,
		Run: func(fdst, fsrc fs.Fs) {
			err := fs.Sync(fdst, fsrc, false)
			if err != nil {
				log.Fatalf("Failed to copy: %v", err)
			}
		Run: func(fdst, fsrc fs.Fs) error {
			return fs.CopyDir(fdst, fsrc)
		},
		MinArgs: 2,
		MaxArgs: 2,
		Retry:   true,
	},
	{
		Name:     "sync",
		ArgsHelp: "source://path dest://path",
		ArgsHelp: "source:path dest:path",
		Help: `
        Sync the source to the destination.  Doesn't transfer
        unchanged files, testing first by modification time then by
        MD5SUM.  Deletes any files that exist in source that don't
        exist in destination. Since this can cause data loss, test
        first with the --dry-run flag.`,
		Run: func(fdst, fsrc fs.Fs) {
			err := fs.Sync(fdst, fsrc, true)
			if err != nil {
				log.Fatalf("Failed to sync: %v", err)
			}
        Sync the source to the destination, changing the destination
        only.  Doesn't transfer unchanged files, testing by size and
        modification time or MD5SUM.  Destination is updated to match
        source, including deleting files if necessary.  Since this can
        cause data loss, test first with the --dry-run flag.`,
		Run: func(fdst, fsrc fs.Fs) error {
			return fs.Sync(fdst, fsrc)
		},
		MinArgs: 2,
		MaxArgs: 2,
		Retry:   true,
	},
	{
		Name:     "move",
		ArgsHelp: "source:path dest:path",
		Help: `
        Moves the source to the destination.  This is equivalent to a
        copy followed by a purge, but may use server side operations
        to speed it up.  Since this can cause data loss, test first
        with the --dry-run flag.`,
		Run: func(fdst, fsrc fs.Fs) error {
			return fs.MoveDir(fdst, fsrc)
		},
		MinArgs: 2,
		MaxArgs: 2,
		Retry:   true,
	},
	{
		Name:     "ls",
		ArgsHelp: "[remote://path]",
		ArgsHelp: "[remote:path]",
		Help: `
        List all the objects in the path with size and path.`,
		Run: func(fdst, fsrc fs.Fs) {
			err := fs.List(fdst)
			if err != nil {
				log.Fatalf("Failed to list: %v", err)
			}
		Run: func(fdst, fsrc fs.Fs) error {
			return fs.List(fdst, os.Stdout)
		},
		MinArgs: 1,
		MaxArgs: 1,
	},
	{
		Name:     "lsd",
		ArgsHelp: "[remote://path]",
		ArgsHelp: "[remote:path]",
		Help: `
        List all directories/containers/buckets in the path.`,
		Run: func(fdst, fsrc fs.Fs) {
			err := fs.ListDir(fdst)
			if err != nil {
				log.Fatalf("Failed to listdir: %v", err)
			}
		Run: func(fdst, fsrc fs.Fs) error {
			return fs.ListDir(fdst, os.Stdout)
		},
		MinArgs: 1,
		MaxArgs: 1,
	},
	{
		Name:     "lsl",
		ArgsHelp: "[remote://path]",
		ArgsHelp: "[remote:path]",
		Help: `
        List all the objects in the path with modification time, size and path.`,
		Run: func(fdst, fsrc fs.Fs) {
			err := fs.ListLong(fdst)
			if err != nil {
				log.Fatalf("Failed to list long: %v", err)
			}
        List all the objects in the path with modification time,
        size and path.`,
		Run: func(fdst, fsrc fs.Fs) error {
			return fs.ListLong(fdst, os.Stdout)
		},
		MinArgs: 1,
		MaxArgs: 1,
	},
	{
		Name:     "md5sum",
		ArgsHelp: "[remote://path]",
		ArgsHelp: "[remote:path]",
		Help: `
        Produces an md5sum file for all the objects in the path.`,
		Run: func(fdst, fsrc fs.Fs) {
			err := fs.Md5sum(fdst)
			if err != nil {
				log.Fatalf("Failed to list: %v", err)
			}
        Produces an md5sum file for all the objects in the path.  This
        is in the same format as the standard md5sum tool produces.`,
		Run: func(fdst, fsrc fs.Fs) error {
			return fs.Md5sum(fdst, os.Stdout)
		},
		MinArgs: 1,
		MaxArgs: 1,
	},
	{
		Name:     "mkdir",
		ArgsHelp: "remote://path",
		ArgsHelp: "remote:path",
		Help: `
        Make the path if it doesn't already exist`,
		Run: func(fdst, fsrc fs.Fs) {
			err := fs.Mkdir(fdst)
			if err != nil {
				log.Fatalf("Failed to mkdir: %v", err)
			}
		Run: func(fdst, fsrc fs.Fs) error {
			return fs.Mkdir(fdst)
		},
		MinArgs: 1,
		MaxArgs: 1,
		Retry:   true,
	},
	{
		Name:     "rmdir",
		ArgsHelp: "remote://path",
		ArgsHelp: "remote:path",
		Help: `
        Remove the path.  Note that you can't remove a path with
        objects in it, use purge for that.`,
		Run: func(fdst, fsrc fs.Fs) {
			err := fs.Rmdir(fdst)
			if err != nil {
				log.Fatalf("Failed to rmdir: %v", err)
			}
		Run: func(fdst, fsrc fs.Fs) error {
			return fs.Rmdir(fdst)
		},
		MinArgs: 1,
		MaxArgs: 1,
		Retry:   true,
	},
	{
		Name:     "purge",
		ArgsHelp: "remote://path",
		ArgsHelp: "remote:path",
		Help: `
        Remove the path and all of its contents.`,
		Run: func(fdst, fsrc fs.Fs) {
			err := fs.Purge(fdst)
			if err != nil {
				log.Fatalf("Failed to purge: %v", err)
			}
		Run: func(fdst, fsrc fs.Fs) error {
			return fs.Purge(fdst)
		},
		MinArgs: 1,
		MaxArgs: 1,
		Retry:   true,
	},
	{
		Name:     "check",
		ArgsHelp: "source://path dest://path",
		ArgsHelp: "source:path dest:path",
		Help: `
        Checks the files in the source and destination match.  It
        compares sizes and MD5SUMs and prints a report of files which
        don't match.  It doesn't alter the source or destination.`,
		Run: func(fdst, fsrc fs.Fs) {
			err := fs.Check(fdst, fsrc)
			if err != nil {
				log.Fatalf("Failed to check: %v", err)
			}
		Run: func(fdst, fsrc fs.Fs) error {
			return fs.Check(fdst, fsrc)
		},
		MinArgs: 2,
		MaxArgs: 2,
@@ -209,8 +204,9 @@ var Commands = []Command{
		Name: "config",
		Help: `
        Enter an interactive configuration session.`,
		Run: func(fdst, fsrc fs.Fs) {
		Run: func(fdst, fsrc fs.Fs) error {
			fs.EditConfig()
			return nil
		},
		NoStats: true,
	},
@@ -240,7 +236,8 @@ Subcommands:
	fmt.Fprintf(os.Stderr, "Options:\n")
	pflag.PrintDefaults()
	fmt.Fprintf(os.Stderr, `
It is only necessary to use a unique prefix of the subcommand, eg 'up' for 'upload'.
It is only necessary to use a unique prefix of the subcommand, eg 'up'
for 'upload'.
`)
}

@@ -265,7 +262,11 @@ func ParseFlags() {
		fs.Stats.Error()
		log.Fatal(err)
	}
	pprof.StartCPUProfile(f)
	err = pprof.StartCPUProfile(f)
	if err != nil {
		fs.Stats.Error()
		log.Fatal(err)
	}
	defer pprof.StopCPUProfile()
	}
}
@@ -319,6 +320,9 @@ func NewFs(remote string) fs.Fs {

// Print the stats every statsInterval
func StartStats() {
	if *statsInterval <= 0 {
		return
	}
	go func() {
		ch := time.Tick(*statsInterval)
		for {
@@ -336,6 +340,17 @@ func main() {
	}
	command, args := ParseCommand()

	// Log file output
	if *logFile != "" {
		f, err := os.OpenFile(*logFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640)
		if err != nil {
			log.Fatalf("Failed to open log file: %v", err)
		}
		f.Seek(0, os.SEEK_END)
		log.SetOutput(f)
		redirectStderr(f)
	}

	// Make source and destination fs
	var fdst, fsrc fs.Fs
	if len(args) >= 1 {
@@ -354,12 +369,29 @@ func main() {

	// Run the actual command
	if command.Run != nil {
		command.Run(fdst, fsrc)
		if !command.NoStats {
			fmt.Println(fs.Stats)
		var err error
		for try := 1; try <= *retries; try++ {
			err = command.Run(fdst, fsrc)
			if !command.Retry || (err == nil && !fs.Stats.Errored()) {
				break
			}
			if err != nil {
				fs.Log(nil, "Attempt %d/%d failed with %d errors and: %v", try, *retries, fs.Stats.GetErrors(), err)
			} else {
				fs.Log(nil, "Attempt %d/%d failed with %d errors", try, *retries, fs.Stats.GetErrors())
			}
			if try < *retries {
				fs.Stats.ResetErrors()
			}
		}
		if err != nil {
			log.Fatalf("Failed to %s: %v", command.Name, err)
		}
		if !command.NoStats && (!fs.Config.Quiet || fs.Stats.Errored() || *statsInterval > 0) {
			fmt.Fprintln(os.Stderr, fs.Stats)
		}
		if fs.Config.Verbose {
			log.Printf("*** Go routines at exit %d\n", runtime.NumGoroutine())
			fs.Debug(nil, "Go routines at exit %d\n", runtime.NumGoroutine())
		}
		if fs.Stats.Errored() {
			os.Exit(1)
@@ -1,419 +0,0 @@
|
||||
// Test rclone by doing real transactions to a storage provider to and
|
||||
// from the local disk
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ogier/pflag"
|
||||
|
||||
// Active file systems
|
||||
_ "github.com/ncw/rclone/drive"
|
||||
_ "github.com/ncw/rclone/dropbox"
|
||||
_ "github.com/ncw/rclone/googlecloudstorage"
|
||||
_ "github.com/ncw/rclone/local"
|
||||
_ "github.com/ncw/rclone/s3"
|
||||
_ "github.com/ncw/rclone/swift"
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
localName, remoteName string
|
||||
version = pflag.BoolP("version", "V", false, "Print the version number")
|
||||
subDir = pflag.BoolP("subdir", "S", false, "Test with a sub directory")
|
||||
)
|
||||
|
||||
// Represents an item for checking
|
||||
type Item struct {
|
||||
Path string
|
||||
Md5sum string
|
||||
ModTime time.Time
|
||||
Size int64
|
||||
}
|
||||
|
||||
// Represents all items for checking
|
||||
type Items struct {
|
||||
byName map[string]*Item
|
||||
items []Item
|
||||
}
|
||||
|
||||
// Make an Items
|
||||
func NewItems(items []Item) *Items {
|
||||
is := &Items{
|
||||
byName: make(map[string]*Item),
|
||||
items: items,
|
||||
}
|
||||
// Fill up byName
|
||||
for i := range items {
|
||||
is.byName[items[i].Path] = &items[i]
|
||||
}
|
||||
return is
|
||||
}
|
||||
|
||||
// Check off an item
|
||||
func (is *Items) Find(obj fs.Object) {
|
||||
i, ok := is.byName[obj.Remote()]
|
||||
if !ok {
|
||||
log.Fatalf("Unexpected file %q", obj.Remote())
|
||||
}
|
||||
delete(is.byName, obj.Remote())
|
||||
// Check attributes
|
||||
Md5sum, err := obj.Md5sum()
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to read md5sum for %q: %v", obj.Remote(), err)
|
||||
}
|
||||
if i.Md5sum != Md5sum {
|
||||
log.Fatalf("%s: Md5sum incorrect - expecting %q got %q", obj.Remote(), i.Md5sum, Md5sum)
|
||||
}
|
||||
if i.Size != obj.Size() {
|
||||
log.Fatalf("%s: Size incorrect - expecting %d got %d", obj.Remote(), i.Size, obj.Size())
|
||||
}
|
||||
// check the mod time to the given precision
|
||||
modTime := obj.ModTime()
|
||||
dt := modTime.Sub(i.ModTime)
|
||||
if dt >= fs.Config.ModifyWindow || dt <= -fs.Config.ModifyWindow {
|
||||
log.Fatalf("%s: Modification time difference too big |%s| > %s (%s vs %s)", obj.Remote(), dt, fs.Config.ModifyWindow, modTime, i.ModTime)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Check all done
|
||||
func (is *Items) Done() {
|
||||
if len(is.byName) != 0 {
|
||||
for name := range is.byName {
|
||||
log.Printf("Not found %q", name)
|
||||
}
|
||||
log.Fatalf("%d objects not found", len(is.byName))
|
||||
}
|
||||
}
|
||||
|
||||
// Checks the fs to see if it has the expected contents
|
||||
func CheckListing(f fs.Fs, items []Item) {
|
||||
is := NewItems(items)
|
||||
for obj := range f.List() {
|
||||
is.Find(obj)
|
||||
}
|
||||
is.Done()
|
||||
}
|
||||
|
||||
// Parse a time string or explode
|
||||
func Time(timeString string) time.Time {
|
||||
t, err := time.Parse(time.RFC3339Nano, timeString)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to parse time %q: %v", timeString, err)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// Write a file
|
||||
func WriteFile(filePath, content string, t time.Time) {
|
||||
// FIXME make directories?
|
||||
filePath = path.Join(localName, filePath)
|
||||
dirPath := path.Dir(filePath)
|
||||
err := os.MkdirAll(dirPath, 0770)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to make directories %q: %v", dirPath, err)
|
||||
}
|
||||
err = ioutil.WriteFile(filePath, []byte(content), 0600)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to write file %q: %v", filePath, err)
|
||||
}
|
||||
err = os.Chtimes(filePath, t, t)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to chtimes file %q: %v", filePath, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Create a random string
|
||||
func RandomString(n int) string {
|
||||
source := "abcdefghijklmnopqrstuvwxyz0123456789"
|
||||
out := make([]byte, n)
|
||||
for i := range out {
|
||||
out[i] = source[rand.Intn(len(source))]
|
||||
}
|
||||
return string(out)
|
||||
}
|
||||
|
||||
func TestMkdir(flocal, fremote fs.Fs) {
|
||||
err := fs.Mkdir(fremote)
|
||||
if err != nil {
|
||||
log.Fatalf("Mkdir failed: %v", err)
|
||||
}
|
||||
items := []Item{}
|
||||
CheckListing(flocal, items)
|
||||
CheckListing(fremote, items)
|
||||
}
|
||||
|
||||
var t1 = Time("2001-02-03T04:05:06.499999999Z")
|
||||
var t2 = Time("2011-12-25T12:59:59.123456789Z")
|
||||
var t3 = Time("2011-12-30T12:59:59.000000000Z")
|
||||
|
||||
func TestCopy(flocal, fremote fs.Fs) {
|
||||
WriteFile("sub dir/hello world", "hello world", t1)
|
||||
|
||||
// Check dry run is working
|
||||
log.Printf("Copy with --dry-run")
|
||||
fs.Config.DryRun = true
|
||||
err := fs.Sync(fremote, flocal, false)
|
||||
fs.Config.DryRun = false
|
||||
if err != nil {
|
||||
log.Fatalf("Copy failed: %v", err)
|
||||
}
|
||||
|
||||
items := []Item{
|
||||
{Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
|
||||
}
|
||||
|
||||
CheckListing(flocal, items)
|
||||
CheckListing(fremote, []Item{})
|
||||
|
||||
// Now without dry run
|
||||
|
||||
log.Printf("Copy")
|
||||
err = fs.Sync(fremote, flocal, false)
|
||||
if err != nil {
|
||||
log.Fatalf("Copy failed: %v", err)
|
||||
}
|
||||
|
||||
CheckListing(flocal, items)
|
||||
CheckListing(fremote, items)
|
||||
|
||||
// Now delete the local file and download it
|
||||
|
||||
err = os.Remove(localName + "/sub dir/hello world")
|
||||
if err != nil {
|
||||
log.Fatalf("Remove failed: %v", err)
|
||||
}
|
||||
|
||||
CheckListing(flocal, []Item{})
|
||||
CheckListing(fremote, items)
|
||||
|
||||
log.Printf("Copy - redownload")
|
||||
err = fs.Sync(flocal, fremote, false)
|
||||
if err != nil {
|
||||
log.Fatalf("Copy failed: %v", err)
|
||||
}
|
||||
|
||||
CheckListing(flocal, items)
|
||||
CheckListing(fremote, items)
|
||||
|
||||
// Clean the directory
|
||||
cleanTempDir()
|
||||
}
|
||||
|
||||
func TestSync(flocal, fremote fs.Fs) {
|
||||
WriteFile("empty space", "", t1)
|
||||
|
||||
log.Printf("Sync after changing file modtime only")
|
||||
err := os.Chtimes(localName+"/empty space", t2, t2)
|
||||
if err != nil {
|
||||
log.Fatalf("Chtimes failed: %v", err)
|
||||
}
|
||||
err = fs.Sync(fremote, flocal, true)
|
||||
if err != nil {
|
||||
log.Fatalf("Sync failed: %v", err)
|
||||
}
|
||||
items := []Item{
|
||||
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
|
||||
}
|
||||
CheckListing(flocal, items)
|
||||
CheckListing(fremote, items)
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
log.Printf("Sync after adding a file")
|
||||
WriteFile("potato", "------------------------------------------------------------", t3)
|
||||
err = fs.Sync(fremote, flocal, true)
|
||||
if err != nil {
|
||||
log.Fatalf("Sync failed: %v", err)
|
||||
}
|
||||
items = []Item{
|
||||
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
|
||||
{Path: "potato", Size: 60, ModTime: t3, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
|
||||
}
|
||||
CheckListing(flocal, items)
|
||||
CheckListing(fremote, items)
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
log.Printf("Sync after changing a file's size only")
|
||||
WriteFile("potato", "smaller but same date", t3)
|
||||
err = fs.Sync(fremote, flocal, true)
|
||||
if err != nil {
|
||||
log.Fatalf("Sync failed: %v", err)
|
||||
}
|
||||
items = []Item{
|
||||
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
|
||||
{Path: "potato", Size: 21, ModTime: t3, Md5sum: "100defcf18c42a1e0dc42a789b107cd2"},
|
||||
}
|
||||
CheckListing(flocal, items)
|
||||
CheckListing(fremote, items)
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
log.Printf("Sync after removing a file and adding a file --dry-run")
|
||||
WriteFile("potato2", "------------------------------------------------------------", t1)
|
||||
err = os.Remove(localName + "/potato")
|
||||
if err != nil {
|
||||
log.Fatalf("Remove failed: %v", err)
|
||||
}
|
||||
fs.Config.DryRun = true
|
||||
err = fs.Sync(fremote, flocal, true)
|
||||
fs.Config.DryRun = false
|
||||
if err != nil {
|
||||
log.Fatalf("Sync failed: %v", err)
|
||||
}
|
||||
|
||||
before := []Item{
|
||||
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
|
||||
{Path: "potato", Size: 21, ModTime: t3, Md5sum: "100defcf18c42a1e0dc42a789b107cd2"},
|
||||
}
|
||||
items = []Item{
|
||||
{Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
|
||||
{Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
|
||||
}
|
||||
CheckListing(flocal, items)
|
||||
CheckListing(fremote, before)
|
||||
|
||||
log.Printf("Sync after removing a file and adding a file")
|
||||
err = fs.Sync(fremote, flocal, true)
|
||||
if err != nil {
|
||||
log.Fatalf("Sync failed: %v", err)
|
||||
}
|
||||
CheckListing(flocal, items)
|
||||
CheckListing(fremote, items)
|
||||
}
|
||||
|
||||
func TestLs(flocal, fremote fs.Fs) {
|
||||
// Underlying List has been tested above, so we just make sure it runs
|
||||
err := fs.List(fremote)
|
||||
if err != nil {
|
||||
log.Fatalf("List failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLsd(flocal, fremote fs.Fs) {
|
||||
}
|
||||
|
||||
func TestCheck(flocal, fremote fs.Fs) {
|
||||
}
|
||||
|
||||
func TestPurge(fremote fs.Fs) {
|
||||
err := fs.Purge(fremote)
|
||||
if err != nil {
|
||||
log.Fatalf("Purge failed: %v", err)
|
||||
}
|
||||
unexpected := 0
|
||||
for obj := range fremote.List() {
|
||||
unexpected++
|
||||
log.Printf("Found unexpected item %s", obj.Remote())
|
||||
}
|
||||
if unexpected != 0 {
|
||||
log.Fatalf("exiting as found %d unexpected items", unexpected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRmdir(flocal, fremote fs.Fs) {
|
||||
err := fs.Rmdir(fremote)
|
||||
if err != nil {
|
||||
log.Fatalf("Rmdir failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func syntaxError() {
|
||||
fmt.Fprintf(os.Stderr, `Test rclone with a remote to find bugs in either - %s.
|
||||
|
||||
Syntax: [options] remote:
|
||||
|
||||
Need a remote: as argument. This will create a random container or
|
||||
directory under it and perform tests on it, deleting it at the end.
|
||||
|
||||
Options:
|
||||
|
||||
`, fs.Version)
|
||||
pflag.PrintDefaults()
|
||||
}
|
||||
|
||||
// Clean the temporary directory
|
||||
func cleanTempDir() {
|
||||
log.Printf("Cleaning temporary directory: %q", localName)
|
||||
err := os.RemoveAll(localName)
|
||||
if err != nil {
|
||||
log.Printf("Failed to remove %q: %v", localName, err)
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
pflag.Usage = syntaxError
|
||||
pflag.Parse()
|
||||
if *version {
|
||||
fmt.Printf("rclonetest %s\n", fs.Version)
|
||||
os.Exit(0)
|
||||
}
|
||||
fs.LoadConfig()
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
args := pflag.Args()
|
||||
|
||||
if len(args) != 1 {
|
||||
syntaxError()
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
remoteName = args[0]
|
||||
if !strings.HasSuffix(remoteName, ":") {
|
||||
remoteName += "/"
|
||||
}
|
||||
remoteName += RandomString(32)
|
||||
var parentRemote fs.Fs
|
||||
if *subDir {
|
||||
var err error
|
||||
parentRemote, err = fs.NewFs(remoteName)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to make parent %q: %v", remoteName, err)
|
||||
}
|
||||
remoteName += "/" + RandomString(8)
|
||||
}
|
||||
log.Printf("Testing with remote %q", remoteName)
|
||||
var err error
|
||||
localName, err = ioutil.TempDir("", "rclone")
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create temp dir: %v", err)
|
||||
}
|
||||
log.Printf("Testing with local %q", localName)
|
||||
|
||||
fremote, err := fs.NewFs(remoteName)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to make %q: %v", remoteName, err)
|
||||
}
|
||||
flocal, err := fs.NewFs(localName)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to make %q: %v", remoteName, err)
|
||||
}
|
||||
|
||||
fs.CalculateModifyWindow(fremote, flocal)
|
||||
|
||||
TestMkdir(flocal, fremote)
|
||||
TestCopy(flocal, fremote)
|
||||
TestSync(flocal, fremote)
|
||||
TestLs(flocal, fremote)
|
||||
TestLsd(flocal, fremote)
|
||||
TestCheck(flocal, fremote)
|
||||
TestPurge(fremote)
|
||||
//TestRmdir(flocal, fremote)
|
||||
|
||||
if parentRemote != nil {
|
||||
TestPurge(parentRemote)
|
||||
}
|
||||
|
||||
cleanTempDir()
|
||||
log.Printf("Tests OK")
|
||||
}
|
||||
@@ -1,25 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
go install
|
||||
|
||||
REMOTES="
|
||||
memstore:
|
||||
s3:
|
||||
drive2:
|
||||
gcs:
|
||||
dropbox:
|
||||
/tmp/z
|
||||
"
|
||||
|
||||
function test_remote {
|
||||
args=$@
|
||||
rclonetest $args || {
|
||||
echo "*** rclonetest $args FAILED ***"
|
||||
exit 1
|
||||
}
|
||||
}
|
||||
|
||||
for remote in $REMOTES; do
|
||||
test_remote $remote
|
||||
test_remote --subdir $remote
|
||||
done
|
||||
15
redirect_stderr.go
Normal file
15
redirect_stderr.go
Normal file
@@ -0,0 +1,15 @@
|
||||
// Log the panic to the log file - for oses which can't do this
|
||||
|
||||
//+build !windows,!unix
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
// redirectStderr to the file passed in
|
||||
func redirectStderr(f *os.File) {
|
||||
log.Printf("Can't redirect stderr to file")
|
||||
}
|
||||
19
redirect_stderr_unix.go
Normal file
19
redirect_stderr_unix.go
Normal file
@@ -0,0 +1,19 @@
|
||||
// Log the panic under unix to the log file
|
||||
|
||||
//+build unix
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// redirectStderr to the file passed in
|
||||
func redirectStderr(f *os.File) {
|
||||
err := syscall.Dup2(int(f.Fd()), int(os.Stderr.Fd()))
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to redirect stderr to file: %v", err)
|
||||
}
|
||||
}
|
||||
39
redirect_stderr_windows.go
Normal file
39
redirect_stderr_windows.go
Normal file
@@ -0,0 +1,39 @@
|
||||
// Log the panic under windows to the log file
|
||||
//
|
||||
// Code from minix, via
|
||||
//
|
||||
// http://play.golang.org/p/kLtct7lSUg
|
||||
|
||||
//+build windows
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
var (
|
||||
kernel32 = syscall.MustLoadDLL("kernel32.dll")
|
||||
procSetStdHandle = kernel32.MustFindProc("SetStdHandle")
|
||||
)
|
||||
|
||||
func setStdHandle(stdhandle int32, handle syscall.Handle) error {
|
||||
r0, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0)
|
||||
if r0 == 0 {
|
||||
if e1 != 0 {
|
||||
return error(e1)
|
||||
}
|
||||
return syscall.EINVAL
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// redirectStderr to the file passed in
|
||||
func redirectStderr(f *os.File) {
|
||||
err := setStdHandle(syscall.STD_ERROR_HANDLE, syscall.Handle(f.Fd()))
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to redirect stderr to file: %v", err)
|
||||
}
|
||||
}
|
||||
439
s3/s3.go
439
s3/s3.go
@@ -3,20 +3,32 @@ package s3
|
||||
|
||||
// FIXME need to prevent anything but ListDir working for s3://
|
||||
|
||||
/*
|
||||
Progress of port to aws-sdk
|
||||
|
||||
* Don't really need o.meta at all?
|
||||
|
||||
What happens if you CTRL-C a multipart upload
|
||||
* get an incomplete upload
|
||||
* disappears when you delete the bucket
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"mime"
|
||||
"net/http"
|
||||
"path"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/goamz/aws"
|
||||
"github.com/ncw/goamz/s3"
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/corehandlers"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/aws/aws-sdk-go/service/s3/s3manager"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/swift"
|
||||
)
|
||||
@@ -34,39 +46,48 @@ func init() {
|
||||
Name: "secret_access_key",
|
||||
Help: "AWS Secret Access Key (password). ",
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for S3 API.",
|
||||
Name: "region",
|
||||
Help: "Region to connect to.",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "https://s3.amazonaws.com/",
|
||||
Value: "us-east-1",
|
||||
Help: "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia or Pacific Northwest.\nLeave location constraint empty.",
|
||||
}, {
|
||||
Value: "https://s3-external-1.amazonaws.com",
|
||||
Help: "US Region, Northern Virginia only.\nLeave location constraint empty.",
|
||||
}, {
|
||||
Value: "https://s3-us-west-2.amazonaws.com",
|
||||
Value: "us-west-2",
|
||||
Help: "US West (Oregon) Region\nNeeds location constraint us-west-2.",
|
||||
}, {
|
||||
Value: "https://s3-us-west-1.amazonaws.com",
|
||||
Value: "us-west-1",
|
||||
Help: "US West (Northern California) Region\nNeeds location constraint us-west-1.",
|
||||
}, {
|
||||
Value: "https://s3-eu-west-1.amazonaws.com",
|
||||
Value: "eu-west-1",
|
||||
Help: "EU (Ireland) Region Region\nNeeds location constraint EU or eu-west-1.",
|
||||
}, {
|
||||
Value: "https://s3-ap-southeast-1.amazonaws.com",
|
||||
Value: "eu-central-1",
|
||||
Help: "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.",
|
||||
}, {
|
||||
Value: "ap-southeast-1",
|
||||
Help: "Asia Pacific (Singapore) Region\nNeeds location constraint ap-southeast-1.",
|
||||
}, {
|
||||
Value: "https://s3-ap-southeast-2.amazonaws.com",
|
||||
Help: "Asia Pacific (Sydney) Region\nNeeds location constraint .",
|
||||
Value: "ap-southeast-2",
|
||||
Help: "Asia Pacific (Sydney) Region\nNeeds location constraint ap-southeast-2.",
|
||||
}, {
|
||||
Value: "https://s3-ap-northeast-1.amazonaws.com",
|
||||
Value: "ap-northeast-1",
|
||||
Help: "Asia Pacific (Tokyo) Region\nNeeds location constraint ap-northeast-1.",
|
||||
}, {
|
||||
Value: "https://s3-sa-east-1.amazonaws.com",
|
||||
Value: "sa-east-1",
|
||||
Help: "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
|
||||
}, {
|
||||
Value: "other-v2-signature",
|
||||
Help: "If using an S3 clone that only understands v2 signatures - eg Ceph - set this and make sure you set the endpoint.",
|
||||
}, {
|
||||
Value: "other-v4-signature",
|
||||
Help: "If using an S3 clone that understands v4 signatures set this and make sure you set the endpoint.",
|
||||
}},
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for S3 API.\nLeave blank if using AWS to use the default endpoint for the region.\nSpecify if using an S3 clone such as Ceph.",
|
||||
}, {
|
||||
Name: "location_constraint",
|
||||
Help: "Location constraint - must be set to match the Endpoint.",
|
||||
Help: "Location constraint - must be set to match the Region. Used when creating buckets only.",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "Empty for US Region, Northern Virginia or Pacific Northwest.",
|
||||
@@ -101,16 +122,19 @@ func init() {
|
||||
|
||||
// Constants
|
||||
const (
|
||||
metaMtime = "X-Amz-Meta-Mtime" // the meta key to store mtime in
|
||||
metaMtime = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime
|
||||
listChunkSize = 1024 // number of items to read at once
|
||||
maxRetries = 10 // number of retries to make of operations
|
||||
)
|
||||
|
||||
// FsS3 represents a remote s3 server
|
||||
type FsS3 struct {
|
||||
c *s3.S3 // the connection to the s3 server
|
||||
b *s3.Bucket // the connection to the bucket
|
||||
bucket string // the bucket we are working on
|
||||
perm s3.ACL // permissions for new buckets / objects
|
||||
root string // root of the bucket - ignore all objects above this
|
||||
name string // the name of the remote
|
||||
c *s3.S3 // the connection to the s3 server
|
||||
bucket string // the bucket we are working on
|
||||
perm string // permissions for new buckets / objects
|
||||
root string // root of the bucket - ignore all objects above this
|
||||
locationConstraint string // location constraint of new buckets
|
||||
}
|
||||
|
||||
// FsObjectS3 describes a s3 object
|
||||
@@ -119,16 +143,21 @@ type FsObjectS3 struct {
|
||||
//
|
||||
// List will read everything but meta - to fill that in need to call
|
||||
// readMetaData
|
||||
s3 *FsS3 // what this object is part of
|
||||
remote string // The remote path
|
||||
etag string // md5sum of the object
|
||||
bytes int64 // size of the object
|
||||
lastModified time.Time // Last modified
|
||||
meta s3.Headers // The object metadata if known - may be nil
|
||||
s3 *FsS3 // what this object is part of
|
||||
remote string // The remote path
|
||||
etag string // md5sum of the object
|
||||
bytes int64 // size of the object
|
||||
lastModified time.Time // Last modified
|
||||
meta map[string]*string // The object metadata if known - may be nil
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// The name of the remote (as passed into NewFs)
|
||||
func (f *FsS3) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// String converts this FsS3 to a string
|
||||
func (f *FsS3) String() string {
|
||||
if f.root == "" {
|
||||
@@ -163,27 +192,42 @@ func s3Connection(name string) (*s3.S3, error) {
|
||||
if secretAccessKey == "" {
|
||||
return nil, errors.New("secret_access_key not found")
|
||||
}
|
||||
auth := aws.Auth{AccessKey: accessKeyId, SecretKey: secretAccessKey}
|
||||
auth := credentials.NewStaticCredentials(accessKeyId, secretAccessKey, "")
|
||||
|
||||
// FIXME look through all the regions by name and use one of them if found
|
||||
|
||||
// Synthesize the region
|
||||
s3Endpoint := fs.ConfigFile.MustValue(name, "endpoint")
|
||||
if s3Endpoint == "" {
|
||||
s3Endpoint = "https://s3.amazonaws.com/"
|
||||
endpoint := fs.ConfigFile.MustValue(name, "endpoint")
|
||||
region := fs.ConfigFile.MustValue(name, "region")
|
||||
if region == "" && endpoint == "" {
|
||||
endpoint = "https://s3.amazonaws.com/"
|
||||
}
|
||||
region := aws.Region{
|
||||
Name: "s3",
|
||||
S3Endpoint: s3Endpoint,
|
||||
S3LocationConstraint: false,
|
||||
if region == "" {
|
||||
region = "us-east-1"
|
||||
}
|
||||
s3LocationConstraint := fs.ConfigFile.MustValue(name, "location_constraint")
|
||||
if s3LocationConstraint != "" {
|
||||
region.Name = s3LocationConstraint
|
||||
region.S3LocationConstraint = true
|
||||
awsConfig := aws.NewConfig().
|
||||
WithRegion(region).
|
||||
WithMaxRetries(maxRetries).
|
||||
WithCredentials(auth).
|
||||
WithEndpoint(endpoint).
|
||||
WithHTTPClient(fs.Config.Client()).
|
||||
WithS3ForcePathStyle(true)
|
||||
// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
|
||||
c := s3.New(awsConfig)
|
||||
if region == "other-v2-signature" {
|
||||
fs.Debug(name, "Using v2 auth")
|
||||
signer := func(req *request.Request) {
|
||||
// Ignore AnonymousCredentials object
|
||||
if req.Service.Config.Credentials == credentials.AnonymousCredentials {
|
||||
return
|
||||
}
|
||||
sign(accessKeyId, secretAccessKey, req.HTTPRequest)
|
||||
}
|
||||
c.Handlers.Sign.Clear()
|
||||
c.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
|
||||
c.Handlers.Sign.PushBack(signer)
|
||||
}
|
||||
|
||||
c := s3.New(auth, region)
|
||||
// Add user agent
|
||||
c.Handlers.Build.PushBack(func(r *request.Request) {
|
||||
r.HTTPRequest.Header.Set("User-Agent", fs.UserAgent)
|
||||
})
|
||||
return c, nil
|
||||
}
|
||||
|
||||
@@ -198,16 +242,21 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
return nil, err
|
||||
}
|
||||
f := &FsS3{
|
||||
name: name,
|
||||
c: c,
|
||||
bucket: bucket,
|
||||
b: c.Bucket(bucket),
|
||||
perm: s3.Private, // FIXME need user to specify
|
||||
root: directory,
|
||||
// FIXME perm: s3.Private, // FIXME need user to specify
|
||||
root: directory,
|
||||
locationConstraint: fs.ConfigFile.MustValue(name, "location_constraint"),
|
||||
}
|
||||
if f.root != "" {
|
||||
f.root += "/"
|
||||
// Check to see if the object exists
|
||||
_, err = f.b.Head(directory, nil)
|
||||
req := s3.HeadObjectInput{
|
||||
Bucket: &f.bucket,
|
||||
Key: &directory,
|
||||
}
|
||||
_, err = f.c.HeadObject(&req)
|
||||
if err == nil {
|
||||
remote := path.Base(directory)
|
||||
f.root = path.Dir(directory)
|
||||
@@ -221,27 +270,28 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
return fs.NewLimited(f, obj), nil
|
||||
}
|
||||
}
|
||||
// f.listMultipartUploads()
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Return an FsObject from a path
|
||||
//
|
||||
// May return nil if an error occurred
|
||||
func (f *FsS3) NewFsObjectWithInfo(remote string, info *s3.Key) fs.Object {
|
||||
func (f *FsS3) newFsObjectWithInfo(remote string, info *s3.Object) fs.Object {
|
||||
o := &FsObjectS3{
|
||||
s3: f,
|
||||
remote: remote,
|
||||
}
|
||||
if info != nil {
|
||||
// Set info but not meta
|
||||
var err error
|
||||
o.lastModified, err = time.Parse(time.RFC3339, info.LastModified)
|
||||
if err != nil {
|
||||
fs.Log(o, "Failed to read last modified: %s", err)
|
||||
if info.LastModified == nil {
|
||||
fs.Log(o, "Failed to read last modified")
|
||||
o.lastModified = time.Now()
|
||||
} else {
|
||||
o.lastModified = *info.LastModified
|
||||
}
|
||||
o.etag = info.ETag
|
||||
o.bytes = info.Size
|
||||
o.etag = aws.StringValue(info.ETag)
|
||||
o.bytes = aws.Int64Value(info.Size)
|
||||
} else {
|
||||
err := o.readMetaData() // reads info and meta, returning an error
|
||||
if err != nil {
|
||||
@@ -256,42 +306,71 @@ func (f *FsS3) NewFsObjectWithInfo(remote string, info *s3.Key) fs.Object {
|
||||
//
|
||||
// May return nil if an error occurred
|
||||
func (f *FsS3) NewFsObject(remote string) fs.Object {
|
||||
return f.NewFsObjectWithInfo(remote, nil)
|
||||
return f.newFsObjectWithInfo(remote, nil)
|
||||
}
|
||||
|
||||
// list the objects into the function supplied
|
||||
//
|
||||
// If directories is set it only sends directories
|
||||
func (f *FsS3) list(directories bool, fn func(string, *s3.Key)) {
|
||||
func (f *FsS3) list(directories bool, fn func(string, *s3.Object)) {
|
||||
maxKeys := int64(listChunkSize)
|
||||
delimiter := ""
|
||||
if directories {
|
||||
delimiter = "/"
|
||||
}
|
||||
// FIXME need to implement ALL loop
|
||||
objects, err := f.b.List(f.root, delimiter, "", 10000)
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.Log(f, "Couldn't read bucket %q: %s", f.bucket, err)
|
||||
} else {
|
||||
rootLength := len(f.root)
|
||||
if directories {
|
||||
for _, remote := range objects.CommonPrefixes {
|
||||
if !strings.HasPrefix(remote, f.root) {
|
||||
fs.Log(f, "Odd name received %q", remote)
|
||||
continue
|
||||
}
|
||||
remote := remote[rootLength:]
|
||||
fn(remote, &s3.Key{Key: remote})
|
||||
}
|
||||
var marker *string
|
||||
for {
|
||||
// FIXME need to implement ALL loop
|
||||
req := s3.ListObjectsInput{
|
||||
Bucket: &f.bucket,
|
||||
Delimiter: &delimiter,
|
||||
Prefix: &f.root,
|
||||
MaxKeys: &maxKeys,
|
||||
Marker: marker,
|
||||
}
|
||||
resp, err := f.c.ListObjects(&req)
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.ErrorLog(f, "Couldn't read bucket %q: %s", f.bucket, err)
|
||||
break
|
||||
} else {
|
||||
for i := range objects.Contents {
|
||||
object := &objects.Contents[i]
|
||||
if !strings.HasPrefix(object.Key, f.root) {
|
||||
fs.Log(f, "Odd name received %q", object.Key)
|
||||
continue
|
||||
rootLength := len(f.root)
|
||||
if directories {
|
||||
for _, commonPrefix := range resp.CommonPrefixes {
|
||||
if commonPrefix.Prefix == nil {
|
||||
fs.Log(f, "Nil common prefix received")
|
||||
continue
|
||||
}
|
||||
remote := *commonPrefix.Prefix
|
||||
if !strings.HasPrefix(remote, f.root) {
|
||||
fs.Log(f, "Odd name received %q", remote)
|
||||
continue
|
||||
}
|
||||
remote = remote[rootLength:]
|
||||
if strings.HasSuffix(remote, "/") {
|
||||
remote = remote[:len(remote)-1]
|
||||
}
|
||||
fn(remote, &s3.Object{Key: &remote})
|
||||
}
|
||||
remote := object.Key[rootLength:]
|
||||
fn(remote, object)
|
||||
} else {
|
||||
for _, object := range resp.Contents {
|
||||
key := aws.StringValue(object.Key)
|
||||
if !strings.HasPrefix(key, f.root) {
|
||||
fs.Log(f, "Odd name received %q", key)
|
||||
continue
|
||||
}
|
||||
remote := key[rootLength:]
|
||||
fn(remote, object)
|
||||
}
|
||||
}
|
||||
if !aws.BoolValue(resp.IsTruncated) {
|
||||
break
|
||||
}
|
||||
// Use NextMarker if set, otherwise use last Key
|
||||
if resp.NextMarker == nil || *resp.NextMarker == "" {
|
||||
marker = resp.Contents[len(resp.Contents)-1].Key
|
||||
} else {
|
||||
marker = resp.NextMarker
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -304,12 +383,12 @@ func (f *FsS3) List() fs.ObjectsChan {
|
||||
// Return no objects at top level list
|
||||
close(out)
|
||||
fs.Stats.Error()
|
||||
fs.Log(f, "Can't list objects at root - choose a bucket using lsd")
|
||||
fs.ErrorLog(f, "Can't list objects at root - choose a bucket using lsd")
|
||||
} else {
|
||||
go func() {
|
||||
defer close(out)
|
||||
f.list(false, func(remote string, object *s3.Key) {
|
||||
if fs := f.NewFsObjectWithInfo(remote, object); fs != nil {
|
||||
f.list(false, func(remote string, object *s3.Object) {
|
||||
if fs := f.newFsObjectWithInfo(remote, object); fs != nil {
|
||||
out <- fs
|
||||
}
|
||||
})
|
||||
@@ -325,15 +404,16 @@ func (f *FsS3) ListDir() fs.DirChan {
|
||||
// List the buckets
|
||||
go func() {
|
||||
defer close(out)
|
||||
buckets, err := f.c.ListBuckets()
|
||||
req := s3.ListBucketsInput{}
|
||||
resp, err := f.c.ListBuckets(&req)
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.Log(f, "Couldn't list buckets: %s", err)
|
||||
fs.ErrorLog(f, "Couldn't list buckets: %s", err)
|
||||
} else {
|
||||
for _, bucket := range buckets {
|
||||
for _, bucket := range resp.Buckets {
|
||||
out <- &fs.Dir{
|
||||
Name: bucket.Name,
|
||||
When: bucket.CreationDate,
|
||||
Name: aws.StringValue(bucket.Name),
|
||||
When: aws.TimeValue(bucket.CreationDate),
|
||||
Bytes: -1,
|
||||
Count: -1,
|
||||
}
|
||||
@@ -344,10 +424,14 @@ func (f *FsS3) ListDir() fs.DirChan {
|
||||
// List the directories in the path in the bucket
|
||||
go func() {
|
||||
defer close(out)
|
||||
f.list(true, func(remote string, object *s3.Key) {
|
||||
f.list(true, func(remote string, object *s3.Object) {
|
||||
size := int64(0)
|
||||
if object.Size != nil {
|
||||
size = *object.Size
|
||||
}
|
||||
out <- &fs.Dir{
|
||||
Name: remote,
|
||||
Bytes: object.Size,
|
||||
Bytes: size,
|
||||
Count: 0,
|
||||
}
|
||||
})
|
||||
@@ -365,9 +449,18 @@ func (f *FsS3) Put(in io.Reader, remote string, modTime time.Time, size int64) (
|
||||
|
||||
// Mkdir creates the bucket if it doesn't exist
|
||||
func (f *FsS3) Mkdir() error {
|
||||
err := f.b.PutBucket(f.perm)
|
||||
if err, ok := err.(*s3.Error); ok {
|
||||
if err.Code == "BucketAlreadyOwnedByYou" {
|
||||
req := s3.CreateBucketInput{
|
||||
Bucket: &f.bucket,
|
||||
ACL: &f.perm,
|
||||
}
|
||||
if f.locationConstraint != "" {
|
||||
req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
|
||||
LocationConstraint: &f.locationConstraint,
|
||||
}
|
||||
}
|
||||
_, err := f.c.CreateBucket(&req)
|
||||
if err, ok := err.(awserr.Error); ok {
|
||||
if err.Code() == "BucketAlreadyOwnedByYou" {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@@ -378,7 +471,11 @@ func (f *FsS3) Mkdir() error {
|
||||
//
|
||||
// Returns an error if it isn't empty
|
||||
func (f *FsS3) Rmdir() error {
|
||||
return f.b.DelBucket()
|
||||
req := s3.DeleteBucketInput{
|
||||
Bucket: &f.bucket,
|
||||
}
|
||||
_, err := f.c.DeleteBucket(&req)
|
||||
return err
|
||||
}
|
||||
|
||||
// Return the precision
|
||||
@@ -386,6 +483,37 @@ func (f *FsS3) Precision() time.Duration {
|
||||
return time.Nanosecond
|
||||
}
|
||||
|
||||
// Copy src to this remote using server side copy operations.
|
||||
//
|
||||
// This is stored with the remote path given
|
||||
//
|
||||
// It returns the destination Object and a possible error
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *FsS3) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*FsObjectS3)
|
||||
if !ok {
|
||||
fs.Debug(src, "Can't copy - not same remote type")
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
srcFs := srcObj.s3
|
||||
key := f.root + remote
|
||||
source := srcFs.bucket + "/" + srcFs.root + srcObj.remote
|
||||
req := s3.CopyObjectInput{
|
||||
Bucket: &f.bucket,
|
||||
Key: &key,
|
||||
CopySource: &source,
|
||||
MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
|
||||
}
|
||||
_, err := f.c.CopyObject(&req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.NewFsObject(remote), err
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Return the parent Fs
|
||||
@@ -406,9 +534,17 @@ func (o *FsObjectS3) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
|
||||
|
||||
// Md5sum returns the Md5sum of an object returning a lowercase hex string
|
||||
func (o *FsObjectS3) Md5sum() (string, error) {
|
||||
return strings.Trim(strings.ToLower(o.etag), `"`), nil
|
||||
etag := strings.Trim(strings.ToLower(o.etag), `"`)
|
||||
// Check the etag is a valid md5sum
|
||||
if !matchMd5.MatchString(etag) {
|
||||
// fs.Debug(o, "Invalid md5sum (probably multipart uploaded) - ignoring: %q", etag)
|
||||
return "", nil
|
||||
}
|
||||
return etag, nil
|
||||
}
|
||||
|
||||
// Size returns the size of an object in bytes
|
||||
@@ -423,8 +559,12 @@ func (o *FsObjectS3) readMetaData() (err error) {
|
||||
if o.meta != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
headers, err := o.s3.b.Head(o.s3.root+o.remote, nil)
|
||||
key := o.s3.root + o.remote
|
||||
req := s3.HeadObjectInput{
|
||||
Bucket: &o.s3.bucket,
|
||||
Key: &key,
|
||||
}
|
||||
resp, err := o.s3.c.HeadObject(&req)
|
||||
if err != nil {
|
||||
fs.Debug(o, "Failed to read info: %s", err)
|
||||
return err
|
||||
@@ -432,19 +572,17 @@ func (o *FsObjectS3) readMetaData() (err error) {
|
||||
var size int64
|
||||
// Ignore missing Content-Length assuming it is 0
|
||||
// Some versions of ceph do this due their apache proxies
|
||||
if contentLength, ok := headers["Content-Length"]; ok {
|
||||
size, err = strconv.ParseInt(contentLength, 10, 64)
|
||||
if err != nil {
|
||||
fs.Debug(o, "Failed to read size from: %q", headers)
|
||||
return err
|
||||
}
|
||||
if resp.ContentLength != nil {
|
||||
size = *resp.ContentLength
|
||||
}
|
||||
o.etag = headers["Etag"]
|
||||
o.etag = aws.StringValue(resp.ETag)
|
||||
o.bytes = size
|
||||
o.meta = headers
|
||||
if o.lastModified, err = time.Parse(http.TimeFormat, headers["Last-Modified"]); err != nil {
|
||||
o.meta = resp.Metadata
|
||||
if resp.LastModified == nil {
|
||||
fs.Log(o, "Failed to read last modified from HEAD: %s", err)
|
||||
o.lastModified = time.Now()
|
||||
} else {
|
||||
o.lastModified = *resp.LastModified
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -461,11 +599,11 @@ func (o *FsObjectS3) ModTime() time.Time {
|
||||
}
|
||||
// read mtime out of metadata if available
|
||||
d, ok := o.meta[metaMtime]
|
||||
if !ok {
|
||||
if !ok || d == nil {
|
||||
// fs.Debug(o, "No metadata")
|
||||
return o.lastModified
|
||||
}
|
||||
modTime, err := swift.FloatStringToTime(d)
|
||||
modTime, err := swift.FloatStringToTime(*d)
|
||||
if err != nil {
|
||||
fs.Log(o, "Failed to read mtime from object: %s", err)
|
||||
return o.lastModified
|
||||
@@ -478,14 +616,27 @@ func (o *FsObjectS3) SetModTime(modTime time.Time) {
|
||||
err := o.readMetaData()
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.Log(o, "Failed to read metadata: %s", err)
|
||||
fs.ErrorLog(o, "Failed to read metadata: %s", err)
|
||||
return
|
||||
}
|
||||
o.meta[metaMtime] = swift.TimeToFloatString(modTime)
|
||||
_, err = o.s3.b.Update(o.s3.root+o.remote, o.s3.perm, o.meta)
|
||||
o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))
|
||||
|
||||
// Copy the object to itself to update the metadata
|
||||
key := o.s3.root + o.remote
|
||||
sourceKey := o.s3.bucket + "/" + key
|
||||
directive := s3.MetadataDirectiveReplace // replace metadata with that passed in
|
||||
req := s3.CopyObjectInput{
|
||||
Bucket: &o.s3.bucket,
|
||||
ACL: &o.s3.perm,
|
||||
Key: &key,
|
||||
CopySource: &sourceKey,
|
||||
Metadata: o.meta,
|
||||
MetadataDirective: &directive,
|
||||
}
|
||||
_, err = o.s3.c.CopyObject(&req)
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.Log(o, "Failed to update remote mtime: %s", err)
|
||||
fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -496,37 +647,69 @@ func (o *FsObjectS3) Storable() bool {
|
||||
|
||||
// Open an object for read
|
||||
func (o *FsObjectS3) Open() (in io.ReadCloser, err error) {
|
||||
in, err = o.s3.b.GetReader(o.s3.root + o.remote)
|
||||
return
|
||||
key := o.s3.root + o.remote
|
||||
req := s3.GetObjectInput{
|
||||
Bucket: &o.s3.bucket,
|
||||
Key: &key,
|
||||
}
|
||||
resp, err := o.s3.c.GetObject(&req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp.Body, nil
|
||||
}
|
||||
|
||||
// Update the Object from in with modTime and size
|
||||
func (o *FsObjectS3) Update(in io.Reader, modTime time.Time, size int64) error {
|
||||
// Set the mtime in the headers
|
||||
headers := s3.Headers{
|
||||
metaMtime: swift.TimeToFloatString(modTime),
|
||||
opts := s3manager.UploadOptions{
|
||||
// PartSize: 64 * 1024 * 1024, use default
|
||||
Concurrency: 2, // limit concurrency
|
||||
LeavePartsOnError: false,
|
||||
S3: o.s3.c,
|
||||
}
|
||||
uploader := s3manager.NewUploader(&opts)
|
||||
|
||||
// Set the mtime in the meta data
|
||||
metadata := map[string]*string{
|
||||
metaMtime: aws.String(swift.TimeToFloatString(modTime)),
|
||||
}
|
||||
|
||||
// Guess the content type
|
||||
contentType := mime.TypeByExtension(path.Ext(o.remote))
|
||||
if contentType == "" {
|
||||
contentType = "application/octet-stream"
|
||||
}
|
||||
contentType := fs.MimeType(o)
|
||||
|
||||
_, err := o.s3.b.PutReaderHeaders(o.s3.root+o.remote, in, size, contentType, o.s3.perm, headers)
|
||||
key := o.s3.root + o.remote
|
||||
req := s3manager.UploadInput{
|
||||
Bucket: &o.s3.bucket,
|
||||
ACL: &o.s3.perm,
|
||||
Key: &key,
|
||||
Body: in,
|
||||
ContentType: &contentType,
|
||||
Metadata: metadata,
|
||||
//ContentLength: &size,
|
||||
}
|
||||
_, err := uploader.Upload(&req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Read the metadata from the newly created object
|
||||
o.meta = nil // wipe old metadata
|
||||
err = o.readMetaData()
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *FsObjectS3) Remove() error {
|
||||
return o.s3.b.Del(o.s3.root + o.remote)
|
||||
key := o.s3.root + o.remote
|
||||
req := s3.DeleteObjectInput{
|
||||
Bucket: &o.s3.bucket,
|
||||
Key: &key,
|
||||
}
|
||||
_, err := o.s3.c.DeleteObject(&req)
|
||||
return err
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var _ fs.Fs = &FsS3{}
|
||||
var _ fs.Copier = &FsS3{}
|
||||
var _ fs.Object = &FsObjectS3{}
|
||||
|
||||
54
s3/s3_test.go
Normal file
54
s3/s3_test.go
Normal file
@@ -0,0 +1,54 @@
|
||||
// Test S3 filesystem interface
|
||||
//
|
||||
// Automatically generated - DO NOT EDIT
|
||||
// Regenerate with: make gen_tests
|
||||
package s3_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
"github.com/ncw/rclone/s3"
|
||||
)
|
||||
|
||||
func init() {
|
||||
fstests.NilObject = fs.Object((*s3.FsObjectS3)(nil))
|
||||
fstests.RemoteName = "TestS3:"
|
||||
}
|
||||
|
||||
// Generic tests for the Fs
|
||||
func TestInit(t *testing.T) { fstests.TestInit(t) }
|
||||
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
|
||||
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
|
||||
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
|
||||
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
|
||||
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
|
||||
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
|
||||
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
|
||||
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
|
||||
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
|
||||
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
|
||||
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
|
||||
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
|
||||
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
|
||||
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
|
||||
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
|
||||
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
|
||||
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
|
||||
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
|
||||
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
|
||||
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
|
||||
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
|
||||
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
|
||||
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
|
||||
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
|
||||
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
|
||||
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
|
||||
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
|
||||
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
|
||||
115
s3/v2sign.go
Normal file
115
s3/v2sign.go
Normal file
@@ -0,0 +1,115 @@
|
||||
// v2 signing
|
||||
|
||||
package s3
|
||||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// URL parameters that need to be added to the signature
|
||||
var s3ParamsToSign = map[string]struct{}{
|
||||
"acl": struct{}{},
|
||||
"location": struct{}{},
|
||||
"logging": struct{}{},
|
||||
"notification": struct{}{},
|
||||
"partNumber": struct{}{},
|
||||
"policy": struct{}{},
|
||||
"requestPayment": struct{}{},
|
||||
"torrent": struct{}{},
|
||||
"uploadId": struct{}{},
|
||||
"uploads": struct{}{},
|
||||
"versionId": struct{}{},
|
||||
"versioning": struct{}{},
|
||||
"versions": struct{}{},
|
||||
"response-content-type": struct{}{},
|
||||
"response-content-language": struct{}{},
|
||||
"response-expires": struct{}{},
|
||||
"response-cache-control": struct{}{},
|
||||
"response-content-disposition": struct{}{},
|
||||
"response-content-encoding": struct{}{},
|
||||
}
|
||||
|
||||
// sign signs requests using v2 auth
|
||||
//
|
||||
// Cobbled together from goamz and aws-sdk-go
|
||||
func sign(AccessKey, SecretKey string, req *http.Request) {
|
||||
// Set date
|
||||
date := time.Now().UTC().Format(time.RFC1123)
|
||||
req.Header.Set("Date", date)
|
||||
|
||||
// Sort out URI
|
||||
uri := req.URL.Opaque
|
||||
if uri != "" {
|
||||
if strings.HasPrefix(uri, "//") {
|
||||
// Strip off //host/uri
|
||||
uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/")
|
||||
req.URL.Opaque = uri // reset to plain URI otherwise Ceph gets confused
|
||||
}
|
||||
} else {
|
||||
uri = req.URL.Path
|
||||
}
|
||||
if uri == "" {
|
||||
uri = "/"
|
||||
}
|
||||
|
||||
// Look through headers of interest
|
||||
var md5 string
|
||||
var contentType string
|
||||
var headersToSign []string
|
||||
for k, v := range req.Header {
|
||||
k = strings.ToLower(k)
|
||||
switch k {
|
||||
case "content-md5":
|
||||
md5 = v[0]
|
||||
case "content-type":
|
||||
contentType = v[0]
|
||||
default:
|
||||
if strings.HasPrefix(k, "x-amz-") {
|
||||
vall := strings.Join(v, ",")
|
||||
headersToSign = append(headersToSign, k+":"+vall)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Make headers of interest into canonical string
|
||||
var joinedHeadersToSign string
|
||||
if len(headersToSign) > 0 {
|
||||
sort.StringSlice(headersToSign).Sort()
|
||||
joinedHeadersToSign = strings.Join(headersToSign, "\n") + "\n"
|
||||
}
|
||||
|
||||
// Look for query parameters which need to be added to the signature
|
||||
params := req.URL.Query()
|
||||
var queriesToSign []string
|
||||
for k, vs := range params {
|
||||
if _, ok := s3ParamsToSign[k]; ok {
|
||||
for _, v := range vs {
|
||||
if v == "" {
|
||||
queriesToSign = append(queriesToSign, k)
|
||||
} else {
|
||||
queriesToSign = append(queriesToSign, k+"="+v)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Add query parameters to URI
|
||||
if len(queriesToSign) > 0 {
|
||||
sort.StringSlice(queriesToSign).Sort()
|
||||
uri += "?" + strings.Join(queriesToSign, "&")
|
||||
}
|
||||
|
||||
// Make signature
|
||||
payload := req.Method + "\n" + md5 + "\n" + contentType + "\n" + date + "\n" + joinedHeadersToSign + uri
|
||||
hash := hmac.New(sha1.New, []byte(SecretKey))
|
||||
hash.Write([]byte(payload))
|
||||
signature := make([]byte, base64.StdEncoding.EncodedLen(hash.Size()))
|
||||
base64.StdEncoding.Encode(signature, hash.Sum(nil))
|
||||
|
||||
// Set signature in request
|
||||
req.Header.Set("Authorization", "AWS "+AccessKey+":"+string(signature))
|
||||
}
|
||||
@@ -44,6 +44,12 @@ func init() {
|
||||
Help: "Memset Memstore UK v2",
|
||||
Value: "https://auth.storage.memset.com/v2.0",
|
||||
}},
|
||||
}, {
|
||||
Name: "tenant",
|
||||
Help: "Tenant name - optional",
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region name - optional",
|
||||
},
|
||||
// snet = flag.Bool("swift-snet", false, "Use internal service network") // FIXME not implemented
|
||||
},
|
||||
@@ -52,6 +58,7 @@ func init() {
|
||||
|
||||
// FsSwift represents a remote swift server
|
||||
type FsSwift struct {
|
||||
name string // name of this remote
|
||||
c swift.Connection // the connection to the swift server
|
||||
container string // the container we are working on
|
||||
root string // the path we are working on if any
|
||||
@@ -69,6 +76,11 @@ type FsObjectSwift struct {
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// The name of the remote (as passed into NewFs)
|
||||
func (f *FsSwift) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// String converts this FsSwift to a string
|
||||
func (f *FsSwift) String() string {
|
||||
if f.root == "" {
|
||||
@@ -107,10 +119,15 @@ func swiftConnection(name string) (*swift.Connection, error) {
|
||||
return nil, errors.New("auth not found")
|
||||
}
|
||||
c := &swift.Connection{
|
||||
UserName: userName,
|
||||
ApiKey: apiKey,
|
||||
AuthUrl: authUrl,
|
||||
UserAgent: fs.UserAgent,
|
||||
UserName: userName,
|
||||
ApiKey: apiKey,
|
||||
AuthUrl: authUrl,
|
||||
UserAgent: fs.UserAgent,
|
||||
Tenant: fs.ConfigFile.MustValue(name, "tenant"),
|
||||
Region: fs.ConfigFile.MustValue(name, "region"),
|
||||
ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
|
||||
Timeout: 10 * fs.Config.Timeout, // Use the timeouts in the transport
|
||||
Transport: fs.Config.Transport(),
|
||||
}
|
||||
err := c.Authenticate()
|
||||
if err != nil {
|
||||
@@ -130,6 +147,7 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
return nil, err
|
||||
}
|
||||
f := &FsSwift{
|
||||
name: name,
|
||||
c: *c,
|
||||
container: container,
|
||||
root: directory,
|
||||
@@ -157,7 +175,7 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
// Return an FsObject from a path
|
||||
//
|
||||
// May return nil if an error occurred
|
||||
func (f *FsSwift) NewFsObjectWithInfo(remote string, info *swift.Object) fs.Object {
|
||||
func (f *FsSwift) newFsObjectWithInfo(remote string, info *swift.Object) fs.Object {
|
||||
fs := &FsObjectSwift{
|
||||
swift: f,
|
||||
remote: remote,
|
||||
@@ -179,7 +197,7 @@ func (f *FsSwift) NewFsObjectWithInfo(remote string, info *swift.Object) fs.Obje
|
||||
//
|
||||
// May return nil if an error occurred
|
||||
func (f *FsSwift) NewFsObject(remote string) fs.Object {
|
||||
return f.NewFsObjectWithInfo(remote, nil)
|
||||
return f.newFsObjectWithInfo(remote, nil)
|
||||
}
|
||||
|
||||
// list the objects into the function supplied
|
||||
@@ -201,8 +219,11 @@ func (f *FsSwift) list(directories bool, fn func(string, *swift.Object)) {
|
||||
for i := range objects {
|
||||
object := &objects[i]
|
||||
// FIXME if there are no directories, swift gives back the files for some reason!
|
||||
if directories && !strings.HasSuffix(object.Name, "/") {
|
||||
continue
|
||||
if directories {
|
||||
if !strings.HasSuffix(object.Name, "/") {
|
||||
continue
|
||||
}
|
||||
object.Name = object.Name[:len(object.Name)-1]
|
||||
}
|
||||
if !strings.HasPrefix(object.Name, f.root) {
|
||||
fs.Log(f, "Odd name received %q", object.Name)
|
||||
@@ -216,7 +237,7 @@ func (f *FsSwift) list(directories bool, fn func(string, *swift.Object)) {
|
||||
})
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.Log(f, "Couldn't read container %q: %s", f.container, err)
|
||||
fs.ErrorLog(f, "Couldn't read container %q: %s", f.container, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -227,13 +248,13 @@ func (f *FsSwift) List() fs.ObjectsChan {
|
||||
// Return no objects at top level list
|
||||
close(out)
|
||||
fs.Stats.Error()
|
||||
fs.Log(f, "Can't list objects at root - choose a container using lsd")
|
||||
fs.ErrorLog(f, "Can't list objects at root - choose a container using lsd")
|
||||
} else {
|
||||
// List the objects
|
||||
go func() {
|
||||
defer close(out)
|
||||
f.list(false, func(remote string, object *swift.Object) {
|
||||
if fs := f.NewFsObjectWithInfo(remote, object); fs != nil {
|
||||
if fs := f.newFsObjectWithInfo(remote, object); fs != nil {
|
||||
out <- fs
|
||||
}
|
||||
})
|
||||
@@ -252,7 +273,7 @@ func (f *FsSwift) ListDir() fs.DirChan {
|
||||
containers, err := f.c.ContainersAll(nil)
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.Log(f, "Couldn't list containers: %v", err)
|
||||
fs.ErrorLog(f, "Couldn't list containers: %v", err)
|
||||
} else {
|
||||
for _, container := range containers {
|
||||
out <- &fs.Dir{
|
||||
@@ -307,6 +328,29 @@ func (fs *FsSwift) Precision() time.Duration {
|
||||
return time.Nanosecond
|
||||
}
|
||||
|
||||
// Copy src to this remote using server side copy operations.
|
||||
//
|
||||
// This is stored with the remote path given
|
||||
//
|
||||
// It returns the destination Object and a possible error
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *FsSwift) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*FsObjectSwift)
|
||||
if !ok {
|
||||
fs.Debug(src, "Can't copy - not same remote type")
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
srcFs := srcObj.swift
|
||||
_, err := f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.NewFsObject(remote), nil
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Return the parent Fs
|
||||
@@ -379,14 +423,14 @@ func (o *FsObjectSwift) SetModTime(modTime time.Time) {
|
||||
err := o.readMetaData()
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.Log(o, "Failed to read metadata: %s", err)
|
||||
fs.ErrorLog(o, "Failed to read metadata: %s", err)
|
||||
return
|
||||
}
|
||||
o.meta.SetModTime(modTime)
|
||||
err = o.swift.c.ObjectUpdate(o.swift.container, o.swift.root+o.remote, o.meta.ObjectHeaders())
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.Log(o, "Failed to update remote mtime: %s", err)
|
||||
fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -413,6 +457,7 @@ func (o *FsObjectSwift) Update(in io.Reader, modTime time.Time, size int64) erro
|
||||
return err
|
||||
}
|
||||
// Read the metadata from the newly created object
|
||||
o.meta = nil // wipe old metadata
|
||||
err = o.readMetaData()
|
||||
return err
|
||||
}
|
||||
@@ -424,4 +469,5 @@ func (o *FsObjectSwift) Remove() error {
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var _ fs.Fs = &FsSwift{}
|
||||
var _ fs.Copier = &FsSwift{}
|
||||
var _ fs.Object = &FsObjectSwift{}
|
||||
|
||||
54
swift/swift_test.go
Normal file
54
swift/swift_test.go
Normal file
@@ -0,0 +1,54 @@
|
||||
// Test Swift filesystem interface
|
||||
//
|
||||
// Automatically generated - DO NOT EDIT
|
||||
// Regenerate with: make gen_tests
|
||||
package swift_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
"github.com/ncw/rclone/swift"
|
||||
)
|
||||
|
||||
func init() {
|
||||
fstests.NilObject = fs.Object((*swift.FsObjectSwift)(nil))
|
||||
fstests.RemoteName = "TestSwift:"
|
||||
}
|
||||
|
||||
// Generic tests for the Fs
|
||||
func TestInit(t *testing.T) { fstests.TestInit(t) }
|
||||
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
|
||||
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
|
||||
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
|
||||
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
|
||||
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
|
||||
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
|
||||
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
|
||||
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
|
||||
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
|
||||
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
|
||||
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
|
||||
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
|
||||
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
|
||||
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
|
||||
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
|
||||
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
|
||||
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
|
||||
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
|
||||
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
|
||||
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
|
||||
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
|
||||
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
|
||||
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
|
||||
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
|
||||
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
|
||||
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
|
||||
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
|
||||
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
|
||||
Reference in New Issue
Block a user