Mirror of https://github.com/rclone/rclone.git, synced 2025-12-15 15:53:41 +00:00

Compare commits: 156 commits
| SHA1 |
|---|
| 4276abc58b |
| a795d93bc3 |
| 5df04cb763 |
| ef54167a4a |
| d42cb11b84 |
| b257de4aba |
| 365b4babae |
| 6d48dffa2f |
| 8f2999b6af |
| be6115fbfa |
| 2fcb8f5db7 |
| 0ab3f020ab |
| 64c23c2f5b |
| ff16e0f6df |
| 1a82ba196b |
| ed72c678f8 |
| 4ed8836a71 |
| 5529978fa7 |
| 66d84c9914 |
| b85ddc4e4f |
| e4a9e27a55 |
| 22645eea2e |
| 345c98ed62 |
| b872ff0237 |
| 1b95718460 |
| 6a3580c556 |
| 16c9fba5de |
| 4e952af614 |
| 6344c3051c |
| ab9f521cbd |
| 3a900e5bb7 |
| b4d7741611 |
| 95fd79faf9 |
| b79dc01016 |
| bf562d7373 |
| 2e9f2ea3d3 |
| 177dbbc29a |
| 4712043e26 |
| 852acd5e4e |
| 9f1daabb2c |
| 938dd24cc9 |
| 57aad81b68 |
| a91bcaaeb0 |
| d04c21b198 |
| 4a0a42c2f1 |
| cc7b9af50e |
| 68fef49c55 |
| 5d4b149884 |
| 5f20ae707d |
| e9c915e6fe |
| 2ed158aba3 |
| 05050d53ad |
| e391311512 |
| 3234c28f7c |
| 6fbd9cf24b |
| bc5b63ffef |
| 788ef76f1c |
| 0872ec3204 |
| 0a5870208e |
| 3219334c3e |
| 79fd662676 |
| 34193fd8d9 |
| 2203766f77 |
| 235cbe0e57 |
| f50f353b5d |
| 00afe6cc96 |
| dd48e62b7e |
| a1a780e847 |
| fa87077211 |
| 6ac7145d2d |
| f1226f19b2 |
| 3ecbf2af25 |
| 79f2e95bf9 |
| faee50b238 |
| 807d4a3c00 |
| 073d112204 |
| 14f814b806 |
| a288c2b3a3 |
| fec16b0ac8 |
| dd8717797e |
| 7e7c239f09 |
| edd0e8abb1 |
| d2b537d9a1 |
| 8c3df224ef |
| 967fd2a778 |
| ea12e446ca |
| c8cd2b510f |
| 8b05a8322b |
| c98a51b26c |
| e2717a031e |
| 8d33ce0154 |
| 92745aa950 |
| cbc6bf6a89 |
| f72575e75f |
| 0168f55f3e |
| 8b60ab86a1 |
| 7463a7a509 |
| 9ed2de3d6e |
| 4f35fb59c8 |
| 59ba8f28c8 |
| d298b578ab |
| fabbc035c4 |
| 6530b07cde |
| f8b7eaec93 |
| 5c226e91c0 |
| 8e3d45d2dc |
| a96b522958 |
| fedf81c2b7 |
| 0c6f816a49 |
| dfe771fb0c |
| bc19e2d84b |
| 8c4d91cff7 |
| 2fcc18779b |
| 96cc3e5a0b |
| cc8fe0630c |
| 1d9e76bb0f |
| 337110b7a0 |
| 83733205f6 |
| d8306938a1 |
| 88ea8b305d |
| e2f4d7b5e3 |
| 8140869767 |
| 6a8de87116 |
| 0da6f24221 |
| 771e60bd07 |
| 40b3c4883f |
| e320f4a988 |
| 5835f15f21 |
| 67c311233b |
| 3fcff32524 |
| 472f065ce7 |
| 6c6d7eb770 |
| c646ada3c3 |
| f55359b3ac |
| 9d9a17547a |
| c6dc88766b |
| 754ce9dec6 |
| bd5f685d0a |
| c663e24669 |
| 5948764e9e |
| 539ad44757 |
| 74994a2ec1 |
| 97dced6a0b |
| e04acb09ce |
| 87ed7fc932 |
| 90744301d3 |
| bf4879f57f |
| e22b445cff |
| 5ab7970e18 |
| e984eeedc4 |
| 968b5a0984 |
| 7af1282375 |
| d9fcc32f70 |
| 870a9fc3b2 |
| 8e3703abeb |
| ba81277bbe |
5 .gitignore vendored
@@ -4,6 +4,7 @@ rclone
rclonetest/rclonetest
build
docs/public
README.html
README.txt
MANUAL.md
MANUAL.html
MANUAL.txt
rclone.1
20 .travis.yml
@@ -1,12 +1,24 @@
language: go
sudo: false

os:
- linux
- osx

go:
- 1.1.2
- 1.2.2
- 1.3.3
- 1.4
- 1.4.2
- 1.5
- tip

install:
- go get ./...
- go get -u github.com/golang/lint/golint
- go get -u golang.org/x/tools/cmd/goimports

script:
- go get ./...
- go vet ./...
- diff <(goimports -d .) <(printf "")
- diff <(golint ./...) <(printf "")
- go test -v ./...
- go test -cpu=2 -race -v ./...
34 Makefile
@@ -1,3 +1,4 @@
SHELL = /bin/bash
TAG := $(shell git describe --tags)
LAST_TAG := $(shell git describe --tags --abbrev=0)
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
@@ -10,16 +11,25 @@ test: rclone
	go test ./...
	cd fs && ./test_all.sh

doc: rclone.1 README.html README.txt
check: rclone
	go vet ./...
	errcheck ./...
	golint ./...
	diff <(goimports -d .) <(printf "")

rclone.1: README.md
	pandoc -s --from markdown --to man README.md -o rclone.1
doc: rclone.1 MANUAL.html MANUAL.txt

README.html: README.md
	pandoc -s --from markdown_github --to html README.md -o README.html
rclone.1: MANUAL.md
	pandoc -s --from markdown --to man MANUAL.md -o rclone.1

README.txt: README.md
	pandoc -s --from markdown_github --to plain README.md -o README.txt
MANUAL.md: make_manual.py docs/content/*.md
	./make_manual.py

MANUAL.html: MANUAL.md
	pandoc -s --from markdown --to html MANUAL.md -o MANUAL.html

MANUAL.txt: MANUAL.md
	pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt

install: rclone
	install -d ${DESTDIR}/usr/bin
@@ -29,7 +39,7 @@ clean:
	go clean ./...
	find . -name \*~ | xargs -r rm -f
	rm -rf build docs/public
	rm -f rclone rclonetest/rclonetest rclone.1 README.html README.txt
	rm -f rclone rclonetest/rclonetest rclone.1 MANUAL.md MANUAL.html MANUAL.txt

website:
	cd docs && hugo
@@ -49,18 +59,18 @@ serve:
tag:
	@echo "Old tag is $(LAST_TAG)"
	@echo "New tag is $(NEW_TAG)"
	echo -e "package fs\n const Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
	echo -e "package fs\n\n// Version of rclone\nconst Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
	perl -lpe 's/VERSION/${NEW_TAG}/g; s/DATE/'`date -I`'/g;' docs/content/downloads.md.in > docs/content/downloads.md
	git tag $(NEW_TAG)
	@echo "Add this to changelog in README.md"
	@echo "Add this to changelog in docs/content/changelog.md"
	@echo " * $(NEW_TAG) -" `date -I`
	@git log $(LAST_TAG)..$(NEW_TAG) --oneline
	@echo "Then commit the changes"
	@echo git commit -m "Version $(NEW_TAG)" -a -v
	@echo git commit -m \"Version $(NEW_TAG)\" -a -v
	@echo "And finally run make retag before make cross etc"

retag:
	git tag -f $(LAST_TAG)

gen_tests:
	cd fstest/fstests && go run gen_tests.go
	cd fstest/fstests && go generate
360 README.md
@@ -1,12 +1,14 @@
% rclone(1) User Manual
% Nick Craig-Wood
% Jul 7, 2014

Rclone
======

[](http://rclone.org/)

[Website](http://rclone.org) |
[Documentation](http://rclone.org/docs/) |
[Changelog](http://rclone.org/changelog/) |
[Installation](http://rclone.org/install/) |
[G+](https://google.com/+RcloneOrg)

[](https://travis-ci.org/ncw/rclone) [](https://ci.appveyor.com/project/ncw/rclone) [](https://godoc.org/github.com/ncw/rclone)

Rclone is a command line program to sync files and directories to and from

* Google Drive
@@ -14,6 +16,7 @@ Rclone is a command line program to sync files and directories to and from
* Openstack Swift / Rackspace cloud files / Memset Memstore
* Dropbox
* Google Cloud Storage
* Amazon Cloud Drive
* The local filesystem

Features
@@ -26,352 +29,13 @@ Features
* Check mode to check all MD5SUMs
* Can sync to and from network, eg two different Drive accounts

See the Home page for more documentation and configuration walkthroughs.
See the home page for installation, usage, documentation, changelog
and configuration walkthroughs.

* http://rclone.org/

Install
-------

Rclone is a Go program and comes as a single binary file.

Download the binary for your OS from

* http://rclone.org/downloads/

Or alternatively if you have Go installed use

    go install github.com/ncw/rclone

and this will build the binary in `$GOPATH/bin`.

Configure
---------

First you'll need to configure rclone. As the object storage systems
have quite complicated authentication these are kept in a config file
`.rclone.conf` in your home directory by default. (You can use the
`--config` option to choose a different config file.)

The easiest way to make the config is to run rclone with the config
option, Eg

    rclone config

Usage
-----

Rclone syncs a directory tree from local to remote.

Its basic syntax is

    Syntax: [options] subcommand <parameters> <parameters...>

See below for how to specify the source and destination paths.

Subcommands
-----------

    rclone copy source:path dest:path

Copy the source to the destination. Doesn't transfer
unchanged files, testing by size and modification time or
MD5SUM. Doesn't delete files from the destination.

    rclone sync source:path dest:path

Sync the source to the destination, changing the destination
only. Doesn't transfer unchanged files, testing by size and
modification time or MD5SUM. Destination is updated to match
source, including deleting files if necessary. Since this can
cause data loss, test first with the `--dry-run` flag.

    rclone ls [remote:path]

List all the objects in the path with size and path.

    rclone lsd [remote:path]

List all directories/containers/buckets in the path.

    rclone lsl [remote:path]

List all the objects in the path with modification time,
size and path.

    rclone md5sum [remote:path]

Produces an md5sum file for all the objects in the path. This
is in the same format as the standard md5sum tool produces.

    rclone mkdir remote:path

Make the path if it doesn't already exist

    rclone rmdir remote:path

Remove the path. Note that you can't remove a path with
objects in it, use purge for that.

    rclone purge remote:path

Remove the path and all of its contents.

    rclone check source:path dest:path

Checks the files in the source and destination match. It
compares sizes and MD5SUMs and prints a report of files which
don't match. It doesn't alter the source or destination.

    rclone config

Enter an interactive configuration session.

    rclone help

This help.

General options:

```
--bwlimit=0: Bandwidth limit in kBytes/s, or use suffix k|M|G
--checkers=8: Number of checkers to run in parallel.
--config="~/.rclone.conf": Config file.
--contimeout=1m0s: Connect timeout
-n, --dry-run=false: Do a trial run with no permanent changes
--log-file="": Log everything to this file
--modify-window=1ns: Max time diff to be considered the same
-q, --quiet=false: Print as little stuff as possible
--stats=1m0s: Interval to print stats (0 to disable)
--timeout=5m0s: IO idle timeout
--transfers=4: Number of file transfers to run in parallel.
-v, --verbose=false: Print lots more stuff
-V, --version=false: Print the version number
```

Developer options:

```
--cpuprofile="": Write cpu profile to file
```

Local Filesystem
----------------

Paths are specified as normal filesystem paths, so

    rclone sync /home/source /tmp/destination

Will sync `/home/source` to `/tmp/destination`

Swift / Rackspace cloudfiles / Memset Memstore
----------------------------------------------

Paths are specified as remote:container (or remote: for the `lsd`
command.) You may put subdirectories in too, eg
`remote:container/path/to/dir`.

So to copy a local directory to a swift container called backup:

    rclone sync /home/source swift:backup

For more help see the [online docs on Openstack Swift](http://rclone.org/swift).

Amazon S3
---------

Paths are specified as remote:bucket. You may put subdirectories in
too, eg `remote:bucket/path/to/dir`.

So to copy a local directory to a s3 container called backup

    rclone sync /home/source s3:backup

For more help see the [online docs on Amazon S3](http://rclone.org/s3).

Google drive
------------

Paths are specified as remote:path. Drive paths may be as deep as required.

The initial setup for drive involves getting a token from Google drive
which you need to do in your browser. `rclone config` walks you
through it.

To copy a local directory to a drive directory called backup

    rclone copy /home/source remote:backup

For more help see the [online docs on Google Drive](http://rclone.org/drive).

Dropbox
-------

Paths are specified as remote:path. Dropbox paths may be as deep as required.

The initial setup for dropbox involves getting a token from Dropbox
which you need to do in your browser. `rclone config` walks you
through it.

To copy a local directory to a dropbox directory called backup

    rclone copy /home/source dropbox:backup

For more help see the [online docs on Dropbox](http://rclone.org/dropbox).

Google Cloud Storage
--------------------

Paths are specified as remote:path. Google Cloud Storage paths may be
as deep as required.

The initial setup for Google Cloud Storage involves getting a token
from Google which you need to do in your browser. `rclone config`
walks you through it.

To copy a local directory to a google cloud storage directory called backup

    rclone copy /home/source remote:backup

For more help see the [online docs on Google Cloud Storage](http://rclone.org/googlecloudstorage/).

Single file copies
------------------

Rclone can copy single files

    rclone src:path/to/file dst:path/dir

Or

    rclone src:path/to/file dst:path/to/file

Note that you can't rename the file if you are copying from one file to another.

License
-------

This is free software under the terms of the MIT license (check the
COPYING file included in this package).

Bugs
----

* Empty directories left behind with Local and Drive
  * eg purging a local directory with subdirectories doesn't work

Changelog
---------

* v1.13 - 2015-05-10
  * Revise documentation (especially sync)
  * Implement --timeout and --conntimeout
  * s3: ignore etags from multipart uploads which aren't md5sums
* v1.12 - 2015-03-15
  * drive: Use chunked upload for files above a certain size
  * drive: add --drive-chunk-size and --drive-upload-cutoff parameters
  * drive: switch to insert from update when a failed copy deletes the upload
  * core: Log duplicate files if they are detected
* v1.11 - 2015-03-04
  * swift: add region parameter
  * drive: fix crash on failed to update remote mtime
  * In remote paths, change native directory separators to /
  * Add synchronization to ls/lsl/lsd output to stop corruptions
  * Ensure all stats/log messages to go stderr
  * Add --log-file flag to log everything (including panics) to file
  * Make it possible to disable stats printing with --stats=0
  * Implement --bwlimit to limit data transfer bandwidth
* v1.10 - 2015-02-12
  * s3: list an unlimited number of items
  * Fix getting stuck in the configurator
* v1.09 - 2015-02-07
  * windows: Stop drive letters (eg C:) getting mixed up with remotes (eg drive:)
  * local: Fix directory separators on Windows
  * drive: fix rate limit exceeded errors
* v1.08 - 2015-02-04
  * drive: fix subdirectory listing to not list entire drive
  * drive: Fix SetModTime
  * dropbox: adapt code to recent library changes
* v1.07 - 2014-12-23
  * google cloud storage: fix memory leak
* v1.06 - 2014-12-12
  * Fix "Couldn't find home directory" on OSX
  * swift: Add tenant parameter
  * Use new location of Google API packages
* v1.05 - 2014-08-09
  * Improved tests and consequently lots of minor fixes
  * core: Fix race detected by go race detector
  * core: Fixes after running errcheck
  * drive: reset root directory on Rmdir and Purge
  * fs: Document that Purger returns error on empty directory, test and fix
  * google cloud storage: fix ListDir on subdirectory
  * google cloud storage: re-read metadata in SetModTime
  * s3: make reading metadata more reliable to work around eventual consistency problems
  * s3: strip trailing / from ListDir()
  * swift: return directories without / in ListDir
* v1.04 - 2014-07-21
  * google cloud storage: Fix crash on Update
* v1.03 - 2014-07-20
  * swift, s3, dropbox: fix updated files being marked as corrupted
  * Make compile with go 1.1 again
* v1.02 - 2014-07-19
  * Implement Dropbox remote
  * Implement Google Cloud Storage remote
  * Verify Md5sums and Sizes after copies
  * Remove times from "ls" command - lists sizes only
  * Add "lsl" - lists times and sizes
  * Add "md5sum" command
* v1.01 - 2014-07-04
  * drive: fix transfer of big files using up lots of memory
* v1.00 - 2014-07-03
  * drive: fix whole second dates
* v0.99 - 2014-06-26
  * Fix --dry-run not working
  * Make compatible with go 1.1
* v0.98 - 2014-05-30
  * s3: Treat missing Content-Length as 0 for some ceph installations
  * rclonetest: add file with a space in
* v0.97 - 2014-05-05
  * Implement copying of single files
  * s3 & swift: support paths inside containers/buckets
* v0.96 - 2014-04-24
  * drive: Fix multiple files of same name being created
  * drive: Use o.Update and fs.Put to optimise transfers
  * Add version number, -V and --version
* v0.95 - 2014-03-28
  * rclone.org: website, docs and graphics
  * drive: fix path parsing
* v0.94 - 2014-03-27
  * Change remote format one last time
  * GNU style flags
* v0.93 - 2014-03-16
  * drive: store token in config file
  * cross compile other versions
  * set strict permissions on config file
* v0.92 - 2014-03-15
  * Config fixes and --config option
* v0.91 - 2014-03-15
  * Make config file
* v0.90 - 2013-06-27
  * Project named rclone
* v0.00 - 2012-11-18
  * Project started

Contact and support
-------------------

The project website is at:

* https://github.com/ncw/rclone

There you can file bug reports, ask for help or send pull requests.

Authors
-------

* Nick Craig-Wood <nick@craig-wood.com>

Contributors
------------

* Your name goes here!

@@ -4,15 +4,18 @@ Required software for making a release
* Run `gox -build-toolchain`
  * This assumes you have your own source checkout
* pandoc for making the html and man pages
* errcheck - go get github.com/kisielk/errcheck
* golint - go get github.com/golang/lint

Making a release
* go get -u -f -v ./...
* make check
* make test
* make tag
* edit README.md
* git commit fs/version.go README.md docs/content/downloads.md
* edit docs/content/changelog.md
* git commit -a -v
* make retag
* # Set the GOPATH for a gox enabled compiler - . ~/bin/go-cross
* # Set the GOPATH for a gox enabled compiler - . ~/bin/go-cross - not required for go >= 1.5
* make cross
* make upload
* make upload_website
706 amazonclouddrive/amazonclouddrive.go Normal file
@@ -0,0 +1,706 @@
// Package amazonclouddrive provides an interface to the Amazon Cloud
// Drive object storage system.
package amazonclouddrive

/*

FIXME make searching for directory in id and file in id more efficient
- use the name: search parameter - remember the escaping rules
- use Folder GetNode and GetFile

FIXME make the default for no files and no dirs be (FILE & FOLDER) so
we ignore assets completely!
*/

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"regexp"
	"strings"
	"sync"
	"time"

	"github.com/ncw/go-acd"
	"github.com/ncw/rclone/dircache"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/oauthutil"
	"github.com/ncw/rclone/pacer"
	"golang.org/x/oauth2"
)

const (
	rcloneClientID     = "amzn1.application-oa2-client.6bf18d2d1f5b485c94c8988bb03ad0e7"
	rcloneClientSecret = "k8/NyszKm5vEkZXAwsbGkd6C3NrbjIqMg4qEhIeF14Szub2wur+/teS3ubXgsLe9//+tr/qoqK+lq6mg8vWkoA=="
	folderKind         = "FOLDER"
	fileKind           = "FILE"
	assetKind          = "ASSET"
	statusAvailable    = "AVAILABLE"
	timeFormat         = time.RFC3339 // 2014-03-07T22:31:12.173Z
	minSleep           = 20 * time.Millisecond
)

// Globals
var (
	// Description of how to auth for this app
	acdConfig = &oauth2.Config{
		Scopes: []string{"clouddrive:read_all", "clouddrive:write"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://www.amazon.com/ap/oa",
			TokenURL: "https://api.amazon.com/auth/o2/token",
		},
		ClientID:     rcloneClientID,
		ClientSecret: fs.Reveal(rcloneClientSecret),
		RedirectURL:  oauthutil.RedirectURL,
	}
)

// Register with Fs
func init() {
	fs.Register(&fs.Info{
		Name:  "amazon cloud drive",
		NewFs: NewFs,
		Config: func(name string) {
			err := oauthutil.Config(name, acdConfig)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
			}
		},
		Options: []fs.Option{{
			Name: oauthutil.ConfigClientID,
			Help: "Amazon Application Client Id - leave blank normally.",
		}, {
			Name: oauthutil.ConfigClientSecret,
			Help: "Amazon Application Client Secret - leave blank normally.",
		}},
	})
}

// Fs represents a remote acd server
type Fs struct {
	name     string             // name of this remote
	c        *acd.Client        // the connection to the acd server
	root     string             // the path we are working on
	dirCache *dircache.DirCache // Map of directory path to directory id
	pacer    *pacer.Pacer       // pacer for API calls
}

// Object describes a acd object
//
// Will definitely have info but maybe not meta
type Object struct {
	fs     *Fs       // what this object is part of
	remote string    // The remote path
	info   *acd.Node // Info from the acd object if known
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	return fmt.Sprintf("Amazon cloud drive root '%s'", f.root)
}

// Pattern to match a acd path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)

// parsePath parses an acd 'url'
func parsePath(path string) (root string) {
	root = strings.Trim(path, "/")
	return
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	429, // Rate exceeded.
	500, // Get occasional 500 Internal Server Error
	409, // Conflict - happens in the unit tests a lot
	503, // Service Unavailable
}

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
	return fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
	root = parsePath(root)
	oAuthClient, err := oauthutil.NewClient(name, acdConfig)
	if err != nil {
		log.Fatalf("Failed to configure amazon cloud drive: %v", err)
	}

	c := acd.NewClient(oAuthClient)
	c.UserAgent = fs.UserAgent
	f := &Fs{
		name:  name,
		root:  root,
		c:     c,
		pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
	}

	// Update endpoints
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		_, resp, err = f.c.Account.GetEndpoints()
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, fmt.Errorf("Failed to get endpoints: %v", err)
	}

	// Get rootID
	var rootInfo *acd.Folder
	err = f.pacer.Call(func() (bool, error) {
		rootInfo, resp, err = f.c.Nodes.GetRoot()
		return shouldRetry(resp, err)
	})
	if err != nil || rootInfo.Id == nil {
		return nil, fmt.Errorf("Failed to get root: %v", err)
	}

	f.dirCache = dircache.New(root, *rootInfo.Id, f)

	// Find the current root
	err = f.dirCache.FindRoot(false)
	if err != nil {
		// Assume it is a file
		newRoot, remote := dircache.SplitPath(root)
		newF := *f
		newF.dirCache = dircache.New(newRoot, *rootInfo.Id, &newF)
		newF.root = newRoot
		// Make new Fs which is the parent
		err = newF.dirCache.FindRoot(false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		obj := newF.newFsObjectWithInfo(remote, nil)
		if obj == nil {
			// File doesn't exist so return old f
			return f, nil
		}
		// return a Fs Limited to this object
		return fs.NewLimited(&newF, obj), nil
	}
	return f, nil
}

// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *Fs) newFsObjectWithInfo(remote string, info *acd.Node) fs.Object {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	if info != nil {
		// Set info but not meta
		o.info = info
	} else {
		err := o.readMetaData() // reads info and meta, returning an error
		if err != nil {
			// logged already FsDebug("Failed to read info: %s", err)
			return nil
		}
	}
	return o
}

// NewFsObject returns an FsObject from a path
//
// May return nil if an error occurred
func (f *Fs) NewFsObject(remote string) fs.Object {
	return f.newFsObjectWithInfo(remote, nil)
}

// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
	//fs.Debug(f, "FindLeaf(%q, %q)", pathID, leaf)
	folder := acd.FolderFromId(pathID, f.c.Nodes)
	var resp *http.Response
	var subFolder *acd.Folder
	err = f.pacer.Call(func() (bool, error) {
		subFolder, resp, err = folder.GetFolder(leaf)
		return shouldRetry(resp, err)
	})
	if err != nil {
		if err == acd.ErrorNodeNotFound {
			//fs.Debug(f, "...Not found")
			return "", false, nil
		}
		//fs.Debug(f, "...Error %v", err)
		return "", false, err
	}
	if subFolder.Status != nil && *subFolder.Status != statusAvailable {
		fs.Debug(f, "Ignoring folder %q in state %q", leaf, *subFolder.Status)
		time.Sleep(1 * time.Second) // FIXME wait for problem to go away!
		return "", false, nil
	}
//fs.Debug(f, "...Found(%q, %v)", *subFolder.Id, leaf)
|
||||
return *subFolder.Id, true, nil
|
||||
}
|
||||
|
||||
// CreateDir makes a directory with pathID as parent and name leaf
|
||||
func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
|
||||
//fmt.Printf("CreateDir(%q, %q)\n", pathID, leaf)
|
||||
folder := acd.FolderFromId(pathID, f.c.Nodes)
|
||||
var resp *http.Response
|
||||
var info *acd.Folder
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
info, resp, err = folder.CreateFolder(leaf)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
//fmt.Printf("...Error %v\n", err)
|
||||
return "", err
|
||||
}
|
||||
//fmt.Printf("...Id %q\n", *info.Id)
|
||||
return *info.Id, nil
|
||||
}
|
||||
|
||||
// list the objects into the function supplied
|
||||
//
|
||||
// If directories is set it only sends directories
|
||||
// User function to process a File item from listAll
|
||||
//
|
||||
// Should return true to finish processing
|
||||
type listAllFn func(*acd.Node) bool
|
||||
|
||||
// Lists the directory required calling the user function on each item found
|
||||
//
|
||||
// If the user fn ever returns true then it early exits with found = true
|
||||
func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
|
||||
query := "parents:" + dirID
|
||||
if directoriesOnly {
|
||||
query += " AND kind:" + folderKind
|
||||
} else if filesOnly {
|
||||
query += " AND kind:" + fileKind
|
||||
} else {
|
||||
// FIXME none of these work
|
||||
//query += " AND kind:(" + fileKind + " OR " + folderKind + ")"
|
||||
//query += " AND (kind:" + fileKind + " OR kind:" + folderKind + ")"
|
||||
}
|
||||
opts := acd.NodeListOptions{
|
||||
Filters: query,
|
||||
}
|
||||
var nodes []*acd.Node
|
||||
//var resp *http.Response
|
||||
OUTER:
|
||||
for {
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
nodes, resp, err = f.c.Nodes.GetNodes(&opts)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.ErrorLog(f, "Couldn't list files: %v", err)
|
||||
break
|
||||
}
|
||||
if nodes == nil {
|
||||
break
|
||||
}
|
||||
for _, node := range nodes {
|
||||
if node.Name != nil && node.Id != nil && node.Kind != nil && node.Status != nil {
|
||||
// Ignore nodes if not AVAILABLE
|
||||
if *node.Status != statusAvailable {
|
||||
continue
|
||||
}
|
||||
if fn(node) {
|
||||
found = true
|
||||
break OUTER
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Path should be directory path either "" or "path/"
|
||||
//
|
||||
// List the directory using a recursive list from the root
|
||||
//
|
||||
// This fetches the minimum amount of stuff but does more API calls
|
||||
// which makes it slow
|
||||
func (f *Fs) listDirRecursive(dirID string, path string, out fs.ObjectsChan) error {
|
||||
var subError error
|
||||
// Make the API request
|
||||
var wg sync.WaitGroup
|
||||
_, err := f.listAll(dirID, "", false, false, func(node *acd.Node) bool {
|
||||
// Recurse on directories
|
||||
switch *node.Kind {
|
||||
case folderKind:
|
||||
wg.Add(1)
|
||||
folder := path + *node.Name + "/"
|
||||
fs.Debug(f, "Reading %s", folder)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
err := f.listDirRecursive(*node.Id, folder, out)
|
||||
if err != nil {
|
||||
subError = err
|
||||
fs.ErrorLog(f, "Error reading %s:%s", folder, err)
|
||||
}
|
||||
}()
|
||||
return false
|
||||
case fileKind:
|
||||
if fs := f.newFsObjectWithInfo(path+*node.Name, node); fs != nil {
|
||||
out <- fs
|
||||
}
|
||||
default:
|
||||
// ignore ASSET etc
|
||||
}
|
||||
return false
|
||||
})
|
||||
wg.Wait()
|
||||
fs.Debug(f, "Finished reading %s", path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if subError != nil {
|
||||
return subError
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// List walks the path returning a channel of FsObjects
|
||||
func (f *Fs) List() fs.ObjectsChan {
|
||||
out := make(fs.ObjectsChan, fs.Config.Checkers)
|
||||
go func() {
|
||||
defer close(out)
|
||||
err := f.dirCache.FindRoot(false)
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.ErrorLog(f, "Couldn't find root: %s", err)
|
||||
} else {
|
||||
err = f.listDirRecursive(f.dirCache.RootID(), "", out)
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.ErrorLog(f, "List failed: %s", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
return out
|
||||
}
|
||||
|
||||
// ListDir lists the directories
|
||||
func (f *Fs) ListDir() fs.DirChan {
|
||||
out := make(fs.DirChan, fs.Config.Checkers)
|
||||
go func() {
|
||||
defer close(out)
|
||||
err := f.dirCache.FindRoot(false)
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.ErrorLog(f, "Couldn't find root: %s", err)
|
||||
} else {
|
||||
_, err := f.listAll(f.dirCache.RootID(), "", true, false, func(item *acd.Node) bool {
|
||||
dir := &fs.Dir{
|
||||
Name: *item.Name,
|
||||
Bytes: -1,
|
||||
Count: -1,
|
||||
}
|
||||
dir.When, _ = time.Parse(timeFormat, *item.ModifiedDate)
|
||||
out <- dir
|
||||
return false
|
||||
})
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.ErrorLog(f, "ListDir failed: %s", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
return out
|
||||
}
|
||||
|
||||
// Put the object into the container
|
||||
//
|
||||
// Copy the reader in to the new object which is returned
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
|
||||
// Temporary Object under construction
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
leaf, directoryID, err := f.dirCache.FindPath(remote, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
folder := acd.FolderFromId(directoryID, o.fs.c.Nodes)
|
||||
var info *acd.File
|
||||
var resp *http.Response
|
||||
err = f.pacer.CallNoRetry(func() (bool, error) {
|
||||
		if size != 0 {
			info, resp, err = folder.PutSized(in, size, leaf)
		} else {
			info, resp, err = folder.Put(in, leaf)
		}
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}
	o.info = info.Node
	return o, nil
}

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir() error {
	return f.dirCache.FindRoot(true)
}

// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(check bool) error {
	if f.root == "" {
		return fmt.Errorf("Can't purge root directory")
	}
	dc := f.dirCache
	err := dc.FindRoot(false)
	if err != nil {
		return err
	}
	rootID := dc.RootID()

	if check {
		// check directory is empty
		empty := true
		_, err = f.listAll(rootID, "", false, false, func(node *acd.Node) bool {
			switch *node.Kind {
			case folderKind:
				empty = false
				return true
			case fileKind:
				empty = false
				return true
			default:
fs.Debug("Found ASSET %s", *node.Id)
|
||||
			}
			return false
		})
		if err != nil {
			return err
		}
		if !empty {
			return fmt.Errorf("Directory not empty")
		}
	}

	node := acd.NodeFromId(rootID, f.c.Nodes)
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = node.Trash()
		return shouldRetry(resp, err)
	})
	if err != nil {
		return err
	}

	f.dirCache.ResetRoot()
	if err != nil {
		return err
	}
	return nil
}

// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir() error {
	return f.purgeCheck(true)
}

// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
	return fs.ModTimeNotSupported
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
//func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
//	srcObj, ok := src.(*Object)
//	if !ok {
//		fs.Debug(src, "Can't copy - not same remote type")
//		return nil, fs.ErrorCantCopy
//	}
//	srcFs := srcObj.fs
//	_, err := f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
//	if err != nil {
//		return nil, err
//	}
//	return f.NewFsObject(remote), nil
//}

// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge() error {
	return f.purgeCheck(false)
}

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Fs {
	return o.fs
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Md5sum() (string, error) {
	if o.info.ContentProperties.Md5 != nil {
		return *o.info.ContentProperties.Md5, nil
	}
	return "", nil
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return int64(*o.info.ContentProperties.Size)
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
	if o.info != nil {
		return nil
	}
	leaf, directoryID, err := o.fs.dirCache.FindPath(o.remote, false)
	if err != nil {
		return err
	}
	folder := acd.FolderFromId(directoryID, o.fs.c.Nodes)
	var resp *http.Response
	var info *acd.File
	err = o.fs.pacer.Call(func() (bool, error) {
		info, resp, err = folder.GetFile(leaf)
		return shouldRetry(resp, err)
	})
	if err != nil {
		fs.Debug(o, "Failed to read info: %s", err)
		return err
	}
	o.info = info.Node
	return nil
}

// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime() time.Time {
	err := o.readMetaData()
	if err != nil {
		fs.Log(o, "Failed to read metadata: %s", err)
		return time.Now()
	}
	modTime, err := time.Parse(timeFormat, *o.info.ModifiedDate)
	if err != nil {
		fs.Log(o, "Failed to read mtime from object: %s", err)
		return time.Now()
	}
	return modTime
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) {
	// FIXME not implemented
	return
}

// Storable returns a boolean showing whether this object is storable
func (o *Object) Storable() bool {
	return true
}

// Open an object for read
func (o *Object) Open() (in io.ReadCloser, err error) {
	file := acd.File{Node: o.info}
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		in, resp, err = file.Open()
		return shouldRetry(resp, err)
	})
	return in, err
}

// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
	file := acd.File{Node: o.info}
	var info *acd.File
	var resp *http.Response
	var err error
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		if size != 0 {
			info, resp, err = file.OverwriteSized(in, size)
		} else {
			info, resp, err = file.Overwrite(in)
		}
		return shouldRetry(resp, err)
	})
	if err != nil {
		return err
	}
	o.info = info.Node
	return nil
}

// Remove an object
func (o *Object) Remove() error {
	var resp *http.Response
	var err error
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.info.Trash()
		return shouldRetry(resp, err)
	})
	return err
}

// Check the interfaces are satisfied
var (
	_ fs.Fs     = (*Fs)(nil)
	_ fs.Purger = (*Fs)(nil)
	// _ fs.Copier = (*Fs)(nil)
	// _ fs.Mover = (*Fs)(nil)
	// _ fs.DirMover = (*Fs)(nil)
	_ fs.Object = (*Object)(nil)
)
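The `shouldRetry`/`pacer.Call` pairing above carries the backend's whole retry policy: each API call is wrapped in a closure that returns `(retry bool, err error)`, and the pacer re-runs it while the failure is retryable. A minimal sketch of just that contract; the `call` helper below is a hypothetical stand-in, not the real `pacer` package (which also rate-limits and adapts its sleep time):

```
// Sketch of the retry contract used above. `call` is a hypothetical
// stand-in for pacer.Call, not rclone code.
package main

import (
	"errors"
	"fmt"
	"time"
)

// call retries fn while it reports the failure as retryable,
// sleeping a fixed interval between attempts.
func call(fn func() (bool, error), maxTries int, sleep time.Duration) error {
	var err error
	for try := 1; try <= maxTries; try++ {
		var retry bool
		retry, err = fn()
		if err == nil || !retry {
			return err
		}
		time.Sleep(sleep)
	}
	return err
}

func main() {
	attempts := 0
	err := call(func() (bool, error) {
		attempts++
		if attempts < 3 {
			// e.g. a 429 or 500, as listed in retryErrorCodes
			return true, errors.New("rate exceeded")
		}
		return false, nil
	}, 5, 10*time.Millisecond)
	fmt.Println(attempts, err) // 3 <nil>
}
```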
56 amazonclouddrive/amazonclouddrive_test.go Normal file
@@ -0,0 +1,56 @@
// Test AmazonCloudDrive filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package amazonclouddrive_test

import (
	"testing"

	"github.com/ncw/rclone/amazonclouddrive"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
)

func init() {
	fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil))
	fstests.RemoteName = "TestAmazonCloudDrive:"
}

// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T) { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T) { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T) { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
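The `DO NOT EDIT` header ties in with the Makefile change above, where `gen_tests` now runs `go generate` instead of invoking the generator directly. That style of target works when the generating package carries a `go:generate` directive, roughly as below (the exact directive in fstest/fstests is an assumption, not quoted from the repo):

```
// Assumed sketch: a go:generate directive lets `go generate` in this
// directory re-run the test generator named in the Makefile above.

//go:generate go run gen_tests.go
package fstests
```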
21 appveyor.yml Normal file
@@ -0,0 +1,21 @@
version: "{build}"

os: Windows Server 2012 R2

clone_folder: c:\gopath\src\github.com\ncw\rclone

environment:
  GOPATH: c:\gopath

install:
  - go get golang.org/x/tools/cmd/vet
  - echo %PATH%
  - echo %GOPATH%
  - go version
  - go env
  - go get -d ./...

build_script:
  - go vet ./...
  - go test -v -cpu=2 ./...
  - go test -cpu=2 -short -race ./...
@@ -3,7 +3,7 @@
set -e

# This uses gox from https://github.com/mitchellh/gox
# Make sure you've run gox -build-toolchain
# Make sure you've run gox -build-toolchain - not required for go >= 1.5

if [ "$1" == "" ]; then
	echo "Syntax: $0 Version"
@@ -13,7 +13,9 @@ VERSION="$1"

rm -rf build

gox -output "build/{{.Dir}}-${VERSION}-{{.OS}}-{{.Arch}}/{{.Dir}}"
gox -output "build/{{.Dir}}-${VERSION}-{{.OS}}-{{.Arch}}/{{.Dir}}" -os "darwin linux freebsd openbsd windows freebsd netbsd plan9 solaris"
# Not implemented yet: nacl dragonfly android
# gox -osarch-list for definitive list

mv build/rclone-${VERSION}-darwin-amd64 build/rclone-${VERSION}-osx-amd64
mv build/rclone-${VERSION}-darwin-386 build/rclone-${VERSION}-osx-386
@@ -21,10 +23,12 @@ mv build/rclone-${VERSION}-darwin-386 build/rclone-${VERSION}-osx-386
cd build

for d in `ls`; do
	cp -a ../README.txt $d/
	cp -a ../README.html $d/
	cp -a ../MANUAL.txt $d/README.txt
	cp -a ../MANUAL.html $d/README.html
	cp -a ../rclone.1 $d/
	zip -r9 $d.zip $d
	d_current=${d/-${VERSION}/-current}
	ln $d.zip $d_current.zip
	rm -rf $d
done
272 dircache/dircache.go Normal file
@@ -0,0 +1,272 @@
// Package dircache provides a simple cache for caching directory to path lookups
package dircache

// _methods are called without the lock

import (
	"fmt"
	"log"
	"strings"
	"sync"
)

// DirCache caches paths to directory IDs and vice versa
type DirCache struct {
	cacheMu      sync.RWMutex
	cache        map[string]string
	invCache     map[string]string
	mu           sync.Mutex
	fs           DirCacher // Interface to find and make stuff
	trueRootID   string    // ID of the absolute root
	root         string    // the path we are working on
	rootID       string    // ID of the root directory
	rootParentID string    // ID of the root's parent directory
	foundRoot    bool      // Whether we have found the root or not
}

// DirCacher describes an interface for doing the low level directory work
type DirCacher interface {
	FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error)
	CreateDir(pathID, leaf string) (newID string, err error)
}

// New makes a DirCache
//
// The cache is safe for concurrent use
func New(root string, trueRootID string, fs DirCacher) *DirCache {
	d := &DirCache{
		trueRootID: trueRootID,
		root:       root,
		fs:         fs,
	}
	d.Flush()
	d.ResetRoot()
	return d
}

// Get an ID given a path
func (dc *DirCache) Get(path string) (id string, ok bool) {
	dc.cacheMu.RLock()
	id, ok = dc.cache[path]
	dc.cacheMu.RUnlock()
	return
}

// GetInv gets a path given an ID
func (dc *DirCache) GetInv(id string) (path string, ok bool) {
	dc.cacheMu.RLock()
	path, ok = dc.invCache[id]
	dc.cacheMu.RUnlock()
	return
}

// Put a path, id into the map
func (dc *DirCache) Put(path, id string) {
	dc.cacheMu.Lock()
	dc.cache[path] = id
	dc.invCache[id] = path
	dc.cacheMu.Unlock()
}

// Flush the map of all data
func (dc *DirCache) Flush() {
	dc.cacheMu.Lock()
	dc.cache = make(map[string]string)
	dc.invCache = make(map[string]string)
	dc.cacheMu.Unlock()
}

// SplitPath splits a path into directory, leaf
//
// Path shouldn't start or end with a /
//
// If there are no slashes then directory will be "" and leaf = path
func SplitPath(path string) (directory, leaf string) {
	lastSlash := strings.LastIndex(path, "/")
	if lastSlash >= 0 {
		directory = path[:lastSlash]
		leaf = path[lastSlash+1:]
	} else {
		directory = ""
		leaf = path
	}
	return
}

// FindDir finds the directory passed in returning the directory ID
// starting from pathID
//
// Path shouldn't start or end with a /
//
// If create is set it will make the directory if not found
//
// Algorithm:
//  Look in the cache for the path, if found return the pathID
//  If not found strip the last path off the path and recurse
//  Now have a parent directory id, so look in the parent for self and return it
func (dc *DirCache) FindDir(path string, create bool) (pathID string, err error) {
	dc.mu.Lock()
	defer dc.mu.Unlock()
	return dc._findDir(path, create)
}

// Look for the root and in the cache - safe to call without the mu
func (dc *DirCache) _findDirInCache(path string) string {
	// fmt.Println("Finding",path,"create",create,"cache",cache)
	// If it is the root, then return it
	if path == "" {
		// fmt.Println("Root")
		return dc.rootID
	}

	// If it is in the cache then return it
	pathID, ok := dc.Get(path)
	if ok {
		// fmt.Println("Cache hit on", path)
		return pathID
	}

	return ""
}

// Unlocked findDir - must have mu
func (dc *DirCache) _findDir(path string, create bool) (pathID string, err error) {
	pathID = dc._findDirInCache(path)
	if pathID != "" {
		return pathID, nil
	}

	// Split the path into directory, leaf
	directory, leaf := SplitPath(path)

	// Recurse and find pathID for parent directory
	parentPathID, err := dc._findDir(directory, create)
	if err != nil {
		return "", err

	}

	// Find the leaf in parentPathID
	pathID, found, err := dc.fs.FindLeaf(parentPathID, leaf)
	if err != nil {
		return "", err
	}

	// If not found create the directory if required or return an error
	if !found {
		if create {
			pathID, err = dc.fs.CreateDir(parentPathID, leaf)
			if err != nil {
				return "", fmt.Errorf("Failed to make directory: %v", err)
			}
		} else {
			return "", fmt.Errorf("Couldn't find directory: %q", path)
		}
	}

	// Store the leaf directory in the cache
	dc.Put(path, pathID)

	// fmt.Println("Dir", path, "is", pathID)
	return pathID, nil
}

// FindPath finds the leaf and directoryID from a path
//
// If create is set parent directories will be created if they don't exist
func (dc *DirCache) FindPath(path string, create bool) (leaf, directoryID string, err error) {
	dc.mu.Lock()
	defer dc.mu.Unlock()
	directory, leaf := SplitPath(path)
	directoryID, err = dc._findDir(directory, create)
	if err != nil {
		if create {
			err = fmt.Errorf("Couldn't find or make directory %q: %s", directory, err)
		} else {
			err = fmt.Errorf("Couldn't find directory %q: %s", directory, err)
		}
	}
	return
}

// FindRoot finds the root directory if not already found
//
// Resets the root directory
//
// If create is set it will make the directory if not found
func (dc *DirCache) FindRoot(create bool) error {
	dc.mu.Lock()
	defer dc.mu.Unlock()
	if dc.foundRoot {
		return nil
	}
	rootID, err := dc._findDir(dc.root, create)
	if err != nil {
		return err
	}
	dc.foundRoot = true
	dc.rootID = rootID

	// Find the parent of the root while we still have the root
	// directory tree cached
	rootParentPath, _ := SplitPath(dc.root)
	dc.rootParentID, _ = dc.Get(rootParentPath)

	// Reset the tree based on dc.root
	dc.Flush()
	// Put the root directory in
	dc.Put("", dc.rootID)
	return nil
}

// FoundRoot returns whether the root directory has been found yet
//
// Call this from FindLeaf or CreateDir only
func (dc *DirCache) FoundRoot() bool {
	return dc.foundRoot
}

// RootID returns the ID of the root directory
//
// This should be called after FindRoot
func (dc *DirCache) RootID() string {
	dc.mu.Lock()
	defer dc.mu.Unlock()
	if !dc.foundRoot {
		log.Fatalf("Internal Error: RootID() called before FindRoot")
	}
	return dc.rootID
}

// RootParentID returns the ID of the parent of the root directory
//
// This should be called after FindRoot
func (dc *DirCache) RootParentID() (string, error) {
	dc.mu.Lock()
	defer dc.mu.Unlock()
	if !dc.foundRoot {
return "", fmt.Errorf("Internal Error: RootID() called before FindRoot")
|
||||
}
|
||||
if dc.rootParentID == "" {
|
||||
return "", fmt.Errorf("Internal Error: Didn't find rootParentID")
|
||||
}
|
||||
if dc.rootID == dc.trueRootID {
|
||||
return "", fmt.Errorf("Is root directory")
|
||||
}
|
||||
return dc.rootParentID, nil
|
||||
}
|
||||
|
||||
// ResetRoot resets the root directory to the absolute root and clears
|
||||
// the DirCache
|
||||
func (dc *DirCache) ResetRoot() {
|
||||
dc.mu.Lock()
|
||||
defer dc.mu.Unlock()
|
||||
dc.foundRoot = false
|
||||
dc.Flush()
|
||||
|
||||
// Put the true root in
|
||||
dc.rootID = dc.trueRootID
|
||||
|
||||
// Put the root directory in
|
||||
dc.Put("", dc.rootID)
|
||||
}
|
||||
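A minimal sketch of how a backend might drive this cache. `NewDirCache` and
the `DirCacher` parameter type are illustrative names only, not necessarily
the real constructor or interface in this codebase:

```
// Sketch only: assumes a NewDirCache(root, trueRootID, fs) constructor
// exists and that myFs implements FindLeaf and CreateDir.
func exampleUpload(myFs DirCacher) error {
	dc := NewDirCache("backup/photos", "0", myFs) // hypothetical constructor

	// Find (or create) the root before resolving any paths.
	if err := dc.FindRoot(true); err != nil {
		return err
	}

	// Resolve a path to its leaf name and parent directory ID,
	// creating intermediate directories as required.
	leaf, directoryID, err := dc.FindPath("2015/09/holiday.jpg", true)
	if err != nil {
		return err
	}
	fmt.Println("would upload", leaf, "into directory", directoryID)
	return nil
}
```
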
@@ -1,8 +1,8 @@
---
title: "Rclone"
description: "rclone syncs files to and from Google Drive, S3, Swift, Cloudfiles, Dropbox and Google Cloud Storage."
description: "rclone syncs files to and from Google Drive, S3, Swift, Cloudfiles, Dropbox, Google Cloud Storage and Amazon Cloud Drive."
type: page
date: "2014-07-17"
date: "2015-09-06"
groups: ["about"]
---

@@ -18,6 +18,8 @@ Rclone is a command line program to sync files and directories to and from
* Openstack Swift / Rackspace cloud files / Memset Memstore
* Dropbox
* Google Cloud Storage
* Amazon Cloud Drive
* Microsoft One Drive
* The local filesystem

Features

104 docs/content/amazonclouddrive.md Normal file
@@ -0,0 +1,104 @@
---
title: "Amazon Cloud Drive"
description: "Rclone docs for Amazon Cloud Drive"
date: "2015-09-06"
---

<i class="fa fa-amazon"></i> Amazon Cloud Drive
-----------------------------------------

Paths are specified as `remote:path`

Paths may be as deep as required, eg `remote:directory/subdirectory`.

The initial setup for Amazon cloud drive involves getting a token from
Amazon which you need to do in your browser. `rclone config` walks
you through it.

Here is an example of how to make a remote called `remote`. First run:

    rclone config

This will guide you through an interactive setup process:

```
n) New remote
d) Delete remote
q) Quit config
e/n/d/q> n
name> remote
What type of source is it?
Choose a number from below
 1) amazon cloud drive
 2) drive
 3) dropbox
 4) google cloud storage
 5) local
 6) s3
 7) swift
type> 1
Amazon Application Client Id - leave blank normally.
client_id>
Amazon Application Client Secret - leave blank normally.
client_secret>
Remote config
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
Log in and authorize rclone for access
Waiting for code...
Got code
--------------------
[remote]
client_id =
client_secret =
token = {"access_token":"xxxxxxxxxxxxxxxxxxxxxxx","token_type":"bearer","refresh_token":"xxxxxxxxxxxxxxxxxx","expiry":"2015-09-06T16:07:39.658438471+01:00"}
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
```

Note that rclone runs a webserver on your local machine to collect the
token as returned from Amazon. This only runs from the moment it
opens your browser to the moment you get back the verification
code. This is on `http://127.0.0.1:53682/` and it may require
you to unblock it temporarily if you are running a host firewall.

Once configured you can then use `rclone` like this,

List directories in top level of your Amazon cloud drive

    rclone lsd remote:

List all the files in your Amazon cloud drive

    rclone ls remote:

To copy a local directory to an Amazon cloud drive directory called backup

    rclone copy /home/source remote:backup

### Modified time and MD5SUMs ###

Amazon cloud drive doesn't allow modification times to be changed via
the API so these won't be accurate or used for syncing.

It does store MD5SUMs so for a more accurate sync, you can use the
`--checksum` flag.
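For example (the paths here are illustrative), to sync using MD5SUMs
rather than modification times:

    rclone --checksum sync /home/source remote:backup
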
### Deleting files ###

Any files you delete with rclone will end up in the trash. Amazon
don't provide an API to permanently delete files, nor to empty the
trash, so you will have to do that with one of Amazon's apps or via
the Amazon cloud drive website.

### Limitations ###

Note that Amazon cloud drive is case insensitive so you can't have a
file called "Hello.doc" and one called "hello.doc".

Amazon cloud drive has rate limiting so you may notice errors in the
sync (429 errors). rclone will automatically retry the sync up to 3
times by default (see `--retries` flag) which should hopefully work
around this problem.
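If the default of 3 retries is not enough you can raise it, eg
(illustrative):

    rclone --retries 10 sync /home/source remote:backup
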
20 docs/content/authors.md Normal file
@@ -0,0 +1,20 @@
---
title: "Authors"
description: "Rclone Authors and Contributors"
date: "2015-09-28"
---

Authors
-------

* Nick Craig-Wood <nick@craig-wood.com>

Contributors
------------

* Alex Couper <amcouper@gmail.com>
* Leonid Shalupov <leonid@shalupov.com>
* Shimon Doodkin <helpmepro1@gmail.com>
* Colin Nicholson <colin@colinn.com>
* Klaus Post <klauspost@gmail.com>
* Sergey Tolmachev <tolsi.ru@gmail.com>
31 docs/content/bugs.md Normal file
@@ -0,0 +1,31 @@
---
title: "Bugs"
description: "Rclone Bugs and Limitations"
date: "2014-06-16"
---

Bugs and Limitations
--------------------

### Empty directories are left behind / not created ###

With remotes that have a concept of directory, eg Local and Drive,
empty directories may be left behind, or not created when one was
expected.

This is because rclone doesn't have a concept of a directory - it only
works on objects. Most of the object storage systems can't actually
store a directory so there is nowhere for rclone to store anything
about directories.

You can work round this to some extent with the `purge` command which
will delete everything under the path, **including** empty directories.

This may be fixed at some point in
[Issue #100](https://github.com/ncw/rclone/issues/100)

### Directory timestamps aren't preserved ###

For the same reason as the above, rclone doesn't have a concept of a
directory - it only works on objects, therefore it can't preserve the
timestamps of directories.
207 docs/content/changelog.md Normal file
@@ -0,0 +1,207 @@
---
title: "Documentation"
description: "Rclone Changelog"
date: "2015-11-07"
---

Changelog
---------

* v1.24 - 2015-11-07
  * New features
    * Add support for Microsoft One Drive
    * Add `--no-check-certificate` option to disable server certificate verification
    * Add async readahead buffer for faster transfer of big files
  * Fixes
    * Allow spaces in remotes and check remote names for validity at creation time
    * Allow '&' and disallow ':' in Windows filenames.
  * Swift
    * Ignore directory marker objects where appropriate - allows working with Hubic
    * Don't delete the container if fs wasn't at root
  * S3
    * Don't delete the bucket if fs wasn't at root
  * Google Cloud Storage
    * Don't delete the bucket if fs wasn't at root
* v1.23 - 2015-10-03
  * New features
    * Implement `rclone size` for measuring remotes
  * Fixes
    * Fix headless config for drive and gcs
    * Tell the user they should try again if the webserver method failed
    * Improve output of `--dump-headers`
  * S3
    * Allow anonymous access to public buckets
  * Swift
    * Stop chunked operations logging "Failed to read info: Object Not Found"
    * Use Content-Length on uploads for extra reliability
* v1.22 - 2015-09-28
  * Implement rsync like include and exclude flags
  * swift
    * Support files > 5GB - thanks Sergey Tolmachev
* v1.21 - 2015-09-22
  * New features
    * Display individual transfer progress
    * Make lsl output times in localtime
  * Fixes
    * Fix allowing user to override credentials again in Drive, GCS and ACD
  * Amazon Cloud Drive
    * Implement compliant pacing scheme
  * Google Drive
    * Make directory reads concurrent for increased speed.
* v1.20 - 2015-09-15
  * New features
    * Amazon Cloud Drive support
    * Oauth support redone - fix many bugs and improve usability
      * Use "golang.org/x/oauth2" as oauth library of choice
      * Improve oauth usability for smoother initial signup
      * drive, googlecloudstorage: optionally use auto config for the oauth token
    * Implement --dump-headers and --dump-bodies debug flags
    * Show multiple matched commands if abbreviation too short
    * Implement server side move where possible
  * local
    * Always use UNC paths internally on Windows - fixes a lot of bugs
  * dropbox
    * force use of our custom transport which makes timeouts work
  * Thanks to Klaus Post for lots of help with this release
* v1.19 - 2015-08-28
  * New features
    * Server side copies for s3/swift/drive/dropbox/gcs
    * Move command - uses server side copies if it can
    * Implement --retries flag - tries 3 times by default
    * Build for plan9/amd64 and solaris/amd64 too
  * Fixes
    * Make a current version download with a fixed URL for scripting
    * Ignore rmdir in limited fs rather than throwing error
  * dropbox
    * Increase chunk size to improve upload speeds massively
    * Issue an error message when trying to upload bad file name
* v1.18 - 2015-08-17
  * drive
    * Add `--drive-use-trash` flag so rclone trashes instead of deletes
    * Add "Forbidden to download" message for files with no downloadURL
  * dropbox
    * Remove datastore
      * This was deprecated and it caused a lot of problems
      * Modification times and MD5SUMs no longer stored
    * Fix uploading files > 2GB
  * s3
    * use official AWS SDK from github.com/aws/aws-sdk-go
    * **NB** will most likely require you to delete and recreate remote
    * enable multipart upload which enables files > 5GB
    * tested with Ceph / RadosGW / S3 emulation
    * many thanks to Sam Liston and Brian Haymore at the [Utah
      Center for High Performance Computing](https://www.chpc.utah.edu/) for a Ceph test account
  * misc
    * Show errors when reading the config file
    * Do not print stats in quiet mode - thanks Leonid Shalupov
    * Add FAQ
    * Fix created directories not obeying umask
    * Linux installation instructions - thanks Shimon Doodkin
* v1.17 - 2015-06-14
  * dropbox: fix case insensitivity issues - thanks Leonid Shalupov
* v1.16 - 2015-06-09
  * Fix uploading big files which was causing timeouts or panics
  * Don't check md5sum after download with --size-only
* v1.15 - 2015-06-06
  * Add --checksum flag to only discard transfers by MD5SUM - thanks Alex Couper
  * Implement --size-only flag to sync on size not checksum & modtime
  * Expand docs and remove duplicated information
  * Document rclone's limitations with directories
  * dropbox: update docs about case insensitivity
* v1.14 - 2015-05-21
  * local: fix encoding of non utf-8 file names - fixes a duplicate file problem
  * drive: docs about rate limiting
  * google cloud storage: Fix compile after API change in "google.golang.org/api/storage/v1"
* v1.13 - 2015-05-10
  * Revise documentation (especially sync)
  * Implement --timeout and --conntimeout
  * s3: ignore etags from multipart uploads which aren't md5sums
* v1.12 - 2015-03-15
  * drive: Use chunked upload for files above a certain size
  * drive: add --drive-chunk-size and --drive-upload-cutoff parameters
  * drive: switch to insert from update when a failed copy deletes the upload
  * core: Log duplicate files if they are detected
* v1.11 - 2015-03-04
  * swift: add region parameter
  * drive: fix crash on failed to update remote mtime
  * In remote paths, change native directory separators to /
  * Add synchronization to ls/lsl/lsd output to stop corruptions
  * Ensure all stats/log messages go to stderr
  * Add --log-file flag to log everything (including panics) to file
  * Make it possible to disable stats printing with --stats=0
  * Implement --bwlimit to limit data transfer bandwidth
* v1.10 - 2015-02-12
  * s3: list an unlimited number of items
  * Fix getting stuck in the configurator
* v1.09 - 2015-02-07
  * windows: Stop drive letters (eg C:) getting mixed up with remotes (eg drive:)
  * local: Fix directory separators on Windows
  * drive: fix rate limit exceeded errors
* v1.08 - 2015-02-04
  * drive: fix subdirectory listing to not list entire drive
  * drive: Fix SetModTime
  * dropbox: adapt code to recent library changes
* v1.07 - 2014-12-23
  * google cloud storage: fix memory leak
* v1.06 - 2014-12-12
  * Fix "Couldn't find home directory" on OSX
  * swift: Add tenant parameter
  * Use new location of Google API packages
* v1.05 - 2014-08-09
  * Improved tests and consequently lots of minor fixes
  * core: Fix race detected by go race detector
  * core: Fixes after running errcheck
  * drive: reset root directory on Rmdir and Purge
  * fs: Document that Purger returns error on empty directory, test and fix
  * google cloud storage: fix ListDir on subdirectory
  * google cloud storage: re-read metadata in SetModTime
  * s3: make reading metadata more reliable to work around eventual consistency problems
  * s3: strip trailing / from ListDir()
  * swift: return directories without / in ListDir
* v1.04 - 2014-07-21
  * google cloud storage: Fix crash on Update
* v1.03 - 2014-07-20
  * swift, s3, dropbox: fix updated files being marked as corrupted
  * Make compile with go 1.1 again
* v1.02 - 2014-07-19
  * Implement Dropbox remote
  * Implement Google Cloud Storage remote
  * Verify Md5sums and Sizes after copies
  * Remove times from "ls" command - lists sizes only
  * Add "lsl" - lists times and sizes
  * Add "md5sum" command
* v1.01 - 2014-07-04
  * drive: fix transfer of big files using up lots of memory
* v1.00 - 2014-07-03
  * drive: fix whole second dates
* v0.99 - 2014-06-26
  * Fix --dry-run not working
  * Make compatible with go 1.1
* v0.98 - 2014-05-30
  * s3: Treat missing Content-Length as 0 for some ceph installations
  * rclonetest: add file with a space in
* v0.97 - 2014-05-05
  * Implement copying of single files
  * s3 & swift: support paths inside containers/buckets
* v0.96 - 2014-04-24
  * drive: Fix multiple files of same name being created
  * drive: Use o.Update and fs.Put to optimise transfers
  * Add version number, -V and --version
* v0.95 - 2014-03-28
  * rclone.org: website, docs and graphics
  * drive: fix path parsing
* v0.94 - 2014-03-27
  * Change remote format one last time
  * GNU style flags
* v0.93 - 2014-03-16
  * drive: store token in config file
  * cross compile other versions
  * set strict permissions on config file
* v0.92 - 2014-03-15
  * Config fixes and --config option
* v0.91 - 2014-03-15
  * Make config file
* v0.90 - 2013-06-27
  * Project named rclone
* v0.00 - 2012-11-18
  * Project started

@@ -5,8 +5,17 @@ date: "2014-04-26"
---

Contact the rclone project
--------------------------

The project website is at:

* https://github.com/ncw/rclone

There you can file bug reports, ask for help or contribute pull
requests.

See also

* [Github project page for source, reporting bugs and pull requests](http://github.com/ncw/rclone)
* <a href="https://google.com/+RcloneOrg" rel="publisher">Google+ page for general comments</a>

Or email [Nick Craig-Wood](mailto:nick@craig-wood.com)

@@ -1,22 +1,9 @@
---
title: "Documentation"
description: "Rclone Documentation"
date: "2014-07-17"
description: "Rclone Usage"
date: "2015-06-06"
---

Install
-------

Rclone is a Go program and comes as a single binary file.

[Download](/downloads/) the relevant binary.

Or alternatively if you have Go installed use

    go get github.com/ncw/rclone

and this will build the binary in `$GOPATH/bin`.

Configure
---------

@@ -30,11 +17,13 @@ option:

    rclone config

See below for detailed instructions for
See the following for detailed instructions for

* [Google drive](/drive/)
* [Amazon S3](/s3/)
* [Swift / Rackspace Cloudfiles / Memset Memstore](/swift/)
* [Dropbox](/dropbox/)
* [Google Cloud Storage](/googlecloudstorage/)
* [Local filesystem](/local/)

Usage
@@ -55,13 +44,13 @@ You can define as many storage paths as you like in the config file.
Subcommands
-----------

    rclone copy source:path dest:path
### rclone copy source:path dest:path ###

Copy the source to the destination. Doesn't transfer
unchanged files, testing by size and modification time or
MD5SUM. Doesn't delete files from the destination.

    rclone sync source:path dest:path
### rclone sync source:path dest:path ###

Sync the source to the destination, changing the destination
only. Doesn't transfer unchanged files, testing by size and
@@ -69,100 +58,269 @@ modification time or MD5SUM. Destination is updated to match
source, including deleting files if necessary. Since this can
cause data loss, test first with the `--dry-run` flag.

    rclone ls [remote:path]
### rclone ls remote:path ###

List all the objects in the path with size and path.

    rclone lsd [remote:path]
### rclone lsd remote:path ###

List all directories/containers/buckets in the path.

    rclone lsl [remote:path]
### rclone lsl remote:path ###

List all the objects in the path with modification time,
size and path.

    rclone md5sum [remote:path]
### rclone md5sum remote:path ###

Produces an md5sum file for all the objects in the path. This
is in the same format as the standard md5sum tool produces.

    rclone mkdir remote:path
### rclone size remote:path ###

Prints the total size of objects in remote:path and the number of
objects.

### rclone mkdir remote:path ###

Make the path if it doesn't already exist

    rclone rmdir remote:path
### rclone rmdir remote:path ###

Remove the path. Note that you can't remove a path with
objects in it, use purge for that.

    rclone purge remote:path
### rclone purge remote:path ###

Remove the path and all of its contents.

    rclone check source:path dest:path
### rclone check source:path dest:path ###

Checks the files in the source and destination match. It
compares sizes and MD5SUMs and prints a report of files which
don't match. It doesn't alter the source or destination.

    rclone config
### rclone config ###

Enter an interactive configuration session.

    rclone help
### rclone help ###

This help.
Prints help on rclone commands and options.

```
--bwlimit=0: Bandwidth limit in kBytes/s, or use suffix k|M|G
--checkers=8: Number of checkers to run in parallel.
--config="~/.rclone.conf": Config file.
--contimeout=1m0s: Connect timeout
-n, --dry-run=false: Do a trial run with no permanent changes
--log-file="": Log everything to this file
--modify-window=1ns: Max time diff to be considered the same
-q, --quiet=false: Print as little stuff as possible
--stats=1m0s: Interval to print stats (0 to disable)
--timeout=5m0s: IO idle timeout
--transfers=4: Number of file transfers to run in parallel.
-v, --verbose=false: Print lots more stuff
-V, --version=false: Print the version number
```
Server Side Copy
----------------

Developer options:
Drive, S3, Dropbox, Swift and Google Cloud Storage support server side
copy.

```
--cpuprofile="": Write cpu profile to file
```
This means if you want to copy one folder to another then rclone won't
download all the files and re-upload them; it will instruct the server
to copy them in place.

License
Eg

    rclone copy s3:oldbucket s3:newbucket

Will copy the contents of `oldbucket` to `newbucket` without
downloading and re-uploading.

Remotes which don't support server side copy (eg local) **will**
download and re-upload in this case.

Server side copies are used with `sync` and `copy` and will be
identified in the log when using the `-v` flag.

Server side copies will only be attempted if the remote names are the
same.

This can be used when scripting to make aged backups efficiently, eg

    rclone sync remote:current-backup remote:previous-backup
    rclone sync /path/to/files remote:current-backup

Options
-------

This is free software under the terms of the MIT license (check the
COPYING file included in this package).
Rclone has a number of options to control its behaviour.

Bugs
----
Options which use TIME use the go time parser. A duration string is a
possibly signed sequence of decimal numbers, each with optional
fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid
time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".

* Empty directories left behind with Local and Drive
  * eg purging a local directory with subdirectories doesn't work
Options which use SIZE use kByte by default. However a suffix of `k`
for kBytes, `M` for MBytes and `G` for GBytes may be used. These are
the binary units, eg 2**10, 2**20, 2**30 respectively.

Contact and support
-------------------
### --bwlimit=SIZE ###

The project website is at:
Bandwidth limit in kBytes/s, or use suffix k|M|G. The default is `0`
which means to not limit bandwidth.

* https://github.com/ncw/rclone
For example to limit bandwidth usage to 10 MBytes/s use `--bwlimit 10M`

There you can file bug reports, ask for help or contribute patches.
This only limits the bandwidth of the data transfer, it doesn't limit
the bandwidth of the directory listings etc.

Authors
-------
### --checkers=N ###

* Nick Craig-Wood <nick@craig-wood.com>
The number of checkers to run in parallel. Checkers do the equality
checking of files during a sync. For some storage systems (eg s3,
swift, dropbox) this can take a significant amount of time so they are
run in parallel.

Contributors
------------
The default is to run 8 checkers in parallel.

* Your name goes here!
### -c, --checksum ###

Normally rclone will look at modification time and size of files to
see if they are equal. If you set this flag then rclone will check
MD5SUM and size to determine if files are equal.

This is very useful when transferring between remotes which store the
MD5SUM on the object which include swift, s3, drive, and google cloud
storage.

Eg `rclone --checksum sync s3:/bucket swift:/bucket` would run much
quicker than without the `--checksum` flag.

When using this flag, rclone won't update mtimes of remote files if
they are incorrect as it would normally.

### --config=CONFIG_FILE ###

Specify the location of the rclone config file. Normally this is in
your home directory as a file called `.rclone.conf`. If you run
`rclone -h` and look at the help for the `--config` option you will
see where the default location is for you. Use this flag to override
the config location, eg `rclone --config=".myconfig" .config`.

### --contimeout=TIME ###

Set the connection timeout. This should be in go time format which
looks like `5s` for 5 seconds, `10m` for 10 minutes, or `3h30m`.

The connection timeout is the amount of time rclone will wait for a
connection to go through to a remote object storage system. It is
`1m` by default.

### -n, --dry-run ###

Do a trial run with no permanent changes. Use this in combination
with the `-v` flag to see what rclone would do without actually doing
it. Useful when setting up the `sync` command.
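For example (paths illustrative):

    rclone -n -v sync /home/source remote:backup
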
### --log-file=FILE ###

Log all of rclone's output to FILE. This is not active by default.
This can be useful for tracking down problems with syncs in
combination with the `-v` flag.

### --modify-window=TIME ###

When checking whether a file has been modified, this is the maximum
allowed time difference that a file can have and still be considered
equivalent.

The default is `1ns` unless this is overridden by a remote. For
example OS X only stores modification times to the nearest second so
if you are reading and writing to an OS X filing system this will be
`1s` by default.

This command line flag allows you to override that computed default.
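For example (illustrative), when writing to a filesystem which only
keeps whole second timestamps:

    rclone --modify-window 1s sync /home/source remote:backup
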
### -q, --quiet ###

Normally rclone outputs stats and a completion message. If you set
this flag it will make as little output as possible.

### --size-only ###

Normally rclone will look at modification time and size of files to
see if they are equal. If you set this flag then rclone will check
only the size.

This can be useful transferring files from dropbox which have been
modified by the desktop sync client which doesn't set checksums or
modification times in the same way as rclone.

When using this flag, rclone won't update mtimes of remote files if
they are incorrect as it would normally.

### --stats=TIME ###

Rclone will print stats at regular intervals to show its progress.

This sets the interval.

The default is `1m`. Use 0 to disable.

### --timeout=TIME ###

This sets the IO idle timeout. If a transfer has started but then
becomes idle for this long it is considered broken and disconnected.

The default is `5m`. Set to 0 to disable.

### --transfers=N ###

The number of file transfers to run in parallel. It can sometimes be
useful to set this to a smaller number if the remote is giving a lot
of timeouts or bigger if you have lots of bandwidth and a fast remote.

The default is to run 4 file transfers in parallel.
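For example (illustrative), to run more transfers in parallel over a
fast connection:

    rclone --transfers 8 copy /home/source remote:backup
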
### -v, --verbose ###

If you set this flag, rclone will become very verbose telling you
about every file it considers and transfers.

Very useful for debugging.

### -V, --version ###

Prints the version number

Developer options
-----------------

These options are useful when developing or debugging rclone. There
are also some more remote specific options which aren't documented
here which are used for testing. These start with the remote name, eg
`--drive-test-option`.

### --cpuprofile=FILE ###

Write cpu profile to file. This can be analysed with `go tool pprof`.

### --no-check-certificate=true/false ###

`--no-check-certificate` controls whether a client verifies the
server's certificate chain and host name.
If `--no-check-certificate` is true, TLS accepts any certificate
presented by the server and any host name in that certificate.
In this mode, TLS is susceptible to man-in-the-middle attacks.

This option defaults to `false`.

**This should be used only for testing.**

Filtering
---------

For the filtering options

* `--delete-excluded`
* `--filter`
* `--filter-from`
* `--exclude`
* `--exclude-from`
* `--include`
* `--include-from`
* `--files-from`
* `--min-size`
* `--max-size`
* `--dump-filters`

See the [filtering section](/filtering/).

30 docs/content/donate.md Normal file
@@ -0,0 +1,30 @@
---
title: "Flowers for My Wife"
description: "Flowers for My Wife."
type: page
date: "2015-09-06"
---

Flowers for My Wife
===================

Rclone is a pure open source, for-love-not-money project. However I've
had requests for a donation page and coding it does take me away from
something else I love - my wonderful wife.

So if you would like to send a donation, I will use it to buy flowers
for her which will make her very happy.

<form action="https://www.paypal.com/cgi-bin/webscr" method="post" target="_top">
<input type="hidden" name="cmd" value="_s-xclick">
<input type="hidden" name="hosted_button_id" value="XQMMNUD5ZY49J">
<input type="image" src="https://www.paypalobjects.com/en_US/GB/i/btn/btn_donateCC_LG.gif" border="0" name="submit" alt="PayPal – The safer, easier way to pay online.">
<img alt="" border="0" src="https://www.paypalobjects.com/en_GB/i/scr/pixel.gif" width="1" height="1">
</form>

If you would prefer to express your gratitude by promoting the
project, or helping with it, I'd be over the moon with that too!

Thanks

Nick

@@ -2,34 +2,73 @@
title: "Rclone downloads"
description: "Download rclone binaries for your OS."
type: page
date: "2015-05-10"
date: "2015-11-07"
---

Rclone Download v1.13
Rclone Download v1.24
=====================

* Windows
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.13-windows-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.13-windows-amd64.zip)
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.24-windows-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.24-windows-amd64.zip)
* OSX
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.13-osx-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.13-osx-amd64.zip)
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.24-osx-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.24-osx-amd64.zip)
* Linux
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.13-linux-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.13-linux-amd64.zip)
  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.13-linux-arm.zip)
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.24-linux-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.24-linux-amd64.zip)
  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.24-linux-arm.zip)
* FreeBSD
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.13-freebsd-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.13-freebsd-amd64.zip)
  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.13-freebsd-arm.zip)
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.24-freebsd-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.24-freebsd-amd64.zip)
  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.24-freebsd-arm.zip)
* NetBSD
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.13-netbsd-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.13-netbsd-amd64.zip)
  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.13-netbsd-arm.zip)
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.24-netbsd-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.24-netbsd-amd64.zip)
  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.24-netbsd-arm.zip)
* OpenBSD
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.13-openbsd-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.13-openbsd-amd64.zip)
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.24-openbsd-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.24-openbsd-amd64.zip)
* Plan 9
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.13-plan9-386.zip)
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.24-plan9-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.24-plan9-amd64.zip)
* Solaris
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.24-solaris-amd64.zip)

Downloads for scripting
=======================

If you would like to download the current version (maybe from a
script) from a URL which doesn't change then you can use these links.

* Windows
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-windows-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-windows-amd64.zip)
* OSX
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-osx-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-osx-amd64.zip)
* Linux
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-linux-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-linux-amd64.zip)
  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-current-linux-arm.zip)
* FreeBSD
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-freebsd-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-freebsd-amd64.zip)
  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-current-freebsd-arm.zip)
* NetBSD
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-netbsd-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-netbsd-amd64.zip)
  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-current-netbsd-arm.zip)
* OpenBSD
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-openbsd-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-openbsd-amd64.zip)
* Plan 9
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-plan9-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-plan9-amd64.zip)
* Solaris
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-solaris-amd64.zip)

Older Downloads
===============

Older downloads can be found [here](http://downloads.rclone.org/)

@@ -31,5 +31,44 @@ Rclone Download VERSION
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-VERSION-openbsd-amd64.zip)
* Plan 9
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-VERSION-plan9-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-VERSION-plan9-amd64.zip)
* Solaris
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-VERSION-solaris-amd64.zip)

Downloads for scripting
=======================

If you would like to download the current version (maybe from a
script) from a URL which doesn't change then you can use these links.

* Windows
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-windows-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-windows-amd64.zip)
* OSX
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-osx-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-osx-amd64.zip)
* Linux
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-linux-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-linux-amd64.zip)
  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-current-linux-arm.zip)
* FreeBSD
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-freebsd-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-freebsd-amd64.zip)
  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-current-freebsd-arm.zip)
* NetBSD
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-netbsd-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-netbsd-amd64.zip)
  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-current-netbsd-arm.zip)
* OpenBSD
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-openbsd-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-openbsd-amd64.zip)
* Plan 9
  * [386 - 32 Bit](http://downloads.rclone.org/rclone-current-plan9-386.zip)
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-plan9-amd64.zip)
* Solaris
  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-current-solaris-amd64.zip)

Older Downloads
===============

Older downloads can be found [here](http://downloads.rclone.org/)

@@ -1,7 +1,7 @@
---
title: "Google drive"
description: "Rclone docs for Google drive"
date: "2015-05-10"
date: "2015-09-12"
---

<i class="fa fa-google"></i> Google Drive
@@ -34,15 +34,21 @@ Choose a number from below
 3) local
 4) drive
type> 4
Google Application Client Id - leave blank to use rclone's.
Google Application Client Id - leave blank normally.
client_id>
Google Application Client Secret - leave blank to use rclone's.
Google Application Client Secret - leave blank normally.
client_secret>
Remote config
Go to the following link in your browser
https://accounts.google.com/o/oauth2/auth?access_type=&approval_prompt=&client_id=XXXXXXXXXXXX.apps.googleusercontent.com&redirect_uri=urn%3XXXXX%3Awg%3Aoauth%3XX.0%3Aoob&response_type=code&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive&state=state
Log in, then type paste the token that is returned in the browser here
Enter verification code> X/XXXXXXXXXXXXXXXXXX-XXXXXXXXX.XXXXXXXXX-XXXXX_XXXXXXX_XXXXXXX
Use auto config?
 * Say Y if not sure
 * Say N if you are working on a remote or headless machine or Y didn't work
y) Yes
n) No
y/n> y
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
Log in and authorize rclone for access
Waiting for code...
Got code
--------------------
[remote]
client_id =
@@ -55,6 +61,13 @@ d) Delete this remote
y/e/d> y
```

Note that rclone runs a webserver on your local machine to collect the
token as returned from Google if you use auto config mode. This only
runs from the moment it opens your browser to the moment you get back
the verification code. This is on `http://127.0.0.1:53682/` and it
may require you to unblock it temporarily if you are running a host
firewall, or use manual mode.

You can then use it like this,

List directories in top level of your drive
@@ -69,13 +82,11 @@ To copy a local directory to a drive directory called backup

    rclone copy /home/source remote:backup

Modified time
-------------
### Modified time ###

Google drive stores modification times accurate to 1 ms.

Revisions
---------
### Revisions ###

Google drive stores revisions of files. When you upload a change to
an existing file to google drive using rclone it will create a new
@@ -86,3 +97,16 @@ was

* They are deleted after 30 days or 100 revisions (whatever comes first).
* They do not count towards a user storage quota.

### Deleting files ###

By default rclone will delete files permanently when requested. If
sending them to the trash is required instead then use the
`--drive-use-trash` flag.
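For example (illustrative), to have files deleted during a sync go to
the Drive trash instead:

    rclone --drive-use-trash sync /home/source remote:backup
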
### Limitations ###

Drive has quite a lot of rate limiting. This causes rclone to be
limited to transferring about 2 files per second only. Individual
files may be transferred much faster at 100s of MBytes/s but lots of
small files can take a long time.

@@ -37,9 +37,9 @@ Choose a number from below
 5) dropbox
 6) drive
type> 5
Dropbox App Key - leave blank to use rclone's.
Dropbox App Key - leave blank normally.
app_key>
Dropbox App Secret - leave blank to use rclone's.
Dropbox App Secret - leave blank normally.
app_secret>
Remote config
Please visit:
@@ -71,10 +71,18 @@ To copy a local directory to a dropbox directory called backup

    rclone copy /home/source remote:backup

Modified time
-------------
### Modified time and MD5SUMs ###

Md5sums and timestamps in RFC3339 format accurate to 1ns are stored in
a Dropbox datastore called "rclone". Dropbox datastores are limited
to 100,000 rows so this is the maximum number of files rclone can
manage on Dropbox.
Dropbox doesn't have the capability of storing modification times or
MD5SUMs so syncs will effectively have the `--size-only` flag set.

### Limitations ###

Note that Dropbox is case insensitive so you can't have a file called
"Hello.doc" and one called "hello.doc".

There are some file names such as `thumbs.db` which Dropbox can't
store. There is a full list of them in the ["Ignored Files" section
of this document](https://www.dropbox.com/en/help/145). Rclone will
issue an error message `File name disallowed - not uploading` if it
attempts to upload one of those file names, but the sync won't fail.

139 docs/content/faq.md Normal file
@@ -0,0 +1,139 @@
---
title: "FAQ"
description: "Rclone Frequently Asked Questions"
date: "2015-08-27"
---

Frequently Asked Questions
--------------------------

### Do all cloud storage systems support all rclone commands ###

Yes they do. All the rclone commands (eg `sync`, `copy` etc) will
work on all the remote storage systems.

### Can I copy the config from one machine to another ###

Sure! Rclone stores all of its config in a single file. If you want
to find this file, the simplest way is to run `rclone -h` and look at
the help for the `--config` flag which will tell you where it is. Eg,

```
$ rclone -h
Sync files and directories to and from local and remote object stores - v1.18.
[snip]
Options:
      --bwlimit=0: Bandwidth limit in kBytes/s, or use suffix k|M|G
      --checkers=8: Number of checkers to run in parallel.
  -c, --checksum=false: Skip based on checksum & size, not mod-time & size
      --config="/home/user/.rclone.conf": Config file.
[snip]
```

So in this config the config file can be found in
`/home/user/.rclone.conf`.

Just copy that to the equivalent place in the destination (run `rclone
-h` above again on the destination machine if not sure).

### Can rclone sync directly from drive to s3 ###

Rclone can sync between two remote cloud storage systems just fine.

Note that it effectively downloads the file and uploads it again, so
the node running rclone would need to have lots of bandwidth.

The syncs would be incremental (on a file by file basis).

Eg

    rclone sync drive:Folder s3:bucket

### Using rclone from multiple locations at the same time ###

You can use rclone from multiple places at the same time if you choose
a different subdirectory for the output, eg

```
Server A> rclone sync /tmp/whatever remote:ServerA
Server B> rclone sync /tmp/whatever remote:ServerB
```

If you sync to the same directory then you should use rclone copy
otherwise the two rclones may delete each other's files, eg

```
Server A> rclone copy /tmp/whatever remote:Backup
Server B> rclone copy /tmp/whatever remote:Backup
```

The file names you upload from Server A and Server B should be
different in this case, otherwise some file systems (eg Drive) may
make duplicates.

### Why doesn't rclone support partial transfers / binary diffs like rsync? ###

Rclone stores each file you transfer as a native object on the remote
cloud storage system. This means that you can see the files you
upload as expected using alternative access methods (eg using the
Google Drive web interface). There is a 1:1 mapping between files on
your hard disk and objects created in the cloud storage system.

No cloud storage system I've come across yet supports partially
uploading an object. You can't take an existing object, and change
some bytes in the middle of it.

It would be possible to make a sync system which stored binary diffs
instead of whole objects like rclone does, but that would break the
1:1 mapping of files on your hard disk to objects in the remote cloud
storage system.

All the cloud storage systems support partial downloads of content, so
it would be possible to make partial downloads work. However to make
this work efficiently would require storing a significant amount of
metadata, which breaks the desired 1:1 mapping of files to objects.

### Can rclone do bi-directional sync? ###

No, not at present. rclone only does uni-directional sync from A ->
B. It may do in the future though since it has all the primitives - it
just requires writing the algorithm to do it.

### Can I use rclone with an HTTP proxy? ###

Yes. rclone will use the environment variables `HTTP_PROXY`,
`HTTPS_PROXY` and `NO_PROXY`, similar to cURL and other programs.

`HTTPS_PROXY` takes precedence over `HTTP_PROXY` for https requests.

The environment values may be either a complete URL or a "host[:port]",
in which case the "http" scheme is assumed.

The `NO_PROXY` allows you to disable the proxy for specific hosts.
Hosts must be comma separated, and can contain domains or parts.
For instance "foo.com" also matches "bar.foo.com".
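For example (the proxy address here is made up), on Linux or OSX you
might configure:

```
export HTTPS_PROXY=http://proxy.example.com:8080
export NO_PROXY=localhost,127.0.0.1
rclone lsd remote:
```
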
### Rclone gives x509: failed to load system roots and no roots provided error ###

This means that `rclone` can't find the SSL root certificates. Likely
you are running `rclone` on a NAS with a cut-down Linux OS.

Rclone (via the Go runtime) tries to load the root certificates from
these places on Linux.

    "/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Gentoo etc.
    "/etc/pki/tls/certs/ca-bundle.crt",   // Fedora/RHEL
    "/etc/ssl/ca-bundle.pem",             // OpenSUSE
    "/etc/pki/tls/cacert.pem",            // OpenELEC

So doing something like this should fix the problem. It also sets the
time which is important for SSL to work properly.

```
mkdir -p /etc/ssl/certs/
curl -o /etc/ssl/certs/ca-certificates.crt https://raw.githubusercontent.com/bagder/ca-bundle/master/ca-bundle.crt
ntpclient -s -h pool.ntp.org
```
273 docs/content/filtering.md Normal file
@@ -0,0 +1,273 @@
|
||||
---
|
||||
title: "Filtering"
|
||||
description: "Filtering, includes and excludes"
|
||||
date: "2015-09-27"
|
||||
---
|
||||
|
||||
# Filtering, includes and excludes #
|
||||
|
||||
Rclone has a sophisticated set of include and exclude rules. Some of
|
||||
these are based on patterns and some on other things like file size.
|
||||
|
||||
Each path as it passes through rclone is matched against the include
|
||||
and exclude rules. The paths are matched without a leading `/`.
|
||||
|
||||
For example the files might be passed to the matching engine like this
|
||||
|
||||
* `file1.jpg`
|
||||
* `file2.jpg`
|
||||
* `directory/file3.jpg`
|
||||
|
||||
## Patterns ##
|
||||
|
||||
The patterns used to match files for inclusion or exclusion are based
|
||||
on "file globs" as used by the unix shell.
|
||||
|
||||
If the pattern starts with a `/` then it only matches at the top level
|
||||
of the directory tree. If it doesn't start with `/` then it is
|
||||
matched starting at the end of the path, but it will only match a
|
||||
complete path element.
|
||||
|
||||
file.jpg - matches "file.jpg"
|
||||
- matches "directory/file.jpg"
|
||||
- doesn't match "afile.jpg"
|
||||
- doesn't match "directory/afile.jpg"
|
||||
/file.jpg - matches "file.jpg"
|
||||
- doesn't match "afile.jpg"
|
||||
- doesn't match "directory/file.jpg"
|
||||
|
||||
A `*` matches anything but not a `/`.
|
||||
|
||||
*.jpg - matches "file.jpg"
|
||||
- matches "directory/file.jpg"
|
||||
- doesn't match "file.jpg/anotherfile.png"
|
||||
|
||||
Use `**` to match anything, including slashes.
|
||||
|
||||
dir/** - matches "dir/file.jpg"
|
||||
- matches "dir/dir1/dir2/file.jpg"
|
||||
- doesn't match "directory/file.jpg"
|
||||
- doesn't match "adir/file.jpg"
|
||||
|
||||
A `?` matches any character except a slash `/`.
|
||||
|
||||
l?ss - matches "less"
|
||||
- matches "lass"
|
||||
- doesn't match "floss"
|
||||
|
||||
A `[` and `]` together make a a character class, such as `[a-z]` or
|
||||
`[aeiou]` or `[[:alpha:]]`. See the [go regexp
|
||||
docs](https://golang.org/pkg/regexp/syntax/) for more info on these.
|
||||
|
||||
h[ae]llo - matches "hello"
|
||||
- matches "hallo"
|
||||
- doesn't match "hullo"
|
||||
|
||||
A `{` and `}` define a choice between elements. It should contain a
|
||||
comma seperated list of patterns, any of which might match. These
|
||||
patterns can contain wildcards.
|
||||
|
||||
{one,two}_potato - matches "one_potato"
|
||||
- matches "two_potato"
|
||||
- doesn't match "three_potato"
|
||||
- doesn't match "_potato"
|
||||
|
||||
Special characters can be escaped with a `\` before them.
|
||||
|
||||
\*.jpg - matches "*.jpg"
|
||||
\\.jpg - matches "\.jpg"
|
||||
\[one\].jpg - matches "[one].jpg"
|
||||
|
||||
### Differences between rsync and rclone patterns ###
|
||||
|
||||
Rclone implements bash style `{a,b,c}` glob matching which rsync doesn't.
|
||||
|
||||
Rclone ignores `/` at the end of a pattern.
|
||||
|
||||
Rclone always does a wildcard match so `\` must always escape a `\`.
|
||||
|
||||
## How the rules are used ##
|
||||
|
||||
Rclone maintains a list of include rules and exclude rules.
|
||||
|
||||
Each file is matched in order against the list until it finds a match.
|
||||
The file is then included or excluded according to the rule type.
|
||||
|
||||
If the matcher falls off the bottom of the list then the path is
|
||||
included.
|
||||
|
||||
For example given the following rules, `+` being include, `-` being
|
||||
exclude,
|
||||
|
||||
- secret*.jpg
|
||||
+ *.jpg
|
||||
+ *.png
|
||||
+ file2.avi
|
||||
- *
|
||||
|
||||
This would include
|
||||
|
||||
* `file1.jpg`
|
||||
* `file3.png`
|
||||
* `file2.avi`
|
||||
|
||||
This would exclude
|
||||
|
||||
* `secret17.jpg`
|
||||
* non `*.jpg` and `*.png`
|
||||
|
||||
## Adding filtering rules ##
|
||||
|
||||
Filtering rules are added with the following command line flags.
|
||||
|
||||
### `--exclude` - Exclude files matching pattern ###
|
||||
|
||||
Add a single exclude rule with `--exclude`.
|
||||
|
||||
Eg `--exclude *.bak` to exclude all bak files from the sync.
|
||||
|
||||
### `--exclude-from` - Read exclude patterns from file ###
|
||||
|
||||
Add exclude rules from a file.
|
||||
|
||||
Prepare a file like this `exclude-file.txt`
|
||||
|
||||
# a sample exclude rule file
|
||||
*.bak
|
||||
file2.jpg
|
||||
|
||||
Then use as `--exclude-from exclude-file.txt`. This will sync all
|
||||
files except those ending in `bak` and `file2.jpg`.
|
||||
|
||||
This is useful if you have a lot of rules.
|
||||
|
||||
### `--include` - Include files matching pattern ###
|
||||
|
||||
Add a single include rule with `--include`.
|
||||
|
||||
Eg `--include *.{png,jpg}` to include all `png` and `jpg` files in the
|
||||
backup and no others.
|
||||
|
||||
This adds an implicit `--exclude *` at the end of the filter list.
|
||||
|
||||
### `--include-from` - Read include patterns from file ###
|
||||
|
||||
Add include rules from a file.
|
||||
|
||||
Prepare a file like this `include-file.txt`
|
||||
|
||||
# a sample include rule file
|
||||
*.jpg
|
||||
*.png
|
||||
file2.avi
|
||||
|
||||
Then use as `--include-from include-file.txt`. This will sync all
|
||||
`jpg`, `png` files and `file2.avi`.
|
||||
|
||||
This is useful if you have a lot of rules.
|
||||
|
||||
This adds an implicit `--exclude *` at the end of the filter list.
|
||||
|
||||
### `--filter` - Add a file-filtering rule ###
|
||||
|
||||
This can be used to add a single include or exclude rule. Include
|
||||
rules start with `+ ` and exclude rules start with `- `. A special
|
||||
rule called `!` can be used to clear the existing rules.
|
||||
|
||||
Eg `--filter "- *.bak"` to exclude all bak files from the sync.
|
||||
|
||||
### `--filter-from` - Read filtering patterns from a file ###
|
||||
|
||||
Add include/exclude rules from a file.
|
||||
|
||||
Prepare a file like this `filter-file.txt`
|
||||
|
||||
# a sample exclude rule file
|
||||
- secret*.jpg
|
||||
+ *.jpg
|
||||
+ *.png
|
||||
+ file2.avi
|
||||
# exclude everything else
|
||||
- *
|
||||
|
||||
Then use as `--filter-from filter-file.txt`. The rules are processed
|
||||
in the order that they are defined.
|
||||
|
||||
This example will include all `jpg` and `png` files, exclude any files
|
||||
matching `secret*.jpg` and include `file2.avi`. Everything else will
|
||||
be excluded from the sync.

### `--files-from` - Read list of source-file names ###

This reads a list of file names from the file passed in and **only**
these files are transferred. The filtering rules are ignored
completely if you use this option.

Prepare a file like this `files-from.txt`

    # comment
    file1.jpg
    file2.jpg

Then use as `--files-from files-from.txt`. This will only transfer
`file1.jpg` and `file2.jpg` provided they exist.

### `--min-size` - Don't transfer any file smaller than this ###

This option controls the minimum size of file which will be
transferred. The size is assumed to be in `kBytes` by default, but a
suffix of `k`, `M`, or `G` can be used.

For example `--min-size 50k` means no files smaller than 50 kBytes will
be transferred.

### `--max-size` - Don't transfer any file larger than this ###

This option controls the maximum size of file which will be
transferred. The size is assumed to be in `kBytes` by default, but a
suffix of `k`, `M`, or `G` can be used.

For example `--max-size 1G` means no files larger than 1 GByte will be
transferred.
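
The two flags can be combined to restrict a transfer to a size range,
eg (remote name illustrative):

    rclone --min-size 50k --max-size 1G ls remote: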

### `--delete-excluded` - Delete files on dest excluded from sync ###

**Important** this flag is dangerous - use with `--dry-run` and `-v` first.

When doing `rclone sync` this will delete any files on the destination
which are excluded from the sync.

If for example you did a sync from `A` to `B` without the `--min-size 50k` flag

    rclone sync A: B:

Then you repeated it like this with the `--delete-excluded`

    rclone --min-size 50k --delete-excluded sync A: B:

This would delete all files on `B` which are less than 50 kBytes as
these are now excluded from the sync.

Always test first with `--dry-run` and `-v` before using this flag.
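
For instance, a safe rehearsal of the example above would be:

    rclone --min-size 50k --delete-excluded --dry-run sync A: B: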

### `--dump-filters` - dump the filters to the output ###

This dumps the defined filters to the output as regular expressions.

Useful for debugging.

## Quoting shell metacharacters ##

The examples above may not work verbatim in your shell as they contain
shell metacharacters (eg `*`), and may require quoting.

Eg on Linux and OSX

* `--include \*.jpg`
* `--include '*.jpg'`
* `--include='*.jpg'`

On Windows the expansion is done by the command, not the shell, so this
should work fine

* `--include *.jpg`
@@ -1,7 +1,7 @@
---
title: "Google Cloud Storage"
description: "Rclone docs for Google Cloud Storage"
date: "2014-07-17"
date: "2015-09-12"
---

<i class="fa fa-google"></i> Google Cloud Storage
@@ -35,9 +35,9 @@ Choose a number from below
5) dropbox
6) drive
type> 4
Google Application Client Id - leave blank to use rclone's.
Google Application Client Id - leave blank normally.
client_id>
Google Application Client Secret - leave blank to use rclone's.
Google Application Client Secret - leave blank normally.
client_secret>
Project number optional - needed only for list/create/delete buckets - see your developer console.
project_number> 12345678
@@ -70,10 +70,17 @@ Choose a number from below, or type in your own value
5) publicReadWrite
bucket_acl> 2
Remote config
Go to the following link in your browser
https://accounts.google.com/o/oauth2/auth?access_type=&approval_prompt=&client_id=XXXXXXXXXXXX.apps.googleusercontent.com&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&response_type=code&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdevstorage.full_control&state=state
Log in, then type paste the token that is returned in the browser here
Enter verification code> x/xxxxxxxxxxxxxxxxxxxxxxxxxxxx.xxxxxxxxxxxxxxxxxxxxxx_xxxxxxxx
Remote config
Use auto config?
* Say Y if not sure
* Say N if you are working on a remote or headless machine or Y didn't work
y) Yes
n) No
y/n> y
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
Log in and authorize rclone for access
Waiting for code...
Got code
--------------------
[remote]
type = google cloud storage
@@ -90,6 +97,13 @@ d) Delete this remote
y/e/d> y
```

Note that rclone runs a webserver on your local machine to collect the
token as returned from Google if you use auto config mode. This only
runs from the moment it opens your browser to the moment you get back
the verification code. This is on `http://127.0.0.1:53682/` and it may
require you to unblock it temporarily if you are running a host
firewall, or use manual mode.

This remote is called `remote` and can now be used like this

See all the buckets in your project
@@ -109,8 +123,7 @@ files in the bucket.

    rclone sync /home/local/directory remote:bucket

Modified time
-------------
### Modified time ###

Google Cloud Storage stores md5sums natively and rclone stores
modification times as metadata on the object, under the "mtime" key in

39 docs/content/install.md Normal file
@@ -0,0 +1,39 @@
---
title: "Install"
description: "Rclone Installation"
date: "2015-06-12"
---

Install
-------

Rclone is a Go program and comes as a single binary file.

[Download](/downloads/) the relevant binary.

Alternatively, if you have Go installed, use

    go get github.com/ncw/rclone

and this will build the binary in `$GOPATH/bin`. If you have built
rclone before then you will want to update its dependencies first with
this (remove `-f` if using go < 1.4)

    go get -u -v -f github.com/ncw/rclone/...

See the [Usage section](/docs/) of the docs for how to use rclone, or
run `rclone -h`.

Linux binary download and install example
-------

    unzip rclone-v1.17-linux-amd64.zip
    cd rclone-v1.17-linux-amd64
    # copy binary file
    sudo cp rclone /usr/sbin/
    sudo chown root:root /usr/sbin/rclone
    sudo chmod 755 /usr/sbin/rclone
    # install manpage
    sudo mkdir -p /usr/local/share/man/man1
    sudo cp rclone.1 /usr/local/share/man/man1/
    sudo mandb
34 docs/content/licence.md Normal file
@@ -0,0 +1,34 @@
---
title: "Licence"
description: "Rclone Licence"
date: "2015-06-06"
---

License
-------

This is free software under the terms of the MIT license (check the
COPYING file included with the source code).

```
Copyright (C) 2012 by Nick Craig-Wood http://www.craig-wood.com/nick/

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
```

@@ -16,10 +16,23 @@ Will sync `/home/source` to `/tmp/destination`
These can be configured into the config file for consistency's sake,
but it is probably easier not to.

Modified time
-------------
### Modified time ###

Rclone reads and writes the modified time using an accuracy determined by
the OS. Typically this is 1 ns on Linux, 10 ns on Windows and 1 second
on OS X.

### Filenames ###

Filenames are expected to be encoded in UTF-8 on disk. This is the
normal case for Windows and OS X. There is a bit more uncertainty in
the Linux world, but new distributions will have UTF-8 encoded file
names.

If an invalid (non-UTF8) filename is read, the invalid characters will
be replaced with the unicode replacement character, '�'. `rclone`
will emit a debug message in this case (use `-v` to see), eg

```
Local file system at .: Replacing invalid UTF-8 characters in "gro\xdf"
```

111 docs/content/onedrive.md Normal file
@@ -0,0 +1,111 @@
---
title: "Microsoft One Drive"
description: "Rclone docs for Microsoft One Drive"
date: "2015-10-14"
---

<i class="fa fa-windows"></i> Microsoft One Drive
-----------------------------------------

Paths are specified as `remote:path`

Paths may be as deep as required, eg `remote:directory/subdirectory`.

The initial setup for One Drive involves getting a token from
Microsoft which you need to do in your browser. `rclone config` walks
you through it.

Here is an example of how to make a remote called `remote`. First run:

    rclone config

This will guide you through an interactive setup process:

```
n) New remote
d) Delete remote
q) Quit config
e/n/d/q> n
name> remote
What type of source is it?
Choose a number from below
1) amazon cloud drive
2) drive
3) dropbox
4) google cloud storage
5) local
6) onedrive
7) s3
8) swift
type> 6
Microsoft App Client Id - leave blank normally.
client_id>
Microsoft App Client Secret - leave blank normally.
client_secret>
Remote config
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
Log in and authorize rclone for access
Waiting for code...
Got code
--------------------
[remote]
client_id =
client_secret =
token = {"access_token":"XXXXXX"}
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
```

Note that rclone runs a webserver on your local machine to collect the
token as returned from Microsoft. This only runs from the moment it
opens your browser to the moment you get back the verification
code. This is on `http://127.0.0.1:53682/` and it may require
you to unblock it temporarily if you are running a host firewall.

Once configured you can then use `rclone` like this,

List directories in the top level of your One Drive

    rclone lsd remote:

List all the files in your One Drive

    rclone ls remote:

To copy a local directory to a One Drive directory called backup

    rclone copy /home/source remote:backup

### Modified time and MD5SUMs ###

One Drive allows modification times to be set on objects accurate to 1
second. These will be used to detect whether objects need syncing or
not.

One Drive does not support MD5SUMs. This means the `--checksum` flag
will be equivalent to the `--size-only` flag.

### Deleting files ###

Any files you delete with rclone will end up in the trash. Microsoft
doesn't provide an API to permanently delete files, nor to empty the
trash, so you will have to do that with one of Microsoft's apps or via
the One Drive website.

### Limitations ###

Note that One Drive is case insensitive so you can't have a
file called "Hello.doc" and one called "hello.doc".

Rclone only supports your default One Drive, and doesn't work with One
Drive for business. Both these issues may be fixed at some point
depending on user demand!

There are quite a few characters that can't be in One Drive file
names. These can't occur on Windows platforms, but on non-Windows
platforms they are common. Rclone will map these names to and from an
identical looking unicode equivalent. For example if a file has a `?`
in its name it will be mapped to `？` (a full-width question mark)
instead.
72 docs/content/overview.md Normal file
@@ -0,0 +1,72 @@
---
title: "Overview of cloud storage systems"
description: "Overview of cloud storage systems"
type: page
date: "2015-09-06"
---

# Overview of cloud storage systems #

Each cloud storage system is slightly different. Rclone attempts to
provide a unified interface to them, but some underlying differences
show through.

## Features ##

Here is an overview of the major features of each cloud storage system.

| Name                   | MD5SUM  | ModTime | Case Sensitive | Duplicate Files |
| ---------------------- |:-------:|:-------:|:--------------:|:---------------:|
| Google Drive           | Yes     | Yes     | No             | Yes             |
| Amazon S3              | Yes     | Yes     | No             | No              |
| Openstack Swift        | Yes     | Yes     | No             | No              |
| Dropbox                | No      | No      | Yes            | No              |
| Google Cloud Storage   | Yes     | Yes     | No             | No              |
| Amazon Cloud Drive     | Yes     | No      | Yes            | No              |
| Microsoft One Drive    | No      | Yes     | Yes            | No              |
| The local filesystem   | Yes     | Yes     | Depends        | No              |

### MD5SUM ###

The cloud storage system supports MD5SUMs of the objects. This
is used if available when transferring data as an integrity check and
can be specifically used with the `--checksum` flag in syncs and in
the `check` command.
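
For example, a completed copy can be cross-checked against the source
like this (the paths are illustrative):

    rclone check /home/local/directory remote:bucket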

### ModTime ###

The cloud storage system supports setting modification times on
objects. If it does then this enables using the modification times
as part of the sync. If not then only the size will be checked by
default, though the MD5SUM can be checked with the `--checksum` flag.
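
In that case, a sync can be told to compare checksums rather than
sizes, eg (paths illustrative):

    rclone sync --checksum /home/local/directory remote:bucket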

All cloud storage systems support some kind of date on the object and
these will be set when transferring from the cloud storage system.

### Case Sensitive ###

If a cloud storage system is case sensitive then it is possible to
have two files which differ only in case, eg `file.txt` and
`FILE.txt`. If a cloud storage system is case insensitive then that
isn't possible.

This can cause problems when syncing between a case insensitive
system and a case sensitive system. The symptom of this is that no
matter how many times you run the sync it never completes fully.

The local filesystem may or may not be case sensitive depending on OS.

* Windows - usually case insensitive
* OSX - usually case insensitive, though it is possible to format case sensitive
* Linux - usually case sensitive, but there are case insensitive file systems (eg FAT formatted USB keys)

Most of the time this doesn't cause any problems as people tend to
avoid files whose name differs only by case even on case sensitive
systems.

### Duplicate files ###

If a cloud storage system allows duplicate files then it can have two
objects with the same name.

This confuses rclone greatly when syncing.
65 docs/content/privacy.md Normal file
@@ -0,0 +1,65 @@
---
title: "Privacy Policy"
description: "Rclone Privacy Policy"
date: "2015-08-19"
---

# Rclone Privacy Policy #

## What is this Privacy Policy for? ##

This privacy policy is for this website http://rclone.org and governs the privacy of its users who choose to use it.

The policy sets out the different areas where user privacy is concerned and outlines the obligations & requirements of the users, the website and website owners. Furthermore the way this website processes, stores and protects user data and information will also be detailed within this policy.

## The Website ##

This website and its owners take a proactive approach to user privacy and ensure the necessary steps are taken to protect the privacy of its users throughout their visiting experience. This website complies with all UK national laws and requirements for user privacy.

## Use of Cookies ##

This website uses cookies to better the user's experience while visiting the website. Where applicable this website uses a cookie control system allowing the user on their first visit to the website to allow or disallow the use of cookies on their computer / device. This complies with recent legislation requirements for websites to obtain explicit consent from users before leaving behind or reading files such as cookies on a user's computer / device.

Cookies are small files saved to the hard drive of the user's computer that track, save and store information about the user's interactions and usage of the website. This allows the website, through its server, to provide the users with a tailored experience within this website.

Users are advised that if they wish to deny the use and saving of cookies from this website onto their computer's hard drive they should take the necessary steps within their web browser's security settings to block all cookies from this website and its external serving vendors.

This website uses tracking software to monitor its visitors to better understand how they use it. This software is provided by Google Analytics which uses cookies to track visitor usage. The software will save a cookie to your computer's hard drive in order to track and monitor your engagement and usage of the website, but will not store, save or collect personal information. You can read [Google's privacy policy here](http://www.google.com/privacy.html) for further information.

Other cookies may be stored to your computer's hard drive by external vendors when this website uses referral programs, sponsored links or adverts. Such cookies are used for conversion and referral tracking and typically expire after 30 days, though some may take longer. No personal information is stored, saved or collected.

## Contact & Communication ##

Users contacting this website and/or its owners do so at their own discretion and provide any such personal details requested at their own risk. Your personal information is kept private and stored securely until a time it is no longer required or has no use, as detailed in the Data Protection Act 1998.

This website and its owners use any information submitted to provide you with further information about the products / services they offer or to assist you in answering any questions or queries you may have submitted.

## External Links ##

Although this website only looks to include quality, safe and relevant external links, users are advised to adopt a policy of caution before clicking any external web links mentioned throughout this website.

The owners of this website cannot guarantee or verify the contents of any externally linked website despite their best efforts. Users should therefore note they click on external links at their own risk and this website and its owners cannot be held liable for any damages or implications caused by visiting any external links mentioned.

## Adverts and Sponsored Links ##

This website may contain sponsored links and adverts. These will typically be served through our advertising partners, who may have detailed privacy policies relating directly to the adverts they serve.

Clicking on any such adverts will send you to the advertiser's website through a referral program which may use cookies and will track the number of referrals sent from this website. This may include the use of cookies which may in turn be saved on your computer's hard drive. Users should therefore note they click on sponsored external links at their own risk and this website and its owners cannot be held liable for any damages or implications caused by visiting any external links mentioned.

### Social Media Platforms ###

Communication, engagement and actions taken through external social media platforms that this website and its owners participate on are subject to the terms and conditions as well as the privacy policies held with each social media platform respectively.

Users are advised to use social media platforms wisely and communicate / engage upon them with due care and caution in regard to their own privacy and personal details. Neither this website nor its owners will ever ask for personal or sensitive information through social media platforms; users wishing to discuss sensitive details are encouraged to contact them through primary communication channels such as email.

This website may use social sharing buttons which help share web content directly from web pages to the social media platform in question. Users are advised before using such social sharing buttons that they do so at their own discretion and note that the social media platform may track and save your request to share a web page respectively through your social media platform account.

## Resources & Further Information ##

* [Data Protection Act 1998](http://www.legislation.gov.uk/ukpga/1998/29/contents)
* [Privacy and Electronic Communications Regulations 2003](http://www.legislation.gov.uk/uksi/2003/2426/contents/made)
* [Privacy and Electronic Communications Regulations 2003 - The Guide](https://ico.org.uk/for-organisations/guide-to-pecr/)
* [Twitter Privacy Policy](http://twitter.com/privacy)
* [Facebook Privacy Policy](http://www.facebook.com/about/privacy/)
* [Google Privacy Policy](http://www.google.com/privacy.html)
* [Sample Website Privacy Policy](http://www.jamieking.co.uk/resources/free_sample_privacy_policy.html)
@@ -4,7 +4,7 @@ description: "Rclone docs for Amazon S3"
date: "2014-04-26"
---

<i class="fa fa-archive"></i> Amazon S3
<i class="fa fa-amazon"></i> Amazon S3
---------------------------------------

Paths are specified as `remote:bucket` (or `remote:` for the `lsd`
@@ -27,41 +27,55 @@ Choose a number from below
1) swift
2) s3
3) local
4) drive
4) google cloud storage
5) dropbox
6) drive
type> 2
AWS Access Key ID.
access_key_id> accesskey
AWS Secret Access Key (password).
secret_access_key> secretaccesskey
Endpoint for S3 API.
Region to connect to.
Choose a number from below, or type in your own value
* The default endpoint - a good choice if you are unsure.
* US Region, Northern Virginia or Pacific Northwest.
* Leave location constraint empty.
1) https://s3.amazonaws.com/
* US Region, Northern Virginia only.
* Leave location constraint empty.
2) https://s3-external-1.amazonaws.com
1) us-east-1
* US West (Oregon) Region
* Needs location constraint us-west-2.
2) us-west-2
[snip]
* South America (Sao Paulo) Region
* Needs location constraint sa-east-1.
9) https://s3-sa-east-1.amazonaws.com
endpoint> 1
Location constraint - must be set to match the Endpoint.
9) sa-east-1
* If using an S3 clone that only understands v2 signatures - eg Ceph - set this and make sure you set the endpoint.
10) other-v2-signature
* If using an S3 clone that understands v4 signatures set this and make sure you set the endpoint.
11) other-v4-signature
region> 1
Endpoint for S3 API.
Leave blank if using AWS to use the default endpoint for the region.
Specify if using an S3 clone such as Ceph.
endpoint>
Location constraint - must be set to match the Region. Used when creating buckets only.
Choose a number from below, or type in your own value
* Empty for US Region, Northern Virginia or Pacific Northwest.
1)
* US West (Oregon) Region.
2) us-west-2
* US West (Northern California) Region.
3) us-west-1
* EU (Ireland) Region.
4) eu-west-1
[snip]
* South America (Sao Paulo) Region.
9) sa-east-1
location_constraint> 1
Remote config
--------------------
[remote]
access_key_id = accesskey
secret_access_key = secretaccesskey
endpoint = https://s3.amazonaws.com/
region = us-east-1
endpoint =
location_constraint =
--------------------
y) Yes this is OK
@@ -100,8 +114,98 @@ files in the bucket.

    rclone sync /home/local/directory remote:bucket

Modified time
-------------
### Modified time ###

The modified time is stored as metadata on the object as
`X-Amz-Meta-Mtime` as floating point since the epoch accurate to 1 ns.

### Multipart uploads ###

rclone supports multipart uploads with S3 which means that it can
upload files bigger than 5GB. Note that files uploaded with multipart
upload don't have an MD5SUM.

### Buckets and Regions ###

With Amazon S3 you can list buckets (`rclone lsd`) using any region,
but you can only access the content of a bucket from the region it was
created in. If you attempt to access a bucket from the wrong region,
you will get an error, `incorrect region, the bucket is not in 'XXX'
region`.

### Anonymous access to public buckets ###

If you want to use rclone to access a public bucket, configure with a
blank `access_key_id` and `secret_access_key`. Eg

```
e) Edit existing remote
n) New remote
d) Delete remote
q) Quit config
e/n/d/q> n
name> anons3
What type of source is it?
Choose a number from below
1) amazon cloud drive
2) drive
3) dropbox
4) google cloud storage
5) local
6) s3
7) swift
type> 6
AWS Access Key ID - leave blank for anonymous access.
access_key_id>
AWS Secret Access Key (password) - leave blank for anonymous access.
secret_access_key>
Region to connect to.
region> 1
endpoint>
location_constraint>
```

Then use it as normal with the name of the public bucket, eg

    rclone lsd anons3:1000genomes

You will be able to list and copy data but not upload it.

### Ceph ###

Ceph is an object storage system which presents an Amazon S3 interface.

To use rclone with Ceph, you need to set the following parameters in
the config.

```
access_key_id = Whatever
secret_access_key = Whatever
endpoint = https://ceph.endpoint.goes.here/
region = other-v2-signature
```

Note also that Ceph sometimes puts `/` in the passwords it gives
users. If you read the secret access key using the command line tools
you will get a JSON blob with the `/` escaped as `\/`. Make sure you
only write `/` in the secret access key.

Eg the dump from Ceph looks something like this (irrelevant keys
removed).

```
{
    "user_id": "xxx",
    "display_name": "xxxx",
    "keys": [
        {
            "user": "xxx",
            "access_key": "xxxxxx",
            "secret_key": "xxxxxx\/xxxx"
        }
    ],
}
```

Because this is a json dump, it is encoding the `/` as `\/`, so if you
use the secret key as `xxxxxx/xxxx` it will work fine.
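
To illustrate the escaping (a standalone sketch, not part of rclone's
own code), running the JSON string through a decoder restores the
plain `/`:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// JSON is allowed to escape "/" as "\/"; any JSON decoder
	// undoes this, so the decoded key contains a plain "/".
	var secretKey string
	if err := json.Unmarshal([]byte(`"xxxxxx\/xxxx"`), &secretKey); err != nil {
		panic(err)
	}
	fmt.Println(secretKey) // prints: xxxxxx/xxxx
}
```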
@@ -87,8 +87,7 @@ excess files in the container.

    rclone sync /home/local/directory remote:container

Modified time
-------------
### Modified time ###

The modified time is stored as metadata on the object as
`X-Object-Meta-Mtime` as floating point since the epoch accurate to 1

@@ -3,6 +3,7 @@
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <meta name="description" content="{{ .Description }}">
    <meta name="author" content="Nick Craig-Wood">
    <link rel="shortcut icon" type="image/png" href="/img/rclone-16x16.png"/>
    <script>
      (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
      (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),

@@ -12,19 +12,36 @@
    <div class="collapse navbar-collapse navbar-ex1-collapse">
      <ul class="nav navbar-nav">
        <li><a href="/downloads/"><i class="fa fa-cloud-download"></i> Downloads</a></li>
        <li><a href="/docs/"><i class="fa fa-book"></i> Docs</a></li>
        <li><a href="/contact/"><i class="fa fa-envelope"></i> Contact</a></li>
        <li class="dropdown">
          <a href="#" class="dropdown-toggle" data-toggle="dropdown"><b class="caret"></b> Docs</a>
          <ul class="dropdown-menu">
            <li><a href="/install/"><i class="fa fa-book"></i> Installation</a></li>
            <li><a href="/docs/"><i class="fa fa-book"></i> Usage</a></li>
            <li><a href="/filtering/"><i class="fa fa-book"></i> Filtering</a></li>
            <li><a href="/changelog/"><i class="fa fa-book"></i> Changelog</a></li>
            <li><a href="/bugs/"><i class="fa fa-book"></i> Bugs</a></li>
            <li><a href="/faq/"><i class="fa fa-book"></i> FAQ</a></li>
            <li><a href="/licence/"><i class="fa fa-book"></i> Licence</a></li>
            <li><a href="/authors/"><i class="fa fa-book"></i> Authors</a></li>
            <li><a href="/donate/"><i class="fa fa-book"></i> Donate</a></li>
            <li><a href="/privacy/"><i class="fa fa-book"></i> Privacy Policy</a></li>
          </ul>
        </li>
        <li class="dropdown">
          <a href="#" class="dropdown-toggle" data-toggle="dropdown"><b class="caret"></b> Storage Systems</a>
          <ul class="dropdown-menu">
            <li><a href="/overview/"><i class="fa fa-archive"></i> Overview</a></li>
            <li><a href="/drive/"><i class="fa fa-google"></i> Drive</a></li>
            <li><a href="/s3/"><i class="fa fa-archive"></i> S3</a></li>
            <li><a href="/s3/"><i class="fa fa-amazon"></i> S3</a></li>
            <li><a href="/swift/"><i class="fa fa-space-shuttle"></i> Swift</a></li>
            <li><a href="/dropbox/"><i class="fa fa-dropbox"></i> Dropbox</a></li>
            <li><a href="/googlecloudstorage/"><i class="fa fa-google"></i> Google Cloud Storage</a></li>
            <li><a href="/amazonclouddrive/"><i class="fa fa-amazon"></i> Amazon Cloud Drive</a></li>
            <li><a href="/onedrive/"><i class="fa fa-windows"></i> Microsoft One Drive</a></li>
            <li><a href="/local/"><i class="fa fa-file"></i> Local</a></li>
          </ul>
        </li>
        <li><a href="/contact/"><i class="fa fa-envelope"></i> Contact</a></li>
      </ul>
    </div>
  </div>

21 docs/static/css/custom.css vendored
@@ -4,4 +4,23 @@ body {

footer {
    margin: 50px 0;
}
}

table {
    background-color:#e0e0ff
}

tbody td, th {
    border: 1px solid black;
    padding: 3px 7px 2px 7px;
}

thead td, th {
    border: 1px solid black;
    padding: 3px 7px 2px 7px;
    font-weight: bold;
}

tbody tr:nth-child(odd) {
    background-color:#d0d0ff
}

542 docs/static/css/font-awesome.css vendored
@@ -1,22 +1,21 @@
|
||||
/*!
|
||||
* Font Awesome 4.1.0 by @davegandy - http://fontawesome.io - @fontawesome
|
||||
* Font Awesome 4.4.0 by @davegandy - http://fontawesome.io - @fontawesome
|
||||
* License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)
|
||||
*/
|
||||
/* FONT PATH
|
||||
* -------------------------- */
|
||||
@font-face {
|
||||
font-family: 'FontAwesome';
|
||||
src: url('../fonts/fontawesome-webfont.eot?v=4.1.0');
|
||||
src: url('../fonts/fontawesome-webfont.eot?#iefix&v=4.1.0') format('embedded-opentype'), url('../fonts/fontawesome-webfont.woff?v=4.1.0') format('woff'), url('../fonts/fontawesome-webfont.ttf?v=4.1.0') format('truetype'), url('../fonts/fontawesome-webfont.svg?v=4.1.0#fontawesomeregular') format('svg');
|
||||
src: url('../fonts/fontawesome-webfont.eot?v=4.4.0');
|
||||
src: url('../fonts/fontawesome-webfont.eot?#iefix&v=4.4.0') format('embedded-opentype'), url('../fonts/fontawesome-webfont.woff2?v=4.4.0') format('woff2'), url('../fonts/fontawesome-webfont.woff?v=4.4.0') format('woff'), url('../fonts/fontawesome-webfont.ttf?v=4.4.0') format('truetype'), url('../fonts/fontawesome-webfont.svg?v=4.4.0#fontawesomeregular') format('svg');
|
||||
font-weight: normal;
|
||||
font-style: normal;
|
||||
}
|
||||
.fa {
|
||||
display: inline-block;
|
||||
font-family: FontAwesome;
|
||||
font-style: normal;
|
||||
font-weight: normal;
|
||||
line-height: 1;
|
||||
font: normal normal normal 14px/1 FontAwesome;
|
||||
font-size: inherit;
|
||||
text-rendering: auto;
|
||||
-webkit-font-smoothing: antialiased;
|
||||
-moz-osx-font-smoothing: grayscale;
|
||||
}
|
||||
@@ -65,6 +64,19 @@
|
||||
border: solid 0.08em #eeeeee;
|
||||
border-radius: .1em;
|
||||
}
|
||||
.fa-pull-left {
|
||||
float: left;
|
||||
}
|
||||
.fa-pull-right {
|
||||
float: right;
|
||||
}
|
||||
.fa.fa-pull-left {
|
||||
margin-right: .3em;
|
||||
}
|
||||
.fa.fa-pull-right {
|
||||
margin-left: .3em;
|
||||
}
|
||||
/* Deprecated as of 4.4.0 */
|
||||
.pull-right {
|
||||
float: right;
|
||||
}
|
||||
@@ -78,36 +90,24 @@
|
||||
margin-left: .3em;
|
||||
}
|
||||
.fa-spin {
|
||||
-webkit-animation: spin 2s infinite linear;
|
||||
-moz-animation: spin 2s infinite linear;
|
||||
-o-animation: spin 2s infinite linear;
|
||||
animation: spin 2s infinite linear;
|
||||
-webkit-animation: fa-spin 2s infinite linear;
|
||||
animation: fa-spin 2s infinite linear;
|
||||
}
|
||||
@-moz-keyframes spin {
|
||||
0% {
|
||||
-moz-transform: rotate(0deg);
|
||||
}
|
||||
100% {
|
||||
-moz-transform: rotate(359deg);
|
||||
}
|
||||
.fa-pulse {
|
||||
-webkit-animation: fa-spin 1s infinite steps(8);
|
||||
animation: fa-spin 1s infinite steps(8);
|
||||
}
|
||||
@-webkit-keyframes spin {
|
||||
@-webkit-keyframes fa-spin {
|
||||
0% {
|
||||
-webkit-transform: rotate(0deg);
|
||||
transform: rotate(0deg);
|
||||
}
|
||||
100% {
|
||||
-webkit-transform: rotate(359deg);
|
||||
transform: rotate(359deg);
|
||||
}
|
||||
}
|
||||
@-o-keyframes spin {
|
||||
0% {
|
||||
-o-transform: rotate(0deg);
|
||||
}
|
||||
100% {
|
||||
-o-transform: rotate(359deg);
|
||||
}
|
||||
}
|
||||
@keyframes spin {
|
||||
@keyframes fa-spin {
|
||||
0% {
|
||||
-webkit-transform: rotate(0deg);
|
||||
transform: rotate(0deg);
|
||||
@@ -120,43 +120,40 @@
|
||||
.fa-rotate-90 {
|
||||
filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=1);
|
||||
-webkit-transform: rotate(90deg);
|
||||
-moz-transform: rotate(90deg);
|
||||
-ms-transform: rotate(90deg);
|
||||
-o-transform: rotate(90deg);
|
||||
transform: rotate(90deg);
|
||||
}
|
||||
.fa-rotate-180 {
|
||||
filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2);
|
||||
-webkit-transform: rotate(180deg);
|
||||
-moz-transform: rotate(180deg);
|
||||
-ms-transform: rotate(180deg);
|
||||
-o-transform: rotate(180deg);
|
||||
transform: rotate(180deg);
|
||||
}
|
||||
.fa-rotate-270 {
|
||||
filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=3);
|
||||
-webkit-transform: rotate(270deg);
|
||||
-moz-transform: rotate(270deg);
|
||||
-ms-transform: rotate(270deg);
|
||||
-o-transform: rotate(270deg);
|
||||
transform: rotate(270deg);
|
||||
}
|
||||
.fa-flip-horizontal {
|
||||
filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1);
|
||||
-webkit-transform: scale(-1, 1);
|
||||
-moz-transform: scale(-1, 1);
|
||||
-ms-transform: scale(-1, 1);
|
||||
-o-transform: scale(-1, 1);
|
||||
transform: scale(-1, 1);
|
||||
}
|
||||
.fa-flip-vertical {
|
||||
filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1);
|
||||
-webkit-transform: scale(1, -1);
|
||||
-moz-transform: scale(1, -1);
|
||||
-ms-transform: scale(1, -1);
|
||||
-o-transform: scale(1, -1);
|
||||
transform: scale(1, -1);
|
||||
}
|
||||
:root .fa-rotate-90,
|
||||
:root .fa-rotate-180,
|
||||
:root .fa-rotate-270,
|
||||
:root .fa-flip-horizontal,
|
||||
:root .fa-flip-vertical {
|
||||
filter: none;
|
||||
}
|
||||
.fa-stack {
|
||||
position: relative;
|
||||
display: inline-block;
|
||||
@@ -222,6 +219,8 @@
|
||||
.fa-check:before {
|
||||
content: "\f00c";
|
||||
}
|
||||
.fa-remove:before,
|
||||
.fa-close:before,
|
||||
.fa-times:before {
|
||||
content: "\f00d";
|
||||
}
|
||||
@@ -551,7 +550,8 @@
|
||||
.fa-arrows-h:before {
|
||||
content: "\f07e";
|
||||
}
|
||||
.fa-bar-chart-o:before {
|
||||
.fa-bar-chart-o:before,
|
||||
.fa-bar-chart:before {
|
||||
content: "\f080";
|
||||
}
|
||||
.fa-twitter-square:before {
|
||||
@@ -627,6 +627,7 @@
|
||||
.fa-twitter:before {
|
||||
content: "\f099";
|
||||
}
|
||||
.fa-facebook-f:before,
|
||||
.fa-facebook:before {
|
||||
content: "\f09a";
|
||||
}
|
||||
@@ -639,6 +640,7 @@
|
||||
.fa-credit-card:before {
|
||||
content: "\f09d";
|
||||
}
|
||||
.fa-feed:before,
|
||||
.fa-rss:before {
|
||||
content: "\f09e";
|
||||
}
|
||||
@@ -1276,7 +1278,8 @@
|
||||
.fa-male:before {
|
||||
content: "\f183";
|
||||
}
|
||||
.fa-gittip:before {
|
||||
.fa-gittip:before,
|
||||
.fa-gratipay:before {
|
||||
content: "\f184";
|
||||
}
|
||||
.fa-sun-o:before {
|
||||
@@ -1380,7 +1383,6 @@
|
||||
.fa-digg:before {
|
||||
content: "\f1a6";
|
||||
}
|
||||
.fa-pied-piper-square:before,
|
||||
.fa-pied-piper:before {
|
||||
content: "\f1a7";
|
||||
}
|
||||
@@ -1497,6 +1499,7 @@
|
||||
content: "\f1cc";
|
||||
}
|
||||
.fa-life-bouy:before,
|
||||
.fa-life-buoy:before,
|
||||
.fa-life-saver:before,
|
||||
.fa-support:before,
|
||||
.fa-life-ring:before {
|
||||
@@ -1519,6 +1522,8 @@
|
||||
.fa-git:before {
|
||||
content: "\f1d3";
|
||||
}
|
||||
.fa-y-combinator-square:before,
|
||||
.fa-yc-square:before,
|
||||
.fa-hacker-news:before {
|
||||
content: "\f1d4";
|
||||
}
|
||||
@@ -1564,3 +1569,458 @@
|
||||
.fa-bomb:before {
|
||||
content: "\f1e2";
|
||||
}
|
||||
.fa-soccer-ball-o:before,
|
||||
.fa-futbol-o:before {
|
||||
content: "\f1e3";
|
||||
}
|
||||
.fa-tty:before {
|
||||
content: "\f1e4";
|
||||
}
|
||||
.fa-binoculars:before {
|
||||
content: "\f1e5";
|
||||
}
|
||||
.fa-plug:before {
|
||||
content: "\f1e6";
|
||||
}
|
||||
.fa-slideshare:before {
|
||||
content: "\f1e7";
|
||||
}
|
||||
.fa-twitch:before {
|
||||
content: "\f1e8";
|
||||
}
|
||||
.fa-yelp:before {
|
||||
content: "\f1e9";
|
||||
}
|
||||
.fa-newspaper-o:before {
|
||||
content: "\f1ea";
|
||||
}
|
||||
.fa-wifi:before {
|
||||
content: "\f1eb";
|
||||
}
|
||||
.fa-calculator:before {
|
||||
content: "\f1ec";
|
||||
}
|
||||
.fa-paypal:before {
|
||||
content: "\f1ed";
|
||||
}
|
||||
.fa-google-wallet:before {
|
||||
content: "\f1ee";
|
||||
}
|
||||
.fa-cc-visa:before {
|
||||
content: "\f1f0";
|
||||
}
|
||||
.fa-cc-mastercard:before {
|
||||
content: "\f1f1";
|
||||
}
|
||||
.fa-cc-discover:before {
|
||||
content: "\f1f2";
|
||||
}
|
||||
.fa-cc-amex:before {
|
||||
content: "\f1f3";
|
||||
}
|
||||
.fa-cc-paypal:before {
|
||||
content: "\f1f4";
|
||||
}
|
||||
.fa-cc-stripe:before {
|
||||
content: "\f1f5";
|
||||
}
|
||||
.fa-bell-slash:before {
|
||||
content: "\f1f6";
|
||||
}
|
||||
.fa-bell-slash-o:before {
|
||||
content: "\f1f7";
|
||||
}
|
||||
.fa-trash:before {
|
||||
content: "\f1f8";
|
||||
}
|
||||
.fa-copyright:before {
|
||||
content: "\f1f9";
|
||||
}
|
||||
.fa-at:before {
|
||||
content: "\f1fa";
|
||||
}
|
||||
.fa-eyedropper:before {
|
||||
content: "\f1fb";
|
||||
}
|
||||
.fa-paint-brush:before {
|
||||
content: "\f1fc";
|
||||
}
|
||||
.fa-birthday-cake:before {
|
||||
content: "\f1fd";
|
||||
}
|
||||
.fa-area-chart:before {
|
||||
content: "\f1fe";
|
||||
}
|
||||
.fa-pie-chart:before {
|
||||
content: "\f200";
|
||||
}
|
||||
.fa-line-chart:before {
|
||||
content: "\f201";
|
||||
}
|
||||
.fa-lastfm:before {
|
||||
content: "\f202";
|
||||
}
|
||||
.fa-lastfm-square:before {
|
||||
content: "\f203";
|
||||
}
|
||||
.fa-toggle-off:before {
|
||||
content: "\f204";
|
||||
}
|
||||
.fa-toggle-on:before {
|
||||
content: "\f205";
|
||||
}
|
||||
.fa-bicycle:before {
|
||||
content: "\f206";
|
||||
}
|
||||
.fa-bus:before {
|
||||
content: "\f207";
|
||||
}
|
||||
.fa-ioxhost:before {
|
||||
content: "\f208";
|
||||
}
|
||||
.fa-angellist:before {
|
||||
content: "\f209";
|
||||
}
|
||||
.fa-cc:before {
|
||||
content: "\f20a";
|
||||
}
|
||||
.fa-shekel:before,
|
||||
.fa-sheqel:before,
|
||||
.fa-ils:before {
|
||||
content: "\f20b";
|
||||
}
|
||||
.fa-meanpath:before {
|
||||
content: "\f20c";
|
||||
}
|
||||
.fa-buysellads:before {
|
||||
content: "\f20d";
|
||||
}
|
||||
.fa-connectdevelop:before {
|
||||
content: "\f20e";
|
||||
}
|
||||
.fa-dashcube:before {
|
||||
content: "\f210";
|
||||
}
|
||||
.fa-forumbee:before {
|
||||
content: "\f211";
|
||||
}
|
||||
.fa-leanpub:before {
|
||||
content: "\f212";
|
||||
}
|
||||
.fa-sellsy:before {
|
||||
content: "\f213";
|
||||
}
|
||||
.fa-shirtsinbulk:before {
|
||||
content: "\f214";
|
||||
}
|
||||
.fa-simplybuilt:before {
|
||||
content: "\f215";
|
||||
}
|
||||
.fa-skyatlas:before {
|
||||
content: "\f216";
|
||||
}
|
||||
.fa-cart-plus:before {
|
||||
content: "\f217";
|
||||
}
|
||||
.fa-cart-arrow-down:before {
|
||||
content: "\f218";
|
||||
}
|
||||
.fa-diamond:before {
|
||||
content: "\f219";
|
||||
}
|
||||
.fa-ship:before {
|
||||
content: "\f21a";
|
||||
}
|
||||
.fa-user-secret:before {
|
||||
content: "\f21b";
|
||||
}
|
||||
.fa-motorcycle:before {
|
||||
content: "\f21c";
|
||||
}
|
||||
.fa-street-view:before {
|
||||
content: "\f21d";
|
||||
}
|
||||
.fa-heartbeat:before {
|
||||
content: "\f21e";
|
||||
}
|
||||
.fa-venus:before {
|
||||
content: "\f221";
|
||||
}
|
||||
.fa-mars:before {
|
||||
content: "\f222";
|
||||
}
|
||||
.fa-mercury:before {
|
||||
content: "\f223";
|
||||
}
|
||||
.fa-intersex:before,
|
||||
.fa-transgender:before {
|
||||
content: "\f224";
|
||||
}
|
||||
.fa-transgender-alt:before {
|
||||
content: "\f225";
|
||||
}
|
||||
.fa-venus-double:before {
|
||||
content: "\f226";
|
||||
}
|
||||
.fa-mars-double:before {
|
||||
content: "\f227";
|
||||
}
|
||||
.fa-venus-mars:before {
|
||||
content: "\f228";
|
||||
}
|
||||
.fa-mars-stroke:before {
|
||||
content: "\f229";
|
||||
}
|
||||
.fa-mars-stroke-v:before {
|
||||
content: "\f22a";
|
||||
}
|
||||
.fa-mars-stroke-h:before {
|
||||
content: "\f22b";
|
||||
}
|
||||
.fa-neuter:before {
|
||||
content: "\f22c";
|
||||
}
|
||||
.fa-genderless:before {
|
||||
content: "\f22d";
|
||||
}
|
||||
.fa-facebook-official:before {
|
||||
content: "\f230";
|
||||
}
|
||||
.fa-pinterest-p:before {
|
||||
content: "\f231";
|
||||
}
|
||||
.fa-whatsapp:before {
|
||||
content: "\f232";
|
||||
}
|
||||
.fa-server:before {
|
||||
content: "\f233";
|
||||
}
|
||||
.fa-user-plus:before {
|
||||
content: "\f234";
|
||||
}
|
||||
.fa-user-times:before {
|
||||
content: "\f235";
|
||||
}
|
||||
.fa-hotel:before,
|
||||
.fa-bed:before {
|
||||
content: "\f236";
|
||||
}
|
||||
.fa-viacoin:before {
|
||||
content: "\f237";
|
||||
}
|
||||
.fa-train:before {
|
||||
content: "\f238";
|
||||
}
|
||||
.fa-subway:before {
|
||||
content: "\f239";
|
||||
}
|
||||
.fa-medium:before {
|
||||
content: "\f23a";
|
||||
}
|
||||
.fa-yc:before,
|
||||
.fa-y-combinator:before {
|
||||
content: "\f23b";
|
||||
}
|
||||
.fa-optin-monster:before {
|
||||
content: "\f23c";
|
||||
}
|
||||
.fa-opencart:before {
|
||||
content: "\f23d";
|
||||
}
|
||||
.fa-expeditedssl:before {
|
||||
content: "\f23e";
|
||||
}
|
||||
.fa-battery-4:before,
|
||||
.fa-battery-full:before {
|
||||
content: "\f240";
|
||||
}
|
||||
.fa-battery-3:before,
|
||||
.fa-battery-three-quarters:before {
|
||||
content: "\f241";
|
||||
}
|
||||
.fa-battery-2:before,
|
||||
.fa-battery-half:before {
|
||||
content: "\f242";
|
||||
}
|
||||
.fa-battery-1:before,
|
||||
.fa-battery-quarter:before {
|
||||
content: "\f243";
|
||||
}
|
||||
.fa-battery-0:before,
|
||||
.fa-battery-empty:before {
|
||||
content: "\f244";
|
||||
}
|
||||
.fa-mouse-pointer:before {
|
||||
content: "\f245";
|
||||
}
|
||||
.fa-i-cursor:before {
|
||||
content: "\f246";
|
||||
}
|
||||
.fa-object-group:before {
|
||||
content: "\f247";
|
||||
}
|
||||
.fa-object-ungroup:before {
|
||||
content: "\f248";
|
||||
}
|
||||
.fa-sticky-note:before {
|
||||
content: "\f249";
|
||||
}
|
||||
.fa-sticky-note-o:before {
|
||||
content: "\f24a";
|
||||
}
|
||||
.fa-cc-jcb:before {
|
||||
content: "\f24b";
|
||||
}
|
||||
.fa-cc-diners-club:before {
|
||||
content: "\f24c";
|
||||
}
|
||||
.fa-clone:before {
|
||||
content: "\f24d";
|
||||
}
|
||||
.fa-balance-scale:before {
|
||||
content: "\f24e";
|
||||
}
|
||||
.fa-hourglass-o:before {
|
||||
content: "\f250";
|
||||
}
|
||||
.fa-hourglass-1:before,
|
||||
.fa-hourglass-start:before {
|
||||
content: "\f251";
|
||||
}
|
||||
.fa-hourglass-2:before,
|
||||
.fa-hourglass-half:before {
|
||||
content: "\f252";
|
||||
}
|
||||
.fa-hourglass-3:before,
|
||||
.fa-hourglass-end:before {
|
||||
content: "\f253";
|
||||
}
|
||||
.fa-hourglass:before {
|
||||
content: "\f254";
|
||||
}
|
||||
.fa-hand-grab-o:before,
|
||||
.fa-hand-rock-o:before {
|
||||
content: "\f255";
|
||||
}
|
||||
.fa-hand-stop-o:before,
|
||||
.fa-hand-paper-o:before {
|
||||
content: "\f256";
|
||||
}
|
||||
.fa-hand-scissors-o:before {
|
||||
content: "\f257";
|
||||
}
|
||||
.fa-hand-lizard-o:before {
|
||||
content: "\f258";
|
||||
}
|
||||
.fa-hand-spock-o:before {
|
||||
content: "\f259";
|
||||
}
|
||||
.fa-hand-pointer-o:before {
|
||||
content: "\f25a";
|
||||
}
|
||||
.fa-hand-peace-o:before {
|
||||
content: "\f25b";
|
||||
}
|
||||
.fa-trademark:before {
|
||||
content: "\f25c";
|
||||
}
|
||||
.fa-registered:before {
|
||||
content: "\f25d";
|
||||
}
|
||||
.fa-creative-commons:before {
|
||||
content: "\f25e";
|
||||
}
|
||||
.fa-gg:before {
|
||||
content: "\f260";
|
||||
}
|
||||
.fa-gg-circle:before {
|
||||
content: "\f261";
|
||||
}
|
||||
.fa-tripadvisor:before {
|
||||
content: "\f262";
|
||||
}
|
||||
.fa-odnoklassniki:before {
|
||||
content: "\f263";
|
||||
}
|
||||
.fa-odnoklassniki-square:before {
|
||||
content: "\f264";
|
||||
}
|
||||
.fa-get-pocket:before {
|
||||
content: "\f265";
|
||||
}
|
||||
.fa-wikipedia-w:before {
|
||||
content: "\f266";
|
||||
}
|
||||
.fa-safari:before {
|
||||
content: "\f267";
|
||||
}
|
||||
.fa-chrome:before {
|
||||
content: "\f268";
|
||||
}
|
||||
.fa-firefox:before {
|
||||
content: "\f269";
|
||||
}
|
||||
.fa-opera:before {
|
||||
content: "\f26a";
|
||||
}
|
||||
.fa-internet-explorer:before {
|
||||
content: "\f26b";
|
||||
}
|
||||
.fa-tv:before,
|
||||
.fa-television:before {
|
||||
content: "\f26c";
|
||||
}
|
||||
.fa-contao:before {
|
||||
content: "\f26d";
|
||||
}
|
||||
.fa-500px:before {
|
||||
content: "\f26e";
|
||||
}
|
||||
.fa-amazon:before {
|
||||
content: "\f270";
|
||||
}
|
||||
.fa-calendar-plus-o:before {
|
||||
content: "\f271";
|
||||
}
|
||||
.fa-calendar-minus-o:before {
|
||||
content: "\f272";
|
||||
}
|
||||
.fa-calendar-times-o:before {
|
||||
content: "\f273";
|
||||
}
|
||||
.fa-calendar-check-o:before {
|
||||
content: "\f274";
|
||||
}
|
||||
.fa-industry:before {
|
||||
content: "\f275";
|
||||
}
|
||||
.fa-map-pin:before {
|
||||
content: "\f276";
|
||||
}
|
||||
.fa-map-signs:before {
|
||||
content: "\f277";
|
||||
}
|
||||
.fa-map-o:before {
|
||||
content: "\f278";
|
||||
}
|
||||
.fa-map:before {
|
||||
content: "\f279";
|
||||
}
|
||||
.fa-commenting:before {
|
||||
content: "\f27a";
|
||||
}
|
||||
.fa-commenting-o:before {
|
||||
content: "\f27b";
|
||||
}
|
||||
.fa-houzz:before {
|
||||
content: "\f27c";
|
||||
}
|
||||
.fa-vimeo:before {
|
||||
content: "\f27d";
|
||||
}
|
||||
.fa-black-tie:before {
|
||||
content: "\f27e";
|
||||
}
|
||||
.fa-fonticons:before {
|
||||
content: "\f280";
|
||||
}
|
||||
|
||||
BIN docs/static/fonts/FontAwesome.otf (vendored): Binary file not shown.
BIN docs/static/fonts/fontawesome-webfont.eot (vendored, Executable file → Normal file): Binary file not shown.
1064 docs/static/fonts/fontawesome-webfont.svg (vendored, Executable file → Normal file): File diff suppressed because it is too large. Before: 248 KiB, After: 348 KiB.
BIN docs/static/fonts/fontawesome-webfont.ttf (vendored, Executable file → Normal file): Binary file not shown.
BIN docs/static/fonts/fontawesome-webfont.woff (vendored, Executable file → Normal file): Binary file not shown.
BIN docs/static/fonts/fontawesome-webfont.woff2 (vendored, Normal file): Binary file not shown.
BIN docs/static/img/rclone-16x16.png (vendored, Normal file): Binary file not shown. After: 1019 B.
847 drive/drive.go: File diff suppressed because it is too large.
@@ -1,7 +1,7 @@
// Test Drive filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: go run gen_tests.go or make gen_tests
// Regenerate with: make gen_tests
package drive_test

import (
@@ -13,7 +13,7 @@ import (
)

func init() {
	fstests.NilObject = fs.Object((*drive.FsObjectDrive)(nil))
	fstests.NilObject = fs.Object((*drive.Object)(nil))
	fstests.RemoteName = "TestDrive:"
}

@@ -34,6 +34,9 @@ func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }

@@ -36,7 +36,7 @@ const (
// resumableUpload is used by the generated APIs to provide resumable uploads.
// It is not used by developers directly.
type resumableUpload struct {
	f *FsDrive
	f *Fs
	remote string
	// URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
	URI string
@@ -51,9 +51,9 @@ type resumableUpload struct {
}

// Upload the io.Reader in of size bytes with contentType and info
func (f *FsDrive) Upload(in io.Reader, size int64, contentType string, info *drive.File, remote string) (*drive.File, error) {
	fileId := info.Id
	var body io.Reader = nil
func (f *Fs) Upload(in io.Reader, size int64, contentType string, info *drive.File, remote string) (*drive.File, error) {
	fileID := info.Id
	var body io.Reader
	body, err := googleapi.WithoutDataWrapper.JSONReader(info)
	if err != nil {
		return nil, err
@@ -63,7 +63,7 @@ func (f *FsDrive) Upload(in io.Reader, size int64, contentType string, info *dri
	params.Set("uploadType", "resumable")
	urls := "https://www.googleapis.com/upload/drive/v2/files"
	method := "POST"
	if fileId != "" {
	if fileID != "" {
		params.Set("setModifiedDate", "true")
		urls += "/{fileId}"
		method = "PUT"
@@ -71,19 +71,20 @@ func (f *FsDrive) Upload(in io.Reader, size int64, contentType string, info *dri
	urls += "?" + params.Encode()
	req, _ := http.NewRequest(method, urls, body)
	googleapi.Expand(req.URL, map[string]string{
		"fileId": fileId,
		"fileId": fileID,
	})
	req.Header.Set("Content-Type", "application/json; charset=UTF-8")
	req.Header.Set("X-Upload-Content-Type", contentType)
	req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
	req.Header.Set("User-Agent", fs.UserAgent)
	var res *http.Response
	f.call(&err, func() {
	err = f.pacer.Call(func() (bool, error) {
		res, err = f.client.Do(req)
		if err == nil {
			defer googleapi.CloseBody(res)
			err = googleapi.CheckResponse(res)
		}
		return shouldRetry(err)
	})
	if err != nil {
		return nil, err
@@ -203,19 +204,19 @@ func (rx *resumableUpload) Upload() (*drive.File, error) {
	}

	// Transfer the chunk
	for try := 1; try <= maxTries; try++ {
		fs.Debug(rx.remote, "Sending chunk %d length %d, %d/%d", start, reqSize, try, maxTries)
		rx.f.beginCall()
		err = rx.f.pacer.Call(func() (bool, error) {
			fs.Debug(rx.remote, "Sending chunk %d length %d", start, reqSize)
			StatusCode, err = rx.transferChunk(start, buf)
			rx.f.endCall(err)
			again, err := shouldRetry(err)
			if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
				goto success
				again = false
				err = nil
			}
			fs.Debug(rx.remote, "Retrying chunk %d/%d, code=%d, err=%v", try, maxTries, StatusCode, err)
			return again, err
		})
		if err != nil {
			return nil, err
		}
		fs.Debug(rx.remote, "Failed to send chunk")
		return nil, fs.RetryErrorf("Chunk upload failed - retry: code=%d, err=%v", StatusCode, err)
	success:

		start += reqSize
}
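
The `pacer.Call` pattern above carries the retry logic: the closure
returns a `(retry, err)` pair and the pacer re-runs it while retry is
true. A minimal standalone sketch of that contract (not rclone's
actual pacer, which also spaces calls out in time) might look like
this:

```go
package main

import (
	"errors"
	"fmt"
)

// callWithRetry re-runs fn until it reports that no retry is needed
// or maxTries is exhausted, returning the last error seen.
func callWithRetry(maxTries int, fn func() (bool, error)) error {
	var err error
	for try := 1; try <= maxTries; try++ {
		var again bool
		again, err = fn()
		if !again {
			return err
		}
		fmt.Printf("low level retry %d/%d: %v\n", try, maxTries, err)
	}
	return err
}

func main() {
	calls := 0
	err := callWithRetry(5, func() (bool, error) {
		calls++
		if calls < 3 {
			return true, errors.New("transient failure") // hypothetical error
		}
		return false, nil // success - stop retrying
	})
	fmt.Println("finished after", calls, "calls, err =", err)
}
```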
@@ -1,36 +1,10 @@
|
||||
// Dropbox interface
|
||||
// Package dropbox provides an interface to Dropbox object storage
|
||||
package dropbox
|
||||
|
||||
/*
|
||||
Limitations of dropbox
|
||||
|
||||
File system is case insensitive
|
||||
|
||||
The datastore is limited to 100,000 records which therefore is the
|
||||
limit of the number of files that rclone can use on dropbox.
|
||||
|
||||
FIXME only open datastore if we need it?
|
||||
|
||||
FIXME Getting this sometimes
|
||||
Failed to copy: Upload failed: invalid character '<' looking for beginning of value
|
||||
This is a JSON decode error - from Update / UploadByChunk
|
||||
- Caused by 500 error from dropbox
|
||||
- See https://github.com/stacktic/dropbox/issues/1
|
||||
- Possibly confusing dropbox with excess concurrency?
|
||||
|
||||
FIXME implement timeouts - need to get "github.com/stacktic/dropbox"
|
||||
and hence "golang.org/x/oauth2" which uses DefaultTransport unless it
|
||||
is set in the context passed into .Client()
|
||||
|
||||
func (db *Dropbox) client() *http.Client {
|
||||
return db.config.Client(oauth2.NoContext, db.token)
|
||||
}
|
||||
|
||||
// HTTPClient is the context key to use with golang.org/x/net/context's
|
||||
// WithValue function to associate an *http.Client value with a context.
|
||||
var HTTPClient ContextKey
|
||||
|
||||
So pass in a context with HTTPClient set...
|
||||
*/
|
||||
|
||||
import (
|
||||
@@ -38,45 +12,51 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/oauthutil"
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/stacktic/dropbox"
|
||||
)
|
||||
|
||||
// Constants
|
||||
const (
|
||||
rcloneAppKey = "5jcck7diasz0rqy"
|
||||
rcloneAppSecret = "1n9m04y2zx7bf26"
|
||||
uploadChunkSize = 64 * 1024 // chunk size for upload
|
||||
metadataLimit = dropbox.MetadataLimitDefault // max items to fetch at once
|
||||
datastoreName = "rclone"
|
||||
tableName = "metadata"
|
||||
md5sumField = "md5sum"
|
||||
mtimeField = "mtime"
|
||||
maxCommitRetries = 5
|
||||
timeFormatIn = time.RFC3339
|
||||
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
|
||||
rcloneAppKey = "5jcck7diasz0rqy"
|
||||
rcloneAppSecret = "m8WRxJ6b1Z/Y25fDwJWS"
|
||||
metadataLimit = dropbox.MetadataLimitDefault // max items to fetch at once
|
||||
)
|
||||
|
||||
var (
|
||||
// A regexp matching path names for files Dropbox ignores
|
||||
// See https://www.dropbox.com/en/help/145 - Ignored files
|
||||
ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)
|
||||
// Upload chunk size - setting too small makes uploads slow.
|
||||
// Chunks aren't buffered into memory though, so it can be set large.
|
||||
uploadChunkSize = fs.SizeSuffix(128 * 1024 * 1024)
|
||||
maxUploadChunkSize = fs.SizeSuffix(150 * 1024 * 1024)
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.FsInfo{
|
||||
fs.Register(&fs.Info{
|
||||
Name: "dropbox",
|
||||
NewFs: NewFs,
|
||||
Config: configHelper,
|
||||
Options: []fs.Option{{
|
||||
Name: "app_key",
|
||||
Help: "Dropbox App Key - leave blank to use rclone's.",
|
||||
Help: "Dropbox App Key - leave blank normally.",
|
||||
}, {
|
||||
Name: "app_secret",
|
||||
Help: "Dropbox App Secret - leave blank to use rclone's.",
|
||||
Help: "Dropbox App Secret - leave blank normally.",
|
||||
}},
|
||||
})
|
||||
pflag.VarP(&uploadChunkSize, "dropbox-chunk-size", "", fmt.Sprintf("Upload chunk size. Max %v.", maxUploadChunkSize))
|
||||
}
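Note that the chunk size is now a command line flag rather than a constant. A hedged sketch of how an fs.SizeSuffix parses suffixed values (it must implement pflag.Value for the VarP call above to compile, so a Set method is assumed):
var chunk fs.SizeSuffix
if err := chunk.Set("64M"); err == nil {
	fmt.Println(int64(chunk)) // 67108864 == 64 * 1024 * 1024
}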
|
||||
|
||||
// Configuration helper - called after the user has put in the defaults
|
||||
@@ -91,7 +71,10 @@ func configHelper(name string) {
|
||||
}
|
||||
|
||||
// Get a dropbox
|
||||
db := newDropbox(name)
|
||||
db, err := newDropbox(name)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create dropbox client: %v", err)
|
||||
}
|
||||
|
||||
// This method will ask the user to visit a URL and paste the generated code.
|
||||
if err := db.Auth(); err != nil {
|
||||
@@ -109,37 +92,43 @@ func configHelper(name string) {
|
||||
}
|
||||
}
|
||||
|
||||
// FsDropbox represents a remote dropbox server
|
||||
type FsDropbox struct {
|
||||
db *dropbox.Dropbox // the connection to the dropbox server
|
||||
root string // the path we are working on
|
||||
slashRoot string // root with "/" prefix
|
||||
slashRootSlash string // root with "/" prefix and postfix
|
||||
datastoreManager *dropbox.DatastoreManager
|
||||
datastore *dropbox.Datastore
|
||||
table *dropbox.Table
|
||||
datastoreMutex sync.Mutex // lock this when using the datastore
|
||||
datastoreErr error // pending errors on the datastore
|
||||
// Fs represents a remote dropbox server
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
db *dropbox.Dropbox // the connection to the dropbox server
|
||||
root string // the path we are working on
|
||||
slashRoot string // root with "/" prefix, lowercase
|
||||
slashRootSlash string // root with "/" prefix and postfix, lowercase
|
||||
}
|
||||
|
||||
// FsObjectDropbox describes a dropbox object
|
||||
type FsObjectDropbox struct {
|
||||
dropbox *FsDropbox // what this object is part of
|
||||
remote string // The remote path
|
||||
md5sum string // md5sum of the object
|
||||
bytes int64 // size of the object
|
||||
modTime time.Time // time it was last modified
|
||||
// Object describes a dropbox object
|
||||
type Object struct {
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
bytes int64 // size of the object
|
||||
modTime time.Time // time it was last modified
|
||||
hasMetadata bool // metadata is valid
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// String converts this FsDropbox to a string
|
||||
func (f *FsDropbox) String() string {
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string {
|
||||
return fmt.Sprintf("Dropbox root '%s'", f.root)
|
||||
}
|
||||
|
||||
// Makes a new dropbox from the config
|
||||
func newDropbox(name string) *dropbox.Dropbox {
|
||||
func newDropbox(name string) (*dropbox.Dropbox, error) {
|
||||
db := dropbox.NewDropbox()
|
||||
|
||||
appKey := fs.ConfigFile.MustValue(name, "app_key")
|
||||
@@ -148,34 +137,37 @@ func newDropbox(name string) *dropbox.Dropbox {
|
||||
}
|
||||
appSecret := fs.ConfigFile.MustValue(name, "app_secret")
|
||||
if appSecret == "" {
|
||||
appSecret = rcloneAppSecret
|
||||
appSecret = fs.Reveal(rcloneAppSecret)
|
||||
}
|
||||
|
||||
db.SetAppInfo(appKey, appSecret)
|
||||
|
||||
return db
|
||||
err := db.SetAppInfo(appKey, appSecret)
|
||||
return db, err
|
||||
}
|
||||
|
||||
// NewFs constructs an FsDropbox from the path, container:path
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
db := newDropbox(name)
|
||||
f := &FsDropbox{
|
||||
db: db,
|
||||
if uploadChunkSize > maxUploadChunkSize {
|
||||
return nil, fmt.Errorf("Chunk size too big, must be < %v", maxUploadChunkSize)
|
||||
}
|
||||
db, err := newDropbox(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f := &Fs{
|
||||
name: name,
|
||||
db: db,
|
||||
}
|
||||
f.setRoot(root)
|
||||
|
||||
// Read the token from the config file
|
||||
token := fs.ConfigFile.MustValue(name, "token")
|
||||
|
||||
// Set our custom context which enables our custom transport for timeouts etc
|
||||
db.SetContext(oauthutil.Context())
|
||||
|
||||
// Authorize the client
|
||||
db.SetAccessToken(token)
|
||||
|
||||
// Make a db to store rclone metadata in
|
||||
f.datastoreManager = db.NewDatastoreManager()
|
||||
|
||||
// Open the datastore in the background
|
||||
go f.openDataStore()
|
||||
|
||||
// See if the root is actually an object
|
||||
entry, err := f.db.Metadata(f.slashRoot, false, false, "", "", metadataLimit)
|
||||
if err == nil && !entry.IsDir {
|
||||
@@ -194,46 +186,24 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
}
|
||||
|
||||
// Sets root in f
|
||||
func (f *FsDropbox) setRoot(root string) {
|
||||
func (f *Fs) setRoot(root string) {
|
||||
f.root = strings.Trim(root, "/")
|
||||
f.slashRoot = "/" + f.root
|
||||
lowerCaseRoot := strings.ToLower(f.root)
|
||||
|
||||
f.slashRoot = "/" + lowerCaseRoot
|
||||
f.slashRootSlash = f.slashRoot
|
||||
if f.root != "" {
|
||||
if lowerCaseRoot != "" {
|
||||
f.slashRootSlash += "/"
|
||||
}
|
||||
}
|
||||
|
||||
// Opens the datastore in f
|
||||
func (f *FsDropbox) openDataStore() {
|
||||
f.datastoreMutex.Lock()
|
||||
defer f.datastoreMutex.Unlock()
|
||||
fs.Debug(f, "Open rclone datastore")
|
||||
// Open the rclone datastore
|
||||
var err error
|
||||
f.datastore, err = f.datastoreManager.OpenDatastore(datastoreName)
|
||||
if err != nil {
|
||||
fs.Log(f, "Failed to open datastore: %v", err)
|
||||
f.datastoreErr = err
|
||||
return
|
||||
}
|
||||
|
||||
// Get the table we are using
|
||||
f.table, err = f.datastore.GetTable(tableName)
|
||||
if err != nil {
|
||||
fs.Log(f, "Failed to open datastore table: %v", err)
|
||||
f.datastoreErr = err
|
||||
return
|
||||
}
|
||||
fs.Debug(f, "Open rclone datastore finished")
|
||||
}
|
||||
|
||||
// Return an FsObject from a path
|
||||
//
|
||||
// May return nil if an error occurred
|
||||
func (f *FsDropbox) newFsObjectWithInfo(remote string, info *dropbox.Entry) fs.Object {
|
||||
o := &FsObjectDropbox{
|
||||
dropbox: f,
|
||||
remote: remote,
|
||||
func (f *Fs) newFsObjectWithInfo(remote string, info *dropbox.Entry) fs.Object {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
if info != nil {
|
||||
o.setMetadataFromEntry(info)
|
||||
@@ -247,34 +217,43 @@ func (f *FsDropbox) newFsObjectWithInfo(remote string, info *dropbox.Entry) fs.O
|
||||
return o
|
||||
}
|
||||
|
||||
// Return an FsObject from a path
|
||||
// NewFsObject returns an FsObject from a path
|
||||
//
|
||||
// May return nil if an error occurred
|
||||
func (f *FsDropbox) NewFsObject(remote string) fs.Object {
|
||||
func (f *Fs) NewFsObject(remote string) fs.Object {
|
||||
return f.newFsObjectWithInfo(remote, nil)
|
||||
}
|
||||
|
||||
// Strips the root off entry and returns it
|
||||
func (f *FsDropbox) stripRoot(entry *dropbox.Entry) string {
|
||||
path := entry.Path
|
||||
if strings.HasPrefix(path, f.slashRootSlash) {
|
||||
path = path[len(f.slashRootSlash):]
|
||||
// Strips the root off path and returns it
|
||||
func (f *Fs) stripRoot(path string) *string {
|
||||
lowercase := strings.ToLower(path)
|
||||
|
||||
if !strings.HasPrefix(lowercase, f.slashRootSlash) {
|
||||
fs.Stats.Error()
|
||||
fs.ErrorLog(f, "Path '%s' is not under root '%s'", path, f.slashRootSlash)
|
||||
return nil
|
||||
}
|
||||
return path
|
||||
|
||||
stripped := path[len(f.slashRootSlash):]
|
||||
return &stripped
|
||||
}
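A worked example, with hypothetical values: the prefix check runs on a lowercased copy, but the remainder is sliced from the original path, so the case Dropbox reported is preserved.
// With root "Photos", slashRootSlash is "/photos/" (lowercased), so:
//
//	f.stripRoot("/Photos/Summer/IMG_01.jpg") -> "Summer/IMG_01.jpg"
//	f.stripRoot("/Other/file.txt")           -> nil (error counted and logged)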
|
||||
|
||||
// Walk the root returning a channel of FsObjects
|
||||
func (f *FsDropbox) list(out fs.ObjectsChan) {
|
||||
func (f *Fs) list(out fs.ObjectsChan) {
|
||||
// Track path component case; it can differ for entries coming from the Dropbox API
|
||||
// See https://www.dropboxforum.com/hc/communities/public/questions/201665409-Wrong-character-case-of-folder-name-when-calling-listFolder-using-Sync-API?locale=en-us
|
||||
// and https://github.com/ncw/rclone/issues/53
|
||||
nameTree := newNameTree()
|
||||
cursor := ""
|
||||
for {
|
||||
deltaPage, err := f.db.Delta(cursor, f.slashRoot)
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.Log(f, "Couldn't list: %s", err)
|
||||
fs.ErrorLog(f, "Couldn't list: %s", err)
|
||||
break
|
||||
} else {
|
||||
if deltaPage.Reset && cursor != "" {
|
||||
fs.Log(f, "Unexpected reset during listing - try again")
|
||||
fs.ErrorLog(f, "Unexpected reset during listing - try again")
|
||||
fs.Stats.Error()
|
||||
break
|
||||
}
|
||||
@@ -284,20 +263,38 @@ func (f *FsDropbox) list(out fs.ObjectsChan) {
|
||||
entry := deltaEntry.Entry
|
||||
if entry == nil {
|
||||
// This notifies of a deleted object
|
||||
fs.Debug(f, "Deleting metadata for %q", deltaEntry.Path)
|
||||
key := metadataKey(deltaEntry.Path) // Path is lowercased
|
||||
err := f.deleteMetadata(key)
|
||||
if err != nil {
|
||||
fs.Debug(f, "Failed to delete metadata for %q", deltaEntry.Path)
|
||||
// Don't accumulate Error here
|
||||
} else {
|
||||
if len(entry.Path) <= 1 || entry.Path[0] != '/' {
|
||||
fs.Stats.Error()
|
||||
fs.ErrorLog(f, "dropbox API inconsistency: a path should always start with a slash and be at least 2 characters: %s", entry.Path)
|
||||
continue
|
||||
}
|
||||
|
||||
} else {
|
||||
if entry.IsDir {
|
||||
// ignore directories
|
||||
lastSlashIndex := strings.LastIndex(entry.Path, "/")
|
||||
|
||||
var parentPath string
|
||||
if lastSlashIndex == 0 {
|
||||
parentPath = ""
|
||||
} else {
|
||||
path := f.stripRoot(entry)
|
||||
out <- f.newFsObjectWithInfo(path, entry)
|
||||
parentPath = entry.Path[1:lastSlashIndex]
|
||||
}
|
||||
lastComponent := entry.Path[lastSlashIndex+1:]
|
||||
|
||||
if entry.IsDir {
|
||||
nameTree.PutCaseCorrectDirectoryName(parentPath, lastComponent)
|
||||
} else {
|
||||
parentPathCorrectCase := nameTree.GetPathWithCorrectCase(parentPath)
|
||||
if parentPathCorrectCase != nil {
|
||||
path := f.stripRoot(*parentPathCorrectCase + "/" + lastComponent)
|
||||
if path == nil {
|
||||
// an error occurred and logged by stripRoot
|
||||
continue
|
||||
}
|
||||
|
||||
out <- f.newFsObjectWithInfo(*path, entry)
|
||||
} else {
|
||||
nameTree.PutFile(parentPath, lastComponent, entry)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -307,10 +304,21 @@ func (f *FsDropbox) list(out fs.ObjectsChan) {
|
||||
cursor = deltaPage.Cursor.Cursor
|
||||
}
|
||||
}
|
||||
|
||||
walkFunc := func(caseCorrectFilePath string, entry *dropbox.Entry) {
|
||||
path := f.stripRoot("/" + caseCorrectFilePath)
|
||||
if path == nil {
|
||||
// an error occurred and logged by stripRoot
|
||||
return
|
||||
}
|
||||
|
||||
out <- f.newFsObjectWithInfo(*path, entry)
|
||||
}
|
||||
nameTree.WalkFiles(f.root, walkFunc)
|
||||
}
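The listing is driven by Dropbox's cursor-based delta API: each page carries entries plus a cursor for the next request. The paging shape in miniature (HasMore is an assumption about the stacktic/dropbox client; Cursor.Cursor appears in the code above):
cursor := ""
for {
	page, err := db.Delta(cursor, slashRoot)
	if err != nil {
		break // counted and logged, as above
	}
	// ... feed the page's entries into the name tree ...
	if !page.HasMore {
		break
	}
	cursor = page.Cursor.Cursor
}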
|
||||
|
||||
// Walk the path returning a channel of FsObjects
|
||||
func (f *FsDropbox) List() fs.ObjectsChan {
|
||||
// List walks the path returning a channel of FsObjects
|
||||
func (f *Fs) List() fs.ObjectsChan {
|
||||
out := make(fs.ObjectsChan, fs.Config.Checkers)
|
||||
go func() {
|
||||
defer close(out)
|
||||
@@ -319,23 +327,29 @@ func (f *FsDropbox) List() fs.ObjectsChan {
|
||||
return out
|
||||
}
|
||||
|
||||
// Walk the path returning a channel of FsObjects
|
||||
func (f *FsDropbox) ListDir() fs.DirChan {
|
||||
// ListDir walks the path returning a channel of FsObjects
|
||||
func (f *Fs) ListDir() fs.DirChan {
|
||||
out := make(fs.DirChan, fs.Config.Checkers)
|
||||
go func() {
|
||||
defer close(out)
|
||||
entry, err := f.db.Metadata(f.root, true, false, "", "", metadataLimit)
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.Log(f, "Couldn't list directories in root: %s", err)
|
||||
fs.ErrorLog(f, "Couldn't list directories in root: %s", err)
|
||||
} else {
|
||||
for i := range entry.Contents {
|
||||
entry := &entry.Contents[i]
|
||||
if entry.IsDir {
|
||||
name := f.stripRoot(entry.Path)
|
||||
if name == nil {
|
||||
// an error occurred and logged by stripRoot
|
||||
continue
|
||||
}
|
||||
|
||||
out <- &fs.Dir{
|
||||
Name: f.stripRoot(entry),
|
||||
Name: *name,
|
||||
When: time.Time(entry.ClientMtime),
|
||||
Bytes: int64(entry.Bytes),
|
||||
Bytes: entry.Bytes,
|
||||
Count: -1,
|
||||
}
|
||||
}
|
||||
@@ -365,14 +379,17 @@ func (rc *readCloser) Close() error {
|
||||
// Copy the reader in to the new object which is returned
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (f *FsDropbox) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
|
||||
// Temporary FsObject under construction
|
||||
o := &FsObjectDropbox{dropbox: f, remote: remote}
|
||||
func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
|
||||
// Temporary Object under construction
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
return o, o.Update(in, modTime, size)
|
||||
}
|
||||
|
||||
// Mkdir creates the container if it doesn't exist
|
||||
func (f *FsDropbox) Mkdir() error {
|
||||
func (f *Fs) Mkdir() error {
|
||||
entry, err := f.db.Metadata(f.slashRoot, false, false, "", "", metadataLimit)
|
||||
if err == nil {
|
||||
if entry.IsDir {
|
||||
@@ -387,7 +404,7 @@ func (f *FsDropbox) Mkdir() error {
|
||||
// Rmdir deletes the container
|
||||
//
|
||||
// Returns an error if it isn't empty
|
||||
func (f *FsDropbox) Rmdir() error {
|
||||
func (f *Fs) Rmdir() error {
|
||||
entry, err := f.db.Metadata(f.slashRoot, true, false, "", "", 16)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -398,9 +415,41 @@ func (f *FsDropbox) Rmdir() error {
|
||||
return f.Purge()
|
||||
}
|
||||
|
||||
// Return the precision
|
||||
func (fs *FsDropbox) Precision() time.Duration {
|
||||
return time.Nanosecond
|
||||
// Precision returns the precision
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
return fs.ModTimeNotSupported
|
||||
}
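With the datastore gone there is nowhere to store mod times, so Precision now returns the fs.ModTimeNotSupported sentinel instead of a real duration. A sketch of how calling code can branch on it:
if f.Precision() == fs.ModTimeNotSupported {
	// mod times can't round-trip - compare by size or checksum instead
}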
|
||||
|
||||
// Copy src to this remote using server side copy operations.
|
||||
//
|
||||
// This is stored with the remote path given
|
||||
//
|
||||
// It returns the destination Object and a possible error
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debug(src, "Can't copy - not same remote type")
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
|
||||
// Temporary Object under construction
|
||||
dstObj := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
|
||||
srcPath := srcObj.remotePath()
|
||||
dstPath := dstObj.remotePath()
|
||||
entry, err := f.db.Copy(srcPath, dstPath, false)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Copy failed: %s", err)
|
||||
}
|
||||
dstObj.setMetadataFromEntry(entry)
|
||||
return dstObj, nil
|
||||
}
|
||||
|
||||
// Purge deletes all the files and the container
|
||||
@@ -408,148 +457,114 @@ func (fs *FsDropbox) Precision() time.Duration {
|
||||
// Optional interface: Only implement this if you have a way of
|
||||
// deleting all the files quicker than just running Remove() on the
|
||||
// result of List()
|
||||
func (f *FsDropbox) Purge() error {
|
||||
// Delete metadata first
|
||||
var wg sync.WaitGroup
|
||||
to_be_deleted := f.List()
|
||||
wg.Add(fs.Config.Transfers)
|
||||
for i := 0; i < fs.Config.Transfers; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for dst := range to_be_deleted {
|
||||
o := dst.(*FsObjectDropbox)
|
||||
o.deleteMetadata()
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
func (f *Fs) Purge() error {
|
||||
// Let dropbox delete the filesystem tree
|
||||
_, err := f.db.Delete(f.slashRoot)
|
||||
return err
|
||||
}
|
||||
|
||||
// Tries the transaction in fn then calls commit, repeating until retry limit
|
||||
// Move src to this remote using server side move operations.
|
||||
//
|
||||
// Holds datastore mutex while in progress
|
||||
func (f *FsDropbox) transaction(fn func() error) error {
|
||||
f.datastoreMutex.Lock()
|
||||
defer f.datastoreMutex.Unlock()
|
||||
if f.datastoreErr != nil {
|
||||
return f.datastoreErr
|
||||
// This is stored with the remote path given
|
||||
//
|
||||
// It returns the destination Object and a possible error
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantMove
|
||||
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debug(src, "Can't move - not same remote type")
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
var err error
|
||||
for i := 1; i <= maxCommitRetries; i++ {
|
||||
err = fn()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = f.datastore.Commit()
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
fs.Debug(f, "Retrying transaction %d/%d", i, maxCommitRetries)
|
||||
// Temporary Object under construction
|
||||
dstObj := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
|
||||
srcPath := srcObj.remotePath()
|
||||
dstPath := dstObj.remotePath()
|
||||
entry, err := f.db.Move(srcPath, dstPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to commit metadata changes: %s", err)
|
||||
return nil, fmt.Errorf("Move failed: %s", err)
|
||||
}
|
||||
dstObj.setMetadataFromEntry(entry)
|
||||
return dstObj, nil
|
||||
}
|
||||
|
||||
// DirMove moves src to this remote using server side move operations.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantDirMove
|
||||
//
|
||||
// If destination exists then return fs.ErrorDirExists
|
||||
func (f *Fs) DirMove(src fs.Fs) error {
|
||||
srcFs, ok := src.(*Fs)
|
||||
if !ok {
|
||||
fs.Debug(srcFs, "Can't move directory - not same remote type")
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
|
||||
// Check if destination exists
|
||||
entry, err := f.db.Metadata(f.slashRoot, false, false, "", "", metadataLimit)
|
||||
if err == nil && !entry.IsDeleted {
|
||||
return fs.ErrorDirExists
|
||||
}
|
||||
|
||||
// Do the move
|
||||
_, err = f.db.Move(srcFs.slashRoot, f.slashRoot)
|
||||
if err != nil {
|
||||
return fmt.Errorf("MoveDir failed: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deletes the metadata associated with this key
|
||||
func (f *FsDropbox) deleteMetadata(key string) error {
|
||||
return f.transaction(func() error {
|
||||
record, err := f.table.Get(key)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Couldn't get record: %s", err)
|
||||
}
|
||||
if record == nil {
|
||||
return nil
|
||||
}
|
||||
record.DeleteRecord()
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Reads the record attached to key
|
||||
//
|
||||
// Holds datastore mutex while in progress
|
||||
func (f *FsDropbox) readRecord(key string) (*dropbox.Record, error) {
|
||||
f.datastoreMutex.Lock()
|
||||
defer f.datastoreMutex.Unlock()
|
||||
if f.datastoreErr != nil {
|
||||
return nil, f.datastoreErr
|
||||
}
|
||||
return f.table.Get(key)
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Return the parent Fs
|
||||
func (o *FsObjectDropbox) Fs() fs.Fs {
|
||||
return o.dropbox
|
||||
// Fs returns the parent Fs
|
||||
func (o *Object) Fs() fs.Fs {
|
||||
return o.fs
|
||||
}
|
||||
|
||||
// Return a string version
|
||||
func (o *FsObjectDropbox) String() string {
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Return the remote path
|
||||
func (o *FsObjectDropbox) Remote() string {
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Md5sum returns the Md5sum of an object returning a lowercase hex string
|
||||
//
|
||||
// FIXME has to download the file!
|
||||
func (o *FsObjectDropbox) Md5sum() (string, error) {
|
||||
if o.md5sum != "" {
|
||||
return o.md5sum, nil
|
||||
}
|
||||
err := o.readMetaData()
|
||||
if err != nil {
|
||||
fs.Log(o, "Failed to read metadata: %s", err)
|
||||
return "", fmt.Errorf("Failed to read metadata: %s", err)
|
||||
|
||||
}
|
||||
|
||||
// For pre-existing files which have no md5sum can read it and set it?
|
||||
|
||||
// in, err := o.Open()
|
||||
// if err != nil {
|
||||
// return "", err
|
||||
// }
|
||||
// defer in.Close()
|
||||
// hash := md5.New()
|
||||
// _, err = io.Copy(hash, in)
|
||||
// if err != nil {
|
||||
// return "", err
|
||||
// }
|
||||
// o.md5sum = fmt.Sprintf("%x", hash.Sum(nil))
|
||||
return o.md5sum, nil
|
||||
func (o *Object) Md5sum() (string, error) {
|
||||
return "", nil
|
||||
}
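An empty string from Md5sum is the conventional "no checksum available" signal, letting checking code degrade gracefully; a sketch:
sum, err := o.Md5sum()
if err == nil && sum == "" {
	// remote has no checksum - fall back to a size-only comparison
}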
|
||||
|
||||
// Size returns the size of an object in bytes
|
||||
func (o *FsObjectDropbox) Size() int64 {
|
||||
func (o *Object) Size() int64 {
|
||||
return o.bytes
|
||||
}
|
||||
|
||||
// setMetadataFromEntry sets the fs data from a dropbox.Entry
|
||||
//
|
||||
// This isn't a complete set of metadata and has an inaccurate date
|
||||
func (o *FsObjectDropbox) setMetadataFromEntry(info *dropbox.Entry) {
|
||||
o.bytes = int64(info.Bytes)
|
||||
func (o *Object) setMetadataFromEntry(info *dropbox.Entry) {
|
||||
o.bytes = info.Bytes
|
||||
o.modTime = time.Time(info.ClientMtime)
|
||||
o.hasMetadata = true
|
||||
}
|
||||
|
||||
// Reads the entry from dropbox
|
||||
func (o *FsObjectDropbox) readEntry() (*dropbox.Entry, error) {
|
||||
entry, err := o.dropbox.db.Metadata(o.remotePath(), false, false, "", "", metadataLimit)
|
||||
func (o *Object) readEntry() (*dropbox.Entry, error) {
|
||||
entry, err := o.fs.db.Metadata(o.remotePath(), false, false, "", "", metadataLimit)
|
||||
if err != nil {
|
||||
fs.Debug(o, "Error reading file: %s", err)
|
||||
return nil, fmt.Errorf("Error reading file: %s", err)
|
||||
@@ -558,7 +573,7 @@ func (o *FsObjectDropbox) readEntry() (*dropbox.Entry, error) {
|
||||
}
|
||||
|
||||
// Read entry if not set and set metadata from it
|
||||
func (o *FsObjectDropbox) readEntryAndSetMetadata() error {
|
||||
func (o *Object) readEntryAndSetMetadata() error {
|
||||
// Last resort set time from client
|
||||
if !o.modTime.IsZero() {
|
||||
return nil
|
||||
@@ -572,8 +587,8 @@ func (o *FsObjectDropbox) readEntryAndSetMetadata() error {
|
||||
}
|
||||
|
||||
// Returns the remote path for the object
|
||||
func (o *FsObjectDropbox) remotePath() string {
|
||||
return o.dropbox.slashRootSlash + o.remote
|
||||
func (o *Object) remotePath() string {
|
||||
return o.fs.slashRootSlash + o.remote
|
||||
}
|
||||
|
||||
// Returns the key for the metadata database for a given path
|
||||
@@ -586,61 +601,15 @@ func metadataKey(path string) string {
|
||||
}
|
||||
|
||||
// Returns the key for the metadata database
|
||||
func (o *FsObjectDropbox) metadataKey() string {
|
||||
func (o *Object) metadataKey() string {
|
||||
return metadataKey(o.remotePath())
|
||||
}
|
||||
|
||||
// readMetaData gets the info if it hasn't already been fetched
|
||||
func (o *FsObjectDropbox) readMetaData() (err error) {
|
||||
if o.md5sum != "" {
|
||||
func (o *Object) readMetaData() (err error) {
|
||||
if o.hasMetadata {
|
||||
return nil
|
||||
}
|
||||
|
||||
// fs.Debug(o, "Reading metadata from datastore")
|
||||
record, err := o.dropbox.readRecord(o.metadataKey())
|
||||
if err != nil {
|
||||
fs.Debug(o, "Couldn't read metadata: %s", err)
|
||||
record = nil
|
||||
}
|
||||
|
||||
if record != nil {
|
||||
// Read md5sum
|
||||
md5sumInterface, ok, err := record.Get(md5sumField)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !ok {
|
||||
fs.Debug(o, "Couldn't find md5sum in record")
|
||||
} else {
|
||||
md5sum, ok := md5sumInterface.(string)
|
||||
if !ok {
|
||||
fs.Debug(o, "md5sum not a string")
|
||||
} else {
|
||||
o.md5sum = md5sum
|
||||
}
|
||||
}
|
||||
|
||||
// read mtime
|
||||
mtimeInterface, ok, err := record.Get(mtimeField)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !ok {
|
||||
fs.Debug(o, "Couldn't find mtime in record")
|
||||
} else {
|
||||
mtime, ok := mtimeInterface.(string)
|
||||
if !ok {
|
||||
fs.Debug(o, "mtime not a string")
|
||||
} else {
|
||||
modTime, err := time.Parse(timeFormatIn, mtime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.modTime = modTime
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Last resort
|
||||
return o.readEntryAndSetMetadata()
|
||||
}
|
||||
@@ -649,7 +618,7 @@ func (o *FsObjectDropbox) readMetaData() (err error) {
|
||||
//
|
||||
// It attempts to read the object's mtime and if that isn't present the
|
||||
// LastModified returned in the http headers
|
||||
func (o *FsObjectDropbox) ModTime() time.Time {
|
||||
func (o *Object) ModTime() time.Time {
|
||||
err := o.readMetaData()
|
||||
if err != nil {
|
||||
fs.Log(o, "Failed to read metadata: %s", err)
|
||||
@@ -658,69 +627,22 @@ func (o *FsObjectDropbox) ModTime() time.Time {
|
||||
return o.modTime
|
||||
}
|
||||
|
||||
// Sets the modification time of the local fs object into the record
|
||||
// FIXME if we don't set md5sum what will that do?
|
||||
func (o *FsObjectDropbox) setModTimeAndMd5sum(modTime time.Time, md5sum string) error {
|
||||
key := o.metadataKey()
|
||||
// fs.Debug(o, "Writing metadata to datastore")
|
||||
return o.dropbox.transaction(func() error {
|
||||
record, err := o.dropbox.table.GetOrInsert(key)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Couldn't read record: %s", err)
|
||||
}
|
||||
|
||||
if md5sum != "" {
|
||||
err = record.Set(md5sumField, md5sum)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Couldn't set md5sum record: %s", err)
|
||||
}
|
||||
o.md5sum = md5sum
|
||||
}
|
||||
|
||||
if !modTime.IsZero() {
|
||||
mtime := modTime.Format(timeFormatOut)
|
||||
err := record.Set(mtimeField, mtime)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Couldn't set mtime record: %s", err)
|
||||
}
|
||||
o.modTime = modTime
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Deletes the metadata associated with this file
|
||||
//
|
||||
// It logs any errors
|
||||
func (o *FsObjectDropbox) deleteMetadata() {
|
||||
fs.Debug(o, "Deleting metadata from datastore")
|
||||
err := o.dropbox.deleteMetadata(o.metadataKey())
|
||||
if err != nil {
|
||||
fs.Log(o, "Error deleting metadata: %v", err)
|
||||
fs.Stats.Error()
|
||||
}
|
||||
}
|
||||
|
||||
// Sets the modification time of the local fs object
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
//
|
||||
// Commits the datastore
|
||||
func (o *FsObjectDropbox) SetModTime(modTime time.Time) {
|
||||
err := o.setModTimeAndMd5sum(modTime, "")
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.Log(o, err.Error())
|
||||
}
|
||||
func (o *Object) SetModTime(modTime time.Time) {
|
||||
// FIXME not implemented
|
||||
return
|
||||
}
|
||||
|
||||
// Is this object storable
|
||||
func (o *FsObjectDropbox) Storable() bool {
|
||||
// Storable returns whether this object is storable
|
||||
func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Open an object for read
|
||||
func (o *FsObjectDropbox) Open() (in io.ReadCloser, err error) {
|
||||
in, _, err = o.dropbox.db.Download(o.remotePath(), "", 0)
|
||||
func (o *Object) Open() (in io.ReadCloser, err error) {
|
||||
in, _, err = o.fs.db.Download(o.remotePath(), "", 0)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -729,28 +651,32 @@ func (o *FsObjectDropbox) Open() (in io.ReadCloser, err error) {
|
||||
// Copy the reader into the object updating modTime and size
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *FsObjectDropbox) Update(in io.Reader, modTime time.Time, size int64) error {
|
||||
// Calculate md5sum as we upload it
|
||||
hash := md5.New()
|
||||
rc := &readCloser{in: io.TeeReader(in, hash)}
|
||||
entry, err := o.dropbox.db.UploadByChunk(rc, uploadChunkSize, o.remotePath(), true, "")
|
||||
func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
|
||||
remote := o.remotePath()
|
||||
if ignoredFiles.MatchString(remote) {
|
||||
fs.ErrorLog(o, "File name disallowed - not uploading")
|
||||
return nil
|
||||
}
|
||||
entry, err := o.fs.db.UploadByChunk(ioutil.NopCloser(in), int(uploadChunkSize), remote, true, "")
|
||||
if err != nil {
|
||||
return fmt.Errorf("Upload failed: %s", err)
|
||||
}
|
||||
o.setMetadataFromEntry(entry)
|
||||
|
||||
md5sum := fmt.Sprintf("%x", hash.Sum(nil))
|
||||
return o.setModTimeAndMd5sum(modTime, md5sum)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *FsObjectDropbox) Remove() error {
|
||||
o.deleteMetadata()
|
||||
_, err := o.dropbox.db.Delete(o.remotePath())
|
||||
func (o *Object) Remove() error {
|
||||
_, err := o.fs.db.Delete(o.remotePath())
|
||||
return err
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var _ fs.Fs = &FsDropbox{}
|
||||
var _ fs.Purger = &FsDropbox{}
|
||||
var _ fs.Object = &FsObjectDropbox{}
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.Purger = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
)
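The var block above is Go's standard compile-time interface check: converting a typed nil pins the type without allocating anything, and the blank identifier discards the value. The same idiom in isolation:
// Fails to compile if *bytes.Buffer ever stops satisfying io.Reader.
var _ io.Reader = (*bytes.Buffer)(nil)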
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
// Test Dropbox filesystem interface
|
||||
//
|
||||
// Automatically generated - DO NOT EDIT
|
||||
// Regenerate with: go run gen_tests.go or make gen_tests
|
||||
// Regenerate with: make gen_tests
|
||||
package dropbox_test
|
||||
|
||||
import (
|
||||
@@ -13,7 +13,7 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
fstests.NilObject = fs.Object((*dropbox.FsObjectDropbox)(nil))
|
||||
fstests.NilObject = fs.Object((*dropbox.Object)(nil))
|
||||
fstests.RemoteName = "TestDropbox:"
|
||||
}
|
||||
|
||||
@@ -34,6 +34,9 @@ func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
|
||||
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
|
||||
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
|
||||
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
|
||||
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
|
||||
|
||||
179 dropbox/nametree.go (new file)
@@ -0,0 +1,179 @@
|
||||
package dropbox
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/stacktic/dropbox"
|
||||
)
|
||||
|
||||
type nameTreeNode struct {
|
||||
// Map from lowercase directory name to tree node
|
||||
Directories map[string]*nameTreeNode
|
||||
|
||||
// Map from file name (case sensitive) to dropbox entry
|
||||
Files map[string]*dropbox.Entry
|
||||
|
||||
// Empty string if exact case is unknown or root node
|
||||
CaseCorrectName string
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
func newNameTreeNode(caseCorrectName string) *nameTreeNode {
|
||||
return &nameTreeNode{
|
||||
CaseCorrectName: caseCorrectName,
|
||||
Directories: make(map[string]*nameTreeNode),
|
||||
Files: make(map[string]*dropbox.Entry),
|
||||
}
|
||||
}
|
||||
|
||||
func newNameTree() *nameTreeNode {
|
||||
return newNameTreeNode("")
|
||||
}
|
||||
|
||||
func (tree *nameTreeNode) String() string {
|
||||
if len(tree.CaseCorrectName) == 0 {
|
||||
return "nameTreeNode/<root>"
|
||||
}
|
||||
return fmt.Sprintf("nameTreeNode/%q", tree.CaseCorrectName)
|
||||
}
|
||||
|
||||
func (tree *nameTreeNode) getTreeNode(path string) *nameTreeNode {
|
||||
if len(path) == 0 {
|
||||
// no lookup required, just return root
|
||||
return tree
|
||||
}
|
||||
|
||||
current := tree
|
||||
for _, component := range strings.Split(path, "/") {
|
||||
if len(component) == 0 {
|
||||
fs.Stats.Error()
|
||||
fs.ErrorLog(tree, "getTreeNode: path component is empty (full path %q)", path)
|
||||
return nil
|
||||
}
|
||||
|
||||
lowercase := strings.ToLower(component)
|
||||
|
||||
lookup := current.Directories[lowercase]
|
||||
if lookup == nil {
|
||||
lookup = newNameTreeNode("")
|
||||
current.Directories[lowercase] = lookup
|
||||
}
|
||||
|
||||
current = lookup
|
||||
}
|
||||
|
||||
return current
|
||||
}
|
||||
|
||||
func (tree *nameTreeNode) PutCaseCorrectDirectoryName(parentPath string, caseCorrectDirectoryName string) {
|
||||
if len(caseCorrectDirectoryName) == 0 {
|
||||
fs.Stats.Error()
|
||||
fs.ErrorLog(tree, "PutCaseCorrectDirectoryName: empty caseCorrectDirectoryName is not allowed (parentPath: %q)", parentPath)
|
||||
return
|
||||
}
|
||||
|
||||
node := tree.getTreeNode(parentPath)
|
||||
if node == nil {
|
||||
return
|
||||
}
|
||||
|
||||
lowerCaseDirectoryName := strings.ToLower(caseCorrectDirectoryName)
|
||||
directory := node.Directories[lowerCaseDirectoryName]
|
||||
if directory == nil {
|
||||
directory = newNameTreeNode(caseCorrectDirectoryName)
|
||||
node.Directories[lowerCaseDirectoryName] = directory
|
||||
} else {
|
||||
if len(directory.CaseCorrectName) > 0 {
|
||||
fs.Stats.Error()
|
||||
fs.ErrorLog(tree, "PutCaseCorrectDirectoryName: directory %q is already exists under parent path %q", caseCorrectDirectoryName, parentPath)
|
||||
return
|
||||
}
|
||||
|
||||
directory.CaseCorrectName = caseCorrectDirectoryName
|
||||
}
|
||||
}
|
||||
|
||||
func (tree *nameTreeNode) PutFile(parentPath string, caseCorrectFileName string, dropboxEntry *dropbox.Entry) {
|
||||
node := tree.getTreeNode(parentPath)
|
||||
if node == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if node.Files[caseCorrectFileName] != nil {
|
||||
fs.Stats.Error()
|
||||
fs.ErrorLog(tree, "PutFile: file %q is already exists at %q", caseCorrectFileName, parentPath)
|
||||
return
|
||||
}
|
||||
|
||||
node.Files[caseCorrectFileName] = dropboxEntry
|
||||
}
|
||||
|
||||
func (tree *nameTreeNode) GetPathWithCorrectCase(path string) *string {
|
||||
if path == "" {
|
||||
empty := ""
|
||||
return &empty
|
||||
}
|
||||
|
||||
var result bytes.Buffer
|
||||
|
||||
current := tree
|
||||
for _, component := range strings.Split(path, "/") {
|
||||
if component == "" {
|
||||
fs.Stats.Error()
|
||||
fs.ErrorLog(tree, "GetPathWithCorrectCase: path component is empty (full path %q)", path)
|
||||
return nil
|
||||
}
|
||||
|
||||
lowercase := strings.ToLower(component)
|
||||
|
||||
current = current.Directories[lowercase]
|
||||
if current == nil || current.CaseCorrectName == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
_, _ = result.WriteString("/")
|
||||
_, _ = result.WriteString(current.CaseCorrectName)
|
||||
}
|
||||
|
||||
resultString := result.String()
|
||||
return &resultString
|
||||
}
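A worked example with hypothetical tree contents:
// After PutCaseCorrectDirectoryName("", "A") and
// PutCaseCorrectDirectoryName("a", "B"):
//
//	tree.GetPathWithCorrectCase("a/b") -> "/A/B"
//	tree.GetPathWithCorrectCase("a/x") -> nil (case of "x" unknown)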
|
||||
|
||||
type nameTreeFileWalkFunc func(caseCorrectFilePath string, entry *dropbox.Entry)
|
||||
|
||||
func (tree *nameTreeNode) walkFilesRec(currentPath string, walkFunc nameTreeFileWalkFunc) {
|
||||
var prefix string
|
||||
if currentPath == "" {
|
||||
prefix = ""
|
||||
} else {
|
||||
prefix = currentPath + "/"
|
||||
}
|
||||
|
||||
for name, entry := range tree.Files {
|
||||
walkFunc(prefix+name, entry)
|
||||
}
|
||||
|
||||
for lowerCaseName, directory := range tree.Directories {
|
||||
caseCorrectName := directory.CaseCorrectName
|
||||
if caseCorrectName == "" {
|
||||
fs.Stats.Error()
|
||||
fs.ErrorLog(tree, "WalkFiles: exact name of the directory %q is unknown (parent path: %q)", lowerCaseName, currentPath)
|
||||
continue
|
||||
}
|
||||
|
||||
directory.walkFilesRec(prefix+caseCorrectName, walkFunc)
|
||||
}
|
||||
}
|
||||
|
||||
func (tree *nameTreeNode) WalkFiles(rootPath string, walkFunc nameTreeFileWalkFunc) {
|
||||
node := tree.getTreeNode(rootPath)
|
||||
if node == nil {
|
||||
return
|
||||
}
|
||||
|
||||
node.walkFilesRec(rootPath, walkFunc)
|
||||
}
|
||||
124 dropbox/nametree_test.go (new file)
@@ -0,0 +1,124 @@
|
||||
package dropbox
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
dropboxapi "github.com/stacktic/dropbox"
|
||||
)
|
||||
|
||||
func assert(t *testing.T, shouldBeTrue bool, failMessage string) {
|
||||
if !shouldBeTrue {
|
||||
t.Fatal(failMessage)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPutCaseCorrectDirectoryName(t *testing.T) {
|
||||
errors := fs.Stats.GetErrors()
|
||||
|
||||
tree := newNameTree()
|
||||
tree.PutCaseCorrectDirectoryName("a/b", "C")
|
||||
|
||||
assert(t, tree.CaseCorrectName == "", "Root CaseCorrectName should be empty")
|
||||
|
||||
a := tree.Directories["a"]
|
||||
assert(t, a.CaseCorrectName == "", "CaseCorrectName at 'a' should be empty")
|
||||
|
||||
b := a.Directories["b"]
|
||||
assert(t, b.CaseCorrectName == "", "CaseCorrectName at 'a/b' should be empty")
|
||||
|
||||
c := b.Directories["c"]
|
||||
assert(t, c.CaseCorrectName == "C", "CaseCorrectName at 'a/b/c' should be 'C'")
|
||||
|
||||
assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
|
||||
}
|
||||
|
||||
func TestPutCaseCorrectDirectoryNameEmptyComponent(t *testing.T) {
|
||||
errors := fs.Stats.GetErrors()
|
||||
|
||||
tree := newNameTree()
|
||||
tree.PutCaseCorrectDirectoryName("/a", "C")
|
||||
tree.PutCaseCorrectDirectoryName("b/", "C")
|
||||
tree.PutCaseCorrectDirectoryName("a//b", "C")
|
||||
|
||||
assert(t, fs.Stats.GetErrors() == errors+3, "3 errors should be reported")
|
||||
}
|
||||
|
||||
func TestPutCaseCorrectDirectoryNameEmptyParent(t *testing.T) {
|
||||
errors := fs.Stats.GetErrors()
|
||||
|
||||
tree := newNameTree()
|
||||
tree.PutCaseCorrectDirectoryName("", "C")
|
||||
|
||||
c := tree.Directories["c"]
|
||||
assert(t, c.CaseCorrectName == "C", "CaseCorrectName at 'c' should be 'C'")
|
||||
|
||||
assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
|
||||
}
|
||||
|
||||
func TestGetPathWithCorrectCase(t *testing.T) {
|
||||
errors := fs.Stats.GetErrors()
|
||||
|
||||
tree := newNameTree()
|
||||
tree.PutCaseCorrectDirectoryName("a", "C")
|
||||
assert(t, tree.GetPathWithCorrectCase("a/c") == nil, "Path for 'a' should not be available")
|
||||
|
||||
tree.PutCaseCorrectDirectoryName("", "A")
|
||||
assert(t, *tree.GetPathWithCorrectCase("a/c") == "/A/C", "Path for 'a/c' should be '/A/C'")
|
||||
|
||||
assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
|
||||
}
|
||||
|
||||
func TestPutAndWalk(t *testing.T) {
|
||||
errors := fs.Stats.GetErrors()
|
||||
|
||||
tree := newNameTree()
|
||||
tree.PutFile("a", "F", &dropboxapi.Entry{Path: "xxx"})
|
||||
tree.PutCaseCorrectDirectoryName("", "A")
|
||||
|
||||
numCalled := 0
|
||||
walkFunc := func(caseCorrectFilePath string, entry *dropboxapi.Entry) {
|
||||
assert(t, caseCorrectFilePath == "A/F", "caseCorrectFilePath should be A/F, not "+caseCorrectFilePath)
|
||||
assert(t, entry.Path == "xxx", "entry.Path should be xxx")
|
||||
numCalled++
|
||||
}
|
||||
tree.WalkFiles("", walkFunc)
|
||||
|
||||
assert(t, numCalled == 1, "walk func should be called only once")
|
||||
|
||||
assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
|
||||
}
|
||||
|
||||
func TestPutAndWalkWithPrefix(t *testing.T) {
|
||||
errors := fs.Stats.GetErrors()
|
||||
|
||||
tree := newNameTree()
|
||||
tree.PutFile("a", "F", &dropboxapi.Entry{Path: "xxx"})
|
||||
tree.PutCaseCorrectDirectoryName("", "A")
|
||||
|
||||
numCalled := 0
|
||||
walkFunc := func(caseCorrectFilePath string, entry *dropboxapi.Entry) {
|
||||
assert(t, caseCorrectFilePath == "A/F", "caseCorrectFilePath should be A/F, not "+caseCorrectFilePath)
|
||||
assert(t, entry.Path == "xxx", "entry.Path should be xxx")
|
||||
numCalled++
|
||||
}
|
||||
tree.WalkFiles("A", walkFunc)
|
||||
|
||||
assert(t, numCalled == 1, "walk func should be called only once")
|
||||
|
||||
assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
|
||||
}
|
||||
|
||||
func TestPutAndWalkIncompleteTree(t *testing.T) {
|
||||
errors := fs.Stats.GetErrors()
|
||||
|
||||
tree := newNameTree()
|
||||
tree.PutFile("a", "F", &dropboxapi.Entry{Path: "xxx"})
|
||||
|
||||
walkFunc := func(caseCorrectFilePath string, entry *dropboxapi.Entry) {
|
||||
t.Fatal("Should not be called")
|
||||
}
|
||||
tree.WalkFiles("", walkFunc)
|
||||
|
||||
assert(t, fs.Stats.GetErrors() == errors+1, "One error should be reported")
|
||||
}
|
||||
296 fs/accounting.go
@@ -7,10 +7,12 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/VividCortex/ewma"
|
||||
"github.com/tsenart/tb"
|
||||
)
|
||||
|
||||
@@ -28,41 +30,85 @@ func startTokenBucket() {
|
||||
}
|
||||
}
|
||||
|
||||
// Stringset holds some strings
|
||||
type StringSet map[string]bool
|
||||
// stringSet holds a set of strings
|
||||
type stringSet map[string]struct{}
|
||||
|
||||
// Strings returns all the strings in the StringSet
|
||||
func (ss StringSet) Strings() []string {
|
||||
strings := make([]string, 0, len(ss))
|
||||
for k := range ss {
|
||||
strings = append(strings, k)
|
||||
// inProgress holds a synchronized map of in-progress transfers
|
||||
type inProgress struct {
|
||||
mu sync.Mutex
|
||||
m map[string]*Account
|
||||
}
|
||||
|
||||
// newInProgress makes a new inProgress object
|
||||
func newInProgress() *inProgress {
|
||||
return &inProgress{
|
||||
m: make(map[string]*Account, Config.Transfers),
|
||||
}
|
||||
return strings
|
||||
}
|
||||
|
||||
// String returns all the strings in the StringSet joined by comma
|
||||
func (ss StringSet) String() string {
|
||||
return strings.Join(ss.Strings(), ", ")
|
||||
// set marks the name as in progress
|
||||
func (ip *inProgress) set(name string, acc *Account) {
|
||||
ip.mu.Lock()
|
||||
defer ip.mu.Unlock()
|
||||
ip.m[name] = acc
|
||||
}
|
||||
|
||||
// Stats limits and accounts all transfers
|
||||
// clear marks the name as no longer in progress
|
||||
func (ip *inProgress) clear(name string) {
|
||||
ip.mu.Lock()
|
||||
defer ip.mu.Unlock()
|
||||
delete(ip.m, name)
|
||||
}
|
||||
|
||||
// get gets the account for name, or nil if not found
|
||||
func (ip *inProgress) get(name string) *Account {
|
||||
ip.mu.Lock()
|
||||
defer ip.mu.Unlock()
|
||||
return ip.m[name]
|
||||
}
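These three small methods give a mutex-guarded map. As wired up later in this diff, an Account registers itself on creation and deregisters on close:
// In NewAccount:
Stats.inProgress.set(acc.name, acc)
// In (*Account).Close:
Stats.inProgress.clear(file.name)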
|
||||
|
||||
// Strings returns all the strings in the stringSet
|
||||
func (ss stringSet) Strings() []string {
|
||||
strings := make([]string, 0, len(ss))
|
||||
for name := range ss {
|
||||
var out string
|
||||
if acc := Stats.inProgress.get(name); acc != nil {
|
||||
out = acc.String()
|
||||
} else {
|
||||
out = name
|
||||
}
|
||||
strings = append(strings, " * "+out)
|
||||
}
|
||||
sorted := sort.StringSlice(strings)
|
||||
sorted.Sort()
|
||||
return sorted
|
||||
}
|
||||
|
||||
// String returns all the file names in the stringSet joined by newline
|
||||
func (ss stringSet) String() string {
|
||||
return strings.Join(ss.Strings(), "\n")
|
||||
}
|
||||
|
||||
// StatsInfo limits and accounts all transfers
|
||||
type StatsInfo struct {
|
||||
lock sync.RWMutex
|
||||
bytes int64
|
||||
errors int64
|
||||
checks int64
|
||||
checking StringSet
|
||||
checking stringSet
|
||||
transfers int64
|
||||
transferring StringSet
|
||||
transferring stringSet
|
||||
start time.Time
|
||||
inProgress *inProgress
|
||||
}
|
||||
|
||||
// NewStats creates an initialised StatsInfo
|
||||
func NewStats() *StatsInfo {
|
||||
return &StatsInfo{
|
||||
checking: make(StringSet, Config.Checkers),
|
||||
transferring: make(StringSet, Config.Transfers),
|
||||
checking: make(stringSet, Config.Checkers),
|
||||
transferring: make(stringSet, Config.Transfers),
|
||||
start: time.Now(),
|
||||
inProgress: newInProgress(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -71,29 +117,30 @@ func (s *StatsInfo) String() string {
|
||||
s.lock.RLock()
|
||||
defer s.lock.RUnlock()
|
||||
dt := time.Now().Sub(s.start)
|
||||
dt_seconds := dt.Seconds()
|
||||
dtSeconds := dt.Seconds()
|
||||
speed := 0.0
|
||||
if dt > 0 {
|
||||
speed = float64(s.bytes) / 1024 / dt_seconds
|
||||
speed = float64(s.bytes) / 1024 / dtSeconds
|
||||
}
|
||||
dtRounded := dt - (dt % (time.Second / 10))
|
||||
buf := &bytes.Buffer{}
|
||||
fmt.Fprintf(buf, `
|
||||
Transferred: %10d Bytes (%7.2f kByte/s)
|
||||
Errors: %10d
|
||||
Checks: %10d
|
||||
Transferred: %10d
|
||||
Elapsed time: %v
|
||||
Elapsed time: %10v
|
||||
`,
|
||||
s.bytes, speed,
|
||||
s.errors,
|
||||
s.checks,
|
||||
s.transfers,
|
||||
dt)
|
||||
dtRounded)
|
||||
if len(s.checking) > 0 {
|
||||
fmt.Fprintf(buf, "Checking: %s\n", s.checking)
|
||||
fmt.Fprintf(buf, "Checking:\n%s\n", s.checking)
|
||||
}
|
||||
if len(s.transferring) > 0 {
|
||||
fmt.Fprintf(buf, "Transferring: %s\n", s.transferring)
|
||||
fmt.Fprintf(buf, "Transferring:\n%s\n", s.transferring)
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
@@ -124,6 +171,23 @@ func (s *StatsInfo) GetErrors() int64 {
|
||||
return s.errors
|
||||
}
|
||||
|
||||
// ResetCounters sets the counters (bytes, checks, errors, transfers) to 0
|
||||
func (s *StatsInfo) ResetCounters() {
|
||||
s.lock.Lock()
defer s.lock.Unlock()
|
||||
s.bytes = 0
|
||||
s.errors = 0
|
||||
s.checks = 0
|
||||
s.transfers = 0
|
||||
}
|
||||
|
||||
// ResetErrors sets the errors count to 0
|
||||
func (s *StatsInfo) ResetErrors() {
|
||||
s.lock.Lock()
defer s.lock.Unlock()
|
||||
s.errors = 0
|
||||
}
|
||||
|
||||
// Errored returns whether there have been any errors
|
||||
func (s *StatsInfo) Errored() bool {
|
||||
s.lock.RLock()
|
||||
@@ -135,14 +199,14 @@ func (s *StatsInfo) Errored() bool {
|
||||
func (s *StatsInfo) Error() {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
s.errors += 1
|
||||
s.errors++
|
||||
}
|
||||
|
||||
// Checking adds a check into the stats
|
||||
func (s *StatsInfo) Checking(o Object) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
s.checking[o.Remote()] = true
|
||||
s.checking[o.Remote()] = struct{}{}
|
||||
}
|
||||
|
||||
// DoneChecking removes a check from the stats
|
||||
@@ -150,14 +214,21 @@ func (s *StatsInfo) DoneChecking(o Object) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
delete(s.checking, o.Remote())
|
||||
s.checks += 1
|
||||
s.checks++
|
||||
}
|
||||
|
||||
// GetTransfers reads the number of transfers
|
||||
func (s *StatsInfo) GetTransfers() int64 {
|
||||
s.lock.RLock()
|
||||
defer s.lock.RUnlock()
|
||||
return s.transfers
|
||||
}
|
||||
|
||||
// Transferring adds a transfer into the stats
|
||||
func (s *StatsInfo) Transferring(o Object) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
s.transferring[o.Remote()] = true
|
||||
s.transferring[o.Remote()] = struct{}{}
|
||||
}
|
||||
|
||||
// DoneTransferring removes a transfer from the stats
|
||||
@@ -165,30 +236,88 @@ func (s *StatsInfo) DoneTransferring(o Object) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
delete(s.transferring, o.Remote())
|
||||
s.transfers += 1
|
||||
s.transfers++
|
||||
}
|
||||
|
||||
// Account limits and accounts for one transfer
|
||||
type Account struct {
|
||||
in io.ReadCloser
|
||||
bytes int64
|
||||
// The mutex is to make sure Read() and Close() aren't called
|
||||
// concurrently. Unfortunately the persistent connection loop
|
||||
// in http transport calls Read() after Do() returns on
|
||||
// CancelRequest so this race can happen when it apparently
|
||||
// shouldn't.
|
||||
mu sync.Mutex
|
||||
in io.ReadCloser
|
||||
size int64
|
||||
name string
|
||||
statmu sync.Mutex // Separate mutex for stat values.
|
||||
bytes int64 // Total number of bytes read
|
||||
start time.Time // Start time of first read
|
||||
lpTime time.Time // Time of last average measurement
|
||||
lpBytes int // Number of bytes read since last measurement
|
||||
avg ewma.MovingAverage // Moving average of last few measurements
|
||||
closed bool // set if the file is closed
|
||||
exit chan struct{} // channel that will be closed when transfer is finished
|
||||
}
|
||||
|
||||
// NewAccount makes a Account reader
|
||||
func NewAccount(in io.ReadCloser) *Account {
|
||||
return &Account{
|
||||
in: in,
|
||||
// NewAccount makes an Account reader for an object
|
||||
func NewAccount(in io.ReadCloser, obj Object) *Account {
|
||||
acc := &Account{
|
||||
in: in,
|
||||
size: obj.Size(),
|
||||
name: obj.Remote(),
|
||||
exit: make(chan struct{}),
|
||||
avg: ewma.NewMovingAverage(),
|
||||
lpTime: time.Now(),
|
||||
}
|
||||
go acc.averageLoop()
|
||||
Stats.inProgress.set(acc.name, acc)
|
||||
return acc
|
||||
}
|
||||
|
||||
func (file *Account) averageLoop() {
|
||||
tick := time.NewTicker(time.Second)
|
||||
defer tick.Stop()
|
||||
for {
|
||||
select {
|
||||
case now := <-tick.C:
|
||||
file.statmu.Lock()
|
||||
// Add average of last second.
|
||||
elapsed := now.Sub(file.lpTime).Seconds()
|
||||
avg := float64(file.lpBytes) / elapsed
|
||||
file.avg.Add(avg)
|
||||
file.lpBytes = 0
|
||||
file.lpTime = now
|
||||
// Unlock stats
|
||||
file.statmu.Unlock()
|
||||
case <-file.exit:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
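Each tick folds the last second's byte count into an exponentially weighted moving average, smoothing out bursty reads. The ewma package usage in miniature (same import as this file):
avg := ewma.NewMovingAverage()
avg.Add(512 * 1024) // bytes observed over the last ~1s
smoothed := avg.Value() // smoothed bytes/sec
_ = smoothed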
|
||||
|
||||
// Read bytes from the object - see io.Reader
|
||||
func (file *Account) Read(p []byte) (n int, err error) {
|
||||
n, err = file.in.Read(p)
|
||||
file.bytes += int64(n)
|
||||
Stats.Bytes(int64(n))
|
||||
if err == io.EOF {
|
||||
// FIXME Do something?
|
||||
file.mu.Lock()
|
||||
defer file.mu.Unlock()
|
||||
|
||||
// Set start time.
|
||||
file.statmu.Lock()
|
||||
if file.start.IsZero() {
|
||||
file.start = time.Now()
|
||||
}
|
||||
file.statmu.Unlock()
|
||||
|
||||
n, err = file.in.Read(p)
|
||||
|
||||
// Update Stats
|
||||
file.statmu.Lock()
|
||||
file.lpBytes += n
|
||||
file.bytes += int64(n)
|
||||
file.statmu.Unlock()
|
||||
|
||||
Stats.Bytes(int64(n))
|
||||
|
||||
// Limit the transfer speed if required
|
||||
if tokenBucket != nil {
|
||||
tokenBucket.Wait(int64(n))
|
||||
@@ -196,9 +325,98 @@ func (file *Account) Read(p []byte) (n int, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// Progress returns bytes read as well as the size.
|
||||
// Size can be <= 0 if the size is unknown.
|
||||
func (file *Account) Progress() (bytes, size int64) {
|
||||
if file == nil {
|
||||
return 0, 0
|
||||
}
|
||||
file.statmu.Lock()
defer file.statmu.Unlock()
bytes, size = file.bytes, file.size
if bytes > size {
size = 0
}
return bytes, size
|
||||
}
|
||||
|
||||
// Speed returns the speed of the current file transfer
|
||||
// in bytes per second, as well as an exponentially weighted moving average.
|
||||
// If no read has completed yet, 0 is returned for both values.
|
||||
func (file *Account) Speed() (bps, current float64) {
|
||||
if file == nil {
|
||||
return 0, 0
|
||||
}
|
||||
file.statmu.Lock()
|
||||
defer file.statmu.Unlock()
|
||||
if file.bytes == 0 {
|
||||
return 0, 0
|
||||
}
|
||||
// Calculate speed from first read.
|
||||
total := float64(time.Now().Sub(file.start)) / float64(time.Second)
|
||||
bps = float64(file.bytes) / total
|
||||
current = file.avg.Value()
|
||||
return
|
||||
}
|
||||
|
||||
// ETA returns the ETA of the current operation,
|
||||
// rounded to full seconds.
|
||||
// If the ETA cannot be determined 'ok' returns false.
|
||||
func (file *Account) ETA() (eta time.Duration, ok bool) {
|
||||
if file == nil || file.size <= 0 {
|
||||
return 0, false
|
||||
}
|
||||
file.statmu.Lock()
|
||||
defer file.statmu.Unlock()
|
||||
if file.bytes == 0 {
|
||||
return 0, false
|
||||
}
|
||||
left := file.size - file.bytes
|
||||
if left <= 0 {
|
||||
return 0, true
|
||||
}
|
||||
avg := file.avg.Value()
|
||||
if avg <= 0 {
|
||||
return 0, false
|
||||
}
|
||||
seconds := float64(left) / file.avg.Value()
|
||||
|
||||
return time.Duration(time.Second * time.Duration(int(seconds))), true
|
||||
}
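The arithmetic is simply remaining bytes divided by the smoothed speed, truncated to whole seconds; for example:
// left = 10 MiB remaining, avg = 1 MiB/s smoothed:
//	seconds = 10485760 / 1048576 = 10
//	ETA     = 10s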
|
||||
|
||||
// String produces stats for this file
|
||||
func (file *Account) String() string {
|
||||
a, b := file.Progress()
|
||||
avg, cur := file.Speed()
|
||||
eta, etaok := file.ETA()
|
||||
etas := "-"
|
||||
if etaok {
|
||||
if eta > 0 {
|
||||
etas = fmt.Sprintf("%v", eta)
|
||||
} else {
|
||||
etas = "0s"
|
||||
}
|
||||
}
|
||||
name := []rune(file.name)
|
||||
if len(name) > 45 {
|
||||
where := len(name) - 42
|
||||
name = append([]rune{'.', '.', '.'}, name[where:]...)
|
||||
}
|
||||
if b <= 0 {
|
||||
return fmt.Sprintf("%45s: avg:%7.1f, cur: %6.1f kByte/s. ETA: %s", string(name), avg/1024, cur/1024, etas)
|
||||
}
|
||||
return fmt.Sprintf("%45s: %2d%% done. avg: %6.1f, cur: %6.1f kByte/s. ETA: %s", string(name), int(100*float64(a)/float64(b)), avg/1024, cur/1024, etas)
|
||||
}
|
||||
|
||||
// Close the object
|
||||
func (file *Account) Close() error {
|
||||
// FIXME do something?
|
||||
file.mu.Lock()
|
||||
defer file.mu.Unlock()
|
||||
if file.closed {
|
||||
return nil
|
||||
}
|
||||
file.closed = true
|
||||
close(file.exit)
|
||||
Stats.inProgress.clear(file.name)
|
||||
return file.in.Close()
|
||||
}
|
||||
|
||||
|
||||
199 fs/buffer.go (new file)
@@ -0,0 +1,199 @@
|
||||
package fs

import (
    "fmt"
    "io"
)

// asyncReader will do async read-ahead from the input reader
// and make the data available as an io.Reader.
// This should be fully transparent, except that once an error
// has been returned from the Reader, it will not recover.
type asyncReader struct {
    in      io.ReadCloser // Input reader
    ready   chan *buffer  // Buffers ready to be handed to the reader
    reuse   chan *buffer  // Buffers to reuse for input reading
    exit    chan struct{} // Closes when finished
    buffers int           // Number of buffers
    err     error         // If an error has occurred it is here
    cur     *buffer       // Current buffer being served
    exited  chan struct{} // Channel is closed when the async reader shuts down
    closed  bool          // Has the parent reader been closed?
}

// newAsyncReader returns a reader that will asynchronously read from
// the supplied Reader into a number of buffers each with a given size.
// It will start reading from the input at once, maybe even before this
// function has returned.
// The input can be read from the returned reader.
// When done use Close to release the buffers and close the supplied input.
func newAsyncReader(rd io.ReadCloser, buffers, size int) (io.ReadCloser, error) {
    if size <= 0 {
        return nil, fmt.Errorf("buffer size too small")
    }
    if buffers <= 0 {
        return nil, fmt.Errorf("number of buffers too small")
    }
    if rd == nil {
        return nil, fmt.Errorf("nil reader supplied")
    }
    a := &asyncReader{}
    a.init(rd, buffers, size)
    return a, nil
}

func (a *asyncReader) init(rd io.ReadCloser, buffers, size int) {
    a.in = rd
    a.ready = make(chan *buffer, buffers)
    a.reuse = make(chan *buffer, buffers)
    a.exit = make(chan struct{}, 0)
    a.exited = make(chan struct{}, 0)
    a.buffers = buffers
    a.cur = nil

    // Create buffers
    for i := 0; i < buffers; i++ {
        a.reuse <- newBuffer(size)
    }

    // Start async reader
    go func() {
        // Ensure that when we exit this is signalled.
        defer close(a.exited)
        for {
            select {
            case b := <-a.reuse:
                err := b.read(a.in)
                a.ready <- b
                if err != nil {
                    close(a.ready)
                    return
                }
            case <-a.exit:
                return
            }
        }
    }()
}

// fill fetches the next buffer from the ready channel once the
// current one is exhausted, recycling the old buffer for reuse.
func (a *asyncReader) fill() (err error) {
    if a.cur.isEmpty() {
        if a.cur != nil {
            a.reuse <- a.cur
            a.cur = nil
        }
        b, ok := <-a.ready
        if !ok {
            return a.err
        }
        a.cur = b
    }
    return nil
}

// Read will return the next available data.
func (a *asyncReader) Read(p []byte) (n int, err error) {
    // Swap buffer and maybe return error
    err = a.fill()
    if err != nil {
        return 0, err
    }

    // Copy what we can
    n = copy(p, a.cur.buffer())
    a.cur.increment(n)

    // If at end of buffer, return any error, if present
    if a.cur.isEmpty() {
        a.err = a.cur.err
        return n, a.err
    }
    return n, nil
}

// WriteTo writes data to w until there's no more data to write or when an error occurs.
// The return value n is the number of bytes written.
// Any error encountered during the write is also returned.
func (a *asyncReader) WriteTo(w io.Writer) (n int64, err error) {
    n = 0
    for {
        err = a.fill()
        if err != nil {
            return n, err
        }
        n2, err := w.Write(a.cur.buffer())
        a.cur.increment(n2)
        n += int64(n2)
        if err != nil {
            return n, err
        }
        if a.cur.err != nil {
            a.err = a.cur.err
            return n, a.cur.err
        }
    }
}

// Close will ensure that the underlying async reader is shut down.
// It will also close the input supplied on newAsyncReader.
func (a *asyncReader) Close() (err error) {
    select {
    case <-a.exited:
    default:
        close(a.exit)
        <-a.exited
    }
    if !a.closed {
        a.closed = true
        return a.in.Close()
    }
    return nil
}

// Internal buffer
// If an error is present, it must be returned
// once all buffer content has been served.
type buffer struct {
    buf    []byte
    err    error
    offset int
    size   int
}

func newBuffer(size int) *buffer {
    return &buffer{buf: make([]byte, size), err: nil, size: size}
}

// isEmpty returns true if the offset is at the end of the buffer,
// or if the buffer is nil.
func (b *buffer) isEmpty() bool {
    if b == nil {
        return true
    }
    if len(b.buf)-b.offset <= 0 {
        return true
    }
    return false
}

// read into start of the buffer from the supplied reader,
// resets the offset and updates the size of the buffer.
// Any error encountered during the read is returned.
func (b *buffer) read(rd io.Reader) error {
    var n int
    n, b.err = rd.Read(b.buf[0:b.size])
    b.buf = b.buf[0:n]
    b.offset = 0
    return b.err
}

// Return the buffer at current offset
func (b *buffer) buffer() []byte {
    return b.buf[b.offset:]
}

// increment the offset
func (b *buffer) increment(n int) {
    b.offset += n
}
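Because newAsyncReader returns a plain io.ReadCloser, it can sit in front of any slow stream. A hedged in-package sketch (copyWithReadAhead is an illustrative helper, not part of this commit); note from the tests below that WriteTo surfaces io.EOF through io.Copy, so that value is filtered out here:

package fs

import "io"

// copyWithReadAhead copies src to dst through the async read-ahead,
// using n buffers of the given size. Closing the asyncReader also
// closes src.
func copyWithReadAhead(dst io.Writer, src io.ReadCloser, n, size int) (written int64, err error) {
    rd, err := newAsyncReader(src, n, size)
    if err != nil {
        return 0, err
    }
    defer func() {
        if cerr := rd.Close(); err == nil {
            err = cerr
        }
    }()
    written, err = io.Copy(dst, rd) // io.Copy uses asyncReader.WriteTo
    if err == io.EOF {
        err = nil // WriteTo reports end of stream as io.EOF
    }
    return written, err
}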
268 fs/buffer_test.go Normal file
@@ -0,0 +1,268 @@
package fs

import (
    "bufio"
    "bytes"
    "io"
    "io/ioutil"
    "strings"
    "testing"
    "testing/iotest"
)

func TestAsyncReader(t *testing.T) {
    buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
    ar, err := newAsyncReader(buf, 4, 10000)
    if err != nil {
        t.Fatal("error when creating:", err)
    }

    var dst = make([]byte, 100)
    n, err := ar.Read(dst)
    if err != nil {
        t.Fatal("error when reading:", err)
    }
    if n != 10 {
        t.Fatal("unexpected length, expected 10, got ", n)
    }

    n, err = ar.Read(dst)
    if err != io.EOF {
        t.Fatal("expected io.EOF, got", err)
    }
    if n != 0 {
        t.Fatal("unexpected length, expected 0, got ", n)
    }

    // Test read after error
    n, err = ar.Read(dst)
    if err != io.EOF {
        t.Fatal("expected io.EOF, got", err)
    }
    if n != 0 {
        t.Fatal("unexpected length, expected 0, got ", n)
    }

    err = ar.Close()
    if err != nil {
        t.Fatal("error when closing:", err)
    }
    // Test double close
    err = ar.Close()
    if err != nil {
        t.Fatal("error when closing:", err)
    }

    // Test Close without reading everything
    buf = ioutil.NopCloser(bytes.NewBuffer(make([]byte, 50000)))
    ar, err = newAsyncReader(buf, 4, 100)
    if err != nil {
        t.Fatal("error when creating:", err)
    }
    err = ar.Close()
    if err != nil {
        t.Fatal("error when closing, noread:", err)
    }
}

func TestAsyncWriteTo(t *testing.T) {
    buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
    ar, err := newAsyncReader(buf, 4, 10000)
    if err != nil {
        t.Fatal("error when creating:", err)
    }

    var dst = &bytes.Buffer{}
    n, err := io.Copy(dst, ar)
    if err != io.EOF {
        t.Fatal("error when reading:", err)
    }
    if n != 10 {
        t.Fatal("unexpected length, expected 10, got ", n)
    }

    // Should still return EOF
    n, err = io.Copy(dst, ar)
    if err != io.EOF {
        t.Fatal("expected io.EOF, got", err)
    }
    if n != 0 {
        t.Fatal("unexpected length, expected 0, got ", n)
    }

    err = ar.Close()
    if err != nil {
        t.Fatal("error when closing:", err)
    }
}

func TestAsyncReaderErrors(t *testing.T) {
    // test nil reader
    _, err := newAsyncReader(nil, 4, 10000)
    if err == nil {
        t.Fatal("expected error when creating, but got nil")
    }

    // invalid buffer number
    buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
    _, err = newAsyncReader(buf, 0, 10000)
    if err == nil {
        t.Fatal("expected error when creating, but got nil")
    }
    _, err = newAsyncReader(buf, -1, 10000)
    if err == nil {
        t.Fatal("expected error when creating, but got nil")
    }

    // invalid buffer size
    _, err = newAsyncReader(buf, 4, 0)
    if err == nil {
        t.Fatal("expected error when creating, but got nil")
    }
    _, err = newAsyncReader(buf, 4, -1)
    if err == nil {
        t.Fatal("expected error when creating, but got nil")
    }
}

// Complex read tests, leveraged from "bufio".

type readMaker struct {
    name string
    fn   func(io.Reader) io.Reader
}

var readMakers = []readMaker{
    {"full", func(r io.Reader) io.Reader { return r }},
    {"byte", iotest.OneByteReader},
    {"half", iotest.HalfReader},
    {"data+err", iotest.DataErrReader},
    {"timeout", iotest.TimeoutReader},
}

// Call Read to accumulate the text of a file
func reads(buf io.Reader, m int) string {
    var b [1000]byte
    nb := 0
    for {
        n, err := buf.Read(b[nb : nb+m])
        nb += n
        if err == io.EOF {
            break
        } else if err != nil && err != iotest.ErrTimeout {
            panic("Data: " + err.Error())
        } else if err != nil {
            break
        }
    }
    return string(b[0:nb])
}

type bufReader struct {
    name string
    fn   func(io.Reader) string
}

var bufreaders = []bufReader{
    {"1", func(b io.Reader) string { return reads(b, 1) }},
    {"2", func(b io.Reader) string { return reads(b, 2) }},
    {"3", func(b io.Reader) string { return reads(b, 3) }},
    {"4", func(b io.Reader) string { return reads(b, 4) }},
    {"5", func(b io.Reader) string { return reads(b, 5) }},
    {"7", func(b io.Reader) string { return reads(b, 7) }},
}

const minReadBufferSize = 16

var bufsizes = []int{
    0, minReadBufferSize, 23, 32, 46, 64, 93, 128, 1024, 4096,
}

// Test various input buffer sizes, number of buffers and read sizes.
func TestAsyncReaderSizes(t *testing.T) {
    var texts [31]string
    str := ""
    all := ""
    for i := 0; i < len(texts)-1; i++ {
        texts[i] = str + "\n"
        all += texts[i]
        str += string(i%26 + 'a')
    }
    texts[len(texts)-1] = all

    for h := 0; h < len(texts); h++ {
        text := texts[h]
        for i := 0; i < len(readMakers); i++ {
            for j := 0; j < len(bufreaders); j++ {
                for k := 0; k < len(bufsizes); k++ {
                    for l := 1; l < 10; l++ {
                        readmaker := readMakers[i]
                        bufreader := bufreaders[j]
                        bufsize := bufsizes[k]
                        read := readmaker.fn(strings.NewReader(text))
                        buf := bufio.NewReaderSize(read, bufsize)
                        ar, _ := newAsyncReader(ioutil.NopCloser(buf), l, 100)
                        s := bufreader.fn(ar)
                        // "timeout" expects the Reader to recover, asyncReader does not.
                        if s != text && readmaker.name != "timeout" {
                            t.Errorf("reader=%s fn=%s bufsize=%d want=%q got=%q",
                                readmaker.name, bufreader.name, bufsize, text, s)
                        }
                        err := ar.Close()
                        if err != nil {
                            t.Fatal("Unexpected close error:", err)
                        }
                    }
                }
            }
        }
    }
}

// Test various input buffer sizes, number of buffers and read sizes.
func TestAsyncReaderWriteTo(t *testing.T) {
    var texts [31]string
    str := ""
    all := ""
    for i := 0; i < len(texts)-1; i++ {
        texts[i] = str + "\n"
        all += texts[i]
        str += string(i%26 + 'a')
    }
    texts[len(texts)-1] = all

    for h := 0; h < len(texts); h++ {
        text := texts[h]
        for i := 0; i < len(readMakers); i++ {
            for j := 0; j < len(bufreaders); j++ {
                for k := 0; k < len(bufsizes); k++ {
                    for l := 1; l < 10; l++ {
                        readmaker := readMakers[i]
                        bufreader := bufreaders[j]
                        bufsize := bufsizes[k]
                        read := readmaker.fn(strings.NewReader(text))
                        buf := bufio.NewReaderSize(read, bufsize)
                        ar, _ := newAsyncReader(ioutil.NopCloser(buf), l, 100)
                        dst := &bytes.Buffer{}
                        wt := ar.(io.WriterTo)
                        _, err := wt.WriteTo(dst)
                        if err != nil && err != io.EOF && err != iotest.ErrTimeout {
                            t.Fatal("Copy:", err)
                        }
                        s := dst.String()
                        // "timeout" expects the Reader to recover, asyncReader does not.
                        if s != text && readmaker.name != "timeout" {
                            t.Errorf("reader=%s fn=%s bufsize=%d want=%q got=%q",
                                readmaker.name, bufreader.name, bufsize, text, s)
                        }
                        err = ar.Close()
                        if err != nil {
                            t.Fatal("Unexpected close error:", err)
                        }
                    }
                }
            }
        }
    }
}
127 fs/config.go
@@ -4,6 +4,7 @@ package fs
import (
    "bufio"
    "encoding/base64"
    "fmt"
    "log"
    "math"
@@ -16,26 +17,29 @@ import (
    "strings"
    "time"

    "crypto/tls"

    "github.com/Unknwon/goconfig"
    "github.com/mreiferson/go-httpclient"
    "github.com/ogier/pflag"
    "github.com/spf13/pflag"
)

const (
    configFileName = ".rclone.conf"
)

// SizeSuffix is parsed by flag with k/M/G suffixes
type SizeSuffix int64

// Global
var (
    // Config file
    // ConfigFile is the config file data structure
    ConfigFile *goconfig.ConfigFile
    // Home directory
    // HomeDir is the home directory of the user
    HomeDir = configHome()
    // Config file path
    // ConfigPath points to the config file
    ConfigPath = path.Join(HomeDir, configFileName)
    // Global config
    // Config is the global config
    Config = &ConfigInfo{}
    // Flags
    verbose = pflag.BoolP("verbose", "v", false, "Print lots more stuff")
@@ -44,9 +48,14 @@ var (
    checkers       = pflag.IntP("checkers", "", 8, "Number of checkers to run in parallel.")
    transfers      = pflag.IntP("transfers", "", 4, "Number of file transfers to run in parallel.")
    configFile     = pflag.StringP("config", "", ConfigPath, "Config file.")
    checkSum       = pflag.BoolP("checksum", "c", false, "Skip based on checksum & size, not mod-time & size")
    sizeOnly       = pflag.BoolP("size-only", "", false, "Skip based on size only, not mod-time or checksum")
    dryRun         = pflag.BoolP("dry-run", "n", false, "Do a trial run with no permanent changes")
    connectTimeout = pflag.DurationP("contimeout", "", 60*time.Second, "Connect timeout")
    timeout        = pflag.DurationP("timeout", "", 5*60*time.Second, "IO idle timeout")
    dumpHeaders    = pflag.BoolP("dump-headers", "", false, "Dump HTTP headers - may contain sensitive info")
    dumpBodies     = pflag.BoolP("dump-bodies", "", false, "Dump HTTP headers and bodies - may contain sensitive info")
    skipVerify     = pflag.BoolP("no-check-certificate", "", false, "Do not verify the server SSL certificate. Insecure.")
    bwLimit        SizeSuffix
)

@@ -111,24 +120,56 @@ func (x *SizeSuffix) Set(s string) error {
    return nil
}

// Type of the value
func (x *SizeSuffix) Type() string {
    return "int64"
}

// Check it satisfies the interface
var _ pflag.Value = (*SizeSuffix)(nil)

// Filesystem config options
// Obscure a config value
func Obscure(x string) string {
    y := []byte(x)
    for i := range y {
        y[i] ^= byte(i) ^ 0xAA
    }
    return base64.StdEncoding.EncodeToString(y)
}

// Reveal a config value
func Reveal(y string) string {
    x, err := base64.StdEncoding.DecodeString(y)
    if err != nil {
        log.Fatalf("Failed to reveal %q: %v", y, err)
    }
    for i := range x {
        x[i] ^= byte(i) ^ 0xAA
    }
    return string(x)
}
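The masking above is a reversible XOR of each byte with its index and 0xAA, then base64; it hides credentials from a casual glance at the config file but is not encryption. A standalone sketch of the same transform (the obscure function below is a copy for illustration):

package main

import (
    "encoding/base64"
    "fmt"
)

// obscure mirrors fs.Obscure: XOR each byte with its index and 0xAA,
// then base64-encode the result.
func obscure(x string) string {
    y := []byte(x)
    for i := range y {
        y[i] ^= byte(i) ^ 0xAA
    }
    return base64.StdEncoding.EncodeToString(y)
}

func main() {
    fmt.Println(obscure("potato")) // 2sTcyNrA, matching TestReveal below
}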
// ConfigInfo is filesystem config options
type ConfigInfo struct {
    Verbose      bool
    Quiet        bool
    DryRun       bool
    ModifyWindow time.Duration
    Checkers     int
    Transfers    int
    ConnectTimeout time.Duration // Connect timeout
    Timeout        time.Duration // Data channel timeout
    Verbose        bool
    Quiet          bool
    DryRun         bool
    CheckSum       bool
    SizeOnly       bool
    ModifyWindow   time.Duration
    Checkers       int
    Transfers      int
    ConnectTimeout time.Duration // Connect timeout
    Timeout        time.Duration // Data channel timeout
    DumpHeaders    bool
    DumpBodies     bool
    Filter         *Filter
    InsecureSkipVerify bool // Skip server certificate verification
}

// Transport returns an http.RoundTripper with the correct timeouts
func (ci *ConfigInfo) Transport() http.RoundTripper {
    return &httpclient.Transport{
    t := &httpclient.Transport{
        Proxy:               http.ProxyFromEnvironment,
        MaxIdleConnsPerHost: ci.Checkers + ci.Transfers + 1,

@@ -150,10 +191,22 @@ func (ci *ConfigInfo) Transport() http.RoundTripper {
        // ReadWriteTimeout, if non-zero, will set a deadline for every Read and
        // Write operation on the request connection.
        ReadWriteTimeout: ci.Timeout,

        // InsecureSkipVerify controls whether a client verifies the
        // server's certificate chain and host name.
        // If InsecureSkipVerify is true, TLS accepts any certificate
        // presented by the server and any host name in that certificate.
        // In this mode, TLS is susceptible to man-in-the-middle attacks.
        // This should be used only for testing.
        TLSClientConfig: &tls.Config{InsecureSkipVerify: ci.InsecureSkipVerify},
    }
    if ci.DumpHeaders || ci.DumpBodies {
        return NewLoggedTransport(t, ci.DumpBodies)
    }
    return t
}

// Transport returns an http.Client with the correct timeouts
// Client returns an http.Client with the correct timeouts
func (ci *ConfigInfo) Client() *http.Client {
    return &http.Client{
        Transport: ci.Transport(),
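Putting the two together, a caller builds its HTTP client from a populated ConfigInfo and the timeouts flow through Transport into the client. An illustrative in-package sketch (exampleClient is not part of this commit; the field values mirror the flag defaults above):

package fs

import (
    "net/http"
    "time"
)

// exampleClient shows how ConfigInfo timeouts end up in an *http.Client.
func exampleClient() *http.Client {
    ci := &ConfigInfo{
        Checkers:       8,
        Transfers:      4,
        ConnectTimeout: 60 * time.Second,
        Timeout:        5 * 60 * time.Second,
    }
    return ci.Client() // uses ci.Transport() underneath
}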
@@ -181,7 +234,7 @@ func configHome() string {
    return ""
}

// Loads the config file
// LoadConfig loads the config file
func LoadConfig() {
    // Read some flags if set
    //
@@ -194,6 +247,11 @@ func LoadConfig() {
    Config.DryRun = *dryRun
    Config.Timeout = *timeout
    Config.ConnectTimeout = *connectTimeout
    Config.CheckSum = *checkSum
    Config.SizeOnly = *sizeOnly
    Config.DumpHeaders = *dumpHeaders
    Config.DumpBodies = *dumpBodies
    Config.InsecureSkipVerify = *skipVerify

    ConfigPath = *configFile

@@ -201,18 +259,24 @@ func LoadConfig() {
    var err error
    ConfigFile, err = goconfig.LoadConfigFile(ConfigPath)
    if err != nil {
        log.Printf("Failed to load config file %v - using defaults", ConfigPath)
        log.Printf("Failed to load config file %v - using defaults: %v", ConfigPath, err)
        ConfigFile, err = goconfig.LoadConfigFile(os.DevNull)
        if err != nil {
            log.Fatalf("Failed to read null config file: %v", err)
        }
    }

    // Load filters
    Config.Filter, err = NewFilter()
    if err != nil {
        log.Fatalf("Failed to load filters: %v", err)
    }

    // Start the token bucket limiter
    startTokenBucket()
}

// Save configuration file.
// SaveConfig saves configuration file.
func SaveConfig() {
    err := goconfig.SaveConfigFile(ConfigFile, ConfigPath)
    if err != nil {
@@ -224,7 +288,7 @@ func SaveConfig() {
    }
}

// Show an overview of the config file
// ShowRemotes shows an overview of the config file
func ShowRemotes() {
    remotes := ConfigFile.GetSectionList()
    if len(remotes) == 0 {
@@ -245,7 +309,7 @@ func ChooseRemote() string {
    return Choose("remote", remotes, nil, false)
}

// Read some input
// ReadLine reads some input
func ReadLine() string {
    buf := bufio.NewReader(os.Stdin)
    line, err := buf.ReadString('\n')
@@ -277,7 +341,7 @@ func Command(commands []string) byte {
    }
}

// Asks the user for Yes or No and returns true or false
// Confirm asks the user for Yes or No and returns true or false
func Confirm() bool {
    return Command([]string{"yYes", "nNo"}) == 'y'
}
@@ -314,7 +378,7 @@ func Choose(what string, defaults, help []string, newOk bool) string {
    }
}

// Show the contents of the remote
// ShowRemote shows the contents of the remote
func ShowRemote(name string) {
    fmt.Printf("--------------------\n")
    fmt.Printf("[%s]\n", name)
@@ -324,7 +388,7 @@ func ShowRemote(name string) {
    fmt.Printf("--------------------\n")
}

// Print the contents of the remote and ask if it is OK
// OkRemote prints the contents of the remote and asks if it is OK
func OkRemote(name string) bool {
    ShowRemote(name)
    switch i := Command([]string{"yYes this is OK", "eEdit this remote", "dDelete this remote"}); i {
@@ -341,7 +405,7 @@ func OkRemote(name string) bool {
    return false
}

// Runs the config helper for the remote if needed
// RemoteConfig runs the config helper for the remote if needed
func RemoteConfig(name string) {
    fmt.Printf("Remote config\n")
    fsName := ConfigFile.MustValue(name, "type")
@@ -357,7 +421,7 @@ func RemoteConfig(name string) {
    }
}

// Choose an option
// ChooseOption asks the user to choose an option
func ChooseOption(o *Option) string {
    fmt.Println(o.Help)
    if len(o.Examples) > 0 {
@@ -373,7 +437,7 @@ func ChooseOption(o *Option) string {
    return ReadLine()
}

// Make a new remote
// NewRemote makes a new remote from its name
func NewRemote(name string) {
    fmt.Printf("What type of source is it?\n")
    types := []string{}
@@ -397,7 +461,7 @@ func NewRemote(name string) {
    EditRemote(name)
}

// Edit a remote
// EditRemote gets the user to edit a remote
func EditRemote(name string) {
    ShowRemote(name)
    fmt.Printf("Edit remote\n")
@@ -419,13 +483,13 @@ func EditRemote(name string) {
    SaveConfig()
}

// Delete a remote
// DeleteRemote gets the user to delete a remote
func DeleteRemote(name string) {
    ConfigFile.DeleteSection(name)
    SaveConfig()
}

// Edit the config file interactively
// EditConfig edits the config file interactively
func EditConfig() {
    for {
        haveRemotes := len(ConfigFile.GetSectionList()) != 0
@@ -447,11 +511,14 @@ func EditConfig() {
        for {
            fmt.Printf("name> ")
            name := ReadLine()
            parts := matcher.FindStringSubmatch(name + ":")
            switch {
            case name == "":
                fmt.Printf("Can't use empty name\n")
            case isDriveLetter(name):
                fmt.Printf("Can't use %q as it can be confused with a drive letter\n", name)
            case len(parts) != 3 || parts[2] != "":
                fmt.Printf("Can't use %q as it has invalid characters in it %v\n", name, parts)
            default:
                NewRemote(name)
                break nameLoop

fs/config_test.go
@@ -55,3 +55,21 @@ func TestSizeSuffixSet(t *testing.T) {
        }
    }
}

func TestReveal(t *testing.T) {
    for _, test := range []struct {
        in   string
        want string
    }{
        {"", ""},
        {"2sTcyNrA", "potato"},
    } {
        got := Reveal(test.in)
        if got != test.want {
            t.Errorf("%q: want %q got %q", test.in, test.want, got)
        }
        if Obscure(got) != test.in {
            t.Errorf("%q: wasn't bidirectional", test.in)
        }
    }
}
102 fs/error.go Normal file
@@ -0,0 +1,102 @@
// Errors and error handling

package fs

import (
    "fmt"
    "net/http"
    "net/url"
)

// Retry is an optional interface for error as to whether the
// operation should be retried at a high level.
//
// This should be returned from Update or Put methods as required
type Retry interface {
    error
    Retry() bool
}

// retryError is a type of error
type retryError string

// Error interface
func (r retryError) Error() string {
    return string(r)
}

// Retry interface
func (r retryError) Retry() bool {
    return true
}

// Check interface
var _ Retry = retryError("")

// RetryErrorf makes an error which indicates it would like to be retried
func RetryErrorf(format string, a ...interface{}) error {
    return retryError(fmt.Sprintf(format, a...))
}

// plainRetryError is an error wrapped so it will retry
type plainRetryError struct {
    error
}

// Retry interface
func (err plainRetryError) Retry() bool {
    return true
}

// Check interface
var _ Retry = plainRetryError{(error)(nil)}

// RetryError makes an error which indicates it would like to be retried
func RetryError(err error) error {
    return plainRetryError{err}
}

// ShouldRetry looks at an error and tries to work out if retrying the
// operation that caused it would be a good idea. It returns true if
// the error implements Timeout() or Temporary() and it returns true.
func ShouldRetry(err error) bool {
    if err == nil {
        return false
    }

    // Unwrap url.Error
    if urlErr, ok := err.(*url.Error); ok {
        err = urlErr.Err
    }

    // Check for net error Timeout()
    if x, ok := err.(interface {
        Timeout() bool
    }); ok && x.Timeout() {
        return true
    }

    // Check for net error Temporary()
    if x, ok := err.(interface {
        Temporary() bool
    }); ok && x.Temporary() {
        return true
    }

    return false
}

// ShouldRetryHTTP returns a boolean as to whether this resp deserves a retry.
// It checks to see if the HTTP response code is in the slice
// retryErrorCodes.
func ShouldRetryHTTP(resp *http.Response, retryErrorCodes []int) bool {
    if resp == nil {
        return false
    }
    for _, e := range retryErrorCodes {
        if resp.StatusCode == e {
            return true
        }
    }
    return false
}
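A typical consumer of these helpers wraps an operation in a bounded loop, retrying when the error opts in via the Retry interface or looks transient to ShouldRetry. An illustrative sketch (the withRetries helper and attempt count are assumptions, not rclone's actual retry machinery):

package fs

// withRetries runs op up to tries times, retrying only errors that
// ask for it (Retry interface) or look transient (ShouldRetry).
func withRetries(op func() error, tries int) (err error) {
    for i := 0; i < tries; i++ {
        err = op()
        if err == nil {
            return nil
        }
        if r, ok := err.(Retry); ok && r.Retry() {
            continue
        }
        if ShouldRetry(err) {
            continue
        }
        return err // permanent error - give up early
    }
    return err
}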
248 fs/filter.go Normal file
@@ -0,0 +1,248 @@
// Control the filtering of files

package fs

import (
    "bufio"
    "fmt"
    "os"
    "regexp"
    "strings"

    "github.com/spf13/pflag"
)

// Global
var (
    // Flags
    deleteExcluded = pflag.BoolP("delete-excluded", "", false, "Delete files on dest excluded from sync")
    filterRule     = pflag.StringP("filter", "f", "", "Add a file-filtering rule")
    filterFrom     = pflag.StringP("filter-from", "", "", "Read filtering patterns from a file")
    excludeRule    = pflag.StringP("exclude", "", "", "Exclude files matching pattern")
    excludeFrom    = pflag.StringP("exclude-from", "", "", "Read exclude patterns from file")
    includeRule    = pflag.StringP("include", "", "", "Include files matching pattern")
    includeFrom    = pflag.StringP("include-from", "", "", "Read include patterns from file")
    filesFrom      = pflag.StringP("files-from", "", "", "Read list of source-file names from file")
    minSize        SizeSuffix
    maxSize        SizeSuffix
    dumpFilters    = pflag.BoolP("dump-filters", "", false, "Dump the filters to the output")
    //cvsExclude = pflag.BoolP("cvs-exclude", "C", false, "Exclude files in the same way CVS does")
)

func init() {
    pflag.VarP(&minSize, "min-size", "", "Don't transfer any file smaller than this in k or suffix k|M|G")
    pflag.VarP(&maxSize, "max-size", "", "Don't transfer any file larger than this in k or suffix k|M|G")
}

// rule is one filter rule
type rule struct {
    Include bool
    Regexp  *regexp.Regexp
}

// Match returns true if rule matches path
func (r *rule) Match(path string) bool {
    return r.Regexp.MatchString(path)
}

// String the rule
func (r *rule) String() string {
    c := "-"
    if r.Include {
        c = "+"
    }
    return fmt.Sprintf("%s %s", c, r.Regexp.String())
}

// filesMap describes the map of files to transfer
type filesMap map[string]struct{}

// Filter describes any filtering in operation
type Filter struct {
    DeleteExcluded bool
    MinSize        int64
    MaxSize        int64
    rules          []rule
    files          filesMap
}

// NewFilter parses the command line options and creates a Filter object
func NewFilter() (f *Filter, err error) {
    f = &Filter{
        DeleteExcluded: *deleteExcluded,
        MinSize:        int64(minSize),
        MaxSize:        int64(maxSize),
    }
    if *includeRule != "" {
        err = f.Add(true, *includeRule)
        if err != nil {
            return nil, err
        }
        // Add implicit exclude
        err = f.Add(false, "*")
        if err != nil {
            return nil, err
        }
    }
    if *includeFrom != "" {
        err := forEachLine(*includeFrom, func(line string) error {
            return f.Add(true, line)
        })
        if err != nil {
            return nil, err
        }
        // Add implicit exclude
        err = f.Add(false, "*")
        if err != nil {
            return nil, err
        }
    }
    if *excludeRule != "" {
        err = f.Add(false, *excludeRule)
        if err != nil {
            return nil, err
        }
    }
    if *excludeFrom != "" {
        err := forEachLine(*excludeFrom, func(line string) error {
            return f.Add(false, line)
        })
        if err != nil {
            return nil, err
        }
    }
    if *filterRule != "" {
        err = f.AddRule(*filterRule)
        if err != nil {
            return nil, err
        }
    }
    if *filterFrom != "" {
        err := forEachLine(*filterFrom, f.AddRule)
        if err != nil {
            return nil, err
        }
    }
    if *filesFrom != "" {
        err := forEachLine(*filesFrom, func(line string) error {
            return f.AddFile(line)
        })
        if err != nil {
            return nil, err
        }
    }
    if *dumpFilters {
        fmt.Println("--- start filters ---")
        fmt.Println(f.DumpFilters())
        fmt.Println("--- end filters ---")
    }
    return f, nil
}

// Add adds a filter rule with include or exclude status indicated
func (f *Filter) Add(Include bool, glob string) error {
    re, err := globToRegexp(glob)
    if err != nil {
        return err
    }
    rule := rule{
        Include: Include,
        Regexp:  re,
    }
    f.rules = append(f.rules, rule)
    return nil
}

// AddRule adds a filter rule with include/exclude indicated by the prefix
//
// These are
//
//     + glob
//     - glob
//     !
//
// '+' includes the glob, '-' excludes it and '!' resets the filter list
//
// Line comments may be introduced with '#' or ';'
func (f *Filter) AddRule(rule string) error {
    switch {
    case rule == "!":
        f.Clear()
        return nil
    case strings.HasPrefix(rule, "- "):
        return f.Add(false, rule[2:])
    case strings.HasPrefix(rule, "+ "):
        return f.Add(true, rule[2:])
    }
    return fmt.Errorf("Malformed rule %q", rule)
}

// AddFile adds a single file to the files from list
func (f *Filter) AddFile(file string) error {
    if f.files == nil {
        f.files = make(filesMap)
    }
    file = strings.Trim(file, "/")
    f.files[file] = struct{}{}
    return nil
}

// Clear clears all the filter rules
func (f *Filter) Clear() {
    f.rules = nil
}

// Include returns whether this object should be included into the
// sync or not
func (f *Filter) Include(remote string, size int64) bool {
    // filesFrom takes precedence
    if f.files != nil {
        _, include := f.files[remote]
        return include
    }
    if f.MinSize != 0 && size < f.MinSize {
        return false
    }
    if f.MaxSize != 0 && size > f.MaxSize {
        return false
    }
    for _, rule := range f.rules {
        if rule.Match(remote) {
            return rule.Include
        }
    }
    return true
}

// forEachLine calls fn on every line in the file pointed to by path
//
// It ignores empty lines and lines starting with '#' or ';'
func forEachLine(path string, fn func(string) error) (err error) {
    in, err := os.Open(path)
    if err != nil {
        return err
    }
    defer checkClose(in, &err)
    scanner := bufio.NewScanner(in)
    for scanner.Scan() {
        line := scanner.Text()
        line = strings.TrimSpace(line)
        if len(line) == 0 || line[0] == '#' || line[0] == ';' {
            continue
        }
        err := fn(line)
        if err != nil {
            return err
        }
    }
    return scanner.Err()
}

// DumpFilters dumps the filters in textual form, 1 per line
func (f *Filter) DumpFilters() string {
    rules := []string{}
    for _, rule := range f.rules {
        rules = append(rules, rule.String())
    }
    return strings.Join(rules, "\n")
}
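Outside of flag parsing, the same machinery can be driven directly. An illustrative sketch (exampleFilter is not part of this commit) showing rule ordering: first match wins, so the trailing "- *" turns the rule list into an allow-list:

package fs

import "fmt"

// exampleFilter builds a filter by hand rather than from flags.
func exampleFilter() error {
    f := &Filter{}
    for _, r := range []string{
        "+ *.jpg", // keep jpegs anywhere in the tree
        "- *",     // exclude everything else
    } {
        if err := f.AddRule(r); err != nil {
            return err
        }
    }
    fmt.Println(f.Include("photos/cat.jpg", 123)) // true
    fmt.Println(f.Include("notes.txt", 123))      // false
    return nil
}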
324 fs/filter_test.go Normal file
@@ -0,0 +1,324 @@
package fs

import (
    "io/ioutil"
    "os"
    "strings"
    "testing"
)

func TestNewFilterDefault(t *testing.T) {
    f, err := NewFilter()
    if err != nil {
        t.Fatal(err)
    }
    if f.DeleteExcluded != false {
        t.Errorf("DeleteExcluded want false got %v", f.DeleteExcluded)
    }
    if f.MinSize != 0 {
        t.Errorf("MinSize want 0 got %v", f.MinSize)
    }
    if f.MaxSize != 0 {
        t.Errorf("MaxSize want 0 got %v", f.MaxSize)
    }
    if len(f.rules) != 0 {
        t.Errorf("rules want none got %v", f.rules)
    }
    if f.files != nil {
        t.Errorf("files want none got %v", f.files)
    }
}

// return a pointer to the string
func stringP(s string) *string {
    return &s
}

// testFile creates a temp file with the contents
func testFile(t *testing.T, contents string) *string {
    out, err := ioutil.TempFile("", "filter_test")
    if err != nil {
        t.Fatal(err)
    }
    defer func() {
        err := out.Close()
        if err != nil {
            t.Error(err)
        }
    }()
    _, err = out.Write([]byte(contents))
    if err != nil {
        t.Fatal(err)
    }
    s := out.Name()
    return &s
}

func TestNewFilterFull(t *testing.T) {
    mins := int64(100 * 1024)
    maxs := int64(1000 * 1024)
    emptyString := ""
    isFalse := false
    isTrue := true

    // Set up the input
    deleteExcluded = &isTrue
    filterRule = stringP("- filter1")
    filterFrom = testFile(t, "#comment\n+ filter2\n- filter3\n")
    excludeRule = stringP("exclude1")
    excludeFrom = testFile(t, "#comment\nexclude2\nexclude3\n")
    includeRule = stringP("include1")
    includeFrom = testFile(t, "#comment\ninclude2\ninclude3\n")
    filesFrom = testFile(t, "#comment\nfiles1\nfiles2\n")
    minSize = SizeSuffix(mins)
    maxSize = SizeSuffix(maxs)

    rm := func(p string) {
        err := os.Remove(p)
        if err != nil {
            t.Logf("error removing %q: %v", p, err)
        }
    }
    // Reset the input
    defer func() {
        rm(*filterFrom)
        rm(*excludeFrom)
        rm(*includeFrom)
        rm(*filesFrom)
        minSize = 0
        maxSize = 0
        deleteExcluded = &isFalse
        filterRule = &emptyString
        filterFrom = &emptyString
        excludeRule = &emptyString
        excludeFrom = &emptyString
        includeRule = &emptyString
        includeFrom = &emptyString
        filesFrom = &emptyString
    }()

    f, err := NewFilter()
    if err != nil {
        t.Fatal(err)
    }
    if f.DeleteExcluded != true {
        t.Errorf("DeleteExcluded want true got %v", f.DeleteExcluded)
    }
    if f.MinSize != mins {
        t.Errorf("MinSize want %v got %v", mins, f.MinSize)
    }
    if f.MaxSize != maxs {
        t.Errorf("MaxSize want %v got %v", maxs, f.MaxSize)
    }
    got := f.DumpFilters()
    want := `+ (^|/)include1$
- (^|/)[^/]*$
+ (^|/)include2$
+ (^|/)include3$
- (^|/)[^/]*$
- (^|/)exclude1$
- (^|/)exclude2$
- (^|/)exclude3$
- (^|/)filter1$
+ (^|/)filter2$
- (^|/)filter3$`
    if got != want {
        t.Errorf("rules want %s got %s", want, got)
    }
    if len(f.files) != 2 {
        t.Errorf("files want 2 got %v", f.files)
    }
    for _, name := range []string{"files1", "files2"} {
        _, ok := f.files[name]
        if !ok {
            t.Errorf("Didn't find file %q in f.files", name)
        }
    }
}

type includeTest struct {
    in   string
    size int64
    want bool
}

func testInclude(t *testing.T, f *Filter, tests []includeTest) {
    for _, test := range tests {
        got := f.Include(test.in, test.size)
        if test.want != got {
            t.Errorf("%q,%d: want %v got %v", test.in, test.size, test.want, got)
        }
    }
}

func TestNewFilterIncludeFiles(t *testing.T) {
    f, err := NewFilter()
    if err != nil {
        t.Fatal(err)
    }
    err = f.AddFile("file1.jpg")
    if err != nil {
        t.Error(err)
    }
    err = f.AddFile("/file2.jpg")
    if err != nil {
        t.Error(err)
    }
    testInclude(t, f, []includeTest{
        {"file1.jpg", 0, true},
        {"file2.jpg", 1, true},
        {"potato/file2.jpg", 2, false},
        {"file3.jpg", 3, false},
    })
}

func TestNewFilterMinSize(t *testing.T) {
    f, err := NewFilter()
    if err != nil {
        t.Fatal(err)
    }
    f.MinSize = 100
    testInclude(t, f, []includeTest{
        {"file1.jpg", 100, true},
        {"file2.jpg", 101, true},
        {"potato/file2.jpg", 99, false},
    })
}

func TestNewFilterMaxSize(t *testing.T) {
    f, err := NewFilter()
    if err != nil {
        t.Fatal(err)
    }
    f.MaxSize = 100
    testInclude(t, f, []includeTest{
        {"file1.jpg", 100, true},
        {"file2.jpg", 101, false},
        {"potato/file2.jpg", 99, true},
    })
}

func TestNewFilterMatches(t *testing.T) {
    f, err := NewFilter()
    if err != nil {
        t.Fatal(err)
    }
    add := func(s string) {
        err := f.AddRule(s)
        if err != nil {
            t.Fatal(err)
        }
    }
    add("+ cleared")
    add("!")
    add("- file1.jpg")
    add("+ file2.png")
    add("+ *.jpg")
    add("- *.png")
    add("- /potato")
    add("+ /sausage1")
    add("+ /sausage2*")
    add("+ /sausage3**")
    add("- *")
    testInclude(t, f, []includeTest{
        {"cleared", 100, false},
        {"file1.jpg", 100, false},
        {"file2.png", 100, true},
        {"afile2.png", 100, false},
        {"file3.jpg", 101, true},
        {"file4.png", 101, false},
        {"potato", 101, false},
        {"sausage1", 101, true},
        {"sausage1/potato", 101, false},
        {"sausage2potato", 101, true},
        {"sausage2/potato", 101, false},
        {"sausage3/potato", 101, true},
        {"unicorn", 99, false},
    })
}

func TestFilterForEachLine(t *testing.T) {
    file := testFile(t, `; comment
one
# another comment


two
# indented comment
three
four
five
six `)
    defer func() {
        err := os.Remove(*file)
        if err != nil {
            t.Error(err)
        }
    }()
    lines := []string{}
    err := forEachLine(*file, func(s string) error {
        lines = append(lines, s)
        return nil
    })
    if err != nil {
        t.Error(err)
    }
    got := strings.Join(lines, ",")
    want := "one,two,three,four,five,six"
    if want != got {
        t.Errorf("want %q got %q", want, got)
    }
}

func TestFilterMatchesFromDocs(t *testing.T) {
    for _, test := range []struct {
        glob     string
        included bool
        file     string
    }{
        {"file.jpg", true, "file.jpg"},
        {"file.jpg", true, "directory/file.jpg"},
        {"file.jpg", false, "afile.jpg"},
        {"file.jpg", false, "directory/afile.jpg"},
        {"/file.jpg", true, "file.jpg"},
        {"/file.jpg", false, "afile.jpg"},
        {"/file.jpg", false, "directory/file.jpg"},
        {"*.jpg", true, "file.jpg"},
        {"*.jpg", true, "directory/file.jpg"},
        {"*.jpg", false, "file.jpg/anotherfile.png"},
        {"dir/**", true, "dir/file.jpg"},
        {"dir/**", true, "dir/dir1/dir2/file.jpg"},
        {"dir/**", false, "directory/file.jpg"},
        {"dir/**", false, "adir/file.jpg"},
        {"l?ss", true, "less"},
        {"l?ss", true, "lass"},
        {"l?ss", false, "floss"},
        {"h[ae]llo", true, "hello"},
        {"h[ae]llo", true, "hallo"},
        {"h[ae]llo", false, "hullo"},
        {"{one,two}_potato", true, "one_potato"},
        {"{one,two}_potato", true, "two_potato"},
        {"{one,two}_potato", false, "three_potato"},
        {"{one,two}_potato", false, "_potato"},
        {"\\*.jpg", true, "*.jpg"},
        {"\\\\.jpg", true, "\\.jpg"},
        {"\\[one\\].jpg", true, "[one].jpg"},
    } {
        f, err := NewFilter()
        if err != nil {
            t.Fatal(err)
        }
        err = f.Add(true, test.glob)
        if err != nil {
            t.Fatal(err)
        }
        err = f.Add(false, "*")
        if err != nil {
            t.Fatal(err)
        }
        included := f.Include(test.file, 0)
        if included != test.included {
            t.Logf("%q match %q: want %v got %v", test.glob, test.file, test.included, included)
        }
    }
}
143 fs/fs.go
@@ -1,5 +1,4 @@
// File system interface

// Package fs is a generic file system interface for rclone object storage systems
package fs

import (
@@ -13,20 +12,27 @@ import (

// Constants
const (
    // User agent for Fs which can set it
    // UserAgent for Fs which can set it
    UserAgent = "rclone/" + Version
    // ModTimeNotSupported is a very large precision value to show
    // mod time isn't supported on this Fs
    ModTimeNotSupported = 100 * 365 * 24 * time.Hour
)

// Globals
var (
    // Filesystem registry
    fsRegistry []*FsInfo
    // Error returned by NewFs if not found in config file
    NotFoundInConfigFile = fmt.Errorf("Didn't find section in config file")
    fsRegistry []*Info
    // ErrorNotFoundInConfigFile is returned by NewFs if not found in config file
    ErrorNotFoundInConfigFile = fmt.Errorf("Didn't find section in config file")
    ErrorCantCopy             = fmt.Errorf("Can't copy object - incompatible remotes")
    ErrorCantMove             = fmt.Errorf("Can't move object - incompatible remotes")
    ErrorCantDirMove          = fmt.Errorf("Can't move directory - incompatible remotes")
    ErrorDirExists            = fmt.Errorf("Can't copy directory - destination already exists")
)

// Filesystem info
type FsInfo struct {
// Info information about a filesystem
type Info struct {
    // Name of this fs
    Name string
    // Create a new file system.  If root refers to an existing
@@ -39,7 +45,7 @@ type FsInfo struct {
    Options []Option
}

// An options for a Fs
// Option describes an option for the config wizard
type Option struct {
    Name     string
    Help     string
@@ -47,7 +53,7 @@ type Option struct {
    Examples []OptionExample
}

// An example for an option
// OptionExample describes an example for an Option
type OptionExample struct {
    Value string
    Help  string
@@ -56,22 +62,28 @@ type OptionExample struct {
// Register a filesystem
//
// Fs modules should use this in an init() function
func Register(info *FsInfo) {
func Register(info *Info) {
    fsRegistry = append(fsRegistry, info)
}

// A Filesystem, describes the local filesystem and the remote object store
// Fs is the interface a cloud storage system must provide
type Fs interface {
    // Name of the remote (as passed into NewFs)
    Name() string

    // Root of the remote (as passed into NewFs)
    Root() string

    // String returns a description of the FS
    String() string

    // List the Fs into a channel
    List() ObjectsChan

    // List the Fs directories/buckets/containers into a channel
    // ListDir lists the Fs directories/buckets/containers into a channel
    ListDir() DirChan

    // Find the Object at remote. Returns nil if can't be found
    // NewFsObject finds the Object at remote. Returns nil if can't be found
    NewFsObject(remote string) Object

    // Put in to the remote path with the modTime given of the given size
@@ -81,12 +93,12 @@ type Fs interface {
    // nil and the error
    Put(in io.Reader, remote string, modTime time.Time, size int64) (Object, error)

    // Make the directory (container, bucket)
    // Mkdir makes the directory (container, bucket)
    //
    // Shouldn't return an error if it already exists
    Mkdir() error

    // Remove the directory (container, bucket) if empty
    // Rmdir removes the directory (container, bucket) if empty
    //
    // Return an error if it doesn't exist or isn't empty
    Rmdir() error
@@ -95,8 +107,7 @@ type Fs interface {
    Precision() time.Duration
}

// A filesystem like object which can either be a remote object or a
// local file/directory
// Object is a filesystem like object provided by an Fs
type Object interface {
    // String returns a description of the Object
    String() string
@@ -108,9 +119,11 @@ type Object interface {
    Remote() string

    // Md5sum returns the md5 checksum of the file
    // If no Md5sum is available it returns ""
    Md5sum() (string, error)

    // ModTime returns the modification date of the file
    // It should return a best guess if one isn't available
    ModTime() time.Time

    // SetModTime sets the metadata on the object to set the modification date
@@ -132,7 +145,7 @@ type Object interface {
    Remove() error
}

// Optional interfaces
// Purger is an optional interface for Fs
type Purger interface {
    // Purge all files in the root and the root directory
    //
@@ -143,50 +156,63 @@ type Purger interface {
    Purge() error
}

// An optional interface for error as to whether the operation should be retried
//
// This should be returned from Update or Put methods as required
type Retry interface {
    error
    Retry() bool
// Copier is an optional interface for Fs
type Copier interface {
    // Copy src to this remote using server side copy operations.
    //
    // This is stored with the remote path given
    //
    // It returns the destination Object and a possible error
    //
    // Will only be called if src.Fs().Name() == f.Name()
    //
    // If it isn't possible then return fs.ErrorCantCopy
    Copy(src Object, remote string) (Object, error)
}

// A type of error
type retryError string

// Error interface
func (r retryError) Error() string {
    return string(r)
// Mover is an optional interface for Fs
type Mover interface {
    // Move src to this remote using server side move operations.
    //
    // This is stored with the remote path given
    //
    // It returns the destination Object and a possible error
    //
    // Will only be called if src.Fs().Name() == f.Name()
    //
    // If it isn't possible then return fs.ErrorCantMove
    Move(src Object, remote string) (Object, error)
}

// Retry interface
func (r retryError) Retry() bool {
    return true
// DirMover is an optional interface for Fs
type DirMover interface {
    // DirMove moves src to this remote using server side move
    // operations.
    //
    // Will only be called if src.Fs().Name() == f.Name()
    //
    // If it isn't possible then return fs.ErrorCantDirMove
    //
    // If destination exists then return fs.ErrorDirExists
    DirMove(src Fs) error
}

// Check interface
var _ Retry = retryError("")

// RetryErrorf makes an error which indicates it would like to be retried
func RetryErrorf(format string, a ...interface{}) error {
    return retryError(fmt.Sprintf(format, a...))
}

// A channel of Objects
// ObjectsChan is a channel of Objects
type ObjectsChan chan Object

// A slice of Objects
// Objects is a slice of Object~s
type Objects []Object

// A pair of Objects
// ObjectPair is a pair of Objects used to describe a potential copy
// operation.
type ObjectPair struct {
    src, dst Object
}

// A channel of ObjectPair
// ObjectPairChan is a channel of ObjectPair
type ObjectPairChan chan ObjectPair

// A structure of directory/container/bucket lists
// Dir describes a directory for directory/container/bucket lists
type Dir struct {
    Name string    // name of the directory
    When time.Time // modification or creation time - IsZero for unknown
@@ -194,13 +220,13 @@ type Dir struct {
    Count int64 // number of objects -1 for unknown
}

// A channel of Dir objects
// DirChan is a channel of Dir objects
type DirChan chan *Dir

// Finds a FsInfo object for the name passed in
// Find looks for an Info object for the name passed in
//
// Services are looked up in the config file
func Find(name string) (*FsInfo, error) {
func Find(name string) (*Info, error) {
    for _, item := range fsRegistry {
        if item.Name == name {
            return item, nil
@@ -210,7 +236,7 @@ func Find(name string) (*FsInfo, error) {
}

// Pattern to match an rclone url
var matcher = regexp.MustCompile(`^([\w_-]+):(.*)$`)
var matcher = regexp.MustCompile(`^([\w_ -]+):(.*)$`)

// NewFs makes a new Fs object from the path
//
@@ -229,7 +255,7 @@ func NewFs(path string) (Fs, error) {
    var err error
    fsName, err = ConfigFile.GetValue(configName, "type")
    if err != nil {
        return nil, NotFoundInConfigFile
        return nil, ErrorNotFoundInConfigFile
    }
}
fs, err := Find(fsName)
@@ -241,7 +267,7 @@ func NewFs(path string) (Fs, error) {
    return fs.NewFs(configName, fsPath)
}

// Outputs log for object
// OutputLog logs for an object
func OutputLog(o interface{}, text string, args ...interface{}) {
    description := ""
    if o != nil {
@@ -251,20 +277,27 @@ func OutputLog(o interface{}, text string, args ...interface{}) {
    log.Print(description + out)
}

// Write debugging output for this Object or Fs
// Debug writes debugging output for this Object or Fs
func Debug(o interface{}, text string, args ...interface{}) {
    if Config.Verbose {
        OutputLog(o, text, args...)
    }
}

// Write log output for this Object or Fs
// Log writes log output for this Object or Fs
func Log(o interface{}, text string, args ...interface{}) {
    if !Config.Quiet {
        OutputLog(o, text, args...)
    }
}

// ErrorLog writes error log output for this Object or Fs. It
// unconditionally logs a message regardless of Config.Quiet or
// Config.Verbose.
func ErrorLog(o interface{}, text string, args ...interface{}) {
    OutputLog(o, text, args...)
}

// checkClose is a utility function used to check the return from
// Close in a defer statement.
func checkClose(c io.Closer, err *error) {
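Callers discover the optional interfaces with a type assertion and fall back when the remote opts out with ErrorCantCopy. A hedged sketch (copyObject and the elided fallback are illustrative; Object's Fs() accessor is assumed from the interface comments above):

package fs

// copyObject tries a server side copy and reports ErrorCantCopy when
// the destination cannot do one, leaving the caller to stream instead.
func copyObject(dst Fs, src Object, remote string) (Object, error) {
    if do, ok := dst.(Copier); ok && src.Fs().Name() == dst.Name() {
        obj, err := do.Copy(src, remote)
        if err != ErrorCantCopy {
            return obj, err // copied, or failed for a real reason
        }
    }
    // fall back to a streamed copy (not shown here)
    return nil, ErrorCantCopy
}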
117 fs/glob.go Normal file
@@ -0,0 +1,117 @@
// rsync style glob parser

package fs

import (
    "bytes"
    "fmt"
    "regexp"
    "strings"
)

// globToRegexp converts an rsync style glob to a regexp
//
// documented in filtering.md
func globToRegexp(glob string) (*regexp.Regexp, error) {
    var re bytes.Buffer
    if strings.HasPrefix(glob, "/") {
        glob = glob[1:]
        _, _ = re.WriteRune('^')
    } else {
        _, _ = re.WriteString("(^|/)")
    }
    consecutiveStars := 0
    insertStars := func() error {
        if consecutiveStars > 0 {
            switch consecutiveStars {
            case 1:
                _, _ = re.WriteString(`[^/]*`)
            case 2:
                _, _ = re.WriteString(`.*`)
            default:
                return fmt.Errorf("too many stars in %q", glob)
            }
        }
        consecutiveStars = 0
        return nil
    }
    inBraces := false
    inBrackets := 0
    slashed := false
    for _, c := range glob {
        if slashed {
            _, _ = re.WriteRune(c)
            slashed = false
            continue
        }
        if c != '*' {
            err := insertStars()
            if err != nil {
                return nil, err
            }
        }
        if inBrackets > 0 {
            _, _ = re.WriteRune(c)
            if c == '[' {
                inBrackets++
            }
            if c == ']' {
                inBrackets--
            }
            continue
        }
        switch c {
        case '\\':
            _, _ = re.WriteRune(c)
            slashed = true
        case '*':
            consecutiveStars++
        case '?':
            _, _ = re.WriteString(`[^/]`)
        case '[':
            _, _ = re.WriteRune(c)
            inBrackets++
        case ']':
            return nil, fmt.Errorf("mismatched ']' in glob %q", glob)
        case '{':
            if inBraces {
                return nil, fmt.Errorf("can't nest '{' '}' in glob %q", glob)
            }
            inBraces = true
            _, _ = re.WriteRune('(')
        case '}':
            if !inBraces {
                return nil, fmt.Errorf("mismatched '{' and '}' in glob %q", glob)
            }
            _, _ = re.WriteRune(')')
            inBraces = false
        case ',':
            if inBraces {
                _, _ = re.WriteRune('|')
            } else {
                _, _ = re.WriteRune(c)
            }
        case '.', '+', '(', ')', '|', '^', '$': // regexp meta characters not dealt with above
            _, _ = re.WriteRune('\\')
            _, _ = re.WriteRune(c)
        default:
            _, _ = re.WriteRune(c)
        }
    }
    err := insertStars()
    if err != nil {
        return nil, err
    }
    if inBrackets > 0 {
        return nil, fmt.Errorf("mismatched '[' and ']' in glob %q", glob)
    }
    if inBraces {
        return nil, fmt.Errorf("mismatched '{' and '}' in glob %q", glob)
    }
    _, _ = re.WriteRune('$')
    result, err := regexp.Compile(re.String())
    if err != nil {
        return nil, fmt.Errorf("Bad glob pattern %q: %v (%q)", glob, err, re.String())
    }
    return result, nil
}
fs/glob_test.go (new file, 64 lines)
@@ -0,0 +1,64 @@
package fs

import (
    "strings"
    "testing"
)

func TestGlobToRegexp(t *testing.T) {
    for _, test := range []struct {
        in    string
        want  string
        error string
    }{
        {``, `(^|/)$`, ``},
        {`potato`, `(^|/)potato$`, ``},
        {`potato,sausage`, `(^|/)potato,sausage$`, ``},
        {`/potato`, `^potato$`, ``},
        {`potato?sausage`, `(^|/)potato[^/]sausage$`, ``},
        {`potat[oa]`, `(^|/)potat[oa]$`, ``},
        {`potat[a-z]or`, `(^|/)potat[a-z]or$`, ``},
        {`potat[[:alpha:]]or`, `(^|/)potat[[:alpha:]]or$`, ``},
        {`'.' '+' '(' ')' '|' '^' '$'`, `(^|/)'\.' '\+' '\(' '\)' '\|' '\^' '\$'$`, ``},
        {`*.jpg`, `(^|/)[^/]*\.jpg$`, ``},
        {`a{b,c,d}e`, `(^|/)a(b|c|d)e$`, ``},
        {`potato**`, `(^|/)potato.*$`, ``},
        {`potato**sausage`, `(^|/)potato.*sausage$`, ``},
        {`*.p[lm]`, `(^|/)[^/]*\.p[lm]$`, ``},
        {`[\[\]]`, `(^|/)[\[\]]$`, ``},
        {`***potato`, `(^|/)`, `too many stars`},
        {`***`, `(^|/)`, `too many stars`},
        {`ab]c`, `(^|/)`, `mismatched ']'`},
        {`ab[c`, `(^|/)`, `mismatched '[' and ']'`},
        {`ab{{cd`, `(^|/)`, `can't nest`},
        {`ab{}}cd`, `(^|/)`, `mismatched '{' and '}'`},
        {`ab}c`, `(^|/)`, `mismatched '{' and '}'`},
        {`ab{c`, `(^|/)`, `mismatched '{' and '}'`},
        {`*.{jpg,png,gif}`, `(^|/)[^/]*\.(jpg|png|gif)$`, ``},
        {`[a--b]`, `(^|/)`, `Bad glob pattern`},
        {`a\*b`, `(^|/)a\*b$`, ``},
        {`a\\b`, `(^|/)a\\b$`, ``},
    } {
        gotRe, err := globToRegexp(test.in)
        if test.error == "" {
            if err != nil {
                t.Errorf("%q: not expecting error: %v", test.in, err)
            } else {
                got := gotRe.String()
                if test.want != got {
                    t.Errorf("%q: want %q got %q", test.in, test.want, got)
                }
            }
        } else {
            if err == nil {
                t.Errorf("%q: expecting error but didn't get one", test.in)
            } else {
                got := err.Error()
                if !strings.Contains(got, test.error) {
                    t.Errorf("%q: want error %q got %q", test.in, test.error, got)
                }
            }
        }
    }

}
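As a worked illustration of the translation rules, the sketch below shows a star and a brace alternation being compiled and matched; it is hypothetical code which would have to live in package fs, since globToRegexp is unexported:

```go
package fs

import "fmt"

// globDemo is illustrative only: '*' stays within one path segment,
// '{a,b}' becomes a regexp alternation, and the (^|/) prefix anchors
// the pattern at a path segment boundary.
func globDemo() {
    re, err := globToRegexp(`*.{jpg,png}`)
    if err != nil {
        panic(err)
    }
    fmt.Println(re.String())               // (^|/)[^/]*\.(jpg|png)$
    fmt.Println(re.MatchString("a/b.jpg")) // true
    fmt.Println(re.MatchString("a/b.gif")) // false
}
```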
@@ -6,7 +6,8 @@ import (
    "time"
)

// This defines a Limited Fs which can only return the Objects passed in from the Fs passed in
// Limited defines a Fs which can only return the Objects passed in
// from the Fs passed in
type Limited struct {
    objects []Object
    fs      Fs
@@ -21,6 +22,16 @@ func NewLimited(fs Fs, objects ...Object) Fs {
    return f
}

// Name is name of the remote (as passed into NewFs)
func (f *Limited) Name() string {
    return f.fs.Name() // return name of underlying remote
}

// Root is the root of the remote (as passed into NewFs)
func (f *Limited) Root() string {
    return f.fs.Root() // return root of underlying remote
}

// String returns a description of the FS
func (f *Limited) String() string {
    return fmt.Sprintf("%s limited to %d objects", f.fs.String(), len(f.objects))
@@ -38,14 +49,14 @@ func (f *Limited) List() ObjectsChan {
    return out
}

// List the Fs directories/buckets/containers into a channel
// ListDir lists the Fs directories/buckets/containers into a channel
func (f *Limited) ListDir() DirChan {
    out := make(DirChan, Config.Checkers)
    close(out)
    return out
}

// Find the Object at remote. Returns nil if can't be found
// NewFsObject finds the Object at remote. Returns nil if can't be found
func (f *Limited) NewFsObject(remote string) Object {
    for _, obj := range f.objects {
        if obj.Remote() == remote {
@@ -68,15 +79,16 @@ func (f *Limited) Put(in io.Reader, remote string, modTime time.Time, size int64
    return obj, obj.Update(in, modTime, size)
}

// Make the directory (container, bucket)
// Mkdir makes the directory (container, bucket)
func (f *Limited) Mkdir() error {
    // All directories are already made - just ignore
    return nil
}

// Remove the directory (container, bucket) if empty
// Rmdir removes the directory (container, bucket) if empty
func (f *Limited) Rmdir() error {
    return fmt.Errorf("Can't rmdir in limited fs")
    // Ignore this in a limited fs
    return nil
}

// Precision of the ModTimes in this Fs
@@ -84,5 +96,23 @@ func (f *Limited) Precision() time.Duration {
    return f.fs.Precision()
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Limited) Copy(src Object, remote string) (Object, error) {
    fCopy, ok := f.fs.(Copier)
    if !ok {
        return nil, ErrorCantCopy
    }
    return fCopy.Copy(src, remote)
}

// Check the interfaces are satisfied
var _ Fs = &Limited{}
var _ Copier = &Limited{}
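The trailing var _ lines are the usual compile-time proof that Limited satisfies both interfaces. One plausible use of the type, sketched under the assumption that the object comes from an existing lookup (the helper below is illustrative, not rclone code):

```go
package fs

// newSingleObjectFs is an illustrative helper: it wraps one
// already-found Object in a Limited Fs so code written against the
// Fs interface can operate on a single file.
func newSingleObjectFs(f Fs, remote string) Fs {
    obj := f.NewFsObject(remote)
    if obj == nil {
        return nil // not found
    }
    return NewLimited(f, obj)
}
```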
fs/loghttp.go (new file, 57 lines)
@@ -0,0 +1,57 @@
// A logging http transport

package fs

import (
    "log"
    "net/http"
    "net/http/httputil"
)

const (
    separatorReq  = ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
    separatorResp = "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"
)

// LoggedTransport is an http transport which logs the traffic
type LoggedTransport struct {
    wrapped http.RoundTripper
    logBody bool
}

// NewLoggedTransport wraps the transport passed in and logs all roundtrips
// including the body if logBody is set.
func NewLoggedTransport(transport http.RoundTripper, logBody bool) *LoggedTransport {
    return &LoggedTransport{
        wrapped: transport,
        logBody: logBody,
    }
}

// CancelRequest cancels an in-flight request by closing its
// connection. CancelRequest should only be called after RoundTrip has
// returned.
func (t *LoggedTransport) CancelRequest(req *http.Request) {
    if wrapped, ok := t.wrapped.(interface {
        CancelRequest(*http.Request)
    }); ok {
        log.Printf("CANCEL REQUEST %v", req)
        wrapped.CancelRequest(req)
    }
}

// RoundTrip implements the RoundTripper interface.
func (t *LoggedTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
    buf, _ := httputil.DumpRequestOut(req, t.logBody)
    log.Println(separatorReq)
    log.Println("HTTP REQUEST")
    log.Println(string(buf))
    log.Println(separatorReq)
    resp, err = t.wrapped.RoundTrip(req)
    buf, _ = httputil.DumpResponse(resp, t.logBody)
    log.Println(separatorResp)
    log.Println("HTTP RESPONSE")
    log.Println(string(buf))
    log.Println(separatorResp)
    return resp, err
}
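A hedged sketch of wiring this transport into a client - NewLoggedTransport comes from the file above, while the program shape and URL are illustrative:

```go
package main

import (
    "log"
    "net/http"

    "github.com/ncw/rclone/fs"
)

func main() {
    // Wrap the default transport so every round trip is dumped to the
    // log; pass true to also dump request and response bodies.
    client := &http.Client{
        Transport: fs.NewLoggedTransport(http.DefaultTransport, false),
    }
    resp, err := client.Get("https://example.com/") // illustrative URL
    if err != nil {
        log.Fatal(err)
    }
    defer func() { _ = resp.Body.Close() }()
}
```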
fs/operations.go (382 lines changed)
@@ -8,9 +8,12 @@ import (
    "mime"
    "path"
    "sync"
    "sync/atomic"
    "time"
)

// Work out modify window for fses passed in - sets Config.ModifyWindow
// CalculateModifyWindow works out modify window for Fses passed in -
// sets Config.ModifyWindow
//
// This is the largest modify window of all the fses in use, and the
// user configured value
@@ -21,47 +24,69 @@ func CalculateModifyWindow(fs ...Fs) {
            if precision > Config.ModifyWindow {
                Config.ModifyWindow = precision
            }
            if precision == ModTimeNotSupported {
                Debug(f, "Modify window not supported")
                return
            }
        }
    }
    Debug(fs[0], "Modify window is %s\n", Config.ModifyWindow)
    Debug(fs[0], "Modify window is %s", Config.ModifyWindow)
}

// Check the two files to see if the MD5sums are the same
// Md5sumsEqual checks to see if src == dst, but ignores empty strings
func Md5sumsEqual(src, dst string) bool {
    if src == "" || dst == "" {
        return true
    }
    return src == dst
}

// CheckMd5sums checks the two files to see if the MD5sums are the same
//
// Returns two bools, the first of which is equality and the second of
// which is true if either of the MD5SUMs were unset.
//
// May return an error which will already have been logged
//
// If an error is returned it will return false
func CheckMd5sums(src, dst Object) (bool, error) {
// If an error is returned it will return equal as false
func CheckMd5sums(src, dst Object) (equal bool, unset bool, err error) {
    srcMd5, err := src.Md5sum()
    if err != nil {
        Stats.Error()
        Log(src, "Failed to calculate src md5: %s", err)
        return false, err
        ErrorLog(src, "Failed to calculate src md5: %s", err)
        return false, false, err
    }
    if srcMd5 == "" {
        return true, true, nil
    }
    dstMd5, err := dst.Md5sum()
    if err != nil {
        Stats.Error()
        Log(dst, "Failed to calculate dst md5: %s", err)
        return false, err
        ErrorLog(dst, "Failed to calculate dst md5: %s", err)
        return false, false, err
    }
    if dstMd5 == "" {
        return true, true, nil
    }
    // Debug("Src MD5 %s", srcMd5)
    // Debug("Dst MD5 %s", obj.Hash)
    return srcMd5 == dstMd5, nil
    return Md5sumsEqual(srcMd5, dstMd5), false, nil
}
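The convention that an empty string means "checksum unknown" keeps remotes which cannot supply an MD5SUM from producing spurious mismatches. An illustrative truth table (not rclone code):

```go
package fs

import "fmt"

// md5Examples shows Md5sumsEqual's semantics: an unknown (empty) sum
// never causes a mismatch; only two differing known sums do.
func md5Examples() {
    fmt.Println(Md5sumsEqual("abc", "abc")) // true  - both known, equal
    fmt.Println(Md5sumsEqual("abc", ""))    // true  - dst unknown
    fmt.Println(Md5sumsEqual("", "def"))    // true  - src unknown
    fmt.Println(Md5sumsEqual("abc", "def")) // false - known and different
}
```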
// Checks to see if the src and dst objects are equal by looking at
// Equal checks to see if the src and dst objects are equal by looking at
// size, mtime and MD5SUM
//
// If the src and dst size are different then it is considered to be
// not equal.
// not equal. If --size-only is in effect then this is the only check
// that is done.
//
// If the size is the same and the mtime is the same then it is
// considered to be equal. This is the heuristic rsync uses when
// not using --checksum.
// considered to be equal. This check is skipped if using --checksum.
//
// If the size is the same and and mtime is different or unreadable
// and the MD5SUM is the same then the file is considered to be equal.
// In this case the mtime on the dst is updated.
// If the size is the same and mtime is different, unreadable or
// --checksum is set and the MD5SUM is the same then the file is
// considered to be equal. In this case the mtime on the dst is
// updated if --checksum is not set.
//
// Otherwise the file is considered to be not equal including if there
// were errors reading info.
@@ -70,36 +95,53 @@ func Equal(src, dst Object) bool {
        Debug(src, "Sizes differ")
        return false
    }

    // Size the same so check the mtime
    srcModTime := src.ModTime()
    dstModTime := dst.ModTime()
    dt := dstModTime.Sub(srcModTime)
    ModifyWindow := Config.ModifyWindow
    if dt >= ModifyWindow || dt <= -ModifyWindow {
        Debug(src, "Modification times differ by %s: %v, %v", dt, srcModTime, dstModTime)
    } else {
        Debug(src, "Size and modification time the same (differ by %s, within tolerance %s)", dt, ModifyWindow)
    if Config.SizeOnly {
        Debug(src, "Sizes identical")
        return true
    }

    var srcModTime time.Time
    if !Config.CheckSum {
        if Config.ModifyWindow == ModTimeNotSupported {
            Debug(src, "Sizes identical")
            return true
        }
        // Size the same so check the mtime
        srcModTime = src.ModTime()
        dstModTime := dst.ModTime()
        dt := dstModTime.Sub(srcModTime)
        ModifyWindow := Config.ModifyWindow
        if dt >= ModifyWindow || dt <= -ModifyWindow {
            Debug(src, "Modification times differ by %s: %v, %v", dt, srcModTime, dstModTime)
        } else {
            Debug(src, "Size and modification time the same (differ by %s, within tolerance %s)", dt, ModifyWindow)
            return true
        }
    }

    // mtime is unreadable or different but size is the same so
    // check the MD5SUM
    same, _ := CheckMd5sums(src, dst)
    same, md5unset, _ := CheckMd5sums(src, dst)
    if !same {
        Debug(src, "Md5sums differ")
        return false
    }

    // Size and MD5 the same but mtime different so update the
    // mtime of the dst object here
    dst.SetModTime(srcModTime)
    if !Config.CheckSum {
        // Size and MD5 the same but mtime different so update the
        // mtime of the dst object here
        dst.SetModTime(srcModTime)
    }

    Debug(src, "Size and MD5SUM of src and dst objects identical")
    if md5unset {
        Debug(src, "Size of src and dst objects identical")
    } else {
        Debug(src, "Size and MD5SUM of src and dst objects identical")
    }
    return true
}
// Returns a guess at the mime type from the extension
// MimeType returns a guess at the mime type from the extension
func MimeType(o Object) string {
    mimeType := mime.TypeByExtension(path.Ext(o.Remote()))
    if mimeType == "" {
@@ -133,24 +175,46 @@ func Copy(f Fs, dst, src Object) {
    const maxTries = 10
    tries := 0
    doUpdate := dst != nil
    var err, inErr error
tryAgain:
    in0, err := src.Open()
    if err != nil {
        Stats.Error()
        Log(src, "Failed to open: %s", err)
        return
    }
    in := NewAccount(in0) // account the transfer

    var actionTaken string
    if doUpdate {
        actionTaken = "Copied (updated existing)"
        err = dst.Update(in, src.ModTime(), src.Size())
    // Try server side copy first - if has optional interface and
    // is same underlying remote
    actionTaken := "Copied (server side copy)"
    if fCopy, ok := f.(Copier); ok && src.Fs().Name() == f.Name() {
        var newDst Object
        newDst, err = fCopy.Copy(src, src.Remote())
        if err == nil {
            dst = newDst
        }
    } else {
        actionTaken = "Copied (new)"
        dst, err = f.Put(in, src.Remote(), src.ModTime(), src.Size())
        err = ErrorCantCopy
    }
    // If can't server side copy, do it manually
    if err == ErrorCantCopy {
        var in0 io.ReadCloser
        in0, err = src.Open()
        if err != nil {
            Stats.Error()
            ErrorLog(src, "Failed to open: %s", err)
            return
        }

        // On big files add a buffer
        if src.Size() > 10<<20 {
            in0, _ = newAsyncReader(in0, 4, 4<<20)
        }

        in := NewAccount(in0, src) // account the transfer

        if doUpdate {
            actionTaken = "Copied (updated existing)"
            err = dst.Update(in, src.ModTime(), src.Size())
        } else {
            actionTaken = "Copied (new)"
            dst, err = f.Put(in, src.Remote(), src.ModTime(), src.Size())
        }
        inErr = in.Close()
    }
    inErr := in.Close()
    // Retry if err returned a retry error
    if r, ok := err.(Retry); ok && r.Retry() && tries < maxTries {
        tries++
@@ -167,7 +231,7 @@ tryAgain:
    }
    if err != nil {
        Stats.Error()
        Log(src, "Failed to copy: %s", err)
        ErrorLog(src, "Failed to copy: %s", err)
        removeFailedCopy(dst)
        return
    }
@@ -176,27 +240,29 @@ tryAgain:
    if src.Size() != dst.Size() {
        Stats.Error()
        err = fmt.Errorf("Corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size())
        Log(dst, "%s", err)
        ErrorLog(dst, "%s", err)
        removeFailedCopy(dst)
        return
    }

    // Verify md5sums are the same after transfer - ignoring blank md5sums
    srcMd5sum, md5sumErr := src.Md5sum()
    if md5sumErr != nil {
        Stats.Error()
        Log(src, "Failed to read md5sum: %s", md5sumErr)
    } else if srcMd5sum != "" {
        dstMd5sum, md5sumErr := dst.Md5sum()
    if !Config.SizeOnly {
        srcMd5sum, md5sumErr := src.Md5sum()
        if md5sumErr != nil {
            Stats.Error()
            Log(dst, "Failed to read md5sum: %s", md5sumErr)
        } else if dstMd5sum != "" && srcMd5sum != dstMd5sum {
            Stats.Error()
            err = fmt.Errorf("Corrupted on transfer: md5sums differ %q vs %q", srcMd5sum, dstMd5sum)
            Log(dst, "%s", err)
            removeFailedCopy(dst)
            return
            ErrorLog(src, "Failed to read md5sum: %s", md5sumErr)
        } else if srcMd5sum != "" {
            dstMd5sum, md5sumErr := dst.Md5sum()
            if md5sumErr != nil {
                Stats.Error()
                ErrorLog(dst, "Failed to read md5sum: %s", md5sumErr)
            } else if !Md5sumsEqual(srcMd5sum, dstMd5sum) {
                Stats.Error()
                err = fmt.Errorf("Corrupted on transfer: md5sums differ %q vs %q", srcMd5sum, dstMd5sum)
                ErrorLog(dst, "%s", err)
                removeFailedCopy(dst)
                return
            }
        }
    }
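The control flow in Copy is easy to miss in the diff: "no Copier interface" and "different underlying remote" both leave err set to ErrorCantCopy, so the streaming fallback has a single entry point. A condensed, illustrative sketch of that shape (copySketch and its streamCopy parameter are stand-ins, not rclone functions):

```go
package fs

// copySketch condenses the fallback shape of Copy: probe the optional
// Copier interface and collapse every "can't do it server side" case
// into one error value before taking the manual download/upload path.
func copySketch(f Fs, src Object, streamCopy func() error) error {
    err := ErrorCantCopy
    if fCopy, ok := f.(Copier); ok && src.Fs().Name() == f.Name() {
        _, err = fCopy.Copy(src, src.Remote())
    }
    if err == ErrorCantCopy {
        return streamCopy() // manual copy with accounting and retries
    }
    return err
}
```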
@@ -223,7 +289,7 @@ func checkOne(pair ObjectPair, out ObjectPairChan) {
    out <- pair
}

// Read Objects~s on in send to out if they need uploading
// PairChecker reads Objects~s on in send to out if they need transferring.
//
// FIXME potentially doing lots of MD5SUMS at once
func PairChecker(in ObjectPairChan, out ObjectPairChan, wg *sync.WaitGroup) {
@@ -236,8 +302,8 @@ func PairChecker(in ObjectPairChan, out ObjectPairChan, wg *sync.WaitGroup) {
    }
}

// Read Objects on in and copy them
func Copier(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
// PairCopier reads Objects on in and copies them.
func PairCopier(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
    defer wg.Done()
    for pair := range in {
        src := pair.src
@@ -251,14 +317,49 @@ func Copier(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
    }
}

// Delete all the files passed in the channel
func DeleteFiles(to_be_deleted ObjectsChan) {
// PairMover reads Objects on in and moves them if possible, or copies
// them if not
func PairMover(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
    defer wg.Done()
    // See if we have Move available
    fdstMover, haveMover := fdst.(Mover)
    for pair := range in {
        src := pair.src
        dst := pair.dst
        Stats.Transferring(src)
        if Config.DryRun {
            Debug(src, "Not moving as --dry-run")
        } else if haveMover {
            // Delete destination if it exists
            if pair.dst != nil {
                err := dst.Remove()
                if err != nil {
                    Stats.Error()
                    ErrorLog(dst, "Couldn't delete: %v", err)
                }
            }
            _, err := fdstMover.Move(src, src.Remote())
            if err != nil {
                Stats.Error()
                ErrorLog(dst, "Couldn't move: %v", err)
            } else {
                Debug(src, "Moved")
            }
        } else {
            Copy(fdst, pair.dst, src)
        }
        Stats.DoneTransferring(src)
    }
}

// DeleteFiles removes all the files passed in the channel
func DeleteFiles(toBeDeleted ObjectsChan) {
    var wg sync.WaitGroup
    wg.Add(Config.Transfers)
    for i := 0; i < Config.Transfers; i++ {
        go func() {
            defer wg.Done()
            for dst := range to_be_deleted {
            for dst := range toBeDeleted {
                if Config.DryRun {
                    Debug(dst, "Not deleting as --dry-run")
                } else {
@@ -267,7 +368,7 @@ func DeleteFiles(to_be_deleted ObjectsChan) {
                    Stats.DoneChecking(dst)
                    if err != nil {
                        Stats.Error()
                        Log(dst, "Couldn't delete: %s", err)
                        ErrorLog(dst, "Couldn't delete: %s", err)
                    } else {
                        Debug(dst, "Deleted")
                    }
@@ -293,10 +394,22 @@ func readFilesMap(fs Fs) map[string]Object {
    return files
}
// Same returns true if fdst and fsrc point to the same underlying Fs
func Same(fdst, fsrc Fs) bool {
    return fdst.Name() == fsrc.Name() && fdst.Root() == fsrc.Root()
}

// Syncs fsrc into fdst
//
// If Delete is true then it deletes any files in fdst that aren't in fsrc
func Sync(fdst, fsrc Fs, Delete bool) error {
//
// If DoMove is true then files will be moved instead of copied
func syncCopyMove(fdst, fsrc Fs, Delete bool, DoMove bool) error {
    if Same(fdst, fsrc) {
        ErrorLog(fdst, "Nothing to do as source and destination are the same")
        return nil
    }

    err := fdst.Mkdir()
    if err != nil {
        Stats.Error()
@@ -310,46 +423,57 @@ func Sync(fdst, fsrc Fs, Delete bool) error {
    delFiles := readFilesMap(fdst)

    // Read source files checking them off against dest files
    to_be_checked := make(ObjectPairChan, Config.Transfers)
    to_be_uploaded := make(ObjectPairChan, Config.Transfers)
    toBeChecked := make(ObjectPairChan, Config.Transfers)
    toBeUploaded := make(ObjectPairChan, Config.Transfers)

    var checkerWg sync.WaitGroup
    checkerWg.Add(Config.Checkers)
    for i := 0; i < Config.Checkers; i++ {
        go PairChecker(to_be_checked, to_be_uploaded, &checkerWg)
        go PairChecker(toBeChecked, toBeUploaded, &checkerWg)
    }

    var copierWg sync.WaitGroup
    copierWg.Add(Config.Transfers)
    for i := 0; i < Config.Transfers; i++ {
        go Copier(to_be_uploaded, fdst, &copierWg)
        if DoMove {
            go PairMover(toBeUploaded, fdst, &copierWg)
        } else {
            go PairCopier(toBeUploaded, fdst, &copierWg)
        }
    }

    go func() {
        for src := range fsrc.List() {
            remote := src.Remote()
            dst, found := delFiles[remote]
            if found {
                delete(delFiles, remote)
                to_be_checked <- ObjectPair{src, dst}
            dst, dstFound := delFiles[remote]
            if !Config.Filter.Include(remote, src.Size()) {
                Debug(src, "Excluding from sync")
                if dstFound && !Config.Filter.DeleteExcluded {
                    delete(delFiles, remote)
                }
            } else {
                // No need to check since doesn't exist
                to_be_uploaded <- ObjectPair{src, nil}
                if dstFound {
                    delete(delFiles, remote)
                    toBeChecked <- ObjectPair{src, dst}
                } else {
                    // No need to check since doesn't exist
                    toBeUploaded <- ObjectPair{src, nil}
                }
            }
        }
        close(to_be_checked)
        close(toBeChecked)
    }()

    Log(fdst, "Waiting for checks to finish")
    checkerWg.Wait()
    close(to_be_uploaded)
    close(toBeUploaded)
    Log(fdst, "Waiting for transfers to finish")
    copierWg.Wait()

    // Delete files if asked
    if Delete {
        if Stats.Errored() {
            Log(fdst, "Not deleting files as there were IO errors")
            ErrorLog(fdst, "Not deleting files as there were IO errors")
            return nil
        }

@@ -366,7 +490,50 @@ func Sync(fdst, fsrc Fs, Delete bool) error {
    return nil
}

// Checks the files in fsrc and fdst according to Size and MD5SUM
// Sync fsrc into fdst
func Sync(fdst, fsrc Fs) error {
    return syncCopyMove(fdst, fsrc, true, false)
}

// CopyDir copies fsrc into fdst
func CopyDir(fdst, fsrc Fs) error {
    return syncCopyMove(fdst, fsrc, false, false)
}

// MoveDir moves fsrc into fdst
func MoveDir(fdst, fsrc Fs) error {
    if Same(fdst, fsrc) {
        ErrorLog(fdst, "Nothing to do as source and destination are the same")
        return nil
    }

    // First attempt to use DirMover
    if fdstDirMover, ok := fdst.(DirMover); ok && fsrc.Name() == fdst.Name() {
        err := fdstDirMover.DirMove(fsrc)
        Debug(fdst, "Using server side directory move")
        switch err {
        case ErrorCantDirMove, ErrorDirExists:
            Debug(fdst, "Server side directory move failed - fallback to copy/delete: %v", err)
        case nil:
            Debug(fdst, "Server side directory move succeeded")
            return nil
        default:
            Stats.Error()
            ErrorLog(fdst, "Server side directory move failed: %v", err)
            return err
        }
    }

    // Now move the files
    err := syncCopyMove(fdst, fsrc, false, true)
    if err != nil || Stats.Errored() {
        ErrorLog(fdst, "Not deleting files as there were IO errors")
        return err
    }
    return Purge(fsrc)
}
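With syncCopyMove in place the public entry points differ only in their (Delete, DoMove) flags: Sync prunes extra destination files, CopyDir leaves them alone, and MoveDir transfers and then purges the source. A hedged usage sketch (the remote strings are placeholders and must resolve via the rclone config):

```go
package main

import (
    "log"

    "github.com/ncw/rclone/fs"
)

func main() {
    fsrc, err := fs.NewFs("/home/user/data") // placeholder source
    if err != nil {
        log.Fatal(err)
    }
    fdst, err := fs.NewFs("remote:backup") // placeholder destination
    if err != nil {
        log.Fatal(err)
    }
    // fs.CopyDir(fdst, fsrc) would copy without deleting;
    // fs.MoveDir(fdst, fsrc) would move and purge the source.
    if err := fs.Sync(fdst, fsrc); err != nil {
        log.Fatal(err)
    }
}
```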
// Check the files in fsrc and fdst according to Size and MD5SUM
func Check(fdst, fsrc Fs) error {
    Log(fdst, "Building file list")

@@ -392,13 +559,13 @@ func Check(fdst, fsrc Fs) error {
    Log(fdst, "%d files not in %v", len(dstFiles), fsrc)
    for _, dst := range dstFiles {
        Stats.Error()
        Log(dst, "File not in %v", fsrc)
        ErrorLog(dst, "File not in %v", fsrc)
    }

    Log(fsrc, "%d files not in %s", len(srcFiles), fdst)
    for _, src := range srcFiles {
        Stats.Error()
        Log(src, "File not in %v", fdst)
        ErrorLog(src, "File not in %v", fdst)
    }

    checks := make(chan []Object, Config.Transfers)
@@ -420,17 +587,17 @@ func Check(fdst, fsrc Fs) error {
            if src.Size() != dst.Size() {
                Stats.DoneChecking(src)
                Stats.Error()
                Log(src, "Sizes differ")
                ErrorLog(src, "Sizes differ")
                continue
            }
            same, err := CheckMd5sums(src, dst)
            same, _, err := CheckMd5sums(src, dst)
            Stats.DoneChecking(src)
            if err != nil {
                continue
            }
            if !same {
                Stats.Error()
                Log(src, "Md5sums differ")
                ErrorLog(src, "Md5sums differ")
            }
            Debug(src, "OK")
        }
@@ -446,7 +613,7 @@ func Check(fdst, fsrc Fs) error {
    return nil
}

// List the Fs to the supplied function
// ListFn lists the Fs to the supplied function
//
// Lists in parallel which may get them out of order
func ListFn(f Fs, fn func(Object)) error {
@@ -469,13 +636,15 @@ func ListFn(f Fs, fn func(Object)) error {
var outMutex sync.Mutex

// Synchronized fmt.Fprintf
func syncFprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
//
// Ignores errors from Fprintf
func syncFprintf(w io.Writer, format string, a ...interface{}) {
    outMutex.Lock()
    defer outMutex.Unlock()
    return fmt.Fprintf(w, format, a...)
    _, _ = fmt.Fprintf(w, format, a...)
}

// List the Fs to stdout
// List the Fs to the supplied writer
//
// Shows size and path
//
@@ -486,7 +655,7 @@ func List(f Fs, w io.Writer) error {
    })
}

// List the Fs to stdout
// ListLong lists the Fs to the supplied writer
//
// Shows size, mod time and path
//
@@ -496,11 +665,11 @@ func ListLong(f Fs, w io.Writer) error {
        Stats.Checking(o)
        modTime := o.ModTime()
        Stats.DoneChecking(o)
        syncFprintf(w, "%9d %s %s\n", o.Size(), modTime.Format("2006-01-02 15:04:05.000000000"), o.Remote())
        syncFprintf(w, "%9d %s %s\n", o.Size(), modTime.Local().Format("2006-01-02 15:04:05.000000000"), o.Remote())
    })
}

// List the Fs to stdout
// Md5sum lists the Fs to the supplied writer
//
// Produces the same output as the md5sum command
//
@@ -512,13 +681,22 @@ func Md5sum(f Fs, w io.Writer) error {
        Stats.DoneChecking(o)
        if err != nil {
            Debug(o, "Failed to read MD5: %v", err)
            md5sum = "UNKNOWN"
            md5sum = "ERROR"
        }
        syncFprintf(w, "%32s %s\n", md5sum, o.Remote())
    })
}

// List the directories/buckets/containers in the Fs to stdout
// Count counts the objects and their sizes in the Fs
func Count(f Fs) (objects int64, size int64, err error) {
    err = ListFn(f, func(o Object) {
        atomic.AddInt64(&objects, 1)
        atomic.AddInt64(&size, o.Size())
    })
    return
}

// ListDir lists the directories/buckets/containers in the Fs to the supplied writer
func ListDir(f Fs, w io.Writer) error {
    for dir := range f.ListDir() {
        syncFprintf(w, "%12d %13s %9d %s\n", dir.Bytes, dir.When.Format("2006-01-02 15:04:05"), dir.Count, dir.Name)
@@ -526,7 +704,7 @@ func ListDir(f Fs, w io.Writer) error {
    return nil
}

// Makes a destination directory or container
// Mkdir makes a destination directory or container
func Mkdir(f Fs) error {
    err := f.Mkdir()
    if err != nil {
@@ -536,7 +714,7 @@ func Mkdir(f Fs) error {
    return nil
}

// Removes a container but not if not empty
// Rmdir removes a container but not if not empty
func Rmdir(f Fs) error {
    if Config.DryRun {
        Log(f, "Not deleting as dry run is set")
@@ -550,7 +728,7 @@ func Rmdir(f Fs) error {
    return nil
}

// Removes a container and all of its contents
// Purge removes a container and all of its contents
//
// FIXME doesn't delete local directories
func Purge(f Fs) error {
@@ -20,10 +20,12 @@ import (
    "github.com/ncw/rclone/fstest"

    // Active file systems
    _ "github.com/ncw/rclone/amazonclouddrive"
    _ "github.com/ncw/rclone/drive"
    _ "github.com/ncw/rclone/dropbox"
    _ "github.com/ncw/rclone/googlecloudstorage"
    _ "github.com/ncw/rclone/local"
    _ "github.com/ncw/rclone/onedrive"
    _ "github.com/ncw/rclone/s3"
    _ "github.com/ncw/rclone/swift"
)
@@ -98,7 +100,7 @@ func TestCopyWithDryRun(t *testing.T) {
    WriteFile("sub dir/hello world", "hello world", t1)

    fs.Config.DryRun = true
    err := fs.Sync(fremote, flocal, false)
    err := fs.CopyDir(fremote, flocal)
    fs.Config.DryRun = false
    if err != nil {
        t.Fatalf("Copy failed: %v", err)
@@ -114,7 +116,7 @@ func TestCopyWithDryRun(t *testing.T) {

// Now without dry run
func TestCopy(t *testing.T) {
    err := fs.Sync(fremote, flocal, false)
    err := fs.CopyDir(fremote, flocal)
    if err != nil {
        t.Fatalf("Copy failed: %v", err)
    }
@@ -127,6 +129,28 @@ func TestCopy(t *testing.T) {
    fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}

// Test a server side copy if possible, or the backup path if not
func TestServerSideCopy(t *testing.T) {
    fremoteCopy, finaliseCopy, err := fstest.RandomRemote(*RemoteName, *SubDir)
    if err != nil {
        t.Fatalf("Failed to open remote copy %q: %v", *RemoteName, err)
    }
    defer finaliseCopy()
    t.Logf("Server side copy (if possible) %v -> %v", fremote, fremoteCopy)

    err = fs.CopyDir(fremoteCopy, fremote)
    if err != nil {
        t.Fatalf("Server Side Copy failed: %v", err)
    }

    items := []fstest.Item{
        {Path: "sub dir/hello world", Size: 11, ModTime: t1, Md5sum: "5eb63bbbe01eeed093cb22bb8f5acdc3"},
    }

    fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
    fstest.CheckListingWithPrecision(t, fremoteCopy, items, fs.Config.ModifyWindow)
}

func TestLsd(t *testing.T) {
    var buf bytes.Buffer
    err := fs.ListDir(fremote, &buf)
@@ -154,7 +178,7 @@ func TestCopyAfterDelete(t *testing.T) {
}

func TestCopyRedownload(t *testing.T) {
    err := fs.Sync(flocal, fremote, false)
    err := fs.CopyDir(flocal, fremote)
    if err != nil {
        t.Fatalf("Copy failed: %v", err)
    }
@@ -169,6 +193,112 @@ func TestCopyRedownload(t *testing.T) {
    cleanTempDir(t)
}

// Create a file and sync it. Change the last modified date and resync.
// If we're only doing sync by size and checksum, we expect nothing
// to be transferred on the second sync.
func TestSyncBasedOnCheckSum(t *testing.T) {
    cleanTempDir(t)
    fs.Config.CheckSum = true
    defer func() { fs.Config.CheckSum = false }()

    WriteFile("check sum", "", t1)
    local_items := []fstest.Item{
        {Path: "check sum", Size: 0, ModTime: t1, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
    }
    fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)

    fs.Stats.ResetCounters()
    err := fs.Sync(fremote, flocal)
    if err != nil {
        t.Fatalf("Initial sync failed: %v", err)
    }

    // We should have transferred exactly one file.
    if fs.Stats.GetTransfers() != 1 {
        t.Fatalf("Sync 1: want 1 transfer, got %d", fs.Stats.GetTransfers())
    }

    remote_items := local_items
    fstest.CheckListingWithPrecision(t, fremote, remote_items, fs.Config.ModifyWindow)

    err = os.Chtimes(localName+"/check sum", t2, t2)
    if err != nil {
        t.Fatalf("Chtimes failed: %v", err)
    }
    local_items = []fstest.Item{
        {Path: "check sum", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
    }
    fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)

    fs.Stats.ResetCounters()
    err = fs.Sync(fremote, flocal)
    if err != nil {
        t.Fatalf("Sync failed: %v", err)
    }

    // We should have transferred no files
    if fs.Stats.GetTransfers() != 0 {
        t.Fatalf("Sync 2: want 0 transfers, got %d", fs.Stats.GetTransfers())
    }

    fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)
    fstest.CheckListingWithPrecision(t, fremote, remote_items, fs.Config.ModifyWindow)

    cleanTempDir(t)
}

// Create a file and sync it. Change the last modified date and the
// file contents but not the size. If we're only doing sync by size
// only, we expect nothing to be transferred on the second sync.
func TestSyncSizeOnly(t *testing.T) {
    cleanTempDir(t)
    fs.Config.SizeOnly = true
    defer func() { fs.Config.SizeOnly = false }()

    WriteFile("sizeonly", "potato", t1)
    local_items := []fstest.Item{
        {Path: "sizeonly", Size: 6, ModTime: t1, Md5sum: "8ee2027983915ec78acc45027d874316"},
    }
    fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)

    fs.Stats.ResetCounters()
    err := fs.Sync(fremote, flocal)
    if err != nil {
        t.Fatalf("Initial sync failed: %v", err)
    }

    // We should have transferred exactly one file.
    if fs.Stats.GetTransfers() != 1 {
        t.Fatalf("Sync 1: want 1 transfer, got %d", fs.Stats.GetTransfers())
    }

    remote_items := local_items
    fstest.CheckListingWithPrecision(t, fremote, remote_items, fs.Config.ModifyWindow)

    // Update mtime, md5sum but not length of file
    WriteFile("sizeonly", "POTATO", t2)
    local_items = []fstest.Item{
        {Path: "sizeonly", Size: 6, ModTime: t2, Md5sum: "8ac6f27a282e4938125482607ccfb55f"},
    }
    fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)

    fs.Stats.ResetCounters()
    err = fs.Sync(fremote, flocal)
    if err != nil {
        t.Fatalf("Sync failed: %v", err)
    }

    // We should have transferred no files
    if fs.Stats.GetTransfers() != 0 {
        t.Fatalf("Sync 2: want 0 transfers, got %d", fs.Stats.GetTransfers())
    }

    fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)
    fstest.CheckListingWithPrecision(t, fremote, remote_items, fs.Config.ModifyWindow)

    cleanTempDir(t)
}

func TestSyncAfterChangingModtimeOnly(t *testing.T) {
    WriteFile("empty space", "", t1)

@@ -176,7 +306,7 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) {
    if err != nil {
        t.Fatalf("Chtimes failed: %v", err)
    }
    err = fs.Sync(fremote, flocal, true)
    err = fs.Sync(fremote, flocal)
    if err != nil {
        t.Fatalf("Sync failed: %v", err)
    }
@@ -189,7 +319,7 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) {

func TestSyncAfterAddingAFile(t *testing.T) {
    WriteFile("potato", "------------------------------------------------------------", t3)
    err := fs.Sync(fremote, flocal, true)
    err := fs.Sync(fremote, flocal)
    if err != nil {
        t.Fatalf("Sync failed: %v", err)
    }
@@ -203,7 +333,7 @@ func TestSyncAfterAddingAFile(t *testing.T) {

func TestSyncAfterChangingFilesSizeOnly(t *testing.T) {
    WriteFile("potato", "smaller but same date", t3)
    err := fs.Sync(fremote, flocal, true)
    err := fs.Sync(fremote, flocal)
    if err != nil {
        t.Fatalf("Sync failed: %v", err)
    }
@@ -217,8 +347,16 @@ func TestSyncAfterChangingFilesSizeOnly(t *testing.T) {

// Sync after changing a file's contents, modtime but not length
func TestSyncAfterChangingContentsOnly(t *testing.T) {
    if fremote.Precision() == fs.ModTimeNotSupported {
        t.Logf("ModTimeNotSupported so forcing file to be a different size")
        WriteFile("potato", "different size to make sure it syncs", t2)
        err := fs.Sync(fremote, flocal)
        if err != nil {
            t.Fatalf("Sync failed: %v", err)
        }
    }
    WriteFile("potato", "SMALLER BUT SAME DATE", t2)
    err := fs.Sync(fremote, flocal, true)
    err := fs.Sync(fremote, flocal)
    if err != nil {
        t.Fatalf("Sync failed: %v", err)
    }
@@ -238,7 +376,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileDryRun(t *testing.T) {
        t.Fatalf("Remove failed: %v", err)
    }
    fs.Config.DryRun = true
    err = fs.Sync(fremote, flocal, true)
    err = fs.Sync(fremote, flocal)
    fs.Config.DryRun = false
    if err != nil {
        t.Fatalf("Sync failed: %v", err)
@@ -258,7 +396,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileDryRun(t *testing.T) {

// Sync after removing a file and adding a file
func TestSyncAfterRemovingAFileAndAddingAFile(t *testing.T) {
    err := fs.Sync(fremote, flocal, true)
    err := fs.Sync(fremote, flocal)
    if err != nil {
        t.Fatalf("Sync failed: %v", err)
    }
@@ -270,6 +408,108 @@ func TestSyncAfterRemovingAFileAndAddingAFile(t *testing.T) {
    fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}

// Test with exclude
func TestSyncWithExclude(t *testing.T) {
    WriteFile("enormous", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes
    fs.Config.Filter.MaxSize = 80
    defer func() {
        fs.Config.Filter.MaxSize = 0
    }()
    err := fs.Sync(fremote, flocal)
    if err != nil {
        t.Fatalf("Sync failed: %v", err)
    }
    items := []fstest.Item{
        {Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
        {Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
    }
    fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}

// Test with exclude and delete excluded
func TestSyncWithExcludeAndDeleleteExcluded(t *testing.T) {
    fs.Config.Filter.MaxSize = 40
    fs.Config.Filter.DeleteExcluded = true
    reset := func() {
        fs.Config.Filter.MaxSize = 0
        fs.Config.Filter.DeleteExcluded = false
    }
    defer reset()
    err := fs.Sync(fremote, flocal)
    if err != nil {
        t.Fatalf("Sync failed: %v", err)
    }
    items := []fstest.Item{
        {Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
    }
    fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)

    // Tidy up
    reset()
    err = os.Remove(localName + "/enormous")
    if err != nil {
        t.Fatalf("Remove failed: %v", err)
    }
    err = fs.Sync(fremote, flocal)
    if err != nil {
        t.Fatalf("Sync failed: %v", err)
    }
    items = []fstest.Item{
        {Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
        {Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
    }
    fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
}

// Test a server side move if possible, or the backup path if not
func TestServerSideMove(t *testing.T) {
    fremoteMove, finaliseMove, err := fstest.RandomRemote(*RemoteName, *SubDir)
    if err != nil {
        t.Fatalf("Failed to open remote move %q: %v", *RemoteName, err)
    }
    defer finaliseMove()
    t.Logf("Server side move (if possible) %v -> %v", fremote, fremoteMove)

    // Start with a copy
    err = fs.CopyDir(fremoteMove, fremote)
    if err != nil {
        t.Fatalf("Server Side Copy failed: %v", err)
    }

    // Remove one file
    obj := fremoteMove.NewFsObject("potato2")
    if obj == nil {
        t.Fatalf("Failed to find potato2")
    }
    err = obj.Remove()
    if err != nil {
        t.Fatalf("Failed to remove object: %v", err)
    }

    // Do server side move
    err = fs.MoveDir(fremoteMove, fremote)
    if err != nil {
        t.Fatalf("Server Side Move failed: %v", err)
    }

    items := []fstest.Item{
        {Path: "empty space", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
        {Path: "potato2", Size: 60, ModTime: t1, Md5sum: "d6548b156ea68a4e003e786df99eee76"},
    }

    fstest.CheckListingWithPrecision(t, fremote, items[:0], fs.Config.ModifyWindow)
    fstest.CheckListingWithPrecision(t, fremoteMove, items, fs.Config.ModifyWindow)

    // Move it back again, dst does not exist this time
    err = fs.MoveDir(fremote, fremoteMove)
    if err != nil {
        t.Fatalf("Server Side Move 2 failed: %v", err)
    }

    fstest.CheckListingWithPrecision(t, fremote, items, fs.Config.ModifyWindow)
    fstest.CheckListingWithPrecision(t, fremoteMove, items[:0], fs.Config.ModifyWindow)
}

func TestLs(t *testing.T) {
    var buf bytes.Buffer
    err := fs.List(fremote, &buf)
@@ -292,13 +532,38 @@ func TestLsLong(t *testing.T) {
        t.Fatalf("List failed: %v", err)
    }
    res := buf.String()
    m1 := regexp.MustCompile(`(?m)^ 0 2011-12-25 12:59:59\.\d{9} empty space$`)
    if !m1.MatchString(res) {
        t.Errorf("empty space missing: %q", res)
    lines := strings.Split(strings.Trim(res, "\n"), "\n")
    if len(lines) != 2 {
        t.Fatalf("Wrong number of lines in list: %q", lines)
    }
    m2 := regexp.MustCompile(`(?m)^ 60 2001-02-03 04:05:06\.\d{9} potato2$`)
    if !m2.MatchString(res) {

    timeFormat := "2006-01-02 15:04:05.000000000"
    precision := fremote.Precision()
    location := time.Now().Location()
    checkTime := func(m, filename string, expected time.Time) {
        modTime, err := time.ParseInLocation(timeFormat, m, location) // parse as localtime
        if err != nil {
            t.Errorf("Error parsing %q: %v", m, err)
        } else {
            dt, ok := fstest.CheckTimeEqualWithPrecision(expected, modTime, precision)
            if !ok {
                t.Errorf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", filename, dt, precision, modTime, expected, precision)
            }
        }
    }

    m1 := regexp.MustCompile(`(?m)^ 0 (\d{4}-\d\d-\d\d \d\d:\d\d:\d\d\.\d{9}) empty space$`)
    if ms := m1.FindStringSubmatch(res); ms == nil {
        t.Errorf("empty space missing: %q", res)
    } else {
        checkTime(ms[1], "empty space", t2.Local())
    }

    m2 := regexp.MustCompile(`(?m)^ 60 (\d{4}-\d\d-\d\d \d\d:\d\d:\d\d\.\d{9}) potato2$`)
    if ms := m2.FindStringSubmatch(res); ms == nil {
        t.Errorf("potato2 missing: %q", res)
    } else {
        checkTime(ms[1], "potato2", t1.Local())
    }
}

@@ -309,15 +574,31 @@ func TestMd5sum(t *testing.T) {
        t.Fatalf("List failed: %v", err)
    }
    res := buf.String()
    if !strings.Contains(res, "d41d8cd98f00b204e9800998ecf8427e empty space\n") {
    if !strings.Contains(res, "d41d8cd98f00b204e9800998ecf8427e empty space\n") &&
        !strings.Contains(res, " empty space\n") {
        t.Errorf("empty space missing: %q", res)
    }
    if !strings.Contains(res, "6548b156ea68a4e003e786df99eee76 potato2\n") {
    if !strings.Contains(res, "6548b156ea68a4e003e786df99eee76 potato2\n") &&
        !strings.Contains(res, " potato2\n") {
        t.Errorf("potato2 missing: %q", res)
    }
}

func TestCount(t *testing.T) {
    objects, size, err := fs.Count(fremote)
    if err != nil {
        t.Fatalf("Count failed: %v", err)
    }
    if objects != 2 {
        t.Errorf("want 2 objects got %d", objects)
    }
    if size != 60 {
        t.Errorf("want size 60 got %d", size)
    }
}

func TestCheck(t *testing.T) {
    // FIXME
}

// Clean the temporary directory
@@ -8,6 +8,8 @@ TestS3:
TestDrive:
TestGoogleCloudStorage:
TestDropbox:
TestAmazonCloudDrive:
TestOneDrive:
"

function test_remote {
@@ -1,3 +1,4 @@
package fs

const Version = "v1.13"
// Version of rclone
const Version = "v1.24"
@@ -1,5 +1,4 @@
// Utilities for testing the fs

// Package fstest provides utilities for testing the Fs
package fstest

// FIXME put name of test FS in Fs structure
@@ -23,22 +22,34 @@ func init() {

}

// Represents an item for checking
// Item represents an item for checking
type Item struct {
    Path    string
    Md5sum  string
    ModTime time.Time
    Size    int64
    WinPath string
}

// check the mod time to the given precision
func (i *Item) CheckModTime(t *testing.T, obj fs.Object, modTime time.Time, precision time.Duration) {
    dt := modTime.Sub(i.ModTime)
// CheckTimeEqualWithPrecision checks the times are equal within the
// precision, returns the delta and a flag
func CheckTimeEqualWithPrecision(t0, t1 time.Time, precision time.Duration) (time.Duration, bool) {
    dt := t0.Sub(t1)
    if dt >= precision || dt <= -precision {
        return dt, false
    }
    return dt, true
}

// CheckModTime checks the mod time to the given precision
func (i *Item) CheckModTime(t *testing.T, obj fs.Object, modTime time.Time, precision time.Duration) {
    dt, ok := CheckTimeEqualWithPrecision(modTime, i.ModTime, precision)
    if !ok {
        t.Errorf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", obj.Remote(), dt, precision, modTime, i.ModTime, precision)
    }
}

// Check checks all the attributes of the object are correct
func (i *Item) Check(t *testing.T, obj fs.Object, precision time.Duration) {
    if obj == nil {
        t.Fatalf("Object is nil")
@@ -48,7 +59,7 @@ func (i *Item) Check(t *testing.T, obj fs.Object, precision time.Duration) {
    if err != nil {
        t.Fatalf("Failed to read md5sum for %q: %v", obj.Remote(), err)
    }
    if i.Md5sum != Md5sum {
    if !fs.Md5sumsEqual(i.Md5sum, Md5sum) {
        t.Errorf("%s: Md5sum incorrect - expecting %q got %q", obj.Remote(), i.Md5sum, Md5sum)
    }
    if i.Size != obj.Size() {
@@ -57,37 +68,44 @@ func (i *Item) Check(t *testing.T, obj fs.Object, precision time.Duration) {
    i.CheckModTime(t, obj, obj.ModTime(), precision)
}

// Represents all items for checking
// Items represents all items for checking
type Items struct {
    byName map[string]*Item
    items  []Item
    byName    map[string]*Item
    byNameAlt map[string]*Item
    items     []Item
}

// Make an Items
// NewItems makes an Items
func NewItems(items []Item) *Items {
    is := &Items{
        byName: make(map[string]*Item),
        items:  items,
        byName:    make(map[string]*Item),
        byNameAlt: make(map[string]*Item),
        items:     items,
    }
    // Fill up byName
    for i := range items {
        is.byName[items[i].Path] = &items[i]
        is.byNameAlt[items[i].WinPath] = &items[i]
    }
    return is
}

// Check off an item
// Find checks off an item
func (is *Items) Find(t *testing.T, obj fs.Object, precision time.Duration) {
    i, ok := is.byName[obj.Remote()]
    if !ok {
        t.Errorf("Unexpected file %q", obj.Remote())
        return
        i, ok = is.byNameAlt[obj.Remote()]
        if !ok {
            t.Errorf("Unexpected file %q", obj.Remote())
            return
        }
    }
    delete(is.byName, obj.Remote())
    delete(is.byName, i.Path)
    delete(is.byName, i.WinPath)
    i.Check(t, obj, precision)
}

// Check all done
// Done checks all finished
func (is *Items) Done(t *testing.T) {
    if len(is.byName) != 0 {
        for name := range is.byName {
@@ -97,10 +115,24 @@ func (is *Items) Done(t *testing.T) {
    }
}

// Checks the fs to see if it has the expected contents
// CheckListingWithPrecision checks the fs to see if it has the
// expected contents with the given precision.
func CheckListingWithPrecision(t *testing.T, f fs.Fs, items []Item, precision time.Duration) {
    is := NewItems(items)
    for obj := range f.List() {
    oldErrors := fs.Stats.GetErrors()
    var objs []fs.Object
    for i := 1; i <= 5; i++ {
        objs = nil
        for obj := range f.List() {
            objs = append(objs, obj)
        }
        if len(objs) == len(items) {
            break
        }
        t.Logf("Sleeping for 1 second for list eventual consistency: %d/5", i)
        time.Sleep(1 * time.Second)
    }
    for _, obj := range objs {
        if obj == nil {
            t.Errorf("Unexpected nil in List()")
            continue
@@ -108,15 +140,19 @@ func CheckListingWithPrecision(t *testing.T, f fs.Fs, items []Item, precision ti
        is.Find(t, obj, precision)
    }
    is.Done(t)
    // Don't notice an error when listing an empty directory
    if len(items) == 0 && oldErrors == 0 && fs.Stats.GetErrors() == 1 {
        fs.Stats.ResetErrors()
    }
}

// Checks the fs to see if it has the expected contents
// CheckListing checks the fs to see if it has the expected contents
func CheckListing(t *testing.T, f fs.Fs, items []Item) {
    precision := f.Precision()
    CheckListingWithPrecision(t, f, items, precision)
}

// Parse a time string or explode
// Time parses a time string or logs a fatal error
func Time(timeString string) time.Time {
    t, err := time.Parse(time.RFC3339Nano, timeString)
    if err != nil {
@@ -125,7 +161,7 @@ func Time(timeString string) time.Time {
    return t
}

// Create a random string
// RandomString creates a random string for test purposes
func RandomString(n int) string {
    source := "abcdefghijklmnopqrstuvwxyz0123456789"
    out := make([]byte, n)
@@ -135,7 +171,7 @@ func RandomString(n int) string {
    return string(out)
}

// Creates a temporary directory name for local remotes
// LocalRemote creates a temporary directory name for local remotes
func LocalRemote() (path string, err error) {
    path, err = ioutil.TempDir("", "rclone")
    if err == nil {
@@ -146,7 +182,7 @@ func LocalRemote() (path string, err error) {
    return
}

// Make a random bucket or subdirectory name
// RandomRemoteName makes a random bucket or subdirectory name
//
// Returns a random remote name plus the leaf name
func RandomRemoteName(remoteName string) (string, string, error) {
@@ -169,7 +205,7 @@ func RandomRemoteName(remoteName string) (string, string, error) {
    return remoteName, leafName, nil
}

// Make a random bucket or subdirectory on the remote
// RandomRemote makes a random bucket or subdirectory on the remote
//
// Call the finalise function returned to Purge the fs at the end (and
// the parent if necessary)
@@ -208,6 +244,7 @@ func RandomRemote(remoteName string, subdir bool) (fs.Fs, func(), error) {
    return remote, finalise, nil
}

// TestMkdir tests Mkdir works
func TestMkdir(t *testing.T, remote fs.Fs) {
    err := fs.Mkdir(remote)
    if err != nil {
@@ -216,6 +253,7 @@ func TestMkdir(t *testing.T, remote fs.Fs) {
    CheckListing(t, remote, []Item{})
}

// TestPurge tests Purge works
func TestPurge(t *testing.T, remote fs.Fs) {
    err := fs.Purge(remote)
    if err != nil {
@@ -224,6 +262,7 @@ func TestPurge(t *testing.T, remote fs.Fs) {
    CheckListing(t, remote, []Item{})
}

// TestRmdir tests Rmdir works
func TestRmdir(t *testing.T, remote fs.Fs) {
    err := fs.Rmdir(remote)
    if err != nil {
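The newly extracted CheckTimeEqualWithPrecision is also usable on its own when a test wants the tolerant comparison without building an Item; an illustrative test, not part of rclone:

```go
package fstest_test

import (
    "testing"
    "time"

    "github.com/ncw/rclone/fstest"
)

// TestWithinPrecision shows the tolerant time comparison by itself:
// a 500ms delta passes against a 1s precision.
func TestWithinPrecision(t *testing.T) {
    want := time.Date(2011, 12, 25, 12, 59, 59, 0, time.UTC)
    got := want.Add(500 * time.Millisecond)
    if dt, ok := fstest.CheckTimeEqualWithPrecision(got, want, time.Second); !ok {
        t.Errorf("times differ by %s, more than the 1s precision", dt)
    }
}
```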
@@ -1,10 +1,15 @@
// Generic tests for testing the Fs and Object interfaces
// Package fstests provides generic tests for testing the Fs and Object interfaces
//
// Run go generate to write the tests for the remotes
package fstests

//go:generate go run gen_tests.go

import (
"bytes"
"crypto/md5"
"encoding/hex"
"flag"
"io"
"log"
"os"
@@ -17,26 +22,39 @@ import (
)

var (
remote fs.Fs
remote fs.Fs
// RemoteName should be set to the name of the remote for testing
RemoteName = ""
subRemoteName = ""
subRemoteLeaf = ""
NilObject fs.Object
file1 = fstest.Item{
// NilObject should be set to a nil Object from the Fs under test
NilObject fs.Object
file1 = fstest.Item{
ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
Path: "file name.txt",
}
file2 = fstest.Item{
ModTime: fstest.Time("2001-02-03T04:05:10.123123123Z"),
Path: `hello? sausage/êé/Hello, 世界/ " ' @ < > & ?/z.txt`,
WinPath: `hello_ sausage/êé/Hello, 世界/ _ ' @ _ _ & _/z.txt`,
}
dumpHeaders = flag.Bool("dump-headers", false, "Dump HTTP headers - may contain sensitive info")
dumpBodies = flag.Bool("dump-bodies", false, "Dump HTTP headers and bodies - may contain sensitive info")
)

func init() {
flag.StringVar(&RemoteName, "remote", "", "Set this to override the default remote name (eg s3:)")
}

// TestInit tests basic initialisation
func TestInit(t *testing.T) {
var err error
fs.LoadConfig()
fs.Config.Verbose = false
fs.Config.Quiet = true
fs.Config.DumpHeaders = *dumpHeaders
fs.Config.DumpBodies = *dumpBodies
t.Logf("Using remote %q", RemoteName)
if RemoteName == "" {
RemoteName, err = fstest.LocalRemote()
if err != nil {
@@ -49,7 +67,7 @@ func TestInit(t *testing.T) {
}

remote, err = fs.NewFs(subRemoteName)
if err == fs.NotFoundInConfigFile {
if err == fs.ErrorNotFoundInConfigFile {
log.Printf("Didn't find %q in config file - skipping tests", RemoteName)
return
}
@@ -65,8 +83,7 @@ func skipIfNotOk(t *testing.T) {
}
}

// String returns a description of the FS

// TestFsString tests the String method
func TestFsString(t *testing.T) {
skipIfNotOk(t)
str := remote.String()
@@ -75,18 +92,13 @@ func TestFsString(t *testing.T) {
}
}

type TestFile struct {
ModTime time.Time
Path string
Size int64
Md5sum string
}

// TestFsRmdirEmpty tests deleting an empty directory
func TestFsRmdirEmpty(t *testing.T) {
skipIfNotOk(t)
fstest.TestRmdir(t, remote)
}

// TestFsRmdirNotFound tests deleting a non existent directory
func TestFsRmdirNotFound(t *testing.T) {
skipIfNotOk(t)
err := remote.Rmdir()
@@ -95,17 +107,20 @@ func TestFsRmdirNotFound(t *testing.T) {
}
}

// TestFsMkdir tests making a directory
func TestFsMkdir(t *testing.T) {
skipIfNotOk(t)
fstest.TestMkdir(t, remote)
fstest.TestMkdir(t, remote)
}

// TestFsListEmpty tests listing an empty directory
func TestFsListEmpty(t *testing.T) {
skipIfNotOk(t)
fstest.CheckListing(t, remote, []fstest.Item{})
}

// TestFsListDirEmpty tests listing the directories from an empty directory
func TestFsListDirEmpty(t *testing.T) {
skipIfNotOk(t)
for obj := range remote.ListDir() {
@@ -113,6 +128,7 @@ func TestFsListDirEmpty(t *testing.T) {
}
}

// TestFsNewFsObjectNotFound tests not finding an object
func TestFsNewFsObjectNotFound(t *testing.T) {
skipIfNotOk(t)
if remote.NewFsObject("potato") != nil {
@@ -145,21 +161,24 @@ func testPut(t *testing.T, file *fstest.Item) {
file.Check(t, obj, remote.Precision())
}

// TestFsPutFile1 tests putting a file
func TestFsPutFile1(t *testing.T) {
skipIfNotOk(t)
testPut(t, &file1)
}

// TestFsPutFile2 tests putting a file into a subdirectory
func TestFsPutFile2(t *testing.T) {
skipIfNotOk(t)
testPut(t, &file2)
}

// TestFsListDirFile2 tests the files are correctly uploaded
func TestFsListDirFile2(t *testing.T) {
skipIfNotOk(t)
found := false
for obj := range remote.ListDir() {
if obj.Name != `hello? sausage` {
if obj.Name != `hello? sausage` && obj.Name != `hello_ sausage` {
t.Errorf("Found unexpected item %q", obj.Name)
} else {
found = true
@@ -170,6 +189,7 @@ func TestFsListDirFile2(t *testing.T) {
}
}

// TestFsListDirRoot tests that DirList works in the root
func TestFsListDirRoot(t *testing.T) {
skipIfNotOk(t)
rootRemote, err := fs.NewFs(RemoteName)
@@ -187,6 +207,7 @@ func TestFsListDirRoot(t *testing.T) {
}
}

// TestFsListRoot tests List works in the root
func TestFsListRoot(t *testing.T) {
skipIfNotOk(t)
rootRemote, err := fs.NewFs(RemoteName)
@@ -195,17 +216,18 @@ func TestFsListRoot(t *testing.T) {
}
// Should either find file1 and file2 or nothing
found1 := false
file1 := subRemoteLeaf + "/" + file1.Path
f1 := subRemoteLeaf + "/" + file1.Path
found2 := false
file2 := subRemoteLeaf + "/" + file2.Path
f2 := subRemoteLeaf + "/" + file2.Path
f2Alt := subRemoteLeaf + "/" + file2.WinPath
count := 0
errors := fs.Stats.GetErrors()
for obj := range rootRemote.List() {
count++
if obj.Remote() == file1 {
if obj.Remote() == f1 {
found1 = true
}
if obj.Remote() == file2 {
if obj.Remote() == f2 || obj.Remote() == f2Alt {
found2 = true
}
}
@@ -222,25 +244,157 @@ func TestFsListRoot(t *testing.T) {
}
return
}
t.Errorf("Didn't find %q (%v) and %q (%v) or no files (count %d)", file1, found1, file2, found2, count)
t.Errorf("Didn't find %q (%v) and %q (%v) or no files (count %d)", f1, found1, f2, found2, count)
}

// TestFsListFile1 tests file present
func TestFsListFile1(t *testing.T) {
skipIfNotOk(t)
fstest.CheckListing(t, remote, []fstest.Item{file1, file2})
}

// TestFsNewFsObject tests NewFsObject
func TestFsNewFsObject(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
file1.Check(t, obj, remote.Precision())
}

// TestFsListFile1and2 tests two files present
func TestFsListFile1and2(t *testing.T) {
skipIfNotOk(t)
fstest.CheckListing(t, remote, []fstest.Item{file1, file2})
}

// TestFsCopy tests Copy
func TestFsCopy(t *testing.T) {
skipIfNotOk(t)

// Check have Copy
_, ok := remote.(fs.Copier)
if !ok {
t.Skip("FS has no Copier interface")
}

var file1Copy = file1
file1Copy.Path += "-copy"

// do the copy
src := findObject(t, file1.Path)
dst, err := remote.(fs.Copier).Copy(src, file1Copy.Path)
if err != nil {
t.Fatalf("Copy failed: %v (%#v)", err, err)
}

// check file exists in new listing
fstest.CheckListing(t, remote, []fstest.Item{file1, file2, file1Copy})

// Check dst lightly - list above has checked ModTime/Md5sum
if dst.Remote() != file1Copy.Path {
t.Errorf("object path: want %q got %q", file1Copy.Path, dst.Remote())
}

// Delete copy
err = dst.Remove()
if err != nil {
t.Fatal("Remove copy error", err)
}

}

// TestFsMove tests Move
func TestFsMove(t *testing.T) {
skipIfNotOk(t)

// Check have Move
_, ok := remote.(fs.Mover)
if !ok {
t.Skip("FS has no Mover interface")
}

var file1Move = file1
file1Move.Path += "-move"

// do the move
src := findObject(t, file1.Path)
dst, err := remote.(fs.Mover).Move(src, file1Move.Path)
if err != nil {
t.Fatalf("Move failed: %v", err)
}

// check file exists in new listing
fstest.CheckListing(t, remote, []fstest.Item{file2, file1Move})

// Check dst lightly - list above has checked ModTime/Md5sum
if dst.Remote() != file1Move.Path {
t.Errorf("object path: want %q got %q", file1Move.Path, dst.Remote())
}

// move it back
src = findObject(t, file1Move.Path)
_, err = remote.(fs.Mover).Move(src, file1.Path)
if err != nil {
t.Errorf("Move failed: %v", err)
}

// check file exists in new listing
fstest.CheckListing(t, remote, []fstest.Item{file2, file1})
}

// Move src to this remote using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists

// TestFsDirMove tests DirMove
func TestFsDirMove(t *testing.T) {
skipIfNotOk(t)

// Check have DirMove
_, ok := remote.(fs.DirMover)
if !ok {
t.Skip("FS has no DirMover interface")
}

// Check it can't move onto itself
err := remote.(fs.DirMover).DirMove(remote)
if err != fs.ErrorDirExists {
t.Errorf("Expecting fs.ErrorDirExists got: %v", err)
}

// new remote
newRemote, removeNewRemote, err := fstest.RandomRemote(RemoteName, false)
if err != nil {
t.Fatalf("Failed to create remote: %v", err)
}
defer removeNewRemote()

// try the move
err = newRemote.(fs.DirMover).DirMove(remote)
if err != nil {
t.Errorf("Failed to DirMove: %v", err)
}

// check remotes
// FIXME: Prints errors.
fstest.CheckListing(t, remote, []fstest.Item{})
fstest.CheckListing(t, newRemote, []fstest.Item{file2, file1})

// move it back
err = remote.(fs.DirMover).DirMove(newRemote)
if err != nil {
t.Errorf("Failed to DirMove: %v", err)
}

// check remotes
fstest.CheckListing(t, remote, []fstest.Item{file2, file1})
fstest.CheckListing(t, newRemote, []fstest.Item{})
}

// TestFsRmdirFull tests removing a non empty directory
func TestFsRmdirFull(t *testing.T) {
skipIfNotOk(t)
err := remote.Rmdir()
@@ -249,15 +403,20 @@ func TestFsRmdirFull(t *testing.T) {
}
}

// TestFsPrecision tests the Precision of the Fs
func TestFsPrecision(t *testing.T) {
skipIfNotOk(t)
precision := remote.Precision()
if precision == fs.ModTimeNotSupported {
return
}
if precision > time.Second || precision < 0 {
t.Fatalf("Precision out of range %v", precision)
}
// FIXME check expected precision
}

// TestObjectString tests the Object String method
func TestObjectString(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
@@ -272,6 +431,7 @@ func TestObjectString(t *testing.T) {
}
}

// TestObjectFs tests the object can be found
func TestObjectFs(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
@@ -280,6 +440,7 @@ func TestObjectFs(t *testing.T) {
}
}

// TestObjectRemote tests the Remote is correct
func TestObjectRemote(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
@@ -288,6 +449,7 @@ func TestObjectRemote(t *testing.T) {
}
}

// TestObjectMd5sum tests the MD5SUM of the object is correct
func TestObjectMd5sum(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
@@ -295,17 +457,19 @@ func TestObjectMd5sum(t *testing.T) {
if err != nil {
t.Errorf("Error in Md5sum: %v", err)
}
if Md5sum != file1.Md5sum {
if !fs.Md5sumsEqual(Md5sum, file1.Md5sum) {
t.Errorf("Md5sum is wrong %v != %v", Md5sum, file1.Md5sum)
}
}

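The hunk above swaps a plain string comparison for fs.Md5sumsEqual so that a remote which cannot supply an MD5 sum (returning an empty string) no longer fails the check. A minimal sketch of such a helper, assuming the convention that an empty sum means "unknown" and matches anything (hypothetical code, not rclone's implementation):

// md5sumsEqual reports whether two MD5 sums match, treating an
// empty sum as unknown and therefore equal to anything.
func md5sumsEqual(src, dst string) bool {
	if src == "" || dst == "" {
		return true
	}
	return src == dst
}
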
// TestObjectModTime tests the ModTime of the object is correct
func TestObjectModTime(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
file1.CheckModTime(t, obj, obj.ModTime(), remote.Precision())
}

// TestObjectSetModTime tests that SetModTime works
func TestObjectSetModTime(t *testing.T) {
skipIfNotOk(t)
newModTime := fstest.Time("2011-12-13T14:15:16.999999999Z")
@@ -317,6 +481,7 @@ func TestObjectSetModTime(t *testing.T) {
TestObjectModTime(t)
}

// TestObjectSize tests that Size works
func TestObjectSize(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
@@ -325,6 +490,7 @@ func TestObjectSize(t *testing.T) {
}
}

// TestObjectOpen tests that Open works
func TestObjectOpen(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
@@ -345,11 +511,12 @@ func TestObjectOpen(t *testing.T) {
t.Fatalf("in.Close() return error: %v", err)
}
Md5sum := hex.EncodeToString(hash.Sum(nil))
if Md5sum != file1.Md5sum {
if !fs.Md5sumsEqual(Md5sum, file1.Md5sum) {
t.Errorf("Md5sum is wrong %v != %v", Md5sum, file1.Md5sum)
}
}

// TestObjectUpdate tests that Update works
func TestObjectUpdate(t *testing.T) {
skipIfNotOk(t)
buf := bytes.NewBufferString(fstest.RandomString(200))
@@ -369,6 +536,7 @@ func TestObjectUpdate(t *testing.T) {
file1.Check(t, obj, remote.Precision())
}

// TestObjectStorable tests that Storable works
func TestObjectStorable(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
@@ -377,6 +545,7 @@ func TestObjectStorable(t *testing.T) {
}
}

// TestLimitedFs tests that a LimitedFs is created
func TestLimitedFs(t *testing.T) {
skipIfNotOk(t)
remoteName := subRemoteName + "/" + file2.Path
@@ -393,6 +562,7 @@ func TestLimitedFs(t *testing.T) {
}
}

// TestLimitedFsNotFound tests that a LimitedFs is not created if no object
func TestLimitedFsNotFound(t *testing.T) {
skipIfNotOk(t)
remoteName := subRemoteName + "/not found.txt"
@@ -407,6 +577,7 @@ func TestLimitedFsNotFound(t *testing.T) {
}
}

// TestObjectRemove tests Remove
func TestObjectRemove(t *testing.T) {
skipIfNotOk(t)
obj := findObject(t, file1.Path)
@@ -417,6 +588,7 @@ func TestObjectRemove(t *testing.T) {
fstest.CheckListing(t, remote, []fstest.Item{file2})
}

// TestObjectPurge tests Purge
func TestObjectPurge(t *testing.T) {
skipIfNotOk(t)
fstest.TestPurge(t, remote)
@@ -426,6 +598,7 @@ func TestObjectPurge(t *testing.T) {
}
}

// TestFinalise tidies up after the previous tests
func TestFinalise(t *testing.T) {
skipIfNotOk(t)
if strings.HasPrefix(RemoteName, "/") {

@@ -45,7 +45,6 @@ type Data struct {
FsName string
UpperFsName string
TestName string
ObjectName string
Fns []string
}

@@ -65,7 +64,7 @@ import (
)

func init() {
fstests.NilObject = fs.Object((*{{ .FsName }}.FsObject{{ .ObjectName }})(nil))
fstests.NilObject = fs.Object((*{{ .FsName }}.Object)(nil))
fstests.RemoteName = "{{ .TestName }}"
}

@@ -79,24 +78,16 @@ func generateTestProgram(t *template.Template, fns []string, Fsname string) {
fsname := strings.ToLower(Fsname)
TestName := "Test" + Fsname + ":"
outfile := "../../" + fsname + "/" + fsname + "_test.go"
// Find last capitalised group to be object name
matcher := regexp.MustCompile(`([A-Z][a-z0-9]+)$`)
matches := matcher.FindStringSubmatch(Fsname)
if len(matches) == 0 {
log.Fatalf("Couldn't find object name in %q", Fsname)
}
ObjectName := matches[1]

if fsname == "local" {
TestName = ""
}

data := Data{
Regenerate: "go run gen_tests.go or make gen_tests",
Regenerate: "make gen_tests",
FsName: fsname,
UpperFsName: Fsname,
TestName: TestName,
ObjectName: ObjectName,
Fns: fns,
}

@@ -139,5 +130,7 @@ func main() {
generateTestProgram(t, fns, "Drive")
generateTestProgram(t, fns, "GoogleCloudStorage")
generateTestProgram(t, fns, "Dropbox")
generateTestProgram(t, fns, "AmazonCloudDrive")
generateTestProgram(t, fns, "OneDrive")
log.Printf("Done")
}

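The regexp in generateTestProgram above peels the trailing capitalised word off the remote's name to use as the object name. A quick illustration of how `([A-Z][a-z0-9]+)$` behaves (assuming the standard regexp package, as imported by this file):

matcher := regexp.MustCompile(`([A-Z][a-z0-9]+)$`)
fmt.Println(matcher.FindStringSubmatch("GoogleCloudStorage")) // [Storage Storage]
fmt.Println(matcher.FindStringSubmatch("Drive"))              // [Drive Drive]
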
@@ -1,137 +0,0 @@
// Common authentication between Google Drive and Google Cloud Storage
package googleauth

import (
"encoding/json"
"fmt"
"log"

"code.google.com/p/goauth2/oauth"
"github.com/ncw/rclone/fs"
)

// A token cache to save the token in the config file section named
type TokenCache string

// Get the token from the config file - returns an error if it isn't present
func (name TokenCache) Token() (*oauth.Token, error) {
tokenString, err := fs.ConfigFile.GetValue(string(name), "token")
if err != nil {
return nil, err
}
if tokenString == "" {
return nil, fmt.Errorf("Empty token found - please reconfigure")
}
token := new(oauth.Token)
err = json.Unmarshal([]byte(tokenString), token)
if err != nil {
return nil, err
}
return token, nil

}

// Save the token to the config file
//
// This saves the config file if it changes
func (name TokenCache) PutToken(token *oauth.Token) error {
tokenBytes, err := json.Marshal(token)
if err != nil {
return err
}
tokenString := string(tokenBytes)
old := fs.ConfigFile.MustValue(string(name), "token")
if tokenString != old {
fs.ConfigFile.SetValue(string(name), "token", tokenString)
fs.SaveConfig()
}
return nil
}

// Auth contains information to authenticate an app against google services
type Auth struct {
Scope string
DefaultClientId string
DefaultClientSecret string
}

// Makes a new transport using authorisation from the config
//
// Doesn't have a token yet
func (auth *Auth) newTransport(name string) (*oauth.Transport, error) {
clientId := fs.ConfigFile.MustValue(name, "client_id")
if clientId == "" {
clientId = auth.DefaultClientId
}
clientSecret := fs.ConfigFile.MustValue(name, "client_secret")
if clientSecret == "" {
clientSecret = auth.DefaultClientSecret
}

// Settings for authorization.
var config = &oauth.Config{
ClientId: clientId,
ClientSecret: clientSecret,
Scope: auth.Scope,
RedirectURL: "urn:ietf:wg:oauth:2.0:oob",
AuthURL: "https://accounts.google.com/o/oauth2/auth",
TokenURL: "https://accounts.google.com/o/oauth2/token",
TokenCache: TokenCache(name),
}

t := &oauth.Transport{
Config: config,
Transport: fs.Config.Transport(),
}

return t, nil
}

// Makes a new transport using authorisation from the config with token
func (auth *Auth) NewTransport(name string) (*oauth.Transport, error) {
t, err := auth.newTransport(name)
if err != nil {
return nil, err
}

// Try to pull the token from the cache; if this fails, we need to get one.
token, err := t.Config.TokenCache.Token()
if err != nil {
return nil, fmt.Errorf("Failed to get token: %s", err)
}
t.Token = token

return t, nil
}

// Configuration helper - called after the user has put in the defaults
func (auth *Auth) Config(name string) {
// See if already have a token
tokenString := fs.ConfigFile.MustValue(name, "token")
if tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
if !fs.Confirm() {
return
}
}

// Get a transport
t, err := auth.newTransport(name)
if err != nil {
log.Fatalf("Couldn't make transport: %v", err)
}

// Generate a URL for the user to visit for authorization.
authUrl := t.Config.AuthCodeURL("state")
fmt.Printf("Go to the following link in your browser\n")
fmt.Printf("%s\n", authUrl)
fmt.Printf("Log in, then paste the token that is returned in the browser here\n")

// Read the code, and exchange it for a token.
fmt.Printf("Enter verification code> ")
authCode := fs.ReadLine()
_, err = t.Exchange(authCode)
if err != nil {
log.Fatalf("Failed to get token: %v", err)
}
}

@@ -1,4 +1,4 @@
// Google Cloud Storage interface
// Package googlecloudstorage provides an interface to Google Cloud Storage
package googlecloudstorage

/*
@@ -17,22 +17,25 @@ import (
"encoding/hex"
"fmt"
"io"
"log"
"net/http"
"path"
"regexp"
"strings"
"time"

"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/googleapi"
"google.golang.org/api/storage/v1"

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/googleauth"
"github.com/ncw/rclone/oauthutil"
)

const (
rcloneClientId = "202264815644.apps.googleusercontent.com"
rcloneClientSecret = "X4Z3ca8xfWDb1Voo-F9a7ZxJ"
rcloneClientID = "202264815644.apps.googleusercontent.com"
rcloneClientSecret = "8p/yms3OlNXE9OTDl/HLypf9gdiJ5cT3"
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
metaMtime = "mtime" // key to store mtime under in metadata
@@ -41,27 +44,32 @@ const (

var (
// Description of how to auth for this app
storageAuth = &googleauth.Auth{
Scope: storage.DevstorageFull_controlScope,
DefaultClientId: rcloneClientId,
DefaultClientSecret: rcloneClientSecret,
storageConfig = &oauth2.Config{
Scopes: []string{storage.DevstorageFullControlScope},
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: fs.Reveal(rcloneClientSecret),
RedirectURL: oauthutil.TitleBarRedirectURL,
}
)

// Register with Fs
func init() {
fs.Register(&fs.FsInfo{
fs.Register(&fs.Info{
Name: "google cloud storage",
NewFs: NewFs,
Config: func(name string) {
storageAuth.Config(name)
err := oauthutil.Config(name, storageConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Name: "client_id",
Help: "Google Application Client Id - leave blank to use rclone's.",
Name: oauthutil.ConfigClientID,
Help: "Google Application Client Id - leave blank normally.",
}, {
Name: "client_secret",
Help: "Google Application Client Secret - leave blank to use rclone's.",
Name: oauthutil.ConfigClientSecret,
Help: "Google Application Client Secret - leave blank normally.",
}, {
Name: "project_number",
Help: "Project number optional - needed only for list/create/delete buckets - see your developer console.",
@@ -110,8 +118,9 @@ func init() {
})
}

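The storageConfig above is a standard golang.org/x/oauth2 configuration using Google's endpoints. As a rough sketch of what oauthutil.NewClient has to do with it (illustrative only; the real helper also loads and saves the token in the rclone config file), a saved token becomes an authenticated HTTP client via a refreshing TokenSource:

// clientFromToken builds an authenticated *http.Client from a saved
// oauth2 token; the TokenSource refreshes it automatically on expiry.
func clientFromToken(ctx context.Context, conf *oauth2.Config, tok *oauth2.Token) *http.Client {
	return oauth2.NewClient(ctx, conf.TokenSource(ctx, tok))
}
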
// FsStorage represents a remote storage server
type FsStorage struct {
// Fs represents a remote storage server
type Fs struct {
name string // name of this remote
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
bucket string // the bucket we are working on
@@ -121,22 +130,35 @@ type FsStorage struct {
bucketAcl string // used when creating new buckets
}

// FsObjectStorage describes a storage object
// Object describes a storage object
//
// Will definitely have info but maybe not meta
type FsObjectStorage struct {
storage *FsStorage // what this object is part of
remote string // The remote path
url string // download path
md5sum string // The MD5Sum of the object
bytes int64 // Bytes in the object
modTime time.Time // Modified time of the object
type Object struct {
fs *Fs // what this object is part of
remote string // The remote path
url string // download path
md5sum string // The MD5Sum of the object
bytes int64 // Bytes in the object
modTime time.Time // Modified time of the object
}

// ------------------------------------------------------------

// String converts this FsStorage to a string
func (f *FsStorage) String() string {
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
if f.root == "" {
return f.bucket
}
return f.bucket + "/" + f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
if f.root == "" {
return fmt.Sprintf("Storage bucket %s", f.bucket)
}
@@ -158,11 +180,11 @@ func parsePath(path string) (bucket, directory string, err error) {
return
}

// NewFs constructs an FsStorage from the path, bucket:path
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
t, err := storageAuth.NewTransport(name)
oAuthClient, err := oauthutil.NewClient(name, storageConfig)
if err != nil {
return nil, err
log.Fatalf("Failed to configure Google Cloud Storage: %v", err)
}

bucket, directory, err := parsePath(root)
@@ -170,7 +192,8 @@ func NewFs(name, root string) (fs.Fs, error) {
return nil, err
}

f := &FsStorage{
f := &Fs{
name: name,
bucket: bucket,
root: directory,
projectNumber: fs.ConfigFile.MustValue(name, "project_number"),
@@ -185,7 +208,7 @@ func NewFs(name, root string) (fs.Fs, error) {
}

// Create a new authorized Drive client.
f.client = t.Client()
f.client = oAuthClient
f.svc, err = storage.New(f.client)
if err != nil {
return nil, fmt.Errorf("Couldn't create Google Cloud Storage client: %s", err)
@@ -214,10 +237,10 @@ func NewFs(name, root string) (fs.Fs, error) {
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsStorage) newFsObjectWithInfo(remote string, info *storage.Object) fs.Object {
o := &FsObjectStorage{
storage: f,
remote: remote,
func (f *Fs) newFsObjectWithInfo(remote string, info *storage.Object) fs.Object {
o := &Object{
fs: f,
remote: remote,
}
if info != nil {
o.setMetaData(info)
@@ -231,17 +254,17 @@ func (f *FsStorage) newFsObjectWithInfo(remote string, info *storage.Object) fs.
return o
}

// Return an FsObject from a path
// NewFsObject returns an FsObject from a path
//
// May return nil if an error occurred
func (f *FsStorage) NewFsObject(remote string) fs.Object {
func (f *Fs) NewFsObject(remote string) fs.Object {
return f.newFsObjectWithInfo(remote, nil)
}

// list the objects into the function supplied
//
// If directories is set it only sends directories
func (f *FsStorage) list(directories bool, fn func(string, *storage.Object)) {
func (f *Fs) list(directories bool, fn func(string, *storage.Object)) {
list := f.svc.Objects.List(f.bucket).Prefix(f.root).MaxResults(listChunks)
if directories {
list = list.Delimiter("/")
@@ -251,7 +274,7 @@ func (f *FsStorage) list(directories bool, fn func(string, *storage.Object)) {
objects, err := list.Do()
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't read bucket %q: %s", f.bucket, err)
fs.ErrorLog(f, "Couldn't read bucket %q: %s", f.bucket, err)
return
}
if !directories {
@@ -279,14 +302,14 @@ func (f *FsStorage) list(directories bool, fn func(string, *storage.Object)) {
}
}

// Walk the path returning a channel of FsObjects
func (f *FsStorage) List() fs.ObjectsChan {
// List walks the path returning a channel of FsObjects
func (f *Fs) List() fs.ObjectsChan {
out := make(fs.ObjectsChan, fs.Config.Checkers)
if f.bucket == "" {
// Return no objects at top level list
close(out)
fs.Stats.Error()
fs.Log(f, "Can't list objects at root - choose a bucket using lsd")
fs.ErrorLog(f, "Can't list objects at root - choose a bucket using lsd")
} else {
// List the objects
go func() {
@@ -301,8 +324,8 @@ func (f *FsStorage) List() fs.ObjectsChan {
return out
}

// Lists the buckets
func (f *FsStorage) ListDir() fs.DirChan {
// ListDir lists the buckets
func (f *Fs) ListDir() fs.DirChan {
out := make(fs.DirChan, fs.Config.Checkers)
if f.bucket == "" {
// List the buckets
@@ -310,7 +333,7 @@ func (f *FsStorage) ListDir() fs.DirChan {
defer close(out)
if f.projectNumber == "" {
fs.Stats.Error()
fs.Log(f, "Can't list buckets without project number")
fs.ErrorLog(f, "Can't list buckets without project number")
return
}
listBuckets := f.svc.Buckets.List(f.projectNumber).MaxResults(listChunks)
@@ -318,7 +341,7 @@ func (f *FsStorage) ListDir() fs.DirChan {
buckets, err := listBuckets.Do()
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't list buckets: %v", err)
fs.ErrorLog(f, "Couldn't list buckets: %v", err)
break
} else {
for _, bucket := range buckets.Items {
@@ -356,14 +379,17 @@ func (f *FsStorage) ListDir() fs.DirChan {
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *FsStorage) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
// Temporary FsObject under construction
o := &FsObjectStorage{storage: f, remote: remote}
func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: remote,
}
return o, o.Update(in, modTime, size)
}

// Mkdir creates the bucket if it doesn't exist
func (f *FsStorage) Mkdir() error {
func (f *Fs) Mkdir() error {
_, err := f.svc.Buckets.Get(f.bucket).Do()
if err == nil {
// Bucket already exists
@@ -381,51 +407,89 @@ func (f *FsStorage) Mkdir() error {
return err
}

// Rmdir deletes the bucket
// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty: Error 409: The bucket you tried
// to delete was not empty.
func (f *FsStorage) Rmdir() error {
func (f *Fs) Rmdir() error {
if f.root != "" {
return nil
}
return f.svc.Buckets.Delete(f.bucket).Do()
}

// Return the precision
func (fs *FsStorage) Precision() time.Duration {
// Precision returns the precision
func (f *Fs) Precision() time.Duration {
return time.Nanosecond
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debug(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}

// Temporary Object under construction
dstObj := &Object{
fs: f,
remote: remote,
}

srcBucket := srcObj.fs.bucket
srcObject := srcObj.fs.root + srcObj.remote
dstBucket := f.bucket
dstObject := f.root + remote
newObject, err := f.svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, nil).Do()
if err != nil {
return nil, err
}
// Set the metadata for the new object while we have it
dstObj.setMetaData(newObject)
return dstObj, nil
}

// ------------------------------------------------------------

// Return the parent Fs
func (o *FsObjectStorage) Fs() fs.Fs {
return o.storage
// Fs returns the parent Fs
func (o *Object) Fs() fs.Fs {
return o.fs
}

// Return a string version
func (o *FsObjectStorage) String() string {
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}

// Return the remote path
func (o *FsObjectStorage) Remote() string {
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}

// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *FsObjectStorage) Md5sum() (string, error) {
func (o *Object) Md5sum() (string, error) {
return o.md5sum, nil
}

// Size returns the size of an object in bytes
func (o *FsObjectStorage) Size() int64 {
func (o *Object) Size() int64 {
return o.bytes
}

// setMetaData sets the fs data from a storage.Object
func (o *FsObjectStorage) setMetaData(info *storage.Object) {
func (o *Object) setMetaData(info *storage.Object) {
o.url = info.MediaLink
o.bytes = int64(info.Size)

@@ -444,9 +508,8 @@ func (o *FsObjectStorage) setMetaData(info *storage.Object) {
if err == nil {
o.modTime = modTime
return
} else {
fs.Debug(o, "Failed to read mtime from metadata: %s", err)
}
fs.Debug(o, "Failed to read mtime from metadata: %s", err)
}

// Fallback to the Updated time
@@ -461,11 +524,11 @@ func (o *FsObjectStorage) setMetaData(info *storage.Object) {
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *FsObjectStorage) readMetaData() (err error) {
func (o *Object) readMetaData() (err error) {
if !o.modTime.IsZero() {
return nil
}
object, err := o.storage.svc.Objects.Get(o.storage.bucket, o.storage.root+o.remote).Do()
object, err := o.fs.svc.Objects.Get(o.fs.bucket, o.fs.root+o.remote).Do()
if err != nil {
fs.Debug(o, "Failed to read info: %s", err)
return err
@@ -478,7 +541,7 @@ func (o *FsObjectStorage) readMetaData() (err error) {
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *FsObjectStorage) ModTime() time.Time {
func (o *Object) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
// fs.Log(o, "Failed to read metadata: %s", err)
@@ -494,29 +557,29 @@ func metadataFromModTime(modTime time.Time) map[string]string {
return metadata
}

// Sets the modification time of the local fs object
func (o *FsObjectStorage) SetModTime(modTime time.Time) {
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) {
// This only adds metadata so will preserve other metadata
object := storage.Object{
Bucket: o.storage.bucket,
Name: o.storage.root + o.remote,
Bucket: o.fs.bucket,
Name: o.fs.root + o.remote,
Metadata: metadataFromModTime(modTime),
}
newObject, err := o.storage.svc.Objects.Patch(o.storage.bucket, o.storage.root+o.remote, &object).Do()
newObject, err := o.fs.svc.Objects.Patch(o.fs.bucket, o.fs.root+o.remote, &object).Do()
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to update remote mtime: %s", err)
fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
}
o.setMetaData(newObject)
}

// Is this object storable
func (o *FsObjectStorage) Storable() bool {
// Storable returns a boolean as to whether this object is storable
func (o *Object) Storable() bool {
return true
}

// Open an object for read
func (o *FsObjectStorage) Open() (in io.ReadCloser, err error) {
func (o *Object) Open() (in io.ReadCloser, err error) {
// This is slightly complicated by Go here insisting on
// decoding the %2F in URLs into / which is legal in http, but
// unfortunately not what the storage server wants.
@@ -532,7 +595,7 @@ func (o *FsObjectStorage) Open() (in io.ReadCloser, err error) {
// alter any hex-escaped characters
googleapi.SetOpaque(req.URL)
req.Header.Set("User-Agent", fs.UserAgent)
res, err := o.storage.client.Do(req)
res, err := o.fs.client.Do(req)
if err != nil {
return nil, err
}
@@ -546,16 +609,16 @@ func (o *FsObjectStorage) Open() (in io.ReadCloser, err error) {
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *FsObjectStorage) Update(in io.Reader, modTime time.Time, size int64) error {
func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
object := storage.Object{
Bucket: o.storage.bucket,
Name: o.storage.root + o.remote,
Bucket: o.fs.bucket,
Name: o.fs.root + o.remote,
ContentType: fs.MimeType(o),
Size: uint64(size),
Updated: modTime.Format(timeFormatOut), // Doesn't get set
Metadata: metadataFromModTime(modTime),
}
newObject, err := o.storage.svc.Objects.Insert(o.storage.bucket, &object).Media(in).Name(object.Name).PredefinedAcl(o.storage.objectAcl).Do()
newObject, err := o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in).Name(object.Name).PredefinedAcl(o.fs.objectAcl).Do()
if err != nil {
return err
}
@@ -565,10 +628,13 @@ func (o *FsObjectStorage) Update(in io.Reader, modTime time.Time, size int64) er
}

// Remove an object
func (o *FsObjectStorage) Remove() error {
return o.storage.svc.Objects.Delete(o.storage.bucket, o.storage.root+o.remote).Do()
func (o *Object) Remove() error {
return o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do()
}

// Check the interfaces are satisfied
var _ fs.Fs = &FsStorage{}
var _ fs.Object = &FsObjectStorage{}
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.Object = &Object{}
)

@@ -1,7 +1,7 @@
// Test GoogleCloudStorage filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: go run gen_tests.go or make gen_tests
// Regenerate with: make gen_tests
package googlecloudstorage_test

import (
@@ -13,7 +13,7 @@ import (
)

func init() {
fstests.NilObject = fs.Object((*googlecloudstorage.FsObjectStorage)(nil))
fstests.NilObject = fs.Object((*googlecloudstorage.Object)(nil))
fstests.RemoteName = "TestGoogleCloudStorage:"
}

@@ -34,6 +34,9 @@ func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }

graphics/rclone-50x50.png: new binary file (PNG, 5.7 KiB, binary not shown)

local/local.go (395 lines changed)
@@ -1,11 +1,6 @@
// Local filesystem interface
// Package local provides a filesystem interface
package local

// Note that all rclone paths should be / separated. Anything coming
// from the filepath module will have \ separators on windows so
// should be converted using filepath.ToSlash. Windows is quite happy
// with / separators so there is no need to convert them back.

import (
"crypto/md5"
"encoding/hex"
@@ -14,32 +9,37 @@ import (
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
"time"
"unicode/utf8"

"github.com/ncw/rclone/fs"
)

// Register with Fs
func init() {
fs.Register(&fs.FsInfo{
fs.Register(&fs.Info{
Name: "local",
NewFs: NewFs,
})
}

// FsLocal represents a local filesystem rooted at root
type FsLocal struct {
root string // The root directory
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
// Fs represents a local filesystem rooted at root
type Fs struct {
name string // the name of the remote
root string // The root directory
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
warned map[string]struct{} // whether we have warned about this string
}

// FsObjectLocal represents a local filesystem object
type FsObjectLocal struct {
local fs.Fs // The Fs this object is part of
// Object represents a local filesystem object
type Object struct {
fs *Fs // The Fs this object is part of
remote string // The remote path
path string // The local path
info os.FileInfo // Interface for file info (always present)
@@ -48,16 +48,22 @@ type FsObjectLocal struct {

// ------------------------------------------------------------

// NewFs constructs an FsLocal from the path
// NewFs constructs an Fs from the path
func NewFs(name, root string) (fs.Fs, error) {
root = filepath.ToSlash(path.Clean(root))
f := &FsLocal{root: root}
var err error

f := &Fs{
name: name,
warned: make(map[string]struct{}),
}
f.root = filterPath(f.cleanUtf8(root))

// Check to see if this points to a file
fi, err := os.Lstat(f.root)
if err == nil && fi.Mode().IsRegular() {
// It is a file, so use the parent as the root
remote := path.Base(root)
f.root = path.Dir(root)
var remote string
f.root, remote = getDirFile(f.root)
obj := f.NewFsObject(remote)
// return a Fs Limited to this object
return fs.NewLimited(f, obj), nil
@@ -65,52 +71,71 @@ func NewFs(name, root string) (fs.Fs, error) {
return f, nil
}

// String converts this FsLocal to a string
func (f *FsLocal) String() string {
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("Local file system at %s", f.root)
}

// newFsObject makes a half completed Object
func (f *Fs) newFsObject(remote string) *Object {
remote = filepath.ToSlash(remote)
dstPath := filterPath(filepath.Join(f.root, f.cleanUtf8(remote)))
return &Object{
fs: f,
remote: remote,
path: dstPath,
}
}

// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsLocal) newFsObjectWithInfo(remote string, info os.FileInfo) fs.Object {
remote = filepath.ToSlash(remote)
path := path.Join(f.root, remote)
o := &FsObjectLocal{local: f, remote: remote, path: path}
func (f *Fs) newFsObjectWithInfo(remote string, info os.FileInfo) fs.Object {
o := f.newFsObject(remote)
if info != nil {
o.info = info
} else {
err := o.lstat()
if err != nil {
fs.Debug(o, "Failed to stat %s: %s", path, err)
fs.Debug(o, "Failed to stat %s: %s", o.path, err)
return nil
}
}
return o
}

// Return an FsObject from a path
// NewFsObject returns an FsObject from a path
//
// May return nil if an error occurred
func (f *FsLocal) NewFsObject(remote string) fs.Object {
func (f *Fs) NewFsObject(remote string) fs.Object {
return f.newFsObjectWithInfo(remote, nil)
}

// List the path returning a channel of FsObjects
//
// Ignores everything which isn't Storable, eg links etc
func (f *FsLocal) List() fs.ObjectsChan {
func (f *Fs) List() fs.ObjectsChan {
out := make(fs.ObjectsChan, fs.Config.Checkers)
go func() {
err := filepath.Walk(f.root, func(path string, fi os.FileInfo, err error) error {
if err != nil {
fs.Stats.Error()
fs.Log(f, "Failed to open directory: %s: %s", path, err)
fs.ErrorLog(f, "Failed to open directory: %s: %s", path, err)
} else {
remote, err := filepath.Rel(f.root, path)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Failed to get relative path %s: %s", path, err)
fs.ErrorLog(f, "Failed to get relative path %s: %s", path, err)
return nil
}
if remote == "." {
@@ -127,46 +152,63 @@ func (f *FsLocal) List() fs.ObjectsChan {
})
if err != nil {
fs.Stats.Error()
fs.Log(f, "Failed to open directory: %s: %s", f.root, err)
fs.ErrorLog(f, "Failed to open directory: %s: %s", f.root, err)
}
close(out)
}()
return out
}

// Walk the path returning a channel of FsObjects
func (f *FsLocal) ListDir() fs.DirChan {
// CleanUtf8 makes string a valid UTF-8 string
//
// Any invalid UTF-8 characters will be replaced with utf8.RuneError
func (f *Fs) cleanUtf8(name string) string {
if !utf8.ValidString(name) {
if _, ok := f.warned[name]; !ok {
fs.Debug(f, "Replacing invalid UTF-8 characters in %q", name)
f.warned[name] = struct{}{}
}
name = string([]rune(name))
}
if runtime.GOOS == "windows" {
name = cleanWindowsName(f, name)
}
return name
}

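The string([]rune(name)) conversion in cleanUtf8 above works because converting a Go string to a rune slice maps each invalid UTF-8 byte to utf8.RuneError (U+FFFD), so the round trip yields a valid string. A small self-contained illustration:

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	name := "bad\xffname"                  // 0xff is not valid UTF-8
	fmt.Println(utf8.ValidString(name))    // false
	cleaned := string([]rune(name))        // invalid byte becomes utf8.RuneError
	fmt.Println(utf8.ValidString(cleaned)) // true
}
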
// ListDir walks the path returning a channel of FsObjects
func (f *Fs) ListDir() fs.DirChan {
out := make(fs.DirChan, fs.Config.Checkers)
go func() {
defer close(out)
items, err := ioutil.ReadDir(f.root)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't find read directory: %s", err)
fs.ErrorLog(f, "Couldn't find read directory: %s", err)
} else {
for _, item := range items {
if item.IsDir() {
dir := &fs.Dir{
Name: item.Name(),
Name: f.cleanUtf8(item.Name()),
When: item.ModTime(),
Bytes: 0,
Count: 0,
}
// Go down the tree to count the files and directories
dirpath := path.Join(f.root, item.Name())
dirpath := filterPath(filepath.Join(f.root, item.Name()))
err := filepath.Walk(dirpath, func(path string, fi os.FileInfo, err error) error {
if err != nil {
fs.Stats.Error()
fs.Log(f, "Failed to open directory: %s: %s", path, err)
fs.ErrorLog(f, "Failed to open directory: %s: %s", path, err)
} else {
dir.Count += 1
dir.Count++
dir.Bytes += fi.Size()
}
return nil
})
if err != nil {
fs.Stats.Error()
fs.Log(f, "Failed to open directory: %s: %s", dirpath, err)
fs.ErrorLog(f, "Failed to open directory: %s: %s", dirpath, err)
}
out <- dir
}
@@ -177,11 +219,10 @@ func (f *FsLocal) ListDir() fs.DirChan {
return out
}

// Puts the FsObject to the local filesystem
func (f *FsLocal) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
dstPath := path.Join(f.root, remote)
// Put the FsObject to the local filesystem
func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
// Temporary FsObject under construction - info filled in by Update()
o := &FsObjectLocal{local: f, remote: remote, path: dstPath}
o := f.newFsObject(remote)
err := o.Update(in, modTime, size)
if err != nil {
return nil, err
@@ -190,19 +231,20 @@ func (f *FsLocal) Put(in io.Reader, remote string, modTime time.Time, size int64
}

// Mkdir creates the directory if it doesn't exist
func (f *FsLocal) Mkdir() error {
return os.MkdirAll(f.root, 0770)
func (f *Fs) Mkdir() error {
// FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go
return os.MkdirAll(f.root, 0777)
}

// Rmdir removes the directory
//
// If it isn't empty it will return an error
func (f *FsLocal) Rmdir() error {
func (f *Fs) Rmdir() error {
return os.Remove(f.root)
}

// Return the precision
func (f *FsLocal) Precision() (precision time.Duration) {
// Precision of the file system
func (f *Fs) Precision() (precision time.Duration) {
f.precisionOk.Do(func() {
f.precision = f.readPrecision()
})
@@ -210,7 +252,7 @@ func (f *FsLocal) Precision() (precision time.Duration) {
}

// Read the precision
func (f *FsLocal) readPrecision() (precision time.Duration) {
func (f *Fs) readPrecision() (precision time.Duration) {
// Default precision of 1s
precision = time.Second

@@ -266,7 +308,7 @@ func (f *FsLocal) readPrecision() (precision time.Duration) {
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *FsLocal) Purge() error {
func (f *Fs) Purge() error {
fi, err := os.Lstat(f.root)
if err != nil {
return err
@@ -277,35 +319,120 @@ func (f *FsLocal) Purge() error {
return os.RemoveAll(f.root)
}

// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debug(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}

// Temporary FsObject under construction
dstObj := f.newFsObject(remote)

// Check it is a file if it exists
err := dstObj.lstat()
if os.IsNotExist(err) {
// OK
} else if err != nil {
return nil, err
} else if !dstObj.info.Mode().IsRegular() {
// It isn't a file
return nil, fmt.Errorf("Can't move file onto non-file")
}

// Create destination
err = dstObj.mkdirAll()
if err != nil {
return nil, err
}

// Do the move
err = os.Rename(srcObj.path, dstObj.path)
if err != nil {
return nil, err
}

// Update the info
err = dstObj.lstat()
if err != nil {
return nil, err
}

return dstObj, nil
}

// DirMove moves src directory to this remote using server side move
// operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debug(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
// Check if source exists
sstat, err := os.Lstat(srcFs.root)
if err != nil {
return err
}
// And is a directory
if !sstat.IsDir() {
return fs.ErrorCantDirMove
}

// Check if destination exists
_, err = os.Lstat(f.root)
if !os.IsNotExist(err) {
return fs.ErrorDirExists
}

// Do the move
return os.Rename(srcFs.root, f.root)
}

// ------------------------------------------------------------

// Return the parent Fs
func (o *FsObjectLocal) Fs() fs.Fs {
return o.local
// Fs returns the parent Fs
func (o *Object) Fs() fs.Fs {
return o.fs
}

// Return a string version
func (o *FsObjectLocal) String() string {
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}

// Return the remote path
func (o *FsObjectLocal) Remote() string {
return o.remote
// Remote returns the remote path
func (o *Object) Remote() string {
return o.fs.cleanUtf8(o.remote)
}

// Md5sum calculates the Md5sum of a file returning a lowercase hex string
func (o *FsObjectLocal) Md5sum() (string, error) {
func (o *Object) Md5sum() (string, error) {
if o.md5sum != "" {
return o.md5sum, nil
}
in, err := os.Open(o.path)
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to open: %s", err)
fs.ErrorLog(o, "Failed to open: %s", err)
return "", err
}
hash := md5.New()
@@ -313,12 +440,12 @@ func (o *FsObjectLocal) Md5sum() (string, error) {
closeErr := in.Close()
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to read: %s", err)
fs.ErrorLog(o, "Failed to read: %s", err)
return "", err
}
if closeErr != nil {
fs.Stats.Error()
fs.Log(o, "Failed to close: %s", closeErr)
fs.ErrorLog(o, "Failed to close: %s", closeErr)
return "", closeErr
}
o.md5sum = hex.EncodeToString(hash.Sum(nil))
@@ -326,17 +453,17 @@ func (o *FsObjectLocal) Md5sum() (string, error) {
}

// Size returns the size of an object in bytes
func (o *FsObjectLocal) Size() int64 {
func (o *Object) Size() int64 {
return o.info.Size()
}

// ModTime returns the modification time of the object
func (o *FsObjectLocal) ModTime() time.Time {
func (o *Object) ModTime() time.Time {
return o.info.ModTime()
}

// Sets the modification time of the local fs object
func (o *FsObjectLocal) SetModTime(modTime time.Time) {
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) {
err := os.Chtimes(o.path, modTime, modTime)
if err != nil {
fs.Debug(o, "Failed to set mtime on file: %s", err)
@@ -350,8 +477,8 @@ func (o *FsObjectLocal) SetModTime(modTime time.Time) {
}
}

// Is this object storable
func (o *FsObjectLocal) Storable() bool {
// Storable returns a boolean showing if this object is storable
func (o *Object) Storable() bool {
mode := o.info.Mode()
if mode&(os.ModeSymlink|os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
fs.Debug(o, "Can't transfer non file/directory")
@@ -366,9 +493,9 @@ func (o *FsObjectLocal) Storable() bool {
// localOpenFile wraps an io.ReadCloser and updates the md5sum of the
// object that is read
type localOpenFile struct {
o *FsObjectLocal // object that is open
in io.ReadCloser // handle we are wrapping
hash hash.Hash // currently accumulating MD5
o *Object // object that is open
in io.ReadCloser // handle we are wrapping
hash hash.Hash // currently accumulating MD5
}

// Read bytes from the object - see io.Reader
@@ -393,7 +520,7 @@ func (file *localOpenFile) Close() (err error) {
}

// Open an object for read
|
||||
func (o *FsObjectLocal) Open() (in io.ReadCloser, err error) {
|
||||
func (o *Object) Open() (in io.ReadCloser, err error) {
|
||||
in, err = os.Open(o.path)
|
||||
if err != nil {
|
||||
return
|
||||
@@ -407,10 +534,15 @@ func (o *FsObjectLocal) Open() (in io.ReadCloser, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// mkdirAll makes all the directories needed to store the object
|
||||
func (o *Object) mkdirAll() error {
|
||||
dir, _ := getDirFile(o.path)
|
||||
return os.MkdirAll(dir, 0777)
|
||||
}
|
||||
|
||||
// Update the object from in with modTime and size
|
||||
func (o *FsObjectLocal) Update(in io.Reader, modTime time.Time, size int64) error {
|
||||
dir := path.Dir(o.path)
|
||||
err := os.MkdirAll(dir, 0770)
|
||||
func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
|
||||
err := o.mkdirAll()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -444,18 +576,117 @@ func (o *FsObjectLocal) Update(in io.Reader, modTime time.Time, size int64) erro
|
||||
}
|
||||
|
||||
// Stat a FsObject into info
|
||||
func (o *FsObjectLocal) lstat() error {
|
||||
func (o *Object) lstat() error {
|
||||
info, err := os.Lstat(o.path)
|
||||
o.info = info
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *FsObjectLocal) Remove() error {
|
||||
func (o *Object) Remove() error {
|
||||
return os.Remove(o.path)
|
||||
}
|
||||
|
||||
// Return the current directory and file from a path
|
||||
// Assumes os.PathSeparator is used.
|
||||
func getDirFile(s string) (string, string) {
|
||||
i := strings.LastIndex(s, string(os.PathSeparator))
|
||||
return s[:i], s[i+1:]
|
||||
}
|
||||
|
||||
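For clarity, what getDirFile returns on a Unix build (illustrative values only):

	dir, leaf := getDirFile("/home/user/file.txt")
	// dir == "/home/user", leaf == "file.txt"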
func filterPath(s string) string {
	s = filepath.Clean(s)
	if runtime.GOOS == "windows" {
		s = strings.Replace(s, `/`, `\`, -1)

		if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
			s2, err := filepath.Abs(s)
			if err == nil {
				s = s2
			}
		}

		// Convert to UNC
		return uncPath(s)
	}

	if !filepath.IsAbs(s) {
		s2, err := filepath.Abs(s)
		if err == nil {
			s = s2
		}
	}

	return s
}

// Pattern to match a windows absolute path: "c:\" and similar
var isAbsWinDrive = regexp.MustCompile(`^[a-zA-Z]\:\\`)

// uncPath converts an absolute Windows path
// to a UNC long path.
func uncPath(s string) string {
	// UNC can NOT use "/", so convert all to "\"
	s = strings.Replace(s, `/`, `\`, -1)

	// If prefix is "\\", we already have a UNC path or server.
	if strings.HasPrefix(s, `\\`) {
		// If already long path, just keep it
		if strings.HasPrefix(s, `\\?\`) {
			return s
		}

		// Trim "\\" from path and add UNC prefix.
		return `\\?\UNC\` + strings.TrimPrefix(s, `\\`)
	}
	if isAbsWinDrive.MatchString(s) {
		return `\\?\` + s
	}
	return s
}
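Two worked examples of uncPath, matching the test table in local/tests_test.go further down:

	uncPath(`C:\Windows\Folder`)      // returns `\\?\C:\Windows\Folder`
	uncPath(`\\server\share\Desktop`) // returns `\\?\UNC\server\share\Desktop`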
// cleanWindowsName will clean invalid Windows characters
func cleanWindowsName(f *Fs, name string) string {
	original := name
	var name2 string
	if strings.HasPrefix(name, `\\?\`) {
		name2 = `\\?\`
		name = strings.TrimPrefix(name, `\\?\`)
	}
	if strings.HasPrefix(name, `//?/`) {
		name2 = `//?/`
		name = strings.TrimPrefix(name, `//?/`)
	}
	// Colon is allowed as part of a drive name X:\
	colonAt := strings.Index(name, ":")
	if colonAt > 0 && colonAt < 3 && len(name) > colonAt+1 {
		// Copy to name2, which is unfiltered
		name2 += name[0 : colonAt+1]
		name = name[colonAt+1:]
	}

	name2 += strings.Map(func(r rune) rune {
		switch r {
		case '<', '>', '"', '|', '?', '*', ':':
			return '_'
		}
		return r
	}, name)

	if name2 != original && f != nil {
		if _, ok := f.warned[name]; !ok {
			fs.Debug(f, "Replacing invalid characters in %q to %q", name, name2)
			f.warned[name] = struct{}{}
		}
	}
	return name2
}

// Check the interfaces are satisfied
-var _ fs.Fs = &FsLocal{}
-var _ fs.Purger = &FsLocal{}
-var _ fs.Object = &FsObjectLocal{}
+var (
+	_ fs.Fs       = &Fs{}
+	_ fs.Purger   = &Fs{}
+	_ fs.Mover    = &Fs{}
+	_ fs.DirMover = &Fs{}
+	_ fs.Object   = &Object{}
+)
local/local_test.go
@@ -1,7 +1,7 @@
// Test Local filesystem interface
//
// Automatically generated - DO NOT EDIT
-// Regenerate with: go run gen_tests.go or make gen_tests
+// Regenerate with: make gen_tests
package local_test

import (
@@ -13,7 +13,7 @@ import (
)

func init() {
-	fstests.NilObject = fs.Object((*local.FsObjectLocal)(nil))
+	fstests.NilObject = fs.Object((*local.Object)(nil))
	fstests.RemoteName = ""
}

@@ -34,6 +34,9 @@ func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
local/tests_test.go (new file, 91 lines)
@@ -0,0 +1,91 @@
package local

import (
	"testing"
)

var uncTestPaths = []string{
"C:\\Ba*d\\P|a?t<h>\\Windows\\Folder",
"C:/Ba*d/P|a?t<h>/Windows\\Folder",
"C:\\Windows\\Folder",
"\\\\?\\C:\\Windows\\Folder",
"//?/C:/Windows/Folder",
"\\\\?\\UNC\\server\\share\\Desktop",
"\\\\?\\unC\\server\\share\\Desktop\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"\\\\server\\share\\Desktop\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"C:\\Desktop\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"C:\\AbsoluteToRoot\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"\\\\server\\share\\Desktop",
|
||||
"\\\\?\\UNC\\\\share\\folder\\Desktop",
|
||||
"\\\\server\\share",
|
||||
}
|
||||
|
||||
var uncTestPathsResults = []string{
|
||||
`\\?\C:\Ba*d\P|a?t<h>\Windows\Folder`,
|
||||
`\\?\C:\Ba*d\P|a?t<h>\Windows\Folder`,
|
||||
`\\?\C:\Windows\Folder`,
|
||||
`\\?\C:\Windows\Folder`,
|
||||
`\\?\C:\Windows\Folder`,
|
||||
`\\?\UNC\server\share\Desktop`,
|
||||
`\\?\unC\server\share\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\?\UNC\server\share\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\?\C:\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\?\C:\AbsoluteToRoot\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\?\UNC\server\share\Desktop`,
`\\?\UNC\\share\folder\Desktop`,
`\\?\UNC\server\share`,
}

// Test that UNC paths are converted.
func TestUncPaths(t *testing.T) {
	for i, p := range uncTestPaths {
		unc := uncPath(p)
		if unc != uncTestPathsResults[i] {
			t.Fatalf("UNC test path\nInput:%s\nOutput:%s\nExpected:%s", p, unc, uncTestPathsResults[i])
		}
		// Test we don't add more.
		unc = uncPath(unc)
		if unc != uncTestPathsResults[i] {
			t.Fatalf("UNC test path\nInput:%s\nOutput:%s\nExpected:%s", p, unc, uncTestPathsResults[i])
		}
	}
}

var utf8Tests = [][2]string{
	[2]string{"ABC", "ABC"},
	[2]string{string([]byte{0x80}), "�"},
	[2]string{string([]byte{'a', 0x80, 'b'}), "a�b"},
}

func TestCleanUtf8(t *testing.T) {
	f := &Fs{}
	f.warned = make(map[string]struct{})
	for _, test := range utf8Tests {
		got := f.cleanUtf8(test[0])
		expect := test[1]
		if got != expect {
			t.Fatalf("got %q, expected %q", got, expect)
		}
	}
}
// Test Windows character replacements
var testsWindows = [][2]string{
	[2]string{`c:\temp`, `c:\temp`},
	[2]string{`\\?\UNC\theserver\dir\file.txt`, `\\?\UNC\theserver\dir\file.txt`},
	[2]string{`//?/UNC/theserver/dir\file.txt`, `//?/UNC/theserver/dir\file.txt`},
	[2]string{"c:/temp", "c:/temp"},
	[2]string{"/temp/file.txt", "/temp/file.txt"},
	[2]string{`!\"#¤%&/()=;:*^?+-`, "!\\_#¤%&/()=;__^_+-"},
	[2]string{`<>"|?*:&\<>"|?*:&\<>"|?*:&`, "_______&\\_______&\\_______&"},
}

func TestCleanWindows(t *testing.T) {
	for _, test := range testsWindows {
		got := cleanWindowsName(nil, test[0])
		expect := test[1]
		if got != expect {
			t.Fatalf("got %q, expected %q", got, expect)
		}
	}
}
make_manual.py (new executable file, 84 lines)
@@ -0,0 +1,84 @@
#!/usr/bin/python
"""
Make single page versions of the documentation for release and
conversion into man pages etc.
"""

import os
import re
from datetime import datetime

docpath = "docs/content"
outfile = "MANUAL.md"

# Order to add docs segments to make outfile
docs = [
    "about.md",
    "install.md",
    "docs.md",
    "filtering.md",
    "overview.md",
    "drive.md",
    "s3.md",
    "swift.md",
    "dropbox.md",
    "googlecloudstorage.md",
    "amazonclouddrive.md",
    "onedrive.md",
    "local.md",
    "changelog.md",
    "bugs.md",
    "faq.md",
    "licence.md",
    "authors.md",
    "contact.md",
]

# Docs which aren't made into outfile
ignore_docs = [
    "downloads.md",
    "privacy.md",
    "donate.md",
]

def read_doc(doc):
    """Read file as a string"""
    path = os.path.join(docpath, doc)
    with open(path) as fd:
        contents = fd.read()
    parts = contents.split("---\n", 2)
    if len(parts) != 3:
        raise ValueError("Couldn't find --- markers: found %d parts" % len(parts))
    contents = parts[2].strip()+"\n\n"
    # Remove icons
    contents = re.sub(r'<i class="fa.*?</i>\s*', "", contents)
    # Make [...](/links/) absolute
    contents = re.sub(r'\((\/.*?\/)\)', r"(http://rclone.org\1)", contents)
    return contents

def check_docs(docpath):
    """Check all the docs are in docpath"""
    files = set(f for f in os.listdir(docpath) if f.endswith(".md"))
    files -= set(ignore_docs)
    docs_set = set(docs)
    if files == docs_set:
        return
    print "Files on disk but not in docs variable: %s" % ", ".join(files - docs_set)
    print "Files in docs variable but not on disk: %s" % ", ".join(docs_set - files)
    raise ValueError("Missing files")

def main():
    check_docs(docpath)
    with open(outfile, "w") as out:
        out.write("""\
%% rclone(1) User Manual
%% Nick Craig-Wood
%% %s

""" % datetime.now().strftime("%b %d, %Y"))
        for doc in docs:
            out.write(read_doc(doc))
    print "Written '%s'" % outfile

if __name__ == "__main__":
    main()
notes.txt (19 changes)
@@ -1,3 +1,11 @@
Perhaps make Md5sum() and Modtime() optional. Define the zero values
"" and 0. Make it so we can support remotes which can't do those.

Fix the docs
* factor the README.md into the docs directory
* create it as part of make by assembling other parts
* write long docs about each flag

Change lsd command so it doesn't show -1
* Make sure all Fses show -1 for objects Zero for dates etc
* Make test?
@@ -7,6 +15,7 @@ Make test_all.sh use the TestRemote name automatically

Run errcheck and go vet in the make file
.. Also race detector?
.. go tool vet -shadow

Get rid of Storable?

@@ -48,6 +57,16 @@ Ideas
* control times sync (which is slow with some remotes) with -a --archive flag?
* Copy a glob pattern - could do with LimitedFs

Make an encryption layer.

This would layer over the source FS to
* decrypt all gets
* encrypt all puts
* encrypt file names in list
* decrypt them in list

Would like to be able to see unencrypted file names in remote though? How? Or is that two encryption layers..?

Bugs
* Non verbose - not sure number transferred got counted up? CHECK
* When doing copy it recurses the whole of the destination FS which isn't necessary
oauthutil/oauthutil.go (new file, 328 lines)
@@ -0,0 +1,328 @@
package oauthutil

import (
	"crypto/rand"
	"encoding/json"
	"fmt"
	"log"
	"net"
	"net/http"
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/skratchdot/open-golang/open"
	"golang.org/x/net/context"
	"golang.org/x/oauth2"
)

const (
	// ConfigToken is the key used to store the token under
	ConfigToken = "token"

	// ConfigClientID is the config key used to store the client id
	ConfigClientID = "client_id"

	// ConfigClientSecret is the config key used to store the client secret
	ConfigClientSecret = "client_secret"

	// TitleBarRedirectURL is the OAuth2 redirect URL to use when the authorization
	// code should be returned in the title bar of the browser, with the page text
	// prompting the user to copy the code and paste it in the application.
	TitleBarRedirectURL = "urn:ietf:wg:oauth:2.0:oob"

	// bindPort is the port that we bind the local webserver to
	bindPort = "53682"

	// bindAddress is binding for local webserver when active
	bindAddress = "127.0.0.1:" + bindPort

	// RedirectURL is redirect to local webserver when active
	RedirectURL = "http://" + bindAddress + "/"

	// RedirectPublicURL is redirect to local webserver when active with public name
	RedirectPublicURL = "http://localhost.rclone.org:" + bindPort + "/"
)

// oldToken contains an end-user's tokens.
// This is the data you must store to persist authentication.
//
// From the original code.google.com/p/goauth2/oauth package - used
// for backwards compatibility in the rclone config file
type oldToken struct {
	AccessToken  string
	RefreshToken string
	Expiry       time.Time
}

// getToken returns the token saved in the config file under
// section name.
func getToken(name string) (*oauth2.Token, error) {
	tokenString, err := fs.ConfigFile.GetValue(string(name), ConfigToken)
	if err != nil {
		return nil, err
	}
	if tokenString == "" {
		return nil, fmt.Errorf("Empty token found - please run rclone config again")
	}
	token := new(oauth2.Token)
	err = json.Unmarshal([]byte(tokenString), token)
	if err != nil {
		return nil, err
	}
	// if has data then return it
	if token.AccessToken != "" && token.RefreshToken != "" {
		return token, nil
	}
	// otherwise try parsing as oldToken
	oldtoken := new(oldToken)
	err = json.Unmarshal([]byte(tokenString), oldtoken)
	if err != nil {
		return nil, err
	}
	// Fill in result into new token
	token.AccessToken = oldtoken.AccessToken
	token.RefreshToken = oldtoken.RefreshToken
	token.Expiry = oldtoken.Expiry
	// Save new format in config file
	err = putToken(name, token)
	if err != nil {
		return nil, err
	}
	return token, nil
}

// putToken stores the token in the config file
//
// This saves the config file if it changes
func putToken(name string, token *oauth2.Token) error {
	tokenBytes, err := json.Marshal(token)
	if err != nil {
		return err
	}
	tokenString := string(tokenBytes)
	old := fs.ConfigFile.MustValue(name, ConfigToken)
	if tokenString != old {
		fs.ConfigFile.SetValue(name, ConfigToken, tokenString)
		fs.SaveConfig()
		fs.Debug(name, "Saving new token in config file")
	}
	return nil
}
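For reference, a sketch of what getToken and putToken round-trip through the config file (section name and token values invented). The lower-case keys are the json tags on oauth2.Token:

[remote]
token = {"access_token":"XXXX","token_type":"Bearer","refresh_token":"YYYY","expiry":"2015-09-17T16:00:00.0+01:00"}

The old goauth2 format, which getToken still accepts and rewrites in the new format on first read, uses the bare field names of oldToken:

token = {"AccessToken":"XXXX","RefreshToken":"YYYY","Expiry":"2015-09-17T16:00:00.0+01:00"}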
// tokenSource stores updated tokens in the config file
type tokenSource struct {
	Name        string
	TokenSource oauth2.TokenSource
	OldToken    oauth2.Token
}

// Token returns a token or an error.
// Token must be safe for concurrent use by multiple goroutines.
// The returned Token must not be modified.
//
// This saves the token in the config file if it has changed
func (ts *tokenSource) Token() (*oauth2.Token, error) {
	token, err := ts.TokenSource.Token()
	if err != nil {
		return nil, err
	}
	if *token != ts.OldToken {
		err = putToken(ts.Name, token)
		if err != nil {
			return nil, err
		}
	}
	return token, nil
}

// Check interface satisfied
var _ oauth2.TokenSource = (*tokenSource)(nil)

// Context returns a context with our HTTP Client baked in for oauth2
func Context() context.Context {
	return context.WithValue(nil, oauth2.HTTPClient, fs.Config.Client())
}

// overrideCredentials sets the ClientID and ClientSecret from the
// config file if they are not blank
func overrideCredentials(name string, config *oauth2.Config) {
	ClientID := fs.ConfigFile.MustValue(name, ConfigClientID)
	if ClientID != "" {
		config.ClientID = ClientID
	}
	ClientSecret := fs.ConfigFile.MustValue(name, ConfigClientSecret)
	if ClientSecret != "" {
		config.ClientSecret = ClientSecret
	}
}

// NewClient gets a token from the config file and configures a Client
// with it
func NewClient(name string, config *oauth2.Config) (*http.Client, error) {
	overrideCredentials(name, config)
	token, err := getToken(name)
	if err != nil {
		return nil, err
	}

	// Set our own http client in the context
	ctx := Context()

	// Wrap the TokenSource in our TokenSource which saves changed
	// tokens in the config file
	ts := &tokenSource{
		Name:        name,
		OldToken:    *token,
		TokenSource: config.TokenSource(ctx, token),
	}
	return oauth2.NewClient(ctx, ts), nil
}

// Config does the initial creation of the token
//
// It may run an internal webserver to receive the results
func Config(name string, config *oauth2.Config) error {
	overrideCredentials(name, config)
	// See if already have a token
	tokenString := fs.ConfigFile.MustValue(name, "token")
	if tokenString != "" {
		fmt.Printf("Already have a token - refresh?\n")
		if !fs.Confirm() {
			return nil
		}
	}

	// Detect whether we should use internal web server
	useWebServer := false
	switch config.RedirectURL {
	case RedirectURL, RedirectPublicURL:
		useWebServer = true
	case TitleBarRedirectURL:
		fmt.Printf("Use auto config?\n")
		fmt.Printf(" * Say Y if not sure\n")
		fmt.Printf(" * Say N if you are working on a remote or headless machine or Y didn't work\n")
		useWebServer = fs.Confirm()
		if useWebServer {
			// copy the config and set to use the internal webserver
			configCopy := *config
			config = &configCopy
			config.RedirectURL = RedirectURL
		}
	}

	// Make random state
	stateBytes := make([]byte, 16)
	_, err := rand.Read(stateBytes)
	if err != nil {
		return err
	}
	state := fmt.Sprintf("%x", stateBytes)
	authURL := config.AuthCodeURL(state)

	// Prepare webserver
	server := authServer{
		state:       state,
		bindAddress: bindAddress,
		authURL:     authURL,
	}
	if useWebServer {
		server.code = make(chan string, 1)
		go server.Start()
		defer server.Stop()
		authURL = "http://" + bindAddress + "/auth"
	}

	// Generate a URL for the user to visit for authorization.
	_ = open.Start(authURL)
	fmt.Printf("If your browser doesn't open automatically go to the following link: %s\n", authURL)
	fmt.Printf("Log in and authorize rclone for access\n")

	var authCode string
	if useWebServer {
		// Read the code, and exchange it for a token.
		fmt.Printf("Waiting for code...\n")
		authCode = <-server.code
		if authCode != "" {
			fmt.Printf("Got code\n")
		} else {
			return fmt.Errorf("Failed to get code")
		}
	} else {
		// Read the code, and exchange it for a token.
		fmt.Printf("Enter verification code> ")
		authCode = fs.ReadLine()
	}
	token, err := config.Exchange(oauth2.NoContext, authCode)
	if err != nil {
		return fmt.Errorf("Failed to get token: %v", err)
	}
	return putToken(name, token)
}

// Local web server for collecting auth
type authServer struct {
	state       string
	listener    net.Listener
	bindAddress string
	code        chan string
	authURL     string
}

// Start runs an internal web server to receive config details
func (s *authServer) Start() {
	fs.Debug(nil, "Starting auth server on %s", s.bindAddress)
	mux := http.NewServeMux()
	server := &http.Server{
		Addr:    s.bindAddress,
		Handler: mux,
	}
	mux.HandleFunc("/favicon.ico", func(w http.ResponseWriter, req *http.Request) {
		http.Error(w, "", 404)
	})
	mux.HandleFunc("/auth", func(w http.ResponseWriter, req *http.Request) {
		http.Redirect(w, req, s.authURL, 307)
	})
	mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
		fs.Debug(nil, "Received request on auth server")
		code := req.FormValue("code")
		if code != "" {
			state := req.FormValue("state")
			if state != s.state {
				fs.Debug(nil, "State did not match: want %q got %q", s.state, state)
				fmt.Fprintf(w, "<h1>Failure</h1>\n<p>Auth state doesn't match</p>")
			} else {
				fs.Debug(nil, "Successfully got code")
				if s.code != nil {
					fmt.Fprintf(w, "<h1>Success</h1>\n<p>Go back to rclone to continue</p>")
					s.code <- code
				} else {
					fmt.Fprintf(w, "<h1>Success</h1>\n<p>Cut and paste this code into rclone: <code>%s</code></p>", code)
				}
			}
			return
		}
		fs.Debug(nil, "No code found on request")
		fmt.Fprintf(w, "<h1>Failed!</h1>\nNo code found.")
		http.Error(w, "", 500)
	})

	var err error
	s.listener, err = net.Listen("tcp", s.bindAddress)
	if err != nil {
		log.Fatalf("Failed to start auth webserver: %v", err)
	}
	err = server.Serve(s.listener)
	fs.Debug(nil, "Closed auth server with error: %v", err)
}

// Stop shuts the auth server down
func (s *authServer) Stop() {
	fs.Debug(nil, "Closing auth server")
	if s.code != nil {
		close(s.code)
		s.code = nil
	}
	_ = s.listener.Close()
}
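To make the handshake above concrete, a sketch of what the local server sees during auto config (all values invented):

* rclone opens http://127.0.0.1:53682/auth; the /auth handler 307-redirects the browser to the provider's authorization URL, which carries the random state.
* After login the provider redirects the browser back to, e.g., http://127.0.0.1:53682/?state=6a1f...&code=M1a2b3c4
* The "/" handler checks state against s.state, writes the success page, and sends code down s.code, unblocking the authCode = <-server.code read in Config.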
onedrive/api/api.go (new file, 141 lines)
@@ -0,0 +1,141 @@
// Package api implements the API for one drive
package api

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"

	"github.com/ncw/rclone/fs"
)

const (
	rootURL = "https://api.onedrive.com/v1.0" // root URL for requests
)

// Client contains the info to sustain the API
type Client struct {
	c *http.Client
}

// NewClient takes an oauth http.Client and makes a new api instance
func NewClient(c *http.Client) *Client {
	return &Client{
		c: c,
	}
}

// Opts contains parameters for Call, CallJSON etc
type Opts struct {
	Method        string
	Path          string
	Absolute      bool // Path is absolute
	Body          io.Reader
	NoResponse    bool // set to close Body
	ContentType   string
	ContentLength *int64
	ContentRange  string
	ExtraHeaders  map[string]string
}

// checkClose is a utility function used to check the return from
// Close in a defer statement.
func checkClose(c io.Closer, err *error) {
	cerr := c.Close()
	if *err == nil {
		*err = cerr
	}
}

// DecodeJSON decodes resp.Body into result
func DecodeJSON(resp *http.Response, result interface{}) (err error) {
	defer checkClose(resp.Body, &err)
	decoder := json.NewDecoder(resp.Body)
	return decoder.Decode(result)
}

// Call makes the call and returns the http.Response
//
// if err == nil then resp.Body will need to be closed
//
// it will return resp if at all possible, even if err is set
func (api *Client) Call(opts *Opts) (resp *http.Response, err error) {
	if opts == nil {
		return nil, fmt.Errorf("call() called with nil opts")
	}
	var url string
	if opts.Absolute {
		url = opts.Path
	} else {
		url = rootURL + opts.Path
	}
	req, err := http.NewRequest(opts.Method, url, opts.Body)
	if err != nil {
		return
	}
	if opts.ContentType != "" {
		req.Header.Add("Content-Type", opts.ContentType)
	}
	if opts.ContentLength != nil {
		req.ContentLength = *opts.ContentLength
	}
	if opts.ContentRange != "" {
		req.Header.Add("Content-Range", opts.ContentRange)
	}
	if opts.ExtraHeaders != nil {
		for k, v := range opts.ExtraHeaders {
			req.Header.Add(k, v)
		}
	}
	req.Header.Add("User-Agent", fs.UserAgent)
	resp, err = api.c.Do(req)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		// Decode error response
		errResponse := new(Error)
		err = DecodeJSON(resp, &errResponse)
		if err != nil {
			return resp, err
		}
		if errResponse.ErrorInfo.Code == "" {
			errResponse.ErrorInfo.Code = resp.Status
		}
		return resp, errResponse
	}
	if opts.NoResponse {
		return resp, resp.Body.Close()
	}
	return resp, nil
}

// CallJSON runs Call and decodes the body as a JSON object into response (if not nil)
//
// If request is not nil then it will be JSON encoded as the body of the request
//
// It will return resp if at all possible, even if err is set
func (api *Client) CallJSON(opts *Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
	// Set the body up as a JSON object if required
	if opts.Body == nil && request != nil {
		body, err := json.Marshal(request)
		if err != nil {
			return nil, err
		}
		var newOpts = *opts
		newOpts.Body = bytes.NewBuffer(body)
		newOpts.ContentType = "application/json"
		opts = &newOpts
	}
	resp, err = api.Call(opts)
	if err != nil {
		return resp, err
	}
	if opts.NoResponse {
		return resp, nil
	}
	err = DecodeJSON(resp, response)
	return resp, err
}
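A usage sketch for the client above (not part of this diff; the GET /drive endpoint and the Drive type come from onedrive/api/types.go below, and oauthClient is assumed to come from oauthutil.NewClient):

// driveInfo fetches the default drive's metadata with one CallJSON.
func driveInfo(oauthClient *http.Client) (*api.Drive, error) {
	srv := api.NewClient(oauthClient)
	var drive api.Drive
	opts := api.Opts{
		Method: "GET",
		Path:   "/drive", // relative, so resolved against rootURL
	}
	if _, err := srv.CallJSON(&opts, nil, &drive); err != nil {
		return nil, err
	}
	return &drive, nil
}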
onedrive/api/types.go (new file, 213 lines)
@@ -0,0 +1,213 @@
// Types passed and returned to and from the API

package api

import "time"

const (
	timeFormat = `"` + time.RFC3339 + `"`
)

// Error is returned from one drive when things go wrong
type Error struct {
	ErrorInfo struct {
		Code       string `json:"code"`
		Message    string `json:"message"`
		InnerError struct {
			Code string `json:"code"`
		} `json:"innererror"`
	} `json:"error"`
}

// Error returns a string for the error and satisfies the error interface
func (e *Error) Error() string {
	out := e.ErrorInfo.Code
	if e.ErrorInfo.InnerError.Code != "" {
		out += ": " + e.ErrorInfo.InnerError.Code
	}
	out += ": " + e.ErrorInfo.Message
	return out
}

// Check Error satisfies the error interface
var _ error = (*Error)(nil)

// Identity represents an identity of an actor. For example, an actor
// can be a user, device, or application.
type Identity struct {
	DisplayName string `json:"displayName"`
	ID          string `json:"id"`
}

// IdentitySet is a keyed collection of Identity objects. It is used
// to represent a set of identities associated with various events for
// an item, such as created by or last modified by.
type IdentitySet struct {
	User        Identity `json:"user"`
	Application Identity `json:"application"`
	Device      Identity `json:"device"`
}

// Quota groups storage space quota-related information on OneDrive into a single structure.
type Quota struct {
	Total     int    `json:"total"`
	Used      int    `json:"used"`
	Remaining int    `json:"remaining"`
	Deleted   int    `json:"deleted"`
	State     string `json:"state"` // normal | nearing | critical | exceeded
}

// Drive is a representation of a drive resource
type Drive struct {
	ID        string      `json:"id"`
	DriveType string      `json:"driveType"`
	Owner     IdentitySet `json:"owner"`
	Quota     Quota       `json:"quota"`
}

// Timestamp represents date and time information for the
// OneDrive API, by using ISO 8601 and is always in UTC time.
type Timestamp time.Time

// MarshalJSON turns a Timestamp into JSON (in UTC)
func (t *Timestamp) MarshalJSON() (out []byte, err error) {
	out = (*time.Time)(t).UTC().AppendFormat(out, timeFormat)
	return out, nil
}

// UnmarshalJSON turns JSON into a Timestamp
func (t *Timestamp) UnmarshalJSON(data []byte) error {
	newT, err := time.Parse(timeFormat, string(data))
	if err != nil {
		return err
	}
	*t = Timestamp(newT)
	return nil
}
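A quick round-trip sketch (timestamp value invented): timeFormat is RFC3339 wrapped in quotes, and Go's time.Parse accepts a fractional second on input even though the layout omits it, while AppendFormat drops it on output:

	var ts api.Timestamp
	_ = ts.UnmarshalJSON([]byte(`"2015-01-29T09:21:55.523Z"`))
	out, _ := ts.MarshalJSON()
	// string(out) == `"2015-01-29T09:21:55Z"` (quoted, UTC, fraction dropped)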
// ItemReference groups data needed to reference a OneDrive item
// across the service into a single structure.
type ItemReference struct {
	DriveID string `json:"driveId"` // Unique identifier for the Drive that contains the item. Read-only.
	ID      string `json:"id"`      // Unique identifier for the item. Read/Write.
	Path    string `json:"path"`    // Path that is used to navigate to the item. Read/Write.
}

// FolderFacet groups folder-related data on OneDrive into a single structure
type FolderFacet struct {
	ChildCount int64 `json:"childCount"` // Number of children contained immediately within this container.
}

// HashesType groups different types of hashes into a single structure, for an item on OneDrive.
type HashesType struct {
	Sha1Hash  string `json:"sha1Hash"`  // base64 encoded SHA1 hash for the contents of the file (if available)
	Crc32Hash string `json:"crc32Hash"` // base64 encoded CRC32 value of the file (if available)
}

// FileFacet groups file-related data on OneDrive into a single structure.
type FileFacet struct {
	MimeType string     `json:"mimeType"` // The MIME type for the file. This is determined by logic on the server and might not be the value provided when the file was uploaded.
	Hashes   HashesType `json:"hashes"`   // Hashes of the file's binary content, if available.
}

// FileSystemInfoFacet contains properties that are reported by the
// device's local file system for the local version of an item. This
// facet can be used to specify the last modified date or created date
// of the item as it was on the local device.
type FileSystemInfoFacet struct {
	CreatedDateTime      Timestamp `json:"createdDateTime"`      // The UTC date and time the file was created on a client.
	LastModifiedDateTime Timestamp `json:"lastModifiedDateTime"` // The UTC date and time the file was last modified on a client.
}

// DeletedFacet indicates that the item on OneDrive has been
// deleted. In this version of the API, the presence (non-null) of the
// facet value indicates that the file was deleted. A null (or
// missing) value indicates that the file is not deleted.
type DeletedFacet struct {
}

// Item represents metadata for an item in OneDrive
type Item struct {
	ID                   string               `json:"id"`                   // The unique identifier of the item within the Drive. Read-only.
	Name                 string               `json:"name"`                 // The name of the item (filename and extension). Read-write.
	ETag                 string               `json:"eTag"`                 // eTag for the entire item (metadata + content). Read-only.
	CTag                 string               `json:"cTag"`                 // An eTag for the content of the item. This eTag is not changed if only the metadata is changed. Read-only.
	CreatedBy            IdentitySet          `json:"createdBy"`            // Identity of the user, device, and application which created the item. Read-only.
	LastModifiedBy       IdentitySet          `json:"lastModifiedBy"`       // Identity of the user, device, and application which last modified the item. Read-only.
	CreatedDateTime      Timestamp            `json:"createdDateTime"`      // Date and time of item creation. Read-only.
	LastModifiedDateTime Timestamp            `json:"lastModifiedDateTime"` // Date and time the item was last modified. Read-only.
	Size                 int64                `json:"size"`                 // Size of the item in bytes. Read-only.
	ParentReference      *ItemReference       `json:"parentReference"`      // Parent information, if the item has a parent. Read-write.
	WebURL               string               `json:"webUrl"`               // URL that displays the resource in the browser. Read-only.
	Description          string               `json:"description"`          // Provide a user-visible description of the item. Read-write.
	Folder               *FolderFacet         `json:"folder"`               // Folder metadata, if the item is a folder. Read-only.
	File                 *FileFacet           `json:"file"`                 // File metadata, if the item is a file. Read-only.
	FileSystemInfo       *FileSystemInfoFacet `json:"fileSystemInfo"`       // File system information on client. Read-write.
	// Image    *ImageFacet    `json:"image"`    // Image metadata, if the item is an image. Read-only.
	// Photo    *PhotoFacet    `json:"photo"`    // Photo metadata, if the item is a photo. Read-only.
	// Audio    *AudioFacet    `json:"audio"`    // Audio metadata, if the item is an audio file. Read-only.
	// Video    *VideoFacet    `json:"video"`    // Video metadata, if the item is a video. Read-only.
	// Location *LocationFacet `json:"location"` // Location metadata, if the item has location data. Read-only.
	Deleted *DeletedFacet `json:"deleted"` // Information about the deleted state of the item. Read-only.
}

// ViewDeltaResponse is the response to the view delta method
type ViewDeltaResponse struct {
	Value      []Item `json:"value"`            // An array of Item objects which have been created, modified, or deleted.
	NextLink   string `json:"@odata.nextLink"`  // A URL to retrieve the next available page of changes.
	DeltaLink  string `json:"@odata.deltaLink"` // A URL returned instead of @odata.nextLink after all current changes have been returned. Used to read the next set of changes in the future.
	DeltaToken string `json:"@delta.token"`     // A token value that can be used in the query string on manually-crafted calls to view.delta. Not needed if you're using nextLink and deltaLink.
}

// ListChildrenResponse is the response to the list children method
type ListChildrenResponse struct {
	Value    []Item `json:"value"`           // An array of Item objects
	NextLink string `json:"@odata.nextLink"` // A URL to retrieve the next available page of items.
}

// CreateItemRequest is the request to create an item object
type CreateItemRequest struct {
	Name             string      `json:"name"`                   // Name of the folder to be created.
	Folder           FolderFacet `json:"folder"`                 // Empty Folder facet to indicate that folder is the type of resource to be created.
	ConflictBehavior string      `json:"@name.conflictBehavior"` // Determines what to do if an item with a matching name already exists in this folder. Accepted values are: rename, replace, and fail (the default).
}

// SetFileSystemInfo is used to Update an object's FileSystemInfo.
type SetFileSystemInfo struct {
	FileSystemInfo FileSystemInfoFacet `json:"fileSystemInfo"` // File system information on client. Read-write.
}

// CreateUploadResponse is the response from creating an upload session
type CreateUploadResponse struct {
	UploadURL          string    `json:"uploadUrl"`          // "https://sn3302.up.1drv.com/up/fe6987415ace7X4e1eF866337",
	ExpirationDateTime Timestamp `json:"expirationDateTime"` // "2015-01-29T09:21:55.523Z",
	NextExpectedRanges []string  `json:"nextExpectedRanges"` // ["0-"]
}

// UploadFragmentResponse is the response from uploading a fragment
type UploadFragmentResponse struct {
	ExpirationDateTime Timestamp `json:"expirationDateTime"` // "2015-01-29T09:21:55.523Z",
	NextExpectedRanges []string  `json:"nextExpectedRanges"` // ["0-"]
}
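A sketch of how CreateUploadResponse and UploadFragmentResponse are meant to fit together (hypothetical helper, not from this diff; Opts and CallJSON come from onedrive/api/api.go above, and the Content-Range value uses the usual "bytes start-end/total" form an upload session expects):

// uploadFragment PUTs bytes [start, start+length) of a file of size
// total to the session URL returned in CreateUploadResponse.UploadURL.
func uploadFragment(srv *api.Client, uploadURL string, start, length, total int64, chunk io.Reader) (*api.UploadFragmentResponse, error) {
	var response api.UploadFragmentResponse
	opts := api.Opts{
		Method:        "PUT",
		Path:          uploadURL,
		Absolute:      true, // the session URL is already fully qualified
		Body:          chunk,
		ContentLength: &length,
		ContentRange:  fmt.Sprintf("bytes %d-%d/%d", start, start+length-1, total),
	}
	if _, err := srv.CallJSON(&opts, nil, &response); err != nil {
		return nil, err
	}
	return &response, nil
}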
// CopyItemRequest is the request to copy an item object
//
// Note: The parentReference should include either an id or path but
// not both. If both are included, they need to reference the same
// item or an error will occur.
type CopyItemRequest struct {
	ParentReference ItemReference `json:"parentReference"` // Reference to the parent item the copy will be created in.
	Name            *string       `json:"name"`            // Optional. The new name for the copy. If this isn't provided, the same name will be used as the original.
}

// AsyncOperationStatus provides information on the status of an asynchronous job's progress.
//
// The following API calls return AsyncOperationStatus resources:
//
// Copy Item
// Upload From URL
type AsyncOperationStatus struct {
	Operation          string  `json:"operation"`          // The type of job being run.
	PercentageComplete float64 `json:"percentageComplete"` // A float value between 0 and 100 that indicates the percentage complete.
	Status             string  `json:"status"`             // A string value that maps to an enumeration of possible values about the status of the job. "notStarted | inProgress | completed | updating | failed | deletePending | deleteFailed | waiting"
}

onedrive/onedrive.go (new file, 947 lines)
@@ -0,0 +1,947 @@
// Package onedrive provides an interface to the Microsoft One Drive
// object storage system.
package onedrive

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"net/http"
	"regexp"
	"strings"
	"sync"
	"time"

	"github.com/ncw/rclone/dircache"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/oauthutil"
	"github.com/ncw/rclone/onedrive/api"
	"github.com/ncw/rclone/pacer"
	"github.com/spf13/pflag"
	"golang.org/x/oauth2"
)

const (
	rcloneClientID     = "0000000044165769"
	rcloneClientSecret = "0+be4+jYw+7018HY6P3t/Izo+pTc+Yvt8+fy8NHU094="
	minSleep           = 10 * time.Millisecond
	maxSleep           = 2 * time.Second
	decayConstant      = 2 // bigger for slower decay, exponential
)

// Globals
var (
	// Description of how to auth for this app
	oauthConfig = &oauth2.Config{
		Scopes: []string{
			"wl.signin",          // Allow single sign-on capabilities
			"wl.offline_access",  // Allow receiving a refresh token
			"onedrive.readwrite", // r/w perms to all of a user's OneDrive files
		},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://login.live.com/oauth20_authorize.srf",
			TokenURL: "https://login.live.com/oauth20_token.srf",
		},
		ClientID:     rcloneClientID,
		ClientSecret: fs.Reveal(rcloneClientSecret),
		RedirectURL:  oauthutil.RedirectPublicURL,
	}
	chunkSize    = fs.SizeSuffix(10 * 1024 * 1024)
	uploadCutoff = fs.SizeSuffix(10 * 1024 * 1024)
)

// Register with Fs
func init() {
	fs.Register(&fs.Info{
		Name:  "onedrive",
		NewFs: NewFs,
		Config: func(name string) {
			err := oauthutil.Config(name, oauthConfig)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
			}
		},
		Options: []fs.Option{{
			Name: oauthutil.ConfigClientID,
			Help: "Microsoft App Client Id - leave blank normally.",
		}, {
			Name: oauthutil.ConfigClientSecret,
			Help: "Microsoft App Client Secret - leave blank normally.",
		}},
	})
	pflag.VarP(&chunkSize, "onedrive-chunk-size", "", "Above this size files will be chunked - must be multiple of 320k.")
	pflag.VarP(&uploadCutoff, "onedrive-upload-cutoff", "", "Cutoff for switching to chunked upload - must be <= 100MB")
}
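A worked example of the two flags registered above (file sizes invented): with the defaults uploadCutoff = chunkSize = 10 MB, a 25 MB upload exceeds the cutoff and takes the chunked path, needing ceil(25 / 10) = 3 fragments, while a 5 MB file is uploaded in a single request.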
// Fs represents a remote one drive
type Fs struct {
	name     string             // name of this remote
	srv      *api.Client        // the connection to the one drive server
	root     string             // the path we are working on
	dirCache *dircache.DirCache // Map of directory path to directory id
	pacer    *pacer.Pacer       // pacer for API calls
}

// Object describes a one drive object
//
// Will definitely have info but maybe not meta
type Object struct {
	fs          *Fs       // what this object is part of
	remote      string    // The remote path
	hasMetaData bool      // whether info below has been set
	size        int64     // size of the object
	modTime     time.Time // modification time of the object
	id          string    // ID of the object
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	return fmt.Sprintf("One drive root '%s'", f.root)
}

// Pattern to match a one drive path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)

// parsePath parses a one drive 'url'
func parsePath(path string) (root string) {
	root = strings.Trim(path, "/")
	return
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	429, // Too Many Requests.
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
	return fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Response, err error) {
	opts := api.Opts{
		Method: "GET",
		Path:   "/drive/root:/" + replaceReservedChars(path),
	}
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(&opts, nil, &info)
		return shouldRetry(resp, err)
	})
	return info, resp, err
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
	root = parsePath(root)
	oAuthClient, err := oauthutil.NewClient(name, oauthConfig)
	if err != nil {
		log.Fatalf("Failed to configure One Drive: %v", err)
	}

	f := &Fs{
		name:  name,
		root:  root,
		srv:   api.NewClient(oAuthClient),
		pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
	}

	// Get rootID
	rootInfo, _, err := f.readMetaDataForPath("")
	if err != nil || rootInfo.ID == "" {
		return nil, fmt.Errorf("Failed to get root: %v", err)
	}

	f.dirCache = dircache.New(root, rootInfo.ID, f)

	// Find the current root
	err = f.dirCache.FindRoot(false)
	if err != nil {
		// Assume it is a file
		newRoot, remote := dircache.SplitPath(root)
		newF := *f
		newF.dirCache = dircache.New(newRoot, rootInfo.ID, &newF)
		newF.root = newRoot
		// Make new Fs which is the parent
		err = newF.dirCache.FindRoot(false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		obj := newF.newObjectWithInfo(remote, nil)
		if obj == nil {
			// File doesn't exist so return old f
			return f, nil
		}
		// return a Fs Limited to this object
		return fs.NewLimited(&newF, obj), nil
	}
	return f, nil
}
|
||||
// rootSlash returns root with a slash on if it is empty, otherwise empty string
|
||||
func (f *Fs) rootSlash() string {
|
||||
if f.root == "" {
|
||||
return f.root
|
||||
}
|
||||
return f.root + "/"
|
||||
}
|
||||
|
||||
// Return an Object from a path
|
||||
//
|
||||
// May return nil if an error occurred
|
||||
func (f *Fs) newObjectWithInfo(remote string, info *api.Item) fs.Object {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
if info != nil {
|
||||
// Set info
|
||||
o.setMetaData(info)
|
||||
} else {
|
||||
err := o.readMetaData() // reads info and meta, returning an error
|
||||
if err != nil {
|
||||
// logged already FsDebug("Failed to read info: %s", err)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// NewFsObject returns an Object from a path
|
||||
//
|
||||
// May return nil if an error occurred
|
||||
func (f *Fs) NewFsObject(remote string) fs.Object {
|
||||
return f.newObjectWithInfo(remote, nil)
|
||||
}
|
||||
|
||||
// FindLeaf finds a directory of name leaf in the folder with ID pathID
|
||||
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
|
||||
// fs.Debug(f, "FindLeaf(%q, %q)", pathID, leaf)
|
||||
parent, ok := f.dirCache.GetInv(pathID)
|
||||
if !ok {
|
||||
return "", false, fmt.Errorf("Couldn't find parent ID")
|
||||
}
|
||||
path := leaf
|
||||
if parent != "" {
|
||||
path = parent + "/" + path
|
||||
}
|
||||
if f.dirCache.FoundRoot() {
|
||||
path = f.rootSlash() + path
|
||||
}
|
||||
info, resp, err := f.readMetaDataForPath(path)
|
||||
if err != nil {
|
||||
if resp != nil && resp.StatusCode == http.StatusNotFound {
|
||||
return "", false, nil
|
||||
}
|
||||
return "", false, err
|
||||
}
|
||||
if info.Folder == nil {
|
||||
return "", false, fmt.Errorf("Found file when looking for folder")
|
||||
}
|
||||
return info.ID, true, nil
|
||||
}
|
||||
|
||||
// CreateDir makes a directory with pathID as parent and name leaf
|
||||
func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
|
||||
// fs.Debug(f, "CreateDir(%q, %q)\n", pathID, leaf)
|
||||
var resp *http.Response
|
||||
var info *api.Item
|
||||
opts := api.Opts{
|
||||
Method: "POST",
|
||||
Path: "/drive/items/" + pathID + "/children",
|
||||
}
|
||||
mkdir := api.CreateItemRequest{
|
||||
Name: replaceReservedChars(leaf),
|
||||
ConflictBehavior: "fail",
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(&opts, &mkdir, &info)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
//fmt.Printf("...Error %v\n", err)
|
||||
return "", err
|
||||
}
|
||||
//fmt.Printf("...Id %q\n", *info.Id)
|
||||
return info.ID, nil
|
||||
}
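
FindLeaf and CreateDir are the two callbacks a dircache-style lookup needs to resolve a path into an item ID, one component at a time. A minimal sketch of the walk they support (not rclone's dircache code; findDir and the create flag are illustrative names):

func findDir(f *Fs, rootID, path string, create bool) (string, error) {
    if path == "" {
        return rootID, nil
    }
    id := rootID
    for _, leaf := range strings.Split(path, "/") {
        newID, found, err := f.FindLeaf(id, leaf)
        if err != nil {
            return "", err
        }
        if !found {
            if !create {
                return "", fmt.Errorf("directory %q not found", leaf)
            }
            // Make the missing component and carry on with its ID
            newID, err = f.CreateDir(id, leaf)
            if err != nil {
                return "", err
            }
        }
        id = newID
    }
    return id, nil
}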

// User function to process a File item from listAll
//
// Should return true to finish processing
type listAllFn func(*api.Item) bool

// listAll lists the objects in the directory dirID into the function
// supplied, calling fn on each item found.
//
// If directoriesOnly is set it only sends directories (and filesOnly
// likewise only files).  If fn ever returns true then listAll exits
// early with found = true.
func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
    // Top parameter asks for bigger pages of data
    // https://dev.onedrive.com/odata/optional-query-parameters.htm
    opts := api.Opts{
        Method: "GET",
        Path:   "/drive/items/" + dirID + "/children?top=1000",
    }
OUTER:
    for {
        var result api.ListChildrenResponse
        var resp *http.Response
        err = f.pacer.Call(func() (bool, error) {
            resp, err = f.srv.CallJSON(&opts, nil, &result)
            return shouldRetry(resp, err)
        })
        if err != nil {
            fs.Stats.Error()
            fs.ErrorLog(f, "Couldn't list files: %v", err)
            break
        }
        if len(result.Value) == 0 {
            break
        }
        for i := range result.Value {
            item := &result.Value[i]
            isFolder := item.Folder != nil
            if isFolder {
                if filesOnly {
                    continue
                }
            } else {
                if directoriesOnly {
                    continue
                }
            }
            if item.Deleted != nil {
                continue
            }
            item.Name = restoreReservedChars(item.Name)
            if fn(item) {
                found = true
                break OUTER
            }
        }
        if result.NextLink == "" {
            break
        }
        opts.Path = result.NextLink
        opts.Absolute = true
    }
    return
}
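
The listAllFn callback gives callers early-exit control over the paged listing. Counting the files (not folders) in a directory, for instance, is just this (a sketch; dirID is assumed to be a valid item ID):

    count := 0
    _, err := f.listAll(dirID, false, true, func(item *api.Item) bool {
        count++
        return false // never finish early, so every page is fetched
    })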

// Path should be directory path either "" or "path/"
//
// List the directory using a recursive list from the root
//
// This fetches the minimum amount of stuff but does more API calls
// which makes it slow
func (f *Fs) listDirRecursive(dirID string, path string, out fs.ObjectsChan) error {
    // subError is written from the per-directory goroutines - the last
    // writer wins
    var subError error
    // Make the API request
    var wg sync.WaitGroup
    _, err := f.listAll(dirID, false, false, func(info *api.Item) bool {
        // Recurse on directories
        if info.Folder != nil {
            wg.Add(1)
            folder := path + info.Name + "/"
            fs.Debug(f, "Reading %s", folder)
            go func() {
                defer wg.Done()
                err := f.listDirRecursive(info.ID, folder, out)
                if err != nil {
                    subError = err
                    fs.ErrorLog(f, "Error reading %s: %s", folder, err)
                }
            }()
        } else {
            if fs := f.newObjectWithInfo(path+info.Name, info); fs != nil {
                out <- fs
            }
        }
        return false
    })
    wg.Wait()
    fs.Debug(f, "Finished reading %s", path)
    if err != nil {
        return err
    }
    if subError != nil {
        return subError
    }
    return nil
}

// List walks the path returning a channel of Objects
func (f *Fs) List() fs.ObjectsChan {
    out := make(fs.ObjectsChan, fs.Config.Checkers)
    go func() {
        defer close(out)
        err := f.dirCache.FindRoot(false)
        if err != nil {
            fs.Stats.Error()
            fs.ErrorLog(f, "Couldn't find root: %s", err)
        } else {
            err = f.listDirRecursive(f.dirCache.RootID(), "", out)
            if err != nil {
                fs.Stats.Error()
                fs.ErrorLog(f, "List failed: %s", err)
            }
        }
    }()
    return out
}

// ListDir lists the directories
func (f *Fs) ListDir() fs.DirChan {
    out := make(fs.DirChan, fs.Config.Checkers)
    go func() {
        defer close(out)
        err := f.dirCache.FindRoot(false)
        if err != nil {
            fs.Stats.Error()
            fs.ErrorLog(f, "Couldn't find root: %s", err)
        } else {
            _, err := f.listAll(f.dirCache.RootID(), true, false, func(item *api.Item) bool {
                dir := &fs.Dir{
                    Name:  item.Name,
                    Bytes: -1,
                    Count: -1,
                    When:  time.Time(item.LastModifiedDateTime),
                }
                if item.Folder != nil {
                    dir.Count = item.Folder.ChildCount
                }
                out <- dir
                return false
            })
            if err != nil {
                fs.Stats.Error()
                fs.ErrorLog(f, "ListDir failed: %s", err)
            }
        }
    }()
    return out
}

// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object, leaf, directoryID and error
//
// Used to create new objects
func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
    // Create the directory for the object if it doesn't exist
    leaf, directoryID, err = f.dirCache.FindPath(remote, true)
    if err != nil {
        return nil, leaf, directoryID, err
    }
    // Temporary Object under construction
    o = &Object{
        fs:     f,
        remote: remote,
    }
    return o, leaf, directoryID, nil
}

// Put the object into the container
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
    o, _, _, err := f.createObject(remote, modTime, size)
    if err != nil {
        return nil, err
    }
    return o, o.Update(in, modTime, size)
}

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir() error {
    return f.dirCache.FindRoot(true)
}

// deleteObject removes an object by ID
func (f *Fs) deleteObject(id string) error {
    opts := api.Opts{
        Method:     "DELETE",
        Path:       "/drive/items/" + id,
        NoResponse: true,
    }
    return f.pacer.Call(func() (bool, error) {
        resp, err := f.srv.Call(&opts)
        return shouldRetry(resp, err)
    })
}

// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(check bool) error {
    if f.root == "" {
        return fmt.Errorf("Can't purge root directory")
    }
    dc := f.dirCache
    err := dc.FindRoot(false)
    if err != nil {
        return err
    }
    rootID := dc.RootID()
    item, _, err := f.readMetaDataForPath(f.root)
    if err != nil {
        return err
    }
    if item.Folder == nil {
        return fmt.Errorf("Not a folder")
    }
    if check && item.Folder.ChildCount != 0 {
        return fmt.Errorf("Folder not empty")
    }
    err = f.deleteObject(rootID)
    if err != nil {
        return err
    }
    f.dirCache.ResetRoot()
    return nil
}

// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir() error {
    return f.purgeCheck(true)
}

// Precision returns the precision of this Fs
func (f *Fs) Precision() time.Duration {
    return time.Second
}

// waitForJob waits for the job with status in url to complete
func (f *Fs) waitForJob(location string, o *Object) error {
    deadline := time.Now().Add(fs.Config.Timeout)
    for time.Now().Before(deadline) {
        opts := api.Opts{
            Method:   "GET",
            Path:     location,
            Absolute: true,
        }
        var resp *http.Response
        var err error
        err = f.pacer.Call(func() (bool, error) {
            resp, err = f.srv.Call(&opts)
            return shouldRetry(resp, err)
        })
        if err != nil {
            return err
        }
        if resp.StatusCode == 202 {
            var status api.AsyncOperationStatus
            err = api.DecodeJSON(resp, &status)
            if err != nil {
                return err
            }
            if status.Status == "failed" || status.Status == "deleteFailed" {
                return fmt.Errorf("Async operation %q returned %q", status.Operation, status.Status)
            }
        } else {
            var info api.Item
            err = api.DecodeJSON(resp, &info)
            if err != nil {
                return err
            }
            o.setMetaData(&info)
            return nil
        }
        time.Sleep(1 * time.Second)
    }
    return fmt.Errorf("Async operation didn't complete after %v", fs.Config.Timeout)
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
    srcObj, ok := src.(*Object)
    if !ok {
        fs.Debug(src, "Can't copy - not same remote type")
        return nil, fs.ErrorCantCopy
    }
    err := srcObj.readMetaData()
    if err != nil {
        return nil, err
    }

    // Create temporary object
    dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
    if err != nil {
        return nil, err
    }

    // Copy the object
    opts := api.Opts{
        Method:       "POST",
        Path:         "/drive/items/" + srcObj.id + "/action.copy",
        ExtraHeaders: map[string]string{"Prefer": "respond-async"},
        NoResponse:   true,
    }
    replacedLeaf := replaceReservedChars(leaf)
    copy := api.CopyItemRequest{
        Name: &replacedLeaf,
        ParentReference: api.ItemReference{
            ID: directoryID,
        },
    }
    var resp *http.Response
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallJSON(&opts, &copy, nil)
        return shouldRetry(resp, err)
    })
    if err != nil {
        return nil, err
    }

    // read location header
    location := resp.Header.Get("Location")
    if location == "" {
        return nil, fmt.Errorf("Didn't receive location header in copy response")
    }

    // Wait for job to finish
    err = f.waitForJob(location, dstObj)
    if err != nil {
        return nil, err
    }
    return dstObj, nil
}

// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge() error {
    return f.purgeCheck(false)
}

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Fs {
    return o.fs
}

// Return a string version
func (o *Object) String() string {
    if o == nil {
        return "<nil>"
    }
    return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
    return o.remote
}

// srvPath returns a path for use in server
func (o *Object) srvPath() string {
    return replaceReservedChars(o.fs.rootSlash() + o.remote)
}

// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Md5sum() (string, error) {
    return "", nil // not supported by one drive
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
    err := o.readMetaData()
    if err != nil {
        fs.Log(o, "Failed to read metadata: %s", err)
        return 0
    }
    return o.size
}

// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *api.Item) {
    o.hasMetaData = true
    o.size = info.Size
    if info.FileSystemInfo != nil {
        o.modTime = time.Time(info.FileSystemInfo.LastModifiedDateTime)
    } else {
        o.modTime = time.Time(info.LastModifiedDateTime)
    }
    o.id = info.ID
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
    if o.hasMetaData {
        return nil
    }
    // leaf, directoryID, err := o.fs.dirCache.FindPath(o.remote, false)
    // if err != nil {
    // 	return err
    // }
    info, _, err := o.fs.readMetaDataForPath(o.srvPath())
    if err != nil {
        fs.Debug(o, "Failed to read info: %s", err)
        return err
    }
    o.setMetaData(info)
    return nil
}

// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime() time.Time {
    err := o.readMetaData()
    if err != nil {
        fs.Log(o, "Failed to read metadata: %s", err)
        return time.Now()
    }
    return o.modTime
}

// setModTime sets the modification time of the local fs object
func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
    opts := api.Opts{
        Method: "PATCH",
        Path:   "/drive/root:/" + o.srvPath(),
    }
    update := api.SetFileSystemInfo{
        FileSystemInfo: api.FileSystemInfoFacet{
            CreatedDateTime:      api.Timestamp(modTime),
            LastModifiedDateTime: api.Timestamp(modTime),
        },
    }
    var info *api.Item
    err := o.fs.pacer.Call(func() (bool, error) {
        resp, err := o.fs.srv.CallJSON(&opts, &update, &info)
        return shouldRetry(resp, err)
    })
    return info, err
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) {
    info, err := o.setModTime(modTime)
    if err != nil {
        fs.Stats.Error()
        fs.ErrorLog(o, "Failed to update remote mtime: %v", err)
        return // info is nil on error so don't use it
    }
    o.setMetaData(info)
}

// Storable returns a boolean showing whether this object storable
func (o *Object) Storable() bool {
    return true
}

// Open an object for read
func (o *Object) Open() (in io.ReadCloser, err error) {
    if o.id == "" {
        return nil, fmt.Errorf("Can't download - no id")
    }
    var resp *http.Response
    opts := api.Opts{
        Method: "GET",
        Path:   "/drive/items/" + o.id + "/content",
    }
    err = o.fs.pacer.Call(func() (bool, error) {
        resp, err = o.fs.srv.Call(&opts)
        return shouldRetry(resp, err)
    })
    if err != nil {
        return nil, err
    }
    return resp.Body, err
}

// createUploadSession creates an upload session for the object
func (o *Object) createUploadSession() (response *api.CreateUploadResponse, err error) {
    opts := api.Opts{
        Method: "POST",
        Path:   "/drive/root:/" + o.srvPath() + ":/upload.createSession",
    }
    var resp *http.Response
    err = o.fs.pacer.Call(func() (bool, error) {
        resp, err = o.fs.srv.CallJSON(&opts, nil, &response)
        return shouldRetry(resp, err)
    })
    return
}

// uploadFragment uploads a part
func (o *Object) uploadFragment(url string, start int64, totalSize int64, buf []byte) (err error) {
    bufSize := int64(len(buf))
    opts := api.Opts{
        Method:        "PUT",
        Path:          url,
        Absolute:      true,
        ContentLength: &bufSize,
        ContentRange:  fmt.Sprintf("bytes %d-%d/%d", start, start+bufSize-1, totalSize),
        Body:          bytes.NewReader(buf),
    }
    var response api.UploadFragmentResponse
    var resp *http.Response
    err = o.fs.pacer.Call(func() (bool, error) {
        resp, err = o.fs.srv.CallJSON(&opts, nil, &response)
        return shouldRetry(resp, err)
    })
    return err
}

// cancelUploadSession cancels an upload session
func (o *Object) cancelUploadSession(url string) (err error) {
    opts := api.Opts{
        Method:     "DELETE",
        Path:       url,
        Absolute:   true,
        NoResponse: true,
    }
    var resp *http.Response
    err = o.fs.pacer.Call(func() (bool, error) {
        resp, err = o.fs.srv.Call(&opts)
        return shouldRetry(resp, err)
    })
    return
}

// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(in io.Reader, size int64) (err error) {
    if chunkSize%(320*1024) != 0 {
        return fmt.Errorf("Chunk size %d is not a multiple of 320k", chunkSize)
    }

    // Create upload session
    fs.Debug(o, "Starting multipart upload")
    session, err := o.createUploadSession()
    if err != nil {
        return err
    }
    uploadURL := session.UploadURL

    // Cancel the session if something went wrong
    defer func() {
        if err != nil {
            fs.Debug(o, "Cancelling multipart upload")
            cancelErr := o.cancelUploadSession(uploadURL)
            if cancelErr != nil {
                fs.Log(o, "Failed to cancel multipart upload: %v", cancelErr)
            }
        }
    }()

    // Upload the chunks
    remaining := size
    position := int64(0)
    buf := make([]byte, int64(chunkSize))
    for remaining > 0 {
        n := int64(chunkSize)
        if remaining < n {
            n = remaining
            buf = buf[:n]
        }
        _, err = io.ReadFull(in, buf)
        if err != nil {
            return err
        }
        fs.Debug(o, "Uploading segment %d/%d size %d", position, size, n)
        err = o.uploadFragment(uploadURL, position, size, buf)
        if err != nil {
            return err
        }
        remaining -= n
        position += n
    }

    return nil
}
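
The fragment protocol hinges on the Content-Range header built in uploadFragment: each fragment covers bytes start to start+len(buf)-1 of a totalSize byte file. Worked through for the minimum 320 KiB chunk size and a 700 KiB (716800 byte) file, the three fragments sent would be:

    bytes 0-327679/716800
    bytes 327680-655359/716800
    bytes 655360-716799/716800   (the final, short fragment)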

// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, modTime time.Time, size int64) (err error) {
    var info *api.Item
    if size <= int64(uploadCutoff) {
        // This is for less than 100 MB of content
        var resp *http.Response
        opts := api.Opts{
            Method: "PUT",
            Path:   "/drive/root:/" + o.srvPath() + ":/content",
            Body:   in,
        }
        err = o.fs.pacer.CallNoRetry(func() (bool, error) {
            resp, err = o.fs.srv.CallJSON(&opts, nil, &info)
            return shouldRetry(resp, err)
        })
        if err != nil {
            return err
        }
        o.setMetaData(info)
    } else {
        err = o.uploadMultipart(in, size)
        if err != nil {
            return err
        }
    }
    // Set the mod time now and read metadata
    info, err = o.setModTime(modTime)
    if err != nil {
        return err
    }
    o.setMetaData(info)
    return nil
}

// Remove an object
func (o *Object) Remove() error {
    return o.fs.deleteObject(o.id)
}

// Check the interfaces are satisfied
var (
    _ fs.Fs     = (*Fs)(nil)
    _ fs.Purger = (*Fs)(nil)
    // _ fs.Copier = (*Fs)(nil)
    // _ fs.Mover = (*Fs)(nil)
    // _ fs.DirMover = (*Fs)(nil)
    _ fs.Object = (*Object)(nil)
)

56 onedrive/onedrive_test.go Normal file
@@ -0,0 +1,56 @@
// Test OneDrive filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package onedrive_test

import (
    "testing"

    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fstest/fstests"
    "github.com/ncw/rclone/onedrive"
)

func init() {
    fstests.NilObject = fs.Object((*onedrive.Object)(nil))
    fstests.RemoteName = "TestOneDrive:"
}

// Generic tests for the Fs
func TestInit(t *testing.T)                  { fstests.TestInit(t) }
func TestFsString(t *testing.T)              { fstests.TestFsString(t) }
func TestFsRmdirEmpty(t *testing.T)          { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T)       { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T)               { fstests.TestFsMkdir(t) }
func TestFsListEmpty(t *testing.T)           { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T)        { fstests.TestFsListDirEmpty(t) }
func TestFsNewFsObjectNotFound(t *testing.T) { fstests.TestFsNewFsObjectNotFound(t) }
func TestFsPutFile1(t *testing.T)            { fstests.TestFsPutFile1(t) }
func TestFsPutFile2(t *testing.T)            { fstests.TestFsPutFile2(t) }
func TestFsListDirFile2(t *testing.T)        { fstests.TestFsListDirFile2(t) }
func TestFsListDirRoot(t *testing.T)         { fstests.TestFsListDirRoot(t) }
func TestFsListRoot(t *testing.T)            { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T)           { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T)         { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T)       { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T)                { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T)                { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T)             { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T)           { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T)           { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T)          { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T)              { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T)          { fstests.TestObjectRemote(t) }
func TestObjectMd5sum(t *testing.T)          { fstests.TestObjectMd5sum(t) }
func TestObjectModTime(t *testing.T)         { fstests.TestObjectModTime(t) }
func TestObjectSetModTime(t *testing.T)      { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T)            { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T)            { fstests.TestObjectOpen(t) }
func TestObjectUpdate(t *testing.T)          { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T)        { fstests.TestObjectStorable(t) }
func TestLimitedFs(t *testing.T)             { fstests.TestLimitedFs(t) }
func TestLimitedFsNotFound(t *testing.T)     { fstests.TestLimitedFsNotFound(t) }
func TestObjectRemove(t *testing.T)          { fstests.TestObjectRemove(t) }
func TestObjectPurge(t *testing.T)           { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T)              { fstests.TestFinalise(t) }

91 onedrive/replace.go Normal file
@@ -0,0 +1,91 @@
/*
Translate file names for one drive

OneDrive reserved characters

The following characters are OneDrive reserved characters, and can't
be used in OneDrive folder and file names.

    onedrive-reserved = "/" / "\" / "*" / "<" / ">" / "?" / ":" / "|"
    onedrive-business-reserved
        = "/" / "\" / "*" / "<" / ">" / "?" / ":" / "|" / "#" / "%"

Note: Folder names can't end with a period (.).

Note: OneDrive for Business file or folder names cannot begin with a
tilde ('~').

*/

package onedrive

import (
    "regexp"
    "strings"
)

// charMap holds replacements for characters
//
// Onedrive has a restricted set of characters compared to other cloud
// storage systems, so we map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
    charMap = map[rune]rune{
        '\\': '＼', // FULLWIDTH REVERSE SOLIDUS
        '*':  '＊', // FULLWIDTH ASTERISK
        '<':  '＜', // FULLWIDTH LESS-THAN SIGN
        '>':  '＞', // FULLWIDTH GREATER-THAN SIGN
        '?':  '？', // FULLWIDTH QUESTION MARK
        ':':  '：', // FULLWIDTH COLON
        '|':  '｜', // FULLWIDTH VERTICAL LINE
        '#':  '＃', // FULLWIDTH NUMBER SIGN
        '%':  '％', // FULLWIDTH PERCENT SIGN
        '"':  '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
        '.':  '．', // FULLWIDTH FULL STOP
        '~':  '～', // FULLWIDTH TILDE
        ' ':  '␠', // SYMBOL FOR SPACE
    }
    invCharMap           map[rune]rune
    fixEndingInPeriod    = regexp.MustCompile(`\.(/|$)`)
    fixStartingWithTilde = regexp.MustCompile(`(/|^)~`)
    fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
)

func init() {
    // Create inverse charMap
    invCharMap = make(map[rune]rune, len(charMap))
    for k, v := range charMap {
        invCharMap[v] = k
    }
}

// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
    // Folder names can't end with a period '.'
    in = fixEndingInPeriod.ReplaceAllString(in, string(charMap['.'])+"$1")
    // OneDrive for Business file or folder names cannot begin with a tilde '~'
    in = fixStartingWithTilde.ReplaceAllString(in, "$1"+string(charMap['~']))
    // Apparently file names can't start with space either
    in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
    // Replace reserved characters
    return strings.Map(func(c rune) rune {
        if replacement, ok := charMap[c]; ok && c != '.' && c != '~' && c != ' ' {
            return replacement
        }
        return c
    }, in)
}

// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
    return strings.Map(func(c rune) rune {
        if replacement, ok := invCharMap[c]; ok {
            return replacement
        }
        return c
    }, in)
}
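
Worked through on a concrete name (the fullwidth characters on the right come from charMap above; the period and tilde are only replaced in their reserved positions, so a mid-name '.' survives):

    replaceReservedChars(`Q4: "final".`)  // -> `Q4： ＂final＂．`
    restoreReservedChars(`Q4： ＂final＂．`) // -> `Q4: "final".`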

30 onedrive/replace_test.go Normal file
@@ -0,0 +1,30 @@
package onedrive

import "testing"

func TestReplace(t *testing.T) {
    for _, test := range []struct {
        in  string
        out string
    }{
        {"", ""},
        {"abc 123", "abc 123"},
        {`\*<>?:|#%".~`, `＼＊＜＞？：｜＃％＂.~`},
        {`\*<>?:|#%".~/\*<>?:|#%".~`, `＼＊＜＞？：｜＃％＂.~/＼＊＜＞？：｜＃％＂.~`},
        {" leading space", "␠leading space"},
        {"~leading tilde", "～leading tilde"},
        {"trailing dot.", "trailing dot．"},
        {" leading space/ leading space/ leading space", "␠leading space/␠leading space/␠leading space"},
        {"~leading tilde/~leading tilde/~leading tilde", "～leading tilde/～leading tilde/～leading tilde"},
        {"trailing dot./trailing dot./trailing dot.", "trailing dot．/trailing dot．/trailing dot．"},
    } {
        got := replaceReservedChars(test.in)
        if got != test.out {
            t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
        }
        got2 := restoreReservedChars(got)
        if got2 != test.in {
            t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
        }
    }
}

293 pacer/pacer.go Normal file
@@ -0,0 +1,293 @@
// Package pacer makes pacing and retrying API calls easy
package pacer

import (
    "math/rand"
    "sync"
    "time"

    "github.com/ncw/rclone/fs"
)

// Pacer state
type Pacer struct {
    mu                 sync.Mutex    // Protecting read/writes
    minSleep           time.Duration // minimum sleep time
    maxSleep           time.Duration // maximum sleep time
    decayConstant      uint          // decay constant
    pacer              chan struct{} // To pace the operations
    sleepTime          time.Duration // Time to sleep for each transaction
    retries            int           // Max number of retries
    maxConnections     int           // Maximum number of concurrent connections
    connTokens         chan struct{} // Connection tokens
    calculatePace      func(bool)    // switchable pacing algorithm - call with mu held
    consecutiveRetries int           // number of consecutive retries
}

// Type is for selecting different pacing algorithms
type Type int

const (
    // DefaultPacer is a truncated exponential attack and decay.
    //
    // On retries the sleep time is doubled, on non errors the
    // sleep time decays according to the decay constant as set
    // with SetDecayConstant.
    //
    // The sleep never goes below that set with SetMinSleep or
    // above that set with SetMaxSleep.
    DefaultPacer = Type(iota)

    // AmazonCloudDrivePacer is a specialised pacer for Amazon Cloud Drive
    //
    // It implements a truncated exponential backoff strategy with
    // randomization.  Normally operations are paced at the
    // interval set with SetMinSleep.  On errors the sleep timer
    // is set to 0..2**retries seconds.
    //
    // See https://developer.amazon.com/public/apis/experience/cloud-drive/content/restful-api-best-practices
    AmazonCloudDrivePacer
)

// Paced is a function which is called by the Call and CallNoRetry
// methods.  It should return a boolean, true if it would like to be
// retried, and an error.  This error may be returned or returned
// wrapped in a RetryError.
type Paced func() (bool, error)

// New returns a Pacer with sensible defaults
func New() *Pacer {
    p := &Pacer{
        minSleep:      10 * time.Millisecond,
        maxSleep:      2 * time.Second,
        decayConstant: 2,
        retries:       10,
        pacer:         make(chan struct{}, 1),
    }
    p.sleepTime = p.minSleep
    p.SetPacer(DefaultPacer)
    p.SetMaxConnections(fs.Config.Checkers + fs.Config.Transfers)

    // Put the first pacing token in
    p.pacer <- struct{}{}

    return p
}

// SetMinSleep sets the minimum sleep time for the pacer
func (p *Pacer) SetMinSleep(t time.Duration) *Pacer {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.minSleep = t
    p.sleepTime = p.minSleep
    return p
}

// SetMaxSleep sets the maximum sleep time for the pacer
func (p *Pacer) SetMaxSleep(t time.Duration) *Pacer {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.maxSleep = t
    p.sleepTime = p.minSleep
    return p
}

// SetMaxConnections sets the maximum number of concurrent connections.
// Setting the value to 0 allows an unlimited number of connections.
// Should not be changed once you have started calling the pacer.
// By default this is set to fs.Config.Checkers + fs.Config.Transfers.
func (p *Pacer) SetMaxConnections(n int) *Pacer {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.maxConnections = n
    if n <= 0 {
        p.connTokens = nil
    } else {
        p.connTokens = make(chan struct{}, n)
        for i := 0; i < n; i++ {
            p.connTokens <- struct{}{}
        }
    }
    return p
}

// SetDecayConstant sets the decay constant for the pacer
//
// This is the speed the time falls back to the minimum after errors
// have occurred.
//
// bigger for slower decay, exponential
func (p *Pacer) SetDecayConstant(decay uint) *Pacer {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.decayConstant = decay
    return p
}

// SetRetries sets the max number of tries for Call
func (p *Pacer) SetRetries(retries int) *Pacer {
    p.mu.Lock()
    defer p.mu.Unlock()
    p.retries = retries
    return p
}

// SetPacer sets the pacing algorithm
//
// It will choose the default algorithm if an incorrect value is
// passed in.
func (p *Pacer) SetPacer(t Type) *Pacer {
    p.mu.Lock()
    defer p.mu.Unlock()
    switch t {
    case AmazonCloudDrivePacer:
        p.calculatePace = p.acdPacer
    default:
        p.calculatePace = p.defaultPacer
    }
    return p
}

// beginCall starts a call to the API
//
// This must be called as a pair with endCall
//
// This waits for the pacer token
func (p *Pacer) beginCall() {
    // pacer starts with a token in and whenever we take one out
    // XXX ms later we put another in.  We could do this with a
    // Ticker more accurately, but then we'd have to work out how
    // not to run it when it wasn't needed
    <-p.pacer
    if p.maxConnections > 0 {
        <-p.connTokens
    }

    p.mu.Lock()
    // Restart the timer
    go func(t time.Duration) {
        // fs.Debug(f, "New sleep for %v at %v", t, time.Now())
        time.Sleep(t)
        p.pacer <- struct{}{}
    }(p.sleepTime)
    p.mu.Unlock()
}

// defaultPacer implements a truncated exponential up and down
// pacing algorithm
//
// See the description for DefaultPacer
//
// This should calculate a new sleepTime.  It takes a boolean as to
// whether the operation should be retried or not.
//
// Call with p.mu held
func (p *Pacer) defaultPacer(retry bool) {
    oldSleepTime := p.sleepTime
    if retry {
        p.sleepTime *= 2
        if p.sleepTime > p.maxSleep {
            p.sleepTime = p.maxSleep
        }
        if p.sleepTime != oldSleepTime {
            fs.Debug("pacer", "Rate limited, increasing sleep to %v", p.sleepTime)
        }
    } else {
        p.sleepTime = (p.sleepTime<<p.decayConstant - p.sleepTime) >> p.decayConstant
        if p.sleepTime < p.minSleep {
            p.sleepTime = p.minSleep
        }
        if p.sleepTime != oldSleepTime {
            fs.Debug("pacer", "Reducing sleep to %v", p.sleepTime)
        }
    }
}
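
// Worked example: with the default decayConstant of 2 the decay step is
// (s<<2 - s)>>2 = 3s/4, so a 1s sleepTime decays 1s -> 750ms -> 562.5ms
// -> ... towards minSleep, while each retry doubles it towards maxSleep.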

// acdPacer implements a truncated exponential backoff
// strategy with randomization for Amazon Cloud Drive
//
// See the description for AmazonCloudDrivePacer
//
// This should calculate a new sleepTime.  It takes a boolean as to
// whether the operation should be retried or not.
//
// Call with p.mu held
func (p *Pacer) acdPacer(retry bool) {
    consecutiveRetries := p.consecutiveRetries
    if consecutiveRetries == 0 {
        if p.sleepTime != p.minSleep {
            p.sleepTime = p.minSleep
            fs.Debug("pacer", "Resetting sleep to minimum %v on success", p.sleepTime)
        }
    } else {
        if consecutiveRetries > 9 {
            consecutiveRetries = 9
        }
        // consecutiveRetries starts at 1 so
        // maxSleep is 2**(consecutiveRetries-1) seconds
        maxSleep := time.Second << uint(consecutiveRetries-1)
        // actual sleep is random from 0..maxSleep
        p.sleepTime = time.Duration(rand.Int63n(int64(maxSleep)))
        if p.sleepTime < p.minSleep {
            p.sleepTime = p.minSleep
        }
        fs.Debug("pacer", "Rate limited, sleeping for %v (%d retries)", p.sleepTime, consecutiveRetries)
    }
}
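
// Worked example: after 3 consecutive retries maxSleep is 1s<<2 = 4s and
// the sleep is uniform on [0, 4s), so the mean is about 2s - which is the
// figure TestAmazonCloudDrivePacer checks against below.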

// endCall implements the pacing algorithm
//
// This should calculate a new sleepTime.  It takes a boolean as to
// whether the operation should be retried or not.
func (p *Pacer) endCall(retry bool) {
    if p.maxConnections > 0 {
        p.connTokens <- struct{}{}
    }
    p.mu.Lock()
    if retry {
        p.consecutiveRetries++
    } else {
        p.consecutiveRetries = 0
    }
    p.calculatePace(retry)
    p.mu.Unlock()
}

// call implements Call but with settable retries
func (p *Pacer) call(fn Paced, retries int) (err error) {
    var retry bool
    for i := 0; i < retries; i++ {
        p.beginCall()
        retry, err = fn()
        p.endCall(retry)
        if !retry {
            break
        }
    }
    if retry {
        err = fs.RetryError(err)
    }
    return err
}

// Call paces the remote operations to not exceed the limits and retry
// on rate limit exceeded
//
// This calls fn, expecting it to return a retry flag and an
// error.  This error may be returned wrapped in a RetryError if the
// number of retries is exceeded.
func (p *Pacer) Call(fn Paced) (err error) {
    p.mu.Lock()
    retries := p.retries
    p.mu.Unlock()
    return p.call(fn, retries)
}

// CallNoRetry paces the remote operations to not exceed the limits
// and return a retry error on rate limit exceeded
//
// This calls fn and wraps the output in a RetryError if it would like
// it to be retried
func (p *Pacer) CallNoRetry(fn Paced) error {
    return p.call(fn, 1)
}
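
How a backend drives this API, as a minimal sketch using only the exported calls above (doAPICall is a hypothetical stand-in for a real HTTP request):

    p := pacer.New().SetMinSleep(100 * time.Millisecond).SetPacer(pacer.DefaultPacer)
    err := p.Call(func() (bool, error) {
        resp, err := doAPICall()
        if err != nil {
            return true, err // ask to be retried, backing the pacer off
        }
        // retry on rate limiting; a success lets the sleep time decay again
        return resp.StatusCode == 429, nil
    })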

324 pacer/pacer_test.go Normal file
@@ -0,0 +1,324 @@
package pacer

import (
    "fmt"
    "testing"
    "time"

    "github.com/ncw/rclone/fs"
)

func TestNew(t *testing.T) {
    p := New()
    if p.minSleep != 10*time.Millisecond {
        t.Errorf("minSleep")
    }
    if p.maxSleep != 2*time.Second {
        t.Errorf("maxSleep")
    }
    if p.sleepTime != p.minSleep {
        t.Errorf("sleepTime")
    }
    if p.retries != 10 {
        t.Errorf("retries")
    }
    if p.decayConstant != 2 {
        t.Errorf("decayConstant")
    }
    if cap(p.pacer) != 1 {
        t.Errorf("pacer 1")
    }
    if len(p.pacer) != 1 {
        t.Errorf("pacer 2")
    }
    if fmt.Sprintf("%p", p.calculatePace) != fmt.Sprintf("%p", p.defaultPacer) {
        t.Errorf("calculatePace")
    }
    if p.maxConnections != fs.Config.Checkers+fs.Config.Transfers {
        t.Errorf("maxConnections")
    }
    if cap(p.connTokens) != fs.Config.Checkers+fs.Config.Transfers {
        t.Errorf("connTokens")
    }
    if p.consecutiveRetries != 0 {
        t.Errorf("consecutiveRetries")
    }
}

func TestSetMinSleep(t *testing.T) {
    p := New().SetMinSleep(1 * time.Millisecond)
    if p.minSleep != 1*time.Millisecond {
        t.Errorf("didn't set")
    }
}

func TestSetMaxSleep(t *testing.T) {
    p := New().SetMaxSleep(100 * time.Second)
    if p.maxSleep != 100*time.Second {
        t.Errorf("didn't set")
    }
}

func TestMaxConnections(t *testing.T) {
    p := New().SetMaxConnections(20)
    if p.maxConnections != 20 {
        t.Errorf("maxConnections")
    }
    if cap(p.connTokens) != 20 {
        t.Errorf("connTokens")
    }
    p.SetMaxConnections(0)
    if p.maxConnections != 0 {
        t.Errorf("maxConnections is not 0")
    }
    if p.connTokens != nil {
        t.Errorf("connTokens is not nil")
    }
}

func TestSetDecayConstant(t *testing.T) {
    p := New().SetDecayConstant(17)
    if p.decayConstant != 17 {
        t.Errorf("didn't set")
    }
}

func TestSetRetries(t *testing.T) {
    p := New().SetRetries(18)
    if p.retries != 18 {
        t.Errorf("didn't set")
    }
}

func TestSetPacer(t *testing.T) {
    p := New().SetPacer(AmazonCloudDrivePacer)
    if fmt.Sprintf("%p", p.calculatePace) != fmt.Sprintf("%p", p.acdPacer) {
        t.Errorf("calculatePace is not acdPacer")
    }
    p.SetPacer(DefaultPacer)
    if fmt.Sprintf("%p", p.calculatePace) != fmt.Sprintf("%p", p.defaultPacer) {
        t.Errorf("calculatePace is not defaultPacer")
    }
}

// emptyTokens empties the pacer of all its tokens
func emptyTokens(p *Pacer) {
    for len(p.pacer) != 0 {
        <-p.pacer
    }
    for len(p.connTokens) != 0 {
        <-p.connTokens
    }
}

// waitForPace waits for duration for the pace to arrive
// returns the time that it arrived or a zero time
func waitForPace(p *Pacer, duration time.Duration) (when time.Time) {
    select {
    case <-time.After(duration):
        return
    case <-p.pacer:
        return time.Now()
    }
}

func TestBeginCall(t *testing.T) {
    p := New().SetMaxConnections(10).SetMinSleep(1 * time.Millisecond)
    emptyTokens(p)
    go p.beginCall()
    if !waitForPace(p, 10*time.Millisecond).IsZero() {
        t.Errorf("beginSleep fired too early #1")
    }
    startTime := time.Now()
    p.pacer <- struct{}{}
    time.Sleep(1 * time.Millisecond)
    connTime := time.Now()
    p.connTokens <- struct{}{}
    time.Sleep(1 * time.Millisecond)
    paceTime := waitForPace(p, 10*time.Millisecond)
    if paceTime.IsZero() {
        t.Errorf("beginSleep didn't fire")
    } else if paceTime.Sub(startTime) < 0 {
        t.Errorf("pace arrived before returning pace token")
    } else if paceTime.Sub(connTime) < 0 {
        t.Errorf("pace arrived before sending conn token")
    }
}

func TestBeginCallZeroConnections(t *testing.T) {
    p := New().SetMaxConnections(0).SetMinSleep(1 * time.Millisecond)
    emptyTokens(p)
    go p.beginCall()
    if !waitForPace(p, 10*time.Millisecond).IsZero() {
        t.Errorf("beginSleep fired too early #1")
    }
    startTime := time.Now()
    p.pacer <- struct{}{}
    time.Sleep(1 * time.Millisecond)
    paceTime := waitForPace(p, 10*time.Millisecond)
    if paceTime.IsZero() {
        t.Errorf("beginSleep didn't fire")
    } else if paceTime.Sub(startTime) < 0 {
        t.Errorf("pace arrived before returning pace token")
    }
}

func TestDefaultPacer(t *testing.T) {
    p := New().SetMinSleep(time.Millisecond).SetPacer(DefaultPacer).SetMaxSleep(time.Second).SetDecayConstant(2)
    for _, test := range []struct {
        in    time.Duration
        retry bool
        want  time.Duration
    }{
        {time.Millisecond, true, 2 * time.Millisecond},
        {time.Second, true, time.Second},
        {(3 * time.Second) / 4, true, time.Second},
        {time.Second, false, 750 * time.Millisecond},
        {1000 * time.Microsecond, false, time.Millisecond},
        {1200 * time.Microsecond, false, time.Millisecond},
    } {
        p.sleepTime = test.in
        p.defaultPacer(test.retry)
        got := p.sleepTime
        if got != test.want {
            t.Errorf("bad sleep want %v got %v", test.want, got)
        }
    }
}

func TestAmazonCloudDrivePacer(t *testing.T) {
    p := New().SetMinSleep(time.Millisecond).SetPacer(AmazonCloudDrivePacer).SetMaxSleep(time.Second).SetDecayConstant(2)
    // Do lots of times because of the random number!
    for _, test := range []struct {
        in                 time.Duration
        consecutiveRetries int
        retry              bool
        want               time.Duration
    }{
        {time.Millisecond, 0, true, time.Millisecond},
        {10 * time.Millisecond, 0, true, time.Millisecond},
        {1 * time.Second, 1, true, 500 * time.Millisecond},
        {1 * time.Second, 2, true, 1 * time.Second},
        {1 * time.Second, 3, true, 2 * time.Second},
        {1 * time.Second, 4, true, 4 * time.Second},
        {1 * time.Second, 5, true, 8 * time.Second},
        {1 * time.Second, 6, true, 16 * time.Second},
        {1 * time.Second, 7, true, 32 * time.Second},
        {1 * time.Second, 8, true, 64 * time.Second},
        {1 * time.Second, 9, true, 128 * time.Second},
        {1 * time.Second, 10, true, 128 * time.Second},
        {1 * time.Second, 11, true, 128 * time.Second},
    } {
        const n = 1000
        var sum time.Duration
        // measure average time over n cycles
        for i := 0; i < n; i++ {
            p.sleepTime = test.in
            p.consecutiveRetries = test.consecutiveRetries
            p.acdPacer(test.retry)
            sum += p.sleepTime
        }
        got := sum / n
        //t.Logf("%+v: got = %v", test, got)
        if got < (test.want*9)/10 || got > (test.want*11)/10 {
            t.Fatalf("%+v: bad sleep want %v+/-10%% got %v", test, test.want, got)
        }
    }
}

func TestEndCall(t *testing.T) {
    p := New().SetMaxConnections(5)
    emptyTokens(p)
    p.consecutiveRetries = 1
    p.endCall(true)
    if len(p.connTokens) != 1 {
        t.Errorf("Expecting 1 token")
    }
    if p.consecutiveRetries != 2 {
        t.Errorf("Bad consecutive retries")
    }
}

func TestEndCallZeroConnections(t *testing.T) {
    p := New().SetMaxConnections(0)
    emptyTokens(p)
    p.consecutiveRetries = 1
    p.endCall(false)
    if len(p.connTokens) != 0 {
        t.Errorf("Expecting 0 token")
    }
    if p.consecutiveRetries != 0 {
        t.Errorf("Bad consecutive retries")
    }
}

var errFoo = fmt.Errorf("Foo")

type dummyPaced struct {
    retry  bool
    called int
}

func (dp *dummyPaced) fn() (bool, error) {
    dp.called++
    return dp.retry, errFoo
}

func Test_callNoRetry(t *testing.T) {
    p := New().SetMinSleep(time.Millisecond).SetMaxSleep(2 * time.Millisecond)

    dp := &dummyPaced{retry: false}
    err := p.call(dp.fn, 10)
    if dp.called != 1 {
        t.Errorf("called want %d got %d", 1, dp.called)
    }
    if err != errFoo {
        t.Errorf("err want %v got %v", errFoo, err)
    }
}

func Test_callRetry(t *testing.T) {
    p := New().SetMinSleep(time.Millisecond).SetMaxSleep(2 * time.Millisecond)

    dp := &dummyPaced{retry: true}
    err := p.call(dp.fn, 10)
    if dp.called != 10 {
        t.Errorf("called want %d got %d", 10, dp.called)
    }
    if err == errFoo {
        t.Errorf("err didn't want %v got %v", errFoo, err)
    }
    _, ok := err.(fs.Retry)
    if !ok {
        t.Errorf("didn't return a retry error")
    }
}

func TestCall(t *testing.T) {
    p := New().SetMinSleep(time.Millisecond).SetMaxSleep(2 * time.Millisecond).SetRetries(20)

    dp := &dummyPaced{retry: true}
    err := p.Call(dp.fn)
    if dp.called != 20 {
        t.Errorf("called want %d got %d", 20, dp.called)
    }
    _, ok := err.(fs.Retry)
    if !ok {
        t.Errorf("didn't return a retry error")
    }
}

func TestCallNoRetry(t *testing.T) {
    p := New().SetMinSleep(time.Millisecond).SetMaxSleep(2 * time.Millisecond).SetRetries(20)

    dp := &dummyPaced{retry: true}
    err := p.CallNoRetry(dp.fn)
    if dp.called != 1 {
        t.Errorf("called want %d got %d", 1, dp.called)
    }
    _, ok := err.(fs.Retry)
    if !ok {
        t.Errorf("didn't return a retry error")
    }
}

186 rclone.go
@@ -12,14 +12,16 @@ import (
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ogier/pflag"
|
||||
"github.com/spf13/pflag"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
// Active file systems
|
||||
_ "github.com/ncw/rclone/amazonclouddrive"
|
||||
_ "github.com/ncw/rclone/drive"
|
||||
_ "github.com/ncw/rclone/dropbox"
|
||||
_ "github.com/ncw/rclone/googlecloudstorage"
|
||||
_ "github.com/ncw/rclone/local"
|
||||
_ "github.com/ncw/rclone/onedrive"
|
||||
_ "github.com/ncw/rclone/s3"
|
||||
_ "github.com/ncw/rclone/swift"
|
||||
)
|
||||
@@ -31,16 +33,19 @@ var (
|
||||
statsInterval = pflag.DurationP("stats", "", time.Minute*1, "Interval to print stats (0 to disable)")
|
||||
version = pflag.BoolP("version", "V", false, "Print the version number")
|
||||
logFile = pflag.StringP("log-file", "", "", "Log everything to this file")
|
||||
retries = pflag.IntP("retries", "", 3, "Retry operations this many times if they fail")
|
||||
)
|
||||
|
||||
// Command holds info about the current running command
|
||||
type Command struct {
|
||||
Name string
|
||||
Help string
|
||||
ArgsHelp string
|
||||
Run func(fdst, fsrc fs.Fs)
|
||||
Run func(fdst, fsrc fs.Fs) error
|
||||
MinArgs int
|
||||
MaxArgs int
|
||||
NoStats bool
|
||||
Retry bool
|
||||
}
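
The error-returning Run signature, together with the new Retry field and the --retries flag, is what lets the top level re-run a failed command instead of each Run body dying with log.Fatalf. A sketch of the shape this takes (not the actual main function; the loop details are illustrative):

    for try := 1; try <= *retries; try++ {
        err := command.Run(fdst, fsrc)
        if err == nil {
            break
        }
        if !command.Retry || try == *retries {
            log.Fatalf("Failed after %d attempts: %v", try, err)
        }
        log.Printf("Attempt %d/%d failed: %v - retrying", try, *retries, err)
    }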
|
||||
|
||||
// checkArgs checks there are enough arguments and prints a message if not
|
||||
@@ -56,6 +61,7 @@ func (cmd *Command) checkArgs(args []string) {
|
||||
}
|
||||
}
|
||||
|
||||
// Commands is a slice of possible Command~s
|
||||
var Commands = []Command{
|
||||
{
|
||||
Name: "copy",
|
||||
@@ -64,14 +70,12 @@ var Commands = []Command{
|
||||
Copy the source to the destination. Doesn't transfer
|
||||
unchanged files, testing by size and modification time or
|
||||
MD5SUM. Doesn't delete files from the destination.`,
|
||||
Run: func(fdst, fsrc fs.Fs) {
|
||||
err := fs.Sync(fdst, fsrc, false)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to copy: %v", err)
|
||||
}
|
||||
Run: func(fdst, fsrc fs.Fs) error {
|
||||
return fs.CopyDir(fdst, fsrc)
|
||||
},
|
||||
MinArgs: 2,
|
||||
MaxArgs: 2,
|
||||
Retry: true,
|
||||
},
|
||||
{
|
||||
Name: "sync",
|
||||
@@ -82,69 +86,88 @@ var Commands = []Command{
|
||||
modification time or MD5SUM. Destination is updated to match
|
||||
source, including deleting files if necessary. Since this can
|
||||
cause data loss, test first with the --dry-run flag.`,
|
||||
Run: func(fdst, fsrc fs.Fs) {
|
||||
err := fs.Sync(fdst, fsrc, true)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to sync: %v", err)
|
||||
}
|
||||
Run: func(fdst, fsrc fs.Fs) error {
|
||||
return fs.Sync(fdst, fsrc)
|
||||
},
|
||||
MinArgs: 2,
|
||||
MaxArgs: 2,
|
||||
Retry: true,
|
||||
},
|
||||
{
|
||||
Name: "move",
|
||||
ArgsHelp: "source:path dest:path",
|
||||
Help: `
|
||||
Moves the source to the destination. This is equivalent to a
|
||||
copy followed by a purge, but may use server side operations
|
||||
to speed it up. Since this can cause data loss, test first
|
||||
with the --dry-run flag.`,
|
||||
Run: func(fdst, fsrc fs.Fs) error {
|
||||
return fs.MoveDir(fdst, fsrc)
|
||||
},
|
||||
MinArgs: 2,
|
||||
MaxArgs: 2,
|
||||
Retry: true,
|
||||
},
|
||||
{
|
||||
Name: "ls",
|
||||
ArgsHelp: "[remote:path]",
|
||||
ArgsHelp: "remote:path",
|
||||
Help: `
|
||||
List all the objects in the the path with size and path.`,
|
||||
Run: func(fdst, fsrc fs.Fs) {
|
||||
err := fs.List(fdst, os.Stdout)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to list: %v", err)
|
||||
}
|
||||
Run: func(fdst, fsrc fs.Fs) error {
|
||||
return fs.List(fdst, os.Stdout)
|
||||
},
|
||||
MinArgs: 1,
|
||||
MaxArgs: 1,
|
||||
},
|
||||
{
|
||||
Name: "lsd",
|
||||
ArgsHelp: "[remote:path]",
|
||||
ArgsHelp: "remote:path",
|
||||
Help: `
|
||||
List all directories/containers/buckets in the the path.`,
|
||||
Run: func(fdst, fsrc fs.Fs) {
|
||||
err := fs.ListDir(fdst, os.Stdout)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to listdir: %v", err)
|
||||
}
|
||||
Run: func(fdst, fsrc fs.Fs) error {
|
||||
return fs.ListDir(fdst, os.Stdout)
|
||||
},
|
||||
MinArgs: 1,
|
||||
MaxArgs: 1,
|
||||
},
|
||||
{
|
||||
Name: "lsl",
|
||||
ArgsHelp: "[remote:path]",
|
||||
ArgsHelp: "remote:path",
|
||||
Help: `
|
||||
List all the objects in the the path with modification time,
|
||||
size and path.`,
|
||||
Run: func(fdst, fsrc fs.Fs) {
|
||||
err := fs.ListLong(fdst, os.Stdout)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to list long: %v", err)
|
||||
}
|
||||
Run: func(fdst, fsrc fs.Fs) error {
|
||||
return fs.ListLong(fdst, os.Stdout)
|
||||
},
|
||||
MinArgs: 1,
|
||||
MaxArgs: 1,
|
||||
},
|
||||
{
|
||||
Name: "md5sum",
|
||||
ArgsHelp: "[remote:path]",
|
||||
ArgsHelp: "remote:path",
|
||||
Help: `
|
||||
Produces an md5sum file for all the objects in the path. This
|
||||
is in the same format as the standard md5sum tool produces.`,
|
||||
Run: func(fdst, fsrc fs.Fs) {
|
||||
err := fs.Md5sum(fdst, os.Stdout)
|
||||
Run: func(fdst, fsrc fs.Fs) error {
|
||||
return fs.Md5sum(fdst, os.Stdout)
|
||||
},
|
||||
MinArgs: 1,
|
||||
MaxArgs: 1,
|
||||
},
|
||||
{
|
||||
Name: "size",
|
||||
ArgsHelp: "remote:path",
|
||||
Help: `
|
||||
Returns the total size of objects in remote:path and the number
|
||||
of objects.`,
|
||||
Run: func(fdst, fsrc fs.Fs) error {
|
||||
objects, size, err := fs.Count(fdst)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to list: %v", err)
|
||||
return err
|
||||
}
|
||||
fmt.Printf("Total objects: %d\n", objects)
|
||||
fmt.Printf("Total size: %v (%d bytes)\n", fs.SizeSuffix(size), size)
|
||||
return nil
|
||||
},
|
||||
MinArgs: 1,
|
||||
MaxArgs: 1,
|
@@ -154,14 +177,12 @@ var Commands = []Command{
		ArgsHelp: "remote:path",
		Help: `
        Make the path if it doesn't already exist`,
		Run: func(fdst, fsrc fs.Fs) {
			err := fs.Mkdir(fdst)
			if err != nil {
				log.Fatalf("Failed to mkdir: %v", err)
			}
		Run: func(fdst, fsrc fs.Fs) error {
			return fs.Mkdir(fdst)
		},
		MinArgs: 1,
		MaxArgs: 1,
		Retry:   true,
	},
	{
		Name: "rmdir",
@@ -169,28 +190,24 @@ var Commands = []Command{
		Help: `
        Remove the path. Note that you can't remove a path with
        objects in it, use purge for that.`,
		Run: func(fdst, fsrc fs.Fs) {
			err := fs.Rmdir(fdst)
			if err != nil {
				log.Fatalf("Failed to rmdir: %v", err)
			}
		Run: func(fdst, fsrc fs.Fs) error {
			return fs.Rmdir(fdst)
		},
		MinArgs: 1,
		MaxArgs: 1,
		Retry:   true,
	},
	{
		Name:     "purge",
		ArgsHelp: "remote:path",
		Help: `
        Remove the path and all of its contents.`,
		Run: func(fdst, fsrc fs.Fs) {
			err := fs.Purge(fdst)
			if err != nil {
				log.Fatalf("Failed to purge: %v", err)
			}
		Run: func(fdst, fsrc fs.Fs) error {
			return fs.Purge(fdst)
		},
		MinArgs: 1,
		MaxArgs: 1,
		Retry:   true,
	},
	{
		Name: "check",
@@ -199,11 +216,8 @@ var Commands = []Command{
        Checks the files in the source and destination match. It
        compares sizes and MD5SUMs and prints a report of files which
        don't match. It doesn't alter the source or destination.`,
		Run: func(fdst, fsrc fs.Fs) {
			err := fs.Check(fdst, fsrc)
			if err != nil {
				log.Fatalf("Failed to check: %v", err)
			}
		Run: func(fdst, fsrc fs.Fs) error {
			return fs.Check(fdst, fsrc)
		},
		MinArgs: 2,
		MaxArgs: 2,
@@ -212,8 +226,9 @@ var Commands = []Command{
		Name: "config",
		Help: `
        Enter an interactive configuration session.`,
		Run: func(fdst, fsrc fs.Fs) {
		Run: func(fdst, fsrc fs.Fs) error {
			fs.EditConfig()
			return nil
		},
		NoStats: true,
	},
@@ -243,8 +258,8 @@ Subcommands:
	fmt.Fprintf(os.Stderr, "Options:\n")
	pflag.PrintDefaults()
	fmt.Fprintf(os.Stderr, `
It is only necessary to use a unique prefix of the subcommand, eg 'up'
for 'upload'.
It is only necessary to use a unique prefix of the subcommand, eg 'mo'
for 'move'.
`)
}

@@ -255,7 +270,7 @@ func fatal(message string, args ...interface{}) {
	os.Exit(1)
}

// Parse the command line flags
// ParseFlags parses the command line flags
func ParseFlags() {
	pflag.Usage = syntaxError
	pflag.Parse()
@@ -278,7 +293,7 @@ func ParseFlags() {
	}
}

// Parse the command from the command line
// ParseCommand parses the command from the command line
func ParseCommand() (*Command, []string) {
	args := pflag.Args()
	if len(args) < 1 {
@@ -289,6 +304,7 @@ func ParseCommand() (*Command, []string) {
	args = args[1:]

	// Find the command doing a prefix match
	var found = make([]*Command, 0, 1)
	var command *Command
	for i := range Commands {
		trialCommand := &Commands[i]
@@ -297,16 +313,24 @@ func ParseCommand() (*Command, []string) {
			command = trialCommand
			break
		} else if strings.HasPrefix(trialCommand.Name, cmd) {
			if command != nil {
				fs.Stats.Error()
				log.Fatalf("Not unique - matches multiple commands %q", cmd)
			}
			command = trialCommand
			found = append(found, trialCommand)
		}
	}
	if command == nil {
		fs.Stats.Error()
		log.Fatalf("Unknown command %q", cmd)
	switch len(found) {
	case 0:
		fs.Stats.Error()
		log.Fatalf("Unknown command %q", cmd)
	case 1:
		command = found[0]
	default:
		fs.Stats.Error()
		var names []string
		for _, cmd := range found {
			names = append(names, `"`+cmd.Name+`"`)
		}
		log.Fatalf("Not unique - matches multiple commands: %s", strings.Join(names, ", "))
	}
	}
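Reviewer note: the old code aborted on the second prefix match, which could also misreport ambiguity; the new code collects all matches first and only then decides. A standalone sketch of the same pattern, using generic names rather than rclone's types:

package main

import (
	"fmt"
	"strings"
)

// resolve finds the unique command whose name cmd is a prefix of.
// An exact match always wins, mirroring the logic in this diff.
func resolve(commands []string, cmd string) (string, error) {
	var found []string
	for _, name := range commands {
		if name == cmd {
			return name, nil // exact match wins immediately
		}
		if strings.HasPrefix(name, cmd) {
			found = append(found, name)
		}
	}
	switch len(found) {
	case 0:
		return "", fmt.Errorf("unknown command %q", cmd)
	case 1:
		return found[0], nil
	default:
		return "", fmt.Errorf("not unique - matches %s", strings.Join(found, ", "))
	}
}

func main() {
	cmds := []string{"move", "mkdir", "md5sum"}
	fmt.Println(resolve(cmds, "mo")) // "move", no error
	fmt.Println(resolve(cmds, "m"))  // ambiguous, lists all three
}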
	if command.Run == nil {
		syntaxError()
@@ -315,7 +339,7 @@ func ParseCommand() (*Command, []string) {
	return command, args
}

// Create a Fs from a name
// NewFs creates a Fs from a name
func NewFs(remote string) fs.Fs {
	f, err := fs.NewFs(remote)
	if err != nil {
@@ -325,7 +349,7 @@ func NewFs(remote string) fs.Fs {
	return f
}

// Print the stats every statsInterval
// StartStats prints the stats every statsInterval
func StartStats() {
	if *statsInterval <= 0 {
		return
@@ -353,7 +377,10 @@ func main() {
	if err != nil {
		log.Fatalf("Failed to open log file: %v", err)
	}
	f.Seek(0, os.SEEK_END)
	_, err = f.Seek(0, os.SEEK_END)
	if err != nil {
		log.Printf("Failed to seek log file to end: %v", err)
	}
	log.SetOutput(f)
	redirectStderr(f)
}
@@ -376,8 +403,25 @@ func main() {

	// Run the actual command
	if command.Run != nil {
		command.Run(fdst, fsrc)
		if !command.NoStats {
		var err error
		for try := 1; try <= *retries; try++ {
			err = command.Run(fdst, fsrc)
			if !command.Retry || (err == nil && !fs.Stats.Errored()) {
				break
			}
			if err != nil {
				fs.Log(nil, "Attempt %d/%d failed with %d errors and: %v", try, *retries, fs.Stats.GetErrors(), err)
			} else {
				fs.Log(nil, "Attempt %d/%d failed with %d errors", try, *retries, fs.Stats.GetErrors())
			}
			if try < *retries {
				fs.Stats.ResetErrors()
			}
		}
		if err != nil {
			log.Fatalf("Failed to %s: %v", command.Name, err)
		}
		if !command.NoStats && (!fs.Config.Quiet || fs.Stats.Errored() || *statsInterval > 0) {
			fmt.Fprintln(os.Stderr, fs.Stats)
		}
		if fs.Config.Verbose {
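Reviewer note: the new main loop retries a failing command up to --retries times, resetting the transient error counter between attempts, and only fails hard once all attempts are spent. A minimal self-contained sketch of the same pattern; the run callback and retry count here are placeholders, not rclone's:

package main

import (
	"errors"
	"fmt"
	"log"
)

// runWithRetries mirrors the retry loop added in this diff: retry on
// error, log each failed attempt, and return the last error only
// after all attempts are exhausted.
func runWithRetries(retries int, run func() error) error {
	var err error
	for try := 1; try <= retries; try++ {
		err = run()
		if err == nil {
			return nil
		}
		log.Printf("Attempt %d/%d failed: %v", try, retries, err)
		// rclone additionally resets fs.Stats error counts between tries
	}
	return err
}

func main() {
	attempts := 0
	err := runWithRetries(3, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure")
		}
		return nil
	})
	fmt.Println("result:", err, "after", attempts, "attempts")
}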
s3/s3.go (558 lines changed)
@@ -1,71 +1,94 @@
// S3 interface
// Package s3 provides an interface to Amazon S3 object storage
package s3

// FIXME need to prevent anything but ListDir working for s3://

/*
Progress of port to aws-sdk

 * Don't really need o.meta at all?

What happens if you CTRL-C a multipart upload
 * get an incomplete upload
 * disappears when you delete the bucket
*/

import (
	"errors"
	"fmt"
	"io"
	"net/http"
	"path"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/ncw/goamz/aws"
	"github.com/ncw/goamz/s3"
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/corehandlers"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/swift"
)

// Register with Fs
func init() {
	fs.Register(&fs.FsInfo{
	fs.Register(&fs.Info{
		Name:  "s3",
		NewFs: NewFs,
		// AWS endpoints: http://docs.amazonwebservices.com/general/latest/gr/rande.html#s3_region
		Options: []fs.Option{{
			Name: "access_key_id",
			Help: "AWS Access Key ID.",
			Help: "AWS Access Key ID - leave blank for anonymous access.",
		}, {
			Name: "secret_access_key",
			Help: "AWS Secret Access Key (password).",
			Help: "AWS Secret Access Key (password) - leave blank for anonymous access.",
		}, {
			Name: "endpoint",
			Help: "Endpoint for S3 API.",
			Name: "region",
			Help: "Region to connect to.",
			Examples: []fs.OptionExample{{
				Value: "https://s3.amazonaws.com/",
				Value: "us-east-1",
				Help:  "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia or Pacific Northwest.\nLeave location constraint empty.",
			}, {
				Value: "https://s3-external-1.amazonaws.com",
				Help:  "US Region, Northern Virginia only.\nLeave location constraint empty.",
			}, {
				Value: "https://s3-us-west-2.amazonaws.com",
				Value: "us-west-2",
				Help:  "US West (Oregon) Region\nNeeds location constraint us-west-2.",
			}, {
				Value: "https://s3-us-west-1.amazonaws.com",
				Value: "us-west-1",
				Help:  "US West (Northern California) Region\nNeeds location constraint us-west-1.",
			}, {
				Value: "https://s3-eu-west-1.amazonaws.com",
				Value: "eu-west-1",
				Help:  "EU (Ireland) Region\nNeeds location constraint EU or eu-west-1.",
			}, {
				Value: "https://s3-ap-southeast-1.amazonaws.com",
				Value: "eu-central-1",
				Help:  "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.",
			}, {
				Value: "ap-southeast-1",
				Help:  "Asia Pacific (Singapore) Region\nNeeds location constraint ap-southeast-1.",
			}, {
				Value: "https://s3-ap-southeast-2.amazonaws.com",
				Help:  "Asia Pacific (Sydney) Region\nNeeds location constraint .",
				Value: "ap-southeast-2",
				Help:  "Asia Pacific (Sydney) Region\nNeeds location constraint ap-southeast-2.",
			}, {
				Value: "https://s3-ap-northeast-1.amazonaws.com",
				Value: "ap-northeast-1",
				Help:  "Asia Pacific (Tokyo) Region\nNeeds location constraint ap-northeast-1.",
			}, {
				Value: "https://s3-sa-east-1.amazonaws.com",
				Value: "sa-east-1",
				Help:  "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
			}, {
				Value: "other-v2-signature",
				Help:  "If using an S3 clone that only understands v2 signatures - eg Ceph - set this and make sure you set the endpoint.",
			}, {
				Value: "other-v4-signature",
				Help:  "If using an S3 clone that understands v4 signatures set this and make sure you set the endpoint.",
			}},
		}, {
			Name: "endpoint",
			Help: "Endpoint for S3 API.\nLeave blank if using AWS to use the default endpoint for the region.\nSpecify if using an S3 clone such as Ceph.",
		}, {
			Name: "location_constraint",
			Help: "Location constraint - must be set to match the Endpoint.",
			Help: "Location constraint - must be set to match the Region. Used when creating buckets only.",
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "Empty for US Region, Northern Virginia or Pacific Northwest.",
@@ -100,37 +123,53 @@ func init() {

// Constants
const (
	metaMtime     = "X-Amz-Meta-Mtime" // the meta key to store mtime in
	listChunkSize = 1024               // number of items to read at once
	metaMtime     = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime
	listChunkSize = 1024    // number of items to read at once
	maxRetries    = 10      // number of retries to make of operations
)

// FsS3 represents a remote s3 server
type FsS3 struct {
	c      *s3.S3     // the connection to the s3 server
	b      *s3.Bucket // the connection to the bucket
	bucket string     // the bucket we are working on
	perm   s3.ACL     // permissions for new buckets / objects
	root   string     // root of the bucket - ignore all objects above this
// Fs represents a remote s3 server
type Fs struct {
	name               string           // the name of the remote
	c                  *s3.S3           // the connection to the s3 server
	ses                *session.Session // the s3 session
	bucket             string           // the bucket we are working on
	perm               string           // permissions for new buckets / objects
	root               string           // root of the bucket - ignore all objects above this
	locationConstraint string           // location constraint of new buckets
}

// FsObjectS3 describes a s3 object
type FsObjectS3 struct {
// Object describes a s3 object
type Object struct {
	// Will definitely have everything but meta which may be nil
	//
	// List will read everything but meta - to fill that in need to call
	// readMetaData
	s3           *FsS3      // what this object is part of
	remote       string     // The remote path
	etag         string     // md5sum of the object
	bytes        int64      // size of the object
	lastModified time.Time  // Last modified
	meta         s3.Headers // The object metadata if known - may be nil
	fs           *Fs                // what this object is part of
	remote       string             // The remote path
	etag         string             // md5sum of the object
	bytes        int64              // size of the object
	lastModified time.Time          // Last modified
	meta         map[string]*string // The object metadata if known - may be nil
}
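Reviewer note: aws-sdk-go passes *string, *int64 and friends in every request and response struct, which is why meta becomes map[string]*string here. The diff leans on the SDK's pointer helpers throughout; a small standalone demonstration (aws.String, aws.StringValue and aws.Int64Value are real aws-sdk-go functions, the values below are made up):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// aws.String wraps a value into a pointer for request structs.
	key := aws.String("path/to/object")

	// aws.StringValue / aws.Int64Value safely dereference, returning
	// the zero value when the pointer is nil - which is why the diff
	// can write o.bytes = aws.Int64Value(info.Size) without a nil check.
	var size *int64
	fmt.Println(aws.StringValue(key)) // "path/to/object"
	fmt.Println(aws.Int64Value(size)) // 0
}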
// ------------------------------------------------------------

// String converts this FsS3 to a string
func (f *FsS3) String() string {
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	if f.root == "" {
		return f.bucket
	}
	return f.bucket + "/" + f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	if f.root == "" {
		return fmt.Sprintf("S3 bucket %s", f.bucket)
	}
@@ -153,62 +192,88 @@ func s3ParsePath(path string) (bucket, directory string, err error) {
}

// s3Connection makes a connection to s3
func s3Connection(name string) (*s3.S3, error) {
func s3Connection(name string) (*s3.S3, *session.Session, error) {
	// Make the auth
	accessKeyId := fs.ConfigFile.MustValue(name, "access_key_id")
	if accessKeyId == "" {
		return nil, errors.New("access_key_id not found")
	}
	accessKeyID := fs.ConfigFile.MustValue(name, "access_key_id")
	secretAccessKey := fs.ConfigFile.MustValue(name, "secret_access_key")
	if secretAccessKey == "" {
		return nil, errors.New("secret_access_key not found")
	}
	auth := aws.Auth{AccessKey: accessKeyId, SecretKey: secretAccessKey}

	// FIXME look through all the regions by name and use one of them if found

	// Synthesize the region
	s3Endpoint := fs.ConfigFile.MustValue(name, "endpoint")
	if s3Endpoint == "" {
		s3Endpoint = "https://s3.amazonaws.com/"
	}
	region := aws.Region{
		Name:                 "s3",
		S3Endpoint:           s3Endpoint,
		S3LocationConstraint: false,
	}
	s3LocationConstraint := fs.ConfigFile.MustValue(name, "location_constraint")
	if s3LocationConstraint != "" {
		region.Name = s3LocationConstraint
		region.S3LocationConstraint = true
	var auth *credentials.Credentials
	switch {
	case accessKeyID == "" && secretAccessKey == "":
		fs.Debug(name, "Using anonymous access for S3")
		auth = credentials.AnonymousCredentials
	case accessKeyID == "":
		return nil, nil, errors.New("access_key_id not found")
	case secretAccessKey == "":
		return nil, nil, errors.New("secret_access_key not found")
	default:
		auth = credentials.NewStaticCredentials(accessKeyID, secretAccessKey, "")
	}

	c := s3.New(auth, region)
	c.Client = fs.Config.Client()
	return c, nil
	endpoint := fs.ConfigFile.MustValue(name, "endpoint")
	region := fs.ConfigFile.MustValue(name, "region")
	if region == "" && endpoint == "" {
		endpoint = "https://s3.amazonaws.com/"
	}
	if region == "" {
		region = "us-east-1"
	}
	awsConfig := aws.NewConfig().
		WithRegion(region).
		WithMaxRetries(maxRetries).
		WithCredentials(auth).
		WithEndpoint(endpoint).
		WithHTTPClient(fs.Config.Client()).
		WithS3ForcePathStyle(true)
	// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
	ses := session.New()
	c := s3.New(ses, awsConfig)
	if region == "other-v2-signature" {
		fs.Debug(name, "Using v2 auth")
		signer := func(req *request.Request) {
			// Ignore AnonymousCredentials object
			if req.Config.Credentials == credentials.AnonymousCredentials {
				return
			}
			sign(accessKeyID, secretAccessKey, req.HTTPRequest)
		}
		c.Handlers.Sign.Clear()
		c.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
		c.Handlers.Sign.PushBack(signer)
	}
	// Add user agent
	c.Handlers.Build.PushBack(func(r *request.Request) {
		r.HTTPRequest.Header.Set("User-Agent", fs.UserAgent)
	})
	return c, ses, nil
}
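Reviewer note: with the region option doubling as a signature selector, an S3 clone such as Ceph is configured by picking other-v2-signature and setting the endpoint. A hypothetical rclone.conf entry using the option names this diff registers; the remote name and endpoint URL are made-up examples, not anything from the diff:

[ceph]
type = s3
access_key_id = ACCESS_KEY
secret_access_key = SECRET_KEY
region = other-v2-signature
endpoint = https://ceph.example.com
location_constraint =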
// NewFsS3 constructs an FsS3 from the path, bucket:path
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
	bucket, directory, err := s3ParsePath(root)
	if err != nil {
		return nil, err
	}
	c, err := s3Connection(name)
	c, ses, err := s3Connection(name)
	if err != nil {
		return nil, err
	}
	f := &FsS3{
	f := &Fs{
		name:   name,
		c:      c,
		bucket: bucket,
		b:      c.Bucket(bucket),
		perm:   s3.Private, // FIXME need user to specify
		root:   directory,
		ses:    ses,
		// FIXME perm: s3.Private, // FIXME need user to specify
		root:               directory,
		locationConstraint: fs.ConfigFile.MustValue(name, "location_constraint"),
	}
	if f.root != "" {
		f.root += "/"
		// Check to see if the object exists
		_, err = f.b.Head(directory, nil)
		req := s3.HeadObjectInput{
			Bucket: &f.bucket,
			Key:    &directory,
		}
		_, err = f.c.HeadObject(&req)
		if err == nil {
			remote := path.Base(directory)
			f.root = path.Dir(directory)
@@ -222,27 +287,28 @@ func NewFs(name, root string) (fs.Fs, error) {
			return fs.NewLimited(f, obj), nil
		}
	}
	// f.listMultipartUploads()
	return f, nil
}

// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsS3) newFsObjectWithInfo(remote string, info *s3.Key) fs.Object {
	o := &FsObjectS3{
		s3:     f,
func (f *Fs) newFsObjectWithInfo(remote string, info *s3.Object) fs.Object {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	if info != nil {
		// Set info but not meta
		var err error
		o.lastModified, err = time.Parse(time.RFC3339, info.LastModified)
		if err != nil {
			fs.Log(o, "Failed to read last modified: %s", err)
		if info.LastModified == nil {
			fs.Log(o, "Failed to read last modified")
			o.lastModified = time.Now()
		} else {
			o.lastModified = *info.LastModified
		}
		o.etag = info.ETag
		o.bytes = info.Size
		o.etag = aws.StringValue(info.ETag)
		o.bytes = aws.Int64Value(info.Size)
	} else {
		err := o.readMetaData() // reads info and meta, returning an error
		if err != nil {
@@ -253,76 +319,92 @@ func (f *FsS3) newFsObjectWithInfo(remote string, info *s3.Key) fs.Object {
	return o
}

// Return an FsObject from a path
// NewFsObject returns an FsObject from a path
//
// May return nil if an error occurred
func (f *FsS3) NewFsObject(remote string) fs.Object {
func (f *Fs) NewFsObject(remote string) fs.Object {
	return f.newFsObjectWithInfo(remote, nil)
}

// list the objects into the function supplied
//
// If directories is set it only sends directories
func (f *FsS3) list(directories bool, fn func(string, *s3.Key)) {
func (f *Fs) list(directories bool, fn func(string, *s3.Object)) {
	maxKeys := int64(listChunkSize)
	delimiter := ""
	if directories {
		delimiter = "/"
	}
	marker := ""
	var marker *string
	for {
		objects, err := f.b.List(f.root, delimiter, marker, listChunkSize)
		// FIXME need to implement ALL loop
		req := s3.ListObjectsInput{
			Bucket:    &f.bucket,
			Delimiter: &delimiter,
			Prefix:    &f.root,
			MaxKeys:   &maxKeys,
			Marker:    marker,
		}
		resp, err := f.c.ListObjects(&req)
		if err != nil {
			fs.Stats.Error()
			fs.Log(f, "Couldn't read bucket %q: %s", f.bucket, err)
			fs.ErrorLog(f, "Couldn't read bucket %q: %s", f.bucket, err)
			break
		} else {
			rootLength := len(f.root)
			if directories {
				for _, remote := range objects.CommonPrefixes {
				for _, commonPrefix := range resp.CommonPrefixes {
					if commonPrefix.Prefix == nil {
						fs.Log(f, "Nil common prefix received")
						continue
					}
					remote := *commonPrefix.Prefix
					if !strings.HasPrefix(remote, f.root) {
						fs.Log(f, "Odd name received %q", remote)
						continue
					}
					remote := remote[rootLength:]
					remote = remote[rootLength:]
					if strings.HasSuffix(remote, "/") {
						remote = remote[:len(remote)-1]
					}
					fn(remote, &s3.Key{Key: remote})
					fn(remote, &s3.Object{Key: &remote})
				}
			} else {
				for i := range objects.Contents {
					object := &objects.Contents[i]
					if !strings.HasPrefix(object.Key, f.root) {
						fs.Log(f, "Odd name received %q", object.Key)
				for _, object := range resp.Contents {
					key := aws.StringValue(object.Key)
					if !strings.HasPrefix(key, f.root) {
						fs.Log(f, "Odd name received %q", key)
						continue
					}
					remote := object.Key[rootLength:]
					remote := key[rootLength:]
					fn(remote, object)
				}
			}
		}
		if !objects.IsTruncated {
			break
		}
		// Use NextMarker if set, otherwise use last Key
		marker = objects.NextMarker
		if marker == "" {
			marker = objects.Contents[len(objects.Contents)-1].Key
		if !aws.BoolValue(resp.IsTruncated) {
			break
		}
		// Use NextMarker if set, otherwise use last Key
		if resp.NextMarker == nil || *resp.NextMarker == "" {
			marker = resp.Contents[len(resp.Contents)-1].Key
		} else {
			marker = resp.NextMarker
		}
	}
}
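Reviewer note: the loop above pages through ListObjects until IsTruncated is false, feeding Marker from NextMarker, or from the last Key when NextMarker is unset (S3 only fills NextMarker in for delimiter listings). A minimal sketch of the same pagination pattern against aws-sdk-go, extracted from the diff; the bucket name is a placeholder:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

// listAll pages through a bucket the same way the diff's list() does.
func listAll(svc *s3.S3, bucket string, fn func(*s3.Object)) error {
	var marker *string
	for {
		resp, err := svc.ListObjects(&s3.ListObjectsInput{
			Bucket: aws.String(bucket),
			Marker: marker,
		})
		if err != nil {
			return err
		}
		for _, obj := range resp.Contents {
			fn(obj)
		}
		if !aws.BoolValue(resp.IsTruncated) {
			return nil
		}
		// NextMarker if set, otherwise the last Key of this page.
		if resp.NextMarker == nil || *resp.NextMarker == "" {
			marker = resp.Contents[len(resp.Contents)-1].Key
		} else {
			marker = resp.NextMarker
		}
	}
}

func main() {
	svc := s3.New(session.New())
	err := listAll(svc, "example-bucket", func(o *s3.Object) {
		fmt.Println(aws.StringValue(o.Key))
	})
	if err != nil {
		fmt.Println("list failed:", err)
	}
}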
// Walk the path returning a channel of FsObjects
func (f *FsS3) List() fs.ObjectsChan {
// List walks the path returning a channel of FsObjects
func (f *Fs) List() fs.ObjectsChan {
	out := make(fs.ObjectsChan, fs.Config.Checkers)
	if f.bucket == "" {
		// Return no objects at top level list
		close(out)
		fs.Stats.Error()
		fs.Log(f, "Can't list objects at root - choose a bucket using lsd")
		fs.ErrorLog(f, "Can't list objects at root - choose a bucket using lsd")
	} else {
		go func() {
			defer close(out)
			f.list(false, func(remote string, object *s3.Key) {
			f.list(false, func(remote string, object *s3.Object) {
				if fs := f.newFsObjectWithInfo(remote, object); fs != nil {
					out <- fs
				}
@@ -332,22 +414,23 @@ func (f *FsS3) List() fs.ObjectsChan {
	return out
}

// Lists the buckets
func (f *FsS3) ListDir() fs.DirChan {
// ListDir lists the buckets
func (f *Fs) ListDir() fs.DirChan {
	out := make(fs.DirChan, fs.Config.Checkers)
	if f.bucket == "" {
		// List the buckets
		go func() {
			defer close(out)
			buckets, err := f.c.ListBuckets()
			req := s3.ListBucketsInput{}
			resp, err := f.c.ListBuckets(&req)
			if err != nil {
				fs.Stats.Error()
				fs.Log(f, "Couldn't list buckets: %s", err)
				fs.ErrorLog(f, "Couldn't list buckets: %s", err)
			} else {
				for _, bucket := range buckets {
				for _, bucket := range resp.Buckets {
					out <- &fs.Dir{
						Name:  bucket.Name,
						When:  bucket.CreationDate,
						Name:  aws.StringValue(bucket.Name),
						When:  aws.TimeValue(bucket.CreationDate),
						Bytes: -1,
						Count: -1,
					}
@@ -358,10 +441,14 @@ func (f *FsS3) ListDir() fs.DirChan {
		// List the directories in the path in the bucket
		go func() {
			defer close(out)
			f.list(true, func(remote string, object *s3.Key) {
			f.list(true, func(remote string, object *s3.Object) {
				size := int64(0)
				if object.Size != nil {
					size = *object.Size
				}
				out <- &fs.Dir{
					Name:  remote,
					Bytes: object.Size,
					Bytes: size,
					Count: 0,
				}
			})
@@ -371,97 +458,136 @@ func (f *FsS3) ListDir() fs.DirChan {
}

// Put the FsObject into the bucket
func (f *FsS3) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
	// Temporary FsObject under construction
	fs := &FsObjectS3{s3: f, remote: remote}
func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
	// Temporary Object under construction
	fs := &Object{
		fs:     f,
		remote: remote,
	}
	return fs, fs.Update(in, modTime, size)
}

// Mkdir creates the bucket if it doesn't exist
func (f *FsS3) Mkdir() error {
	err := f.b.PutBucket(f.perm)
	if err, ok := err.(*s3.Error); ok {
		if err.Code == "BucketAlreadyOwnedByYou" {
func (f *Fs) Mkdir() error {
	req := s3.CreateBucketInput{
		Bucket: &f.bucket,
		ACL:    &f.perm,
	}
	if f.locationConstraint != "" {
		req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
			LocationConstraint: &f.locationConstraint,
		}
	}
	_, err := f.c.CreateBucket(&req)
	if err, ok := err.(awserr.Error); ok {
		if err.Code() == "BucketAlreadyOwnedByYou" {
			return nil
		}
	}
	return err
}

// Rmdir deletes the bucket
// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty
func (f *FsS3) Rmdir() error {
	return f.b.DelBucket()
func (f *Fs) Rmdir() error {
	if f.root != "" {
		return nil
	}
	req := s3.DeleteBucketInput{
		Bucket: &f.bucket,
	}
	_, err := f.c.DeleteBucket(&req)
	return err
}

// Return the precision
func (f *FsS3) Precision() time.Duration {
// Precision of the remote
func (f *Fs) Precision() time.Duration {
	return time.Nanosecond
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debug(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	srcFs := srcObj.fs
	key := f.root + remote
	source := srcFs.bucket + "/" + srcFs.root + srcObj.remote
	req := s3.CopyObjectInput{
		Bucket:            &f.bucket,
		Key:               &key,
		CopySource:        &source,
		MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
	}
	_, err := f.c.CopyObject(&req)
	if err != nil {
		return nil, err
	}
	return f.NewFsObject(remote), err
}

// ------------------------------------------------------------

// Return the parent Fs
func (o *FsObjectS3) Fs() fs.Fs {
	return o.s3
// Fs returns the parent Fs
func (o *Object) Fs() fs.Fs {
	return o.fs
}

// Return a string version
func (o *FsObjectS3) String() string {
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Return the remote path
func (o *FsObjectS3) Remote() string {
// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)

// Md5sum returns the Md5sum of an object returning a lowercase hex string
func (o *FsObjectS3) Md5sum() (string, error) {
func (o *Object) Md5sum() (string, error) {
	etag := strings.Trim(strings.ToLower(o.etag), `"`)
	// Check the etag is a valid md5sum
	if !matchMd5.MatchString(etag) {
		fs.Debug(o, "Invalid md5sum (probably multipart uploaded) - ignoring: %q", etag)
		// fs.Debug(o, "Invalid md5sum (probably multipart uploaded) - ignoring: %q", etag)
		return "", nil
	}
	return etag, nil
}
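Reviewer note: Md5sum returns "" for anything that is not a plain 32-hex digest because multipart uploads produce ETags that are not the object's MD5; AWS documents that a multipart ETag typically looks like an MD5-of-part-MD5s with a "-N" part count appended (that suffix form is observed AWS behaviour, not something this diff defines). A tiny checker in the same spirit:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

var md5Re = regexp.MustCompile(`^[0-9a-f]{32}$`)

// usableMd5 returns the ETag as an md5sum, or "" if it can't be one.
func usableMd5(etag string) string {
	etag = strings.Trim(strings.ToLower(etag), `"`)
	if !md5Re.MatchString(etag) {
		return "" // e.g. multipart ETags like "9e107d...-3"
	}
	return etag
}

func main() {
	fmt.Printf("%q\n", usableMd5(`"9e107d9d372bb6826bd81d3542a419d6"`))   // plain upload
	fmt.Printf("%q\n", usableMd5(`"9e107d9d372bb6826bd81d3542a419d6-3"`)) // multipart, rejected
}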
// Size returns the size of an object in bytes
func (o *FsObjectS3) Size() int64 {
func (o *Object) Size() int64 {
	return o.bytes
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// if we get a 404 error then we retry a few times for eventual
// consistency reasons
//
// it also sets the info
func (o *FsObjectS3) readMetaData() (err error) {
func (o *Object) readMetaData() (err error) {
	if o.meta != nil {
		return nil
	}
	var headers s3.Headers

	// Try reading the metadata a few times (with exponential
	// backoff) to get around eventual consistency on 404 error
	for tries := uint(0); tries < 10; tries++ {
		headers, err = o.s3.b.Head(o.s3.root+o.remote, nil)
		if s3Err, ok := err.(*s3.Error); ok {
			if s3Err.StatusCode == http.StatusNotFound {
				time.Sleep(5 * time.Millisecond << tries)
				continue
			}
		}
		break
	key := o.fs.root + o.remote
	req := s3.HeadObjectInput{
		Bucket: &o.fs.bucket,
		Key:    &key,
	}
	resp, err := o.fs.c.HeadObject(&req)
	if err != nil {
		fs.Debug(o, "Failed to read info: %s", err)
		return err
@@ -469,19 +595,17 @@ func (o *FsObjectS3) readMetaData() (err error) {
	var size int64
	// Ignore missing Content-Length assuming it is 0
	// Some versions of ceph do this due to their Apache proxies
	if contentLength, ok := headers["Content-Length"]; ok {
		size, err = strconv.ParseInt(contentLength, 10, 64)
		if err != nil {
			fs.Debug(o, "Failed to read size from: %q", headers)
			return err
		}
	if resp.ContentLength != nil {
		size = *resp.ContentLength
	}
	o.etag = headers["Etag"]
	o.etag = aws.StringValue(resp.ETag)
	o.bytes = size
	o.meta = headers
	if o.lastModified, err = time.Parse(http.TimeFormat, headers["Last-Modified"]); err != nil {
	o.meta = resp.Metadata
	if resp.LastModified == nil {
		fs.Log(o, "Failed to read last modified from HEAD: %s", err)
		o.lastModified = time.Now()
	} else {
		o.lastModified = *resp.LastModified
	}
	return nil
}
@@ -490,7 +614,7 @@ func (o *FsObjectS3) readMetaData() (err error) {
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *FsObjectS3) ModTime() time.Time {
func (o *Object) ModTime() time.Time {
	err := o.readMetaData()
	if err != nil {
		fs.Log(o, "Failed to read metadata: %s", err)
@@ -498,11 +622,11 @@ func (o *FsObjectS3) ModTime() time.Time {
	}
	// read mtime out of metadata if available
	d, ok := o.meta[metaMtime]
	if !ok {
	if !ok || d == nil {
		// fs.Debug(o, "No metadata")
		return o.lastModified
	}
	modTime, err := swift.FloatStringToTime(d)
	modTime, err := swift.FloatStringToTime(*d)
	if err != nil {
		fs.Log(o, "Failed to read mtime from object: %s", err)
		return o.lastModified
@@ -510,44 +634,85 @@ func (o *FsObjectS3) ModTime() time.Time {
	return modTime
}

// Sets the modification time of the local fs object
func (o *FsObjectS3) SetModTime(modTime time.Time) {
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) {
	err := o.readMetaData()
	if err != nil {
		fs.Stats.Error()
		fs.Log(o, "Failed to read metadata: %s", err)
		fs.ErrorLog(o, "Failed to read metadata: %s", err)
		return
	}
	o.meta[metaMtime] = swift.TimeToFloatString(modTime)
	_, err = o.s3.b.Update(o.s3.root+o.remote, o.s3.perm, o.meta)
	o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))

	// Copy the object to itself to update the metadata
	key := o.fs.root + o.remote
	sourceKey := o.fs.bucket + "/" + key
	directive := s3.MetadataDirectiveReplace // replace metadata with that passed in
	req := s3.CopyObjectInput{
		Bucket:            &o.fs.bucket,
		ACL:               &o.fs.perm,
		Key:               &key,
		CopySource:        &sourceKey,
		Metadata:          o.meta,
		MetadataDirective: &directive,
	}
	_, err = o.fs.c.CopyObject(&req)
	if err != nil {
		fs.Stats.Error()
		fs.Log(o, "Failed to update remote mtime: %s", err)
		fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
	}
}

// Is this object storable
func (o *FsObjectS3) Storable() bool {
// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
	return true
}

// Open an object for read
func (o *FsObjectS3) Open() (in io.ReadCloser, err error) {
	in, err = o.s3.b.GetReader(o.s3.root + o.remote)
	return
func (o *Object) Open() (in io.ReadCloser, err error) {
	key := o.fs.root + o.remote
	req := s3.GetObjectInput{
		Bucket: &o.fs.bucket,
		Key:    &key,
	}
	resp, err := o.fs.c.GetObject(&req)
	if err != nil {
		return nil, err
	}
	return resp.Body, nil
}

// Update the Object from in with modTime and size
func (o *FsObjectS3) Update(in io.Reader, modTime time.Time, size int64) error {
	// Set the mtime in the headers
	headers := s3.Headers{
		metaMtime: swift.TimeToFloatString(modTime),
func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
	uploader := s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
		u.Concurrency = 2
		u.LeavePartsOnError = false
		u.S3 = o.fs.c
	})

	// Set the mtime in the meta data
	metadata := map[string]*string{
		metaMtime: aws.String(swift.TimeToFloatString(modTime)),
	}

	_, err := o.s3.b.PutReaderHeaders(o.s3.root+o.remote, in, size, fs.MimeType(o), o.s3.perm, headers)
	// Guess the content type
	contentType := fs.MimeType(o)

	key := o.fs.root + o.remote
	req := s3manager.UploadInput{
		Bucket:      &o.fs.bucket,
		ACL:         &o.fs.perm,
		Key:         &key,
		Body:        in,
		ContentType: &contentType,
		Metadata:    metadata,
		//ContentLength: &size,
	}
	_, err := uploader.Upload(&req)
	if err != nil {
		return err
	}

	// Read the metadata from the newly created object
	o.meta = nil // wipe old metadata
	err = o.readMetaData()
@@ -555,10 +720,19 @@ func (o *FsObjectS3) Update(in io.Reader, modTime time.Time, size int64) error {
}
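Reviewer note: both before and after the port, the modification time is stored as a floating-point Unix timestamp string under the metaMtime key (surfacing as X-Amz-Meta-Mtime on S3), encoded and decoded with helpers from github.com/ncw/swift. A round-trip sketch of those two real helper functions:

package main

import (
	"fmt"
	"time"

	"github.com/ncw/swift"
)

func main() {
	// Encode: the string rclone stores under the Mtime metadata key.
	now := time.Now()
	s := swift.TimeToFloatString(now)
	fmt.Println("stored:", s)

	// Decode: what ModTime() does when reading the object back.
	back, err := swift.FloatStringToTime(s)
	if err != nil {
		panic(err)
	}
	fmt.Println("decoded:", back)
}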
// Remove an object
func (o *FsObjectS3) Remove() error {
	return o.s3.b.Del(o.s3.root + o.remote)
func (o *Object) Remove() error {
	key := o.fs.root + o.remote
	req := s3.DeleteObjectInput{
		Bucket: &o.fs.bucket,
		Key:    &key,
	}
	_, err := o.fs.c.DeleteObject(&req)
	return err
}

// Check the interfaces are satisfied
var _ fs.Fs = &FsS3{}
var _ fs.Object = &FsObjectS3{}
var (
	_ fs.Fs     = &Fs{}
	_ fs.Copier = &Fs{}
	_ fs.Object = &Object{}
)

@@ -1,7 +1,7 @@
// Test S3 filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: go run gen_tests.go or make gen_tests
// Regenerate with: make gen_tests
package s3_test

import (
@@ -13,7 +13,7 @@ import (
)

func init() {
	fstests.NilObject = fs.Object((*s3.FsObjectS3)(nil))
	fstests.NilObject = fs.Object((*s3.Object)(nil))
	fstests.RemoteName = "TestS3:"
}

@@ -34,6 +34,9 @@ func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewFsObject(t *testing.T) { fstests.TestFsNewFsObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
s3/v2sign.go (new file, 115 lines)
@@ -0,0 +1,115 @@
// v2 signing

package s3

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/base64"
	"net/http"
	"sort"
	"strings"
	"time"
)

// URL parameters that need to be added to the signature
var s3ParamsToSign = map[string]struct{}{
	"acl":                          struct{}{},
	"location":                     struct{}{},
	"logging":                      struct{}{},
	"notification":                 struct{}{},
	"partNumber":                   struct{}{},
	"policy":                       struct{}{},
	"requestPayment":               struct{}{},
	"torrent":                      struct{}{},
	"uploadId":                     struct{}{},
	"uploads":                      struct{}{},
	"versionId":                    struct{}{},
	"versioning":                   struct{}{},
	"versions":                     struct{}{},
	"response-content-type":        struct{}{},
	"response-content-language":    struct{}{},
	"response-expires":             struct{}{},
	"response-cache-control":       struct{}{},
	"response-content-disposition": struct{}{},
	"response-content-encoding":    struct{}{},
}

// sign signs requests using v2 auth
//
// Cobbled together from goamz and aws-sdk-go
func sign(AccessKey, SecretKey string, req *http.Request) {
	// Set date
	date := time.Now().UTC().Format(time.RFC1123)
	req.Header.Set("Date", date)

	// Sort out URI
	uri := req.URL.Opaque
	if uri != "" {
		if strings.HasPrefix(uri, "//") {
			// Strip off //host/uri
			uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/")
			req.URL.Opaque = uri // reset to plain URI otherwise Ceph gets confused
		}
	} else {
		uri = req.URL.Path
	}
	if uri == "" {
		uri = "/"
	}

	// Look through headers of interest
	var md5 string
	var contentType string
	var headersToSign []string
	for k, v := range req.Header {
		k = strings.ToLower(k)
		switch k {
		case "content-md5":
			md5 = v[0]
		case "content-type":
			contentType = v[0]
		default:
			if strings.HasPrefix(k, "x-amz-") {
				vall := strings.Join(v, ",")
				headersToSign = append(headersToSign, k+":"+vall)
			}
		}
	}
	// Make headers of interest into canonical string
	var joinedHeadersToSign string
	if len(headersToSign) > 0 {
		sort.StringSlice(headersToSign).Sort()
		joinedHeadersToSign = strings.Join(headersToSign, "\n") + "\n"
	}

	// Look for query parameters which need to be added to the signature
	params := req.URL.Query()
	var queriesToSign []string
	for k, vs := range params {
		if _, ok := s3ParamsToSign[k]; ok {
			for _, v := range vs {
				if v == "" {
					queriesToSign = append(queriesToSign, k)
				} else {
					queriesToSign = append(queriesToSign, k+"="+v)
				}
			}
		}
	}
	// Add query parameters to URI
	if len(queriesToSign) > 0 {
		sort.StringSlice(queriesToSign).Sort()
		uri += "?" + strings.Join(queriesToSign, "&")
	}

	// Make signature
	payload := req.Method + "\n" + md5 + "\n" + contentType + "\n" + date + "\n" + joinedHeadersToSign + uri
	hash := hmac.New(sha1.New, []byte(SecretKey))
	_, _ = hash.Write([]byte(payload))
	signature := make([]byte, base64.StdEncoding.EncodedLen(hash.Size()))
	base64.StdEncoding.Encode(signature, hash.Sum(nil))

	// Set signature in request
	req.Header.Set("Authorization", "AWS "+AccessKey+":"+string(signature))
}
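Reviewer note: reading the sign function above, the string that gets HMAC-SHA1 signed is built as

Method \n Content-MD5 \n Content-Type \n Date \n canonicalized-x-amz-headers canonicalized-resource

For illustration, a hypothetical PUT with no Content-MD5, one x-amz- metadata header, and a path-style URI would be signed over a payload like this (all values here are made-up examples, not output from the code):

PUT

application/octet-stream
Mon, 02 Jan 2006 15:04:05 UTC
x-amz-meta-mtime:1136214245.000000000
/bucket/path/to/object

The base64 of the HMAC-SHA1 over that payload then lands in the Authorization header as "AWS ACCESSKEY:signature".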
swift/swift.go (453 lines changed)
@@ -1,22 +1,36 @@
// Swift interface
// Package swift provides an interface to the Swift object storage system
package swift

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"path"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/swift"
	"github.com/spf13/pflag"
)

// Constants
const (
	directoryMarkerContentType = "application/directory" // content type of directory marker objects
	directoryMarkerMaxSize     = 1                       // max size that directory marker objects can be
)

// Globals
var (
	chunkSize = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
)

// Register with Fs
func init() {
	fs.Register(&fs.FsInfo{
	fs.Register(&fs.Info{
		Name:  "swift",
		NewFs: NewFs,
		Options: []fs.Option{{
@@ -51,32 +65,48 @@ func init() {
			Name: "region",
			Help: "Region name - optional",
		},
		// snet = flag.Bool("swift-snet", false, "Use internal service network") // FIXME not implemented
		},
	})
	// snet = flag.Bool("swift-snet", false, "Use internal service network") // FIXME not implemented
	pflag.VarP(&chunkSize, "swift-chunk-size", "", "Above this size files will be chunked into a _segments container.")
}
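Reviewer note: --swift-chunk-size is registered through pflag.VarP, which accepts any type implementing pflag's Value interface (String, Set, Type). A minimal standalone flag that parses suffixed sizes in the same general way; this is a generic sketch, not rclone's actual fs.SizeSuffix implementation:

package main

import (
	"fmt"
	"strconv"

	"github.com/spf13/pflag"
)

// Size is a byte count flag accepting K/M/G suffixes, e.g. "5G".
type Size int64

func (s *Size) String() string { return strconv.FormatInt(int64(*s), 10) }

func (s *Size) Set(v string) error {
	mult := int64(1)
	if n := len(v); n > 0 {
		switch v[n-1] {
		case 'K', 'k':
			mult, v = 1<<10, v[:n-1]
		case 'M', 'm':
			mult, v = 1<<20, v[:n-1]
		case 'G', 'g':
			mult, v = 1<<30, v[:n-1]
		}
	}
	n, err := strconv.ParseInt(v, 10, 64)
	if err != nil {
		return err
	}
	*s = Size(n * mult)
	return nil
}

func (s *Size) Type() string { return "size" }

func main() {
	chunk := Size(5 << 30) // default 5G, like the diff's chunkSize
	pflag.VarP(&chunk, "chunk-size", "", "files above this size are chunked")
	pflag.Parse()
	fmt.Println("chunk size:", int64(chunk), "bytes")
}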
// FsSwift represents a remote swift server
type FsSwift struct {
	c         swift.Connection // the connection to the swift server
	container string           // the container we are working on
	root      string           // the path we are working on if any
// Fs represents a remote swift server
type Fs struct {
	name              string           // name of this remote
	c                 swift.Connection // the connection to the swift server
	container         string           // the container we are working on
	segmentsContainer string           // container to store the segments (if any) in
	root              string           // the path we are working on if any
}

// FsObjectSwift describes a swift object
// Object describes a swift object
//
// Will definitely have info but maybe not meta
type FsObjectSwift struct {
	swift  *FsSwift        // what this object is part of
	remote string          // The remote path
	info   swift.Object    // Info from the swift object if known
	meta   *swift.Metadata // The object metadata if known
type Object struct {
	fs      *Fs            // what this object is part of
	remote  string         // The remote path
	info    swift.Object   // Info from the swift object if known
	headers *swift.Headers // The object headers if known
}

// ------------------------------------------------------------

// String converts this FsSwift to a string
func (f *FsSwift) String() string {
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	if f.root == "" {
		return f.container
	}
	return f.container + "/" + f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	if f.root == "" {
		return fmt.Sprintf("Swift container %s", f.container)
	}
@@ -108,14 +138,14 @@ func swiftConnection(name string) (*swift.Connection, error) {
	if apiKey == "" {
		return nil, errors.New("key not found")
	}
	authUrl := fs.ConfigFile.MustValue(name, "auth")
	if authUrl == "" {
	authURL := fs.ConfigFile.MustValue(name, "auth")
	if authURL == "" {
		return nil, errors.New("auth not found")
	}
	c := &swift.Connection{
		UserName:  userName,
		ApiKey:    apiKey,
		AuthUrl:   authUrl,
		AuthUrl:   authURL,
		UserAgent: fs.UserAgent,
		Tenant:    fs.ConfigFile.MustValue(name, "tenant"),
		Region:    fs.ConfigFile.MustValue(name, "region"),
@@ -130,7 +160,7 @@ func swiftConnection(name string) (*swift.Connection, error) {
	return c, nil
}

// NewFs constructs an FsSwift from the path, container:path
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
	container, directory, err := parsePath(root)
	if err != nil {
@@ -140,16 +170,18 @@ func NewFs(name, root string) (fs.Fs, error) {
	if err != nil {
		return nil, err
	}
	f := &FsSwift{
		c:         *c,
		container: container,
		root:      directory,
	f := &Fs{
		name:              name,
		c:                 *c,
		container:         container,
		segmentsContainer: container + "_segments",
		root:              directory,
	}
	if f.root != "" {
		f.root += "/"
		// Check to see if the object exists
		_, _, err = f.c.Object(container, directory)
		if err == nil {
		// Check to see if the object exists - ignore directory markers
		_, headers, err := f.c.Object(container, directory)
		if err == nil && headers["Content-Type"] != directoryMarkerContentType {
			remote := path.Base(directory)
			f.root = path.Dir(directory)
			if f.root == "." {
@@ -168,46 +200,50 @@ func NewFs(name, root string) (fs.Fs, error) {
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsSwift) newFsObjectWithInfo(remote string, info *swift.Object) fs.Object {
	fs := &FsObjectSwift{
		swift:  f,
func (f *Fs) newFsObjectWithInfo(remote string, info *swift.Object) fs.Object {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	if info != nil {
		// Set info but not meta
		fs.info = *info
		// Set info but not headers
		o.info = *info
	} else {
		err := fs.readMetaData() // reads info and meta, returning an error
		err := o.readMetaData() // reads info and headers, returning an error
		if err != nil {
			// logged already FsDebug("Failed to read info: %s", err)
			fs.Debug(o, "Failed to read metadata: %s", err)
			return nil
		}
	}
	return fs
	return o
}

// Return an FsObject from a path
// NewFsObject returns an FsObject from a path
//
// May return nil if an error occurred
func (f *FsSwift) NewFsObject(remote string) fs.Object {
func (f *Fs) NewFsObject(remote string) fs.Object {
	return f.newFsObjectWithInfo(remote, nil)
}

// list the objects into the function supplied
// listFn is called from list and listContainerRoot to handle an object
type listFn func(string, *swift.Object) error

// listContainerRoot lists the objects into the function supplied from
// the container and root supplied
//
// If directories is set it only sends directories
func (f *FsSwift) list(directories bool, fn func(string, *swift.Object)) {
func (f *Fs) listContainerRoot(container, root string, directories bool, fn listFn) error {
	// Options for ObjectsWalk
	opts := swift.ObjectsOpts{
		Prefix: f.root,
		Prefix: root,
		Limit:  256,
	}
	if directories {
		opts.Delimiter = '/'
	}
	rootLength := len(f.root)
	err := f.c.ObjectsWalk(f.container, &opts, func(opts *swift.ObjectsOpts) (interface{}, error) {
		objects, err := f.c.Objects(f.container, opts)
	rootLength := len(root)
	return f.c.ObjectsWalk(container, &opts, func(opts *swift.ObjectsOpts) (interface{}, error) {
		objects, err := f.c.Objects(container, opts)
		if err == nil {
			for i := range objects {
				object := &objects[i]
@@ -218,46 +254,68 @@ func (f *FsSwift) list(directories bool, fn func(string, *swift.Object)) {
				}
				object.Name = object.Name[:len(object.Name)-1]
			}
			if !strings.HasPrefix(object.Name, f.root) {
			if !strings.HasPrefix(object.Name, root) {
				fs.Log(f, "Odd name received %q", object.Name)
				continue
			}
			remote := object.Name[rootLength:]
			fn(remote, object)
			err = fn(remote, object)
			if err != nil {
				break
			}
		}
		}
		return objects, err
	})
}

// list the objects into the function supplied
//
// If directories is set it only sends directories
func (f *Fs) list(directories bool, fn listFn) {
	err := f.listContainerRoot(f.container, f.root, directories, fn)
	if err != nil {
		fs.Stats.Error()
		fs.Log(f, "Couldn't read container %q: %s", f.container, err)
		fs.ErrorLog(f, "Couldn't read container %q: %s", f.container, err)
	}
}

// Walk the path returning a channel of FsObjects
func (f *FsSwift) List() fs.ObjectsChan {
// listFiles walks the path returning a channel of FsObjects
//
// if ignoreStorable is set then it outputs the file even if Storable() is false
func (f *Fs) listFiles(ignoreStorable bool) fs.ObjectsChan {
	out := make(fs.ObjectsChan, fs.Config.Checkers)
	if f.container == "" {
		// Return no objects at top level list
		close(out)
		fs.Stats.Error()
		fs.Log(f, "Can't list objects at root - choose a container using lsd")
		fs.ErrorLog(f, "Can't list objects at root - choose a container using lsd")
	} else {
		// List the objects
		go func() {
			defer close(out)
			f.list(false, func(remote string, object *swift.Object) {
				if fs := f.newFsObjectWithInfo(remote, object); fs != nil {
					out <- fs
			f.list(false, func(remote string, object *swift.Object) error {
				if o := f.newFsObjectWithInfo(remote, object); o != nil {
					// Storable does a full metadata read on 0 size objects which might be manifest files
					storable := o.Storable()
					if storable || ignoreStorable {
						out <- o
					}
				}
				return nil
			})
		}()
	}
	return out
}

// Lists the containers
func (f *FsSwift) ListDir() fs.DirChan {
// List walks the path returning a channel of FsObjects
func (f *Fs) List() fs.ObjectsChan {
	return f.listFiles(false)
}

// ListDir lists the containers
func (f *Fs) ListDir() fs.DirChan {
	out := make(fs.DirChan, fs.Config.Checkers)
	if f.container == "" {
		// List the containers
@@ -266,7 +324,7 @@ func (f *FsSwift) ListDir() fs.DirChan {
			containers, err := f.c.ContainersAll(nil)
			if err != nil {
				fs.Stats.Error()
				fs.Log(f, "Couldn't list containers: %v", err)
				fs.ErrorLog(f, "Couldn't list containers: %v", err)
			} else {
				for _, container := range containers {
					out <- &fs.Dir{
@@ -281,12 +339,13 @@ func (f *FsSwift) ListDir() fs.DirChan {
		// List the directories in the path in the container
		go func() {
			defer close(out)
			f.list(true, func(remote string, object *swift.Object) {
			f.list(true, func(remote string, object *swift.Object) error {
				out <- &fs.Dir{
					Name:  remote,
					Bytes: object.Bytes,
					Count: 0,
				}
				return nil
			})
		}()
	}
@@ -298,74 +357,130 @@ func (f *FsSwift) ListDir() fs.DirChan {
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *FsSwift) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
	// Temporary FsObject under construction
	fs := &FsObjectSwift{swift: f, remote: remote}
func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
	// Temporary Object under construction
	fs := &Object{
		fs:     f,
		remote: remote,
	}
	return fs, fs.Update(in, modTime, size)
}

// Mkdir creates the container if it doesn't exist
func (f *FsSwift) Mkdir() error {
func (f *Fs) Mkdir() error {
	return f.c.ContainerCreate(f.container, nil)
}

// Rmdir deletes the container
// Rmdir deletes the container if the fs is at the root
//
// Returns an error if it isn't empty
func (f *FsSwift) Rmdir() error {
func (f *Fs) Rmdir() error {
	if f.root != "" {
		return nil
	}
	return f.c.ContainerDelete(f.container)
}

// Return the precision
func (fs *FsSwift) Precision() time.Duration {
// Precision of the remote
func (f *Fs) Precision() time.Duration {
	return time.Nanosecond
}

// Purge deletes all the files and directories
//
// Implemented here so we can make sure we delete directory markers
func (f *Fs) Purge() error {
	fs.DeleteFiles(f.listFiles(true))
	return f.Rmdir()
}
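Reviewer note: Swift "directory markers" are tiny objects whose Content-Type is application/directory; listFiles normally skips them because their Storable() is false, so Purge lists with ignoreStorable set to true in order to delete them as well. A minimal predicate in the spirit of the Storable check described below, reusing the constants defined at the top of this file (the predicate itself is a sketch, not rclone's exact method):

package main

import "fmt"

const (
	directoryMarkerContentType = "application/directory"
	directoryMarkerMaxSize     = 1
)

// isDirectoryMarker mimics the check Storable() performs: small
// objects whose Content-Type is application/directory are markers,
// not real files.
func isDirectoryMarker(size int64, contentType string) bool {
	return size <= directoryMarkerMaxSize && contentType == directoryMarkerContentType
}

func main() {
	fmt.Println(isDirectoryMarker(0, "application/directory")) // true - skipped by List
	fmt.Println(isDirectoryMarker(1024, "text/plain"))         // false - a normal object
}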
||||
// Copy src to this remote using server side copy operations.
|
||||
//
|
||||
// This is stored with the remote path given
|
||||
//
|
||||
// It returns the destination Object and a possible error
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debug(src, "Can't copy - not same remote type")
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
srcFs := srcObj.fs
|
||||
_, err := f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.NewFsObject(remote), nil
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Return the parent Fs
|
||||
func (o *FsObjectSwift) Fs() fs.Fs {
|
||||
return o.swift
|
||||
// Fs returns the parent Fs
|
||||
func (o *Object) Fs() fs.Fs {
|
||||
return o.fs
|
||||
}
|
||||
|
||||
// Return a string version
|
||||
func (o *FsObjectSwift) String() string {
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Return the remote path
|
||||
func (o *FsObjectSwift) Remote() string {
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
 // Md5sum returns the Md5sum of an object returning a lowercase hex string
-func (o *FsObjectSwift) Md5sum() (string, error) {
+func (o *Object) Md5sum() (string, error) {
+	isManifest, err := o.isManifestFile()
+	if err != nil {
+		return "", err
+	}
+	if isManifest {
+		fs.Debug(o, "Returning empty Md5sum for swift manifest file")
+		return "", nil
+	}
 	return strings.ToLower(o.info.Hash), nil
 }

+// isManifestFile checks for manifest header
+func (o *Object) isManifestFile() (bool, error) {
+	err := o.readMetaData()
+	if err != nil {
+		if err == swift.ObjectNotFound {
+			return false, nil
+		}
+		return false, err
+	}
+	_, isManifestFile := (*o.headers)["X-Object-Manifest"]
+	return isManifestFile, nil
+}
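A minimal sketch (not from the patch): a Swift dynamic large object is recognisable by its X-Object-Manifest header, and for such objects the server-reported hash covers the zero-byte manifest rather than the concatenated content, which is why Md5sum above returns "" for them. The container and object names are invented.

```go
package example

import "github.com/ncw/swift"

// isDynamicLargeObject does a HEAD on the object and checks for the
// X-Object-Manifest header. Hypothetical helper; assumes c is authenticated.
func isDynamicLargeObject(c *swift.Connection, container, name string) (bool, error) {
	_, headers, err := c.Object(container, name)
	if err != nil {
		return false, err
	}
	_, ok := headers["X-Object-Manifest"]
	return ok, nil
}
```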
 // Size returns the size of an object in bytes
-func (o *FsObjectSwift) Size() int64 {
+func (o *Object) Size() int64 {
 	return o.info.Bytes
 }

 // readMetaData gets the metadata if it hasn't already been fetched
 //
 // it also sets the info
-func (o *FsObjectSwift) readMetaData() (err error) {
-	if o.meta != nil {
+func (o *Object) readMetaData() (err error) {
+	if o.headers != nil {
 		return nil
 	}
-	info, h, err := o.swift.c.Object(o.swift.container, o.swift.root+o.remote)
+	info, h, err := o.fs.c.Object(o.fs.container, o.fs.root+o.remote)
 	if err != nil {
 		fs.Debug(o, "Failed to read info: %s", err)
 		return err
 	}
-	meta := h.ObjectMetadata()
 	o.info = info
-	o.meta = &meta
+	o.headers = &h
 	return nil
 }

@@ -374,13 +489,13 @@ func (o *FsObjectSwift) readMetaData() (err error) {
 //
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
-func (o *FsObjectSwift) ModTime() time.Time {
+func (o *Object) ModTime() time.Time {
 	err := o.readMetaData()
 	if err != nil {
-		// fs.Log(o, "Failed to read metadata: %s", err)
+		fs.Debug(o, "Failed to read metadata: %s", err)
 		return o.info.LastModified
 	}
-	modTime, err := o.meta.GetModTime()
+	modTime, err := o.headers.ObjectMetadata().GetModTime()
 	if err != nil {
 		// fs.Log(o, "Failed to read mtime from object: %s", err)
 		return o.info.LastModified
@@ -388,55 +503,185 @@ func (o *FsObjectSwift) ModTime() time.Time {
 	return modTime
 }
-// Sets the modification time of the local fs object
-func (o *FsObjectSwift) SetModTime(modTime time.Time) {
+// SetModTime sets the modification time of the local fs object
+func (o *Object) SetModTime(modTime time.Time) {
 	err := o.readMetaData()
 	if err != nil {
 		fs.Stats.Error()
-		fs.Log(o, "Failed to read metadata: %s", err)
+		fs.ErrorLog(o, "Failed to read metadata: %s", err)
 		return
 	}
-	o.meta.SetModTime(modTime)
-	err = o.swift.c.ObjectUpdate(o.swift.container, o.swift.root+o.remote, o.meta.ObjectHeaders())
+	meta := o.headers.ObjectMetadata()
+	meta.SetModTime(modTime)
+	newHeaders := meta.ObjectHeaders()
+	for k, v := range newHeaders {
+		(*o.headers)[k] = v
+	}
+	err = o.fs.c.ObjectUpdate(o.fs.container, o.fs.root+o.remote, newHeaders)
	if err != nil {
 		fs.Stats.Error()
-		fs.Log(o, "Failed to update remote mtime: %s", err)
+		fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
 	}
 }
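A minimal sketch (not from the patch): `swift.Metadata.SetModTime` records the time as an "mtime" metadata item (sent as an X-Object-Meta-Mtime header), and `ObjectUpdate` posts the new headers without re-uploading the object body, which is what makes SetModTime cheap. The container and object names are invented.

```go
package example

import (
	"time"

	"github.com/ncw/swift"
)

// setMtime stores the modification time as object metadata via a
// metadata-only POST. Hypothetical names; assumes c is authenticated.
func setMtime(c *swift.Connection, when time.Time) error {
	m := swift.Metadata{}
	m.SetModTime(when)
	return c.ObjectUpdate("files", "dir/file.txt", m.ObjectHeaders())
}
```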
-// Is this object storable
-func (o *FsObjectSwift) Storable() bool {
-	return true
+// Storable returns if this object is storable
+//
+// It reads the metadata for <= directoryMarkerMaxSize byte objects then compares the
+// Content-Type to directoryMarkerContentType - that makes it a
+// directory marker which is not storable.
+func (o *Object) Storable() bool {
+	if o.info.Bytes > directoryMarkerMaxSize {
+		return true
+	}
+	err := o.readMetaData()
+	if err != nil {
+		fs.Debug(o, "Failed to read metadata: %s", err)
+		return true
+	}
+	contentType := (*o.headers)["Content-Type"]
+	return contentType != directoryMarkerContentType
+}
 // Open an object for read
-func (o *FsObjectSwift) Open() (in io.ReadCloser, err error) {
-	in, _, err = o.swift.c.ObjectOpen(o.swift.container, o.swift.root+o.remote, true, nil)
+func (o *Object) Open() (in io.ReadCloser, err error) {
+	in, _, err = o.fs.c.ObjectOpen(o.fs.container, o.fs.root+o.remote, true, nil)
 	return
 }

+// min returns the smallest of x, y
+func min(x, y int64) int64 {
+	if x < y {
+		return x
+	}
+	return y
+}
+// removeSegments removes any old segments from o
+//
+// if except is passed in then segments with that prefix won't be deleted
+func (o *Object) removeSegments(except string) error {
+	segmentsRoot := o.fs.root + o.remote + "/"
+	err := o.fs.listContainerRoot(o.fs.segmentsContainer, segmentsRoot, false, func(remote string, object *swift.Object) error {
+		if except != "" && strings.HasPrefix(remote, except) {
+			// fs.Debug(o, "Ignoring current segment file %q in container %q", segmentsRoot+remote, o.fs.segmentsContainer)
+			return nil
+		}
+		segmentPath := segmentsRoot + remote
+		fs.Debug(o, "Removing segment file %q in container %q", segmentPath, o.fs.segmentsContainer)
+		return o.fs.c.ObjectDelete(o.fs.segmentsContainer, segmentPath)
+	})
+	if err != nil {
+		return err
+	}
+	// remove the segments container if empty, ignore errors
+	err = o.fs.c.ContainerDelete(o.fs.segmentsContainer)
+	if err == nil {
+		fs.Debug(o, "Removed empty container %q", o.fs.segmentsContainer)
+	}
+	return nil
+}
+// updateChunks updates the existing object using chunks to a separate
+// container.  It returns a string which prefixes current segments.
+func (o *Object) updateChunks(in io.Reader, headers swift.Headers, size int64) (string, error) {
+	// Create the segmentsContainer if it doesn't exist
+	err := o.fs.c.ContainerCreate(o.fs.segmentsContainer, nil)
+	if err != nil {
+		return "", err
+	}
+	// Upload the chunks
+	left := size
+	i := 0
+	uniquePrefix := fmt.Sprintf("%s/%d", swift.TimeToFloatString(time.Now()), size)
+	segmentsPath := fmt.Sprintf("%s%s/%s", o.fs.root, o.remote, uniquePrefix)
+	for left > 0 {
+		n := min(left, int64(chunkSize))
+		headers["Content-Length"] = strconv.FormatInt(n, 10) // set Content-Length as we know it
+		segmentReader := io.LimitReader(in, n)
+		segmentPath := fmt.Sprintf("%s/%08d", segmentsPath, i)
+		fs.Debug(o, "Uploading segment file %q into %q", segmentPath, o.fs.segmentsContainer)
+		_, err := o.fs.c.ObjectPut(o.fs.segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
+		if err != nil {
+			return "", err
+		}
+		left -= n
+		i++
+	}
+	// Upload the manifest
+	headers["X-Object-Manifest"] = fmt.Sprintf("%s/%s", o.fs.segmentsContainer, segmentsPath)
+	headers["Content-Length"] = "0" // set Content-Length as we know it
+	emptyReader := bytes.NewReader(nil)
+	manifestName := o.fs.root + o.remote
+	_, err = o.fs.c.ObjectPut(o.fs.container, manifestName, emptyReader, true, "", "", headers)
+	return uniquePrefix + "/", err
+}
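A minimal sketch (not from the patch) of the dynamic-large-object layout updateChunks produces: fixed-size segment objects land in a separate container under a shared prefix, and the object in the main container is a zero-byte manifest whose X-Object-Manifest header names that prefix, so a GET on it streams the segments concatenated in name order. All names and the segment size below are invented.

```go
package example

import (
	"bytes"
	"fmt"
	"io"

	"github.com/ncw/swift"
)

// putLargeObject uploads size bytes from in as a dynamic large object.
// Hypothetical helper; assumes c is authenticated and size is accurate.
func putLargeObject(c *swift.Connection, in io.Reader, size, segmentSize int64) error {
	const container, segmentsContainer = "files", "files_segments"
	if err := c.ContainerCreate(segmentsContainer, nil); err != nil {
		return err
	}
	for i, left := 0, size; left > 0; i++ {
		n := left
		if n > segmentSize {
			n = segmentSize
		}
		// Zero-padded names keep the segments in upload order.
		segmentName := fmt.Sprintf("big.bin/%08d", i)
		if _, err := c.ObjectPut(segmentsContainer, segmentName,
			io.LimitReader(in, n), true, "", "", nil); err != nil {
			return err
		}
		left -= n
	}
	// The manifest itself is empty; the header stitches the segments together.
	headers := swift.Headers{"X-Object-Manifest": segmentsContainer + "/big.bin"}
	_, err := c.ObjectPut(container, "big.bin", bytes.NewReader(nil), true, "", "", headers)
	return err
}
```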
 // Update the object with the contents of the io.Reader, modTime and size
 //
 // The new object may have been created if an error is returned
-func (o *FsObjectSwift) Update(in io.Reader, modTime time.Time, size int64) error {
-	// Set the mtime
-	m := swift.Metadata{}
-	m.SetModTime(modTime)
-	_, err := o.swift.c.ObjectPut(o.swift.container, o.swift.root+o.remote, in, true, "", "", m.ObjectHeaders())
+func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
+	// Note whether this has a manifest before starting
+	isManifest, err := o.isManifestFile()
+	if err != nil {
+		return err
+	}
+
+	// Set the mtime
+	m := swift.Metadata{}
+	m.SetModTime(modTime)
+	headers := m.ObjectHeaders()
+	uniquePrefix := ""
+	if size > int64(chunkSize) {
+		uniquePrefix, err = o.updateChunks(in, headers, size)
+		if err != nil {
+			return err
+		}
+	} else {
+		headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length as we know it
+		_, err := o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", "", headers)
+		if err != nil {
+			return err
+		}
+	}
+
+	// If file was a manifest then remove old/all segments
+	if isManifest {
+		err = o.removeSegments(uniquePrefix)
+		if err != nil {
+			fs.Log(o, "Failed to remove old segments - carrying on with upload: %v", err)
+		}
+	}
+
 	// Read the metadata from the newly created object
-	o.meta = nil // wipe old metadata
-	err = o.readMetaData()
-	return err
+	o.headers = nil // wipe old metadata
+	return o.readMetaData()
 }
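A minimal sketch (not from the patch), assuming the ncw/swift client's behaviour: the small-file branch above sets Content-Length itself so the PUT goes out with a known length rather than chunked transfer encoding, letting the server verify the size. All names below are invented.

```go
package example

import (
	"io"
	"strconv"
	"time"

	"github.com/ncw/swift"
)

// putSmall uploads a small object in one PUT with an explicit length
// and the mtime carried as object metadata. Hypothetical names;
// assumes c is authenticated and size matches the reader's contents.
func putSmall(c *swift.Connection, in io.Reader, size int64, modTime time.Time) error {
	m := swift.Metadata{}
	m.SetModTime(modTime)
	headers := m.ObjectHeaders()
	headers["Content-Length"] = strconv.FormatInt(size, 10)
	_, err := c.ObjectPut("files", "dir/file.txt", in, true, "", "", headers)
	return err
}
```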
 // Remove an object
-func (o *FsObjectSwift) Remove() error {
-	return o.swift.c.ObjectDelete(o.swift.container, o.swift.root+o.remote)
+func (o *Object) Remove() error {
+	isManifestFile, err := o.isManifestFile()
+	if err != nil {
+		return err
+	}
+	// Remove file/manifest first
+	err = o.fs.c.ObjectDelete(o.fs.container, o.fs.root+o.remote)
+	if err != nil {
+		return err
+	}
+	// ...then segments if required
+	if isManifestFile {
+		err = o.removeSegments("")
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
 // Check the interfaces are satisfied
-var _ fs.Fs = &FsSwift{}
-var _ fs.Object = &FsObjectSwift{}
+var (
+	_ fs.Fs     = &Fs{}
+	_ fs.Purger = &Fs{}
+	_ fs.Copier = &Fs{}
+	_ fs.Object = &Object{}
+)
@@ -1,7 +1,7 @@
 // Test Swift filesystem interface
 //
 // Automatically generated - DO NOT EDIT
-// Regenerate with: go run gen_tests.go or make gen_tests
+// Regenerate with: make gen_tests
 package swift_test

 import (
@@ -13,7 +13,7 @@ import (
 )

 func init() {
-	fstests.NilObject = fs.Object((*swift.FsObjectSwift)(nil))
+	fstests.NilObject = fs.Object((*swift.Object)(nil))
 	fstests.RemoteName = "TestSwift:"
 }

@@ -34,6 +34,9 @@ func TestFsListRoot(t *testing.T) { fstests.TestFsListRoot(t) }
 func TestFsListFile1(t *testing.T)     { fstests.TestFsListFile1(t) }
 func TestFsNewFsObject(t *testing.T)   { fstests.TestFsNewFsObject(t) }
 func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
+func TestFsCopy(t *testing.T)          { fstests.TestFsCopy(t) }
+func TestFsMove(t *testing.T)          { fstests.TestFsMove(t) }
+func TestFsDirMove(t *testing.T)       { fstests.TestFsDirMove(t) }
 func TestFsRmdirFull(t *testing.T)     { fstests.TestFsRmdirFull(t) }
 func TestFsPrecision(t *testing.T)     { fstests.TestFsPrecision(t) }
 func TestObjectString(t *testing.T)    { fstests.TestObjectString(t) }