Mirror of https://github.com/rclone/rclone.git, synced 2026-01-13 14:03:19 +00:00

Compare commits (44 commits)
Commit SHA1s:

2fcc18779b  96cc3e5a0b  cc8fe0630c  1d9e76bb0f  337110b7a0  83733205f6
d8306938a1  88ea8b305d  e2f4d7b5e3  8140869767  6a8de87116  0da6f24221
771e60bd07  40b3c4883f  e320f4a988  5835f15f21  67c311233b  3fcff32524
472f065ce7  6c6d7eb770  c646ada3c3  f55359b3ac  9d9a17547a  c6dc88766b
754ce9dec6  bd5f685d0a  c663e24669  5948764e9e  539ad44757  74994a2ec1
97dced6a0b  e04acb09ce  87ed7fc932  90744301d3  bf4879f57f  e22b445cff
5ab7970e18  e984eeedc4  968b5a0984  7af1282375  d9fcc32f70  870a9fc3b2
8e3703abeb  ba81277bbe
.gitignore (vendored, 5 changes)

@@ -4,6 +4,7 @@ rclone
 rclonetest/rclonetest
 build
 docs/public
-README.html
-README.txt
+MANUAL.md
+MANUAL.html
+MANUAL.txt
 rclone.1
.travis.yml

@@ -1,7 +1,7 @@
 language: go
 sudo: false
 
 go:
-  - 1.1.2
   - 1.2.2
   - 1.3.3
+  - 1.4
Makefile (23 changes)

@@ -10,16 +10,19 @@ test:	rclone
 	go test ./...
 	cd fs && ./test_all.sh
 
-doc:	rclone.1 README.html README.txt
+doc:	rclone.1 MANUAL.html MANUAL.txt
 
-rclone.1:	README.md
-	pandoc -s --from markdown --to man README.md -o rclone.1
+rclone.1:	MANUAL.md
+	pandoc -s --from markdown --to man MANUAL.md -o rclone.1
 
-README.html:	README.md
-	pandoc -s --from markdown_github --to html README.md -o README.html
+MANUAL.md:	make_manual.py docs/content/*.md
+	./make_manual.py
 
-README.txt:	README.md
-	pandoc -s --from markdown_github --to plain README.md -o README.txt
+MANUAL.html:	MANUAL.md
+	pandoc -s --from markdown --to html MANUAL.md -o MANUAL.html
+
+MANUAL.txt:	MANUAL.md
+	pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt
 
 install:	rclone
 	install -d ${DESTDIR}/usr/bin

@@ -29,7 +32,7 @@ clean:
 	go clean ./...
 	find . -name \*~ | xargs -r rm -f
 	rm -rf build docs/public
-	rm -f rclone rclonetest/rclonetest rclone.1 README.html README.txt
+	rm -f rclone rclonetest/rclonetest rclone.1 MANUAL.md MANUAL.html MANUAL.txt
 
 website:
 	cd docs && hugo

@@ -52,11 +55,11 @@ tag:
 	echo -e "package fs\n const Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
 	perl -lpe 's/VERSION/${NEW_TAG}/g; s/DATE/'`date -I`'/g;' docs/content/downloads.md.in > docs/content/downloads.md
 	git tag $(NEW_TAG)
-	@echo "Add this to changelog in README.md"
+	@echo "Add this to changelog in docs/content/changelog.md"
 	@echo " * $(NEW_TAG) -" `date -I`
 	@git log $(LAST_TAG)..$(NEW_TAG) --oneline
 	@echo "Then commit the changes"
-	@echo git commit -m "Version $(NEW_TAG)" -a -v
+	@echo git commit -m \"Version $(NEW_TAG)\" -a -v
 	@echo "And finally run make retag before make cross etc"
 
 retag:
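Piped through `gofmt`, the `echo` in the `tag` target above produces a `fs/version.go` of the following shape (the tag value here is only an example standing in for `$(NEW_TAG)`):

```go
// fs/version.go as written by `make tag`; "v1.18" is illustrative.
package fs

const Version = "v1.18"
```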
README.md (358 changes)

@@ -1,12 +1,13 @@
-% rclone(1) User Manual
-% Nick Craig-Wood
-% Jul 7, 2014
-
 Rclone
 ======
 
 [![Logo](http://rclone.org/img/rclone-120x120.png)](http://rclone.org/)
 
+[Website](http://rclone.org) |
+[Documentation](http://rclone.org/docs/) |
+[Changelog](http://rclone.org/changelog/) |
+[Installation](http://rclone.org/install/) |
+[G+](https://google.com/+RcloneOrg)
+
 [![Build Status](https://travis-ci.org/ncw/rclone.png?branch=master)](https://travis-ci.org/ncw/rclone) [![GoDoc](https://godoc.org/github.com/ncw/rclone?status.svg)](https://godoc.org/github.com/ncw/rclone)
 
 Rclone is a command line program to sync files and directories to and from
 
 * Google Drive

@@ -26,352 +27,13 @@ Features
 * Check mode to check all MD5SUMs
 * Can sync to and from network, eg two different Drive accounts
 
-See the Home page for more documentation and configuration walkthroughs.
+See the home page for installation, usage, documentation, changelog
+and configuration walkthroughs.
 
 * http://rclone.org/
 
-Install
--------
-
-Rclone is a Go program and comes as a single binary file.
-
-Download the binary for your OS from
-
-* http://rclone.org/downloads/
-
-Or alternatively if you have Go installed use
-
-    go install github.com/ncw/rclone
-
-and this will build the binary in `$GOPATH/bin`.
-
-Configure
----------
-
-First you'll need to configure rclone. As the object storage systems
-have quite complicated authentication these are kept in a config file
-`.rclone.conf` in your home directory by default. (You can use the
-`--config` option to choose a different config file.)
-
-The easiest way to make the config is to run rclone with the config
-option, eg
-
-    rclone config
-
-Usage
------
-
-Rclone syncs a directory tree from local to remote.
-
-Its basic syntax is
-
-    Syntax: [options] subcommand <parameters> <parameters...>
-
-See below for how to specify the source and destination paths.
-
-Subcommands
------------
-
-    rclone copy source:path dest:path
-
-Copy the source to the destination. Doesn't transfer
-unchanged files, testing by size and modification time or
-MD5SUM. Doesn't delete files from the destination.
-
-    rclone sync source:path dest:path
-
-Sync the source to the destination, changing the destination
-only. Doesn't transfer unchanged files, testing by size and
-modification time or MD5SUM. Destination is updated to match
-source, including deleting files if necessary. Since this can
-cause data loss, test first with the `--dry-run` flag.
-
-    rclone ls [remote:path]
-
-List all the objects in the path with size and path.
-
-    rclone lsd [remote:path]
-
-List all directories/containers/buckets in the path.
-
-    rclone lsl [remote:path]
-
-List all the objects in the path with modification time,
-size and path.
-
-    rclone md5sum [remote:path]
-
-Produces an md5sum file for all the objects in the path. This
-is in the same format as the standard md5sum tool produces.
-
-    rclone mkdir remote:path
-
-Make the path if it doesn't already exist
-
-    rclone rmdir remote:path
-
-Remove the path. Note that you can't remove a path with
-objects in it, use purge for that.
-
-    rclone purge remote:path
-
-Remove the path and all of its contents.
-
-    rclone check source:path dest:path
-
-Checks the files in the source and destination match. It
-compares sizes and MD5SUMs and prints a report of files which
-don't match. It doesn't alter the source or destination.
-
-    rclone config
-
-Enter an interactive configuration session.
-
-    rclone help
-
-This help.
-
-General options:
-
-```
---bwlimit=0: Bandwidth limit in kBytes/s, or use suffix k|M|G
---checkers=8: Number of checkers to run in parallel.
---config="~/.rclone.conf": Config file.
---contimeout=1m0s: Connect timeout
--n, --dry-run=false: Do a trial run with no permanent changes
---log-file="": Log everything to this file
---modify-window=1ns: Max time diff to be considered the same
--q, --quiet=false: Print as little stuff as possible
---stats=1m0s: Interval to print stats (0 to disable)
---timeout=5m0s: IO idle timeout
---transfers=4: Number of file transfers to run in parallel.
--v, --verbose=false: Print lots more stuff
--V, --version=false: Print the version number
-```
-
-Developer options:
-
-```
---cpuprofile="": Write cpu profile to file
-```
-
-Local Filesystem
-----------------
-
-Paths are specified as normal filesystem paths, so
-
-    rclone sync /home/source /tmp/destination
-
-Will sync `/home/source` to `/tmp/destination`
-
-Swift / Rackspace cloudfiles / Memset Memstore
-----------------------------------------------
-
-Paths are specified as remote:container (or remote: for the `lsd`
-command.) You may put subdirectories in too, eg
-`remote:container/path/to/dir`.
-
-So to copy a local directory to a swift container called backup:
-
-    rclone sync /home/source swift:backup
-
-For more help see the [online docs on Openstack Swift](http://rclone.org/swift).
-
-Amazon S3
----------
-
-Paths are specified as remote:bucket. You may put subdirectories in
-too, eg `remote:bucket/path/to/dir`.
-
-So to copy a local directory to an s3 bucket called backup
-
-    rclone sync /home/source s3:backup
-
-For more help see the [online docs on Amazon S3](http://rclone.org/s3).
-
-Google drive
-------------
-
-Paths are specified as remote:path. Drive paths may be as deep as required.
-
-The initial setup for drive involves getting a token from Google drive
-which you need to do in your browser. `rclone config` walks you
-through it.
-
-To copy a local directory to a drive directory called backup
-
-    rclone copy /home/source remote:backup
-
-For more help see the [online docs on Google Drive](http://rclone.org/drive).
-
-Dropbox
--------
-
-Paths are specified as remote:path. Dropbox paths may be as deep as required.
-
-The initial setup for dropbox involves getting a token from Dropbox
-which you need to do in your browser. `rclone config` walks you
-through it.
-
-To copy a local directory to a dropbox directory called backup
-
-    rclone copy /home/source dropbox:backup
-
-For more help see the [online docs on Dropbox](http://rclone.org/dropbox).
-
-Google Cloud Storage
---------------------
-
-Paths are specified as remote:path. Google Cloud Storage paths may be
-as deep as required.
-
-The initial setup for Google Cloud Storage involves getting a token
-from Google which you need to do in your browser. `rclone config`
-walks you through it.
-
-To copy a local directory to a google cloud storage directory called backup
-
-    rclone copy /home/source remote:backup
-
-For more help see the [online docs on Google Cloud Storage](http://rclone.org/googlecloudstorage/).
-
-Single file copies
-------------------
-
-Rclone can copy single files
-
-    rclone src:path/to/file dst:path/dir
-
-Or
-
-    rclone src:path/to/file dst:path/to/file
-
-Note that you can't rename the file if you are copying from one file to another.
-
 License
 -------
 
 This is free software under the terms of the MIT license (check the
 COPYING file included in this package).
-
-Bugs
-----
-
-* Empty directories left behind with Local and Drive
-  * eg purging a local directory with subdirectories doesn't work
-
-Changelog
----------
-
-* v1.13 - 2015-05-10
-  * Revise documentation (especially sync)
-  * Implement --timeout and --conntimeout
-  * s3: ignore etags from multipart uploads which aren't md5sums
-* v1.12 - 2015-03-15
-  * drive: Use chunked upload for files above a certain size
-  * drive: add --drive-chunk-size and --drive-upload-cutoff parameters
-  * drive: switch to insert from update when a failed copy deletes the upload
-  * core: Log duplicate files if they are detected
-* v1.11 - 2015-03-04
-  * swift: add region parameter
-  * drive: fix crash on failed to update remote mtime
-  * In remote paths, change native directory separators to /
-  * Add synchronization to ls/lsl/lsd output to stop corruptions
-  * Ensure all stats/log messages go to stderr
-  * Add --log-file flag to log everything (including panics) to file
-  * Make it possible to disable stats printing with --stats=0
-  * Implement --bwlimit to limit data transfer bandwidth
-* v1.10 - 2015-02-12
-  * s3: list an unlimited number of items
-  * Fix getting stuck in the configurator
-* v1.09 - 2015-02-07
-  * windows: Stop drive letters (eg C:) getting mixed up with remotes (eg drive:)
-  * local: Fix directory separators on Windows
-  * drive: fix rate limit exceeded errors
-* v1.08 - 2015-02-04
-  * drive: fix subdirectory listing to not list entire drive
-  * drive: Fix SetModTime
-  * dropbox: adapt code to recent library changes
-* v1.07 - 2014-12-23
-  * google cloud storage: fix memory leak
-* v1.06 - 2014-12-12
-  * Fix "Couldn't find home directory" on OSX
-  * swift: Add tenant parameter
-  * Use new location of Google API packages
-* v1.05 - 2014-08-09
-  * Improved tests and consequently lots of minor fixes
-  * core: Fix race detected by go race detector
-  * core: Fixes after running errcheck
-  * drive: reset root directory on Rmdir and Purge
-  * fs: Document that Purger returns error on empty directory, test and fix
-  * google cloud storage: fix ListDir on subdirectory
-  * google cloud storage: re-read metadata in SetModTime
-  * s3: make reading metadata more reliable to work around eventual consistency problems
-  * s3: strip trailing / from ListDir()
-  * swift: return directories without / in ListDir
-* v1.04 - 2014-07-21
-  * google cloud storage: Fix crash on Update
-* v1.03 - 2014-07-20
-  * swift, s3, dropbox: fix updated files being marked as corrupted
-  * Make compile with go 1.1 again
-* v1.02 - 2014-07-19
-  * Implement Dropbox remote
-  * Implement Google Cloud Storage remote
-  * Verify Md5sums and Sizes after copies
-  * Remove times from "ls" command - lists sizes only
-  * Add "lsl" - lists times and sizes
-  * Add "md5sum" command
-* v1.01 - 2014-07-04
-  * drive: fix transfer of big files using up lots of memory
-* v1.00 - 2014-07-03
-  * drive: fix whole second dates
-* v0.99 - 2014-06-26
-  * Fix --dry-run not working
-  * Make compatible with go 1.1
-* v0.98 - 2014-05-30
-  * s3: Treat missing Content-Length as 0 for some ceph installations
-  * rclonetest: add file with a space in
-* v0.97 - 2014-05-05
-  * Implement copying of single files
-  * s3 & swift: support paths inside containers/buckets
-* v0.96 - 2014-04-24
-  * drive: Fix multiple files of same name being created
-  * drive: Use o.Update and fs.Put to optimise transfers
-  * Add version number, -V and --version
-* v0.95 - 2014-03-28
-  * rclone.org: website, docs and graphics
-  * drive: fix path parsing
-* v0.94 - 2014-03-27
-  * Change remote format one last time
-  * GNU style flags
-* v0.93 - 2014-03-16
-  * drive: store token in config file
-  * cross compile other versions
-  * set strict permissions on config file
-* v0.92 - 2014-03-15
-  * Config fixes and --config option
-* v0.91 - 2014-03-15
-  * Make config file
-* v0.90 - 2013-06-27
-  * Project named rclone
-* v0.00 - 2012-11-18
-  * Project started
-
-Contact and support
--------------------
-
-The project website is at:
-
-* https://github.com/ncw/rclone
-
-There you can file bug reports, ask for help or send pull requests.
-
-Authors
--------
-
-* Nick Craig-Wood <nick@craig-wood.com>
-
-Contributors
-------------
-
-* Your name goes here!
@@ -9,8 +9,8 @@ Making a release
 * go get -u -f -v ./...
 * make test
 * make tag
-* edit README.md
-* git commit fs/version.go README.md docs/content/downloads.md
+* edit docs/content/changelog.md
+* git commit -a -v
 * make retag
 * # Set the GOPATH for a gox enabled compiler - . ~/bin/go-cross
 * make cross
@@ -21,8 +21,8 @@ mv build/rclone-${VERSION}-darwin-386 build/rclone-${VERSION}-osx-386
 cd build
 
 for d in `ls`; do
-  cp -a ../README.txt $d/
-  cp -a ../README.html $d/
+  cp -a ../MANUAL.txt $d/README.txt
+  cp -a ../MANUAL.html $d/README.html
   cp -a ../rclone.1 $d/
   zip -r9 $d.zip $d
   rm -rf $d
docs/content/authors.md (new file, 17 lines)

@@ -0,0 +1,17 @@
+---
+title: "Authors"
+description: "Rclone Authors and Contributors"
+date: "2014-06-16"
+---
+
+Authors
+-------
+
+* Nick Craig-Wood <nick@craig-wood.com>
+
+Contributors
+------------
+
+* Alex Couper <amcouper@gmail.com>
+* Leonid Shalupov <leonid@shalupov.com>
+* Shimon Doodkin <helpmepro1@gmail.com>
docs/content/bugs.md (new file, 31 lines)

@@ -0,0 +1,31 @@
+---
+title: "Bugs"
+description: "Rclone Bugs and Limitations"
+date: "2014-06-16"
+---
+
+Bugs and Limitations
+--------------------
+
+### Empty directories are left behind / not created ###
+
+With remotes that have a concept of directory, eg Local and Drive,
+empty directories may be left behind, or not created when one was
+expected.
+
+This is because rclone doesn't have a concept of a directory - it only
+works on objects. Most of the object storage systems can't actually
+store a directory so there is nowhere for rclone to store anything
+about directories.
+
+You can work round this to some extent with the `purge` command which
+will delete everything under the path, **including** empty directories.
+
+This may be fixed at some point in
+[Issue #100](https://github.com/ncw/rclone/issues/100)
+
+### Directory timestamps aren't preserved ###
+
+For the same reason as the above, rclone doesn't have a concept of a
+directory - it only works on objects, therefore it can't preserve the
+timestamps of directories.
docs/content/changelog.md (new file, 139 lines)

@@ -0,0 +1,139 @@
+---
+title: "Documentation"
+description: "Rclone Changelog"
+date: "2015-08-17"
+---
+
+Changelog
+---------
+
+* v1.18 - 2015-08-17
+  * drive
+    * Add `--drive-use-trash` flag so rclone trashes instead of deletes
+    * Add "Forbidden to download" message for files with no downloadURL
+  * dropbox
+    * Remove datastore
+      * This was deprecated and it caused a lot of problems
+      * Modification times and MD5SUMs no longer stored
+    * Fix uploading files > 2GB
+  * s3
+    * use official AWS SDK from github.com/aws/aws-sdk-go
+    * **NB** will most likely require you to delete and recreate remote
+    * enable multipart upload which enables files > 5GB
+    * tested with Ceph / RadosGW / S3 emulation
+    * many thanks to Sam Liston and Brian Haymore at the [Utah Center for High Performance Computing](https://www.chpc.utah.edu/) for a Ceph test account
+  * misc
+    * Show errors when reading the config file
+    * Do not print stats in quiet mode - thanks Leonid Shalupov
+    * Add FAQ
+    * Fix created directories not obeying umask
+    * Linux installation instructions - thanks Shimon Doodkin
+* v1.17 - 2015-06-14
+  * dropbox: fix case insensitivity issues - thanks Leonid Shalupov
+* v1.16 - 2015-06-09
+  * Fix uploading big files which was causing timeouts or panics
+  * Don't check md5sum after download with --size-only
+* v1.15 - 2015-06-06
+  * Add --checksum flag to only discard transfers by MD5SUM - thanks Alex Couper
+  * Implement --size-only flag to sync on size not checksum & modtime
+  * Expand docs and remove duplicated information
+  * Document rclone's limitations with directories
+  * dropbox: update docs about case insensitivity
+* v1.14 - 2015-05-21
+  * local: fix encoding of non utf-8 file names - fixes a duplicate file problem
+  * drive: docs about rate limiting
+  * google cloud storage: Fix compile after API change in "google.golang.org/api/storage/v1"
+* v1.13 - 2015-05-10
+  * Revise documentation (especially sync)
+  * Implement --timeout and --conntimeout
+  * s3: ignore etags from multipart uploads which aren't md5sums
+* v1.12 - 2015-03-15
+  * drive: Use chunked upload for files above a certain size
+  * drive: add --drive-chunk-size and --drive-upload-cutoff parameters
+  * drive: switch to insert from update when a failed copy deletes the upload
+  * core: Log duplicate files if they are detected
+* v1.11 - 2015-03-04
+  * swift: add region parameter
+  * drive: fix crash on failed to update remote mtime
+  * In remote paths, change native directory separators to /
+  * Add synchronization to ls/lsl/lsd output to stop corruptions
+  * Ensure all stats/log messages go to stderr
+  * Add --log-file flag to log everything (including panics) to file
+  * Make it possible to disable stats printing with --stats=0
+  * Implement --bwlimit to limit data transfer bandwidth
+* v1.10 - 2015-02-12
+  * s3: list an unlimited number of items
+  * Fix getting stuck in the configurator
+* v1.09 - 2015-02-07
+  * windows: Stop drive letters (eg C:) getting mixed up with remotes (eg drive:)
+  * local: Fix directory separators on Windows
+  * drive: fix rate limit exceeded errors
+* v1.08 - 2015-02-04
+  * drive: fix subdirectory listing to not list entire drive
+  * drive: Fix SetModTime
+  * dropbox: adapt code to recent library changes
+* v1.07 - 2014-12-23
+  * google cloud storage: fix memory leak
+* v1.06 - 2014-12-12
+  * Fix "Couldn't find home directory" on OSX
+  * swift: Add tenant parameter
+  * Use new location of Google API packages
+* v1.05 - 2014-08-09
+  * Improved tests and consequently lots of minor fixes
+  * core: Fix race detected by go race detector
+  * core: Fixes after running errcheck
+  * drive: reset root directory on Rmdir and Purge
+  * fs: Document that Purger returns error on empty directory, test and fix
+  * google cloud storage: fix ListDir on subdirectory
+  * google cloud storage: re-read metadata in SetModTime
+  * s3: make reading metadata more reliable to work around eventual consistency problems
+  * s3: strip trailing / from ListDir()
+  * swift: return directories without / in ListDir
+* v1.04 - 2014-07-21
+  * google cloud storage: Fix crash on Update
+* v1.03 - 2014-07-20
+  * swift, s3, dropbox: fix updated files being marked as corrupted
+  * Make compile with go 1.1 again
+* v1.02 - 2014-07-19
+  * Implement Dropbox remote
+  * Implement Google Cloud Storage remote
+  * Verify Md5sums and Sizes after copies
+  * Remove times from "ls" command - lists sizes only
+  * Add "lsl" - lists times and sizes
+  * Add "md5sum" command
+* v1.01 - 2014-07-04
+  * drive: fix transfer of big files using up lots of memory
+* v1.00 - 2014-07-03
+  * drive: fix whole second dates
+* v0.99 - 2014-06-26
+  * Fix --dry-run not working
+  * Make compatible with go 1.1
+* v0.98 - 2014-05-30
+  * s3: Treat missing Content-Length as 0 for some ceph installations
+  * rclonetest: add file with a space in
+* v0.97 - 2014-05-05
+  * Implement copying of single files
+  * s3 & swift: support paths inside containers/buckets
+* v0.96 - 2014-04-24
+  * drive: Fix multiple files of same name being created
+  * drive: Use o.Update and fs.Put to optimise transfers
+  * Add version number, -V and --version
+* v0.95 - 2014-03-28
+  * rclone.org: website, docs and graphics
+  * drive: fix path parsing
+* v0.94 - 2014-03-27
+  * Change remote format one last time
+  * GNU style flags
+* v0.93 - 2014-03-16
+  * drive: store token in config file
+  * cross compile other versions
+  * set strict permissions on config file
+* v0.92 - 2014-03-15
+  * Config fixes and --config option
+* v0.91 - 2014-03-15
+  * Make config file
+* v0.90 - 2013-06-27
+  * Project named rclone
+* v0.00 - 2012-11-18
+  * Project started
docs/content/contact.md

@@ -5,8 +5,17 @@ date: "2014-04-26"
 ---
 
 Contact the rclone project
 --------------------------
 
 The project website is at:
 
 * https://github.com/ncw/rclone
+
+There you can file bug reports, ask for help or contribute pull
+requests.
+
+See also
+
+* [Github project page for source, reporting bugs and pull requests](http://github.com/ncw/rclone)
+* <a href="https://google.com/+RcloneOrg" rel="publisher">Google+ page for general comments</a>
+
+Or email [Nick Craig-Wood](mailto:nick@craig-wood.com)
docs/content/docs.md

@@ -1,22 +1,9 @@
 ---
 title: "Documentation"
-description: "Rclone Documentation"
-date: "2014-07-17"
+description: "Rclone Usage"
+date: "2015-06-06"
 ---
 
-Install
--------
-
-Rclone is a Go program and comes as a single binary file.
-
-[Download](/downloads/) the relevant binary.
-
-Or alternatively if you have Go installed use
-
-    go get github.com/ncw/rclone
-
-and this will build the binary in `$GOPATH/bin`.
-
 Configure
 ---------

@@ -30,11 +17,13 @@ option:
 
     rclone config
 
-See below for detailed instructions for
+See the following for detailed instructions for
 
 * [Google drive](/drive/)
 * [Amazon S3](/s3/)
 * [Swift / Rackspace Cloudfiles / Memset Memstore](/swift/)
+* [Dropbox](/dropbox/)
+* [Google Cloud Storage](/googlecloudstorage/)
 * [Local filesystem](/local/)
 
 Usage

@@ -55,13 +44,13 @@ You can define as many storage paths as you like in the config file.
 Subcommands
 -----------
 
-    rclone copy source:path dest:path
+### rclone copy source:path dest:path ###
 
 Copy the source to the destination. Doesn't transfer
 unchanged files, testing by size and modification time or
 MD5SUM. Doesn't delete files from the destination.
 
-    rclone sync source:path dest:path
+### rclone sync source:path dest:path ###
 
 Sync the source to the destination, changing the destination
 only. Doesn't transfer unchanged files, testing by size and

@@ -69,100 +58,202 @@ modification time or MD5SUM. Destination is updated to match
 source, including deleting files if necessary. Since this can
 cause data loss, test first with the `--dry-run` flag.
 
-    rclone ls [remote:path]
+### rclone ls [remote:path] ###
 
 List all the objects in the path with size and path.
 
-    rclone lsd [remote:path]
+### rclone lsd [remote:path] ###
 
 List all directories/containers/buckets in the path.
 
-    rclone lsl [remote:path]
+### rclone lsl [remote:path] ###
 
 List all the objects in the path with modification time,
 size and path.
 
-    rclone md5sum [remote:path]
+### rclone md5sum [remote:path] ###
 
 Produces an md5sum file for all the objects in the path. This
 is in the same format as the standard md5sum tool produces.
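For reference, md5sum format means one line per object: the hex digest, two spaces, then the path. A sketch with made-up file names (the first digest is the well-known MD5 of an empty file):

```
d41d8cd98f00b204e9800998ecf8427e  empty.txt
6f5902ac237024bdd0c176cb93063dc4  hello.txt
```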
-    rclone mkdir remote:path
+### rclone mkdir remote:path ###
 
 Make the path if it doesn't already exist
 
-    rclone rmdir remote:path
+### rclone rmdir remote:path ###
 
 Remove the path. Note that you can't remove a path with
 objects in it, use purge for that.
 
-    rclone purge remote:path
+### rclone purge remote:path ###
 
 Remove the path and all of its contents.
 
-    rclone check source:path dest:path
+### rclone check source:path dest:path ###
 
 Checks the files in the source and destination match. It
 compares sizes and MD5SUMs and prints a report of files which
 don't match. It doesn't alter the source or destination.
 
-    rclone config
+### rclone config ###
 
 Enter an interactive configuration session.
 
-    rclone help
+### rclone help ###
 
-This help.
+Prints help on rclone commands and options.
 
 ```
 --bwlimit=0: Bandwidth limit in kBytes/s, or use suffix k|M|G
 --checkers=8: Number of checkers to run in parallel.
 --config="~/.rclone.conf": Config file.
 --contimeout=1m0s: Connect timeout
 -n, --dry-run=false: Do a trial run with no permanent changes
 --log-file="": Log everything to this file
 --modify-window=1ns: Max time diff to be considered the same
 -q, --quiet=false: Print as little stuff as possible
 --stats=1m0s: Interval to print stats (0 to disable)
 --timeout=5m0s: IO idle timeout
 --transfers=4: Number of file transfers to run in parallel.
 -v, --verbose=false: Print lots more stuff
 -V, --version=false: Print the version number
 ```
 
 Developer options:
 
 ```
 --cpuprofile="": Write cpu profile to file
 ```
 
-License
+Options
 -------
 
-This is free software under the terms of the MIT license (check the
-COPYING file included in this package).
+Rclone has a number of options to control its behaviour.
 
-Bugs
-----
+Options which use TIME use the go time parser. A duration string is a
+possibly signed sequence of decimal numbers, each with optional
+fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid
+time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
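These are exactly the strings Go's standard `time.ParseDuration` accepts, so a minimal, self-contained sketch of checking such a value (illustrative, not rclone's flag-parsing code) is:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// The example durations from the paragraph above.
	for _, s := range []string{"300ms", "-1.5h", "2h45m"} {
		d, err := time.ParseDuration(s)
		if err != nil {
			fmt.Println(s, "->", err)
			continue
		}
		fmt.Println(s, "->", d)
	}
}
```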
-* Empty directories left behind with Local and Drive
-  * eg purging a local directory with subdirectories doesn't work
+Options which use SIZE use kByte by default. However a suffix of `k`
+for kBytes, `M` for MBytes and `G` for GBytes may be used. These are
+the binary units, eg 2**10, 2**20, 2**30 respectively.
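A minimal sketch of that SIZE rule (bare numbers are kBytes; `k`, `M`, `G` multiply by 2**10, 2**20, 2**30); this is an illustration, not rclone's `fs.SizeSuffix` implementation:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseSize converts a SIZE option value to bytes using the rules above.
func parseSize(s string) (int64, error) {
	mult := int64(1 << 10) // no suffix means kBytes
	switch {
	case strings.HasSuffix(s, "k"):
		mult, s = 1<<10, strings.TrimSuffix(s, "k")
	case strings.HasSuffix(s, "M"):
		mult, s = 1<<20, strings.TrimSuffix(s, "M")
	case strings.HasSuffix(s, "G"):
		mult, s = 1<<30, strings.TrimSuffix(s, "G")
	}
	n, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return 0, err
	}
	return n * mult, nil
}

func main() {
	for _, s := range []string{"0", "100k", "10M", "2G"} {
		n, err := parseSize(s)
		fmt.Println(s, "->", n, err)
	}
}
```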
-Contact and support
--------------------
+### --bwlimit=SIZE ###
 
-The project website is at:
+Bandwidth limit in kBytes/s, or use suffix k|M|G. The default is `0`
+which means to not limit bandwidth.
 
-* https://github.com/ncw/rclone
+For example to limit bandwidth usage to 10 MBytes/s use `--bwlimit 10M`
 
-There you can file bug reports, ask for help or contribute patches.
+This only limits the bandwidth of the data transfer, it doesn't limit
+the bandwidth of the directory listings etc.
 
-Authors
--------
+### --checkers=N ###
 
-* Nick Craig-Wood <nick@craig-wood.com>
+The number of checkers to run in parallel. Checkers do the equality
+checking of files during a sync. For some storage systems (eg s3,
+swift, dropbox) this can take a significant amount of time so they are
+run in parallel.
 
-Contributors
-------------
+The default is to run 8 checkers in parallel.
 
-* Your name goes here!
+### -c, --checksum ###
 
+Normally rclone will look at modification time and size of files to
+see if they are equal. If you set this flag then rclone will check
+MD5SUM and size to determine if files are equal.
+
+This is very useful when transferring between remotes which store the
+MD5SUM on the object which include swift, s3, drive, and google cloud
+storage.
+
+Eg `rclone --checksum sync s3:/bucket swift:/bucket` would run much
+quicker than without the `--checksum` flag.
+
+When using this flag, rclone won't update mtimes of remote files if
+they are incorrect as it would normally.
+
+### --config=CONFIG_FILE ###
+
+Specify the location of the rclone config file. Normally this is in
+your home directory as a file called `.rclone.conf`. If you run
+`rclone -h` and look at the help for the `--config` option you will
+see where the default location is for you. Use this flag to override
+the config location, eg `rclone --config=".myconfig" .config`.
+
+### --contimeout=TIME ###
+
+Set the connection timeout. This should be in go time format which
+looks like `5s` for 5 seconds, `10m` for 10 minutes, or `3h30m`.
+
+The connection timeout is the amount of time rclone will wait for a
+connection to go through to a remote object storage system. It is
+`1m` by default.
+
+### -n, --dry-run ###
+
+Do a trial run with no permanent changes. Use this in combination
+with the `-v` flag to see what rclone would do without actually doing
+it. Useful when setting up the `sync` command.
+
+### --log-file=FILE ###
+
+Log all of rclone's output to FILE. This is not active by default.
+This can be useful for tracking down problems with syncs in
+combination with the `-v` flag.
+
+### --modify-window=TIME ###
+
+When checking whether a file has been modified, this is the maximum
+allowed time difference that a file can have and still be considered
+equivalent.
+
+The default is `1ns` unless this is overridden by a remote. For
+example OS X only stores modification times to the nearest second so
+if you are reading and writing to an OS X filing system this will be
+`1s` by default.
+
+This command line flag allows you to override that computed default.
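The check this flag controls boils down to treating two timestamps as equal when they differ by no more than the window; a hedged sketch (not rclone's actual comparison code):

```go
package main

import (
	"fmt"
	"time"
)

// equalWithin reports whether two modification times count as the same
// under a given --modify-window.
func equalWithin(a, b time.Time, window time.Duration) bool {
	dt := a.Sub(b)
	if dt < 0 {
		dt = -dt
	}
	return dt <= window
}

func main() {
	t := time.Now()
	u := t.Add(300 * time.Millisecond)
	fmt.Println(equalWithin(t, u, time.Second))     // true with a 1s window (eg OS X)
	fmt.Println(equalWithin(t, u, time.Nanosecond)) // false with the 1ns default
}
```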
+### -q, --quiet ###
+
+Normally rclone outputs stats and a completion message. If you set
+this flag it will make as little output as possible.
+
+### --size-only ###
+
+Normally rclone will look at modification time and size of files to
+see if they are equal. If you set this flag then rclone will check
+only the size.
+
+This can be useful when transferring files from dropbox which have been
+modified by the desktop sync client which doesn't set checksums or
+modification times in the same way as rclone.
+
+When using this flag, rclone won't update mtimes of remote files if
+they are incorrect as it would normally.
+
+### --stats=TIME ###
+
+Rclone will print stats at regular intervals to show its progress.
+
+This sets the interval.
+
+The default is `1m`. Use 0 to disable.
+
+### --timeout=TIME ###
+
+This sets the IO idle timeout. If a transfer has started but then
+becomes idle for this long it is considered broken and disconnected.
+
+The default is `5m`. Set to 0 to disable.
+
+### --transfers=N ###
+
+The number of file transfers to run in parallel. It can sometimes be
+useful to set this to a smaller number if the remote is giving a lot
+of timeouts or bigger if you have lots of bandwidth and a fast remote.
+
+The default is to run 4 file transfers in parallel.
+
+### -v, --verbose ###
+
+If you set this flag, rclone will become very verbose telling you
+about every file it considers and transfers.
+
+Very useful for debugging.
+
+### -V, --version ###
+
+Prints the version number
+
+Developer options
+-----------------
+
+These options are useful when developing or debugging rclone. There
+are also some more remote specific options which aren't documented
+here which are used for testing. These start with remote name eg
+`--drive-test-option`.
+
+### --cpuprofile=FILE ###
+
+Write cpu profile to file. This can be analysed with `go tool pprof`.
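A flag like `--cpuprofile` is typically a thin wrapper over the standard `runtime/pprof` package; a minimal sketch of that wiring (not rclone's exact code):

```go
package main

import (
	"log"
	"os"
	"runtime/pprof"
)

func main() {
	f, err := os.Create("cpu.prof") // the FILE given to --cpuprofile
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if err := pprof.StartCPUProfile(f); err != nil {
		log.Fatal(err)
	}
	defer pprof.StopCPUProfile()
	// ... run the work to be profiled, then inspect cpu.prof with `go tool pprof`
}
```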
docs/content/downloads.md

@@ -2,34 +2,34 @@
 title: "Rclone downloads"
 description: "Download rclone binaries for your OS."
 type: page
-date: "2015-05-10"
+date: "2015-08-17"
 ---
 
-Rclone Download v1.13
+Rclone Download v1.18
 =====================
 
 * Windows
-  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.13-windows-386.zip)
-  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.13-windows-amd64.zip)
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.18-windows-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.18-windows-amd64.zip)
 * OSX
-  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.13-osx-386.zip)
-  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.13-osx-amd64.zip)
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.18-osx-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.18-osx-amd64.zip)
 * Linux
-  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.13-linux-386.zip)
-  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.13-linux-amd64.zip)
-  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.13-linux-arm.zip)
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.18-linux-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.18-linux-amd64.zip)
+  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.18-linux-arm.zip)
 * FreeBSD
-  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.13-freebsd-386.zip)
-  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.13-freebsd-amd64.zip)
-  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.13-freebsd-arm.zip)
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.18-freebsd-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.18-freebsd-amd64.zip)
+  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.18-freebsd-arm.zip)
 * NetBSD
-  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.13-netbsd-386.zip)
-  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.13-netbsd-amd64.zip)
-  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.13-netbsd-arm.zip)
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.18-netbsd-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.18-netbsd-amd64.zip)
+  * [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.18-netbsd-arm.zip)
 * OpenBSD
-  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.13-openbsd-386.zip)
-  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.13-openbsd-amd64.zip)
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.18-openbsd-386.zip)
+  * [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.18-openbsd-amd64.zip)
 * Plan 9
-  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.13-plan9-386.zip)
+  * [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.18-plan9-386.zip)
 
 Older downloads can be found [here](http://downloads.rclone.org/)
docs/content/drive.md

@@ -69,13 +69,11 @@ To copy a local directory to a drive directory called backup
 
     rclone copy /home/source remote:backup
 
-Modified time
--------------
+### Modified time ###
 
 Google drive stores modification times accurate to 1 ms.
 
-Revisions
----------
+### Revisions ###
 
 Google drive stores revisions of files. When you upload a change to
 an existing file to google drive using rclone it will create a new

@@ -86,3 +84,16 @@ was
 
 * They are deleted after 30 days or 100 revisions (whatever comes first).
 * They do not count towards a user storage quota.
+
+### Deleting files ###
+
+By default rclone will delete files permanently when requested. If
+sending them to the trash is required instead then use the
+`--drive-use-trash` flag.
+
+### Limitations ###
+
+Drive has quite a lot of rate limiting. This causes rclone to be
+limited to transferring about 2 files per second only. Individual
+files may be transferred much faster at 100s of MBytes/s but lots of
+small files can take a long time.
docs/content/dropbox.md

@@ -71,10 +71,12 @@ To copy a local directory to a dropbox directory called backup
 
     rclone copy /home/source remote:backup
 
-Modified time
--------------
+### Modified time and MD5SUMs ###
 
-Md5sums and timestamps in RFC3339 format accurate to 1ns are stored in
-a Dropbox datastore called "rclone". Dropbox datastores are limited
-to 100,000 rows so this is the maximum number of files rclone can
-manage on Dropbox.
+Dropbox doesn't have the capability of storing modification times or
+MD5SUMs so syncs will effectively have the `--size-only` flag set.
+
+### Limitations ###
+
+Note that Dropbox is case insensitive so you can't have a file called
+"Hello.doc" and one called "hello.doc".
docs/content/faq.md (new file, 50 lines)

@@ -0,0 +1,50 @@
+---
+title: "FAQ"
+description: "Rclone Frequently Asked Questions"
+date: "2015-06-06"
+---
+
+Frequently Asked Questions
+--------------------------
+
+### Do all cloud storage systems support all rclone commands ###
+
+Yes they do. All the rclone commands (eg `sync`, `copy` etc) will
+work on all the remote storage systems.
+
+### Can rclone sync directly from drive to s3 ###
+
+Rclone can sync between two remote cloud storage systems just fine.
+
+Note that it effectively downloads the file and uploads it again, so
+the node running rclone would need to have lots of bandwidth.
+
+The syncs would be incremental (on a file by file basis).
+
+Eg
+
+    rclone sync drive:Folder s3:bucket
+
+### Using rclone from multiple locations at the same time ###
+
+You can use rclone from multiple places at the same time if you choose
+a different subdirectory for the output, eg
+
+```
+Server A> rclone sync /tmp/whatever remote:ServerA
+Server B> rclone sync /tmp/whatever remote:ServerB
+```
+
+If you sync to the same directory then you should use rclone copy
+otherwise the two rclones may delete each other's files, eg
+
+```
+Server A> rclone copy /tmp/whatever remote:Backup
+Server B> rclone copy /tmp/whatever remote:Backup
+```
+
+The file names you upload from Server A and Server B should be
+different in this case, otherwise some file systems (eg Drive) may
+make duplicates.
docs/content/googlecloudstorage.md

@@ -109,8 +109,7 @@ files in the bucket.
 
     rclone sync /home/local/directory remote:bucket
 
-Modified time
--------------
+### Modified time ###
 
 Google Cloud Storage stores md5sums natively and rclone stores
 modification times as metadata on the object, under the "mtime" key in
docs/content/install.md (new file, 39 lines)

@@ -0,0 +1,39 @@
+---
+title: "Install"
+description: "Rclone Installation"
+date: "2015-06-12"
+---
+
+Install
+-------
+
+Rclone is a Go program and comes as a single binary file.
+
+[Download](/downloads/) the relevant binary.
+
+Or alternatively if you have Go installed use
+
+    go get github.com/ncw/rclone
+
+and this will build the binary in `$GOPATH/bin`. If you have built
+rclone before then you will want to update its dependencies first with
+this (remove `-f` if using go < 1.4)
+
+    go get -u -v -f github.com/ncw/rclone/...
+
+See the [Usage section](/docs/) of the docs for how to use rclone, or
+run `rclone -h`.
+
+Linux binary download and install example
+-----------------------------------------
+
+    unzip rclone-v1.17-linux-amd64.zip
+    cd rclone-v1.17-linux-amd64
+    # copy binary file
+    sudo cp rclone /usr/sbin/
+    sudo chown root:root /usr/sbin/rclone
+    sudo chmod 755 /usr/sbin/rclone
+    # install manpage
+    sudo mkdir -p /usr/local/share/man/man1
+    sudo cp rclone.1 /usr/local/share/man/man1/
+    sudo mandb
docs/content/licence.md (new file, 34 lines)

@@ -0,0 +1,34 @@
+---
+title: "Licence"
+description: "Rclone Licence"
+date: "2015-06-06"
+---
+
+License
+-------
+
+This is free software under the terms of the MIT license (check the
+COPYING file included with the source code).
+
+```
+Copyright (C) 2012 by Nick Craig-Wood http://www.craig-wood.com/nick/
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+```
docs/content/local.md

@@ -16,10 +16,23 @@ Will sync `/home/source` to `/tmp/destination`
 These can be configured into the config file for consistency's sake,
 but it is probably easier not to.
 
-Modified time
--------------
+### Modified time ###
 
 Rclone reads and writes the modified time using an accuracy determined by
 the OS. Typically this is 1ns on Linux, 10 ns on Windows and 1 Second
 on OS X.
+
+### Filenames ###
+
+Filenames are expected to be encoded in UTF-8 on disk. This is the
+normal case for Windows and OS X. There is a bit more uncertainty in
+the Linux world, but new distributions will have UTF-8 encoded file
+names.
+
+If an invalid (non-UTF8) filename is read, the invalid characters will
+be replaced with the unicode replacement character, '�'. `rclone`
+will emit a debug message in this case (use `-v` to see), eg
+
+```
+Local file system at .: Replacing invalid UTF-8 characters in "gro\xdf"
+```
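A sketch of that replacement using the standard library; `strings.ToValidUTF8` (Go 1.13+, so newer than the code in this diff) is not what rclone itself used, but it shows the effect:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	name := "gro\xdf" // Latin-1 "groß"; the 0xdf byte is invalid UTF-8
	fixed := strings.ToValidUTF8(name, "\uFFFD")
	fmt.Println(fixed) // "gro�" - the invalid byte becomes the replacement character
}
```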
docs/content/s3.md

@@ -27,41 +27,55 @@ Choose a number from below
 1) swift
 2) s3
 3) local
-4) drive
+4) google cloud storage
+5) dropbox
+6) drive
 type> 2
 AWS Access Key ID.
 access_key_id> accesskey
 AWS Secret Access Key (password).
 secret_access_key> secretaccesskey
-Endpoint for S3 API.
+Region to connect to.
 Choose a number from below, or type in your own value
-* The default endpoint - a good choice if you are unsure.
-* US Region, Northern Virginia or Pacific Northwest.
-* Leave location constraint empty.
-1) https://s3.amazonaws.com/
-* US Region, Northern Virginia only.
-* Leave location constraint empty.
-2) https://s3-external-1.amazonaws.com
+1) us-east-1
+* US West (Oregon) Region
+* Needs location constraint us-west-2.
+2) us-west-2
 [snip]
 * South America (Sao Paulo) Region
 * Needs location constraint sa-east-1.
-9) https://s3-sa-east-1.amazonaws.com
-endpoint> 1
-Location constraint - must be set to match the Endpoint.
+9) sa-east-1
+* If using an S3 clone that only understands v2 signatures - eg Ceph - set this and make sure you set the endpoint.
+10) other-v2-signature
+* If using an S3 clone that understands v4 signatures set this and make sure you set the endpoint.
+11) other-v4-signature
+region> 1
+Endpoint for S3 API.
+Leave blank if using AWS to use the default endpoint for the region.
+Specify if using an S3 clone such as Ceph.
+endpoint>
+Location constraint - must be set to match the Region. Used when creating buckets only.
 Choose a number from below, or type in your own value
 * Empty for US Region, Northern Virginia or Pacific Northwest.
 1)
 * US West (Oregon) Region.
 2) us-west-2
 * US West (Northern California) Region.
 3) us-west-1
 * EU (Ireland) Region.
 4) eu-west-1
 [snip]
 * South America (Sao Paulo) Region.
 9) sa-east-1
 location_constraint> 1
 Remote config
 --------------------
 [remote]
 access_key_id = accesskey
 secret_access_key = secretaccesskey
-endpoint = https://s3.amazonaws.com/
+region = us-east-1
+endpoint =
+location_constraint =
 --------------------
 y) Yes this is OK

@@ -100,8 +114,52 @@ files in the bucket.
 
     rclone sync /home/local/directory remote:bucket
 
-Modified time
--------------
+### Modified time ###
 
 The modified time is stored as metadata on the object as
 `X-Amz-Meta-Mtime` as floating point since the epoch accurate to 1 ns.
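A sketch of writing a timestamp in that shape (note a float64 round trip loses the last couple of hundred nanoseconds at 2015-scale epochs, so a real implementation works on the decimal string; this is illustrative only):

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	t := time.Date(2015, 8, 17, 12, 0, 0, 0, time.UTC)

	// Seconds since the epoch as a floating point decimal string.
	mtime := strconv.FormatFloat(float64(t.UnixNano())/1e9, 'f', 9, 64)
	fmt.Println("X-Amz-Meta-Mtime:", mtime)

	// And back again.
	secs, _ := strconv.ParseFloat(mtime, 64)
	fmt.Println(time.Unix(0, int64(secs*1e9)).UTC())
}
```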
+
+### Multipart uploads ###
+
+rclone supports multipart uploads with S3 which means that it can
+upload files bigger than 5GB. Note that files uploaded with multipart
+upload don't have an MD5SUM.
+
+### Ceph ###
+
+Ceph is an object storage system which presents an Amazon S3 interface.
+
+To use rclone with ceph, you need to set the following parameters in
+the config.
+
+```
+access_key_id = Whatever
+secret_access_key = Whatever
+endpoint = https://ceph.endpoint.goes.here/
+region = other-v2-signature
+```
+
+Note also that Ceph sometimes puts `/` in the passwords it gives
+users. If you read the secret access key using the command line tools
+you will get a JSON blob with the `/` escaped as `\/`. Make sure you
+only write `/` in the secret access key.
+
+Eg the dump from Ceph looks something like this (irrelevant keys
+removed).
+
+```
+{
+    "user_id": "xxx",
+    "display_name": "xxxx",
+    "keys": [
+      {
+        "user": "xxx",
+        "access_key": "xxxxxx",
+        "secret_key": "xxxxxx\/xxxx"
+      }
+    ],
+}
+```
+
+Because this is a json dump, it is encoding the `/` as `\/`, so if you
+use the secret key as `xxxxxx/xxxx` it will work fine.
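The `\/` is plain JSON string escaping, so any JSON decoder hands back an unescaped `/`; a small sketch:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

func main() {
	blob := []byte(`{"access_key": "xxxxxx", "secret_key": "xxxxxx\/xxxx"}`)
	var keys struct {
		AccessKey string `json:"access_key"`
		SecretKey string `json:"secret_key"`
	}
	if err := json.Unmarshal(blob, &keys); err != nil {
		log.Fatal(err)
	}
	fmt.Println(keys.SecretKey) // xxxxxx/xxxx - write this form into the config
}
```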
docs/content/swift.md

@@ -87,8 +87,7 @@ excess files in the container.
 
     rclone sync /home/local/directory remote:container
 
-Modified time
--------------
+### Modified time ###
 
 The modified time is stored as metadata on the object as
 `X-Object-Meta-Mtime` as floating point since the epoch accurate to 1
(site template: HTML head)

@@ -3,6 +3,7 @@
 <meta name="viewport" content="width=device-width, initial-scale=1.0">
 <meta name="description" content="{{ .Description }}">
 <meta name="author" content="Nick Craig-Wood">
+<link rel="shortcut icon" type="image/png" href="/img/rclone-16x16.png"/>
 <script>
 (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
 (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
(site template: navigation bar)

@@ -12,8 +12,18 @@
 <div class="collapse navbar-collapse navbar-ex1-collapse">
   <ul class="nav navbar-nav">
     <li><a href="/downloads/"><i class="fa fa-cloud-download"></i> Downloads</a></li>
-    <li><a href="/docs/"><i class="fa fa-book"></i> Docs</a></li>
-    <li><a href="/contact/"><i class="fa fa-envelope"></i> Contact</a></li>
+    <li class="dropdown">
+      <a href="#" class="dropdown-toggle" data-toggle="dropdown"><b class="caret"></b> Docs</a>
+      <ul class="dropdown-menu">
+        <li><a href="/install/"><i class="fa fa-book"></i> Installation</a></li>
+        <li><a href="/docs/"><i class="fa fa-book"></i> Usage</a></li>
+        <li><a href="/changelog/"><i class="fa fa-book"></i> Changelog</a></li>
+        <li><a href="/bugs/"><i class="fa fa-book"></i> Bugs</a></li>
+        <li><a href="/faq/"><i class="fa fa-book"></i> FAQ</a></li>
+        <li><a href="/licence/"><i class="fa fa-book"></i> Licence</a></li>
+        <li><a href="/authors/"><i class="fa fa-book"></i> Authors</a></li>
+      </ul>
+    </li>
     <li class="dropdown">
       <a href="#" class="dropdown-toggle" data-toggle="dropdown"><b class="caret"></b> Storage Systems</a>
       <ul class="dropdown-menu">

@@ -25,6 +35,7 @@
         <li><a href="/local/"><i class="fa fa-file"></i> Local</a></li>
       </ul>
     </li>
+    <li><a href="/contact/"><i class="fa fa-envelope"></i> Contact</a></li>
   </ul>
 </div>
 </div>
docs/static/img/rclone-16x16.png (vendored, new binary file, 1019 B; binary not shown)
drive/drive.go

@@ -39,6 +39,7 @@ const (
 var (
 	// Flags
 	driveFullList = pflag.BoolP("drive-full-list", "", true, "Use a full listing for directory list. More data but usually quicker.")
+	driveUseTrash = pflag.BoolP("drive-use-trash", "", false, "Send files to the trash instead of deleting permanently.")
 	// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
 	// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
 	chunkSize = fs.SizeSuffix(256 * 1024)
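The two constraints in that comment (a power of two, no smaller than 1<<18) reduce to the usual bit test; a sketch, not code from rclone:

```go
package main

import "fmt"

// validChunkSize checks the constraints stated in the comment above:
// at least 1<<18 bytes (the Google uploader minimum) and a power of two.
func validChunkSize(n int64) bool {
	return n >= 1<<18 && n&(n-1) == 0
}

func main() {
	fmt.Println(validChunkSize(256 * 1024)) // true - the default above
	fmt.Println(validChunkSize(3 << 17))    // false - not a power of two
}
```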
@@ -681,7 +682,7 @@ func (f *FsDrive) List() fs.ObjectsChan {
|
||||
err := f.findRoot(false)
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.Log(f, "Couldn't find root: %s", err)
|
||||
fs.ErrorLog(f, "Couldn't find root: %s", err)
|
||||
} else {
|
||||
if f.root == "" && *driveFullList {
|
||||
err = f.listDirFull(f.rootId, "", out)
|
||||
@@ -690,7 +691,7 @@ func (f *FsDrive) List() fs.ObjectsChan {
|
||||
}
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.Log(f, "List failed: %s", err)
|
||||
fs.ErrorLog(f, "List failed: %s", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
@@ -705,7 +706,7 @@ func (f *FsDrive) ListDir() fs.DirChan {
|
||||
err := f.findRoot(false)
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.Log(f, "Couldn't find root: %s", err)
|
||||
fs.ErrorLog(f, "Couldn't find root: %s", err)
|
||||
} else {
|
||||
_, err := f.listAll(f.rootId, "", true, false, func(item *drive.File) bool {
|
||||
dir := &fs.Dir{
|
||||
@@ -719,7 +720,7 @@ func (f *FsDrive) ListDir() fs.DirChan {
|
||||
})
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.Log(f, "ListDir failed: %s", err)
|
||||
fs.ErrorLog(f, "ListDir failed: %s", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
@@ -803,7 +804,11 @@ func (f *FsDrive) Rmdir() error {
|
||||
// Delete the directory if it isn't the root
|
||||
if f.root != "" {
|
||||
f.call(&err, func() {
|
||||
err = f.svc.Files.Delete(f.rootId).Do()
|
||||
if *driveUseTrash {
|
||||
_, err = f.svc.Files.Trash(f.rootId).Do()
|
||||
} else {
|
||||
err = f.svc.Files.Delete(f.rootId).Do()
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -832,7 +837,11 @@ func (f *FsDrive) Purge() error {
|
||||
return err
|
||||
}
|
||||
f.call(&err, func() {
|
||||
err = f.svc.Files.Delete(f.rootId).Do()
|
||||
if *driveUseTrash {
|
||||
_, err = f.svc.Files.Trash(f.rootId).Do()
|
||||
} else {
|
||||
err = f.svc.Files.Delete(f.rootId).Do()
|
||||
}
|
||||
})
|
||||
f.resetRoot()
|
||||
if err != nil {
|
||||
@@ -934,7 +943,7 @@ func (o *FsObjectDrive) SetModTime(modTime time.Time) {
|
||||
err := o.readMetaData()
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.Log(o, "Failed to read metadata: %s", err)
|
||||
fs.ErrorLog(o, "Failed to read metadata: %s", err)
|
||||
return
|
||||
}
|
||||
// New metadata
|
||||
@@ -948,7 +957,7 @@ func (o *FsObjectDrive) SetModTime(modTime time.Time) {
|
||||
})
|
||||
if err != nil {
|
||||
fs.Stats.Error()
|
||||
fs.Log(o, "Failed to update remote mtime: %s", err)
|
||||
fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
|
||||
return
|
||||
}
|
||||
// Update info from read data
|
||||
@@ -962,6 +971,9 @@ func (o *FsObjectDrive) Storable() bool {
|
||||
|
||||
// Open an object for read
|
||||
func (o *FsObjectDrive) Open() (in io.ReadCloser, err error) {
|
||||
if o.url == "" {
|
||||
return nil, fmt.Errorf("Forbidden to download - check sharing permission")
|
||||
}
|
||||
req, err := http.NewRequest("GET", o.url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -1020,7 +1032,11 @@ func (o *FsObjectDrive) Update(in io.Reader, modTime time.Time, size int64) erro
|
||||
func (o *FsObjectDrive) Remove() error {
|
||||
var err error
|
||||
o.drive.call(&err, func() {
|
||||
err = o.drive.svc.Files.Delete(o.id).Do()
|
||||
if *driveUseTrash {
|
||||
_, err = o.drive.svc.Files.Trash(o.id).Do()
|
||||
} else {
|
||||
err = o.drive.svc.Files.Delete(o.id).Do()
|
||||
}
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
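The net effect of the drive.go changes above is that every delete site (Rmdir, Purge and Object.Remove) now consults the new --drive-use-trash flag and calls Files.Trash instead of Files.Delete when it is set. A hedged usage sketch (the flag name comes from the diff; the exact command line is illustrative):

    # remove a directory tree but keep it recoverable in the Drive trash
    rclone --drive-use-trash purge remote:old-backups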
@@ -6,11 +6,6 @@ Limitations of dropbox

File system is case insensitive

The datastore is limited to 100,000 records which therefore is the
limit of the number of files that rclone can use on dropbox.

FIXME only open datastore if we need it?

FIXME Getting this sometimes
Failed to copy: Upload failed: invalid character '<' looking for beginning of value
This is a JSON decode error - from Update / UploadByChunk

@@ -38,10 +33,10 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"path"
"strings"
"sync"
"time"

"github.com/ncw/rclone/fs"

@@ -50,17 +45,10 @@ import (

// Constants
const (
rcloneAppKey = "5jcck7diasz0rqy"
rcloneAppSecret = "1n9m04y2zx7bf26"
uploadChunkSize = 64 * 1024 // chunk size for upload
metadataLimit = dropbox.MetadataLimitDefault // max items to fetch at once
datastoreName = "rclone"
tableName = "metadata"
md5sumField = "md5sum"
mtimeField = "mtime"
maxCommitRetries = 5
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
rcloneAppKey = "5jcck7diasz0rqy"
rcloneAppSecret = "1n9m04y2zx7bf26"
uploadChunkSize = 64 * 1024 // chunk size for upload
metadataLimit = dropbox.MetadataLimitDefault // max items to fetch at once
)

// Register with Fs

@@ -111,24 +99,19 @@ func configHelper(name string) {

// FsDropbox represents a remote dropbox server
type FsDropbox struct {
db *dropbox.Dropbox // the connection to the dropbox server
root string // the path we are working on
slashRoot string // root with "/" prefix
slashRootSlash string // root with "/" prefix and postix
datastoreManager *dropbox.DatastoreManager
datastore *dropbox.Datastore
table *dropbox.Table
datastoreMutex sync.Mutex // lock this when using the datastore
datastoreErr error // pending errors on the datastore
db *dropbox.Dropbox // the connection to the dropbox server
root string // the path we are working on
slashRoot string // root with "/" prefix, lowercase
slashRootSlash string // root with "/" prefix and postfix, lowercase
}

// FsObjectDropbox describes a dropbox object
type FsObjectDropbox struct {
dropbox *FsDropbox // what this object is part of
remote string // The remote path
md5sum string // md5sum of the object
bytes int64 // size of the object
modTime time.Time // time it was last modified
dropbox *FsDropbox // what this object is part of
remote string // The remote path
bytes int64 // size of the object
modTime time.Time // time it was last modified
hasMetadata bool // metadata is valid
}

// ------------------------------------------------------------

@@ -170,12 +153,6 @@ func NewFs(name, root string) (fs.Fs, error) {
// Authorize the client
db.SetAccessToken(token)

// Make a db to store rclone metadata in
f.datastoreManager = db.NewDatastoreManager()

// Open the datastore in the background
go f.openDataStore()

// See if the root is actually an object
entry, err := f.db.Metadata(f.slashRoot, false, false, "", "", metadataLimit)
if err == nil && !entry.IsDir {

@@ -196,37 +173,15 @@ func NewFs(name, root string) (fs.Fs, error) {
// Sets root in f
func (f *FsDropbox) setRoot(root string) {
f.root = strings.Trim(root, "/")
f.slashRoot = "/" + f.root
lowerCaseRoot := strings.ToLower(f.root)

f.slashRoot = "/" + lowerCaseRoot
f.slashRootSlash = f.slashRoot
if f.root != "" {
if lowerCaseRoot != "" {
f.slashRootSlash += "/"
}
}

// Opens the datastore in f
func (f *FsDropbox) openDataStore() {
f.datastoreMutex.Lock()
defer f.datastoreMutex.Unlock()
fs.Debug(f, "Open rclone datastore")
// Open the rclone datastore
var err error
f.datastore, err = f.datastoreManager.OpenDatastore(datastoreName)
if err != nil {
fs.Log(f, "Failed to open datastore: %v", err)
f.datastoreErr = err
return
}

// Get the table we are using
f.table, err = f.datastore.GetTable(tableName)
if err != nil {
fs.Log(f, "Failed to open datastore table: %v", err)
f.datastoreErr = err
return
}
fs.Debug(f, "Open rclone datastore finished")
}

// Return an FsObject from a path
//
// May return nil if an error occurred

@@ -254,27 +209,36 @@ func (f *FsDropbox) NewFsObject(remote string) fs.Object {
return f.newFsObjectWithInfo(remote, nil)
}

// Strips the root off entry and returns it
func (f *FsDropbox) stripRoot(entry *dropbox.Entry) string {
path := entry.Path
if strings.HasPrefix(path, f.slashRootSlash) {
path = path[len(f.slashRootSlash):]
// Strips the root off path and returns it
func (f *FsDropbox) stripRoot(path string) *string {
lowercase := strings.ToLower(path)

if !strings.HasPrefix(lowercase, f.slashRootSlash) {
fs.Stats.Error()
fs.ErrorLog(f, "Path '%s' is not under root '%s'", path, f.slashRootSlash)
return nil
}
return path

stripped := path[len(f.slashRootSlash):]
return &stripped
}

// Walk the root returning a channel of FsObjects
func (f *FsDropbox) list(out fs.ObjectsChan) {
// Track path component case, it could be different for entries coming from DropBox API
// See https://www.dropboxforum.com/hc/communities/public/questions/201665409-Wrong-character-case-of-folder-name-when-calling-listFolder-using-Sync-API?locale=en-us
// and https://github.com/ncw/rclone/issues/53
nameTree := NewNameTree()
cursor := ""
for {
deltaPage, err := f.db.Delta(cursor, f.slashRoot)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't list: %s", err)
fs.ErrorLog(f, "Couldn't list: %s", err)
break
} else {
if deltaPage.Reset && cursor != "" {
fs.Log(f, "Unexpected reset during listing - try again")
fs.ErrorLog(f, "Unexpected reset during listing - try again")
fs.Stats.Error()
break
}

@@ -284,20 +248,38 @@ func (f *FsDropbox) list(out fs.ObjectsChan) {
entry := deltaEntry.Entry
if entry == nil {
// This notifies of a deleted object
fs.Debug(f, "Deleting metadata for %q", deltaEntry.Path)
key := metadataKey(deltaEntry.Path) // Path is lowercased
err := f.deleteMetadata(key)
if err != nil {
fs.Debug(f, "Failed to delete metadata for %q", deltaEntry.Path)
// Don't accumulate Error here
} else {
if len(entry.Path) <= 1 || entry.Path[0] != '/' {
fs.Stats.Error()
fs.ErrorLog(f, "dropbox API inconsistency: a path should always start with a slash and be at least 2 characters: %s", entry.Path)
continue
}

} else {
if entry.IsDir {
// ignore directories
lastSlashIndex := strings.LastIndex(entry.Path, "/")

var parentPath string
if lastSlashIndex == 0 {
parentPath = ""
} else {
path := f.stripRoot(entry)
out <- f.newFsObjectWithInfo(path, entry)
parentPath = entry.Path[1:lastSlashIndex]
}
lastComponent := entry.Path[lastSlashIndex+1:]

if entry.IsDir {
nameTree.PutCaseCorrectDirectoryName(parentPath, lastComponent)
} else {
parentPathCorrectCase := nameTree.GetPathWithCorrectCase(parentPath)
if parentPathCorrectCase != nil {
path := f.stripRoot(*parentPathCorrectCase + "/" + lastComponent)
if path == nil {
// an error occurred and logged by stripRoot
continue
}

out <- f.newFsObjectWithInfo(*path, entry)
} else {
nameTree.PutFile(parentPath, lastComponent, entry)
}
}
}
}

@@ -307,6 +289,17 @@ func (f *FsDropbox) list(out fs.ObjectsChan) {
cursor = deltaPage.Cursor.Cursor
}
}

walkFunc := func(caseCorrectFilePath string, entry *dropbox.Entry) {
path := f.stripRoot("/" + caseCorrectFilePath)
if path == nil {
// an error occurred and logged by stripRoot
return
}

out <- f.newFsObjectWithInfo(*path, entry)
}
nameTree.WalkFiles(f.root, walkFunc)
}

// Walk the path returning a channel of FsObjects

@@ -327,15 +320,21 @@ func (f *FsDropbox) ListDir() fs.DirChan {
entry, err := f.db.Metadata(f.root, true, false, "", "", metadataLimit)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't list directories in root: %s", err)
fs.ErrorLog(f, "Couldn't list directories in root: %s", err)
} else {
for i := range entry.Contents {
entry := &entry.Contents[i]
if entry.IsDir {
name := f.stripRoot(entry.Path)
if name == nil {
// an error occurred and logged by stripRoot
continue
}

out <- &fs.Dir{
Name: f.stripRoot(entry),
Name: *name,
When: time.Time(entry.ClientMtime),
Bytes: int64(entry.Bytes),
Bytes: entry.Bytes,
Count: -1,
}
}

@@ -399,8 +398,8 @@ func (f *FsDropbox) Rmdir() error {
}

// Return the precision
func (fs *FsDropbox) Precision() time.Duration {
return time.Nanosecond
func (f *FsDropbox) Precision() time.Duration {
return fs.ModTimeNotSupported
}

// Purge deletes all the files and the container

@@ -409,81 +408,11 @@ func (fs *FsDropbox) Precision() time.Duration {
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *FsDropbox) Purge() error {
// Delete metadata first
var wg sync.WaitGroup
to_be_deleted := f.List()
wg.Add(fs.Config.Transfers)
for i := 0; i < fs.Config.Transfers; i++ {
go func() {
defer wg.Done()
for dst := range to_be_deleted {
o := dst.(*FsObjectDropbox)
o.deleteMetadata()
}
}()
}
wg.Wait()

// Let dropbox delete the filesystem tree
_, err := f.db.Delete(f.slashRoot)
return err
}

// Tries the transaction in fn then calls commit, repeating until retry limit
//
// Holds datastore mutex while in progress
func (f *FsDropbox) transaction(fn func() error) error {
f.datastoreMutex.Lock()
defer f.datastoreMutex.Unlock()
if f.datastoreErr != nil {
return f.datastoreErr
}
var err error
for i := 1; i <= maxCommitRetries; i++ {
err = fn()
if err != nil {
return err
}

err = f.datastore.Commit()
if err == nil {
break
}
fs.Debug(f, "Retrying transaction %d/%d", i, maxCommitRetries)
}
if err != nil {
return fmt.Errorf("Failed to commit metadata changes: %s", err)
}
return nil
}

// Deletes the medadata associated with this key
func (f *FsDropbox) deleteMetadata(key string) error {
return f.transaction(func() error {
record, err := f.table.Get(key)
if err != nil {
return fmt.Errorf("Couldn't get record: %s", err)
}
if record == nil {
return nil
}
record.DeleteRecord()
return nil
})
}

// Reads the record attached to key
//
// Holds datastore mutex while in progress
func (f *FsDropbox) readRecord(key string) (*dropbox.Record, error) {
f.datastoreMutex.Lock()
defer f.datastoreMutex.Unlock()
if f.datastoreErr != nil {
return nil, f.datastoreErr
}
return f.table.Get(key)
}

// ------------------------------------------------------------

// Return the parent Fs

@@ -505,33 +434,8 @@ func (o *FsObjectDropbox) Remote() string {
}

// Md5sum returns the Md5sum of an object returning a lowercase hex string
//
// FIXME has to download the file!
func (o *FsObjectDropbox) Md5sum() (string, error) {
if o.md5sum != "" {
return o.md5sum, nil
}
err := o.readMetaData()
if err != nil {
fs.Log(o, "Failed to read metadata: %s", err)
return "", fmt.Errorf("Failed to read metadata: %s", err)

}

// For pre-existing files which have no md5sum can read it and set it?

// in, err := o.Open()
// if err != nil {
// return "", err
// }
// defer in.Close()
// hash := md5.New()
// _, err = io.Copy(hash, in)
// if err != nil {
// return "", err
// }
// o.md5sum = fmt.Sprintf("%x", hash.Sum(nil))
return o.md5sum, nil
return "", nil
}

// Size returns the size of an object in bytes

@@ -543,8 +447,9 @@ func (o *FsObjectDropbox) Size() int64 {
//
// This isn't a complete set of metadata and has an inacurate date
func (o *FsObjectDropbox) setMetadataFromEntry(info *dropbox.Entry) {
o.bytes = int64(info.Bytes)
o.bytes = info.Bytes
o.modTime = time.Time(info.ClientMtime)
o.hasMetadata = true
}

// Reads the entry from dropbox

@@ -592,55 +497,9 @@ func (o *FsObjectDropbox) metadataKey() string {

// readMetaData gets the info if it hasn't already been fetched
func (o *FsObjectDropbox) readMetaData() (err error) {
if o.md5sum != "" {
if o.hasMetadata {
return nil
}

// fs.Debug(o, "Reading metadata from datastore")
record, err := o.dropbox.readRecord(o.metadataKey())
if err != nil {
fs.Debug(o, "Couldn't read metadata: %s", err)
record = nil
}

if record != nil {
// Read md5sum
md5sumInterface, ok, err := record.Get(md5sumField)
if err != nil {
return err
}
if !ok {
fs.Debug(o, "Couldn't find md5sum in record")
} else {
md5sum, ok := md5sumInterface.(string)
if !ok {
fs.Debug(o, "md5sum not a string")
} else {
o.md5sum = md5sum
}
}

// read mtime
mtimeInterface, ok, err := record.Get(mtimeField)
if err != nil {
return err
}
if !ok {
fs.Debug(o, "Couldn't find mtime in record")
} else {
mtime, ok := mtimeInterface.(string)
if !ok {
fs.Debug(o, "mtime not a string")
} else {
modTime, err := time.Parse(timeFormatIn, mtime)
if err != nil {
return err
}
o.modTime = modTime
}
}
}

// Last resort
return o.readEntryAndSetMetadata()
}

@@ -658,59 +517,12 @@ func (o *FsObjectDropbox) ModTime() time.Time {
return o.modTime
}

// Sets the modification time of the local fs object into the record
// FIXME if we don't set md5sum what will that do?
func (o *FsObjectDropbox) setModTimeAndMd5sum(modTime time.Time, md5sum string) error {
key := o.metadataKey()
// fs.Debug(o, "Writing metadata to datastore")
return o.dropbox.transaction(func() error {
record, err := o.dropbox.table.GetOrInsert(key)
if err != nil {
return fmt.Errorf("Couldn't read record: %s", err)
}

if md5sum != "" {
err = record.Set(md5sumField, md5sum)
if err != nil {
return fmt.Errorf("Couldn't set md5sum record: %s", err)
}
o.md5sum = md5sum
}

if !modTime.IsZero() {
mtime := modTime.Format(timeFormatOut)
err := record.Set(mtimeField, mtime)
if err != nil {
return fmt.Errorf("Couldn't set mtime record: %s", err)
}
o.modTime = modTime
}

return nil
})
}

// Deletes the medadata associated with this file
//
// It logs any errors
func (o *FsObjectDropbox) deleteMetadata() {
fs.Debug(o, "Deleting metadata from datastore")
err := o.dropbox.deleteMetadata(o.metadataKey())
if err != nil {
fs.Log(o, "Error deleting metadata: %v", err)
fs.Stats.Error()
}
}

// Sets the modification time of the local fs object
//
// Commits the datastore
func (o *FsObjectDropbox) SetModTime(modTime time.Time) {
err := o.setModTimeAndMd5sum(modTime, "")
if err != nil {
fs.Stats.Error()
fs.Log(o, err.Error())
}
// FIXME not implemented
return
}

// Is this object storable

@@ -730,22 +542,16 @@ func (o *FsObjectDropbox) Open() (in io.ReadCloser, err error) {
//
// The new object may have been created if an error is returned
func (o *FsObjectDropbox) Update(in io.Reader, modTime time.Time, size int64) error {
// Calculate md5sum as we upload it
hash := md5.New()
rc := &readCloser{in: io.TeeReader(in, hash)}
entry, err := o.dropbox.db.UploadByChunk(rc, uploadChunkSize, o.remotePath(), true, "")
entry, err := o.dropbox.db.UploadByChunk(ioutil.NopCloser(in), uploadChunkSize, o.remotePath(), true, "")
if err != nil {
return fmt.Errorf("Upload failed: %s", err)
}
o.setMetadataFromEntry(entry)

md5sum := fmt.Sprintf("%x", hash.Sum(nil))
return o.setModTimeAndMd5sum(modTime, md5sum)
return nil
}

// Remove an object
func (o *FsObjectDropbox) Remove() error {
o.deleteMetadata()
_, err := o.dropbox.db.Delete(o.remotePath())
return err
}
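Taken together, the dropbox changes above drop the datastore-backed metadata entirely: Md5sum() now returns "" and Precision() returns fs.ModTimeNotSupported, so rclone can no longer compare dropbox files by checksum or by mod time. One way to make the remaining comparison explicit on the command line (an illustrative invocation, not taken from the diff):

    rclone sync --size-only /data dropbox:data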
dropbox/nametree.go (new file, 179 lines)
@@ -0,0 +1,179 @@
package dropbox

import (
    "bytes"
    "fmt"
    "github.com/ncw/rclone/fs"
    "github.com/stacktic/dropbox"
    "strings"
)

type NameTreeNode struct {
    // Map from lowercase directory name to tree node
    Directories map[string]*NameTreeNode

    // Map from file name (case sensitive) to dropbox entry
    Files map[string]*dropbox.Entry

    // Empty string if exact case is unknown or root node
    CaseCorrectName string
}

// ------------------------------------------------------------

func newNameTreeNode(caseCorrectName string) *NameTreeNode {
    return &NameTreeNode{
        CaseCorrectName: caseCorrectName,
        Directories:     make(map[string]*NameTreeNode),
        Files:           make(map[string]*dropbox.Entry),
    }
}

func NewNameTree() *NameTreeNode {
    return newNameTreeNode("")
}

func (tree *NameTreeNode) String() string {
    if len(tree.CaseCorrectName) == 0 {
        return "NameTreeNode/<root>"
    } else {
        return fmt.Sprintf("NameTreeNode/%q", tree.CaseCorrectName)
    }
}

func (tree *NameTreeNode) getTreeNode(path string) *NameTreeNode {
    if len(path) == 0 {
        // no lookup required, just return root
        return tree
    }

    current := tree
    for _, component := range strings.Split(path, "/") {
        if len(component) == 0 {
            fs.Stats.Error()
            fs.ErrorLog(tree, "getTreeNode: path component is empty (full path %q)", path)
            return nil
        }

        lowercase := strings.ToLower(component)

        lookup := current.Directories[lowercase]
        if lookup == nil {
            lookup = newNameTreeNode("")
            current.Directories[lowercase] = lookup
        }

        current = lookup
    }

    return current
}

func (tree *NameTreeNode) PutCaseCorrectDirectoryName(parentPath string, caseCorrectDirectoryName string) {
    if len(caseCorrectDirectoryName) == 0 {
        fs.Stats.Error()
        fs.ErrorLog(tree, "PutCaseCorrectDirectoryName: empty caseCorrectDirectoryName is not allowed (parentPath: %q)", parentPath)
        return
    }

    node := tree.getTreeNode(parentPath)
    if node == nil {
        return
    }

    lowerCaseDirectoryName := strings.ToLower(caseCorrectDirectoryName)
    directory := node.Directories[lowerCaseDirectoryName]
    if directory == nil {
        directory = newNameTreeNode(caseCorrectDirectoryName)
        node.Directories[lowerCaseDirectoryName] = directory
    } else {
        if len(directory.CaseCorrectName) > 0 {
            fs.Stats.Error()
            fs.ErrorLog(tree, "PutCaseCorrectDirectoryName: directory %q is already exists under parent path %q", caseCorrectDirectoryName, parentPath)
            return
        }

        directory.CaseCorrectName = caseCorrectDirectoryName
    }
}

func (tree *NameTreeNode) PutFile(parentPath string, caseCorrectFileName string, dropboxEntry *dropbox.Entry) {
    node := tree.getTreeNode(parentPath)
    if node == nil {
        return
    }

    if node.Files[caseCorrectFileName] != nil {
        fs.Stats.Error()
        fs.ErrorLog(tree, "PutFile: file %q is already exists at %q", caseCorrectFileName, parentPath)
        return
    }

    node.Files[caseCorrectFileName] = dropboxEntry
}

func (tree *NameTreeNode) GetPathWithCorrectCase(path string) *string {
    if path == "" {
        empty := ""
        return &empty
    }

    var result bytes.Buffer

    current := tree
    for _, component := range strings.Split(path, "/") {
        if component == "" {
            fs.Stats.Error()
            fs.ErrorLog(tree, "GetPathWithCorrectCase: path component is empty (full path %q)", path)
            return nil
        }

        lowercase := strings.ToLower(component)

        current = current.Directories[lowercase]
        if current == nil || current.CaseCorrectName == "" {
            return nil
        }

        result.WriteString("/")
        result.WriteString(current.CaseCorrectName)
    }

    resultString := result.String()
    return &resultString
}

type NameTreeFileWalkFunc func(caseCorrectFilePath string, entry *dropbox.Entry)

func (tree *NameTreeNode) walkFilesRec(currentPath string, walkFunc NameTreeFileWalkFunc) {
    var prefix string
    if currentPath == "" {
        prefix = ""
    } else {
        prefix = currentPath + "/"
    }

    for name, entry := range tree.Files {
        walkFunc(prefix+name, entry)
    }

    for lowerCaseName, directory := range tree.Directories {
        caseCorrectName := directory.CaseCorrectName
        if caseCorrectName == "" {
            fs.Stats.Error()
            fs.ErrorLog(tree, "WalkFiles: exact name of the directory %q is unknown (parent path: %q)", lowerCaseName, currentPath)
            continue
        }

        directory.walkFilesRec(prefix+caseCorrectName, walkFunc)
    }
}

func (tree *NameTreeNode) WalkFiles(rootPath string, walkFunc NameTreeFileWalkFunc) {
    node := tree.getTreeNode(rootPath)
    if node == nil {
        return
    }

    node.walkFilesRec(rootPath, walkFunc)
}
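To make the new helper concrete, here is a minimal usage sketch from inside the dropbox package (the paths are hypothetical; Entry is the github.com/stacktic/dropbox type used above):

    tree := NewNameTree()
    // the delta listing reports the directory with its exact case...
    tree.PutCaseCorrectDirectoryName("", "Photos")
    // ...but a file can arrive with a lowercased parent path
    tree.PutFile("photos", "IMG_1.jpg", &dropbox.Entry{Path: "/photos/img_1.jpg"})
    tree.WalkFiles("", func(path string, entry *dropbox.Entry) {
        fmt.Println(path) // prints "Photos/IMG_1.jpg"
    })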
dropbox/nametree_test.go (new file, 124 lines)
@@ -0,0 +1,124 @@
package dropbox_test

import (
    "github.com/ncw/rclone/dropbox"
    "github.com/ncw/rclone/fs"
    dropboxapi "github.com/stacktic/dropbox"
    "testing"
)

func assert(t *testing.T, shouldBeTrue bool, failMessage string) {
    if !shouldBeTrue {
        t.Fatal(failMessage)
    }
}

func TestPutCaseCorrectDirectoryName(t *testing.T) {
    errors := fs.Stats.GetErrors()

    tree := dropbox.NewNameTree()
    tree.PutCaseCorrectDirectoryName("a/b", "C")

    assert(t, tree.CaseCorrectName == "", "Root CaseCorrectName should be empty")

    a := tree.Directories["a"]
    assert(t, a.CaseCorrectName == "", "CaseCorrectName at 'a' should be empty")

    b := a.Directories["b"]
    assert(t, b.CaseCorrectName == "", "CaseCorrectName at 'a/b' should be empty")

    c := b.Directories["c"]
    assert(t, c.CaseCorrectName == "C", "CaseCorrectName at 'a/b/c' should be 'C'")

    assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
}

func TestPutCaseCorrectDirectoryNameEmptyComponent(t *testing.T) {
    errors := fs.Stats.GetErrors()

    tree := dropbox.NewNameTree()
    tree.PutCaseCorrectDirectoryName("/a", "C")
    tree.PutCaseCorrectDirectoryName("b/", "C")
    tree.PutCaseCorrectDirectoryName("a//b", "C")

    assert(t, fs.Stats.GetErrors() == errors+3, "3 errors should be reported")
}

func TestPutCaseCorrectDirectoryNameEmptyParent(t *testing.T) {
    errors := fs.Stats.GetErrors()

    tree := dropbox.NewNameTree()
    tree.PutCaseCorrectDirectoryName("", "C")

    c := tree.Directories["c"]
    assert(t, c.CaseCorrectName == "C", "CaseCorrectName at 'c' should be 'C'")

    assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
}

func TestGetPathWithCorrectCase(t *testing.T) {
    errors := fs.Stats.GetErrors()

    tree := dropbox.NewNameTree()
    tree.PutCaseCorrectDirectoryName("a", "C")
    assert(t, tree.GetPathWithCorrectCase("a/c") == nil, "Path for 'a' should not be available")

    tree.PutCaseCorrectDirectoryName("", "A")
    assert(t, *tree.GetPathWithCorrectCase("a/c") == "/A/C", "Path for 'a/c' should be '/A/C'")

    assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
}

func TestPutAndWalk(t *testing.T) {
    errors := fs.Stats.GetErrors()

    tree := dropbox.NewNameTree()
    tree.PutFile("a", "F", &dropboxapi.Entry{Path: "xxx"})
    tree.PutCaseCorrectDirectoryName("", "A")

    numCalled := 0
    walkFunc := func(caseCorrectFilePath string, entry *dropboxapi.Entry) {
        assert(t, caseCorrectFilePath == "A/F", "caseCorrectFilePath should be A/F, not "+caseCorrectFilePath)
        assert(t, entry.Path == "xxx", "entry.Path should be xxx")
        numCalled++
    }
    tree.WalkFiles("", walkFunc)

    assert(t, numCalled == 1, "walk func should be called only once")

    assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
}

func TestPutAndWalkWithPrefix(t *testing.T) {
    errors := fs.Stats.GetErrors()

    tree := dropbox.NewNameTree()
    tree.PutFile("a", "F", &dropboxapi.Entry{Path: "xxx"})
    tree.PutCaseCorrectDirectoryName("", "A")

    numCalled := 0
    walkFunc := func(caseCorrectFilePath string, entry *dropboxapi.Entry) {
        assert(t, caseCorrectFilePath == "A/F", "caseCorrectFilePath should be A/F, not "+caseCorrectFilePath)
        assert(t, entry.Path == "xxx", "entry.Path should be xxx")
        numCalled++
    }
    tree.WalkFiles("A", walkFunc)

    assert(t, numCalled == 1, "walk func should be called only once")

    assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
}

func TestPutAndWalkIncompleteTree(t *testing.T) {
    errors := fs.Stats.GetErrors()

    tree := dropbox.NewNameTree()
    tree.PutFile("a", "F", &dropboxapi.Entry{Path: "xxx"})

    walkFunc := func(caseCorrectFilePath string, entry *dropboxapi.Entry) {
        t.Fatal("Should not be called")
    }
    tree.WalkFiles("", walkFunc)

    assert(t, fs.Stats.GetErrors() == errors+1, "One error should be reported")
}
@@ -124,6 +124,16 @@ func (s *StatsInfo) GetErrors() int64 {
return s.errors
}

// ResetCounters sets the counters (bytes, checks, errors, transfers) to 0
func (s *StatsInfo) ResetCounters() {
s.lock.RLock()
defer s.lock.RUnlock()
s.bytes = 0
s.errors = 0
s.checks = 0
s.transfers = 0
}

// Errored returns whether there have been any errors
func (s *StatsInfo) Errored() bool {
s.lock.RLock()

@@ -153,6 +163,13 @@ func (s *StatsInfo) DoneChecking(o Object) {
s.checks += 1
}

// GetTransfers reads the number of transfers
func (s *StatsInfo) GetTransfers() int64 {
s.lock.RLock()
defer s.lock.RUnlock()
return s.transfers
}

// Transferring adds a transfer into the stats
func (s *StatsInfo) Transferring(o Object) {
s.lock.Lock()

@@ -170,6 +187,12 @@ func (s *StatsInfo) DoneTransferring(o Object) {

// Account limits and accounts for one transfer
type Account struct {
// The mutex is to make sure Read() and Close() aren't called
// concurrently. Unfortunately the persistent connection loop
// in http transport calls Read() after Do() returns on
// CancelRequest so this race can happen when it apparently
// shouldn't.
mu sync.Mutex
in io.ReadCloser
bytes int64
}

@@ -183,6 +206,8 @@ func NewAccount(in io.ReadCloser) *Account {

// Read bytes from the object - see io.Reader
func (file *Account) Read(p []byte) (n int, err error) {
file.mu.Lock()
defer file.mu.Unlock()
n, err = file.in.Read(p)
file.bytes += int64(n)
Stats.Bytes(int64(n))

@@ -198,6 +223,8 @@ func (file *Account) Read(p []byte) (n int, err error) {

// Close the object
func (file *Account) Close() error {
file.mu.Lock()
defer file.mu.Unlock()
// FIXME do something?
return file.in.Close()
}
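A rough sketch of how the accounting wrapper is used by a transfer (variable names hypothetical; the Copy() hunk further down does exactly this with its source stream):

    in := NewAccount(in0)  // in0 is an io.ReadCloser, e.g. an HTTP response body
    n, err := in.Read(buf) // bytes are counted into Stats as they stream
    _ = in.Close()         // with the new mutex this is safe even if the transport races a Read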
@@ -44,6 +44,8 @@
checkers = pflag.IntP("checkers", "", 8, "Number of checkers to run in parallel.")
transfers = pflag.IntP("transfers", "", 4, "Number of file transfers to run in parallel.")
configFile = pflag.StringP("config", "", ConfigPath, "Config file.")
checkSum = pflag.BoolP("checksum", "c", false, "Skip based on checksum & size, not mod-time & size")
sizeOnly = pflag.BoolP("size-only", "", false, "Skip based on size only, not mod-time or checksum")
dryRun = pflag.BoolP("dry-run", "n", false, "Do a trial run with no permanent changes")
connectTimeout = pflag.DurationP("contimeout", "", 60*time.Second, "Connect timeout")
timeout = pflag.DurationP("timeout", "", 5*60*time.Second, "IO idle timeout")

@@ -119,6 +121,8 @@ type ConfigInfo struct {
Verbose bool
Quiet bool
DryRun bool
CheckSum bool
SizeOnly bool
ModifyWindow time.Duration
Checkers int
Transfers int

@@ -194,6 +198,8 @@ func LoadConfig() {
Config.DryRun = *dryRun
Config.Timeout = *timeout
Config.ConnectTimeout = *connectTimeout
Config.CheckSum = *checkSum
Config.SizeOnly = *sizeOnly

ConfigPath = *configFile

@@ -201,7 +207,7 @@ func LoadConfig() {
var err error
ConfigFile, err = goconfig.LoadConfigFile(ConfigPath)
if err != nil {
log.Printf("Failed to load config file %v - using defaults", ConfigPath)
log.Printf("Failed to load config file %v - using defaults: %v", ConfigPath, err)
ConfigFile, err = goconfig.LoadConfigFile(os.DevNull)
if err != nil {
log.Fatalf("Failed to read null config file: %v", err)
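With the two new flags plumbed through to ConfigInfo, the sync comparison strategy can be chosen on the command line, for example (illustrative invocations):

    rclone sync --checksum /src remote:dst   # skip based on md5sum & size, ignore mod-time
    rclone sync --size-only /src remote:dst  # skip based on size alone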
fs/fs.go (13 changed lines)

@@ -1,5 +1,4 @@
// File system interface

// Generic file system interface for rclone object storage systems
package fs

import (

@@ -15,6 +14,8 @@ import (
const (
// User agent for Fs which can set it
UserAgent = "rclone/" + Version
// Very large precision value to show mod time isn't supported
ModTimeNotSupported = 100 * 365 * 24 * time.Hour
)

// Globals

@@ -108,9 +109,11 @@ type Object interface {
Remote() string

// Md5sum returns the md5 checksum of the file
// If no Md5sum is available it returns ""
Md5sum() (string, error)

// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
ModTime() time.Time

// SetModTime sets the metadata on the object to set the modification date

@@ -265,6 +268,12 @@ func Log(o interface{}, text string, args ...interface{}) {
}
}

// Write error log output for this Object or Fs
// Unconditionally logs a message regardless of Config.Quiet or Config.Verbose
func ErrorLog(o interface{}, text string, args ...interface{}) {
OutputLog(o, text, args...)
}

// checkClose is a utility function used to check the return from
// Close in a defer statement.
func checkClose(c io.Closer, err *error) {
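For context, checkClose is the usual defer-and-capture helper for Close errors; a hedged sketch of the calling pattern (the surrounding function is hypothetical, and checkClose's body is not shown in this diff):

    func readFile(path string) (data []byte, err error) {
        f, err := os.Open(path)
        if err != nil {
            return nil, err
        }
        defer checkClose(f, &err) // conventionally records Close's error if err is still nil
        return ioutil.ReadAll(f)
    }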
fs/operations.go (109 changed lines)

@@ -8,6 +8,7 @@ import (
"mime"
"path"
"sync"
"time"
)

// Work out modify window for fses passed in - sets Config.ModifyWindow

@@ -26,6 +27,14 @@ func CalculateModifyWindow(fs ...Fs) {
Debug(fs[0], "Modify window is %s\n", Config.ModifyWindow)
}

// Md5sumsEqual checks to see if src == dst, but ignores empty strings
func Md5sumsEqual(src, dst string) bool {
if src == "" || dst == "" {
return true
}
return src == dst
}

// Check the two files to see if the MD5sums are the same
//
// May return an error which will already have been logged
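A quick worked example of the blank-tolerant comparison:

    Md5sumsEqual("d41d8cd98f00b204e9800998ecf8427e", "d41d8cd98f00b204e9800998ecf8427e") // true  - both known, equal
    Md5sumsEqual("", "d41d8cd98f00b204e9800998ecf8427e")                                 // true  - blank means "unknown", not a mismatch
    Md5sumsEqual("8ee2027983915ec78acc45027d874316", "d41d8cd98f00b204e9800998ecf8427e") // false - both known, different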
@@ -35,33 +44,34 @@ func CheckMd5sums(src, dst Object) (bool, error) {
srcMd5, err := src.Md5sum()
if err != nil {
Stats.Error()
Log(src, "Failed to calculate src md5: %s", err)
ErrorLog(src, "Failed to calculate src md5: %s", err)
return false, err
}
dstMd5, err := dst.Md5sum()
if err != nil {
Stats.Error()
Log(dst, "Failed to calculate dst md5: %s", err)
ErrorLog(dst, "Failed to calculate dst md5: %s", err)
return false, err
}
// Debug("Src MD5 %s", srcMd5)
// Debug("Dst MD5 %s", obj.Hash)
return srcMd5 == dstMd5, nil
return Md5sumsEqual(srcMd5, dstMd5), nil
}

// Checks to see if the src and dst objects are equal by looking at
// size, mtime and MD5SUM
//
// If the src and dst size are different then it is considered to be
// not equal.
// not equal. If --size-only is in effect then this is the only check
// that is done.
//
// If the size is the same and the mtime is the same then it is
// considered to be equal. This is the heuristic rsync uses when
// not using --checksum.
// considered to be equal. This check is skipped if using --checksum.
//
// If the size is the same and and mtime is different or unreadable
// and the MD5SUM is the same then the file is considered to be equal.
// In this case the mtime on the dst is updated.
// If the size is the same and mtime is different, unreadable or
// --checksum is set and the MD5SUM is the same then the file is
// considered to be equal. In this case the mtime on the dst is
// updated if --checksum is not set.
//
// Otherwise the file is considered to be not equal including if there
// were errors reading info.

@@ -70,19 +80,26 @@ func Equal(src, dst Object) bool {
Debug(src, "Sizes differ")
return false
}

// Size the same so check the mtime
srcModTime := src.ModTime()
dstModTime := dst.ModTime()
dt := dstModTime.Sub(srcModTime)
ModifyWindow := Config.ModifyWindow
if dt >= ModifyWindow || dt <= -ModifyWindow {
Debug(src, "Modification times differ by %s: %v, %v", dt, srcModTime, dstModTime)
} else {
Debug(src, "Size and modification time the same (differ by %s, within tolerance %s)", dt, ModifyWindow)
if Config.SizeOnly {
Debug(src, "Sizes identical")
return true
}

var srcModTime time.Time
if !Config.CheckSum {
// Size the same so check the mtime
srcModTime = src.ModTime()
dstModTime := dst.ModTime()
dt := dstModTime.Sub(srcModTime)
ModifyWindow := Config.ModifyWindow
if dt >= ModifyWindow || dt <= -ModifyWindow {
Debug(src, "Modification times differ by %s: %v, %v", dt, srcModTime, dstModTime)
} else {
Debug(src, "Size and modification time the same (differ by %s, within tolerance %s)", dt, ModifyWindow)
return true
}
}

// mtime is unreadable or different but size is the same so
// check the MD5SUM
same, _ := CheckMd5sums(src, dst)

@@ -91,9 +108,11 @@ func Equal(src, dst Object) bool {
return false
}

// Size and MD5 the same but mtime different so update the
// mtime of the dst object here
dst.SetModTime(srcModTime)
if !Config.CheckSum {
// Size and MD5 the same but mtime different so update the
// mtime of the dst object here
dst.SetModTime(srcModTime)
}

Debug(src, "Size and MD5SUM of src and dst objects identical")
return true
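The rewritten Equal therefore applies this decision ladder (a summary sketch, not the literal source):

    // 1. sizes differ                        -> not equal
    // 2. --size-only set                     -> equal, stop here
    // 3. not --checksum and mtimes match     -> equal
    // 4. md5sums differ (blanks tolerated)   -> not equal
    // 5. otherwise equal; dst mtime is fixed up unless --checksum is set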
@@ -137,7 +156,7 @@ tryAgain:
in0, err := src.Open()
if err != nil {
Stats.Error()
Log(src, "Failed to open: %s", err)
ErrorLog(src, "Failed to open: %s", err)
return
}
in := NewAccount(in0) // account the transfer

@@ -167,7 +186,7 @@ tryAgain:
}
if err != nil {
Stats.Error()
Log(src, "Failed to copy: %s", err)
ErrorLog(src, "Failed to copy: %s", err)
removeFailedCopy(dst)
return
}

@@ -176,27 +195,29 @@ tryAgain:
if src.Size() != dst.Size() {
Stats.Error()
err = fmt.Errorf("Corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size())
Log(dst, "%s", err)
ErrorLog(dst, "%s", err)
removeFailedCopy(dst)
return
}

// Verify md5sums are the same after transfer - ignoring blank md5sums
srcMd5sum, md5sumErr := src.Md5sum()
if md5sumErr != nil {
Stats.Error()
Log(src, "Failed to read md5sum: %s", md5sumErr)
} else if srcMd5sum != "" {
dstMd5sum, md5sumErr := dst.Md5sum()
if !Config.SizeOnly {
srcMd5sum, md5sumErr := src.Md5sum()
if md5sumErr != nil {
Stats.Error()
Log(dst, "Failed to read md5sum: %s", md5sumErr)
} else if dstMd5sum != "" && srcMd5sum != dstMd5sum {
Stats.Error()
err = fmt.Errorf("Corrupted on transfer: md5sums differ %q vs %q", srcMd5sum, dstMd5sum)
Log(dst, "%s", err)
removeFailedCopy(dst)
return
ErrorLog(src, "Failed to read md5sum: %s", md5sumErr)
} else if srcMd5sum != "" {
dstMd5sum, md5sumErr := dst.Md5sum()
if md5sumErr != nil {
Stats.Error()
ErrorLog(dst, "Failed to read md5sum: %s", md5sumErr)
} else if !Md5sumsEqual(srcMd5sum, dstMd5sum) {
Stats.Error()
err = fmt.Errorf("Corrupted on transfer: md5sums differ %q vs %q", srcMd5sum, dstMd5sum)
ErrorLog(dst, "%s", err)
removeFailedCopy(dst)
return
}
}
}

@@ -267,7 +288,7 @@ func DeleteFiles(to_be_deleted ObjectsChan) {
Stats.DoneChecking(dst)
if err != nil {
Stats.Error()
Log(dst, "Couldn't delete: %s", err)
ErrorLog(dst, "Couldn't delete: %s", err)
} else {
Debug(dst, "Deleted")
}

@@ -392,13 +413,13 @@ func Check(fdst, fsrc Fs) error {
Log(fdst, "%d files not in %v", len(dstFiles), fsrc)
for _, dst := range dstFiles {
Stats.Error()
Log(dst, "File not in %v", fsrc)
ErrorLog(dst, "File not in %v", fsrc)
}

Log(fsrc, "%d files not in %s", len(srcFiles), fdst)
for _, src := range srcFiles {
Stats.Error()
Log(src, "File not in %v", fdst)
ErrorLog(src, "File not in %v", fdst)
}

checks := make(chan []Object, Config.Transfers)

@@ -420,7 +441,7 @@ func Check(fdst, fsrc Fs) error {
if src.Size() != dst.Size() {
Stats.DoneChecking(src)
Stats.Error()
Log(src, "Sizes differ")
ErrorLog(src, "Sizes differ")
continue
}
same, err := CheckMd5sums(src, dst)

@@ -430,7 +451,7 @@ func Check(fdst, fsrc Fs) error {
}
if !same {
Stats.Error()
Log(src, "Md5sums differ")
ErrorLog(src, "Md5sums differ")
}
Debug(src, "OK")
}

@@ -512,7 +533,7 @@ func Md5sum(f Fs, w io.Writer) error {
Stats.DoneChecking(o)
if err != nil {
Debug(o, "Failed to read MD5: %v", err)
md5sum = "UNKNOWN"
md5sum = "ERROR"
}
syncFprintf(w, "%32s %s\n", md5sum, o.Remote())
})
@@ -169,6 +169,112 @@ func TestCopyRedownload(t *testing.T) {
cleanTempDir(t)
}

// Create a file and sync it. Change the last modified date and resync.
// If we're only doing sync by size and checksum, we expect nothing to
// to be transferred on the second sync.
func TestSyncBasedOnCheckSum(t *testing.T) {
cleanTempDir(t)
fs.Config.CheckSum = true
defer func() { fs.Config.CheckSum = false }()

WriteFile("check sum", "", t1)
local_items := []fstest.Item{
{Path: "check sum", Size: 0, ModTime: t1, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
}
fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)

fs.Stats.ResetCounters()
err := fs.Sync(fremote, flocal, true)
if err != nil {
t.Fatalf("Initial sync failed: %v", err)
}

// We should have transferred exactly one file.
if fs.Stats.GetTransfers() != 1 {
t.Fatalf("Sync 1: want 1 transfer, got %d", fs.Stats.GetTransfers())
}

remote_items := local_items
fstest.CheckListingWithPrecision(t, fremote, remote_items, fs.Config.ModifyWindow)

err = os.Chtimes(localName+"/check sum", t2, t2)
if err != nil {
t.Fatalf("Chtimes failed: %v", err)
}
local_items = []fstest.Item{
{Path: "check sum", Size: 0, ModTime: t2, Md5sum: "d41d8cd98f00b204e9800998ecf8427e"},
}
fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)

fs.Stats.ResetCounters()
err = fs.Sync(fremote, flocal, true)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}

// We should have transferred no files
if fs.Stats.GetTransfers() != 0 {
t.Fatalf("Sync 2: want 0 transfers, got %d", fs.Stats.GetTransfers())
}

fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, remote_items, fs.Config.ModifyWindow)

cleanTempDir(t)
}

// Create a file and sync it. Change the last modified date and the
// file contents but not the size. If we're only doing sync by size
// only, we expect nothing to to be transferred on the second sync.
func TestSyncSizeOnly(t *testing.T) {
cleanTempDir(t)
fs.Config.SizeOnly = true
defer func() { fs.Config.SizeOnly = false }()

WriteFile("sizeonly", "potato", t1)
local_items := []fstest.Item{
{Path: "sizeonly", Size: 6, ModTime: t1, Md5sum: "8ee2027983915ec78acc45027d874316"},
}
fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)

fs.Stats.ResetCounters()
err := fs.Sync(fremote, flocal, true)
if err != nil {
t.Fatalf("Initial sync failed: %v", err)
}

// We should have transferred exactly one file.
if fs.Stats.GetTransfers() != 1 {
t.Fatalf("Sync 1: want 1 transfer, got %d", fs.Stats.GetTransfers())
}

remote_items := local_items
fstest.CheckListingWithPrecision(t, fremote, remote_items, fs.Config.ModifyWindow)

// Update mtime, md5sum but not length of file
WriteFile("sizeonly", "POTATO", t2)
local_items = []fstest.Item{
{Path: "sizeonly", Size: 6, ModTime: t2, Md5sum: "8ac6f27a282e4938125482607ccfb55f"},
}
fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)

fs.Stats.ResetCounters()
err = fs.Sync(fremote, flocal, true)
if err != nil {
t.Fatalf("Sync failed: %v", err)
}

// We should have transferred no files
if fs.Stats.GetTransfers() != 0 {
t.Fatalf("Sync 2: want 0 transfers, got %d", fs.Stats.GetTransfers())
}

fstest.CheckListingWithPrecision(t, flocal, local_items, fs.Config.ModifyWindow)
fstest.CheckListingWithPrecision(t, fremote, remote_items, fs.Config.ModifyWindow)

cleanTempDir(t)
}

func TestSyncAfterChangingModtimeOnly(t *testing.T) {
WriteFile("empty space", "", t1)

@@ -292,13 +398,37 @@ func TestLsLong(t *testing.T) {
t.Fatalf("List failed: %v", err)
}
res := buf.String()
m1 := regexp.MustCompile(`(?m)^ 0 2011-12-25 12:59:59\.\d{9} empty space$`)
if !m1.MatchString(res) {
t.Errorf("empty space missing: %q", res)
lines := strings.Split(strings.Trim(res, "\n"), "\n")
if len(lines) != 2 {
t.Fatalf("Wrong number of lines in list: %q", lines)
}
m2 := regexp.MustCompile(`(?m)^ 60 2001-02-03 04:05:06\.\d{9} potato2$`)
if !m2.MatchString(res) {

timeFormat := "2006-01-02 15:04:05.000000000"
precision := fremote.Precision()
checkTime := func(m, filename string, expected time.Time) {
modTime, err := time.Parse(timeFormat, m)
if err != nil {
t.Errorf("Error parsing %q: %v", m, err)
} else {
dt, ok := fstest.CheckTimeEqualWithPrecision(expected, modTime, precision)
if !ok {
t.Errorf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", filename, dt, precision, modTime, expected, precision)
}
}
}

m1 := regexp.MustCompile(`(?m)^ 0 (\d{4}-\d\d-\d\d \d\d:\d\d:\d\d\.\d{9}) empty space$`)
if ms := m1.FindStringSubmatch(res); ms == nil {
t.Errorf("empty space missing: %q", res)
} else {
checkTime(ms[1], "empty space", t2.Local())
}

m2 := regexp.MustCompile(`(?m)^ 60 (\d{4}-\d\d-\d\d \d\d:\d\d:\d\d\.\d{9}) potato2$`)
if ms := m2.FindStringSubmatch(res); ms == nil {
t.Errorf("potato2 missing: %q", res)
} else {
checkTime(ms[1], "potato2", t1.Local())
}
}

@@ -309,10 +439,12 @@ func TestMd5sum(t *testing.T) {
t.Fatalf("List failed: %v", err)
}
res := buf.String()
if !strings.Contains(res, "d41d8cd98f00b204e9800998ecf8427e empty space\n") {
if !strings.Contains(res, "d41d8cd98f00b204e9800998ecf8427e empty space\n") &&
!strings.Contains(res, " empty space\n") {
t.Errorf("empty space missing: %q", res)
}
if !strings.Contains(res, "6548b156ea68a4e003e786df99eee76 potato2\n") {
if !strings.Contains(res, "6548b156ea68a4e003e786df99eee76 potato2\n") &&
!strings.Contains(res, " potato2\n") {
t.Errorf("potato2 missing: %q", res)
}
}
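The md5 constants in these tests are ordinary hex digests of the file contents; they can be reproduced with a few lines of Go (standalone sketch):

    package main

    import (
        "crypto/md5"
        "fmt"
    )

    func main() {
        fmt.Printf("%x\n", md5.Sum(nil))              // d41d8cd98f00b204e9800998ecf8427e (empty file)
        fmt.Printf("%x\n", md5.Sum([]byte("potato"))) // 8ee2027983915ec78acc45027d874316
    }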
@@ -1,3 +1,3 @@
package fs

const Version = "v1.13"
const Version = "v1.18"
@@ -1,5 +1,4 @@
// Utilities for testing the fs

// Utilities for testing the Fs
package fstest

// FIXME put name of test FS in Fs structure

@@ -31,10 +30,19 @@ type Item struct {
Size int64
}

// Checks the times are equal within the precision, returns the delta and a flag
func CheckTimeEqualWithPrecision(t0, t1 time.Time, precision time.Duration) (time.Duration, bool) {
dt := t0.Sub(t1)
if dt >= precision || dt <= -precision {
return dt, false
}
return dt, true
}

// check the mod time to the given precision
func (i *Item) CheckModTime(t *testing.T, obj fs.Object, modTime time.Time, precision time.Duration) {
dt := modTime.Sub(i.ModTime)
if dt >= precision || dt <= -precision {
dt, ok := CheckTimeEqualWithPrecision(modTime, i.ModTime, precision)
if !ok {
t.Errorf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", obj.Remote(), dt, precision, modTime, i.ModTime, precision)
}
}

@@ -48,7 +56,7 @@ func (i *Item) Check(t *testing.T, obj fs.Object, precision time.Duration) {
if err != nil {
t.Fatalf("Failed to read md5sum for %q: %v", obj.Remote(), err)
}
if i.Md5sum != Md5sum {
if !fs.Md5sumsEqual(i.Md5sum, Md5sum) {
t.Errorf("%s: Md5sum incorrect - expecting %q got %q", obj.Remote(), i.Md5sum, Md5sum)
}
if i.Size != obj.Size() {
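A worked call of the new helper (values illustrative):

    t0 := time.Date(2011, 12, 25, 12, 59, 59, 0, time.UTC)
    dt, ok := fstest.CheckTimeEqualWithPrecision(t0, t0.Add(500*time.Millisecond), time.Second)
    // dt == -500ms, ok == true: within the one-second precision window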
@@ -5,6 +5,7 @@ import (
"bytes"
"crypto/md5"
"encoding/hex"
"flag"
"io"
"log"
"os"

@@ -32,11 +33,16 @@ var (
}
)

func init() {
flag.StringVar(&RemoteName, "remote", "", "Set this to override the default remote name (eg s3:)")
}

func TestInit(t *testing.T) {
var err error
fs.LoadConfig()
fs.Config.Verbose = false
fs.Config.Quiet = true
t.Logf("Using remote %q", RemoteName)
if RemoteName == "" {
RemoteName, err = fstest.LocalRemote()
if err != nil {

@@ -252,6 +258,9 @@ func TestFsRmdirFull(t *testing.T) {
func TestFsPrecision(t *testing.T) {
skipIfNotOk(t)
precision := remote.Precision()
if precision == fs.ModTimeNotSupported {
return
}
if precision > time.Second || precision < 0 {
t.Fatalf("Precision out of range %v", precision)
}

@@ -295,7 +304,7 @@ func TestObjectMd5sum(t *testing.T) {
if err != nil {
t.Errorf("Error in Md5sum: %v", err)
}
if Md5sum != file1.Md5sum {
if !fs.Md5sumsEqual(Md5sum, file1.Md5sum) {
t.Errorf("Md5sum is wrong %v != %v", Md5sum, file1.Md5sum)
}
}

@@ -345,7 +354,7 @@ func TestObjectOpen(t *testing.T) {
t.Fatalf("in.Close() return error: %v", err)
}
Md5sum := hex.EncodeToString(hash.Sum(nil))
if Md5sum != file1.Md5sum {
if !fs.Md5sumsEqual(Md5sum, file1.Md5sum) {
t.Errorf("Md5sum is wrong %v != %v", Md5sum, file1.Md5sum)
}
}
@@ -42,7 +42,7 @@ const (
var (
// Description of how to auth for this app
storageAuth = &googleauth.Auth{
Scope: storage.DevstorageFull_controlScope,
Scope: storage.DevstorageFullControlScope,
DefaultClientId: rcloneClientId,
DefaultClientSecret: rcloneClientSecret,
}

@@ -251,7 +251,7 @@ func (f *FsStorage) list(directories bool, fn func(string, *storage.Object)) {
objects, err := list.Do()
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't read bucket %q: %s", f.bucket, err)
fs.ErrorLog(f, "Couldn't read bucket %q: %s", f.bucket, err)
return
}
if !directories {

@@ -286,7 +286,7 @@ func (f *FsStorage) List() fs.ObjectsChan {
// Return no objects at top level list
close(out)
fs.Stats.Error()
fs.Log(f, "Can't list objects at root - choose a bucket using lsd")
fs.ErrorLog(f, "Can't list objects at root - choose a bucket using lsd")
} else {
// List the objects
go func() {

@@ -310,7 +310,7 @@ func (f *FsStorage) ListDir() fs.DirChan {
defer close(out)
if f.projectNumber == "" {
fs.Stats.Error()
fs.Log(f, "Can't list buckets without project number")
fs.ErrorLog(f, "Can't list buckets without project number")
return
}
listBuckets := f.svc.Buckets.List(f.projectNumber).MaxResults(listChunks)

@@ -318,7 +318,7 @@ func (f *FsStorage) ListDir() fs.DirChan {
buckets, err := listBuckets.Do()
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't list buckets: %v", err)
fs.ErrorLog(f, "Couldn't list buckets: %v", err)
break
} else {
for _, bucket := range buckets.Items {

@@ -505,7 +505,7 @@ func (o *FsObjectStorage) SetModTime(modTime time.Time) {
newObject, err := o.storage.svc.Objects.Patch(o.storage.bucket, o.storage.root+o.remote, &object).Do()
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to update remote mtime: %s", err)
fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
}
o.setMetaData(newObject)
}
@@ -18,6 +18,7 @@ import (
"path/filepath"
"sync"
"time"
"unicode/utf8"

"github.com/ncw/rclone/fs"
)
@@ -32,14 +33,15 @@ func init() {

// FsLocal represents a local filesystem rooted at root
type FsLocal struct {
root string // The root directory
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
root string // The root directory
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
warned map[string]struct{} // whether we have warned about this string
}

// FsObjectLocal represents a local filesystem object
type FsObjectLocal struct {
local fs.Fs // The Fs this object is part of
local *FsLocal // The Fs this object is part of
remote string // The remote path
path string // The local path
info os.FileInfo // Interface for file info (always present)
@@ -51,7 +53,10 @@ type FsObjectLocal struct {
// NewFs contstructs an FsLocal from the path
func NewFs(name, root string) (fs.Fs, error) {
root = filepath.ToSlash(path.Clean(root))
f := &FsLocal{root: root}
f := &FsLocal{
root: root,
warned: make(map[string]struct{}),
}
// Check to see if this points to a file
fi, err := os.Lstat(f.root)
if err == nil && fi.Mode().IsRegular() {
@@ -105,12 +110,12 @@ func (f *FsLocal) List() fs.ObjectsChan {
err := filepath.Walk(f.root, func(path string, fi os.FileInfo, err error) error {
if err != nil {
fs.Stats.Error()
fs.Log(f, "Failed to open directory: %s: %s", path, err)
fs.ErrorLog(f, "Failed to open directory: %s: %s", path, err)
} else {
remote, err := filepath.Rel(f.root, path)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Failed to get relative path %s: %s", path, err)
fs.ErrorLog(f, "Failed to get relative path %s: %s", path, err)
return nil
}
if remote == "." {
@@ -127,13 +132,27 @@ func (f *FsLocal) List() fs.ObjectsChan {
})
if err != nil {
fs.Stats.Error()
fs.Log(f, "Failed to open directory: %s: %s", f.root, err)
fs.ErrorLog(f, "Failed to open directory: %s: %s", f.root, err)
}
close(out)
}()
return out
}

// CleanUtf8 makes string a valid UTF-8 string
//
// Any invalid UTF-8 characters will be replaced with utf8.RuneError
func (f *FsLocal) cleanUtf8(name string) string {
if utf8.ValidString(name) {
return name
}
if _, ok := f.warned[name]; !ok {
fs.Debug(f, "Replacing invalid UTF-8 characters in %q", name)
f.warned[name] = struct{}{}
}
return string([]rune(name))
}

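An aside on why the one-line conversion in cleanUtf8 is enough: round-tripping a string through []rune replaces every invalid UTF-8 byte with utf8.RuneError (U+FFFD). A small self-contained demonstration (illustrative only, not part of the diff):

	package main

	import (
		"fmt"
		"unicode/utf8"
	)

	func main() {
		name := "bad\xffname"
		fmt.Println(utf8.ValidString(name))    // false
		cleaned := string([]rune(name))
		fmt.Println(utf8.ValidString(cleaned)) // true - the \xff byte is now utf8.RuneError
	}
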
// Walk the path returning a channel of FsObjects
func (f *FsLocal) ListDir() fs.DirChan {
out := make(fs.DirChan, fs.Config.Checkers)
@@ -142,12 +161,12 @@ func (f *FsLocal) ListDir() fs.DirChan {
items, err := ioutil.ReadDir(f.root)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't find read directory: %s", err)
fs.ErrorLog(f, "Couldn't find read directory: %s", err)
} else {
for _, item := range items {
if item.IsDir() {
dir := &fs.Dir{
Name: item.Name(),
Name: f.cleanUtf8(item.Name()),
When: item.ModTime(),
Bytes: 0,
Count: 0,
@@ -157,7 +176,7 @@ func (f *FsLocal) ListDir() fs.DirChan {
err := filepath.Walk(dirpath, func(path string, fi os.FileInfo, err error) error {
if err != nil {
fs.Stats.Error()
fs.Log(f, "Failed to open directory: %s: %s", path, err)
fs.ErrorLog(f, "Failed to open directory: %s: %s", path, err)
} else {
dir.Count += 1
dir.Bytes += fi.Size()
@@ -166,7 +185,7 @@ func (f *FsLocal) ListDir() fs.DirChan {
})
if err != nil {
fs.Stats.Error()
fs.Log(f, "Failed to open directory: %s: %s", dirpath, err)
fs.ErrorLog(f, "Failed to open directory: %s: %s", dirpath, err)
}
out <- dir
}
@@ -191,7 +210,7 @@ func (f *FsLocal) Put(in io.Reader, remote string, modTime time.Time, size int64

// Mkdir creates the directory if it doesn't exist
func (f *FsLocal) Mkdir() error {
return os.MkdirAll(f.root, 0770)
return os.MkdirAll(f.root, 0777)
}

// Rmdir removes the directory
@@ -294,7 +313,7 @@ func (o *FsObjectLocal) String() string {

// Return the remote path
func (o *FsObjectLocal) Remote() string {
return o.remote
return o.local.cleanUtf8(o.remote)
}

// Md5sum calculates the Md5sum of a file returning a lowercase hex string
@@ -305,7 +324,7 @@ func (o *FsObjectLocal) Md5sum() (string, error) {
in, err := os.Open(o.path)
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to open: %s", err)
fs.ErrorLog(o, "Failed to open: %s", err)
return "", err
}
hash := md5.New()
@@ -313,12 +332,12 @@ func (o *FsObjectLocal) Md5sum() (string, error) {
closeErr := in.Close()
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to read: %s", err)
fs.ErrorLog(o, "Failed to read: %s", err)
return "", err
}
if closeErr != nil {
fs.Stats.Error()
fs.Log(o, "Failed to close: %s", closeErr)
fs.ErrorLog(o, "Failed to close: %s", closeErr)
return "", closeErr
}
o.md5sum = hex.EncodeToString(hash.Sum(nil))
@@ -410,7 +429,7 @@ func (o *FsObjectLocal) Open() (in io.ReadCloser, err error) {
// Update the object from in with modTime and size
func (o *FsObjectLocal) Update(in io.Reader, modTime time.Time, size int64) error {
dir := path.Dir(o.path)
err := os.MkdirAll(dir, 0770)
err := os.MkdirAll(dir, 0777)
if err != nil {
return err
}

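One detail worth noting on the 0770 to 0777 change (illustrative snippet, not from the diff): the permission bits passed to os.MkdirAll are filtered through the process umask on Unix, so 0777 usually yields 0755 directories in practice rather than world-writable ones:

	// Hypothetical path; with a typical umask of 022 the directory
	// ends up 0755 (drwxr-xr-x), not world-writable.
	err := os.MkdirAll("/tmp/rclone-example", 0777)
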
78
make_manual.py
Executable file
@@ -0,0 +1,78 @@
#!/usr/bin/python
"""
Make single page versions of the documentation for release and
conversion into man pages etc.
"""

import os
import re
from datetime import datetime

docpath = "docs/content"
outfile = "MANUAL.md"

# Order to add docs segments to make outfile
docs = [
    "about.md",
    "install.md",
    "docs.md",
    "drive.md",
    "s3.md",
    "swift.md",
    "dropbox.md",
    "googlecloudstorage.md",
    "local.md",
    "changelog.md",
    "bugs.md",
    "faq.md",
    "licence.md",
    "authors.md",
    "contact.md",
]

# Docs which aren't made into outfile
ignore_docs = [
    "downloads.md",
]

def read_doc(doc):
    """Read file as a string"""
    path = os.path.join(docpath, doc)
    with open(path) as fd:
        contents = fd.read()
    parts = contents.split("---\n", 2)
    if len(parts) != 3:
        raise ValueError("Couldn't find --- markers: found %d parts" % len(parts))
    contents = parts[2].strip()+"\n\n"
    # Remove icons
    contents = re.sub(r'<i class="fa.*?</i>\s*', "", contents)
    # Make [...](/links/) absolute
    contents = re.sub(r'\((\/.*?\/)\)', r"(http://rclone.org\1)", contents)
    return contents

def check_docs(docpath):
    """Check all the docs are in docpath"""
    files = set(f for f in os.listdir(docpath) if f.endswith(".md"))
    files -= set(ignore_docs)
    docs_set = set(docs)
    if files == docs_set:
        return
    print "Files on disk but not in docs variable: %s" % ", ".join(files - docs_set)
    print "Files in docs variable but not on disk: %s" % ", ".join(docs_set - files)
    raise ValueError("Missing files")

def main():
    check_docs(docpath)
    with open(outfile, "w") as out:
        out.write("""\
%% rclone(1) User Manual
%% Nick Craig-Wood
%% %s

""" % datetime.now().strftime("%b %d, %Y"))
        for doc in docs:
            out.write(read_doc(doc))
    print "Written '%s'" % outfile

if __name__ == "__main__":
    main()

@@ -1,3 +1,11 @@
Perhaps make Md5sum() and Modtime() optional. Define the zero values
"" and 0. Make it so we can support remotes which can't do those.

Fix the docs
* factor the README.md into the docs directory
* create it as part of make by assembling other parts
* write long docs about each flag

Change lsd command so it doesn't show -1
* Make sure all Fses show -1 for objects Zero for dates etc
* Make test?
@@ -7,6 +15,7 @@ Make test_all.sh use the TestRemote name automatically

Run errcheck and go vet in the make file
.. Also race detector?
.. go tool vet -shadow

Get rid of Storable?

@@ -377,7 +377,7 @@ func main() {
// Run the actual command
if command.Run != nil {
command.Run(fdst, fsrc)
if !command.NoStats {
if !command.NoStats && (!fs.Config.Quiet || fs.Stats.Errored() || *statsInterval > 0) {
fmt.Fprintln(os.Stderr, fs.Stats)
}
if fs.Config.Verbose {

377
s3/s3.go
@@ -3,19 +3,31 @@ package s3

// FIXME need to prevent anything but ListDir working for s3://

/*
Progress of port to aws-sdk

* Don't really need o.meta at all?

What happens if you CTRL-C a multipart upload
* get an incomplete upload
* disappears when you delete the bucket
*/

import (
"errors"
"fmt"
"io"
"net/http"
"path"
"regexp"
"strconv"
"strings"
"time"

"github.com/ncw/goamz/aws"
"github.com/ncw/goamz/s3"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/service"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/ncw/rclone/fs"
"github.com/ncw/swift"
)
@@ -33,39 +45,48 @@ func init() {
Name: "secret_access_key",
Help: "AWS Secret Access Key (password). ",
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.",
Name: "region",
Help: "Region to connect to.",
Examples: []fs.OptionExample{{
Value: "https://s3.amazonaws.com/",
Value: "us-east-1",
Help: "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia or Pacific Northwest.\nLeave location constraint empty.",
}, {
Value: "https://s3-external-1.amazonaws.com",
Help: "US Region, Northern Virginia only.\nLeave location constraint empty.",
}, {
Value: "https://s3-us-west-2.amazonaws.com",
Value: "us-west-2",
Help: "US West (Oregon) Region\nNeeds location constraint us-west-2.",
}, {
Value: "https://s3-us-west-1.amazonaws.com",
Value: "us-west-1",
Help: "US West (Northern California) Region\nNeeds location constraint us-west-1.",
}, {
Value: "https://s3-eu-west-1.amazonaws.com",
Value: "eu-west-1",
Help: "EU (Ireland) Region Region\nNeeds location constraint EU or eu-west-1.",
}, {
Value: "https://s3-ap-southeast-1.amazonaws.com",
Value: "eu-central-1",
Help: "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.",
}, {
Value: "ap-southeast-1",
Help: "Asia Pacific (Singapore) Region\nNeeds location constraint ap-southeast-1.",
}, {
Value: "https://s3-ap-southeast-2.amazonaws.com",
Help: "Asia Pacific (Sydney) Region\nNeeds location constraint .",
Value: "ap-southeast-2",
Help: "Asia Pacific (Sydney) Region\nNeeds location constraint ap-southeast-2.",
}, {
Value: "https://s3-ap-northeast-1.amazonaws.com",
Value: "ap-northeast-1",
Help: "Asia Pacific (Tokyo) Region\nNeeds location constraint ap-northeast-1.",
}, {
Value: "https://s3-sa-east-1.amazonaws.com",
Value: "sa-east-1",
Help: "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
}, {
Value: "other-v2-signature",
Help: "If using an S3 clone that only understands v2 signatures - eg Ceph - set this and make sure you set the endpoint.",
}, {
Value: "other-v4-signature",
Help: "If using an S3 clone that understands v4 signatures set this and make sure you set the endpoint.",
}},
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\nLeave blank if using AWS to use the default endpoint for the region.\nSpecify if using an S3 clone such as Ceph.",
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Endpoint.",
Help: "Location constraint - must be set to match the Region. Used when creating buckets only.",
Examples: []fs.OptionExample{{
Value: "",
Help: "Empty for US Region, Northern Virginia or Pacific Northwest.",
@@ -100,17 +121,18 @@ func init() {

// Constants
const (
metaMtime = "X-Amz-Meta-Mtime" // the meta key to store mtime in
listChunkSize = 1024 // number of items to read at once
metaMtime = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime
listChunkSize = 1024 // number of items to read at once
maxRetries = 10 // number of retries to make of operations
)

// FsS3 represents a remote s3 server
type FsS3 struct {
c *s3.S3 // the connection to the s3 server
b *s3.Bucket // the connection to the bucket
bucket string // the bucket we are working on
perm s3.ACL // permissions for new buckets / objects
root string // root of the bucket - ignore all objects above this
c *s3.S3 // the connection to the s3 server
bucket string // the bucket we are working on
perm string // permissions for new buckets / objects
root string // root of the bucket - ignore all objects above this
locationConstraint string // location constraint of new buckets
}

// FsObjectS3 describes a s3 object
@@ -119,12 +141,12 @@ type FsObjectS3 struct {
//
// List will read everything but meta - to fill that in need to call
// readMetaData
s3 *FsS3 // what this object is part of
remote string // The remote path
etag string // md5sum of the object
bytes int64 // size of the object
lastModified time.Time // Last modified
meta s3.Headers // The object metadata if known - may be nil
s3 *FsS3 // what this object is part of
remote string // The remote path
etag string // md5sum of the object
bytes int64 // size of the object
lastModified time.Time // Last modified
meta map[string]*string // The object metadata if known - may be nil
}

// ------------------------------------------------------------
@@ -163,28 +185,42 @@ func s3Connection(name string) (*s3.S3, error) {
if secretAccessKey == "" {
return nil, errors.New("secret_access_key not found")
}
auth := aws.Auth{AccessKey: accessKeyId, SecretKey: secretAccessKey}
auth := credentials.NewStaticCredentials(accessKeyId, secretAccessKey, "")

// FIXME look through all the regions by name and use one of them if found

// Synthesize the region
s3Endpoint := fs.ConfigFile.MustValue(name, "endpoint")
if s3Endpoint == "" {
s3Endpoint = "https://s3.amazonaws.com/"
endpoint := fs.ConfigFile.MustValue(name, "endpoint")
region := fs.ConfigFile.MustValue(name, "region")
if region == "" && endpoint == "" {
endpoint = "https://s3.amazonaws.com/"
}
region := aws.Region{
Name: "s3",
S3Endpoint: s3Endpoint,
S3LocationConstraint: false,
if region == "" {
region = "us-east-1"
}
s3LocationConstraint := fs.ConfigFile.MustValue(name, "location_constraint")
if s3LocationConstraint != "" {
region.Name = s3LocationConstraint
region.S3LocationConstraint = true
awsConfig := aws.NewConfig().
WithRegion(region).
WithMaxRetries(maxRetries).
WithCredentials(auth).
WithEndpoint(endpoint).
WithHTTPClient(fs.Config.Client()).
WithS3ForcePathStyle(true)
// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
c := s3.New(awsConfig)
if region == "other-v2-signature" {
fs.Debug(name, "Using v2 auth")
signer := func(req *service.Request) {
// Ignore AnonymousCredentials object
if req.Service.Config.Credentials == credentials.AnonymousCredentials {
return
}
sign(accessKeyId, secretAccessKey, req.HTTPRequest)
}
c.Handlers.Sign.Clear()
c.Handlers.Sign.PushBack(service.BuildContentLength)
c.Handlers.Sign.PushBack(signer)
}

c := s3.New(auth, region)
c.Client = fs.Config.Client()
// Add user agent
c.Handlers.Build.PushBack(func(r *service.Request) {
r.HTTPRequest.Header.Set("User-Agent", fs.UserAgent)
})
return c, nil
}

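For illustration, the configuration this code synthesizes for a v2-only S3 clone would come out roughly like this (a sketch with a made-up endpoint, using the same aws-sdk-go builder calls as the diff above), with the custom sign() handler from s3/v2sign.go below replacing the SDK's default v4 signer:

	// Hypothetical values - the Ceph endpoint is invented for the example.
	awsConfig := aws.NewConfig().
		WithRegion("other-v2-signature").
		WithMaxRetries(maxRetries).
		WithCredentials(credentials.NewStaticCredentials("ACCESS_KEY", "SECRET_KEY", "")).
		WithEndpoint("http://ceph.example.com:7480").
		WithS3ForcePathStyle(true)
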
@@ -201,14 +237,18 @@ func NewFs(name, root string) (fs.Fs, error) {
f := &FsS3{
c: c,
bucket: bucket,
b: c.Bucket(bucket),
perm: s3.Private, // FIXME need user to specify
root: directory,
// FIXME perm: s3.Private, // FIXME need user to specify
root: directory,
locationConstraint: fs.ConfigFile.MustValue(name, "location_constraint"),
}
if f.root != "" {
f.root += "/"
// Check to see if the object exists
_, err = f.b.Head(directory, nil)
req := s3.HeadObjectInput{
Bucket: &f.bucket,
Key: &directory,
}
_, err = f.c.HeadObject(&req)
if err == nil {
remote := path.Base(directory)
f.root = path.Dir(directory)
@@ -222,27 +262,28 @@ func NewFs(name, root string) (fs.Fs, error) {
return fs.NewLimited(f, obj), nil
}
}
// f.listMultipartUploads()
return f, nil
}

// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsS3) newFsObjectWithInfo(remote string, info *s3.Key) fs.Object {
func (f *FsS3) newFsObjectWithInfo(remote string, info *s3.Object) fs.Object {
o := &FsObjectS3{
s3: f,
remote: remote,
}
if info != nil {
// Set info but not meta
var err error
o.lastModified, err = time.Parse(time.RFC3339, info.LastModified)
if err != nil {
fs.Log(o, "Failed to read last modified: %s", err)
if info.LastModified == nil {
fs.Log(o, "Failed to read last modified")
o.lastModified = time.Now()
} else {
o.lastModified = *info.LastModified
}
o.etag = info.ETag
o.bytes = info.Size
o.etag = aws.StringValue(info.ETag)
o.bytes = aws.Int64Value(info.Size)
} else {
err := o.readMetaData() // reads info and meta, returning an error
if err != nil {
@@ -263,50 +304,66 @@ func (f *FsS3) NewFsObject(remote string) fs.Object {
// list the objects into the function supplied
//
// If directories is set it only sends directories
func (f *FsS3) list(directories bool, fn func(string, *s3.Key)) {
func (f *FsS3) list(directories bool, fn func(string, *s3.Object)) {
maxKeys := int64(listChunkSize)
delimiter := ""
if directories {
delimiter = "/"
}
marker := ""
var marker *string
for {
objects, err := f.b.List(f.root, delimiter, marker, listChunkSize)
// FIXME need to implement ALL loop
req := s3.ListObjectsInput{
Bucket: &f.bucket,
Delimiter: &delimiter,
Prefix: &f.root,
MaxKeys: &maxKeys,
Marker: marker,
}
resp, err := f.c.ListObjects(&req)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't read bucket %q: %s", f.bucket, err)
fs.ErrorLog(f, "Couldn't read bucket %q: %s", f.bucket, err)
break
} else {
rootLength := len(f.root)
if directories {
for _, remote := range objects.CommonPrefixes {
for _, commonPrefix := range resp.CommonPrefixes {
if commonPrefix.Prefix == nil {
fs.Log(f, "Nil common prefix received")
continue
}
remote := *commonPrefix.Prefix
if !strings.HasPrefix(remote, f.root) {
fs.Log(f, "Odd name received %q", remote)
continue
}
remote := remote[rootLength:]
remote = remote[rootLength:]
if strings.HasSuffix(remote, "/") {
remote = remote[:len(remote)-1]
}
fn(remote, &s3.Key{Key: remote})
fn(remote, &s3.Object{Key: &remote})
}
} else {
for i := range objects.Contents {
object := &objects.Contents[i]
if !strings.HasPrefix(object.Key, f.root) {
fs.Log(f, "Odd name received %q", object.Key)
for _, object := range resp.Contents {
key := aws.StringValue(object.Key)
if !strings.HasPrefix(key, f.root) {
fs.Log(f, "Odd name received %q", key)
continue
}
remote := object.Key[rootLength:]
remote := key[rootLength:]
fn(remote, object)
}
}
}
if !objects.IsTruncated {
break
}
// Use NextMarker if set, otherwise use last Key
marker = objects.NextMarker
if marker == "" {
marker = objects.Contents[len(objects.Contents)-1].Key
if !aws.BoolValue(resp.IsTruncated) {
break
}
// Use NextMarker if set, otherwise use last Key
if resp.NextMarker == nil || *resp.NextMarker == "" {
marker = resp.Contents[len(resp.Contents)-1].Key
} else {
marker = resp.NextMarker
}
}
}
}
@@ -318,11 +375,11 @@ func (f *FsS3) List() fs.ObjectsChan {
// Return no objects at top level list
close(out)
fs.Stats.Error()
fs.Log(f, "Can't list objects at root - choose a bucket using lsd")
fs.ErrorLog(f, "Can't list objects at root - choose a bucket using lsd")
} else {
go func() {
defer close(out)
f.list(false, func(remote string, object *s3.Key) {
f.list(false, func(remote string, object *s3.Object) {
if fs := f.newFsObjectWithInfo(remote, object); fs != nil {
out <- fs
}
@@ -339,15 +396,16 @@ func (f *FsS3) ListDir() fs.DirChan {
// List the buckets
go func() {
defer close(out)
buckets, err := f.c.ListBuckets()
req := s3.ListBucketsInput{}
resp, err := f.c.ListBuckets(&req)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't list buckets: %s", err)
fs.ErrorLog(f, "Couldn't list buckets: %s", err)
} else {
for _, bucket := range buckets {
for _, bucket := range resp.Buckets {
out <- &fs.Dir{
Name: bucket.Name,
When: bucket.CreationDate,
Name: aws.StringValue(bucket.Name),
When: aws.TimeValue(bucket.CreationDate),
Bytes: -1,
Count: -1,
}
@@ -358,10 +416,14 @@ func (f *FsS3) ListDir() fs.DirChan {
// List the directories in the path in the bucket
go func() {
defer close(out)
f.list(true, func(remote string, object *s3.Key) {
f.list(true, func(remote string, object *s3.Object) {
size := int64(0)
if object.Size != nil {
size = *object.Size
}
out <- &fs.Dir{
Name: remote,
Bytes: object.Size,
Bytes: size,
Count: 0,
}
})
@@ -379,9 +441,18 @@ func (f *FsS3) Put(in io.Reader, remote string, modTime time.Time, size int64) (

// Mkdir creates the bucket if it doesn't exist
func (f *FsS3) Mkdir() error {
err := f.b.PutBucket(f.perm)
if err, ok := err.(*s3.Error); ok {
if err.Code == "BucketAlreadyOwnedByYou" {
req := s3.CreateBucketInput{
Bucket: &f.bucket,
ACL: &f.perm,
}
if f.locationConstraint != "" {
req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
LocationConstraint: &f.locationConstraint,
}
}
_, err := f.c.CreateBucket(&req)
if err, ok := err.(awserr.Error); ok {
if err.Code() == "BucketAlreadyOwnedByYou" {
return nil
}
}
@@ -392,7 +463,11 @@ func (f *FsS3) Mkdir() error {
//
// Returns an error if it isn't empty
func (f *FsS3) Rmdir() error {
return f.b.DelBucket()
req := s3.DeleteBucketInput{
Bucket: &f.bucket,
}
_, err := f.c.DeleteBucket(&req)
return err
}

// Return the precision
@@ -427,7 +502,7 @@ func (o *FsObjectS3) Md5sum() (string, error) {
etag := strings.Trim(strings.ToLower(o.etag), `"`)
// Check the etag is a valid md5sum
if !matchMd5.MatchString(etag) {
fs.Debug(o, "Invalid md5sum (probably multipart uploaded) - ignoring: %q", etag)
// fs.Debug(o, "Invalid md5sum (probably multipart uploaded) - ignoring: %q", etag)
return "", nil
}
return etag, nil
@@ -440,28 +515,17 @@ func (o *FsObjectS3) Size() int64 {

// readMetaData gets the metadata if it hasn't already been fetched
//
// if we get a 404 error then we retry a few times for eventual
// consistency reasons
//
// it also sets the info
func (o *FsObjectS3) readMetaData() (err error) {
if o.meta != nil {
return nil
}
var headers s3.Headers

// Try reading the metadata a few times (with exponential
// backoff) to get around eventual consistency on 404 error
for tries := uint(0); tries < 10; tries++ {
headers, err = o.s3.b.Head(o.s3.root+o.remote, nil)
if s3Err, ok := err.(*s3.Error); ok {
if s3Err.StatusCode == http.StatusNotFound {
time.Sleep(5 * time.Millisecond << tries)
continue
}
}
break
key := o.s3.root + o.remote
req := s3.HeadObjectInput{
Bucket: &o.s3.bucket,
Key: &key,
}
resp, err := o.s3.c.HeadObject(&req)
if err != nil {
fs.Debug(o, "Failed to read info: %s", err)
return err
@@ -469,19 +533,17 @@ func (o *FsObjectS3) readMetaData() (err error) {
var size int64
// Ignore missing Content-Length assuming it is 0
// Some versions of ceph do this due their apache proxies
if contentLength, ok := headers["Content-Length"]; ok {
size, err = strconv.ParseInt(contentLength, 10, 64)
if err != nil {
fs.Debug(o, "Failed to read size from: %q", headers)
return err
}
if resp.ContentLength != nil {
size = *resp.ContentLength
}
o.etag = headers["Etag"]
o.etag = aws.StringValue(resp.ETag)
o.bytes = size
o.meta = headers
if o.lastModified, err = time.Parse(http.TimeFormat, headers["Last-Modified"]); err != nil {
o.meta = resp.Metadata
if resp.LastModified == nil {
fs.Log(o, "Failed to read last modified from HEAD: %s", err)
o.lastModified = time.Now()
} else {
o.lastModified = *resp.LastModified
}
return nil
}
@@ -498,11 +560,11 @@ func (o *FsObjectS3) ModTime() time.Time {
}
// read mtime out of metadata if available
d, ok := o.meta[metaMtime]
if !ok {
if !ok || d == nil {
// fs.Debug(o, "No metadata")
return o.lastModified
}
modTime, err := swift.FloatStringToTime(d)
modTime, err := swift.FloatStringToTime(*d)
if err != nil {
fs.Log(o, "Failed to read mtime from object: %s", err)
return o.lastModified
@@ -515,14 +577,27 @@ func (o *FsObjectS3) SetModTime(modTime time.Time) {
err := o.readMetaData()
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to read metadata: %s", err)
fs.ErrorLog(o, "Failed to read metadata: %s", err)
return
}
o.meta[metaMtime] = swift.TimeToFloatString(modTime)
_, err = o.s3.b.Update(o.s3.root+o.remote, o.s3.perm, o.meta)
o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))

// Copy the object to itself to update the metadata
key := o.s3.root + o.remote
sourceKey := o.s3.bucket + "/" + key
directive := s3.MetadataDirectiveReplace // replace metadata with that passed in
req := s3.CopyObjectInput{
Bucket: &o.s3.bucket,
ACL: &o.s3.perm,
Key: &key,
CopySource: &sourceKey,
Metadata: o.meta,
MetadataDirective: &directive,
}
_, err = o.s3.c.CopyObject(&req)
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to update remote mtime: %s", err)
fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
}
}

@@ -533,21 +608,51 @@ func (o *FsObjectS3) Storable() bool {

// Open an object for read
func (o *FsObjectS3) Open() (in io.ReadCloser, err error) {
in, err = o.s3.b.GetReader(o.s3.root + o.remote)
return
key := o.s3.root + o.remote
req := s3.GetObjectInput{
Bucket: &o.s3.bucket,
Key: &key,
}
resp, err := o.s3.c.GetObject(&req)
if err != nil {
return nil, err
}
return resp.Body, nil
}

// Update the Object from in with modTime and size
func (o *FsObjectS3) Update(in io.Reader, modTime time.Time, size int64) error {
// Set the mtime in the headers
headers := s3.Headers{
metaMtime: swift.TimeToFloatString(modTime),
opts := s3manager.UploadOptions{
// PartSize: 64 * 1024 * 1024, use default
Concurrency: 2, // limit concurrency
LeavePartsOnError: false,
S3: o.s3.c,
}
uploader := s3manager.NewUploader(&opts)

// Set the mtime in the meta data
metadata := map[string]*string{
metaMtime: aws.String(swift.TimeToFloatString(modTime)),
}

_, err := o.s3.b.PutReaderHeaders(o.s3.root+o.remote, in, size, fs.MimeType(o), o.s3.perm, headers)
// Guess the content type
contentType := fs.MimeType(o)

key := o.s3.root + o.remote
req := s3manager.UploadInput{
Bucket: &o.s3.bucket,
ACL: &o.s3.perm,
Key: &key,
Body: in,
ContentType: &contentType,
Metadata: metadata,
//ContentLength: &size,
}
_, err := uploader.Upload(&req)
if err != nil {
return err
}

// Read the metadata from the newly created object
o.meta = nil // wipe old metadata
err = o.readMetaData()
@@ -556,7 +661,13 @@ func (o *FsObjectS3) Update(in io.Reader, modTime time.Time, size int64) error {

// Remove an object
func (o *FsObjectS3) Remove() error {
return o.s3.b.Del(o.s3.root + o.remote)
key := o.s3.root + o.remote
req := s3.DeleteObjectInput{
Bucket: &o.s3.bucket,
Key: &key,
}
_, err := o.s3.c.DeleteObject(&req)
return err
}

// Check the interfaces are satisfied

115
s3/v2sign.go
Normal file
@@ -0,0 +1,115 @@
// v2 signing

package s3

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/base64"
	"net/http"
	"sort"
	"strings"
	"time"
)

// URL parameters that need to be added to the signature
var s3ParamsToSign = map[string]struct{}{
	"acl":                          struct{}{},
	"location":                     struct{}{},
	"logging":                      struct{}{},
	"notification":                 struct{}{},
	"partNumber":                   struct{}{},
	"policy":                       struct{}{},
	"requestPayment":               struct{}{},
	"torrent":                      struct{}{},
	"uploadId":                     struct{}{},
	"uploads":                      struct{}{},
	"versionId":                    struct{}{},
	"versioning":                   struct{}{},
	"versions":                     struct{}{},
	"response-content-type":        struct{}{},
	"response-content-language":    struct{}{},
	"response-expires":             struct{}{},
	"response-cache-control":       struct{}{},
	"response-content-disposition": struct{}{},
	"response-content-encoding":    struct{}{},
}

// sign signs requests using v2 auth
//
// Cobbled together from goamz and aws-sdk-go
func sign(AccessKey, SecretKey string, req *http.Request) {
	// Set date
	date := time.Now().UTC().Format(time.RFC1123)
	req.Header.Set("Date", date)

	// Sort out URI
	uri := req.URL.Opaque
	if uri != "" {
		if strings.HasPrefix(uri, "//") {
			// Strip off //host/uri
			uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/")
			req.URL.Opaque = uri // reset to plain URI otherwise Ceph gets confused
		}
	} else {
		uri = req.URL.Path
	}
	if uri == "" {
		uri = "/"
	}

	// Look through headers of interest
	var md5 string
	var contentType string
	var headersToSign []string
	for k, v := range req.Header {
		k = strings.ToLower(k)
		switch k {
		case "content-md5":
			md5 = v[0]
		case "content-type":
			contentType = v[0]
		default:
			if strings.HasPrefix(k, "x-amz-") {
				vall := strings.Join(v, ",")
				headersToSign = append(headersToSign, k+":"+vall)
			}
		}
	}
	// Make headers of interest into canonical string
	var joinedHeadersToSign string
	if len(headersToSign) > 0 {
		sort.StringSlice(headersToSign).Sort()
		joinedHeadersToSign = strings.Join(headersToSign, "\n") + "\n"
	}

	// Look for query parameters which need to be added to the signature
	params := req.URL.Query()
	var queriesToSign []string
	for k, vs := range params {
		if _, ok := s3ParamsToSign[k]; ok {
			for _, v := range vs {
				if v == "" {
					queriesToSign = append(queriesToSign, k)
				} else {
					queriesToSign = append(queriesToSign, k+"="+v)
				}
			}
		}
	}
	// Add query parameters to URI
	if len(queriesToSign) > 0 {
		sort.StringSlice(queriesToSign).Sort()
		uri += "?" + strings.Join(queriesToSign, "&")
	}

	// Make signature
	payload := req.Method + "\n" + md5 + "\n" + contentType + "\n" + date + "\n" + joinedHeadersToSign + uri
	hash := hmac.New(sha1.New, []byte(SecretKey))
	hash.Write([]byte(payload))
	signature := make([]byte, base64.StdEncoding.EncodedLen(hash.Size()))
	base64.StdEncoding.Encode(signature, hash.Sum(nil))

	// Set signature in request
	req.Header.Set("Authorization", "AWS "+AccessKey+":"+string(signature))
}

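A quick usage sketch of the signer above (hypothetical endpoint and keys, purely illustrative): the string it signs is Method, Content-MD5, Content-Type, Date, the canonicalised x-amz-* headers, and the URI, each joined by newlines, and the base64 HMAC-SHA1 of that goes into the Authorization header:

	// Hypothetical example - endpoint and keys are invented.
	req, _ := http.NewRequest("GET", "http://ceph.example.com:7480/mybucket/mykey", nil)
	sign("AKIDEXAMPLE", "mysecret", req)
	fmt.Println(req.Header.Get("Authorization")) // AWS AKIDEXAMPLE:<base64 HMAC-SHA1>
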
@@ -230,7 +230,7 @@ func (f *FsSwift) list(directories bool, fn func(string, *swift.Object)) {
})
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't read container %q: %s", f.container, err)
fs.ErrorLog(f, "Couldn't read container %q: %s", f.container, err)
}
}

@@ -241,7 +241,7 @@ func (f *FsSwift) List() fs.ObjectsChan {
// Return no objects at top level list
close(out)
fs.Stats.Error()
fs.Log(f, "Can't list objects at root - choose a container using lsd")
fs.ErrorLog(f, "Can't list objects at root - choose a container using lsd")
} else {
// List the objects
go func() {
@@ -266,7 +266,7 @@ func (f *FsSwift) ListDir() fs.DirChan {
containers, err := f.c.ContainersAll(nil)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't list containers: %v", err)
fs.ErrorLog(f, "Couldn't list containers: %v", err)
} else {
for _, container := range containers {
out <- &fs.Dir{
@@ -393,14 +393,14 @@ func (o *FsObjectSwift) SetModTime(modTime time.Time) {
err := o.readMetaData()
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to read metadata: %s", err)
fs.ErrorLog(o, "Failed to read metadata: %s", err)
return
}
o.meta.SetModTime(modTime)
err = o.swift.c.ObjectUpdate(o.swift.container, o.swift.root+o.remote, o.meta.ObjectHeaders())
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to update remote mtime: %s", err)
fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
}
}