
Compare commits


22 Commits
v1.17 ... v1.18

Author SHA1 Message Date
Nick Craig-Wood
2fcc18779b Version v1.18 2015-08-17 17:59:37 +01:00
Nick Craig-Wood
96cc3e5a0b Add FAQ to menu and documentation fixes 2015-08-17 17:26:37 +01:00
Leonid Shalupov
cc8fe0630c Change email of Leonid Shalupov - fixes #94 2015-08-17 17:26:36 +01:00
Nick Craig-Wood
1d9e76bb0f dropbox: remove datastore - Fixes #55 #84
This means that dropbox no longer stores MD5SUMs and modified times.

Fixup the tests so that blank MD5SUMs are ignored, and that if
Precision is set to a fs.ModTimeNotSupported, ModTimes can be ignored too.

This opens the door for other FSs which don't support metadata easily.
2015-08-17 17:26:36 +01:00
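A minimal sketch of the comparison rules this commit introduces, using the `Md5sumsEqual` helper and `ModTimeNotSupported` constant that appear in the diffs below (illustrative, not rclone's exact code):

```
package main

import (
	"fmt"
	"time"
)

// ModTimeNotSupported is a very large precision used to signal that a
// backend cannot store modification times (see the fs/fs.go diff below).
const ModTimeNotSupported = 100 * 365 * 24 * time.Hour

// Md5sumsEqual treats a blank MD5SUM on either side as a match, so
// backends that don't store checksums still pass the checks.
func Md5sumsEqual(src, dst string) bool {
	if src == "" || dst == "" {
		return true
	}
	return src == dst
}

// modTimesEqual ignores mod times entirely when the backend reports
// ModTimeNotSupported as its precision.
func modTimesEqual(a, b time.Time, precision time.Duration) bool {
	if precision == ModTimeNotSupported {
		return true
	}
	dt := a.Sub(b)
	return dt < precision && dt > -precision
}

func main() {
	fmt.Println(Md5sumsEqual("", "d41d8cd98f00b204e9800998ecf8427e"))        // true
	fmt.Println(modTimesEqual(time.Now(), time.Time{}, ModTimeNotSupported)) // true
}
```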
Nick Craig-Wood
337110b7a0 s3: remove verbose debug about invalid md5sums for multipart upload 2015-08-16 18:14:22 +01:00
Nick Craig-Wood
83733205f6 Fix favicon - fixes #79 2015-08-16 15:51:39 +01:00
Nick Craig-Wood
d8306938a1 drive: Add --drive-use-trash flag so rclone trashes instead of deletes - fixes #82 2015-08-16 15:51:39 +01:00
Nick Craig-Wood
88ea8b305d drive: Add "Forbidden to download" message for files with no downloadURL - fixes #95 2015-08-16 15:51:39 +01:00
Nick Craig-Wood
e2f4d7b5e3 FAQ entry about using rclone from multiple places - fixes #78 2015-08-16 15:51:39 +01:00
Nick Craig-Wood
8140869767 s3: Update documentation after transition to new SDK - also fixes #47 2015-08-16 15:51:38 +01:00
Nick Craig-Wood
6a8de87116 s3: make v2 signatures work for ceph 2015-08-16 15:51:38 +01:00
Nick Craig-Wood
0da6f24221 s3: use official github.com/aws/aws-sdk-go including multipart upload - fixes #101 2015-08-16 15:51:04 +01:00
Nick Craig-Wood
771e60bd07 Show errors when reading the config file 2015-08-15 17:15:02 +01:00
Nick Craig-Wood
40b3c4883f testing: add -remote flag to allow unit tests to run on other remotes
Use this on the individual fs unit tests, eg

    cd s3
    go test -v -remote mytestS3:
2015-08-15 15:40:18 +01:00
Nick Craig-Wood
e320f4a988 Add Shimon Doodkin to contributors 2015-08-10 11:25:21 +01:00
Shimon Doodkin
5835f15f21 linux installation instructions binary download 2015-08-10 11:22:34 +01:00
Leonid Shalupov
67c311233b Run travis builds on container-based infrastructure
See http://docs.travis-ci.com/user/migrating-from-legacy/
TL;DR It starts much faster
2015-08-10 11:17:54 +01:00
Leonid Shalupov
3fcff32524 do not print stats in quiet mode - fixes #70
...unless there were errors or a stats interval was requested.

Add fs.ErrorLog to differentiate between Logs which should be
suppressed and errors which shouldn't.
2015-08-10 11:17:54 +01:00
Nick Craig-Wood
472f065ce7 Make sizes in Dropbox 64 bit - fixes #80 2015-08-10 11:17:54 +01:00
Nick Craig-Wood
6c6d7eb770 Drop support for Go 1.1 2015-08-10 11:17:54 +01:00
Nick Craig-Wood
c646ada3c3 docs: add FAQ and update docs 2015-08-10 11:17:43 +01:00
Nick Craig-Wood
f55359b3ac Fix created directories not obeying umask 2015-07-29 10:21:12 +01:00
30 changed files with 730 additions and 525 deletions

View File

@@ -1,7 +1,7 @@
language: go
sudo: false
go:
- 1.1.2
- 1.2.2
- 1.3.3
- 1.4

View File

@@ -13,4 +13,5 @@ Contributors
------------
* Alex Couper <amcouper@gmail.com>
* Leonid Shalupov <shalupov@diverse.org.ru>
* Leonid Shalupov <leonid@shalupov.com>
* Shimon Doodkin <helpmepro1@gmail.com>

View File

@@ -21,6 +21,9 @@ about directories.
You can work round this to some extent with the `purge` command which
will delete everything under the path, **including** empty directories.
This may be fixed at some point in
[Issue #100](https://github.com/ncw/rclone/issues/100)
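For example (path illustrative):

    rclone purge remote:path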
### Directory timestamps aren't preserved ###
For the same reason as the above, rclone doesn't have a concept of a

View File

@@ -1,12 +1,34 @@
---
title: "Documentation"
description: "Rclone Changelog"
date: "2015-06-14"
date: "2015-08-17"
---
Changelog
---------
* v1.18 - 2015-08-17
* drive
* Add `--drive-use-trash` flag so rclone trashes instead of deletes
* Add "Forbidden to download" message for files with no downloadURL
* dropbox
* Remove datastore
* This was deprecated and it caused a lot of problems
* Modification times and MD5SUMs no longer stored
* Fix uploading files > 2GB
* s3
* use official AWS SDK from github.com/aws/aws-sdk-go
* **NB** will most likely require you to delete and recreate remote
* enable multipart upload which enables files > 5GB
* tested with Ceph / RadosGW / S3 emulation
* many thanks to Sam Liston and Brian Haymore at the [Utah
Center for High Performance Computing](https://www.chpc.utah.edu/) for a Ceph test account
* misc
* Show errors when reading the config file
* Do not print stats in quiet mode - thanks Leonid Shalupov
* Add FAQ
* Fix created directories not obeying umask
* Linux installation instructions - thanks Shimon Doodkin
* v1.17 - 2015-06-14
* dropbox: fix case insensitivity issues - thanks Leonid Shalupov
* v1.16 - 2015-06-09

View File

@@ -2,34 +2,34 @@
title: "Rclone downloads"
description: "Download rclone binaries for your OS."
type: page
date: "2015-06-14"
date: "2015-08-17"
---
Rclone Download v1.17
Rclone Download v1.18
=====================
* Windows
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.17-windows-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.17-windows-amd64.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.18-windows-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.18-windows-amd64.zip)
* OSX
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.17-osx-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.17-osx-amd64.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.18-osx-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.18-osx-amd64.zip)
* Linux
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.17-linux-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.17-linux-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.17-linux-arm.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.18-linux-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.18-linux-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.18-linux-arm.zip)
* FreeBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.17-freebsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.17-freebsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.17-freebsd-arm.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.18-freebsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.18-freebsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.18-freebsd-arm.zip)
* NetBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.17-netbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.17-netbsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.17-netbsd-arm.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.18-netbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.18-netbsd-amd64.zip)
* [ARM - 32 Bit](http://downloads.rclone.org/rclone-v1.18-netbsd-arm.zip)
* OpenBSD
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.17-openbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.17-openbsd-amd64.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.18-openbsd-386.zip)
* [AMD64 - 64 Bit](http://downloads.rclone.org/rclone-v1.18-openbsd-amd64.zip)
* Plan 9
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.17-plan9-386.zip)
* [386 - 32 Bit](http://downloads.rclone.org/rclone-v1.18-plan9-386.zip)
Older downloads can be found [here](http://downloads.rclone.org/)

View File

@@ -85,6 +85,12 @@ was
* They are deleted after 30 days or 100 revisions (whatever comes first).
* They do not count towards a user storage quota.
### Deleting files ###
By default rclone will delete files permanently when requested. If
sending them to the trash is required instead then use the
`--drive-use-trash` flag.
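For example, deletions made by a sync will then go to the trash (paths
illustrative):

    rclone --drive-use-trash sync /home/source remote:backup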
### Limitations ###
Drive has quite a lot of rate limiting. This causes rclone to be

View File

@@ -71,21 +71,12 @@ To copy a local directory to a dropbox directory called backup
rclone copy /home/source remote:backup
### Modified time ###
### Modified time and MD5SUMs ###
Md5sums and timestamps in RFC3339 format accurate to 1ns are stored in
a Dropbox datastore called "rclone".
Dropbox doesn't have the capability of storing modification times or
MD5SUMs so syncs will effectively have the `--size-only` flag set.
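In other words, a sync to a Dropbox remote compares by size alone, much as
if it were run with (paths illustrative):

    rclone sync --size-only /home/source remote:backup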
### Limitations ###
Dropbox datastores are limited to 100,000 rows so this is the maximum
number of files rclone can manage on Dropbox.
Dropbox is case sensitive which can sometimes cause duplicated files.
If you use the desktop sync tool and rclone on the same files then the
md5sums and modification times may get out of sync as far as rclone is
concerned. This will cause `Corrupted on transfer: md5sums differ`
error message when fetching files. You can work around this by using
the `--size-only` flag to ignore the md5sums and modification times
for these files.
Note that Dropbox is case sensitive so you can't have a file called
"Hello.doc" and one called "hello.doc".

docs/content/faq.md (new file, 50 lines)
View File

@@ -0,0 +1,50 @@
---
title: "FAQ"
description: "Rclone Frequently Asked Questions"
date: "2015-06-06"
---
Frequently Asked Questions
--------------------------
### Do all cloud storage systems support all rclone commands ###
Yes they do. All the rclone commands (eg `sync`, `copy` etc) will
work on all the remote storage systems.
### Can rclone sync directly from drive to s3 ###
Rclone can sync between two remote cloud storage systems just fine.
Note that it effectively downloads the file and uploads it again, so
the node running rclone would need to have lots of bandwidth.
The syncs would be incremental (on a file by file basis).
Eg
rclone sync drive:Folder s3:bucket
### Using rclone from multiple locations at the same time ###
You can use rclone from multiple places at the same time if you choose
a different subdirectory for the output, eg
```
Server A> rclone sync /tmp/whatever remote:ServerA
Server B> rclone sync /tmp/whatever remote:ServerB
```
If you sync to the same directory then you should use rclone copy
otherwise the two rclones may delete each other's files, eg
```
Server A> rclone copy /tmp/whatever remote:Backup
Server B> rclone copy /tmp/whatever remote:Backup
```
The file names you upload from Server A and Server B should be
different in this case, otherwise some file systems (eg Drive) may
make duplicates.

View File

@@ -23,3 +23,17 @@ this (remove `-f` if using go < 1.4)
See the [Usage section](/docs/) of the docs for how to use rclone, or
run `rclone -h`.
Linux binary download installation example
-------
unzip rclone-v1.17-linux-amd64.zip
cd rclone-v1.17-linux-amd64
#copy binary file
sudo cp rclone /usr/sbin/
sudo chown root:root /usr/sbin/rclone
sudo chmod 755 /usr/sbin/rclone
#install manpage
sudo mkdir -p /usr/local/share/man/man1
sudo cp rclone.1 /usr/local/share/man/man1/
sudo mandb

View File

@@ -27,41 +27,55 @@ Choose a number from below
1) swift
2) s3
3) local
4) drive
4) google cloud storage
5) dropbox
6) drive
type> 2
AWS Access Key ID.
access_key_id> accesskey
AWS Secret Access Key (password).
secret_access_key> secretaccesskey
Endpoint for S3 API.
Region to connect to.
Choose a number from below, or type in your own value
* The default endpoint - a good choice if you are unsure.
* US Region, Northern Virginia or Pacific Northwest.
* Leave location constraint empty.
1) https://s3.amazonaws.com/
* US Region, Northern Virginia only.
* Leave location constraint empty.
2) https://s3-external-1.amazonaws.com
1) us-east-1
* US West (Oregon) Region
* Needs location constraint us-west-2.
2) us-west-2
[snip]
* South America (Sao Paulo) Region
* Needs location constraint sa-east-1.
9) https://s3-sa-east-1.amazonaws.com
endpoint> 1
Location constraint - must be set to match the Endpoint.
9) sa-east-1
* If using an S3 clone that only understands v2 signatures - eg Ceph - set this and make sure you set the endpoint.
10) other-v2-signature
* If using an S3 clone that understands v4 signatures set this and make sure you set the endpoint.
11) other-v4-signature
region> 1
Endpoint for S3 API.
Leave blank if using AWS to use the default endpoint for the region.
Specify if using an S3 clone such as Ceph.
endpoint>
Location constraint - must be set to match the Region. Used when creating buckets only.
Choose a number from below, or type in your own value
* Empty for US Region, Northern Virginia or Pacific Northwest.
1)
* US West (Oregon) Region.
2) us-west-2
* US West (Northern California) Region.
3) us-west-1
* EU (Ireland) Region.
4) eu-west-1
[snip]
* South America (Sao Paulo) Region.
9) sa-east-1
location_constraint> 1
Remote config
--------------------
[remote]
access_key_id = accesskey
secret_access_key = secretaccesskey
endpoint = https://s3.amazonaws.com/
region = us-east-1
endpoint =
location_constraint =
--------------------
y) Yes this is OK
@@ -104,3 +118,48 @@ files in the bucket.
The modified time is stored as metadata on the object as
`X-Amz-Meta-Mtime` as floating point since the epoch accurate to 1 ns.
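A minimal sketch (not rclone's actual code) of that representation — a decimal string of seconds since the epoch, kept to integer second/nanosecond arithmetic so the 1 ns accuracy survives the round trip:

```
package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

// encodeMtime renders a time as seconds since the epoch with
// nanosecond precision, e.g. "1439826097.123456789".
func encodeMtime(t time.Time) string {
	return fmt.Sprintf("%d.%09d", t.Unix(), t.Nanosecond())
}

// decodeMtime parses the string produced by encodeMtime.
func decodeMtime(s string) (time.Time, error) {
	parts := strings.SplitN(s, ".", 2)
	secs, err := strconv.ParseInt(parts[0], 10, 64)
	if err != nil {
		return time.Time{}, err
	}
	var ns int64
	if len(parts) == 2 {
		frac := (parts[1] + "000000000")[:9] // pad/truncate to 9 digits
		ns, err = strconv.ParseInt(frac, 10, 64)
		if err != nil {
			return time.Time{}, err
		}
	}
	return time.Unix(secs, ns), nil
}

func main() {
	now := time.Now()
	meta := encodeMtime(now)
	fmt.Println("X-Amz-Meta-Mtime:", meta)
	back, _ := decodeMtime(meta)
	fmt.Println("round trip exact:", back.Equal(now))
}
```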
### Multipart uploads ###
rclone supports multipart uploads with S3 which means that it can
upload files bigger than 5GB. Note that files uploaded with multipart
upload don't have an MD5SUM.
### Ceph ###
Ceph is an object storage system which presents an Amazon S3 interface.
To use rclone with ceph, you need to set the following parameters in
the config.
```
access_key_id = Whatever
secret_access_key = Whatever
endpoint = https://ceph.endpoint.goes.here/
region = other-v2-signature
```
Note also that Ceph sometimes puts `/` in the passwords it gives
users. If you read the secret access key using the command line tools
you will get a JSON blob with the `/` escaped as `\/`. Make sure you
only write `/` in the secret access key.
Eg the dump from Ceph looks something like this (irrelevant keys
removed).
```
{
"user_id": "xxx",
"display_name": "xxxx",
"keys": [
{
"user": "xxx",
"access_key": "xxxxxx",
"secret_key": "xxxxxx\/xxxx"
}
],
}
```
Because this is a json dump, it is encoding the `/` as `\/`, so if you
use the secret key as `xxxxxx/xxxx` it will work fine.
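To confirm the escaping behaviour, a small standalone check (illustrative values):

```
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// The escaped form as it appears in the Ceph dump above.
	blob := []byte(`{"access_key": "xxxxxx", "secret_key": "xxxxxx\/xxxx"}`)
	var keys struct {
		AccessKey string `json:"access_key"`
		SecretKey string `json:"secret_key"`
	}
	if err := json.Unmarshal(blob, &keys); err != nil {
		panic(err)
	}
	// JSON unescaping turns `\/` back into a plain `/`.
	fmt.Println(keys.SecretKey) // xxxxxx/xxxx
}
```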

View File

@@ -3,6 +3,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="description" content="{{ .Description }}">
<meta name="author" content="Nick Craig-Wood">
<link rel="shortcut icon" type="image/png" href="/img/rclone-16x16.png"/>
<script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),

View File

@@ -19,6 +19,7 @@
<li><a href="/docs/"><i class="fa fa-book"></i> Usage</a></li>
<li><a href="/changelog/"><i class="fa fa-book"></i> Changelog</a></li>
<li><a href="/bugs/"><i class="fa fa-book"></i> Bugs</a></li>
<li><a href="/faq/"><i class="fa fa-book"></i> FAQ</a></li>
<li><a href="/licence/"><i class="fa fa-book"></i> Licence</a></li>
<li><a href="/authors/"><i class="fa fa-book"></i> Authors</a></li>
</ul>

docs/static/img/rclone-16x16.png (new binary file, 1019 B, not shown)

View File

@@ -39,6 +39,7 @@ const (
var (
// Flags
driveFullList = pflag.BoolP("drive-full-list", "", true, "Use a full listing for directory list. More data but usually quicker.")
driveUseTrash = pflag.BoolP("drive-use-trash", "", false, "Send files to the trash instead of deleting permanently.")
// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
chunkSize = fs.SizeSuffix(256 * 1024)
@@ -681,7 +682,7 @@ func (f *FsDrive) List() fs.ObjectsChan {
err := f.findRoot(false)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't find root: %s", err)
fs.ErrorLog(f, "Couldn't find root: %s", err)
} else {
if f.root == "" && *driveFullList {
err = f.listDirFull(f.rootId, "", out)
@@ -690,7 +691,7 @@ func (f *FsDrive) List() fs.ObjectsChan {
}
if err != nil {
fs.Stats.Error()
fs.Log(f, "List failed: %s", err)
fs.ErrorLog(f, "List failed: %s", err)
}
}
}()
@@ -705,7 +706,7 @@ func (f *FsDrive) ListDir() fs.DirChan {
err := f.findRoot(false)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't find root: %s", err)
fs.ErrorLog(f, "Couldn't find root: %s", err)
} else {
_, err := f.listAll(f.rootId, "", true, false, func(item *drive.File) bool {
dir := &fs.Dir{
@@ -719,7 +720,7 @@ func (f *FsDrive) ListDir() fs.DirChan {
})
if err != nil {
fs.Stats.Error()
fs.Log(f, "ListDir failed: %s", err)
fs.ErrorLog(f, "ListDir failed: %s", err)
}
}
}()
@@ -803,7 +804,11 @@ func (f *FsDrive) Rmdir() error {
// Delete the directory if it isn't the root
if f.root != "" {
f.call(&err, func() {
err = f.svc.Files.Delete(f.rootId).Do()
if *driveUseTrash {
_, err = f.svc.Files.Trash(f.rootId).Do()
} else {
err = f.svc.Files.Delete(f.rootId).Do()
}
})
if err != nil {
return err
@@ -832,7 +837,11 @@ func (f *FsDrive) Purge() error {
return err
}
f.call(&err, func() {
err = f.svc.Files.Delete(f.rootId).Do()
if *driveUseTrash {
_, err = f.svc.Files.Trash(f.rootId).Do()
} else {
err = f.svc.Files.Delete(f.rootId).Do()
}
})
f.resetRoot()
if err != nil {
@@ -934,7 +943,7 @@ func (o *FsObjectDrive) SetModTime(modTime time.Time) {
err := o.readMetaData()
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to read metadata: %s", err)
fs.ErrorLog(o, "Failed to read metadata: %s", err)
return
}
// New metadata
@@ -948,7 +957,7 @@ func (o *FsObjectDrive) SetModTime(modTime time.Time) {
})
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to update remote mtime: %s", err)
fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
return
}
// Update info from read data
@@ -962,6 +971,9 @@ func (o *FsObjectDrive) Storable() bool {
// Open an object for read
func (o *FsObjectDrive) Open() (in io.ReadCloser, err error) {
if o.url == "" {
return nil, fmt.Errorf("Forbidden to download - check sharing permission")
}
req, err := http.NewRequest("GET", o.url, nil)
if err != nil {
return nil, err
@@ -1020,7 +1032,11 @@ func (o *FsObjectDrive) Update(in io.Reader, modTime time.Time, size int64) erro
func (o *FsObjectDrive) Remove() error {
var err error
o.drive.call(&err, func() {
err = o.drive.svc.Files.Delete(o.id).Do()
if *driveUseTrash {
_, err = o.drive.svc.Files.Trash(o.id).Do()
} else {
err = o.drive.svc.Files.Delete(o.id).Do()
}
})
return err
}

View File

@@ -6,11 +6,6 @@ Limitations of dropbox
File system is case insensitive
The datastore is limited to 100,000 records which therefore is the
limit of the number of files that rclone can use on dropbox.
FIXME only open datastore if we need it?
FIXME Getting this sometimes
Failed to copy: Upload failed: invalid character '<' looking for beginning of value
This is a JSON decode error - from Update / UploadByChunk
@@ -38,10 +33,10 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"path"
"strings"
"sync"
"time"
"github.com/ncw/rclone/fs"
@@ -50,17 +45,10 @@ import (
// Constants
const (
rcloneAppKey = "5jcck7diasz0rqy"
rcloneAppSecret = "1n9m04y2zx7bf26"
uploadChunkSize = 64 * 1024 // chunk size for upload
metadataLimit = dropbox.MetadataLimitDefault // max items to fetch at once
datastoreName = "rclone"
tableName = "metadata"
md5sumField = "md5sum"
mtimeField = "mtime"
maxCommitRetries = 5
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
rcloneAppKey = "5jcck7diasz0rqy"
rcloneAppSecret = "1n9m04y2zx7bf26"
uploadChunkSize = 64 * 1024 // chunk size for upload
metadataLimit = dropbox.MetadataLimitDefault // max items to fetch at once
)
// Register with Fs
@@ -111,24 +99,19 @@ func configHelper(name string) {
// FsDropbox represents a remote dropbox server
type FsDropbox struct {
db *dropbox.Dropbox // the connection to the dropbox server
root string // the path we are working on
slashRoot string // root with "/" prefix, lowercase
slashRootSlash string // root with "/" prefix and postfix, lowercase
datastoreManager *dropbox.DatastoreManager
datastore *dropbox.Datastore
table *dropbox.Table
datastoreMutex sync.Mutex // lock this when using the datastore
datastoreErr error // pending errors on the datastore
db *dropbox.Dropbox // the connection to the dropbox server
root string // the path we are working on
slashRoot string // root with "/" prefix, lowercase
slashRootSlash string // root with "/" prefix and postfix, lowercase
}
// FsObjectDropbox describes a dropbox object
type FsObjectDropbox struct {
dropbox *FsDropbox // what this object is part of
remote string // The remote path
md5sum string // md5sum of the object
bytes int64 // size of the object
modTime time.Time // time it was last modified
dropbox *FsDropbox // what this object is part of
remote string // The remote path
bytes int64 // size of the object
modTime time.Time // time it was last modified
hasMetadata bool // metadata is valid
}
// ------------------------------------------------------------
@@ -170,12 +153,6 @@ func NewFs(name, root string) (fs.Fs, error) {
// Authorize the client
db.SetAccessToken(token)
// Make a db to store rclone metadata in
f.datastoreManager = db.NewDatastoreManager()
// Open the datastore in the background
go f.openDataStore()
// See if the root is actually an object
entry, err := f.db.Metadata(f.slashRoot, false, false, "", "", metadataLimit)
if err == nil && !entry.IsDir {
@@ -205,30 +182,6 @@ func (f *FsDropbox) setRoot(root string) {
}
}
// Opens the datastore in f
func (f *FsDropbox) openDataStore() {
f.datastoreMutex.Lock()
defer f.datastoreMutex.Unlock()
fs.Debug(f, "Open rclone datastore")
// Open the rclone datastore
var err error
f.datastore, err = f.datastoreManager.OpenDatastore(datastoreName)
if err != nil {
fs.Log(f, "Failed to open datastore: %v", err)
f.datastoreErr = err
return
}
// Get the table we are using
f.table, err = f.datastore.GetTable(tableName)
if err != nil {
fs.Log(f, "Failed to open datastore table: %v", err)
f.datastoreErr = err
return
}
fs.Debug(f, "Open rclone datastore finished")
}
// Return an FsObject from a path
//
// May return nil if an error occurred
@@ -262,7 +215,7 @@ func (f *FsDropbox) stripRoot(path string) *string {
if !strings.HasPrefix(lowercase, f.slashRootSlash) {
fs.Stats.Error()
fs.Log(f, "Path '%s' is not under root '%s'", path, f.slashRootSlash)
fs.ErrorLog(f, "Path '%s' is not under root '%s'", path, f.slashRootSlash)
return nil
}
@@ -281,11 +234,11 @@ func (f *FsDropbox) list(out fs.ObjectsChan) {
deltaPage, err := f.db.Delta(cursor, f.slashRoot)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't list: %s", err)
fs.ErrorLog(f, "Couldn't list: %s", err)
break
} else {
if deltaPage.Reset && cursor != "" {
fs.Log(f, "Unexpected reset during listing - try again")
fs.ErrorLog(f, "Unexpected reset during listing - try again")
fs.Stats.Error()
break
}
@@ -295,17 +248,10 @@ func (f *FsDropbox) list(out fs.ObjectsChan) {
entry := deltaEntry.Entry
if entry == nil {
// This notifies of a deleted object
fs.Debug(f, "Deleting metadata for %q", deltaEntry.Path)
key := metadataKey(deltaEntry.Path) // Path is lowercased
err := f.deleteMetadata(key)
if err != nil {
fs.Debug(f, "Failed to delete metadata for %q", deltaEntry.Path)
// Don't accumulate Error here
}
} else {
if len(entry.Path) <= 1 || entry.Path[0] != '/' {
fs.Stats.Error()
fs.Log(f, "dropbox API inconsistency: a path should always start with a slash and be at least 2 characters: %s", entry.Path)
fs.ErrorLog(f, "dropbox API inconsistency: a path should always start with a slash and be at least 2 characters: %s", entry.Path)
continue
}
@@ -374,7 +320,7 @@ func (f *FsDropbox) ListDir() fs.DirChan {
entry, err := f.db.Metadata(f.root, true, false, "", "", metadataLimit)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't list directories in root: %s", err)
fs.ErrorLog(f, "Couldn't list directories in root: %s", err)
} else {
for i := range entry.Contents {
entry := &entry.Contents[i]
@@ -388,7 +334,7 @@ func (f *FsDropbox) ListDir() fs.DirChan {
out <- &fs.Dir{
Name: *name,
When: time.Time(entry.ClientMtime),
Bytes: int64(entry.Bytes),
Bytes: entry.Bytes,
Count: -1,
}
}
@@ -452,8 +398,8 @@ func (f *FsDropbox) Rmdir() error {
}
// Return the precision
func (fs *FsDropbox) Precision() time.Duration {
return time.Nanosecond
func (f *FsDropbox) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Purge deletes all the files and the container
@@ -462,81 +408,11 @@ func (fs *FsDropbox) Precision() time.Duration {
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *FsDropbox) Purge() error {
// Delete metadata first
var wg sync.WaitGroup
to_be_deleted := f.List()
wg.Add(fs.Config.Transfers)
for i := 0; i < fs.Config.Transfers; i++ {
go func() {
defer wg.Done()
for dst := range to_be_deleted {
o := dst.(*FsObjectDropbox)
o.deleteMetadata()
}
}()
}
wg.Wait()
// Let dropbox delete the filesystem tree
_, err := f.db.Delete(f.slashRoot)
return err
}
// Tries the transaction in fn then calls commit, repeating until retry limit
//
// Holds datastore mutex while in progress
func (f *FsDropbox) transaction(fn func() error) error {
f.datastoreMutex.Lock()
defer f.datastoreMutex.Unlock()
if f.datastoreErr != nil {
return f.datastoreErr
}
var err error
for i := 1; i <= maxCommitRetries; i++ {
err = fn()
if err != nil {
return err
}
err = f.datastore.Commit()
if err == nil {
break
}
fs.Debug(f, "Retrying transaction %d/%d", i, maxCommitRetries)
}
if err != nil {
return fmt.Errorf("Failed to commit metadata changes: %s", err)
}
return nil
}
// Deletes the metadata associated with this key
func (f *FsDropbox) deleteMetadata(key string) error {
return f.transaction(func() error {
record, err := f.table.Get(key)
if err != nil {
return fmt.Errorf("Couldn't get record: %s", err)
}
if record == nil {
return nil
}
record.DeleteRecord()
return nil
})
}
// Reads the record attached to key
//
// Holds datastore mutex while in progress
func (f *FsDropbox) readRecord(key string) (*dropbox.Record, error) {
f.datastoreMutex.Lock()
defer f.datastoreMutex.Unlock()
if f.datastoreErr != nil {
return nil, f.datastoreErr
}
return f.table.Get(key)
}
// ------------------------------------------------------------
// Return the parent Fs
@@ -558,33 +434,8 @@ func (o *FsObjectDropbox) Remote() string {
}
// Md5sum returns the Md5sum of an object returning a lowercase hex string
//
// FIXME has to download the file!
func (o *FsObjectDropbox) Md5sum() (string, error) {
if o.md5sum != "" {
return o.md5sum, nil
}
err := o.readMetaData()
if err != nil {
fs.Log(o, "Failed to read metadata: %s", err)
return "", fmt.Errorf("Failed to read metadata: %s", err)
}
// For pre-existing files which have no md5sum can read it and set it?
// in, err := o.Open()
// if err != nil {
// return "", err
// }
// defer in.Close()
// hash := md5.New()
// _, err = io.Copy(hash, in)
// if err != nil {
// return "", err
// }
// o.md5sum = fmt.Sprintf("%x", hash.Sum(nil))
return o.md5sum, nil
return "", nil
}
// Size returns the size of an object in bytes
@@ -596,8 +447,9 @@ func (o *FsObjectDropbox) Size() int64 {
//
// This isn't a complete set of metadata and has an inaccurate date
func (o *FsObjectDropbox) setMetadataFromEntry(info *dropbox.Entry) {
o.bytes = int64(info.Bytes)
o.bytes = info.Bytes
o.modTime = time.Time(info.ClientMtime)
o.hasMetadata = true
}
// Reads the entry from dropbox
@@ -645,55 +497,9 @@ func (o *FsObjectDropbox) metadataKey() string {
// readMetaData gets the info if it hasn't already been fetched
func (o *FsObjectDropbox) readMetaData() (err error) {
if o.md5sum != "" {
if o.hasMetadata {
return nil
}
// fs.Debug(o, "Reading metadata from datastore")
record, err := o.dropbox.readRecord(o.metadataKey())
if err != nil {
fs.Debug(o, "Couldn't read metadata: %s", err)
record = nil
}
if record != nil {
// Read md5sum
md5sumInterface, ok, err := record.Get(md5sumField)
if err != nil {
return err
}
if !ok {
fs.Debug(o, "Couldn't find md5sum in record")
} else {
md5sum, ok := md5sumInterface.(string)
if !ok {
fs.Debug(o, "md5sum not a string")
} else {
o.md5sum = md5sum
}
}
// read mtime
mtimeInterface, ok, err := record.Get(mtimeField)
if err != nil {
return err
}
if !ok {
fs.Debug(o, "Couldn't find mtime in record")
} else {
mtime, ok := mtimeInterface.(string)
if !ok {
fs.Debug(o, "mtime not a string")
} else {
modTime, err := time.Parse(timeFormatIn, mtime)
if err != nil {
return err
}
o.modTime = modTime
}
}
}
// Last resort
return o.readEntryAndSetMetadata()
}
@@ -711,59 +517,12 @@ func (o *FsObjectDropbox) ModTime() time.Time {
return o.modTime
}
// Sets the modification time of the local fs object into the record
// FIXME if we don't set md5sum what will that do?
func (o *FsObjectDropbox) setModTimeAndMd5sum(modTime time.Time, md5sum string) error {
key := o.metadataKey()
// fs.Debug(o, "Writing metadata to datastore")
return o.dropbox.transaction(func() error {
record, err := o.dropbox.table.GetOrInsert(key)
if err != nil {
return fmt.Errorf("Couldn't read record: %s", err)
}
if md5sum != "" {
err = record.Set(md5sumField, md5sum)
if err != nil {
return fmt.Errorf("Couldn't set md5sum record: %s", err)
}
o.md5sum = md5sum
}
if !modTime.IsZero() {
mtime := modTime.Format(timeFormatOut)
err := record.Set(mtimeField, mtime)
if err != nil {
return fmt.Errorf("Couldn't set mtime record: %s", err)
}
o.modTime = modTime
}
return nil
})
}
// Deletes the metadata associated with this file
//
// It logs any errors
func (o *FsObjectDropbox) deleteMetadata() {
fs.Debug(o, "Deleting metadata from datastore")
err := o.dropbox.deleteMetadata(o.metadataKey())
if err != nil {
fs.Log(o, "Error deleting metadata: %v", err)
fs.Stats.Error()
}
}
// Sets the modification time of the local fs object
//
// Commits the datastore
func (o *FsObjectDropbox) SetModTime(modTime time.Time) {
err := o.setModTimeAndMd5sum(modTime, "")
if err != nil {
fs.Stats.Error()
fs.Log(o, err.Error())
}
// FIXME not implemented
return
}
// Is this object storable
@@ -783,22 +542,16 @@ func (o *FsObjectDropbox) Open() (in io.ReadCloser, err error) {
//
// The new object may have been created if an error is returned
func (o *FsObjectDropbox) Update(in io.Reader, modTime time.Time, size int64) error {
// Calculate md5sum as we upload it
hash := md5.New()
rc := &readCloser{in: io.TeeReader(in, hash)}
entry, err := o.dropbox.db.UploadByChunk(rc, uploadChunkSize, o.remotePath(), true, "")
entry, err := o.dropbox.db.UploadByChunk(ioutil.NopCloser(in), uploadChunkSize, o.remotePath(), true, "")
if err != nil {
return fmt.Errorf("Upload failed: %s", err)
}
o.setMetadataFromEntry(entry)
md5sum := fmt.Sprintf("%x", hash.Sum(nil))
return o.setModTimeAndMd5sum(modTime, md5sum)
return nil
}
// Remove an object
func (o *FsObjectDropbox) Remove() error {
o.deleteMetadata()
_, err := o.dropbox.db.Delete(o.remotePath())
return err
}

View File

@@ -51,7 +51,7 @@ func (tree *NameTreeNode) getTreeNode(path string) *NameTreeNode {
for _, component := range strings.Split(path, "/") {
if len(component) == 0 {
fs.Stats.Error()
fs.Log(tree, "getTreeNode: path component is empty (full path %q)", path)
fs.ErrorLog(tree, "getTreeNode: path component is empty (full path %q)", path)
return nil
}
@@ -72,7 +72,7 @@ func (tree *NameTreeNode) getTreeNode(path string) *NameTreeNode {
func (tree *NameTreeNode) PutCaseCorrectDirectoryName(parentPath string, caseCorrectDirectoryName string) {
if len(caseCorrectDirectoryName) == 0 {
fs.Stats.Error()
fs.Log(tree, "PutCaseCorrectDirectoryName: empty caseCorrectDirectoryName is not allowed (parentPath: %q)", parentPath)
fs.ErrorLog(tree, "PutCaseCorrectDirectoryName: empty caseCorrectDirectoryName is not allowed (parentPath: %q)", parentPath)
return
}
@@ -89,7 +89,7 @@ func (tree *NameTreeNode) PutCaseCorrectDirectoryName(parentPath string, caseCor
} else {
if len(directory.CaseCorrectName) > 0 {
fs.Stats.Error()
fs.Log(tree, "PutCaseCorrectDirectoryName: directory %q is already exists under parent path %q", caseCorrectDirectoryName, parentPath)
fs.ErrorLog(tree, "PutCaseCorrectDirectoryName: directory %q is already exists under parent path %q", caseCorrectDirectoryName, parentPath)
return
}
@@ -105,7 +105,7 @@ func (tree *NameTreeNode) PutFile(parentPath string, caseCorrectFileName string,
if node.Files[caseCorrectFileName] != nil {
fs.Stats.Error()
fs.Log(tree, "PutFile: file %q is already exists at %q", caseCorrectFileName, parentPath)
fs.ErrorLog(tree, "PutFile: file %q is already exists at %q", caseCorrectFileName, parentPath)
return
}
@@ -124,7 +124,7 @@ func (tree *NameTreeNode) GetPathWithCorrectCase(path string) *string {
for _, component := range strings.Split(path, "/") {
if component == "" {
fs.Stats.Error()
fs.Log(tree, "GetPathWithCorrectCase: path component is empty (full path %q)", path)
fs.ErrorLog(tree, "GetPathWithCorrectCase: path component is empty (full path %q)", path)
return nil
}
@@ -161,7 +161,7 @@ func (tree *NameTreeNode) walkFilesRec(currentPath string, walkFunc NameTreeFile
caseCorrectName := directory.CaseCorrectName
if caseCorrectName == "" {
fs.Stats.Error()
fs.Log(tree, "WalkFiles: exact name of the directory %q is unknown (parent path: %q)", lowerCaseName, currentPath)
fs.ErrorLog(tree, "WalkFiles: exact name of the directory %q is unknown (parent path: %q)", lowerCaseName, currentPath)
continue
}

View File

@@ -207,7 +207,7 @@ func LoadConfig() {
var err error
ConfigFile, err = goconfig.LoadConfigFile(ConfigPath)
if err != nil {
log.Printf("Failed to load config file %v - using defaults", ConfigPath)
log.Printf("Failed to load config file %v - using defaults: %v", ConfigPath, err)
ConfigFile, err = goconfig.LoadConfigFile(os.DevNull)
if err != nil {
log.Fatalf("Failed to read null config file: %v", err)

View File

@@ -14,6 +14,8 @@ import (
const (
// User agent for Fs which can set it
UserAgent = "rclone/" + Version
// Very large precision value to show mod time isn't supported
ModTimeNotSupported = 100 * 365 * 24 * time.Hour
)
// Globals
@@ -107,9 +109,11 @@ type Object interface {
Remote() string
// Md5sum returns the md5 checksum of the file
// If no Md5sum is available it returns ""
Md5sum() (string, error)
// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
ModTime() time.Time
// SetModTime sets the metadata on the object to set the modification date
@@ -264,6 +268,12 @@ func Log(o interface{}, text string, args ...interface{}) {
}
}
// Write error log output for this Object or Fs
// Unconditionally logs a message regardless of Config.Quiet or Config.Verbose
func ErrorLog(o interface{}, text string, args ...interface{}) {
OutputLog(o, text, args...)
}
// checkClose is a utility function used to check the return from
// Close in a defer statement.
func checkClose(c io.Closer, err *error) {
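A minimal sketch (simplified, not the full implementation) of the behavioural difference this diff introduces: `Log` output can be suppressed by quiet mode, while `ErrorLog` always writes:

```
package main

import "log"

// quiet stands in for fs.Config.Quiet.
var quiet = true

// logf mimics fs.Log: suppressed when quiet mode is on.
func logf(format string, args ...interface{}) {
	if !quiet {
		log.Printf(format, args...)
	}
}

// errorLogf mimics fs.ErrorLog: written unconditionally so real
// errors are never hidden by -q.
func errorLogf(format string, args ...interface{}) {
	log.Printf(format, args...)
}

func main() {
	logf("transferred %d files", 3)        // hidden in quiet mode
	errorLogf("failed to copy: %s", "eof") // always shown
}
```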

View File

@@ -27,6 +27,14 @@ func CalculateModifyWindow(fs ...Fs) {
Debug(fs[0], "Modify window is %s\n", Config.ModifyWindow)
}
// Md5sumsEqual checks to see if src == dst, but ignores empty strings
func Md5sumsEqual(src, dst string) bool {
if src == "" || dst == "" {
return true
}
return src == dst
}
// Check the two files to see if the MD5sums are the same
//
// May return an error which will already have been logged
@@ -36,18 +44,18 @@ func CheckMd5sums(src, dst Object) (bool, error) {
srcMd5, err := src.Md5sum()
if err != nil {
Stats.Error()
Log(src, "Failed to calculate src md5: %s", err)
ErrorLog(src, "Failed to calculate src md5: %s", err)
return false, err
}
dstMd5, err := dst.Md5sum()
if err != nil {
Stats.Error()
Log(dst, "Failed to calculate dst md5: %s", err)
ErrorLog(dst, "Failed to calculate dst md5: %s", err)
return false, err
}
// Debug("Src MD5 %s", srcMd5)
// Debug("Dst MD5 %s", obj.Hash)
return srcMd5 == dstMd5, nil
return Md5sumsEqual(srcMd5, dstMd5), nil
}
// Checks to see if the src and dst objects are equal by looking at
@@ -148,7 +156,7 @@ tryAgain:
in0, err := src.Open()
if err != nil {
Stats.Error()
Log(src, "Failed to open: %s", err)
ErrorLog(src, "Failed to open: %s", err)
return
}
in := NewAccount(in0) // account the transfer
@@ -178,7 +186,7 @@ tryAgain:
}
if err != nil {
Stats.Error()
Log(src, "Failed to copy: %s", err)
ErrorLog(src, "Failed to copy: %s", err)
removeFailedCopy(dst)
return
}
@@ -187,7 +195,7 @@ tryAgain:
if src.Size() != dst.Size() {
Stats.Error()
err = fmt.Errorf("Corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size())
Log(dst, "%s", err)
ErrorLog(dst, "%s", err)
removeFailedCopy(dst)
return
}
@@ -197,16 +205,16 @@ tryAgain:
srcMd5sum, md5sumErr := src.Md5sum()
if md5sumErr != nil {
Stats.Error()
Log(src, "Failed to read md5sum: %s", md5sumErr)
ErrorLog(src, "Failed to read md5sum: %s", md5sumErr)
} else if srcMd5sum != "" {
dstMd5sum, md5sumErr := dst.Md5sum()
if md5sumErr != nil {
Stats.Error()
Log(dst, "Failed to read md5sum: %s", md5sumErr)
} else if dstMd5sum != "" && srcMd5sum != dstMd5sum {
ErrorLog(dst, "Failed to read md5sum: %s", md5sumErr)
} else if !Md5sumsEqual(srcMd5sum, dstMd5sum) {
Stats.Error()
err = fmt.Errorf("Corrupted on transfer: md5sums differ %q vs %q", srcMd5sum, dstMd5sum)
Log(dst, "%s", err)
ErrorLog(dst, "%s", err)
removeFailedCopy(dst)
return
}
@@ -280,7 +288,7 @@ func DeleteFiles(to_be_deleted ObjectsChan) {
Stats.DoneChecking(dst)
if err != nil {
Stats.Error()
Log(dst, "Couldn't delete: %s", err)
ErrorLog(dst, "Couldn't delete: %s", err)
} else {
Debug(dst, "Deleted")
}
@@ -405,13 +413,13 @@ func Check(fdst, fsrc Fs) error {
Log(fdst, "%d files not in %v", len(dstFiles), fsrc)
for _, dst := range dstFiles {
Stats.Error()
Log(dst, "File not in %v", fsrc)
ErrorLog(dst, "File not in %v", fsrc)
}
Log(fsrc, "%d files not in %s", len(srcFiles), fdst)
for _, src := range srcFiles {
Stats.Error()
Log(src, "File not in %v", fdst)
ErrorLog(src, "File not in %v", fdst)
}
checks := make(chan []Object, Config.Transfers)
@@ -433,7 +441,7 @@ func Check(fdst, fsrc Fs) error {
if src.Size() != dst.Size() {
Stats.DoneChecking(src)
Stats.Error()
Log(src, "Sizes differ")
ErrorLog(src, "Sizes differ")
continue
}
same, err := CheckMd5sums(src, dst)
@@ -443,7 +451,7 @@ func Check(fdst, fsrc Fs) error {
}
if !same {
Stats.Error()
Log(src, "Md5sums differ")
ErrorLog(src, "Md5sums differ")
}
Debug(src, "OK")
}
@@ -525,7 +533,7 @@ func Md5sum(f Fs, w io.Writer) error {
Stats.DoneChecking(o)
if err != nil {
Debug(o, "Failed to read MD5: %v", err)
md5sum = "UNKNOWN"
md5sum = "ERROR"
}
syncFprintf(w, "%32s %s\n", md5sum, o.Remote())
})

View File

@@ -398,14 +398,37 @@ func TestLsLong(t *testing.T) {
t.Fatalf("List failed: %v", err)
}
res := buf.String()
timeFormat := "2006-01-02 15:04:05"
m1 := regexp.MustCompile(`(?m)^ 0 ` + t2.Local().Format(timeFormat) + `\.\d{9} empty space$`)
if !m1.MatchString(res) {
t.Errorf("empty space missing: %q", res)
lines := strings.Split(strings.Trim(res, "\n"), "\n")
if len(lines) != 2 {
t.Fatalf("Wrong number of lines in list: %q", lines)
}
m2 := regexp.MustCompile(`(?m)^ 60 ` + t1.Local().Format(timeFormat) + `\.\d{9} potato2$`)
if !m2.MatchString(res) {
timeFormat := "2006-01-02 15:04:05.000000000"
precision := fremote.Precision()
checkTime := func(m, filename string, expected time.Time) {
modTime, err := time.Parse(timeFormat, m)
if err != nil {
t.Errorf("Error parsing %q: %v", m, err)
} else {
dt, ok := fstest.CheckTimeEqualWithPrecision(expected, modTime, precision)
if !ok {
t.Errorf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", filename, dt, precision, modTime, expected, precision)
}
}
}
m1 := regexp.MustCompile(`(?m)^ 0 (\d{4}-\d\d-\d\d \d\d:\d\d:\d\d\.\d{9}) empty space$`)
if ms := m1.FindStringSubmatch(res); ms == nil {
t.Errorf("empty space missing: %q", res)
} else {
checkTime(ms[1], "empty space", t2.Local())
}
m2 := regexp.MustCompile(`(?m)^ 60 (\d{4}-\d\d-\d\d \d\d:\d\d:\d\d\.\d{9}) potato2$`)
if ms := m2.FindStringSubmatch(res); ms == nil {
t.Errorf("potato2 missing: %q", res)
} else {
checkTime(ms[1], "potato2", t1.Local())
}
}
@@ -416,10 +439,12 @@ func TestMd5sum(t *testing.T) {
t.Fatalf("List failed: %v", err)
}
res := buf.String()
if !strings.Contains(res, "d41d8cd98f00b204e9800998ecf8427e empty space\n") {
if !strings.Contains(res, "d41d8cd98f00b204e9800998ecf8427e empty space\n") &&
!strings.Contains(res, " empty space\n") {
t.Errorf("empty space missing: %q", res)
}
if !strings.Contains(res, "6548b156ea68a4e003e786df99eee76 potato2\n") {
if !strings.Contains(res, "6548b156ea68a4e003e786df99eee76 potato2\n") &&
!strings.Contains(res, " potato2\n") {
t.Errorf("potato2 missing: %q", res)
}
}

View File

@@ -1,3 +1,3 @@
package fs
const Version = "v1.17"
const Version = "v1.18"

View File

@@ -30,10 +30,19 @@ type Item struct {
Size int64
}
// Checks the times are equal within the precision, returns the delta and a flag
func CheckTimeEqualWithPrecision(t0, t1 time.Time, precision time.Duration) (time.Duration, bool) {
dt := t0.Sub(t1)
if dt >= precision || dt <= -precision {
return dt, false
}
return dt, true
}
// check the mod time to the given precision
func (i *Item) CheckModTime(t *testing.T, obj fs.Object, modTime time.Time, precision time.Duration) {
dt := modTime.Sub(i.ModTime)
if dt >= precision || dt <= -precision {
dt, ok := CheckTimeEqualWithPrecision(modTime, i.ModTime, precision)
if !ok {
t.Errorf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", obj.Remote(), dt, precision, modTime, i.ModTime, precision)
}
}
@@ -47,7 +56,7 @@ func (i *Item) Check(t *testing.T, obj fs.Object, precision time.Duration) {
if err != nil {
t.Fatalf("Failed to read md5sum for %q: %v", obj.Remote(), err)
}
if i.Md5sum != Md5sum {
if !fs.Md5sumsEqual(i.Md5sum, Md5sum) {
t.Errorf("%s: Md5sum incorrect - expecting %q got %q", obj.Remote(), i.Md5sum, Md5sum)
}
if i.Size != obj.Size() {

View File

@@ -5,6 +5,7 @@ import (
"bytes"
"crypto/md5"
"encoding/hex"
"flag"
"io"
"log"
"os"
@@ -32,11 +33,16 @@ var (
}
)
func init() {
flag.StringVar(&RemoteName, "remote", "", "Set this to override the default remote name (eg s3:)")
}
func TestInit(t *testing.T) {
var err error
fs.LoadConfig()
fs.Config.Verbose = false
fs.Config.Quiet = true
t.Logf("Using remote %q", RemoteName)
if RemoteName == "" {
RemoteName, err = fstest.LocalRemote()
if err != nil {
@@ -252,6 +258,9 @@ func TestFsRmdirFull(t *testing.T) {
func TestFsPrecision(t *testing.T) {
skipIfNotOk(t)
precision := remote.Precision()
if precision == fs.ModTimeNotSupported {
return
}
if precision > time.Second || precision < 0 {
t.Fatalf("Precision out of range %v", precision)
}
@@ -295,7 +304,7 @@ func TestObjectMd5sum(t *testing.T) {
if err != nil {
t.Errorf("Error in Md5sum: %v", err)
}
if Md5sum != file1.Md5sum {
if !fs.Md5sumsEqual(Md5sum, file1.Md5sum) {
t.Errorf("Md5sum is wrong %v != %v", Md5sum, file1.Md5sum)
}
}
@@ -345,7 +354,7 @@ func TestObjectOpen(t *testing.T) {
t.Fatalf("in.Close() return error: %v", err)
}
Md5sum := hex.EncodeToString(hash.Sum(nil))
if Md5sum != file1.Md5sum {
if !fs.Md5sumsEqual(Md5sum, file1.Md5sum) {
t.Errorf("Md5sum is wrong %v != %v", Md5sum, file1.Md5sum)
}
}

View File

@@ -251,7 +251,7 @@ func (f *FsStorage) list(directories bool, fn func(string, *storage.Object)) {
objects, err := list.Do()
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't read bucket %q: %s", f.bucket, err)
fs.ErrorLog(f, "Couldn't read bucket %q: %s", f.bucket, err)
return
}
if !directories {
@@ -286,7 +286,7 @@ func (f *FsStorage) List() fs.ObjectsChan {
// Return no objects at top level list
close(out)
fs.Stats.Error()
fs.Log(f, "Can't list objects at root - choose a bucket using lsd")
fs.ErrorLog(f, "Can't list objects at root - choose a bucket using lsd")
} else {
// List the objects
go func() {
@@ -310,7 +310,7 @@ func (f *FsStorage) ListDir() fs.DirChan {
defer close(out)
if f.projectNumber == "" {
fs.Stats.Error()
fs.Log(f, "Can't list buckets without project number")
fs.ErrorLog(f, "Can't list buckets without project number")
return
}
listBuckets := f.svc.Buckets.List(f.projectNumber).MaxResults(listChunks)
@@ -318,7 +318,7 @@ func (f *FsStorage) ListDir() fs.DirChan {
buckets, err := listBuckets.Do()
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't list buckets: %v", err)
fs.ErrorLog(f, "Couldn't list buckets: %v", err)
break
} else {
for _, bucket := range buckets.Items {
@@ -505,7 +505,7 @@ func (o *FsObjectStorage) SetModTime(modTime time.Time) {
newObject, err := o.storage.svc.Objects.Patch(o.storage.bucket, o.storage.root+o.remote, &object).Do()
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to update remote mtime: %s", err)
fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
}
o.setMetaData(newObject)
}

View File

@@ -110,12 +110,12 @@ func (f *FsLocal) List() fs.ObjectsChan {
err := filepath.Walk(f.root, func(path string, fi os.FileInfo, err error) error {
if err != nil {
fs.Stats.Error()
fs.Log(f, "Failed to open directory: %s: %s", path, err)
fs.ErrorLog(f, "Failed to open directory: %s: %s", path, err)
} else {
remote, err := filepath.Rel(f.root, path)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Failed to get relative path %s: %s", path, err)
fs.ErrorLog(f, "Failed to get relative path %s: %s", path, err)
return nil
}
if remote == "." {
@@ -132,7 +132,7 @@ func (f *FsLocal) List() fs.ObjectsChan {
})
if err != nil {
fs.Stats.Error()
fs.Log(f, "Failed to open directory: %s: %s", f.root, err)
fs.ErrorLog(f, "Failed to open directory: %s: %s", f.root, err)
}
close(out)
}()
@@ -161,7 +161,7 @@ func (f *FsLocal) ListDir() fs.DirChan {
items, err := ioutil.ReadDir(f.root)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't find read directory: %s", err)
fs.ErrorLog(f, "Couldn't find read directory: %s", err)
} else {
for _, item := range items {
if item.IsDir() {
@@ -176,7 +176,7 @@ func (f *FsLocal) ListDir() fs.DirChan {
err := filepath.Walk(dirpath, func(path string, fi os.FileInfo, err error) error {
if err != nil {
fs.Stats.Error()
fs.Log(f, "Failed to open directory: %s: %s", path, err)
fs.ErrorLog(f, "Failed to open directory: %s: %s", path, err)
} else {
dir.Count += 1
dir.Bytes += fi.Size()
@@ -185,7 +185,7 @@ func (f *FsLocal) ListDir() fs.DirChan {
})
if err != nil {
fs.Stats.Error()
fs.Log(f, "Failed to open directory: %s: %s", dirpath, err)
fs.ErrorLog(f, "Failed to open directory: %s: %s", dirpath, err)
}
out <- dir
}
@@ -210,7 +210,7 @@ func (f *FsLocal) Put(in io.Reader, remote string, modTime time.Time, size int64
// Mkdir creates the directory if it doesn't exist
func (f *FsLocal) Mkdir() error {
return os.MkdirAll(f.root, 0770)
return os.MkdirAll(f.root, 0777)
}
// Rmdir removes the directory
@@ -324,7 +324,7 @@ func (o *FsObjectLocal) Md5sum() (string, error) {
in, err := os.Open(o.path)
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to open: %s", err)
fs.ErrorLog(o, "Failed to open: %s", err)
return "", err
}
hash := md5.New()
@@ -332,12 +332,12 @@ func (o *FsObjectLocal) Md5sum() (string, error) {
closeErr := in.Close()
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to read: %s", err)
fs.ErrorLog(o, "Failed to read: %s", err)
return "", err
}
if closeErr != nil {
fs.Stats.Error()
fs.Log(o, "Failed to close: %s", closeErr)
fs.ErrorLog(o, "Failed to close: %s", closeErr)
return "", closeErr
}
o.md5sum = hex.EncodeToString(hash.Sum(nil))
@@ -429,7 +429,7 @@ func (o *FsObjectLocal) Open() (in io.ReadCloser, err error) {
// Update the object from in with modTime and size
func (o *FsObjectLocal) Update(in io.Reader, modTime time.Time, size int64) error {
dir := path.Dir(o.path)
err := os.MkdirAll(dir, 0770)
err := os.MkdirAll(dir, 0777)
if err != nil {
return err
}

View File

@@ -24,6 +24,7 @@ docs = [
"local.md",
"changelog.md",
"bugs.md",
"faq.md",
"licence.md",
"authors.md",
"contact.md",

View File

@@ -377,7 +377,7 @@ func main() {
// Run the actual command
if command.Run != nil {
command.Run(fdst, fsrc)
if !command.NoStats {
if !command.NoStats && (!fs.Config.Quiet || fs.Stats.Errored() || *statsInterval > 0) {
fmt.Fprintln(os.Stderr, fs.Stats)
}
if fs.Config.Verbose {

s3/s3.go
View File

@@ -3,19 +3,31 @@ package s3
// FIXME need to prevent anything but ListDir working for s3://
/*
Progress of port to aws-sdk
* Don't really need o.meta at all?
What happens if you CTRL-C a multipart upload
* get an incomplete upload
* disappears when you delete the bucket
*/
import (
"errors"
"fmt"
"io"
"net/http"
"path"
"regexp"
"strconv"
"strings"
"time"
"github.com/ncw/goamz/aws"
"github.com/ncw/goamz/s3"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/service"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/ncw/rclone/fs"
"github.com/ncw/swift"
)
@@ -33,39 +45,48 @@ func init() {
Name: "secret_access_key",
Help: "AWS Secret Access Key (password). ",
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.",
Name: "region",
Help: "Region to connect to.",
Examples: []fs.OptionExample{{
Value: "https://s3.amazonaws.com/",
Value: "us-east-1",
Help: "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia or Pacific Northwest.\nLeave location constraint empty.",
}, {
Value: "https://s3-external-1.amazonaws.com",
Help: "US Region, Northern Virginia only.\nLeave location constraint empty.",
}, {
Value: "https://s3-us-west-2.amazonaws.com",
Value: "us-west-2",
Help: "US West (Oregon) Region\nNeeds location constraint us-west-2.",
}, {
Value: "https://s3-us-west-1.amazonaws.com",
Value: "us-west-1",
Help: "US West (Northern California) Region\nNeeds location constraint us-west-1.",
}, {
Value: "https://s3-eu-west-1.amazonaws.com",
Value: "eu-west-1",
Help: "EU (Ireland) Region Region\nNeeds location constraint EU or eu-west-1.",
}, {
Value: "https://s3-ap-southeast-1.amazonaws.com",
Value: "eu-central-1",
Help: "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.",
}, {
Value: "ap-southeast-1",
Help: "Asia Pacific (Singapore) Region\nNeeds location constraint ap-southeast-1.",
}, {
Value: "https://s3-ap-southeast-2.amazonaws.com",
Help: "Asia Pacific (Sydney) Region\nNeeds location constraint .",
Value: "ap-southeast-2",
Help: "Asia Pacific (Sydney) Region\nNeeds location constraint ap-southeast-2.",
}, {
Value: "https://s3-ap-northeast-1.amazonaws.com",
Value: "ap-northeast-1",
Help: "Asia Pacific (Tokyo) Region\nNeeds location constraint ap-northeast-1.",
}, {
Value: "https://s3-sa-east-1.amazonaws.com",
Value: "sa-east-1",
Help: "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
}, {
Value: "other-v2-signature",
Help: "If using an S3 clone that only understands v2 signatures - eg Ceph - set this and make sure you set the endpoint.",
}, {
Value: "other-v4-signature",
Help: "If using an S3 clone that understands v4 signatures set this and make sure you set the endpoint.",
}},
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\nLeave blank if using AWS to use the default endpoint for the region.\nSpecify if using an S3 clone such as Ceph.",
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Endpoint.",
Help: "Location constraint - must be set to match the Region. Used when creating buckets only.",
Examples: []fs.OptionExample{{
Value: "",
Help: "Empty for US Region, Northern Virginia or Pacific Northwest.",
@@ -100,17 +121,18 @@ func init() {
// Constants
const (
metaMtime = "X-Amz-Meta-Mtime" // the meta key to store mtime in
listChunkSize = 1024 // number of items to read at once
metaMtime = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime
listChunkSize = 1024 // number of items to read at once
maxRetries = 10 // number of retries to make of operations
)
// FsS3 represents a remote s3 server
type FsS3 struct {
c *s3.S3 // the connection to the s3 server
b *s3.Bucket // the connection to the bucket
bucket string // the bucket we are working on
perm s3.ACL // permissions for new buckets / objects
root string // root of the bucket - ignore all objects above this
c *s3.S3 // the connection to the s3 server
bucket string // the bucket we are working on
perm string // permissions for new buckets / objects
root string // root of the bucket - ignore all objects above this
locationConstraint string // location constraint of new buckets
}
// FsObjectS3 describes a s3 object
@@ -119,12 +141,12 @@ type FsObjectS3 struct {
//
// List will read everything but meta - to fill that in need to call
// readMetaData
s3 *FsS3 // what this object is part of
remote string // The remote path
etag string // md5sum of the object
bytes int64 // size of the object
lastModified time.Time // Last modified
meta s3.Headers // The object metadata if known - may be nil
s3 *FsS3 // what this object is part of
remote string // The remote path
etag string // md5sum of the object
bytes int64 // size of the object
lastModified time.Time // Last modified
meta map[string]*string // The object metadata if known - may be nil
}
// ------------------------------------------------------------
@@ -163,28 +185,42 @@ func s3Connection(name string) (*s3.S3, error) {
if secretAccessKey == "" {
return nil, errors.New("secret_access_key not found")
}
auth := aws.Auth{AccessKey: accessKeyId, SecretKey: secretAccessKey}
auth := credentials.NewStaticCredentials(accessKeyId, secretAccessKey, "")
// FIXME look through all the regions by name and use one of them if found
// Synthesize the region
s3Endpoint := fs.ConfigFile.MustValue(name, "endpoint")
if s3Endpoint == "" {
s3Endpoint = "https://s3.amazonaws.com/"
endpoint := fs.ConfigFile.MustValue(name, "endpoint")
region := fs.ConfigFile.MustValue(name, "region")
if region == "" && endpoint == "" {
endpoint = "https://s3.amazonaws.com/"
}
region := aws.Region{
Name: "s3",
S3Endpoint: s3Endpoint,
S3LocationConstraint: false,
if region == "" {
region = "us-east-1"
}
s3LocationConstraint := fs.ConfigFile.MustValue(name, "location_constraint")
if s3LocationConstraint != "" {
region.Name = s3LocationConstraint
region.S3LocationConstraint = true
awsConfig := aws.NewConfig().
WithRegion(region).
WithMaxRetries(maxRetries).
WithCredentials(auth).
WithEndpoint(endpoint).
WithHTTPClient(fs.Config.Client()).
WithS3ForcePathStyle(true)
// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
c := s3.New(awsConfig)
if region == "other-v2-signature" {
fs.Debug(name, "Using v2 auth")
signer := func(req *service.Request) {
// Ignore AnonymousCredentials object
if req.Service.Config.Credentials == credentials.AnonymousCredentials {
return
}
sign(accessKeyId, secretAccessKey, req.HTTPRequest)
}
c.Handlers.Sign.Clear()
c.Handlers.Sign.PushBack(service.BuildContentLength)
c.Handlers.Sign.PushBack(signer)
}
c := s3.New(auth, region)
c.Client = fs.Config.Client()
// Add user agent
c.Handlers.Build.PushBack(func(r *service.Request) {
r.HTTPRequest.Header.Set("User-Agent", fs.UserAgent)
})
return c, nil
}
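The `sign` helper the v2 branch calls isn't shown in this diff; as a rough, hedged sketch (simplified — real AWS v2 signing also folds in canonicalized `x-amz-*` headers), the v2 scheme is an HMAC-SHA1 over a canonical string placed in the Authorization header:

```
package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/base64"
	"fmt"
	"net/http"
	"time"
)

// signV2 signs req in the (simplified) AWS v2 style.
func signV2(accessKey, secretKey string, req *http.Request, canonicalResource string) {
	date := time.Now().UTC().Format(http.TimeFormat)
	req.Header.Set("Date", date)
	stringToSign := req.Method + "\n" +
		req.Header.Get("Content-MD5") + "\n" +
		req.Header.Get("Content-Type") + "\n" +
		date + "\n" +
		canonicalResource
	mac := hmac.New(sha1.New, []byte(secretKey))
	mac.Write([]byte(stringToSign))
	sig := base64.StdEncoding.EncodeToString(mac.Sum(nil))
	req.Header.Set("Authorization", fmt.Sprintf("AWS %s:%s", accessKey, sig))
}

func main() {
	// Illustrative endpoint and keys only.
	req, _ := http.NewRequest("GET", "https://ceph.example.com/bucket/key", nil)
	signV2("ACCESSKEY", "SECRETKEY", req, "/bucket/key")
	fmt.Println(req.Header.Get("Authorization"))
}
```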
@@ -201,14 +237,18 @@ func NewFs(name, root string) (fs.Fs, error) {
f := &FsS3{
c: c,
bucket: bucket,
b: c.Bucket(bucket),
perm: s3.Private, // FIXME need user to specify
root: directory,
// FIXME perm: s3.Private, // FIXME need user to specify
root: directory,
locationConstraint: fs.ConfigFile.MustValue(name, "location_constraint"),
}
if f.root != "" {
f.root += "/"
// Check to see if the object exists
_, err = f.b.Head(directory, nil)
req := s3.HeadObjectInput{
Bucket: &f.bucket,
Key: &directory,
}
_, err = f.c.HeadObject(&req)
if err == nil {
remote := path.Base(directory)
f.root = path.Dir(directory)
@@ -222,27 +262,28 @@ func NewFs(name, root string) (fs.Fs, error) {
return fs.NewLimited(f, obj), nil
}
}
// f.listMultipartUploads()
return f, nil
}
// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *FsS3) newFsObjectWithInfo(remote string, info *s3.Key) fs.Object {
func (f *FsS3) newFsObjectWithInfo(remote string, info *s3.Object) fs.Object {
o := &FsObjectS3{
s3: f,
remote: remote,
}
if info != nil {
// Set info but not meta
var err error
o.lastModified, err = time.Parse(time.RFC3339, info.LastModified)
if err != nil {
fs.Log(o, "Failed to read last modified: %s", err)
if info.LastModified == nil {
fs.Log(o, "Failed to read last modified")
o.lastModified = time.Now()
} else {
o.lastModified = *info.LastModified
}
o.etag = info.ETag
o.bytes = info.Size
o.etag = aws.StringValue(info.ETag)
o.bytes = aws.Int64Value(info.Size)
} else {
err := o.readMetaData() // reads info and meta, returning an error
if err != nil {
@@ -263,50 +304,66 @@ func (f *FsS3) NewFsObject(remote string) fs.Object {
// list the objects into the function supplied
//
// If directories is set it only sends directories
func (f *FsS3) list(directories bool, fn func(string, *s3.Key)) {
func (f *FsS3) list(directories bool, fn func(string, *s3.Object)) {
maxKeys := int64(listChunkSize)
delimiter := ""
if directories {
delimiter = "/"
}
marker := ""
var marker *string
for {
objects, err := f.b.List(f.root, delimiter, marker, listChunkSize)
// FIXME need to implement ALL loop
req := s3.ListObjectsInput{
Bucket: &f.bucket,
Delimiter: &delimiter,
Prefix: &f.root,
MaxKeys: &maxKeys,
Marker: marker,
}
resp, err := f.c.ListObjects(&req)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't read bucket %q: %s", f.bucket, err)
fs.ErrorLog(f, "Couldn't read bucket %q: %s", f.bucket, err)
break
} else {
rootLength := len(f.root)
if directories {
for _, remote := range objects.CommonPrefixes {
for _, commonPrefix := range resp.CommonPrefixes {
if commonPrefix.Prefix == nil {
fs.Log(f, "Nil common prefix received")
continue
}
remote := *commonPrefix.Prefix
if !strings.HasPrefix(remote, f.root) {
fs.Log(f, "Odd name received %q", remote)
continue
}
remote := remote[rootLength:]
remote = remote[rootLength:]
if strings.HasSuffix(remote, "/") {
remote = remote[:len(remote)-1]
}
fn(remote, &s3.Key{Key: remote})
fn(remote, &s3.Object{Key: &remote})
}
} else {
for i := range objects.Contents {
object := &objects.Contents[i]
if !strings.HasPrefix(object.Key, f.root) {
fs.Log(f, "Odd name received %q", object.Key)
for _, object := range resp.Contents {
key := aws.StringValue(object.Key)
if !strings.HasPrefix(key, f.root) {
fs.Log(f, "Odd name received %q", key)
continue
}
remote := object.Key[rootLength:]
remote := key[rootLength:]
fn(remote, object)
}
}
}
if !objects.IsTruncated {
break
}
// Use NextMarker if set, otherwise use last Key
marker = objects.NextMarker
if marker == "" {
marker = objects.Contents[len(objects.Contents)-1].Key
if !aws.BoolValue(resp.IsTruncated) {
break
}
// Use NextMarker if set, otherwise use last Key
if resp.NextMarker == nil || *resp.NextMarker == "" {
marker = resp.Contents[len(resp.Contents)-1].Key
} else {
marker = resp.NextMarker
}
}
}
}
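
The marker bookkeeping at the bottom of this loop is subtle: S3 only supplies NextMarker when a delimiter is in use, so a recursive listing must resume from the last key of the page. Here is the contract as a toy, runnable without the SDK (page and its fields are stand-ins, not SDK types):

package main

import "fmt"

// page stands in for s3.ListObjectsOutput.
type page struct {
    keys       []string
    nextMarker string // only set by S3 when a delimiter is supplied
    truncated  bool
}

// listAll mirrors the pagination loop in list() above.
func listAll(fetch func(marker string) page) (all []string) {
    marker := ""
    for {
        p := fetch(marker)
        all = append(all, p.keys...)
        if !p.truncated {
            return all
        }
        if p.nextMarker != "" {
            marker = p.nextMarker
        } else {
            marker = p.keys[len(p.keys)-1] // no NextMarker: resume after the last key seen
        }
    }
}

func main() {
    pages := []page{
        {keys: []string{"a", "b"}, truncated: true}, // truncated, no NextMarker
        {keys: []string{"c"}},
    }
    i := 0
    fmt.Println(listAll(func(string) page { p := pages[i]; i++; return p })) // [a b c]
}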
@@ -318,11 +375,11 @@ func (f *FsS3) List() fs.ObjectsChan {
// Return no objects at top level list
close(out)
fs.Stats.Error()
fs.Log(f, "Can't list objects at root - choose a bucket using lsd")
fs.ErrorLog(f, "Can't list objects at root - choose a bucket using lsd")
} else {
go func() {
defer close(out)
f.list(false, func(remote string, object *s3.Key) {
f.list(false, func(remote string, object *s3.Object) {
if fs := f.newFsObjectWithInfo(remote, object); fs != nil {
out <- fs
}
@@ -339,15 +396,16 @@ func (f *FsS3) ListDir() fs.DirChan {
// List the buckets
go func() {
defer close(out)
buckets, err := f.c.ListBuckets()
req := s3.ListBucketsInput{}
resp, err := f.c.ListBuckets(&req)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't list buckets: %s", err)
fs.ErrorLog(f, "Couldn't list buckets: %s", err)
} else {
for _, bucket := range buckets {
for _, bucket := range resp.Buckets {
out <- &fs.Dir{
Name: bucket.Name,
When: bucket.CreationDate,
Name: aws.StringValue(bucket.Name),
When: aws.TimeValue(bucket.CreationDate),
Bytes: -1,
Count: -1,
}
@@ -358,10 +416,14 @@ func (f *FsS3) ListDir() fs.DirChan {
// List the directories in the path in the bucket
go func() {
defer close(out)
f.list(true, func(remote string, object *s3.Key) {
f.list(true, func(remote string, object *s3.Object) {
size := int64(0)
if object.Size != nil {
size = *object.Size
}
out <- &fs.Dir{
Name: remote,
Bytes: object.Size,
Bytes: size,
Count: 0,
}
})
@@ -379,9 +441,18 @@ func (f *FsS3) Put(in io.Reader, remote string, modTime time.Time, size int64) (
// Mkdir creates the bucket if it doesn't exist
func (f *FsS3) Mkdir() error {
err := f.b.PutBucket(f.perm)
if err, ok := err.(*s3.Error); ok {
if err.Code == "BucketAlreadyOwnedByYou" {
req := s3.CreateBucketInput{
Bucket: &f.bucket,
ACL: &f.perm,
}
if f.locationConstraint != "" {
req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
LocationConstraint: &f.locationConstraint,
}
}
_, err := f.c.CreateBucket(&req)
if err, ok := err.(awserr.Error); ok {
if err.Code() == "BucketAlreadyOwnedByYou" {
return nil
}
}
@@ -392,7 +463,11 @@ func (f *FsS3) Mkdir() error {
//
// Returns an error if it isn't empty
func (f *FsS3) Rmdir() error {
return f.b.DelBucket()
req := s3.DeleteBucketInput{
Bucket: &f.bucket,
}
_, err := f.c.DeleteBucket(&req)
return err
}
// Return the precision
@@ -427,7 +502,7 @@ func (o *FsObjectS3) Md5sum() (string, error) {
etag := strings.Trim(strings.ToLower(o.etag), `"`)
// Check the etag is a valid md5sum
if !matchMd5.MatchString(etag) {
fs.Debug(o, "Invalid md5sum (probably multipart uploaded) - ignoring: %q", etag)
// fs.Debug(o, "Invalid md5sum (probably multipart uploaded) - ignoring: %q", etag)
return "", nil
}
return etag, nil
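
The debug line above became noise because multipart uploads (now routine via s3manager, below) report ETags as the MD5 of the part MD5s plus a part count, which never looks like a plain MD5. A quick demonstration; the regexp here is an assumption written to match the behaviour of the matchMd5 check in this file:

package main

import (
    "fmt"
    "regexp"
)

// Assumed equivalent of the matchMd5 pattern used by Md5sum.
var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)

func main() {
    fmt.Println(matchMd5.MatchString("9b2cf535f27731c974343645a3985328"))   // true: single-part ETag
    fmt.Println(matchMd5.MatchString("9b2cf535f27731c974343645a3985328-5")) // false: 5-part multipart ETag
}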
@@ -440,28 +515,17 @@ func (o *FsObjectS3) Size() int64 {
// readMetaData gets the metadata if it hasn't already been fetched
//
// if we get a 404 error then we retry a few times for eventual
// consistency reasons
//
// it also sets the info
func (o *FsObjectS3) readMetaData() (err error) {
if o.meta != nil {
return nil
}
var headers s3.Headers
// Try reading the metadata a few times (with exponential
// backoff) to get around eventual consistency on 404 error
for tries := uint(0); tries < 10; tries++ {
headers, err = o.s3.b.Head(o.s3.root+o.remote, nil)
if s3Err, ok := err.(*s3.Error); ok {
if s3Err.StatusCode == http.StatusNotFound {
time.Sleep(5 * time.Millisecond << tries)
continue
}
}
break
key := o.s3.root + o.remote
req := s3.HeadObjectInput{
Bucket: &o.s3.bucket,
Key: &key,
}
resp, err := o.s3.c.HeadObject(&req)
if err != nil {
fs.Debug(o, "Failed to read info: %s", err)
return err
@@ -469,19 +533,17 @@ func (o *FsObjectS3) readMetaData() (err error) {
var size int64
// Ignore missing Content-Length assuming it is 0
// Some versions of Ceph do this due to their Apache proxies
if contentLength, ok := headers["Content-Length"]; ok {
size, err = strconv.ParseInt(contentLength, 10, 64)
if err != nil {
fs.Debug(o, "Failed to read size from: %q", headers)
return err
}
if resp.ContentLength != nil {
size = *resp.ContentLength
}
o.etag = headers["Etag"]
o.etag = aws.StringValue(resp.ETag)
o.bytes = size
o.meta = headers
if o.lastModified, err = time.Parse(http.TimeFormat, headers["Last-Modified"]); err != nil {
o.meta = resp.Metadata
if resp.LastModified == nil {
fs.Log(o, "Failed to read last modified from HEAD: %s", err)
o.lastModified = time.Now()
} else {
o.lastModified = *resp.LastModified
}
return nil
}
@@ -498,11 +560,11 @@ func (o *FsObjectS3) ModTime() time.Time {
}
// read mtime out of metadata if available
d, ok := o.meta[metaMtime]
if !ok {
if !ok || d == nil {
// fs.Debug(o, "No metadata")
return o.lastModified
}
modTime, err := swift.FloatStringToTime(d)
modTime, err := swift.FloatStringToTime(*d)
if err != nil {
fs.Log(o, "Failed to read mtime from object: %s", err)
return o.lastModified
@@ -515,14 +577,27 @@ func (o *FsObjectS3) SetModTime(modTime time.Time) {
err := o.readMetaData()
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to read metadata: %s", err)
fs.ErrorLog(o, "Failed to read metadata: %s", err)
return
}
o.meta[metaMtime] = swift.TimeToFloatString(modTime)
_, err = o.s3.b.Update(o.s3.root+o.remote, o.s3.perm, o.meta)
o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))
// Copy the object to itself to update the metadata
key := o.s3.root + o.remote
sourceKey := o.s3.bucket + "/" + key
directive := s3.MetadataDirectiveReplace // replace metadata with that passed in
req := s3.CopyObjectInput{
Bucket: &o.s3.bucket,
ACL: &o.s3.perm,
Key: &key,
CopySource: &sourceKey,
Metadata: o.meta,
MetadataDirective: &directive,
}
_, err = o.s3.c.CopyObject(&req)
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to update remote mtime: %s", err)
fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
}
}
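
S3 has no call for editing metadata in place, hence the self-copy: CopySource is spelled "bucket/key", and MetadataDirectiveReplace tells S3 to take the metadata from the request rather than from the source object. The copy is server-side, so no object data crosses the wire.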
@@ -533,21 +608,51 @@ func (o *FsObjectS3) Storable() bool {
// Open an object for read
func (o *FsObjectS3) Open() (in io.ReadCloser, err error) {
in, err = o.s3.b.GetReader(o.s3.root + o.remote)
return
key := o.s3.root + o.remote
req := s3.GetObjectInput{
Bucket: &o.s3.bucket,
Key: &key,
}
resp, err := o.s3.c.GetObject(&req)
if err != nil {
return nil, err
}
return resp.Body, nil
}
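
GetObject returns the body as an io.ReadCloser, which Open passes straight through; closing it is left to the caller, as before.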
// Update the Object from in with modTime and size
func (o *FsObjectS3) Update(in io.Reader, modTime time.Time, size int64) error {
// Set the mtime in the headers
headers := s3.Headers{
metaMtime: swift.TimeToFloatString(modTime),
opts := s3manager.UploadOptions{
// PartSize: 64 * 1024 * 1024, use default
Concurrency: 2, // limit concurrency
LeavePartsOnError: false,
S3: o.s3.c,
}
uploader := s3manager.NewUploader(&opts)
// Set the mtime in the meta data
metadata := map[string]*string{
metaMtime: aws.String(swift.TimeToFloatString(modTime)),
}
_, err := o.s3.b.PutReaderHeaders(o.s3.root+o.remote, in, size, fs.MimeType(o), o.s3.perm, headers)
// Guess the content type
contentType := fs.MimeType(o)
key := o.s3.root + o.remote
req := s3manager.UploadInput{
Bucket: &o.s3.bucket,
ACL: &o.s3.perm,
Key: &key,
Body: in,
ContentType: &contentType,
Metadata: metadata,
//ContentLength: &size,
}
_, err := uploader.Upload(&req)
if err != nil {
return err
}
// Read the metadata from the newly created object
o.meta = nil // wipe old metadata
err = o.readMetaData()
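
Switching Update to s3manager is what removes the old single-PUT path: bodies are now uploaded in parts, which lifts the 5 GB ceiling and is the source of the multipart ETags tolerated in Md5sum above. Concurrency is pinned to 2, presumably to bound memory, since the uploader buffers one part-sized chunk per in-flight part.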
@@ -556,7 +661,13 @@ func (o *FsObjectS3) Update(in io.Reader, modTime time.Time, size int64) error {
// Remove an object
func (o *FsObjectS3) Remove() error {
return o.s3.b.Del(o.s3.root + o.remote)
key := o.s3.root + o.remote
req := s3.DeleteObjectInput{
Bucket: &o.s3.bucket,
Key: &key,
}
_, err := o.s3.c.DeleteObject(&req)
return err
}
// Check the interfaces are satisfied

s3/v2sign.go Normal file
View File

@@ -0,0 +1,115 @@
// v2 signing
package s3
import (
"crypto/hmac"
"crypto/sha1"
"encoding/base64"
"net/http"
"sort"
"strings"
"time"
)
// URL parameters that need to be added to the signature
var s3ParamsToSign = map[string]struct{}{
"acl": struct{}{},
"location": struct{}{},
"logging": struct{}{},
"notification": struct{}{},
"partNumber": struct{}{},
"policy": struct{}{},
"requestPayment": struct{}{},
"torrent": struct{}{},
"uploadId": struct{}{},
"uploads": struct{}{},
"versionId": struct{}{},
"versioning": struct{}{},
"versions": struct{}{},
"response-content-type": struct{}{},
"response-content-language": struct{}{},
"response-expires": struct{}{},
"response-cache-control": struct{}{},
"response-content-disposition": struct{}{},
"response-content-encoding": struct{}{},
}
// sign signs requests using v2 auth
//
// Cobbled together from goamz and aws-sdk-go
func sign(AccessKey, SecretKey string, req *http.Request) {
// Set date
date := time.Now().UTC().Format(time.RFC1123)
req.Header.Set("Date", date)
// Sort out URI
uri := req.URL.Opaque
if uri != "" {
if strings.HasPrefix(uri, "//") {
// Strip off //host/uri
uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/")
req.URL.Opaque = uri // reset to plain URI otherwise Ceph gets confused
}
} else {
uri = req.URL.Path
}
if uri == "" {
uri = "/"
}
// Look through headers of interest
var md5 string
var contentType string
var headersToSign []string
for k, v := range req.Header {
k = strings.ToLower(k)
switch k {
case "content-md5":
md5 = v[0]
case "content-type":
contentType = v[0]
default:
if strings.HasPrefix(k, "x-amz-") {
vall := strings.Join(v, ",")
headersToSign = append(headersToSign, k+":"+vall)
}
}
}
// Make headers of interest into canonical string
var joinedHeadersToSign string
if len(headersToSign) > 0 {
sort.StringSlice(headersToSign).Sort()
joinedHeadersToSign = strings.Join(headersToSign, "\n") + "\n"
}
// Look for query parameters which need to be added to the signature
params := req.URL.Query()
var queriesToSign []string
for k, vs := range params {
if _, ok := s3ParamsToSign[k]; ok {
for _, v := range vs {
if v == "" {
queriesToSign = append(queriesToSign, k)
} else {
queriesToSign = append(queriesToSign, k+"="+v)
}
}
}
}
// Add query parameters to URI
if len(queriesToSign) > 0 {
sort.StringSlice(queriesToSign).Sort()
uri += "?" + strings.Join(queriesToSign, "&")
}
// Make signature
payload := req.Method + "\n" + md5 + "\n" + contentType + "\n" + date + "\n" + joinedHeadersToSign + uri
hash := hmac.New(sha1.New, []byte(SecretKey))
hash.Write([]byte(payload))
signature := make([]byte, base64.StdEncoding.EncodedLen(hash.Size()))
base64.StdEncoding.Encode(signature, hash.Sum(nil))
// Set signature in request
req.Header.Set("Authorization", "AWS "+AccessKey+":"+string(signature))
}
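
A hypothetical usage sketch, assuming it lives in this package next to sign (keys, endpoint and bucket are placeholders; fmt would need adding to the imports):

func signExample() {
    req, err := http.NewRequest("GET", "https://ceph.example.com/mybucket/?acl", nil)
    if err != nil {
        panic(err)
    }
    sign("AKIDEXAMPLE", "SECRETEXAMPLE", req)
    // Date is now set, and Authorization reads
    // "AWS AKIDEXAMPLE:<base64 HMAC-SHA1 of the canonical string>".
    fmt.Println(req.Header.Get("Authorization"))
}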

View File

@@ -230,7 +230,7 @@ func (f *FsSwift) list(directories bool, fn func(string, *swift.Object)) {
})
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't read container %q: %s", f.container, err)
fs.ErrorLog(f, "Couldn't read container %q: %s", f.container, err)
}
}
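
The wholesale Log to ErrorLog swaps in this file (and in s3.go above) implement the quiet-mode fix from this release: genuine errors must survive -q while progress chatter must not. The assumed split, sketched rather than quoted from the fs package:

// Sketch only; Config.Quiet and the exact signatures are assumptions.
func Log(o interface{}, text string, args ...interface{}) {
    if !Config.Quiet {
        log.Printf(text, args...) // suppressed by -q/--quiet
    }
}

func ErrorLog(o interface{}, text string, args ...interface{}) {
    log.Printf(text, args...) // always printed, even when quiet
}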
@@ -241,7 +241,7 @@ func (f *FsSwift) List() fs.ObjectsChan {
// Return no objects at top level list
close(out)
fs.Stats.Error()
fs.Log(f, "Can't list objects at root - choose a container using lsd")
fs.ErrorLog(f, "Can't list objects at root - choose a container using lsd")
} else {
// List the objects
go func() {
@@ -266,7 +266,7 @@ func (f *FsSwift) ListDir() fs.DirChan {
containers, err := f.c.ContainersAll(nil)
if err != nil {
fs.Stats.Error()
fs.Log(f, "Couldn't list containers: %v", err)
fs.ErrorLog(f, "Couldn't list containers: %v", err)
} else {
for _, container := range containers {
out <- &fs.Dir{
@@ -393,14 +393,14 @@ func (o *FsObjectSwift) SetModTime(modTime time.Time) {
err := o.readMetaData()
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to read metadata: %s", err)
fs.ErrorLog(o, "Failed to read metadata: %s", err)
return
}
o.meta.SetModTime(modTime)
err = o.swift.c.ObjectUpdate(o.swift.container, o.swift.root+o.remote, o.meta.ObjectHeaders())
if err != nil {
fs.Stats.Error()
fs.Log(o, "Failed to update remote mtime: %s", err)
fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
}
}