mirror of https://github.com/rclone/rclone.git synced 2026-01-22 12:23:15 +00:00

Compare commits

...

64 Commits

Author SHA1 Message Date
Nick Craig-Wood
2b1194c57e rc: update docs with new methods 2018-11-05 15:44:40 +00:00
Nick Craig-Wood
e6dd121f52 config: add rc operations for config 2018-11-05 15:44:40 +00:00
Nick Craig-Wood
e600217666 config: create config directory on save if it is missing 2018-11-05 15:44:40 +00:00
Nick Craig-Wood
bc17ca7ed9 rc: implement core/obscure 2018-11-05 15:44:40 +00:00
Nick Craig-Wood
1916410316 rc: add core/version and put definitions next to implementations 2018-11-05 15:44:40 +00:00
Nick Craig-Wood
dddfbec92a cmd/version: factor version number parsing routines into fs/version 2018-11-05 15:44:40 +00:00
Nick Craig-Wood
75a88de55c rc/rcserver: with --rc-files if auth set, pass on to URL opened
If `--rc-user` or `--rc-pass` is set then the URL that is opened with
`--rc-files` will have the authorization in the URL in the
`http://user:pass@localhost/` style.
2018-11-05 15:44:40 +00:00
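Below is a minimal Go sketch (not rclone code) of the URL form described above; the credentials and the default rc port 5572 are illustrative assumptions.

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Build a URL in the http://user:pass@localhost/ style that the browser
	// is opened with when --rc-user/--rc-pass are set (hypothetical values).
	u := url.URL{
		Scheme: "http",
		User:   url.UserPassword("user", "pass"), // from --rc-user / --rc-pass
		Host:   "localhost:5572",                 // default rc listen address
		Path:   "/",
	}
	fmt.Println(u.String()) // http://user:pass@localhost:5572/
}
```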
Nick Craig-Wood
2466f4d152 sync: add rc commands for sync/copy/move 2018-11-05 15:44:40 +00:00
Nick Craig-Wood
39283c8a35 operations: implement operations remote control commands 2018-11-05 15:44:40 +00:00
Nick Craig-Wood
46c2f55545 copyurl: factor code into operations and write test 2018-11-04 20:42:57 +00:00
Nick Craig-Wood
fc2afcbcbd lsjson: factor internals of lsjson command into operations 2018-11-04 20:42:57 +00:00
Nick Craig-Wood
fa0a9653d2 rc: methods marked as AuthRequired need auth unless --rc-no-auth
Methods which can read or mutate external storage will require
authorisation - enforce this.  This can be overridden by `--rc-no-auth`.
2018-11-04 20:42:57 +00:00
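As a hedged illustration of what an authenticated rc call looks like from a client, here is a self-contained Go sketch that POSTs JSON with HTTP basic auth; the method name, credentials and parameters are assumptions, not taken from this changeset.

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// Call an rc method that needs auth (method name and params are hypothetical).
	body := bytes.NewBufferString(`{"fs": "remote:", "remote": ""}`)
	req, err := http.NewRequest("POST", "http://localhost:5572/operations/list", body)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.SetBasicAuth("user", "pass") // matches --rc-user / --rc-pass on the server
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = resp.Body.Close() }()
	out, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out))
}
```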
Nick Craig-Wood
181267e20e cmd/rc: add --user and --pass flags and interpret --rc-user, --rc-pass, --rc-addr 2018-11-04 20:42:57 +00:00
Nick Craig-Wood
75e8ea383c rc: implement rc.PutCachedFs for prefilling the remote cache 2018-11-04 20:42:57 +00:00
Nick Craig-Wood
8c8b58a7de rc: expire remote cache and fix tests under race detector 2018-11-04 20:42:57 +00:00
Nick Craig-Wood
b961e07c57 rc: ensure rclone fails to start up if the --rc port is in use already 2018-11-04 15:11:51 +00:00
Nick Craig-Wood
0b80d1481a cache: make tests not start an rc but use the internal framework 2018-11-04 15:11:51 +00:00
Nick Craig-Wood
89550e7121 rcserver: serve directories as well as files 2018-11-04 15:11:51 +00:00
Nick Craig-Wood
370c218c63 cmd/http: factor directory serving routines into httplib/serve and write tests 2018-11-04 12:46:44 +00:00
Nick Craig-Wood
b972dcb0ae rc: implement options/blocks,get,set and register options 2018-11-03 11:32:00 +00:00
Nick Craig-Wood
0bfa9811f7 rc: factor server code into rcserver and implement serving objects
If a GET or HEAD request is received with a URL parameter of fs then
it will be served from that remote.
2018-11-03 11:32:00 +00:00
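A hedged Go sketch of such a request follows; the remote name, path layout and file name are assumptions used only to illustrate the fs URL parameter.

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"net/url"
	"os"
)

func main() {
	// GET an object from the rc server, naming the remote in the fs parameter
	// (the remote and file name are hypothetical).
	q := url.Values{}
	q.Set("fs", "remote:path/to/dir")
	resp, err := http.Get("http://localhost:5572/file.txt?" + q.Encode())
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = resp.Body.Close() }()
	fmt.Println(resp.Status)
	_, _ = io.Copy(os.Stdout, resp.Body) // object contents, if found
}
```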
Nick Craig-Wood
aa9b2c31f4 serve/restic: factor object serving into cmd/httplib/serve 2018-11-03 11:32:00 +00:00
Nick Craig-Wood
cff75db6a4 rcd: implement new command just to serve the remote control API 2018-11-03 11:32:00 +00:00
Nick Craig-Wood
75252e4a89 rc: add --rc-files flag to serve files on the rc http server
This enables building a browser based UI for rclone
2018-11-03 11:32:00 +00:00
Nick Craig-Wood
2089405e1b fs/rc: add more infrastructure to help writing rc functions
- Fs cache for rc commands
- Helper functions for parsing the input
- Reshape command for manipulating JSON blobs
- Background Job starting, control, query and expiry
2018-11-02 17:32:20 +00:00
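One plausible reading of the "Reshape command for manipulating JSON blobs" above is a JSON round trip between untyped parameter maps and typed structs; the sketch below illustrates that idea only and is not the rc package's actual API.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// reshape marshals in to JSON and unmarshals the result into out,
// converting between a loosely typed map and a typed struct.
func reshape(out interface{}, in interface{}) error {
	b, err := json.Marshal(in)
	if err != nil {
		return err
	}
	return json.Unmarshal(b, out)
}

type options struct {
	Transfers int    `json:"transfers"`
	LogLevel  string `json:"logLevel"`
}

func main() {
	params := map[string]interface{}{"transfers": 8, "logLevel": "INFO"} // hypothetical input
	var opt options
	if err := reshape(&opt, params); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", opt) // {Transfers:8 LogLevel:INFO}
}
```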
Nick Craig-Wood
a379eec9d9 fstest/mockfs: create mock fs.Fs for testing 2018-11-02 17:32:20 +00:00
Nick Craig-Wood
45d5339fcb cmd/rc: add --json flag for structured JSON input 2018-11-02 17:32:20 +00:00
Nick Craig-Wood
bb5637d46a serve http, webdav, restic: ensure rclone exits if the port is in use 2018-11-02 17:32:20 +00:00
Nick Craig-Wood
1f05d5bf4a delete: clarify that it only deletes files not directories 2018-11-02 17:07:45 +00:00
HerrH
ff87da9c3b Added some more links for easier finding
Expanded the Installation & Docs section with links to the website and added a link to the full list of storage providers and features.
2018-11-02 16:56:20 +00:00
ssaqua
3d81b75f44 dedupe: check for existing filename before renaming a dupe file 2018-11-02 16:51:52 +00:00
Nick Craig-Wood
baba6d67e6 s3: set ACL for server side copies to that provided by the user - fixes #2691
Before this change the ACL for objects which were server side copied
was left at the default "private" setting. S3 doesn't copy the ACL
from the source when you copy an object; you have to set it afresh,
which is what this change does.
2018-11-02 16:22:31 +00:00
Nick Craig-Wood
04c0564fe2 Add Ralf Hemberger to contributors 2018-11-02 09:53:23 +00:00
Ralf Hemberger
91cfdb81f5 change spaces to tab 2018-11-02 09:50:34 +00:00
Ralf Hemberger
deae7bf33c WebDav - Add RFC3339 date format - fixes #2712 2018-11-02 09:50:34 +00:00
Henning Surmeier
04a0da1f92 ncdu: remove option ('d' key)
delete files by pressing 'd' in the ncdu listing

GUI Improvements:
Boxes now have a border around them
Boxes can ask questions and allow the selection of options. The
selected option will be given to the UI.boxMenuHandler function.

Fixes #2571
2018-10-28 20:44:03 +00:00
Henning Surmeier
9486df0226 ncdu/scan: remove option for memory representation
Remove files/directories from the in memory structs of the cloud
directory. Size and Count will be recalculated and populated upwards
to the parent directories.
2018-10-28 20:44:03 +00:00
Nick Craig-Wood
948a5d25c2 operations: Fix Purge and Rmdirs when dir is not empty
Before this change, Purge on the fallback path would try to delete
directories starting from the root rather than the dir passed in.
Rmdirs would also attempt to delete the root.
2018-10-27 11:51:17 +01:00
Nick Craig-Wood
f7c31cd210 Add Florian Gamboeck to contributors 2018-10-27 00:28:11 +01:00
Florian Gamboeck
696e7b2833 backend/cache: Print correct info about Cache Writes 2018-10-27 00:27:47 +01:00
Anagh Kumar Baranwal
e76cf1217f Added docs to check for key generation on Mega
Signed-off-by: Anagh Kumar Baranwal <anaghk.dos@gmail.com>
2018-10-25 22:49:21 +01:00
Nick Craig-Wood
543e37f662 Require go1.8 for compilation 2018-10-25 17:06:33 +01:00
Nick Craig-Wood
c514cb752d vendor: update to latest versions of everything 2018-10-25 17:06:33 +01:00
Nick Craig-Wood
c0ca93ae6f opendrive: fix retries of upload chunks - fixes #2646
Before this change, upload chunks were being emptied on retry.  This
change introduces a RepeatableReader to fix the problem.
2018-10-25 11:50:38 +01:00
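To illustrate the problem and the fix, here is a small self-contained Go sketch (not the opendrive backend code): a chunk read straight from the stream cannot be re-read after a failed attempt, but a buffered chunk can be rewound before each retry.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

var attempts int

// upload consumes the chunk and fails on the first attempt only.
func upload(chunk io.Reader) error {
	attempts++
	_, _ = io.Copy(ioutil.Discard, chunk)
	if attempts == 1 {
		return fmt.Errorf("temporary error")
	}
	return nil
}

func main() {
	in := strings.NewReader("chunk data from the upload stream") // stand-in for the source
	buf := make([]byte, 16)
	n, _ := io.ReadFull(in, buf)
	chunk := bytes.NewReader(buf[:n]) // buffered copy of this chunk, so it can be re-read

	for try := 0; try < 3; try++ {
		_, _ = chunk.Seek(0, io.SeekStart) // rewind before every attempt, as on a retry
		if err := upload(chunk); err != nil {
			fmt.Println("retrying after:", err)
			continue
		}
		fmt.Println("chunk uploaded after", attempts, "attempts")
		break
	}
}
```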
Nick Craig-Wood
38a89d49ae fstest/test_all: tidy HTML report
- link test number to online copy
- style links
- attempt to make a nicer colour scheme
2018-10-25 11:33:17 +01:00
Anagh Kumar Baranwal
6531126eb2 Fixes the rc docs creation
Signed-off-by: Anagh Kumar Baranwal <anaghk.dos@gmail.com>
2018-10-25 11:29:59 +01:00
Nick Craig-Wood
25d0e59ef8 fstest/test_all: make sure Version is correct in build 2018-10-25 08:36:09 +01:00
Nick Craig-Wood
b0db08fd2b fstest/test_all: constrain to go1.10 and above 2018-10-24 21:33:42 +01:00
Nick Craig-Wood
07addf74fd fstest/test_all: upload a copy of the report to "current" 2018-10-24 12:21:07 +01:00
Nick Craig-Wood
52c7c738ca fstest/test_all: limit concurrency and run tests in random order 2018-10-24 10:46:58 +01:00
Nick Craig-Wood
5c32b32011 fstest/test_all: fix directories that tests are run in
- Don't build a binary for backend tests
- Run tests in their relevant directories
2018-10-23 17:31:11 +01:00
Nick Craig-Wood
fe61cff079 crypt: ensure integration tests run correctly when -remote is set 2018-10-23 17:12:38 +01:00
Nick Craig-Wood
fbab1e55bb fstest/test_all: adapt to nested test definitions 2018-10-23 16:56:35 +01:00
Nick Craig-Wood
1bfd07567e fstest/test_all: add oneonly flag to only run one test per backend if required 2018-10-23 14:07:48 +01:00
Nick Craig-Wood
f97c4c8d9d fstest/test_all: rework integration tests to improve output
- Make integration tests use a config file
- Output individual logs for each test
- Make HTML report and open browser
- Optionally email and upload results
2018-10-23 14:07:48 +01:00
Anagh Kumar Baranwal
a3c55462a8 Set python version explicitly to 2 to avoid issues on systems where
the default python version is `3`

Signed-off-by: Anagh Kumar Baranwal <anaghk.dos@gmail.com>
2018-10-23 12:14:52 +01:00
Anagh Kumar Baranwal
bbb9a504a8 Added docs to use the -P/--progress flag for real time statistics
Signed-off-by: Anagh Kumar Baranwal <anaghk.dos@gmail.com>
2018-10-23 12:14:52 +01:00
Jon Fautley
dedc7d885c sftp: Ensure file hash checking is really disabled 2018-10-23 12:03:50 +01:00
Nick Craig-Wood
c5ac96e9e7 Make --files-from only read the objects specified and don't scan directories
Before this change using --files-from would scan all the directories
that the files could possibly be in, causing rclone to do more work
than was necessary.

After this change, rclone constructs an in memory tree using the
--fast-list mechanism but from all of the files in the --files-from
list and without scanning any directories.

Any objects that are not found in the --files-from list are ignored
silently.

This mechanism is used for sync/copy/move (march) and all of the
listing commands ls/lsf/md5sum/etc (walk).
2018-10-20 18:13:31 +01:00
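A self-contained Go sketch of the listing approach described above follows: the entries of a hypothetical --files-from list are grouped by parent directory in memory, so no directory on the remote needs to be scanned. It illustrates the idea only and is not the rclone implementation.

```go
package main

import (
	"fmt"
	"path"
	"sort"
)

func main() {
	filesFrom := []string{ // hypothetical --files-from contents
		"docs/readme.txt",
		"docs/changelog.txt",
		"data/2018/report.csv",
	}

	// Build an in-memory "tree": directory -> files directly inside it.
	tree := map[string][]string{}
	for _, f := range filesFrom {
		dir := path.Dir(f)
		tree[dir] = append(tree[dir], path.Base(f))
	}

	dirs := make([]string, 0, len(tree))
	for d := range tree {
		dirs = append(dirs, d)
	}
	sort.Strings(dirs)
	for _, d := range dirs {
		fmt.Println(d+"/", tree[d]) // only these entries are considered, nothing else is listed
	}
}
```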
Nick Craig-Wood
9959c5f17f webdav: add Content-Type to PUT requests - fixes #2664 2018-10-18 13:18:24 +01:00
Nick Craig-Wood
e8d0a363fc opendrive: fix transfer of files with + and & in - fixes #2657 2018-10-17 14:22:04 +01:00
albertony
935b7c1c0f jottacloud: fix bug in --fast-list handling of empty folders - fixes #2650 2018-10-17 13:58:36 +01:00
Fabian Möller
15ce0ae57c fstests: fix maximum tested size in TestFsPutChunked
Before this it was possible that maxChunkSize was incorrectly set to 200.
2018-10-16 11:50:47 +02:00
Nick Craig-Wood
67703a73de Start v1.44-DEV development 2018-10-15 12:33:27 +01:00
340 changed files with 28689 additions and 16553 deletions

View File

@@ -4,7 +4,6 @@ dist: trusty
os:
- linux
go:
- 1.7.x
- 1.8.x
- 1.9.x
- 1.10.x

View File

@@ -123,6 +123,13 @@ but they can be run against any of the remotes.
cd fs/operations
go test -v -remote TestDrive:
If you want to use the integration test framework to run these tests
all together with an HTML report and test retries then from the
project root:
go install github.com/ncw/rclone/fstest/test_all
test_all -backend drive
If you want to run all the integration tests against all the remotes,
then change into the project root and run
@@ -343,7 +350,7 @@ Unit tests
Integration tests
* Add your fs to `fstest/test_all/test_all.go`
* Add your backend to `fstest/test_all/config.yaml`
* Make sure integration tests pass with
* `cd fs/operations`
* `go test -v -remote TestRemote:`

View File

@@ -50,10 +50,9 @@ version:
# Full suite of integration tests
test: rclone
go install github.com/ncw/rclone/fstest/test_all
-go test -v -count 1 -timeout 20m $(BUILDTAGS) $(GO_FILES) 2>&1 | tee test.log
-test_all github.com/ncw/rclone/fs/operations github.com/ncw/rclone/fs/sync 2>&1 | tee fs/test_all.log
@echo "Written logs in test.log and fs/test_all.log"
go install --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/ncw/rclone/fstest/test_all
-test_all 2>&1 | tee test_all.log
@echo "Written logs in test_all.log"
# Quick test
quicktest:

View File

@@ -55,6 +55,8 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
* The local filesystem [:page_facing_up:](https://rclone.org/local/)
Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
## Features
@@ -71,10 +73,15 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
## Installation & documentation
Please see the rclone website for installation, usage, documentation,
changelog and configuration walkthroughs.
Please see the [rclone website](https://rclone.org/) for:
* https://rclone.org/
* [Installation](https://rclone.org/install/)
* [Documentation & configuration](https://rclone.org/docs/)
* [Changelog](https://rclone.org/changelog/)
* [FAQ](https://rclone.org/faq/)
* [Storage providers](https://rclone.org/overview/)
* [Forum](https://forum.rclone.org/)
* ...and more
## Downloads

View File

@@ -32,6 +32,21 @@ Early in the next release cycle update the vendored dependencies
* git add new files
* git commit -a -v
If `make update` fails with errors like this:
```
# github.com/cpuguy83/go-md2man/md2man
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:11:16: undefined: blackfriday.EXTENSION_NO_INTRA_EMPHASIS
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:12:16: undefined: blackfriday.EXTENSION_TABLES
```
Can be fixed with
* GO111MODULE=on go get -u github.com/russross/blackfriday@v1.5.2
* GO111MODULE=on go mod tidy
* GO111MODULE=on go mod vendor
Making a point release. If rclone needs a point release due to some
horrendous bug, then
* git branch v1.XX v1.XX-fixes

View File

@@ -22,7 +22,7 @@ import (
"sync"
"time"
"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config/configmap"
@@ -1368,7 +1368,7 @@ func (o *Object) SetTier(tier string) error {
blob := o.getBlobReference()
ctx := context.Background()
err := o.fs.pacer.Call(func() (bool, error) {
_, err := blob.SetTier(ctx, desiredAccessTier)
_, err := blob.SetTier(ctx, desiredAccessTier, azblob.LeaseAccessConditions{})
return o.fs.shouldRetry(err)
})

View File

@@ -471,7 +471,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
fs.Infof(name, "Chunk Clean Interval: %v", f.opt.ChunkCleanInterval)
fs.Infof(name, "Workers: %v", f.opt.TotalWorkers)
fs.Infof(name, "File Age: %v", f.opt.InfoAge)
if !f.opt.StoreWrites {
if f.opt.StoreWrites {
fs.Infof(name, "Cache Writes: enabled")
}

View File

@@ -5,14 +5,12 @@ package cache_test
import (
"bytes"
"encoding/base64"
"encoding/json"
goflag "flag"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"net/http"
"os"
"path"
"path/filepath"
@@ -32,11 +30,11 @@ import (
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/object"
"github.com/ncw/rclone/fs/rc"
"github.com/ncw/rclone/fs/rc/rcflags"
"github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/vfs"
"github.com/ncw/rclone/vfs/vfsflags"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -692,8 +690,8 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
}
func TestInternalChangeSeenAfterRc(t *testing.T) {
rcflags.Opt.Enabled = true
rc.Start(&rcflags.Opt)
cacheExpire := rc.Calls.Get("cache/expire")
assert.NotNil(t, cacheExpire)
id := fmt.Sprintf("ticsarc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
@@ -726,13 +724,8 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
require.NoError(t, err)
require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
m := make(map[string]string)
res, err := http.Post(fmt.Sprintf("http://localhost:5572/cache/expire?remote=%s", "data.bin"), "application/json; charset=utf-8", strings.NewReader(""))
require.NoError(t, err)
defer func() {
_ = res.Body.Close()
}()
_ = json.NewDecoder(res.Body).Decode(&m)
// Call the rc function
m, err := cacheExpire.Fn(rc.Params{"remote": "data.bin"})
require.Contains(t, m, "status")
require.Contains(t, m, "message")
require.Equal(t, "ok", m["status"])
@@ -752,13 +745,8 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
li1, err = runInstance.list(t, rootFs, "")
require.Len(t, li1, 1)
m = make(map[string]string)
res2, err := http.Post("http://localhost:5572/cache/expire?remote=/", "application/json; charset=utf-8", strings.NewReader(""))
require.NoError(t, err)
defer func() {
_ = res2.Body.Close()
}()
_ = json.NewDecoder(res2.Body).Decode(&m)
// Call the rc function
m, err = cacheExpire.Fn(rc.Params{"remote": "/"})
require.Contains(t, m, "status")
require.Contains(t, m, "message")
require.Equal(t, "ok", m["status"])

View File

@@ -7,13 +7,30 @@ import (
"testing"
"github.com/ncw/rclone/backend/crypt"
_ "github.com/ncw/rclone/backend/drive" // for integration tests
_ "github.com/ncw/rclone/backend/local"
_ "github.com/ncw/rclone/backend/swift" // for integration tests
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
t.Skip("Skipping as -remote not set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*crypt.Object)(nil),
})
}
// TestStandard runs integration tests against the remote
func TestStandard(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
name := "TestCrypt"
fstests.Run(t, &fstests.Opt{
@@ -30,6 +47,9 @@ func TestStandard(t *testing.T) {
// TestOff runs integration tests against the remote
func TestOff(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-off")
name := "TestCrypt2"
fstests.Run(t, &fstests.Opt{
@@ -46,6 +66,9 @@ func TestOff(t *testing.T) {
// TestObfuscate runs integration tests against the remote
func TestObfuscate(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
name := "TestCrypt3"
fstests.Run(t, &fstests.Opt{

View File

@@ -464,12 +464,12 @@ func (f *Fs) listFileDir(remoteStartPath string, startFolder *api.JottaFolder, f
if folder.Deleted {
return nil
}
folderPath := path.Join(folder.Path, folder.Name)
remoteDirLength := len(folderPath) - pathPrefixLength
folderPath := restoreReservedChars(path.Join(folder.Path, folder.Name))
folderPathLength := len(folderPath)
var remoteDir string
if remoteDirLength > 0 {
remoteDir = restoreReservedChars(folderPath[pathPrefixLength+1:])
if remoteDirLength > startPathLength {
if folderPathLength > pathPrefixLength {
remoteDir = folderPath[pathPrefixLength+1:]
if folderPathLength > startPathLength {
d := fs.NewDir(remoteDir, time.Time(folder.ModifiedAt))
err := fn(d)
if err != nil {

View File

@@ -6,6 +6,7 @@ import (
"io"
"mime/multipart"
"net/http"
"net/url"
"path"
"strconv"
"strings"
@@ -20,6 +21,7 @@ import (
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/dircache"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/readers"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
)
@@ -930,8 +932,9 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
// resp.Body.Close()
// fs.Debugf(nil, "PostOpen: %#v", openResponse)
// 1 MB chunks size
// 10 MB chunks size
chunkSize := int64(1024 * 1024 * 10)
buf := make([]byte, int(chunkSize))
chunkOffset := int64(0)
remainingBytes := size
chunkCounter := 0
@@ -944,14 +947,19 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
remainingBytes -= currentChunkSize
fs.Debugf(o, "Uploading chunk %d, size=%d, remain=%d", chunkCounter, currentChunkSize, remainingBytes)
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, currentChunkSize)
err = o.fs.pacer.Call(func() (bool, error) {
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, err
}
var formBody bytes.Buffer
w := multipart.NewWriter(&formBody)
fw, err := w.CreateFormFile("file_data", o.remote)
if err != nil {
return false, err
}
if _, err = io.CopyN(fw, in, currentChunkSize); err != nil {
if _, err = io.Copy(fw, chunk); err != nil {
return false, err
}
// Add session_id
@@ -1082,7 +1090,7 @@ func (o *Object) readMetaData() (err error) {
err = o.fs.pacer.Call(func() (bool, error) {
opts := rest.Opts{
Method: "GET",
Path: "/folder/itembyname.json/" + o.fs.session.SessionID + "/" + directoryID + "?name=" + rest.URLPathEscape(replaceReservedChars(leaf)),
Path: "/folder/itembyname.json/" + o.fs.session.SessionID + "/" + directoryID + "?name=" + url.QueryEscape(replaceReservedChars(leaf)),
}
resp, err = o.fs.srv.CallJSON(&opts, nil, &folderList)
return o.fs.shouldRetry(resp, err)

View File

@@ -448,7 +448,12 @@ func init() {
Provider: "!AWS,IBMCOS",
}, {
Name: "acl",
Help: "Canned ACL used when creating buckets and/or storing objects in S3.\nFor more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl",
Help: `Canned ACL used when creating buckets and storing or copying objects.
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
Note that this ACL is applied when server side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.`,
Examples: []fs.OptionExample{{
Value: "private",
Help: "Owner gets FULL_CONTROL. No one else has access rights (default).",
@@ -1286,6 +1291,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
source := pathEscape(srcFs.bucket + "/" + srcFs.root + srcObj.remote)
req := s3.CopyObjectInput{
Bucket: &f.bucket,
ACL: &f.opt.ACL,
Key: &key,
CopySource: &source,
MetadataDirective: aws.String(s3.MetadataDirectiveCopy),

View File

@@ -769,6 +769,10 @@ func (o *Object) Hash(r hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
if o.fs.opt.DisableHashCheck {
return "", nil
}
c, err := o.fs.getSftpConnection()
if err != nil {
return "", errors.Wrap(err, "Hash get SFTP connection")

View File

@@ -145,6 +145,7 @@ var timeFormats = []string{
time.RFC1123Z, // Fri, 05 Jan 2018 14:14:38 +0000 (as used by mydrive.ch)
time.UnixDate, // Wed May 17 15:31:58 UTC 2017 (as used in an internal server)
noZerosRFC1123, // Fri, 7 Sep 2018 08:49:58 GMT (as used by server in #2574)
time.RFC3339, // Wed, 31 Oct 2018 13:57:11 CET (as used by komfortcloud.de)
}
// UnmarshalXML turns XML into a Time

View File

@@ -968,6 +968,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
Body: in,
NoResponse: true,
ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
ContentType: fs.MimeType(src),
}
if o.fs.useOCMtime {
opts.ExtraHeaders = map[string]string{

View File

@@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python2
"""
Make backend documentation
"""

View File

@@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python2
"""
Make single page versions of the documentation for release and
conversion into man pages etc.

View File

@@ -4,18 +4,20 @@
set -e
go install
mkdir -p /tmp/rclone_cache_test
mkdir -p /tmp/rclone/cache_test
mkdir -p /tmp/rclone/rc_mount
export RCLONE_CONFIG_RCDOCS_TYPE=cache
export RCLONE_CONFIG_RCDOCS_REMOTE=/tmp/rclone/cache_test
rclone -q --rc mount rcdocs: /mnt/tmp/ &
rclone -q --rc mount rcdocs: /tmp/rclone/rc_mount &
sleep 0.5
rclone rc > /tmp/z.md
fusermount -z -u /mnt/tmp/
rclone rc > /tmp/rclone/z.md
fusermount -u -z /tmp/rclone/rc_mount > /dev/null 2>&1 || umount /tmp/rclone/rc_mount
awk '
BEGIN {p=1}
/^<!--- autogenerated start/ {print;system("cat /tmp/z.md");p=0}
/^<!--- autogenerated start/ {print;system("cat /tmp/rclone/z.md");p=0}
/^<!--- autogenerated stop/ {p=1}
p' docs/content/rc.md > /tmp/rc.md
p' docs/content/rc.md > /tmp/rclone/rc.md
mv /tmp/rc.md docs/content/rc.md
mv /tmp/rclone/rc.md docs/content/rc.md
rm -rf /tmp/rclone

View File

@@ -43,6 +43,7 @@ import (
_ "github.com/ncw/rclone/cmd/purge"
_ "github.com/ncw/rclone/cmd/rc"
_ "github.com/ncw/rclone/cmd/rcat"
_ "github.com/ncw/rclone/cmd/rcd"
_ "github.com/ncw/rclone/cmd/reveal"
_ "github.com/ncw/rclone/cmd/rmdir"
_ "github.com/ncw/rclone/cmd/rmdirs"

View File

@@ -29,8 +29,8 @@ import (
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fspath"
fslog "github.com/ncw/rclone/fs/log"
"github.com/ncw/rclone/fs/rc"
"github.com/ncw/rclone/fs/rc/rcflags"
"github.com/ncw/rclone/fs/rc/rcserver"
"github.com/ncw/rclone/lib/atexit"
"github.com/pkg/errors"
"github.com/spf13/cobra"
@@ -352,8 +352,11 @@ func initConfig() {
// Write the args for debug purposes
fs.Debugf("rclone", "Version %q starting with parameters %q", fs.Version, os.Args)
// Start the remote control if configured
rc.Start(&rcflags.Opt)
// Start the remote control server if configured
_, err = rcserver.Start(&rcflags.Opt)
if err != nil {
log.Fatalf("Failed to start remote control: %v", err)
}
// Setup CPU profiling if desired
if *cpuProfile != "" {

View File

@@ -1,8 +1,11 @@
package config
import (
"errors"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/rc"
"github.com/spf13/cobra"
)
@@ -93,7 +96,16 @@ you would do:
`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(2, 256, command, args)
return config.CreateRemote(args[0], args[1], args[2:])
in, err := argsToMap(args[2:])
if err != nil {
return err
}
err = config.CreateRemote(args[0], args[1], in)
if err != nil {
return err
}
config.ShowRemote(args[0])
return nil
},
}
@@ -110,7 +122,16 @@ For example to update the env_auth field of a remote of name myremote you would
`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(3, 256, command, args)
return config.UpdateRemote(args[0], args[1:])
in, err := argsToMap(args[1:])
if err != nil {
return err
}
err = config.UpdateRemote(args[0], in)
if err != nil {
return err
}
config.ShowRemote(args[0])
return nil
},
}
@@ -136,6 +157,29 @@ For example to set password of a remote of name myremote you would do:
`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(3, 256, command, args)
return config.PasswordRemote(args[0], args[1:])
in, err := argsToMap(args[1:])
if err != nil {
return err
}
err = config.PasswordRemote(args[0], in)
if err != nil {
return err
}
config.ShowRemote(args[0])
return nil
},
}
// This takes a list of arguments in key value key value form and
// converts it into a map
func argsToMap(args []string) (out rc.Params, err error) {
if len(args)%2 != 0 {
return nil, errors.New("found key without value")
}
out = rc.Params{}
// Set the config
for i := 0; i < len(args); i += 2 {
out[args[i]] = args[i+1]
}
return out, nil
}

View File

@@ -50,6 +50,8 @@ If you are familiar with ` + "`rsync`" + `, rclone always works as if you had
written a trailing / - meaning "copy the contents of this directory".
This applies to all commands and whether you are talking about the
source or destination.
**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)

View File

@@ -40,6 +40,8 @@ This will:
This doesn't transfer unchanged files, testing by size and
modification time or MD5SUM. It doesn't delete files from the
destination.
**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)

View File

@@ -1,9 +1,6 @@
package copyurl
import (
"net/http"
"time"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs/operations"
"github.com/spf13/cobra"
@@ -25,14 +22,7 @@ without saving it in tmp storage.
fsdst, dstFileName := cmd.NewFsDstFile(args[1:])
cmd.Run(true, true, command, func() error {
resp, err := http.Get(args[0])
if err != nil {
return err
}
_, err = operations.RcatSize(fsdst, dstFileName, resp.Body, resp.ContentLength, time.Now())
_, err := operations.CopyURL(fsdst, dstFileName, args[0])
return err
})
},

View File

@@ -14,9 +14,13 @@ var commandDefintion = &cobra.Command{
Use: "delete remote:path",
Short: `Remove the contents of path.`,
Long: `
Remove the contents of path. Unlike ` + "`" + `purge` + "`" + ` it obeys include/exclude
Remove the files in path. Unlike ` + "`" + `purge` + "`" + ` it obeys include/exclude
filters so can be used to selectively delete files.
` + "`" + `rclone delete` + "`" + ` only deletes objects but leaves the directory structure
alone. If you want to delete a directory and all of its contents use
` + "`" + `rclone purge` + "`" + `
Eg delete all files bigger than 100MBytes
Check what would be deleted first (use either)

View File

@@ -3,62 +3,26 @@ package lsjson
import (
"encoding/json"
"fmt"
"log"
"os"
"path"
"time"
"github.com/ncw/rclone/backend/crypt"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/cmd/ls/lshelp"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/operations"
"github.com/ncw/rclone/fs/walk"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
var (
recurse bool
showHash bool
showEncrypted bool
showOrigIDs bool
noModTime bool
opt operations.ListJSONOpt
)
func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&recurse, "recursive", "R", false, "Recurse into the listing.")
commandDefintion.Flags().BoolVarP(&showHash, "hash", "", false, "Include hashes in the output (may take longer).")
commandDefintion.Flags().BoolVarP(&noModTime, "no-modtime", "", false, "Don't read the modification time (can speed things up).")
commandDefintion.Flags().BoolVarP(&showEncrypted, "encrypted", "M", false, "Show the encrypted names.")
commandDefintion.Flags().BoolVarP(&showOrigIDs, "original", "", false, "Show the ID of the underlying Object.")
}
// lsJSON in the struct which gets marshalled for each line
type lsJSON struct {
Path string
Name string
Encrypted string `json:",omitempty"`
Size int64
MimeType string `json:",omitempty"`
ModTime Timestamp //`json:",omitempty"`
IsDir bool
Hashes map[string]string `json:",omitempty"`
ID string `json:",omitempty"`
OrigID string `json:",omitempty"`
}
// Timestamp a time in RFC3339 format with Nanosecond precision secongs
type Timestamp time.Time
// MarshalJSON turns a Timestamp into JSON
func (t Timestamp) MarshalJSON() (out []byte, err error) {
tt := time.Time(t)
if tt.IsZero() {
return []byte(`""`), nil
}
return []byte(`"` + tt.Format(time.RFC3339Nano) + `"`), nil
commandDefintion.Flags().BoolVarP(&opt.Recurse, "recursive", "R", false, "Recurse into the listing.")
commandDefintion.Flags().BoolVarP(&opt.ShowHash, "hash", "", false, "Include hashes in the output (may take longer).")
commandDefintion.Flags().BoolVarP(&opt.NoModTime, "no-modtime", "", false, "Don't read the modification time (can speed things up).")
commandDefintion.Flags().BoolVarP(&opt.ShowEncrypted, "encrypted", "M", false, "Show the encrypted names.")
commandDefintion.Flags().BoolVarP(&opt.ShowOrigIDs, "original", "", false, "Show the ID of the underlying Object.")
}
var commandDefintion = &cobra.Command{
@@ -104,107 +68,27 @@ can be processed line by line as each item is written one to a line.
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
var cipher crypt.Cipher
if showEncrypted {
fsInfo, _, _, config, err := fs.ConfigFs(args[0])
if err != nil {
log.Fatalf(err.Error())
}
if fsInfo.Name != "crypt" {
log.Fatalf("The remote needs to be of type \"crypt\"")
}
cipher, err = crypt.NewCipher(config)
if err != nil {
log.Fatalf(err.Error())
}
}
cmd.Run(false, false, command, func() error {
fmt.Println("[")
first := true
err := walk.Walk(fsrc, "", false, operations.ConfigMaxDepth(recurse), func(dirPath string, entries fs.DirEntries, err error) error {
err := operations.ListJSON(fsrc, "", &opt, func(item *operations.ListJSONItem) error {
out, err := json.Marshal(item)
if err != nil {
fs.CountError(err)
fs.Errorf(dirPath, "error listing: %v", err)
return nil
return errors.Wrap(err, "failed to marshal list object")
}
for _, entry := range entries {
item := lsJSON{
Path: entry.Remote(),
Name: path.Base(entry.Remote()),
Size: entry.Size(),
MimeType: fs.MimeTypeDirEntry(entry),
}
if !noModTime {
item.ModTime = Timestamp(entry.ModTime())
}
if cipher != nil {
switch entry.(type) {
case fs.Directory:
item.Encrypted = cipher.EncryptDirName(path.Base(entry.Remote()))
case fs.Object:
item.Encrypted = cipher.EncryptFileName(path.Base(entry.Remote()))
default:
fs.Errorf(nil, "Unknown type %T in listing", entry)
}
}
if do, ok := entry.(fs.IDer); ok {
item.ID = do.ID()
}
if showOrigIDs {
cur := entry
for {
u, ok := cur.(fs.ObjectUnWrapper)
if !ok {
break // not a wrapped object, use current id
}
next := u.UnWrap()
if next == nil {
break // no base object found, use current id
}
cur = next
}
if do, ok := cur.(fs.IDer); ok {
item.OrigID = do.ID()
}
}
switch x := entry.(type) {
case fs.Directory:
item.IsDir = true
case fs.Object:
item.IsDir = false
if showHash {
item.Hashes = make(map[string]string)
for _, hashType := range x.Fs().Hashes().Array() {
hash, err := x.Hash(hashType)
if err != nil {
fs.Errorf(x, "Failed to read hash: %v", err)
} else if hash != "" {
item.Hashes[hashType.String()] = hash
}
}
}
default:
fs.Errorf(nil, "Unknown type %T in listing", entry)
}
out, err := json.Marshal(item)
if err != nil {
return errors.Wrap(err, "failed to marshal list object")
}
if first {
first = false
} else {
fmt.Print(",\n")
}
_, err = os.Stdout.Write(out)
if err != nil {
return errors.Wrap(err, "failed to write to output")
}
if first {
first = false
} else {
fmt.Print(",\n")
}
_, err = os.Stdout.Write(out)
if err != nil {
return errors.Wrap(err, "failed to write to output")
}
return nil
})
if err != nil {
return errors.Wrap(err, "error listing JSON")
return err
}
if !first {
fmt.Println()

View File

@@ -39,6 +39,8 @@ If you want to delete empty source directories after move, use the --delete-empt
**Important**: Since this can cause data loss, test first with the
--dry-run flag.
**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)

View File

@@ -43,6 +43,8 @@ transfer.
**Important**: Since this can cause data loss, test first with the
--dry-run flag.
**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)

View File

@@ -13,6 +13,7 @@ import (
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/cmd/ncdu/scan"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/operations"
termbox "github.com/nsf/termbox-go"
"github.com/pkg/errors"
"github.com/spf13/cobra"
@@ -42,8 +43,11 @@ Here are the keys - press '?' to toggle the help on and off
` + strings.Join(helpText[1:], "\n ") + `
This an homage to the [ncdu tool](https://dev.yorhel.nl/ncdu) but for
rclone remotes. It is missing lots of features at the moment, most
importantly deleting files, but is useful as it stands.
rclone remotes. It is missing lots of features at the moment
but is useful as it stands.
Note that it might take some time to delete big files/folders. The
UI won't respond in the meantime since the deletion is done synchronously.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
@@ -63,6 +67,7 @@ var helpText = []string{
" c toggle counts",
" g toggle graph",
" n,s,C sort by name,size,count",
" d delete file/directory",
" ^L refresh screen",
" ? to toggle help on and off",
" q/ESC/c-C to quit",
@@ -70,24 +75,27 @@ var helpText = []string{
// UI contains the state of the user interface
type UI struct {
f fs.Fs // fs being displayed
fsName string // human name of Fs
root *scan.Dir // root directory
d *scan.Dir // current directory being displayed
path string // path of current directory
showBox bool // whether to show a box
boxText []string // text to show in box
entries fs.DirEntries // entries of current directory
sortPerm []int // order to display entries in after sorting
invSortPerm []int // inverse order
dirListHeight int // height of listing
listing bool // whether listing is in progress
showGraph bool // toggle showing graph
showCounts bool // toggle showing counts
sortByName int8 // +1 for normal, 0 for off, -1 for reverse
sortBySize int8
sortByCount int8
dirPosMap map[string]dirPos // store for directory positions
f fs.Fs // fs being displayed
fsName string // human name of Fs
root *scan.Dir // root directory
d *scan.Dir // current directory being displayed
path string // path of current directory
showBox bool // whether to show a box
boxText []string // text to show in box
boxMenu []string // box menu options
boxMenuButton int
boxMenuHandler func(fs fs.Fs, path string, option int) (string, error)
entries fs.DirEntries // entries of current directory
sortPerm []int // order to display entries in after sorting
invSortPerm []int // inverse order
dirListHeight int // height of listing
listing bool // whether listing is in progress
showGraph bool // toggle showing graph
showCounts bool // toggle showing counts
sortByName int8 // +1 for normal, 0 for off, -1 for reverse
sortBySize int8
sortByCount int8
dirPosMap map[string]dirPos // store for directory positions
}
// Where we have got to in the directory listing
@@ -130,6 +138,54 @@ func Linef(x, y, xmax int, fg, bg termbox.Attribute, spacer rune, format string,
Line(x, y, xmax, fg, bg, spacer, s)
}
// LineOptions Print line of selectable options
func LineOptions(x, y, xmax int, fg, bg termbox.Attribute, options []string, selected int) {
defaultBg := bg
defaultFg := fg
// Print left+right whitespace to center the options
xoffset := ((xmax - x) - lineOptionLength(options)) / 2
for j := x; j < x+xoffset; j++ {
termbox.SetCell(j, y, ' ', fg, bg)
}
for j := xmax - xoffset; j < xmax; j++ {
termbox.SetCell(j, y, ' ', fg, bg)
}
x += xoffset
for i, o := range options {
termbox.SetCell(x, y, ' ', fg, bg)
if i == selected {
bg = termbox.ColorBlack
fg = termbox.ColorWhite
}
termbox.SetCell(x+1, y, '<', fg, bg)
x += 2
// print option text
for _, c := range o {
termbox.SetCell(x, y, c, fg, bg)
x++
}
termbox.SetCell(x, y, '>', fg, bg)
bg = defaultBg
fg = defaultFg
termbox.SetCell(x+1, y, ' ', fg, bg)
x += 2
}
}
func lineOptionLength(o []string) int {
count := 0
for _, i := range o {
count += len(i)
}
return count + 4*len(o) // spacer and arrows <entry>
}
// Box the u.boxText onto the screen
func (u *UI) Box() {
w, h := termbox.Size()
@@ -147,6 +203,15 @@ func (u *UI) Box() {
x := (w - boxWidth) / 2
y := (h - boxHeight) / 2
xmax := x + boxWidth
if len(u.boxMenu) != 0 {
count := lineOptionLength(u.boxMenu)
if x+boxWidth > x+count {
xmax = x + boxWidth
} else {
xmax = x + count
}
}
ymax := y + len(u.boxText)
// draw text
fg, bg := termbox.ColorRed, termbox.ColorWhite
@@ -155,7 +220,43 @@ func (u *UI) Box() {
fg = termbox.ColorBlack
}
// FIXME draw a box around
if len(u.boxMenu) != 0 {
ymax++
LineOptions(x, ymax-1, xmax, fg, bg, u.boxMenu, u.boxMenuButton)
}
// draw top border
for i := y; i < ymax; i++ {
termbox.SetCell(x-1, i, '│', fg, bg)
termbox.SetCell(xmax, i, '│', fg, bg)
}
for j := x; j < xmax; j++ {
termbox.SetCell(j, y-1, '─', fg, bg)
termbox.SetCell(j, ymax, '─', fg, bg)
}
termbox.SetCell(x-1, y-1, '┌', fg, bg)
termbox.SetCell(xmax, y-1, '┐', fg, bg)
termbox.SetCell(x-1, ymax, '└', fg, bg)
termbox.SetCell(xmax, ymax, '┘', fg, bg)
}
func (u *UI) moveBox(to int) {
if len(u.boxMenu) == 0 {
return
}
if to > 0 { // move right
u.boxMenuButton++
} else { // move left
u.boxMenuButton--
}
if u.boxMenuButton >= len(u.boxMenu) {
u.boxMenuButton = len(u.boxMenu) - 1
} else if u.boxMenuButton < 0 {
u.boxMenuButton = 0
}
}
// find the biggest entry in the current listing
@@ -314,6 +415,57 @@ func (u *UI) move(d int) {
u.dirPosMap[u.path] = dirPos
}
func (u *UI) removeEntry(pos int) {
u.d.Remove(pos)
u.setCurrentDir(u.d)
}
// delete the entry at the current position
func (u *UI) delete() {
dirPos := u.sortPerm[u.dirPosMap[u.path].entry]
entry := u.entries[dirPos]
file := false
d, _ := u.d.GetDir(dirPos)
if d == nil {
file = true
}
u.boxMenu = []string{"cancel", "confirm"}
if file {
u.boxMenuHandler = func(f fs.Fs, p string, o int) (string, error) {
if o != 1 {
return "Aborted!", nil
}
err := f.Rmdir(entry.String())
if err != nil {
return "", err
}
u.removeEntry(dirPos)
return "Successfully deleted file!", nil
}
u.popupBox([]string{
"Delete this file?",
u.fsName + entry.String()})
} else {
u.boxMenuHandler = func(f fs.Fs, p string, o int) (string, error) {
if o != 1 {
return "Aborted!", nil
}
err := operations.Purge(f, entry.String())
if err != nil {
return "", err
}
u.removeEntry(dirPos)
return "Successfully purged folder!", nil
}
u.popupBox([]string{
"Purge this directory?",
"ALL files in it will be deleted",
u.fsName + entry.String()})
}
}
// Sort by the configured sort method
type ncduSort struct {
sortPerm []int
@@ -405,6 +557,25 @@ func (u *UI) enter() {
u.setCurrentDir(d)
}
// handles a box option that was selected
func (u *UI) handleBoxOption() {
msg, err := u.boxMenuHandler(u.f, u.path, u.boxMenuButton)
// reset
u.boxMenuButton = 0
u.boxMenu = []string{}
u.boxMenuHandler = nil
if err != nil {
u.popupBox([]string{
"error:",
err.Error(),
})
return
}
u.popupBox([]string{"Finished:", msg})
}
// up goes up to the parent directory
func (u *UI) up() {
if u.d == nil {
@@ -524,8 +695,22 @@ outer:
case termbox.KeyPgup, '=', '+':
u.move(-u.dirListHeight)
case termbox.KeyArrowLeft, 'h':
if u.showBox {
u.moveBox(-1)
break
}
u.up()
case termbox.KeyArrowRight, 'l', termbox.KeyEnter:
case termbox.KeyEnter:
if len(u.boxMenu) > 0 {
u.handleBoxOption()
break
}
u.enter()
case termbox.KeyArrowRight, 'l':
if u.showBox {
u.moveBox(1)
break
}
u.enter()
case 'c':
u.showCounts = !u.showCounts
@@ -537,6 +722,8 @@ outer:
u.toggleSort(&u.sortBySize)
case 'C':
u.toggleSort(&u.sortByCount)
case 'd':
u.delete()
case '?':
u.togglePopupBox(helpText)

View File

@@ -70,6 +70,45 @@ func (d *Dir) Entries() fs.DirEntries {
return append(fs.DirEntries(nil), d.entries...)
}
// Remove removes the i-th entry from the
// in-memory representation of the remote directory
func (d *Dir) Remove(i int) {
d.mu.Lock()
defer d.mu.Unlock()
d.remove(i)
}
// removes the i-th entry from the
// in-memory representation of the remote directory
//
// Call with d.mu held
func (d *Dir) remove(i int) {
size := d.entries[i].Size()
count := int64(1)
subDir, ok := d.getDir(i)
if ok {
size = subDir.size
count = subDir.count
delete(d.dirs, path.Base(subDir.path))
}
d.size -= size
d.count -= count
d.entries = append(d.entries[:i], d.entries[i+1:]...)
dir := d
// populate changed size and count to parent(s)
for parent := d.parent; parent != nil; parent = parent.parent {
parent.mu.Lock()
parent.dirs[path.Base(dir.path)] = dir
parent.size -= size
parent.count -= count
dir = parent
parent.mu.Unlock()
}
}
// gets the directory of the i-th entry
//
// returns nil if it is a file

View File

@@ -19,31 +19,50 @@ import (
)
var (
noOutput = false
url = "http://localhost:5572/"
noOutput = false
url = "http://localhost:5572/"
jsonInput = ""
authUser = ""
authPass = ""
)
func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&noOutput, "no-output", "", noOutput, "If set don't output the JSON result.")
commandDefintion.Flags().StringVarP(&url, "url", "", url, "URL to connect to rclone remote control.")
commandDefintion.Flags().StringVarP(&jsonInput, "json", "", jsonInput, "Input JSON - use instead of key=value args.")
commandDefintion.Flags().StringVarP(&authUser, "user", "", "", "Username to use to rclone remote control.")
commandDefintion.Flags().StringVarP(&authPass, "pass", "", "", "Password to use to connect to rclone remote control.")
}
var commandDefintion = &cobra.Command{
Use: "rc commands parameter",
Short: `Run a command against a running rclone.`,
Long: `
This runs a command against a running rclone. By default it will use
that specified in the --rc-addr command.
This runs a command against a running rclone. Use the --url flag to
specify a non-default URL to connect on. This can be either a
":port" which is taken to mean "http://localhost:port" or a
"host:port" which is taken to mean "http://host:port"
A username and password can be passed in with --user and --pass.
Note that --rc-addr, --rc-user, --rc-pass will be read also for --url,
--user, --pass.
Arguments should be passed in as parameter=value.
The result will be returned as a JSON object by default.
The --json parameter can be used to pass in a JSON blob as an input
instead of key=value arguments. This is the only way of passing in
more complicated values.
Use "rclone rc" to see a list of all possible commands.`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 1E9, command, args)
cmd.Run(false, false, command, func() error {
parseFlags()
if len(args) == 0 {
return list()
}
@@ -52,30 +71,56 @@ Use "rclone rc" to see a list of all possible commands.`,
},
}
// Parse the flags
func parseFlags() {
// set alternates from alternate flags
setAlternateFlag("rc-addr", &url)
setAlternateFlag("rc-user", &authUser)
setAlternateFlag("rc-pass", &authPass)
// If url is just :port then fix it up
if strings.HasPrefix(url, ":") {
url = "localhost" + url
}
// if url is just host:port add http://
if !strings.HasPrefix(url, "http:") && !strings.HasPrefix(url, "https:") {
url = "http://" + url
}
// if url doesn't end with / add it
if !strings.HasSuffix(url, "/") {
url += "/"
}
}
// If the user set flagName set the output to its value
func setAlternateFlag(flagName string, output *string) {
if rcFlag := pflag.Lookup(flagName); rcFlag != nil && rcFlag.Changed {
*output = rcFlag.Value.String()
}
}
// do a call from (path, in) to (out, err).
//
// if err is set, out may be a valid error return or it may be nil
func doCall(path string, in rc.Params) (out rc.Params, err error) {
// Do HTTP request
client := fshttp.NewClient(fs.Config)
url := url
// set the user use --rc-addr as well as --url
if rcAddrFlag := pflag.Lookup("rc-addr"); rcAddrFlag != nil && rcAddrFlag.Changed {
url = rcAddrFlag.Value.String()
if strings.HasPrefix(url, ":") {
url = "localhost" + url
}
url = "http://" + url + "/"
}
if !strings.HasSuffix(url, "/") {
url += "/"
}
url += path
data, err := json.Marshal(in)
if err != nil {
return nil, errors.Wrap(err, "failed to encode JSON")
}
resp, err := client.Post(url, "application/json", bytes.NewBuffer(data))
req, err := http.NewRequest("POST", url, bytes.NewBuffer(data))
if err != nil {
return nil, errors.Wrap(err, "failed to make request")
}
req.Header.Set("Content-Type", "application/json")
if authUser != "" || authPass != "" {
req.SetBasicAuth(authUser, authPass)
}
resp, err := client.Do(req)
if err != nil {
return nil, errors.Wrap(err, "connection failed")
}
@@ -115,13 +160,24 @@ func run(args []string) (err error) {
// parse input
in := make(rc.Params)
for _, param := range args[1:] {
equals := strings.IndexRune(param, '=')
if equals < 0 {
return errors.Errorf("No '=' found in parameter %q", param)
params := args[1:]
if jsonInput == "" {
for _, param := range params {
equals := strings.IndexRune(param, '=')
if equals < 0 {
return errors.Errorf("no '=' found in parameter %q", param)
}
key, value := param[:equals], param[equals+1:]
in[key] = value
}
} else {
if len(params) > 0 {
return errors.New("can't use --json and parameters together")
}
err = json.Unmarshal([]byte(jsonInput), &in)
if err != nil {
return errors.Wrap(err, "bad --json input")
}
key, value := param[:equals], param[equals+1:]
in[key] = value
}
// Do the call
@@ -155,6 +211,11 @@ func list() error {
}
fmt.Printf("### %s: %s\n\n", info["Path"], info["Title"])
fmt.Printf("%s\n\n", info["Help"])
if authRequired := info["AuthRequired"]; authRequired != nil {
if authRequired.(bool) {
fmt.Printf("Authentication is required for this call.\n\n")
}
}
}
return nil
}

cmd/rcd/rcd.go (new file, 49 lines)
View File

@@ -0,0 +1,49 @@
package rcd
import (
"log"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs/rc/rcflags"
"github.com/ncw/rclone/fs/rc/rcserver"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefintion)
}
var commandDefintion = &cobra.Command{
Use: "rcd <path to files to serve>*",
Short: `Run rclone listening to remote control commands only.`,
Long: `
This runs rclone so that it only listens to remote control commands.
This is useful if you are controlling rclone via the rc API.
If you pass in a path to a directory, rclone will serve that directory
for GET requests on the URL passed in. It will also open the URL in
the browser when rclone is run.
See the [rc documentation](/rc/) for more info on the rc flags.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 1, command, args)
if rcflags.Opt.Enabled {
log.Fatalf("Don't supply --rc flag when using rcd")
}
// Start the rc
rcflags.Opt.Enabled = true
if len(args) > 0 {
rcflags.Opt.Files = args[0]
}
s, err := rcserver.Start(&rcflags.Opt)
if err != nil {
log.Fatalf("Failed to start remote control: %v", err)
}
if s == nil {
log.Fatal("rc server not configured")
}
s.Wait()
},
}

View File

@@ -3,6 +3,7 @@ package ftpflags
import (
"github.com/ncw/rclone/cmd/serve/ftp/ftpopt"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/rc"
"github.com/spf13/pflag"
)
@@ -13,6 +14,7 @@ var (
// AddFlagsPrefix adds flags for the ftpopt
func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *ftpopt.Options) {
rc.AddOption("ftp", &Opt)
flags.StringVarP(flagSet, &Opt.ListenAddr, prefix+"addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to.")
flags.StringVarP(flagSet, &Opt.PassivePorts, prefix+"passive-port", "", Opt.PassivePorts, "Passive port range to use.")
flags.StringVarP(flagSet, &Opt.BasicUser, prefix+"user", "", Opt.BasicUser, "User name for authentication.")

View File

@@ -1,8 +1,6 @@
package http
import (
"fmt"
"html/template"
"net/http"
"os"
"path"
@@ -12,9 +10,9 @@ import (
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/cmd/serve/httplib"
"github.com/ncw/rclone/cmd/serve/httplib/httpflags"
"github.com/ncw/rclone/cmd/serve/httplib/serve"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/lib/rest"
"github.com/ncw/rclone/vfs"
"github.com/ncw/rclone/vfs/vfsflags"
"github.com/spf13/cobra"
@@ -46,7 +44,11 @@ control the stats printing.
f := cmd.NewFsSrc(args)
cmd.Run(false, true, command, func() error {
s := newServer(f, &httpflags.Opt)
s.serve()
err := s.Serve()
if err != nil {
return err
}
s.Wait()
return nil
})
},
@@ -54,30 +56,32 @@ control the stats printing.
// server contains everything to run the server
type server struct {
*httplib.Server
f fs.Fs
vfs *vfs.VFS
srv *httplib.Server
}
func newServer(f fs.Fs, opt *httplib.Options) *server {
mux := http.NewServeMux()
s := &server{
f: f,
vfs: vfs.New(f, &vfsflags.Opt),
srv: httplib.NewServer(mux, opt),
Server: httplib.NewServer(mux, opt),
f: f,
vfs: vfs.New(f, &vfsflags.Opt),
}
mux.HandleFunc("/", s.handler)
return s
}
// serve runs the http server - doesn't return
func (s *server) serve() {
err := s.srv.Serve()
// Serve runs the http server in the background.
//
// Use s.Close() and s.Wait() to shutdown server
func (s *server) Serve() error {
err := s.Server.Serve()
if err != nil {
fs.Errorf(s.f, "Opening listener: %v", err)
return err
}
fs.Logf(s.f, "Serving on %s", s.srv.URL())
s.srv.Wait()
fs.Logf(s.f, "Serving on %s", s.URL())
return nil
}
// handler reads incoming requests and dispatches them
@@ -99,62 +103,6 @@ func (s *server) handler(w http.ResponseWriter, r *http.Request) {
}
}
// entry is a directory entry
type entry struct {
remote string
URL string
Leaf string
}
// entries represents a directory
type entries []entry
// addEntry adds an entry to that directory
func (es *entries) addEntry(node interface {
Path() string
Name() string
IsDir() bool
}) {
remote := node.Path()
leaf := node.Name()
urlRemote := leaf
if node.IsDir() {
leaf += "/"
urlRemote += "/"
}
*es = append(*es, entry{remote: remote, URL: rest.URLPathEscape(urlRemote), Leaf: leaf})
}
// indexPage is a directory listing template
var indexPage = `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>{{ .Title }}</title>
</head>
<body>
<h1>{{ .Title }}</h1>
{{ range $i := .Entries }}<a href="{{ $i.URL }}">{{ $i.Leaf }}</a><br />
{{ end }}</body>
</html>
`
// indexTemplate is the instantiated indexPage
var indexTemplate = template.Must(template.New("index").Parse(indexPage))
// indexData is used to fill in the indexTemplate
type indexData struct {
Title string
Entries entries
}
// error returns an http.StatusInternalServerError and logs the error
func internalError(what interface{}, w http.ResponseWriter, text string, err error) {
fs.CountError(err)
fs.Errorf(what, "%s: %v", text, err)
http.Error(w, text+".", http.StatusInternalServerError)
}
// serveDir serves a directory index at dirRemote
func (s *server) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string) {
// List the directory
@@ -163,7 +111,7 @@ func (s *server) serveDir(w http.ResponseWriter, r *http.Request, dirRemote stri
http.Error(w, "Directory not found", http.StatusNotFound)
return
} else if err != nil {
internalError(dirRemote, w, "Failed to list directory", err)
serve.Error(dirRemote, w, "Failed to list directory", err)
return
}
if !node.IsDir() {
@@ -173,28 +121,17 @@ func (s *server) serveDir(w http.ResponseWriter, r *http.Request, dirRemote stri
dir := node.(*vfs.Dir)
dirEntries, err := dir.ReadDirAll()
if err != nil {
internalError(dirRemote, w, "Failed to list directory", err)
serve.Error(dirRemote, w, "Failed to list directory", err)
return
}
var out entries
// Make the entries for display
directory := serve.NewDirectory(dirRemote)
for _, node := range dirEntries {
out.addEntry(node)
directory.AddEntry(node.Path(), node.IsDir())
}
// Account the transfer
accounting.Stats.Transferring(dirRemote)
defer accounting.Stats.DoneTransferring(dirRemote, true)
fs.Infof(dirRemote, "%s: Serving directory", r.RemoteAddr)
err = indexTemplate.Execute(w, indexData{
Entries: out,
Title: fmt.Sprintf("Directory listing of /%s", dirRemote),
})
if err != nil {
internalError(dirRemote, w, "Failed to render template", err)
return
}
directory.Serve(w, r)
}
// serveFile serves a file object at remote
@@ -205,7 +142,7 @@ func (s *server) serveFile(w http.ResponseWriter, r *http.Request, remote string
http.Error(w, "File not found", http.StatusNotFound)
return
} else if err != nil {
internalError(remote, w, "Failed to find file", err)
serve.Error(remote, w, "Failed to find file", err)
return
}
if !node.IsFile() {
@@ -239,7 +176,7 @@ func (s *server) serveFile(w http.ResponseWriter, r *http.Request, remote string
// open the object
in, err := file.Open(os.O_RDONLY)
if err != nil {
internalError(remote, w, "Failed to open file", err)
serve.Error(remote, w, "Failed to open file", err)
return
}
defer func() {

View File

@@ -7,7 +7,6 @@ import (
"io/ioutil"
"net"
"net/http"
"path"
"strings"
"testing"
"time"
@@ -35,7 +34,7 @@ func startServer(t *testing.T, f fs.Fs) {
opt := httplib.DefaultOpt
opt.ListenAddr = testBindAddress
httpServer = newServer(f, &opt)
go httpServer.serve()
assert.NoError(t, httpServer.Serve())
// try to connect to the test server
pause := time.Millisecond
@@ -202,36 +201,7 @@ func TestGET(t *testing.T) {
}
}
type mockNode struct {
path string
isdir bool
}
func (n mockNode) Path() string { return n.path }
func (n mockNode) Name() string {
if n.path == "" {
return ""
}
return path.Base(n.path)
}
func (n mockNode) IsDir() bool { return n.isdir }
func TestAddEntry(t *testing.T) {
var es entries
es.addEntry(mockNode{path: "", isdir: true})
es.addEntry(mockNode{path: "dir", isdir: true})
es.addEntry(mockNode{path: "a/b/c/d.txt", isdir: false})
es.addEntry(mockNode{path: "a/b/c/colon:colon.txt", isdir: false})
es.addEntry(mockNode{path: "\"quotes\".txt", isdir: false})
assert.Equal(t, entries{
{remote: "", URL: "/", Leaf: "/"},
{remote: "dir", URL: "dir/", Leaf: "dir/"},
{remote: "a/b/c/d.txt", URL: "d.txt", Leaf: "d.txt"},
{remote: "a/b/c/colon:colon.txt", URL: "./colon:colon.txt", Leaf: "colon:colon.txt"},
{remote: "\"quotes\".txt", URL: "%22quotes%22.txt", Leaf: "\"quotes\".txt"},
}, es)
}
func TestFinalise(t *testing.T) {
httpServer.srv.Close()
httpServer.Close()
httpServer.Wait()
}

View File

@@ -3,6 +3,7 @@ package httpflags
import (
"github.com/ncw/rclone/cmd/serve/httplib"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/rc"
"github.com/spf13/pflag"
)
@@ -13,6 +14,7 @@ var (
// AddFlagsPrefix adds flags for the httplib
func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *httplib.Options) {
rc.AddOption(prefix+"http", &Opt)
flags.StringVarP(flagSet, &Opt.ListenAddr, prefix+"addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to.")
flags.DurationVarP(flagSet, &Opt.ServerReadTimeout, prefix+"server-read-timeout", "", Opt.ServerReadTimeout, "Timeout for server reading data")
flags.DurationVarP(flagSet, &Opt.ServerWriteTimeout, prefix+"server-write-timeout", "", Opt.ServerWriteTimeout, "Timeout for server writing data")

View File

@@ -13,6 +13,7 @@ import (
auth "github.com/abbot/go-http-auth"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
// Globals
@@ -105,6 +106,7 @@ type Server struct {
httpServer *http.Server
basicPassHashed string
useSSL bool // if server is configured for SSL/TLS
usingAuth bool // set if authentication is configured
}
// singleUserProvider provides the encrypted password for a single user
@@ -142,6 +144,7 @@ func NewServer(handler http.Handler, opt *Options) *Server {
}
authenticator := auth.NewBasicAuthenticator(s.Opt.Realm, secretProvider)
handler = auth.JustCheck(authenticator, handler.ServeHTTP)
s.usingAuth = true
}
s.useSSL = s.Opt.SslKey != ""
@@ -188,7 +191,7 @@ func NewServer(handler http.Handler, opt *Options) *Server {
func (s *Server) Serve() error {
ln, err := net.Listen("tcp", s.httpServer.Addr)
if err != nil {
return err
return errors.Wrapf(err, "start server failed")
}
s.listener = ln
s.waitChan = make(chan struct{})
@@ -254,3 +257,8 @@ func (s *Server) URL() string {
}
return fmt.Sprintf("%s://%s/", proto, addr)
}
// UsingAuth returns true if authentication is required
func (s *Server) UsingAuth() bool {
return s.usingAuth
}
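For orientation, here is a minimal sketch (not part of this change) of the start/stop pattern that `httplib.Server` now supports and that the restic and webdav servers further down adopt: `Serve` starts listening in the background and fails early if the port is taken, while `Close` plus `Wait` shut the server down.

```go
// Sketch only: start an httplib.Server in the background and shut it down.
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/ncw/rclone/cmd/serve/httplib"
)

func main() {
	opt := httplib.DefaultOpt
	opt.ListenAddr = "localhost:8080"
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello")
	})
	s := httplib.NewServer(handler, &opt)
	if err := s.Serve(); err != nil { // fails here if the port is already in use
		log.Fatal(err)
	}
	fmt.Println("serving on", s.URL(), "auth configured:", s.UsingAuth())
	// ... when finished:
	s.Close()
	s.Wait()
}
```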

View File

@@ -0,0 +1,102 @@
package serve
import (
"fmt"
"html/template"
"net/http"
"net/url"
"path"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/lib/rest"
)
// DirEntry is a directory entry
type DirEntry struct {
remote string
URL string
Leaf string
}
// Directory represents a directory
type Directory struct {
DirRemote string
Title string
Entries []DirEntry
Query string
}
// NewDirectory makes an empty Directory
func NewDirectory(dirRemote string) *Directory {
d := &Directory{
DirRemote: dirRemote,
Title: fmt.Sprintf("Directory listing of /%s", dirRemote),
}
return d
}
// SetQuery sets the query parameters for each URL
func (d *Directory) SetQuery(queryParams url.Values) *Directory {
d.Query = ""
if len(queryParams) > 0 {
d.Query = "?" + queryParams.Encode()
}
return d
}
// AddEntry adds an entry to that directory
func (d *Directory) AddEntry(remote string, isDir bool) {
leaf := path.Base(remote)
if leaf == "." {
leaf = ""
}
urlRemote := leaf
if isDir {
leaf += "/"
urlRemote += "/"
}
d.Entries = append(d.Entries, DirEntry{
remote: remote,
URL: rest.URLPathEscape(urlRemote) + d.Query,
Leaf: leaf,
})
}
// Error returns an http.StatusInternalServerError and logs the error
func Error(what interface{}, w http.ResponseWriter, text string, err error) {
fs.CountError(err)
fs.Errorf(what, "%s: %v", text, err)
http.Error(w, text+".", http.StatusInternalServerError)
}
// Serve serves a directory
func (d *Directory) Serve(w http.ResponseWriter, r *http.Request) {
// Account the transfer
accounting.Stats.Transferring(d.DirRemote)
defer accounting.Stats.DoneTransferring(d.DirRemote, true)
fs.Infof(d.DirRemote, "%s: Serving directory", r.RemoteAddr)
err := indexTemplate.Execute(w, d)
if err != nil {
Error(d.DirRemote, w, "Failed to render template", err)
return
}
}
// indexPage is a directory listing template
var indexPage = `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>{{ .Title }}</title>
</head>
<body>
<h1>{{ .Title }}</h1>
{{ range $i := .Entries }}<a href="{{ $i.URL }}">{{ $i.Leaf }}</a><br />
{{ end }}</body>
</html>
`
// indexTemplate is the instantiated indexPage
var indexTemplate = template.Must(template.New("index").Parse(indexPage))
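As a quick illustration, a minimal sketch (not part of this change; the directory name and entries are invented) of how the `Directory` helper above can be driven from a plain `net/http` handler:

```go
// Sketch only: render a directory index with the serve.Directory helper.
package main

import (
	"log"
	"net/http"

	"github.com/ncw/rclone/cmd/serve/httplib/serve"
)

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		d := serve.NewDirectory("demo").SetQuery(r.URL.Query())
		d.AddEntry("demo/file.txt", false) // a file entry
		d.AddEntry("demo/subdir", true)    // a directory entry
		d.Serve(w, r)                      // renders the HTML index page
	})
	log.Fatal(http.ListenAndServe("localhost:8080", nil))
}
```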

View File

@@ -0,0 +1,88 @@
package serve
import (
"errors"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"github.com/stretchr/testify/assert"
)
func TestNewDirectory(t *testing.T) {
d := NewDirectory("z")
assert.Equal(t, "z", d.DirRemote)
assert.Equal(t, "Directory listing of /z", d.Title)
}
func TestSetQuery(t *testing.T) {
d := NewDirectory("z")
assert.Equal(t, "", d.Query)
d.SetQuery(url.Values{"potato": []string{"42"}})
assert.Equal(t, "?potato=42", d.Query)
d.SetQuery(url.Values{})
assert.Equal(t, "", d.Query)
}
func TestAddEntry(t *testing.T) {
var d = NewDirectory("z")
d.AddEntry("", true)
d.AddEntry("dir", true)
d.AddEntry("a/b/c/d.txt", false)
d.AddEntry("a/b/c/colon:colon.txt", false)
d.AddEntry("\"quotes\".txt", false)
assert.Equal(t, []DirEntry{
{remote: "", URL: "/", Leaf: "/"},
{remote: "dir", URL: "dir/", Leaf: "dir/"},
{remote: "a/b/c/d.txt", URL: "d.txt", Leaf: "d.txt"},
{remote: "a/b/c/colon:colon.txt", URL: "./colon:colon.txt", Leaf: "colon:colon.txt"},
{remote: "\"quotes\".txt", URL: "%22quotes%22.txt", Leaf: "\"quotes\".txt"},
}, d.Entries)
// Now test with a query parameter
d = NewDirectory("z").SetQuery(url.Values{"potato": []string{"42"}})
d.AddEntry("file", false)
d.AddEntry("dir", true)
assert.Equal(t, []DirEntry{
{remote: "file", URL: "file?potato=42", Leaf: "file"},
{remote: "dir", URL: "dir/?potato=42", Leaf: "dir/"},
}, d.Entries)
}
func TestError(t *testing.T) {
w := httptest.NewRecorder()
err := errors.New("help")
Error("potato", w, "sausage", err)
resp := w.Result()
assert.Equal(t, http.StatusInternalServerError, resp.StatusCode)
body, _ := ioutil.ReadAll(resp.Body)
assert.Equal(t, "sausage.\n", string(body))
}
func TestServe(t *testing.T) {
d := NewDirectory("aDirectory")
d.AddEntry("file", false)
d.AddEntry("dir", true)
w := httptest.NewRecorder()
r := httptest.NewRequest("GET", "http://example.com/aDirectory/", nil)
d.Serve(w, r)
resp := w.Result()
assert.Equal(t, http.StatusOK, resp.StatusCode)
body, _ := ioutil.ReadAll(resp.Body)
assert.Equal(t, `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Directory listing of /aDirectory</title>
</head>
<body>
<h1>Directory listing of /aDirectory</h1>
<a href="file">file</a><br />
<a href="dir/">dir/</a><br />
</body>
</html>
`, string(body))
}

View File

@@ -0,0 +1,102 @@
// Package serve deals with serving objects over HTTP
package serve
import (
"fmt"
"io"
"net/http"
"path"
"strconv"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
)
// Object serves an fs.Object via HEAD or GET
func Object(w http.ResponseWriter, r *http.Request, o fs.Object) {
if r.Method != "HEAD" && r.Method != "GET" {
http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
return
}
// Show that we accept ranges
w.Header().Set("Accept-Ranges", "bytes")
// Set content length since we know how long the object is
if o.Size() >= 0 {
w.Header().Set("Content-Length", strconv.FormatInt(o.Size(), 10))
}
// Set content type
mimeType := fs.MimeType(o)
if mimeType == "application/octet-stream" && path.Ext(o.Remote()) == "" {
// Leave header blank so http server guesses
} else {
w.Header().Set("Content-Type", mimeType)
}
if r.Method == "HEAD" {
return
}
// Decode Range request if present
code := http.StatusOK
size := o.Size()
var options []fs.OpenOption
if rangeRequest := r.Header.Get("Range"); rangeRequest != "" {
//fs.Debugf(nil, "Range: request %q", rangeRequest)
option, err := fs.ParseRangeOption(rangeRequest)
if err != nil {
fs.Debugf(o, "Get request parse range request error: %v", err)
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
options = append(options, option)
offset, limit := option.Decode(o.Size())
end := o.Size() // exclusive
if limit >= 0 {
end = offset + limit
}
if end > o.Size() {
end = o.Size()
}
size = end - offset
// fs.Debugf(nil, "Range: offset=%d, limit=%d, end=%d, size=%d (object size %d)", offset, limit, end, size, o.Size())
// Content-Range: bytes 0-1023/146515
w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", offset, end-1, o.Size()))
// fs.Debugf(nil, "Range: Content-Range: %q", w.Header().Get("Content-Range"))
code = http.StatusPartialContent
}
w.Header().Set("Content-Length", strconv.FormatInt(size, 10))
file, err := o.Open(options...)
if err != nil {
fs.Debugf(o, "Get request open error: %v", err)
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
return
}
accounting.Stats.Transferring(o.Remote())
in := accounting.NewAccount(file, o) // account the transfer (no buffering)
defer func() {
closeErr := in.Close()
if closeErr != nil {
fs.Errorf(o, "Get request: close failed: %v", closeErr)
if err == nil {
err = closeErr
}
}
ok := err == nil
accounting.Stats.DoneTransferring(o.Remote(), ok)
if !ok {
accounting.Stats.Error(err)
}
}()
w.WriteHeader(code)
n, err := io.Copy(w, in)
if err != nil {
fs.Errorf(o, "Didn't finish writing GET request (wrote %d/%d bytes): %v", n, size, err)
return
}
}
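A minimal sketch (not part of this change; it assumes the URL path maps directly onto a path within a local Fs) of serving objects with `serve.Object`, which handles HEAD, GET and Range requests:

```go
// Sketch only: serve objects from a local path via serve.Object.
package main

import (
	"log"
	"net/http"
	"strings"

	_ "github.com/ncw/rclone/backend/local"
	"github.com/ncw/rclone/cmd/serve/httplib/serve"
	"github.com/ncw/rclone/fs"
)

func main() {
	f, err := fs.NewFs("/tmp")
	if err != nil {
		log.Fatal(err)
	}
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		remote := strings.Trim(r.URL.Path, "/")
		o, err := f.NewObject(remote)
		if err != nil {
			http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
			return
		}
		serve.Object(w, r, o) // handles HEAD, GET and Range requests
	})
	log.Fatal(http.ListenAndServe("localhost:8080", nil))
}
```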

View File

@@ -0,0 +1,76 @@
package serve
import (
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"github.com/ncw/rclone/fstest/mockobject"
"github.com/stretchr/testify/assert"
)
func TestObjectBadMethod(t *testing.T) {
w := httptest.NewRecorder()
r := httptest.NewRequest("BADMETHOD", "http://example.com/aFile", nil)
o := mockobject.New("aFile")
Object(w, r, o)
resp := w.Result()
assert.Equal(t, http.StatusMethodNotAllowed, resp.StatusCode)
body, _ := ioutil.ReadAll(resp.Body)
assert.Equal(t, "Method Not Allowed\n", string(body))
}
func TestObjectHEAD(t *testing.T) {
w := httptest.NewRecorder()
r := httptest.NewRequest("HEAD", "http://example.com/aFile", nil)
o := mockobject.New("aFile").WithContent([]byte("hello"), mockobject.SeekModeNone)
Object(w, r, o)
resp := w.Result()
assert.Equal(t, http.StatusOK, resp.StatusCode)
assert.Equal(t, "5", resp.Header.Get("Content-Length"))
assert.Equal(t, "bytes", resp.Header.Get("Accept-Ranges"))
body, _ := ioutil.ReadAll(resp.Body)
assert.Equal(t, "", string(body))
}
func TestObjectGET(t *testing.T) {
w := httptest.NewRecorder()
r := httptest.NewRequest("GET", "http://example.com/aFile", nil)
o := mockobject.New("aFile").WithContent([]byte("hello"), mockobject.SeekModeNone)
Object(w, r, o)
resp := w.Result()
assert.Equal(t, http.StatusOK, resp.StatusCode)
assert.Equal(t, "5", resp.Header.Get("Content-Length"))
assert.Equal(t, "bytes", resp.Header.Get("Accept-Ranges"))
body, _ := ioutil.ReadAll(resp.Body)
assert.Equal(t, "hello", string(body))
}
func TestObjectRange(t *testing.T) {
w := httptest.NewRecorder()
r := httptest.NewRequest("GET", "http://example.com/aFile", nil)
r.Header.Add("Range", "bytes=3-5")
o := mockobject.New("aFile").WithContent([]byte("0123456789"), mockobject.SeekModeNone)
Object(w, r, o)
resp := w.Result()
assert.Equal(t, http.StatusPartialContent, resp.StatusCode)
assert.Equal(t, "3", resp.Header.Get("Content-Length"))
assert.Equal(t, "bytes", resp.Header.Get("Accept-Ranges"))
assert.Equal(t, "bytes 3-5/10", resp.Header.Get("Content-Range"))
body, _ := ioutil.ReadAll(resp.Body)
assert.Equal(t, "345", string(body))
}
func TestObjectBadRange(t *testing.T) {
w := httptest.NewRecorder()
r := httptest.NewRequest("GET", "http://example.com/aFile", nil)
r.Header.Add("Range", "xxxbytes=3-5")
o := mockobject.New("aFile").WithContent([]byte("0123456789"), mockobject.SeekModeNone)
Object(w, r, o)
resp := w.Result()
assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
assert.Equal(t, "10", resp.Header.Get("Content-Length"))
body, _ := ioutil.ReadAll(resp.Body)
assert.Equal(t, "Bad Request\n", string(body))
}

View File

@@ -4,19 +4,17 @@ package restic
import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"os"
"path"
"regexp"
"strconv"
"strings"
"time"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/cmd/serve/httplib"
"github.com/ncw/rclone/cmd/serve/httplib/httpflags"
"github.com/ncw/rclone/cmd/serve/httplib/serve"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/fserrors"
@@ -138,8 +136,11 @@ these **must** end with /. Eg
httpSrv.ServeConn(conn, opts)
return nil
}
s.serve()
err := s.Serve()
if err != nil {
return err
}
s.Wait()
return nil
})
},
@@ -151,28 +152,30 @@ const (
// server contains everything to run the server
type server struct {
f fs.Fs
srv *httplib.Server
*httplib.Server
f fs.Fs
}
func newServer(f fs.Fs, opt *httplib.Options) *server {
mux := http.NewServeMux()
s := &server{
f: f,
srv: httplib.NewServer(mux, opt),
Server: httplib.NewServer(mux, opt),
f: f,
}
mux.HandleFunc("/", s.handler)
return s
}
// serve runs the http server - doesn't return
func (s *server) serve() {
err := s.srv.Serve()
// Serve runs the http server in the background.
//
// Use s.Close() and s.Wait() to shut down the server
func (s *server) Serve() error {
err := s.Server.Serve()
if err != nil {
fs.Errorf(s.f, "Opening listener: %v", err)
return err
}
fs.Logf(s.f, "Serving restic REST API on %s", s.srv.URL())
s.srv.Wait()
fs.Logf(s.f, "Serving restic REST API on %s", s.URL())
return nil
}
var matchData = regexp.MustCompile("(?:^|/)data/([^/]{2,})$")
@@ -215,10 +218,8 @@ func (s *server) handler(w http.ResponseWriter, r *http.Request) {
}
} else {
switch r.Method {
case "GET":
s.getObject(w, r, remote)
case "HEAD":
s.headObject(w, r, remote)
case "GET", "HEAD":
s.serveObject(w, r, remote)
case "POST":
s.postObject(w, r, remote)
case "DELETE":
@@ -229,91 +230,15 @@ func (s *server) handler(w http.ResponseWriter, r *http.Request) {
}
}
// head request the remote
func (s *server) headObject(w http.ResponseWriter, r *http.Request, remote string) {
o, err := s.f.NewObject(remote)
if err != nil {
fs.Debugf(remote, "Head request error: %v", err)
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
return
}
// Set content length since we know how long the object is
w.Header().Set("Content-Length", strconv.FormatInt(o.Size(), 10))
}
// get the remote
func (s *server) getObject(w http.ResponseWriter, r *http.Request, remote string) {
func (s *server) serveObject(w http.ResponseWriter, r *http.Request, remote string) {
o, err := s.f.NewObject(remote)
if err != nil {
fs.Debugf(remote, "Get request error: %v", err)
fs.Debugf(remote, "%s request error: %v", r.Method, err)
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
return
}
// Set content length since we know how long the object is
w.Header().Set("Content-Length", strconv.FormatInt(o.Size(), 10))
// Decode Range request if present
code := http.StatusOK
size := o.Size()
var options []fs.OpenOption
if rangeRequest := r.Header.Get("Range"); rangeRequest != "" {
//fs.Debugf(nil, "Range: request %q", rangeRequest)
option, err := fs.ParseRangeOption(rangeRequest)
if err != nil {
fs.Debugf(remote, "Get request parse range request error: %v", err)
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
options = append(options, option)
offset, limit := option.Decode(o.Size())
end := o.Size() // exclusive
if limit >= 0 {
end = offset + limit
}
if end > o.Size() {
end = o.Size()
}
size = end - offset
// fs.Debugf(nil, "Range: offset=%d, limit=%d, end=%d, size=%d (object size %d)", offset, limit, end, size, o.Size())
// Content-Range: bytes 0-1023/146515
w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", offset, end-1, o.Size()))
// fs.Debugf(nil, "Range: Content-Range: %q", w.Header().Get("Content-Range"))
code = http.StatusPartialContent
}
w.Header().Set("Content-Length", strconv.FormatInt(size, 10))
file, err := o.Open(options...)
if err != nil {
fs.Debugf(remote, "Get request open error: %v", err)
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
return
}
accounting.Stats.Transferring(o.Remote())
in := accounting.NewAccount(file, o) // account the transfer (no buffering)
defer func() {
closeErr := in.Close()
if closeErr != nil {
fs.Errorf(remote, "Get request: close failed: %v", closeErr)
if err == nil {
err = closeErr
}
}
ok := err == nil
accounting.Stats.DoneTransferring(o.Remote(), ok)
if !ok {
accounting.Stats.Error(err)
}
}()
w.WriteHeader(code)
n, err := io.Copy(w, in)
if err != nil {
fs.Errorf(remote, "Didn't finish writing GET request (wrote %d/%d bytes): %v", n, size, err)
return
}
serve.Object(w, r, o)
}
// postObject posts an object to the repository

View File

@@ -41,8 +41,11 @@ func TestRestic(t *testing.T) {
// Start the server
w := newServer(fremote, &opt)
go w.serve()
defer w.srv.Close()
assert.NoError(t, w.Serve())
defer func() {
w.Close()
w.Wait()
}()
// Change directory to run the tests
err = os.Chdir(resticSource)

View File

@@ -68,8 +68,12 @@ Use "rclone hashsum" to see the full list.
fs.Debugf(f, "Using hash %v for ETag", hashType)
}
cmd.Run(false, false, command, func() error {
w := newWebDAV(f, &httpflags.Opt)
w.serve()
s := newWebDAV(f, &httpflags.Opt)
err := s.serve()
if err != nil {
return err
}
s.Wait()
return nil
})
return nil
@@ -89,9 +93,9 @@ Use "rclone hashsum" to see the full list.
// might apply". In particular, whether or not renaming a file or directory
// overwriting another existing file or directory is an error is OS-dependent.
type WebDAV struct {
*httplib.Server
f fs.Fs
vfs *vfs.VFS
srv *httplib.Server
}
// check interface
@@ -110,18 +114,20 @@ func newWebDAV(f fs.Fs, opt *httplib.Options) *WebDAV {
Logger: w.logRequest, // FIXME
}
w.srv = httplib.NewServer(handler, opt)
w.Server = httplib.NewServer(handler, opt)
return w
}
// serve runs the http server - doesn't return
func (w *WebDAV) serve() {
err := w.srv.Serve()
// serve runs the http server in the background.
//
// Use s.Close() and s.Wait() to shut down the server
func (w *WebDAV) serve() error {
err := w.Serve()
if err != nil {
fs.Errorf(w.f, "Opening listener: %v", err)
return err
}
fs.Logf(w.f, "WebDav Server started on %s", w.srv.URL())
w.srv.Wait()
fs.Logf(w.f, "WebDav Server started on %s", w.URL())
return nil
}
// logRequest is called by the webdav module on every request

View File

@@ -48,8 +48,11 @@ func TestWebDav(t *testing.T) {
// Start the server
w := newWebDAV(fremote, &opt)
go w.serve()
defer w.srv.Close()
assert.NoError(t, w.serve())
defer func() {
w.Close()
w.Wait()
}()
// Change directory to run the tests
err = os.Chdir("../../../backend/webdav")

View File

@@ -32,6 +32,8 @@ extended explanation in the ` + "`" + `copy` + "`" + ` command above if unsure.
If dest:path doesn't exist, it is created and the source:path contents
go there.
**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)

View File

@@ -4,13 +4,12 @@ import (
"fmt"
"io/ioutil"
"net/http"
"regexp"
"strconv"
"strings"
"time"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/version"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -66,63 +65,8 @@ Or
},
}
var parseVersion = regexp.MustCompile(`^(?:rclone )?v(\d+)\.(\d+)(?:\.(\d+))?(?:-(\d+)(?:-(g[\wβ-]+))?)?$`)
type version []int
func newVersion(in string) (v version, err error) {
r := parseVersion.FindStringSubmatch(in)
if r == nil {
return v, errors.Errorf("failed to match version string %q", in)
}
atoi := func(s string) int {
i, err := strconv.Atoi(s)
if err != nil {
fs.Errorf(nil, "Failed to parse %q as int from %q: %v", s, in, err)
}
return i
}
v = version{
atoi(r[1]), // major
atoi(r[2]), // minor
}
if r[3] != "" {
v = append(v, atoi(r[3])) // patch
} else if r[4] != "" {
v = append(v, 0) // patch
}
if r[4] != "" {
v = append(v, atoi(r[4])) // dev
}
return v, nil
}
// String converts v to a string
func (v version) String() string {
var out []string
for _, vv := range v {
out = append(out, fmt.Sprint(vv))
}
return strings.Join(out, ".")
}
// cmp compares two versions returning >0, <0 or 0
func (v version) cmp(o version) (d int) {
n := len(v)
if n > len(o) {
n = len(o)
}
for i := 0; i < n; i++ {
d = v[i] - o[i]
if d != 0 {
return d
}
}
return len(v) - len(o)
}
// getVersion gets the version by checking the download repository passed in
func getVersion(url string) (v version, vs string, date time.Time, err error) {
func getVersion(url string) (v version.Version, vs string, date time.Time, err error) {
resp, err := http.Get(url)
if err != nil {
return v, vs, date, err
@@ -144,26 +88,17 @@ func getVersion(url string) (v version, vs string, date time.Time, err error) {
if err != nil {
return v, vs, date, err
}
v, err = newVersion(vs)
v, err = version.New(vs)
return v, vs, date, err
}
// check the current version against available versions
func checkVersion() {
// Get Current version
currentVersion := fs.Version
currentIsGit := strings.HasSuffix(currentVersion, "-DEV")
if currentIsGit {
currentVersion = currentVersion[:len(currentVersion)-4]
}
vCurrent, err := newVersion(currentVersion)
vCurrent, err := version.New(fs.Version)
if err != nil {
fs.Errorf(nil, "Failed to get parse version: %v", err)
}
if currentIsGit {
vCurrent = append(vCurrent, 999, 999)
}
const timeFormat = "2006-01-02"
printVersion := func(what, url string) {
@@ -177,7 +112,7 @@ func checkVersion() {
v,
"(released "+t.Format(timeFormat)+")",
)
if v.cmp(vCurrent) > 0 {
if v.Cmp(vCurrent) > 0 {
fmt.Printf(" upgrade: %s\n", url+vs)
}
}
@@ -190,7 +125,7 @@ func checkVersion() {
"beta",
"https://beta.rclone.org/",
)
if currentIsGit {
if vCurrent.IsGit() {
fmt.Println("Your version is compiled from git so comparisons may be wrong.")
}
}
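For reference, a small sketch (assuming the new `fs/version` package exports `New`, `Cmp` and `IsGit` as used above) of comparing the running version against a fetched version string:

```go
// Sketch only: compare the running rclone version with another version string.
package main

import (
	"fmt"
	"log"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/version"
)

func main() {
	current, err := version.New(fs.Version)
	if err != nil {
		log.Fatal(err)
	}
	latest, err := version.New("rclone v1.45")
	if err != nil {
		log.Fatal(err)
	}
	if latest.Cmp(current) > 0 {
		fmt.Printf("newer version %v available (running %v)\n", latest, current)
	}
	if current.IsGit() {
		fmt.Println("running a git build, so comparisons may be wrong")
	}
}
```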

View File

@@ -1,7 +1,6 @@
package version
import (
"fmt"
"io/ioutil"
"os"
"runtime"
@@ -46,65 +45,3 @@ func TestVersionWorksWithoutAccessibleConfigFile(t *testing.T) {
// assert.NoError(t, cmd.Root.Execute())
// })
}
func TestVersionNew(t *testing.T) {
for _, test := range []struct {
in string
want version
wantErr bool
}{
{"v1.41", version{1, 41}, false},
{"rclone v1.41", version{1, 41}, false},
{"rclone v1.41.23", version{1, 41, 23}, false},
{"rclone v1.41.23-100", version{1, 41, 23, 100}, false},
{"rclone v1.41-100", version{1, 41, 0, 100}, false},
{"rclone v1.41.23-100-g12312a", version{1, 41, 23, 100}, false},
{"rclone v1.41-100-g12312a", version{1, 41, 0, 100}, false},
{"rclone v1.42-005-g56e1e820β", version{1, 42, 0, 5}, false},
{"rclone v1.42-005-g56e1e820-feature-branchβ", version{1, 42, 0, 5}, false},
{"v1.41s", nil, true},
{"rclone v1-41", nil, true},
{"rclone v1.41.2c3", nil, true},
{"rclone v1.41.23-100 potato", nil, true},
{"rclone 1.41-100", nil, true},
{"rclone v1.41.23-100-12312a", nil, true},
} {
what := fmt.Sprintf("in=%q", test.in)
got, err := newVersion(test.in)
if test.wantErr {
assert.Error(t, err, what)
} else {
assert.NoError(t, err, what)
}
assert.Equal(t, test.want, got, what)
}
}
func TestVersionCmp(t *testing.T) {
for _, test := range []struct {
a, b version
want int
}{
{version{1}, version{1}, 0},
{version{1}, version{2}, -1},
{version{2}, version{1}, 1},
{version{2}, version{2, 1}, -1},
{version{2, 1}, version{2}, 1},
{version{2, 1}, version{2, 1}, 0},
{version{2, 1}, version{2, 2}, -1},
{version{2, 2}, version{2, 1}, 1},
} {
got := test.a.cmp(test.b)
if got < 0 {
got = -1
} else if got > 0 {
got = 1
}
assert.Equal(t, test.want, got, fmt.Sprintf("%v cmp %v", test.a, test.b))
// test the reverse
got = -test.b.cmp(test.a)
assert.Equal(t, test.want, got, fmt.Sprintf("%v cmp %v", test.b, test.a))
}
}

View File

@@ -208,3 +208,5 @@ Contributors
* David Haguenauer <ml@kurokatta.org>
* teresy <hi.teresy@gmail.com>
* buergi <patbuergi@gmx.de>
* Florian Gamboeck <mail@floga.de>
* Ralf Hemberger <10364191+rhemberger@users.noreply.github.com>

View File

@@ -293,6 +293,10 @@ This reads a list of file names from the file passed in and **only**
these files are transferred. The **filtering rules are ignored**
completely if you use this option.
If you use `--files-from`, rclone will not scan any directories; it will
just look at the files specified. Rclone will not error if any of the
files are missing from the source.
This option can be repeated to read from more than one file. These
are read in the order that they are placed on the command line.

View File

@@ -68,6 +68,9 @@ d) Delete this remote
y/e/d> y
```
**NOTE:** The encryption keys need to have already been generated after a regular login
via the browser, otherwise attempting to use the credentials in `rclone` will fail.
Once configured you can then use `rclone` like this,
List directories in top level of your Mega
@@ -152,11 +155,9 @@ permanently delete objects instead.
### Limitations ###
This backend uses the [go-mega go
library](https://github.com/t3rm1n4l/go-mega) which is an opensource
This backend uses the [go-mega go library](https://github.com/t3rm1n4l/go-mega) which is an open source
go library implementing the Mega API. There doesn't appear to be any
documentation for the mega protocol beyond the [mega C++
SDK](https://github.com/meganz/sdk) source code so there are likely
quite a few errors still remaining in this library.
documentation for the mega protocol beyond the [mega C++ SDK](https://github.com/meganz/sdk) source code
so there are likely quite a few errors still remaining in this library.
Mega allows duplicate files which may confuse rclone.

View File

@@ -9,46 +9,95 @@ date: "2018-03-05"
If rclone is run with the `--rc` flag then it starts an http server
which can be used to remote control rclone.
If you just want to run a remote control then see the [rcd command](/commands/rclone_rcd/).
**NB** this is experimental and everything here is subject to change!
## Supported parameters
#### --rc ####
### --rc
Flag to start the http server to listen for remote control requests
#### --rc-addr=IP ####
### --rc-addr=IP
IPaddress:Port or :Port to bind server to. (default "localhost:5572")
#### --rc-cert=KEY ####
### --rc-cert=KEY
SSL PEM key (concatenation of certificate and CA certificate)
#### --rc-client-ca=PATH ####
### --rc-client-ca=PATH
Client certificate authority to verify clients with
#### --rc-htpasswd=PATH ####
### --rc-htpasswd=PATH
htpasswd file - if not provided no authentication is done
#### --rc-key=PATH ####
### --rc-key=PATH
SSL PEM Private key
#### --rc-max-header-bytes=VALUE ####
### --rc-max-header-bytes=VALUE
Maximum size of request header (default 4096)
#### --rc-user=VALUE ####
### --rc-user=VALUE
User name for authentication.
#### --rc-pass=VALUE ####
### --rc-pass=VALUE
Password for authentication.
#### --rc-realm=VALUE ####
### --rc-realm=VALUE
Realm for authentication (default "rclone")
#### --rc-server-read-timeout=DURATION ####
### --rc-server-read-timeout=DURATION
Timeout for server reading data (default 1h0m0s)
#### --rc-server-write-timeout=DURATION ####
### --rc-server-write-timeout=DURATION
Timeout for server writing data (default 1h0m0s)
### --rc-serve
Enable the serving of remote objects via the HTTP interface. This
means objects will be accessible at http://127.0.0.1:5572/ by default,
so you can browse to http://127.0.0.1:5572/ or http://127.0.0.1:5572/*
to see a listing of the remotes. Objects may be requested from
remotes using this syntax http://127.0.0.1:5572/[remote:path]/path/to/object
Default Off.
### --rc-files /path/to/directory
Path to local files to serve on the HTTP server.
If this is set then rclone will serve the files in that directory. It
will also open the root in the web browser if specified. This is for
implementing browser based GUIs for rclone functions.
If `--rc-user` or `--rc-pass` is set then the URL that is opened will
have the authorization in the URL in the `http://user:pass@localhost/`
style.
Default Off.
### --rc-no-auth
By default rclone will require authorisation to have been set up on
the rc interface in order to use any methods which access any rclone
remotes. Eg `operations/list` is denied as it involves creating a
remote, as is `sync/copy`.
If this is set then no authorisation will be required on the server to
use these methods. The alternative is to use `--rc-user` and
`--rc-pass` and use these credentials in the request.
Default Off.
## Accessing the remote control via the rclone rc command
Rclone itself implements the remote control protocol in its `rclone
@@ -67,6 +116,92 @@ $ rclone rc rc/noop param1=one param2=two
Run `rclone rc` on its own to see the help for the installed remote
control commands.
`rclone rc` also supports a `--json` flag which can be used to send
more complicated input parameters.
```
$ rclone rc --json '{ "p1": [1,"2",null,4], "p2": { "a":1, "b":2 } }' rc/noop
{
"p1": [
1,
"2",
null,
4
],
"p2": {
"a": 1,
"b": 2
}
}
```
## Special parameters
The rc interface supports some special parameters which apply to
**all** commands. These start with `_` to show they are different.
### Running asynchronous jobs with _async = true
If `_async` has a true value when supplied to an rc call then it will
return immediately with a job id and the task will be run in the
background. The `job/status` call can be used to get information of
the background job. The job can be queried for up to 1 minute after
it has finished.
It is recommended that potentially long-running jobs, eg `sync/sync`,
`sync/copy`, `sync/move` and `operations/purge`, are run with the `_async`
flag to avoid any potential problems with the HTTP request and
response timing out.
Starting a job with the `_async` flag:
```
$ rclone rc --json '{ "p1": [1,"2",null,4], "p2": { "a":1, "b":2 }, "_async": true }' rc/noop
{
"jobid": 2
}
```
Query the status to see if the job has finished. For more information
on the meaning of these return parameters see the `job/status` call.
```
$ rclone rc --json '{ "jobid":2 }' job/status
{
"duration": 0.000124163,
"endTime": "2018-10-27T11:38:07.911245881+01:00",
"error": "",
"finished": true,
"id": 2,
"output": {
"_async": true,
"p1": [
1,
"2",
null,
4
],
"p2": {
"a": 1,
"b": 2
}
},
"startTime": "2018-10-27T11:38:07.911121728+01:00",
"success": true
}
```
`job/list` can be used to show the running or recently completed jobs
```
$ rclone rc job/list
{
"jobids": [
2
]
}
```
## Supported commands
<!--- autogenerated start - run make rcdocs - don't edit here -->
### cache/expire: Purge a remote from cache
@@ -112,6 +247,90 @@ is used on top of the cache.
Show statistics for the cache remote.
### config/create: create the config for a remote.
This takes the following parameters
- name - name of remote
- type - type of the new remote
See the [config create command](/commands/rclone_config_create/) command for more information on the above.
Authentication is required for this call.
### config/delete: Delete a remote in the config file.
Parameters:
- name - name of remote to delete
See the [config delete command](/commands/rclone_config_delete/) command for more information on the above.
Authentication is required for this call.
### config/dump: Dumps the config file.
Returns a JSON object:
- key: value
Where keys are remote names and values are the config parameters.
See the [config dump command](/commands/rclone_config_dump/) command for more information on the above.
Authentication is required for this call.
### config/get: Get a remote in the config file.
Parameters:
- name - name of remote to get
See the [config dump command](/commands/rclone_config_dump/) command for more information on the above.
Authentication is required for this call.
### config/listremotes: Lists the remotes in the config file.
Returns
- remotes - array of remote names
See the [listremotes command](/commands/rclone_listremotes/) command for more information on the above.
Authentication is required for this call.
### config/password: password the config for a remote.
This takes the following parameters
- name - name of remote
- type - type of new remote
See the [config password command](/commands/rclone_config_password/) command for more information on the above.
Authentication is required for this call.
### config/providers: Shows how providers are configured in the config file.
Returns a JSON object:
- providers - array of objects
See the [config providers command](/commands/rclone_config_providers/) command for more information on the above.
Authentication is required for this call.
### config/update: update the config for a remote.
This takes the following parameters
- name - name of remote
- type - type of new remote
See the [config update command](/commands/rclone_config_update/) command for more information on the above.
Authentication is required for this call.
### core/bwlimit: Set the bandwidth limit.
This sets the bandwidth limit to that passed in.
@@ -142,6 +361,14 @@ The most interesting values for most people are:
* Sys: this is the total amount of memory requested from the OS
* It is virtual memory so may include unused memory
### core/obscure: Obscures a string passed in.
Pass a clear string and rclone will obscure it for the config file:
- clear - string
Returns
- obscured - string
### core/pid: Return PID of current process
This returns PID of current process.
@@ -186,6 +413,230 @@ Returns the following values:
Values for "transferring", "checking" and "lastError" are only assigned if data is available.
The value for "eta" is null if an eta cannot be determined.
### core/version: Shows the current version of rclone and the go runtime.
This shows the current version of rclone and the go runtime
- version - rclone version, eg "v1.44"
- decomposed - version number as [major, minor, patch, subpatch]
- note patch and subpatch will be 999 for a git compiled version
- isGit - boolean - true if this was compiled from the git version
- os - OS in use according to Go
- arch - cpu architecture in use according to Go
- goVersion - version of Go runtime in use
### job/list: Lists the IDs of the running jobs
Parameters - None
Results
- jobids - array of integer job ids
### job/status: Reads the status of the job ID
Parameters
- jobid - id of the job (integer)
Results
- duration - time in seconds that the job ran for
- endTime - time the job finished (eg "2018-10-26T18:50:20.528746884+01:00")
- error - error from the job or empty string for no error
- finished - boolean whether the job has finished or not
- id - as passed in above
- startTime - time the job started (eg "2018-10-26T18:50:20.528336039+01:00")
- success - boolean - true for success false otherwise
- output - output of the job as would have been returned if called synchronously
### operations/about: Return the space used on the remote
This takes the following parameters
- fs - a remote name string eg "drive:"
- remote - a path within that remote eg "dir"
The result is as returned from rclone about --json
Authentication is required for this call.
### operations/cleanup: Remove trashed files in the remote or path
This takes the following parameters
- fs - a remote name string eg "drive:"
See the [cleanup command](/commands/rclone_cleanup/) command for more information on the above.
Authentication is required for this call.
### operations/copyfile: Copy a file from source remote to destination remote
This takes the following parameters
- srcFs - a remote name string eg "drive:" for the source
- srcRemote - a path within that remote eg "file.txt" for the source
- dstFs - a remote name string eg "drive2:" for the destination
- dstRemote - a path within that remote eg "file2.txt" for the destination
This returns
- jobid - ID of async job to query with job/status
Authentication is required for this call.
### operations/copyurl: Copy the URL to the object
This takes the following parameters
- fs - a remote name string eg "drive:"
- remote - a path within that remote eg "dir"
- url - string, URL to read from
See the [copyurl command](/commands/rclone_copyurl/) command for more information on the above.
Authentication is required for this call.
### operations/delete: Remove files in the path
This takes the following parameters
- fs - a remote name string eg "drive:"
See the [delete command](/commands/rclone_delete/) command for more information on the above.
Authentication is required for this call.
### operations/deletefile: Remove the single file pointed to
This takes the following parameters
- fs - a remote name string eg "drive:"
- remote - a path within that remote eg "dir"
See the [deletefile command](/commands/rclone_deletefile/) command for more information on the above.
Authentication is required for this call.
### operations/list: List the given remote and path in JSON format
This takes the following parameters
- fs - a remote name string eg "drive:"
- remote - a path within that remote eg "dir"
- opt - a dictionary of options to control the listing (optional)
- recurse - If set recurse directories
- noModTime - If set return modification time
- showEncrypted - If set show decrypted names
- showOrigIDs - If set show the IDs for each item if known
- showHash - If set return a dictionary of hashes
The result is
- list
- This is an array of objects as described in the lsjson command
See the lsjson command for more information on the above and examples.
Authentication is required for this call.
### operations/mkdir: Make a destination directory or container
This takes the following parameters
- fs - a remote name string eg "drive:"
- remote - a path within that remote eg "dir"
See the [mkdir command](/commands/rclone_mkdir/) command for more information on the above.
Authentication is required for this call.
### operations/movefile: Move a file from source remote to destination remote
This takes the following parameters
- srcFs - a remote name string eg "drive:" for the source
- srcRemote - a path within that remote eg "file.txt" for the source
- dstFs - a remote name string eg "drive2:" for the destination
- dstRemote - a path within that remote eg "file2.txt" for the destination
This returns
- jobid - ID of async job to query with job/status
Authentication is required for this call.
### operations/purge: Remove a directory or container and all of its contents
This takes the following parameters
- fs - a remote name string eg "drive:"
- remote - a path within that remote eg "dir"
See the [purge command](/commands/rclone_purge/) command for more information on the above.
Authentication is required for this call.
### operations/rmdir: Remove an empty directory or container
This takes the following parameters
- fs - a remote name string eg "drive:"
- remote - a path within that remote eg "dir"
See the [rmdir command](/commands/rclone_rmdir/) command for more information on the above.
Authentication is required for this call.
### operations/rmdirs: Remove all the empty directories in the path
This takes the following parameters
- fs - a remote name string eg "drive:"
- remote - a path within that remote eg "dir"
- leaveRoot - boolean, set to true not to delete the root
See the [rmdirs command](/commands/rclone_rmdirs/) command for more information on the above.
Authentication is required for this call.
### operations/size: Count the number of bytes and files in remote
This takes the following parameters
- fs - a remote name string eg "drive:path/to/dir"
Returns
- count - number of files
- bytes - number of bytes in those files
See the [size command](/commands/rclone_size/) command for more information on the above.
Authentication is required for this call.
### options/blocks: List all the option blocks
Returns
- options - a list of the options block names
### options/get: Get all the options
Returns an object where keys are option block names and values are an
object with the current option values in.
This shows the internal names of the option within rclone which should
map to the external options very easily with a few exceptions.
### options/set: Set an option
Parameters
- option block name containing an object with
- key: value
Repeated as often as required.
Only supply the options you wish to change. If an option is unknown
it will be silently ignored. Not all options will have an effect when
changed like this.
### rc/error: This returns an error
This returns an error with the input as part of its error string.
@@ -202,6 +653,57 @@ This echoes the input parameters to the output parameters for testing
purposes. It can be used to check that rclone is still alive and to
check that parameter passing is working properly.
### rc/noopauth: Echo the input to the output parameters requiring auth
This echoes the input parameters to the output parameters for testing
purposes. It can be used to check that rclone is still alive and to
check that parameter passing is working properly.
Authentication is required for this call.
### sync/copy: copy a directory from source remote to destination remote
This takes the following parameters
- srcFs - a remote name string eg "drive:src" for the source
- dstFs - a remote name string eg "drive:dst" for the destination
This returns
- jobid - ID of async job to query with job/status
See the [copy command](/commands/rclone_copy/) command for more information on the above.
Authentication is required for this call.
### sync/move: move a directory from source remote to destination remote
This takes the following parameters
- srcFs - a remote name string eg "drive:src" for the source
- dstFs - a remote name string eg "drive:dst" for the destination
- deleteEmptySrcDirs - delete empty src directories if set
This returns
- jobid - ID of async job to query with job/status
See the [move command](/commands/rclone_move/) command for more information on the above.
Authentication is required for this call.
### sync/sync: sync a directory from source remote to destination remote
This takes the following parameters
- srcFs - a remote name string eg "drive:src" for the source
- dstFs - a remote name string eg "drive:dst" for the destination
This returns
- jobid - ID of async job to query with job/status
See the [sync command](/commands/rclone_sync/) command for more information on the above.
Authentication is required for this call.
### vfs/forget: Forget files or directories in the directory cache.
This forgets the paths in the directory cache causing them to be
@@ -276,9 +778,31 @@ blob in the body. There are examples of these below using `curl`.
The response will be a JSON blob in the body of the response. This is
formatted to be reasonably human readable.
If an error occurs then there will be an HTTP error status (usually
400) and the body of the response will contain a JSON encoded error
object.
### Error returns
If an error occurs then there will be an HTTP error status (eg 500)
and the body of the response will contain a JSON encoded error object,
eg
```
{
"error": "Expecting string value for key \"remote\" (was float64)",
"input": {
"fs": "/tmp",
"remote": 3
},
"status": 400
"path": "operations/rmdir",
}
```
The keys in the error response are
- error - error string
- input - the input parameters to the call
- status - the HTTP status code
- path - the path of the call
### CORS
The server implements basic CORS support and allows all origins.
The response to a preflight OPTIONS request will echo the requested "Access-Control-Request-Headers" back.
@@ -286,7 +810,7 @@ The response to a preflight OPTIONS request will echo the requested "Access-Cont
### Using POST with URL parameters only
```
curl -X POST 'http://localhost:5572/rc/noop/?potato=1&sausage=2'
curl -X POST 'http://localhost:5572/rc/noop?potato=1&sausage=2'
```
Response
@@ -301,7 +825,7 @@ Response
Here is what an error response looks like:
```
curl -X POST 'http://localhost:5572/rc/error/?potato=1&sausage=2'
curl -X POST 'http://localhost:5572/rc/error?potato=1&sausage=2'
```
```
@@ -317,7 +841,7 @@ curl -X POST 'http://localhost:5572/rc/error/?potato=1&sausage=2'
Note that curl doesn't return errors to the shell unless you use the `-f` option
```
$ curl -f -X POST 'http://localhost:5572/rc/error/?potato=1&sausage=2'
$ curl -f -X POST 'http://localhost:5572/rc/error?potato=1&sausage=2'
curl: (22) The requested URL returned error: 400 Bad Request
$ echo $?
22
@@ -326,7 +850,7 @@ $ echo $?
### Using POST with a form
```
curl --data "potato=1" --data "sausage=2" http://localhost:5572/rc/noop/
curl --data "potato=1" --data "sausage=2" http://localhost:5572/rc/noop
```
Response
@@ -342,7 +866,7 @@ Note that you can combine these with URL parameters too with the POST
parameters taking precedence.
```
curl --data "potato=1" --data "sausage=2" "http://localhost:5572/rc/noop/?rutabaga=3&sausage=4"
curl --data "potato=1" --data "sausage=2" "http://localhost:5572/rc/noop?rutabaga=3&sausage=4"
```
Response
@@ -359,7 +883,7 @@ Response
### Using POST with a JSON blob
```
curl -H "Content-Type: application/json" -X POST -d '{"potato":2,"sausage":1}' http://localhost:5572/rc/noop/
curl -H "Content-Type: application/json" -X POST -d '{"potato":2,"sausage":1}' http://localhost:5572/rc/noop
```
response
@@ -375,7 +899,7 @@ This can be combined with URL parameters too if required. The JSON
blob takes precedence.
```
curl -H "Content-Type: application/json" -X POST -d '{"potato":2,"sausage":1}' 'http://localhost:5572/rc/noop/?rutabaga=3&potato=4'
curl -H "Content-Type: application/json" -X POST -d '{"potato":2,"sausage":1}' 'http://localhost:5572/rc/noop?rutabaga=3&potato=4'
```
```

View File

@@ -32,6 +32,7 @@ import (
"github.com/ncw/rclone/fs/driveletter"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/fspath"
"github.com/ncw/rclone/fs/rc"
"github.com/pkg/errors"
"golang.org/x/crypto/nacl/secretbox"
"golang.org/x/text/unicode/norm"
@@ -445,6 +446,10 @@ func changeConfigPassword() {
// if configKey has been set, the file will be encrypted.
func saveConfig() error {
dir, name := filepath.Split(ConfigPath)
err := os.MkdirAll(dir, os.ModePerm)
if err != nil {
return errors.Wrap(err, "failed to create config directory")
}
f, err := ioutil.TempFile(dir, name)
if err != nil {
return errors.Errorf("Failed to create temp file for new config: %v", err)
@@ -897,18 +902,24 @@ func ChooseOption(o *fs.Option, name string) string {
return in
}
// Suppress the confirm prompts and return a function to undo that
func suppressConfirm() func() {
old := fs.Config.AutoConfirm
fs.Config.AutoConfirm = true
return func() {
fs.Config.AutoConfirm = old
}
}
// UpdateRemote adds the keyValues passed in to the remote of name.
// keyValues should be key, value pairs.
func UpdateRemote(name string, keyValues []string) error {
if len(keyValues)%2 != 0 {
return errors.New("found key without value")
}
func UpdateRemote(name string, keyValues rc.Params) error {
defer suppressConfirm()()
// Set the config
for i := 0; i < len(keyValues); i += 2 {
getConfigData().SetValue(name, keyValues[i], keyValues[i+1])
for k, v := range keyValues {
getConfigData().SetValue(name, k, fmt.Sprint(v))
}
RemoteConfig(name)
ShowRemote(name)
SaveConfig()
return nil
}
@@ -916,9 +927,7 @@ func UpdateRemote(name string, keyValues []string) error {
// CreateRemote creates a new remote with name, provider and a list of
// parameters which are key, value pairs. If update is set then it
// adds the new keys rather than replacing all of them.
func CreateRemote(name string, provider string, keyValues []string) error {
// Suppress Confirm
fs.Config.AutoConfirm = true
func CreateRemote(name string, provider string, keyValues rc.Params) error {
// Delete the old config if it exists
getConfigData().DeleteSection(name)
// Set the type
@@ -931,20 +940,12 @@ func CreateRemote(name string, provider string, keyValues []string) error {
// PasswordRemote adds the keyValues passed in to the remote of name.
// keyValues should be key, value pairs.
func PasswordRemote(name string, keyValues []string) error {
if len(keyValues) != 2 {
return errors.New("found key without value")
func PasswordRemote(name string, keyValues rc.Params) error {
defer suppressConfirm()()
for k, v := range keyValues {
keyValues[k] = obscure.MustObscure(fmt.Sprint(v))
}
// Suppress Confirm
fs.Config.AutoConfirm = true
passwd := obscure.MustObscure(keyValues[1])
if passwd != "" {
getConfigData().SetValue(name, keyValues[0], passwd)
RemoteConfig(name)
ShowRemote(name)
SaveConfig()
}
return nil
return UpdateRemote(name, keyValues)
}
// JSONListProviders prints all the providers and options in JSON format
@@ -1293,16 +1294,28 @@ func FileSections() []string {
return sections
}
// DumpRcRemote dumps the config for a single remote
func DumpRcRemote(name string) (dump rc.Params) {
params := rc.Params{}
for _, key := range getConfigData().GetKeyList(name) {
params[key] = FileGet(name, key)
}
return params
}
// DumpRcBlob dumps all the config as an unstructured blob suitable
// for the rc
func DumpRcBlob() (dump rc.Params) {
dump = rc.Params{}
for _, name := range getConfigData().GetSectionList() {
dump[name] = DumpRcRemote(name)
}
return dump
}
// Dump dumps all the config as a JSON file
func Dump() error {
dump := make(map[string]map[string]string)
for _, name := range getConfigData().GetSectionList() {
params := make(map[string]string)
for _, key := range getConfigData().GetKeyList(name) {
params[key] = FileGet(name, key)
}
dump[name] = params
}
dump := DumpRcBlob()
b, err := json.MarshalIndent(dump, "", " ")
if err != nil {
return errors.Wrap(err, "failed to marshal config dump")

View File

@@ -12,6 +12,7 @@ import (
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/rc"
"github.com/spf13/pflag"
)
@@ -31,6 +32,7 @@ var (
// AddFlags adds the non filing system specific flags to the command
func AddFlags(flagSet *pflag.FlagSet) {
rc.AddOption("main", fs.Config)
// NB defaults which aren't the zero for the type should be set in fs/config.go NewConfig
flags.CountVarP(flagSet, &verbose, "verbose", "v", "Print lots more stuff (repeat for more)")
flags.BoolVarP(flagSet, &quiet, "quiet", "q", false, "Print as little stuff as possible")

fs/config/rc.go Normal file
View File

@@ -0,0 +1,178 @@
package config
import (
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/rc"
)
func init() {
rc.Add(rc.Call{
Path: "config/dump",
Fn: rcDump,
Title: "Dumps the config file.",
AuthRequired: true,
Help: `
Returns a JSON object:
- key: value
Where keys are remote names and values are the config parameters.
See the [config dump command](/commands/rclone_config_dump/) command for more information on the above.
`,
})
}
// Return the config file dump
func rcDump(in rc.Params) (out rc.Params, err error) {
return DumpRcBlob(), nil
}
func init() {
rc.Add(rc.Call{
Path: "config/get",
Fn: rcGet,
Title: "Get a remote in the config file.",
AuthRequired: true,
Help: `
Parameters:
- name - name of remote to get
See the [config dump command](/commands/rclone_config_dump/) command for more information on the above.
`,
})
}
// Return the config for a single remote
func rcGet(in rc.Params) (out rc.Params, err error) {
name, err := in.GetString("name")
if err != nil {
return nil, err
}
return DumpRcRemote(name), nil
}
func init() {
rc.Add(rc.Call{
Path: "config/listremotes",
Fn: rcListRemotes,
Title: "Lists the remotes in the config file.",
AuthRequired: true,
Help: `
Returns
- remotes - array of remote names
See the [listremotes command](/commands/rclone_listremotes/) command for more information on the above.
`,
})
}
// Return a list of remotes in the config file
func rcListRemotes(in rc.Params) (out rc.Params, err error) {
var remotes = []string{}
for _, remote := range getConfigData().GetSectionList() {
remotes = append(remotes, remote)
}
out = rc.Params{
"remotes": remotes,
}
return out, nil
}
func init() {
rc.Add(rc.Call{
Path: "config/providers",
Fn: rcProviders,
Title: "Shows how providers are configured in the config file.",
AuthRequired: true,
Help: `
Returns a JSON object:
- providers - array of objects
See the [config providers command](/commands/rclone_config_providers/) command for more information on the above.
`,
})
}
// Return the config file providers
func rcProviders(in rc.Params) (out rc.Params, err error) {
out = rc.Params{
"providers": fs.Registry,
}
return out, nil
}
func init() {
for _, name := range []string{"create", "update", "password"} {
name := name
extraHelp := ""
if name == "create" {
extraHelp = "- type - type of the new remote\n"
}
rc.Add(rc.Call{
Path: "config/" + name,
AuthRequired: true,
Fn: func(in rc.Params) (rc.Params, error) {
return rcConfig(in, name)
},
Title: name + " the config for a remote.",
Help: `This takes the following parameters
- name - name of remote
- type - type of new remote
` + extraHelp + `
See the [config ` + name + ` command](/commands/rclone_config_` + name + `/) command for more information on the above.`,
})
}
}
// Manipulate the config file
func rcConfig(in rc.Params, what string) (out rc.Params, err error) {
name, err := in.GetString("name")
if err != nil {
return nil, err
}
parameters := rc.Params{}
err = in.GetStruct("parameters", &parameters)
if err != nil {
return nil, err
}
switch what {
case "create":
remoteType, err := in.GetString("type")
if err != nil {
return nil, err
}
return nil, CreateRemote(name, remoteType, parameters)
case "update":
return nil, UpdateRemote(name, parameters)
case "password":
return nil, PasswordRemote(name, parameters)
}
panic("unknown rcConfig type")
}
func init() {
rc.Add(rc.Call{
Path: "config/delete",
Fn: rcDelete,
Title: "Delete a remote in the config file.",
AuthRequired: true,
Help: `
Parameters:
- name - name of remote to delete
See the [config delete command](/commands/rclone_config_delete/) command for more information on the above.
`,
})
}
// Delete the config for a remote
func rcDelete(in rc.Params) (out rc.Params, err error) {
name, err := in.GetString("name")
if err != nil {
return nil, err
}
DeleteRemote(name)
return nil, nil
}
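As a usage illustration, a sketch (not part of this change) of driving `config/create` over the rc HTTP interface; it assumes rclone was started with `rclone rcd --rc-no-auth` so that this AuthRequired call is reachable without credentials, and the remote name and parameters are invented:

```go
// Sketch only: call config/create over the rc HTTP API with a JSON body.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	body, err := json.Marshal(map[string]interface{}{
		"name":       "mytest",
		"type":       "local",
		"parameters": map[string]string{"test_key": "sausage"},
	})
	if err != nil {
		log.Fatal(err)
	}
	resp, err := http.Post("http://localhost:5572/config/create", "application/json", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = resp.Body.Close() }()
	fmt.Println("status:", resp.Status)
}
```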

fs/config/rc_test.go Normal file
View File

@@ -0,0 +1,149 @@
package config
import (
"testing"
_ "github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/rc"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const testName = "configTestNameForRc"
func TestRc(t *testing.T) {
// Create the test remote
call := rc.Calls.Get("config/create")
assert.NotNil(t, call)
in := rc.Params{
"name": testName,
"type": "local",
"parameters": rc.Params{
"test_key": "sausage",
},
}
out, err := call.Fn(in)
require.NoError(t, err)
require.Nil(t, out)
assert.Equal(t, "local", FileGet(testName, "type"))
assert.Equal(t, "sausage", FileGet(testName, "test_key"))
// The sub tests rely on the remote created above but they can
// all be run independently
t.Run("Dump", func(t *testing.T) {
call := rc.Calls.Get("config/dump")
assert.NotNil(t, call)
in := rc.Params{}
out, err := call.Fn(in)
require.NoError(t, err)
require.NotNil(t, out)
require.NotNil(t, out[testName])
config := out[testName].(rc.Params)
assert.Equal(t, "local", config["type"])
assert.Equal(t, "sausage", config["test_key"])
})
t.Run("Get", func(t *testing.T) {
call := rc.Calls.Get("config/get")
assert.NotNil(t, call)
in := rc.Params{
"name": testName,
}
out, err := call.Fn(in)
require.NoError(t, err)
require.NotNil(t, out)
assert.Equal(t, "local", out["type"])
assert.Equal(t, "sausage", out["test_key"])
})
t.Run("ListRemotes", func(t *testing.T) {
call := rc.Calls.Get("config/listremotes")
assert.NotNil(t, call)
in := rc.Params{}
out, err := call.Fn(in)
require.NoError(t, err)
require.NotNil(t, out)
var remotes []string
err = out.GetStruct("remotes", &remotes)
require.NoError(t, err)
assert.Contains(t, remotes, testName)
})
t.Run("Update", func(t *testing.T) {
call := rc.Calls.Get("config/update")
assert.NotNil(t, call)
in := rc.Params{
"name": testName,
"parameters": rc.Params{
"test_key": "rutabaga",
"test_key2": "cabbage",
},
}
out, err := call.Fn(in)
require.NoError(t, err)
assert.Nil(t, out)
assert.Equal(t, "local", FileGet(testName, "type"))
assert.Equal(t, "rutabaga", FileGet(testName, "test_key"))
assert.Equal(t, "cabbage", FileGet(testName, "test_key2"))
})
t.Run("Password", func(t *testing.T) {
call := rc.Calls.Get("config/password")
assert.NotNil(t, call)
in := rc.Params{
"name": testName,
"parameters": rc.Params{
"test_key": "rutabaga",
"test_key2": "cabbage",
},
}
out, err := call.Fn(in)
require.NoError(t, err)
assert.Nil(t, out)
assert.Equal(t, "local", FileGet(testName, "type"))
assert.Equal(t, "rutabaga", obscure.MustReveal(FileGet(testName, "test_key")))
assert.Equal(t, "cabbage", obscure.MustReveal(FileGet(testName, "test_key2")))
})
// Delete the test remote
call = rc.Calls.Get("config/delete")
assert.NotNil(t, call)
in = rc.Params{
"name": testName,
}
out, err = call.Fn(in)
require.NoError(t, err)
assert.Nil(t, out)
assert.Equal(t, "", FileGet(testName, "type"))
assert.Equal(t, "", FileGet(testName, "test_key"))
}
func TestRcProviders(t *testing.T) {
call := rc.Calls.Get("config/providers")
assert.NotNil(t, call)
in := rc.Params{}
out, err := call.Fn(in)
require.NoError(t, err)
require.NotNil(t, out)
var registry []*fs.RegInfo
err = out.GetStruct("providers", &registry)
require.NoError(t, err)
foundLocal := false
for _, provider := range registry {
if provider.Name == "local" {
foundLocal = true
break
}
}
assert.True(t, foundLocal, "didn't find local provider")
}

View File

@@ -496,3 +496,31 @@ func (f *Filter) DumpFilters() string {
}
return strings.Join(rules, "\n")
}
// HaveFilesFrom returns true if --files-from has been supplied
func (f *Filter) HaveFilesFrom() bool {
return f.files != nil
}
var errFilesFromNotSet = errors.New("--files-from not set so can't use Filter.ListR")
// MakeListR makes a function to return all the files set using --files-from
func (f *Filter) MakeListR(NewObject func(remote string) (fs.Object, error)) fs.ListRFn {
return func(dir string, callback fs.ListRCallback) error {
if !f.HaveFilesFrom() {
return errFilesFromNotSet
}
var entries fs.DirEntries
for remote := range f.files {
entry, err := NewObject(remote)
if err == fs.ErrorObjectNotFound {
// Skip files that are not found
} else if err != nil {
return err
} else {
entries = append(entries, entry)
}
}
return callback(entries)
}
}

View File

@@ -9,6 +9,7 @@ import (
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/mockobject"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -183,6 +184,83 @@ func TestNewFilterIncludeFilesDirs(t *testing.T) {
})
}
func TestNewFilterHaveFilesFrom(t *testing.T) {
f, err := NewFilter(nil)
require.NoError(t, err)
assert.Equal(t, false, f.HaveFilesFrom())
require.NoError(t, f.AddFile("file"))
assert.Equal(t, true, f.HaveFilesFrom())
}
func TestNewFilterMakeListR(t *testing.T) {
f, err := NewFilter(nil)
require.NoError(t, err)
// Check error if no files
listR := f.MakeListR(nil)
err = listR("", nil)
assert.EqualError(t, err, errFilesFromNotSet.Error())
// Add some files
for _, path := range []string{
"path/to/dir/file1.png",
"/path/to/dir/file2.png",
"/path/to/file3.png",
"/path/to/dir2/file4.png",
"notfound",
} {
err = f.AddFile(path)
require.NoError(t, err)
}
assert.Equal(t, 5, len(f.files))
// NewObject function for MakeListR
newObjects := FilesMap{}
NewObject := func(remote string) (fs.Object, error) {
if remote == "notfound" {
return nil, fs.ErrorObjectNotFound
} else if remote == "error" {
return nil, assert.AnError
}
newObjects[remote] = struct{}{}
return mockobject.New(remote), nil
}
// Callback for ListRFn
listRObjects := FilesMap{}
listRcallback := func(entries fs.DirEntries) error {
for _, entry := range entries {
listRObjects[entry.Remote()] = struct{}{}
}
return nil
}
// Make the listR and call it
listR = f.MakeListR(NewObject)
err = listR("", listRcallback)
require.NoError(t, err)
// Check that the correct objects were created and listed
want := FilesMap{
"path/to/dir/file1.png": {},
"path/to/dir/file2.png": {},
"path/to/file3.png": {},
"path/to/dir2/file4.png": {},
}
assert.Equal(t, want, newObjects)
assert.Equal(t, want, listRObjects)
// Now check an error is returned from NewObject
require.NoError(t, f.AddFile("error"))
err = listR("", listRcallback)
require.EqualError(t, err, assert.AnError.Error())
}
func TestNewFilterMinSize(t *testing.T) {
f, err := NewFilter(nil)
require.NoError(t, err)

View File

@@ -4,6 +4,7 @@ package filterflags
import (
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/filter"
"github.com/ncw/rclone/fs/rc"
"github.com/spf13/pflag"
)
@@ -14,6 +15,7 @@ var (
// AddFlags adds the non filing system specific flags to the command
func AddFlags(flagSet *pflag.FlagSet) {
rc.AddOption("filter", &Opt)
flags.BoolVarP(flagSet, &Opt.DeleteExcluded, "delete-excluded", "", false, "Delete files on dest excluded from sync")
flags.StringArrayVarP(flagSet, &Opt.FilterRule, "filter", "f", nil, "Add a file-filtering rule")
flags.StringArrayVarP(flagSet, &Opt.FilterFrom, "filter-from", "", nil, "Read filtering patterns from a file")

View File

@@ -71,7 +71,7 @@ type listDirFn func(dir string) (entries fs.DirEntries, err error)
// makeListDir makes a listing function for the given fs and includeAll flags
func (m *March) makeListDir(f fs.Fs, includeAll bool) listDirFn {
if !fs.Config.UseListR || f.Features().ListR == nil {
if (!fs.Config.UseListR || f.Features().ListR == nil) && !filter.Active.HaveFilesFrom() {
return func(dir string) (entries fs.DirEntries, err error) {
return list.DirSorted(f, includeAll, dir)
}

View File

@@ -18,16 +18,32 @@ import (
)
// dedupeRename renames the objs slice to different names
func dedupeRename(remote string, objs []fs.Object) {
f := objs[0].Fs()
func dedupeRename(f fs.Fs, remote string, objs []fs.Object) {
doMove := f.Features().Move
if doMove == nil {
log.Fatalf("Fs %v doesn't support Move", f)
}
ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)]
outer:
for i, o := range objs {
newName := fmt.Sprintf("%s-%d%s", base, i+1, ext)
suffix := 1
newName := fmt.Sprintf("%s-%d%s", base, i+suffix, ext)
_, err := f.NewObject(newName)
for ; err != fs.ErrorObjectNotFound; suffix++ {
if err != nil {
fs.CountError(err)
fs.Errorf(o, "Failed to check for existing object: %v", err)
continue outer
}
if suffix > 100 {
fs.Errorf(o, "Could not find an available new name")
continue outer
}
newName = fmt.Sprintf("%s-%d%s", base, i+suffix, ext)
_, err = f.NewObject(newName)
}
if !fs.Config.DryRun {
newObj, err := doMove(o, newName)
if err != nil {
@@ -81,7 +97,7 @@ func dedupeDeleteIdentical(ht hash.Type, remote string, objs []fs.Object) (remai
}
// dedupeInteractive interactively dedupes the slice of objects
func dedupeInteractive(ht hash.Type, remote string, objs []fs.Object) {
func dedupeInteractive(f fs.Fs, ht hash.Type, remote string, objs []fs.Object) {
fmt.Printf("%s: %d duplicates remain\n", remote, len(objs))
for i, o := range objs {
md5sum, err := o.Hash(ht)
@@ -96,7 +112,7 @@ func dedupeInteractive(ht hash.Type, remote string, objs []fs.Object) {
keep := config.ChooseNumber("Enter the number of the file to keep", 1, len(objs))
dedupeDeleteAllButOne(keep-1, remote, objs)
case 'r':
dedupeRename(remote, objs)
dedupeRename(f, remote, objs)
}
}
@@ -276,7 +292,7 @@ func Deduplicate(f fs.Fs, mode DeduplicateMode) error {
}
switch mode {
case DeduplicateInteractive:
dedupeInteractive(ht, remote, objs)
dedupeInteractive(f, ht, remote, objs)
case DeduplicateFirst:
dedupeDeleteAllButOne(0, remote, objs)
case DeduplicateNewest:
@@ -286,7 +302,7 @@ func Deduplicate(f fs.Fs, mode DeduplicateMode) error {
sort.Sort(objectsSortedByModTime(objs)) // sort oldest first
dedupeDeleteAllButOne(0, remote, objs)
case DeduplicateRename:
dedupeRename(remote, objs)
dedupeRename(f, remote, objs)
case DeduplicateLargest:
largest, largestIndex := int64(-1), -1
for i, obj := range objs {

View File

@@ -155,7 +155,8 @@ func TestDeduplicateRename(t *testing.T) {
file1 := r.WriteUncheckedObject("one.txt", "This is one", t1)
file2 := r.WriteUncheckedObject("one.txt", "This is one too", t2)
file3 := r.WriteUncheckedObject("one.txt", "This is another one", t3)
r.CheckWithDuplicates(t, file1, file2, file3)
file4 := r.WriteUncheckedObject("one-1.txt", "This is not a duplicate", t1)
r.CheckWithDuplicates(t, file1, file2, file3, file4)
err := operations.Deduplicate(r.Fremote, operations.DeduplicateRename)
require.NoError(t, err)
@@ -168,13 +169,20 @@ func TestDeduplicateRename(t *testing.T) {
remote := o.Remote()
if remote != "one-1.txt" &&
remote != "one-2.txt" &&
remote != "one-3.txt" {
remote != "one-3.txt" &&
remote != "one-4.txt" {
t.Errorf("Bad file name after rename %q", remote)
}
size := o.Size()
if size != file1.Size && size != file2.Size && size != file3.Size {
if size != file1.Size &&
size != file2.Size &&
size != file3.Size &&
size != file4.Size {
t.Errorf("Size not one of the object sizes %d", size)
}
if remote == "one-1.txt" && size != file4.Size {
t.Errorf("Existing non-duplicate file modified %q", remote)
}
})
return nil
}))

fs/operations/lsjson.go (new file, 141 lines)

@@ -0,0 +1,141 @@
package operations
import (
"path"
"time"
"github.com/ncw/rclone/backend/crypt"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/walk"
"github.com/pkg/errors"
)
// ListJSONItem is the struct which gets marshalled for each line
type ListJSONItem struct {
Path string
Name string
Encrypted string `json:",omitempty"`
Size int64
MimeType string `json:",omitempty"`
ModTime Timestamp //`json:",omitempty"`
IsDir bool
Hashes map[string]string `json:",omitempty"`
ID string `json:",omitempty"`
OrigID string `json:",omitempty"`
}
// Timestamp is a time in RFC3339 format with nanosecond precision
type Timestamp time.Time
// MarshalJSON turns a Timestamp into JSON
func (t Timestamp) MarshalJSON() (out []byte, err error) {
tt := time.Time(t)
if tt.IsZero() {
return []byte(`""`), nil
}
return []byte(`"` + tt.Format(time.RFC3339Nano) + `"`), nil
}
// ListJSONOpt describes the options for ListJSON
type ListJSONOpt struct {
Recurse bool `json:"recurse"`
NoModTime bool `json:"noModTime"`
ShowEncrypted bool `json:"showEncrypted"`
ShowOrigIDs bool `json:"showOrigIDs"`
ShowHash bool `json:"showHash"`
}
// ListJSON lists fsrc using the options in opt calling callback for each item
func ListJSON(fsrc fs.Fs, remote string, opt *ListJSONOpt, callback func(*ListJSONItem) error) error {
var cipher crypt.Cipher
if opt.ShowEncrypted {
fsInfo, _, _, config, err := fs.ConfigFs(fsrc.Name() + ":" + fsrc.Root())
if err != nil {
return errors.Wrap(err, "ListJSON failed to load config for crypt remote")
}
if fsInfo.Name != "crypt" {
return errors.New("The remote needs to be of type \"crypt\"")
}
cipher, err = crypt.NewCipher(config)
if err != nil {
return errors.Wrap(err, "ListJSON failed to make new crypt remote")
}
}
err := walk.Walk(fsrc, remote, false, ConfigMaxDepth(opt.Recurse), func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
fs.CountError(err)
fs.Errorf(dirPath, "error listing: %v", err)
return nil
}
for _, entry := range entries {
item := ListJSONItem{
Path: entry.Remote(),
Name: path.Base(entry.Remote()),
Size: entry.Size(),
MimeType: fs.MimeTypeDirEntry(entry),
}
if !opt.NoModTime {
item.ModTime = Timestamp(entry.ModTime())
}
if cipher != nil {
switch entry.(type) {
case fs.Directory:
item.Encrypted = cipher.EncryptDirName(path.Base(entry.Remote()))
case fs.Object:
item.Encrypted = cipher.EncryptFileName(path.Base(entry.Remote()))
default:
fs.Errorf(nil, "Unknown type %T in listing", entry)
}
}
if do, ok := entry.(fs.IDer); ok {
item.ID = do.ID()
}
if opt.ShowOrigIDs {
cur := entry
for {
u, ok := cur.(fs.ObjectUnWrapper)
if !ok {
break // not a wrapped object, use current id
}
next := u.UnWrap()
if next == nil {
break // no base object found, use current id
}
cur = next
}
if do, ok := cur.(fs.IDer); ok {
item.OrigID = do.ID()
}
}
switch x := entry.(type) {
case fs.Directory:
item.IsDir = true
case fs.Object:
item.IsDir = false
if opt.ShowHash {
item.Hashes = make(map[string]string)
for _, hashType := range x.Fs().Hashes().Array() {
hash, err := x.Hash(hashType)
if err != nil {
fs.Errorf(x, "Failed to read hash: %v", err)
} else if hash != "" {
item.Hashes[hashType.String()] = hash
}
}
}
default:
fs.Errorf(nil, "Unknown type %T in listing in ListJSON", entry)
}
err = callback(&item)
if err != nil {
return errors.Wrap(err, "callback failed in ListJSON")
}
}
return nil
})
if err != nil {
return errors.Wrap(err, "error in ListJSON")
}
return nil
}
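
For illustration, this is roughly how a caller (such as the lsjson command) might drive ListJSON, printing one JSON object per entry; the local path used here is only a placeholder:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/operations"
)

func main() {
	// Any remote string works here; a local path is used for illustration.
	f, err := fs.NewFs("/tmp")
	if err != nil {
		log.Fatal(err)
	}
	opt := operations.ListJSONOpt{Recurse: true, ShowHash: true}
	// ListJSON calls the callback once per entry found in the walk.
	err = operations.ListJSON(f, "", &opt, func(item *operations.ListJSONItem) error {
		line, err := json.Marshal(item)
		if err != nil {
			return err
		}
		fmt.Println(string(line))
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
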

View File

@@ -8,6 +8,7 @@ import (
"fmt"
"io"
"io/ioutil"
"net/http"
"path"
"sort"
"strconv"
@@ -975,7 +976,7 @@ func Purge(f fs.Fs, dir string) error {
if err != nil {
return err
}
err = Rmdirs(f, "", false)
err = Rmdirs(f, dir, false)
}
if err != nil {
fs.CountError(err)
@@ -1206,7 +1207,7 @@ func PublicLink(f fs.Fs, remote string) (string, error) {
// containing empty directories) under f, including f.
func Rmdirs(f fs.Fs, dir string, leaveRoot bool) error {
dirEmpty := make(map[string]bool)
dirEmpty[""] = !leaveRoot
dirEmpty[dir] = !leaveRoot
err := walk.Walk(f, dir, true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
fs.CountError(err)
@@ -1359,6 +1360,16 @@ func RcatSize(fdst fs.Fs, dstFileName string, in io.ReadCloser, size int64, modT
return obj, nil
}
// CopyURL copies the data from the url to (fdst, dstFileName)
func CopyURL(fdst fs.Fs, dstFileName string, url string) (dst fs.Object, err error) {
resp, err := http.Get(url)
if err != nil {
return nil, err
}
defer fs.CheckClose(resp.Body, &err)
return RcatSize(fdst, dstFileName, resp.Body, resp.ContentLength, time.Now())
}
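
For illustration, CopyURL can also be called directly from Go; the destination remote and the URL below are placeholders only:

package main

import (
	"log"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/operations"
)

func main() {
	// Destination remote and URL are placeholders for illustration only.
	fdst, err := fs.NewFs("/tmp/downloads")
	if err != nil {
		log.Fatal(err)
	}
	obj, err := operations.CopyURL(fdst, "index.html", "https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("copied %q (%d bytes)", obj.Remote(), obj.Size())
}
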
// moveOrCopyFile moves or copies a single file possibly to a new name
func moveOrCopyFile(fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string, cp bool) (err error) {
dstFilePath := path.Join(fdst.Root(), dstFileName)

View File

@@ -25,6 +25,8 @@ import (
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"regexp"
"strings"
"testing"
@@ -96,6 +98,33 @@ func TestLs(t *testing.T) {
assert.Contains(t, res, " 60 potato2\n")
}
func TestLsWithFilesFrom(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
file1 := r.WriteBoth("potato2", "------------------------------------------------------------", t1)
file2 := r.WriteBoth("empty space", "", t2)
fstest.CheckItems(t, r.Fremote, file1, file2)
// Set the --files-from equivalent
f, err := filter.NewFilter(nil)
require.NoError(t, err)
require.NoError(t, f.AddFile("potato2"))
require.NoError(t, f.AddFile("notfound"))
// Monkey patch the active filter
oldFilter := filter.Active
filter.Active = f
defer func() {
filter.Active = oldFilter
}()
var buf bytes.Buffer
err = operations.List(r.Fremote, &buf)
require.NoError(t, err)
assert.Equal(t, " 60 potato2\n", buf.String())
}
func TestLsLong(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
@@ -374,6 +403,78 @@ func TestRcat(t *testing.T) {
check(false)
}
func TestPurge(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
r.Mkdir(r.Fremote)
// Make some files and dirs
r.ForceMkdir(r.Fremote)
file1 := r.WriteObject("A1/B1/C1/one", "aaa", t1)
//..and dirs we expect to delete
require.NoError(t, operations.Mkdir(r.Fremote, "A2"))
require.NoError(t, operations.Mkdir(r.Fremote, "A1/B2"))
require.NoError(t, operations.Mkdir(r.Fremote, "A1/B2/C2"))
require.NoError(t, operations.Mkdir(r.Fremote, "A1/B1/C3"))
require.NoError(t, operations.Mkdir(r.Fremote, "A3"))
require.NoError(t, operations.Mkdir(r.Fremote, "A3/B3"))
require.NoError(t, operations.Mkdir(r.Fremote, "A3/B3/C4"))
//..and one more file at the end
file2 := r.WriteObject("A1/two", "bbb", t2)
fstest.CheckListingWithPrecision(
t,
r.Fremote,
[]fstest.Item{
file1, file2,
},
[]string{
"A1",
"A1/B1",
"A1/B1/C1",
"A2",
"A1/B2",
"A1/B2/C2",
"A1/B1/C3",
"A3",
"A3/B3",
"A3/B3/C4",
},
fs.GetModifyWindow(r.Fremote),
)
require.NoError(t, operations.Purge(r.Fremote, "A1/B1"))
fstest.CheckListingWithPrecision(
t,
r.Fremote,
[]fstest.Item{
file2,
},
[]string{
"A1",
"A2",
"A1/B2",
"A1/B2/C2",
"A3",
"A3/B3",
"A3/B3/C4",
},
fs.GetModifyWindow(r.Fremote),
)
require.NoError(t, operations.Purge(r.Fremote, ""))
fstest.CheckListingWithPrecision(
t,
r.Fremote,
[]fstest.Item{},
[]string{},
fs.GetModifyWindow(r.Fremote),
)
}
func TestRmdirsNoLeaveRoot(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
@@ -414,6 +515,28 @@ func TestRmdirsNoLeaveRoot(t *testing.T) {
fs.GetModifyWindow(r.Fremote),
)
require.NoError(t, operations.Rmdirs(r.Fremote, "A3/B3/C4", false))
fstest.CheckListingWithPrecision(
t,
r.Fremote,
[]fstest.Item{
file1, file2,
},
[]string{
"A1",
"A1/B1",
"A1/B1/C1",
"A2",
"A1/B2",
"A1/B2/C2",
"A1/B1/C3",
"A3",
"A3/B3",
},
fs.GetModifyWindow(r.Fremote),
)
require.NoError(t, operations.Rmdirs(r.Fremote, "", false))
fstest.CheckListingWithPrecision(
@@ -494,6 +617,28 @@ func TestRcatSize(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file1, file2)
}
func TestCopyURL(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
contents := "file1 contents\n"
file1 := r.WriteFile("file1", contents, t1)
r.Mkdir(r.Fremote)
fstest.CheckItems(t, r.Fremote)
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
_, err := w.Write([]byte(contents))
assert.NoError(t, err)
}))
defer ts.Close()
o, err := operations.CopyURL(r.Fremote, "file1", ts.URL)
require.NoError(t, err)
assert.Equal(t, int64(len(contents)), o.Size())
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1}, nil, fs.ModTimeNotSupported)
}
func TestMoveFile(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()

fs/operations/rc.go (new file, 260 lines)

@@ -0,0 +1,260 @@
package operations
import (
"strings"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/rc"
"github.com/pkg/errors"
)
func init() {
rc.Add(rc.Call{
Path: "operations/list",
AuthRequired: true,
Fn: rcList,
Title: "List the given remote and path in JSON format",
Help: `This takes the following parameters
- fs - a remote name string eg "drive:"
- remote - a path within that remote eg "dir"
- opt - a dictionary of options to control the listing (optional)
- recurse - If set recurse directories
- noModTime - If set don't return modification times
- showEncrypted - If set show the encrypted names
- showOrigIDs - If set show the IDs for each item if known
- showHash - If set return a dictionary of hashes
The result is
- list
- This is an array of objects as described in the lsjson command
See the lsjson command for more information on the above and examples.
`,
})
}
// List the directory
func rcList(in rc.Params) (out rc.Params, err error) {
f, remote, err := rc.GetFsAndRemote(in)
if err != nil {
return nil, err
}
var opt ListJSONOpt
err = in.GetStruct("opt", &opt)
if rc.NotErrParamNotFound(err) {
return nil, err
}
var list = []*ListJSONItem{}
err = ListJSON(f, remote, &opt, func(item *ListJSONItem) error {
list = append(list, item)
return nil
})
if err != nil {
return nil, err
}
out = make(rc.Params)
out["list"] = list
return out, nil
}
func init() {
rc.Add(rc.Call{
Path: "operations/about",
AuthRequired: true,
Fn: rcAbout,
Title: "Return the space used on the remote",
Help: `This takes the following parameters
- fs - a remote name string eg "drive:"
- remote - a path within that remote eg "dir"
The result is as returned from rclone about --json
`,
})
}
// About the remote
func rcAbout(in rc.Params) (out rc.Params, err error) {
f, err := rc.GetFs(in)
if err != nil {
return nil, err
}
doAbout := f.Features().About
if doAbout == nil {
return nil, errors.Errorf("%v doesn't support about", f)
}
u, err := doAbout()
if err != nil {
return nil, errors.Wrap(err, "about call failed")
}
err = rc.Reshape(&out, u)
if err != nil {
return nil, errors.Wrap(err, "about Reshape failed")
}
return out, nil
}
func init() {
for _, copy := range []bool{false, true} {
copy := copy
name := "Move"
if copy {
name = "Copy"
}
rc.Add(rc.Call{
Path: "operations/" + strings.ToLower(name) + "file",
AuthRequired: true,
Fn: func(in rc.Params) (rc.Params, error) {
return rcMoveOrCopyFile(in, copy)
},
Title: name + " a file from source remote to destination remote",
Help: `This takes the following parameters
- srcFs - a remote name string eg "drive:" for the source
- srcRemote - a path within that remote eg "file.txt" for the source
- dstFs - a remote name string eg "drive2:" for the destination
- dstRemote - a path within that remote eg "file2.txt" for the destination
This returns
- jobid - ID of async job to query with job/status
`,
})
}
}
// Move or copy a file
func rcMoveOrCopyFile(in rc.Params, cp bool) (out rc.Params, err error) {
srcFs, srcRemote, err := rc.GetFsAndRemoteNamed(in, "srcFs", "srcRemote")
if err != nil {
return nil, err
}
dstFs, dstRemote, err := rc.GetFsAndRemoteNamed(in, "dstFs", "dstRemote")
if err != nil {
return nil, err
}
return nil, moveOrCopyFile(dstFs, srcFs, dstRemote, srcRemote, cp)
}
func init() {
for _, op := range []struct {
name string
title string
help string
noRemote bool
}{
{name: "mkdir", title: "Make a destination directory or container"},
{name: "rmdir", title: "Remove an empty directory or container"},
{name: "purge", title: "Remove a directory or container and all of its contents"},
{name: "rmdirs", title: "Remove all the empty directories in the path", help: "- leaveRoot - boolean, set to true not to delete the root\n"},
{name: "delete", title: "Remove files in the path", noRemote: true},
{name: "deletefile", title: "Remove the single file pointed to"},
{name: "copyurl", title: "Copy the URL to the object", help: "- url - string, URL to read from\n"},
{name: "cleanup", title: "Remove trashed files in the remote or path", noRemote: true},
} {
op := op
remote := "- remote - a path within that remote eg \"dir\"\n"
if op.noRemote {
remote = ""
}
rc.Add(rc.Call{
Path: "operations/" + op.name,
AuthRequired: true,
Fn: func(in rc.Params) (rc.Params, error) {
return rcSingleCommand(in, op.name, op.noRemote)
},
Title: op.title,
Help: `This takes the following parameters
- fs - a remote name string eg "drive:"
` + remote + op.help + `
See the [` + op.name + ` command](/commands/rclone_` + op.name + `/) for more information on the above.
`,
})
}
}
// Run a single command such as mkdir, rmdir or purge on the given remote
func rcSingleCommand(in rc.Params, name string, noRemote bool) (out rc.Params, err error) {
var (
f fs.Fs
remote string
)
if noRemote {
f, err = rc.GetFs(in)
} else {
f, remote, err = rc.GetFsAndRemote(in)
}
if err != nil {
return nil, err
}
switch name {
case "mkdir":
return nil, Mkdir(f, remote)
case "rmdir":
return nil, Rmdir(f, remote)
case "purge":
return nil, Purge(f, remote)
case "rmdirs":
leaveRoot, err := in.GetBool("leaveRoot")
if rc.NotErrParamNotFound(err) {
return nil, err
}
return nil, Rmdirs(f, remote, leaveRoot)
case "delete":
return nil, Delete(f)
case "deletefile":
o, err := f.NewObject(remote)
if err != nil {
return nil, err
}
return nil, DeleteFile(o)
case "copyurl":
url, err := in.GetString("url")
if err != nil {
return nil, err
}
_, err = CopyURL(f, remote, url)
return nil, err
case "cleanup":
return nil, CleanUp(f)
}
panic("unknown rcSingleCommand type")
}
func init() {
rc.Add(rc.Call{
Path: "operations/size",
AuthRequired: true,
Fn: rcSize,
Title: "Count the number of bytes and files in remote",
Help: `This takes the following parameters
- fs - a remote name string eg "drive:path/to/dir"
Returns
- count - number of files
- bytes - number of bytes in those files
See the [size command](/commands/rclone_size/) for more information on the above.
`,
})
}
// Count the bytes and files in the remote
func rcSize(in rc.Params) (out rc.Params, err error) {
f, err := rc.GetFs(in)
if err != nil {
return nil, err
}
count, bytes, err := Count(f)
if err != nil {
return nil, err
}
out = make(rc.Params)
out["count"] = count
out["bytes"] = bytes
return out, nil
}
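
operations/size and the other calls registered above are also reachable over HTTP once an rc server is running (rclone rcd, or any command started with --rc). A rough sketch of calling it from Go, assuming the default address of localhost:5572 and a server started with --rc-no-auth (otherwise credentials would need to be supplied, since these methods are marked AuthRequired):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Parameters are sent as a JSON object in the POST body.
	body, _ := json.Marshal(map[string]interface{}{
		"fs": "/tmp", // any remote string; a local path here for illustration
	})
	resp, err := http.Post("http://localhost:5572/operations/size", "application/json", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	var out map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("count=%v bytes=%v\n", out["count"], out["bytes"])
}
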

fs/operations/rc_test.go (new file, 358 lines)

@@ -0,0 +1,358 @@
package operations_test
import (
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/operations"
"github.com/ncw/rclone/fs/rc"
"github.com/ncw/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func rcNewRun(t *testing.T, method string) (*fstest.Run, *rc.Call) {
if *fstest.RemoteName != "" {
t.Skip("Skipping test on non local remote")
}
r := fstest.NewRun(t)
call := rc.Calls.Get(method)
assert.NotNil(t, call)
rc.PutCachedFs(r.LocalName, r.Flocal)
rc.PutCachedFs(r.FremoteName, r.Fremote)
return r, call
}
// operations/about: Return the space used on the remote
func TestRcAbout(t *testing.T) {
r, call := rcNewRun(t, "operations/about")
defer r.Finalise()
r.Mkdir(r.Fremote)
// Will get an error if remote doesn't support About
expectedErr := r.Fremote.Features().About == nil
in := rc.Params{
"fs": r.FremoteName,
}
out, err := call.Fn(in)
if expectedErr {
assert.Error(t, err)
return
}
require.NoError(t, err)
// Can't really check the output much!
assert.NotEqual(t, int64(0), out["Total"])
}
// operations/cleanup: Remove trashed files in the remote or path
func TestRcCleanup(t *testing.T) {
r, call := rcNewRun(t, "operations/cleanup")
defer r.Finalise()
in := rc.Params{
"fs": r.LocalName,
}
out, err := call.Fn(in)
require.Error(t, err)
assert.Equal(t, rc.Params(nil), out)
assert.Contains(t, err.Error(), "doesn't support cleanup")
}
// operations/copyfile: Copy a file from source remote to destination remote
func TestRcCopyfile(t *testing.T) {
r, call := rcNewRun(t, "operations/copyfile")
defer r.Finalise()
file1 := r.WriteFile("file1", "file1 contents", t1)
r.Mkdir(r.Fremote)
fstest.CheckItems(t, r.Flocal, file1)
fstest.CheckItems(t, r.Fremote)
in := rc.Params{
"srcFs": r.LocalName,
"srcRemote": "file1",
"dstFs": r.FremoteName,
"dstRemote": "file1-renamed",
}
out, err := call.Fn(in)
require.NoError(t, err)
assert.Equal(t, rc.Params(nil), out)
fstest.CheckItems(t, r.Flocal, file1)
file1.Path = "file1-renamed"
fstest.CheckItems(t, r.Fremote, file1)
}
// operations/copyurl: Copy the URL to the object
func TestRcCopyurl(t *testing.T) {
r, call := rcNewRun(t, "operations/copyurl")
defer r.Finalise()
contents := "file1 contents\n"
file1 := r.WriteFile("file1", contents, t1)
r.Mkdir(r.Fremote)
fstest.CheckItems(t, r.Fremote)
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
_, err := w.Write([]byte(contents))
assert.NoError(t, err)
}))
defer ts.Close()
in := rc.Params{
"fs": r.FremoteName,
"remote": "file1",
"url": ts.URL,
}
out, err := call.Fn(in)
require.NoError(t, err)
assert.Equal(t, rc.Params(nil), out)
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1}, nil, fs.ModTimeNotSupported)
}
// operations/delete: Remove files in the path
func TestRcDelete(t *testing.T) {
r, call := rcNewRun(t, "operations/delete")
defer r.Finalise()
file1 := r.WriteObject("small", "1234567890", t2) // 10 bytes
file2 := r.WriteObject("medium", "------------------------------------------------------------", t1) // 60 bytes
file3 := r.WriteObject("large", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes
fstest.CheckItems(t, r.Fremote, file1, file2, file3)
in := rc.Params{
"fs": r.FremoteName,
}
out, err := call.Fn(in)
require.NoError(t, err)
assert.Equal(t, rc.Params(nil), out)
fstest.CheckItems(t, r.Fremote)
}
// operations/deletefile: Remove the single file pointed to
func TestRcDeletefile(t *testing.T) {
r, call := rcNewRun(t, "operations/deletefile")
defer r.Finalise()
file1 := r.WriteObject("small", "1234567890", t2) // 10 bytes
file2 := r.WriteObject("medium", "------------------------------------------------------------", t1) // 60 bytes
fstest.CheckItems(t, r.Fremote, file1, file2)
in := rc.Params{
"fs": r.FremoteName,
"remote": "small",
}
out, err := call.Fn(in)
require.NoError(t, err)
assert.Equal(t, rc.Params(nil), out)
fstest.CheckItems(t, r.Fremote, file2)
}
// operations/list: List the given remote and path in JSON format
func TestRcList(t *testing.T) {
r, call := rcNewRun(t, "operations/list")
defer r.Finalise()
file1 := r.WriteObject("a", "a", t1)
file2 := r.WriteObject("subdir/b", "bb", t2)
fstest.CheckItems(t, r.Fremote, file1, file2)
in := rc.Params{
"fs": r.FremoteName,
"remote": "",
}
out, err := call.Fn(in)
require.NoError(t, err)
list := out["list"].([]*operations.ListJSONItem)
assert.Equal(t, 2, len(list))
checkFile1 := func(got *operations.ListJSONItem) {
assert.WithinDuration(t, t1, time.Time(got.ModTime), time.Second)
assert.Equal(t, "a", got.Path)
assert.Equal(t, "a", got.Name)
assert.Equal(t, int64(1), got.Size)
assert.Equal(t, "application/octet-stream", got.MimeType)
assert.Equal(t, false, got.IsDir)
}
checkFile1(list[0])
checkSubdir := func(got *operations.ListJSONItem) {
assert.Equal(t, "subdir", got.Path)
assert.Equal(t, "subdir", got.Name)
assert.Equal(t, int64(-1), got.Size)
assert.Equal(t, "inode/directory", got.MimeType)
assert.Equal(t, true, got.IsDir)
}
checkSubdir(list[1])
in = rc.Params{
"fs": r.FremoteName,
"remote": "",
"opt": rc.Params{
"recurse": true,
},
}
out, err = call.Fn(in)
require.NoError(t, err)
list = out["list"].([]*operations.ListJSONItem)
assert.Equal(t, 3, len(list))
checkFile1(list[0])
checkSubdir(list[1])
checkFile2 := func(got *operations.ListJSONItem) {
assert.WithinDuration(t, t2, time.Time(got.ModTime), time.Second)
assert.Equal(t, "subdir/b", got.Path)
assert.Equal(t, "b", got.Name)
assert.Equal(t, int64(2), got.Size)
assert.Equal(t, "application/octet-stream", got.MimeType)
assert.Equal(t, false, got.IsDir)
}
checkFile2(list[2])
}
// operations/mkdir: Make a destination directory or container
func TestRcMkdir(t *testing.T) {
r, call := rcNewRun(t, "operations/mkdir")
defer r.Finalise()
r.Mkdir(r.Fremote)
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{}, []string{}, fs.GetModifyWindow(r.Fremote))
in := rc.Params{
"fs": r.FremoteName,
"remote": "subdir",
}
out, err := call.Fn(in)
require.NoError(t, err)
assert.Equal(t, rc.Params(nil), out)
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{}, []string{"subdir"}, fs.GetModifyWindow(r.Fremote))
}
// operations/movefile: Move a file from source remote to destination remote
func TestRcMovefile(t *testing.T) {
r, call := rcNewRun(t, "operations/movefile")
defer r.Finalise()
file1 := r.WriteFile("file1", "file1 contents", t1)
r.Mkdir(r.Fremote)
fstest.CheckItems(t, r.Flocal, file1)
fstest.CheckItems(t, r.Fremote)
in := rc.Params{
"srcFs": r.LocalName,
"srcRemote": "file1",
"dstFs": r.FremoteName,
"dstRemote": "file1-renamed",
}
out, err := call.Fn(in)
require.NoError(t, err)
assert.Equal(t, rc.Params(nil), out)
fstest.CheckItems(t, r.Flocal)
file1.Path = "file1-renamed"
fstest.CheckItems(t, r.Fremote, file1)
}
// operations/purge: Remove a directory or container and all of its contents
func TestRcPurge(t *testing.T) {
r, call := rcNewRun(t, "operations/purge")
defer r.Finalise()
file1 := r.WriteObject("subdir/file1", "subdir/file1 contents", t1)
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1}, []string{"subdir"}, fs.GetModifyWindow(r.Fremote))
in := rc.Params{
"fs": r.FremoteName,
"remote": "subdir",
}
out, err := call.Fn(in)
require.NoError(t, err)
assert.Equal(t, rc.Params(nil), out)
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{}, []string{}, fs.GetModifyWindow(r.Fremote))
}
// operations/rmdir: Remove an empty directory or container
func TestRcRmdir(t *testing.T) {
r, call := rcNewRun(t, "operations/rmdir")
defer r.Finalise()
r.Mkdir(r.Fremote)
assert.NoError(t, r.Fremote.Mkdir("subdir"))
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{}, []string{"subdir"}, fs.GetModifyWindow(r.Fremote))
in := rc.Params{
"fs": r.FremoteName,
"remote": "subdir",
}
out, err := call.Fn(in)
require.NoError(t, err)
assert.Equal(t, rc.Params(nil), out)
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{}, []string{}, fs.GetModifyWindow(r.Fremote))
}
// operations/rmdirs: Remove all the empty directories in the path
func TestRcRmdirs(t *testing.T) {
r, call := rcNewRun(t, "operations/rmdirs")
defer r.Finalise()
r.Mkdir(r.Fremote)
assert.NoError(t, r.Fremote.Mkdir("subdir"))
assert.NoError(t, r.Fremote.Mkdir("subdir/subsubdir"))
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{}, []string{"subdir", "subdir/subsubdir"}, fs.GetModifyWindow(r.Fremote))
in := rc.Params{
"fs": r.FremoteName,
"remote": "subdir",
}
out, err := call.Fn(in)
require.NoError(t, err)
assert.Equal(t, rc.Params(nil), out)
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{}, []string{}, fs.GetModifyWindow(r.Fremote))
assert.NoError(t, r.Fremote.Mkdir("subdir"))
assert.NoError(t, r.Fremote.Mkdir("subdir/subsubdir"))
in = rc.Params{
"fs": r.FremoteName,
"remote": "subdir",
"leaveRoot": true,
}
out, err = call.Fn(in)
require.NoError(t, err)
assert.Equal(t, rc.Params(nil), out)
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{}, []string{"subdir"}, fs.GetModifyWindow(r.Fremote))
}
// operations/size: Count the number of bytes and files in remote
func TestRcSize(t *testing.T) {
r, call := rcNewRun(t, "operations/size")
defer r.Finalise()
file1 := r.WriteObject("small", "1234567890", t2) // 10 bytes
file2 := r.WriteObject("subdir/medium", "------------------------------------------------------------", t1) // 60 bytes
file3 := r.WriteObject("subdir/subsubdir/large", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 50 bytes
fstest.CheckItems(t, r.Fremote, file1, file2, file3)
in := rc.Params{
"fs": r.FremoteName,
}
out, err := call.Fn(in)
require.NoError(t, err)
assert.Equal(t, rc.Params{
"count": int64(3),
"bytes": int64(120),
}, out)
}

fs/rc/cache.go (new file, 117 lines)

@@ -0,0 +1,117 @@
// This implements the Fs cache
package rc
import (
"sync"
"time"
"github.com/ncw/rclone/fs"
)
var (
fsCacheMu sync.Mutex
fsCache = map[string]*cacheEntry{}
fsNewFs = fs.NewFs // for tests
expireRunning = false
cacheExpireDuration = 300 * time.Second // expire the cache entry when it is older than this
cacheExpireInterval = 60 * time.Second // interval to run the cache expire
)
type cacheEntry struct {
f fs.Fs
fsString string
lastUsed time.Time
}
// GetCachedFs gets a fs.Fs named fsString either from the cache or creates it afresh
func GetCachedFs(fsString string) (f fs.Fs, err error) {
fsCacheMu.Lock()
defer fsCacheMu.Unlock()
entry, ok := fsCache[fsString]
if !ok {
f, err = fsNewFs(fsString)
if err != nil {
return nil, err
}
entry = &cacheEntry{
f: f,
fsString: fsString,
}
fsCache[fsString] = entry
}
entry.lastUsed = time.Now()
if !expireRunning {
time.AfterFunc(cacheExpireInterval, cacheExpire)
expireRunning = true
}
return entry.f, err
}
// PutCachedFs puts an fs.Fs named fsString into the cache
func PutCachedFs(fsString string, f fs.Fs) {
fsCacheMu.Lock()
defer fsCacheMu.Unlock()
fsCache[fsString] = &cacheEntry{
f: f,
fsString: fsString,
lastUsed: time.Now(),
}
if !expireRunning {
time.AfterFunc(cacheExpireInterval, cacheExpire)
expireRunning = true
}
}
// cacheExpire expires any entries that haven't been used recently
func cacheExpire() {
fsCacheMu.Lock()
defer fsCacheMu.Unlock()
now := time.Now()
for fsString, entry := range fsCache {
if now.Sub(entry.lastUsed) > cacheExpireDuration {
delete(fsCache, fsString)
}
}
if len(fsCache) != 0 {
time.AfterFunc(cacheExpireInterval, cacheExpire)
expireRunning = true
} else {
expireRunning = false
}
}
// GetFsNamed gets a fs.Fs named fsName either from the cache or creates it afresh
func GetFsNamed(in Params, fsName string) (f fs.Fs, err error) {
fsString, err := in.GetString(fsName)
if err != nil {
return nil, err
}
return GetCachedFs(fsString)
}
// GetFs gets a fs.Fs named "fs" either from the cache or creates it afresh
func GetFs(in Params) (f fs.Fs, err error) {
return GetFsNamed(in, "fs")
}
// GetFsAndRemoteNamed gets the fsName parameter from in, makes a
// remote or fetches it from the cache then gets the remoteName
// parameter from in too.
func GetFsAndRemoteNamed(in Params, fsName, remoteName string) (f fs.Fs, remote string, err error) {
remote, err = in.GetString(remoteName)
if err != nil {
return
}
f, err = GetFsNamed(in, fsName)
return
}
// GetFsAndRemote gets the `fs` parameter from in, makes a remote or
// fetches it from the cache then gets the `remote` parameter from in
// too.
func GetFsAndRemote(in Params) (f fs.Fs, remote string, err error) {
return GetFsAndRemoteNamed(in, "fs", "remote")
}

fs/rc/cache_test.go (new file, 138 lines)

@@ -0,0 +1,138 @@
package rc
import (
"testing"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/mockfs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var called = 0
func mockNewFs(t *testing.T) func() {
called = 0
oldFsNewFs := fsNewFs
fsNewFs = func(path string) (fs.Fs, error) {
assert.Equal(t, 0, called)
called++
assert.Equal(t, "/", path)
return mockfs.NewFs("mock", "mock"), nil
}
return func() {
fsNewFs = oldFsNewFs
fsCacheMu.Lock()
fsCache = map[string]*cacheEntry{}
expireRunning = false
fsCacheMu.Unlock()
}
}
func TestGetCachedFs(t *testing.T) {
defer mockNewFs(t)()
assert.Equal(t, 0, len(fsCache))
f, err := GetCachedFs("/")
require.NoError(t, err)
assert.Equal(t, 1, len(fsCache))
f2, err := GetCachedFs("/")
require.NoError(t, err)
assert.Equal(t, f, f2)
}
func TestCacheExpire(t *testing.T) {
defer mockNewFs(t)()
cacheExpireInterval = time.Millisecond
assert.Equal(t, false, expireRunning)
_, err := GetCachedFs("/")
require.NoError(t, err)
fsCacheMu.Lock()
entry := fsCache["/"]
assert.Equal(t, 1, len(fsCache))
fsCacheMu.Unlock()
cacheExpire()
fsCacheMu.Lock()
assert.Equal(t, 1, len(fsCache))
entry.lastUsed = time.Now().Add(-cacheExpireDuration - 60*time.Second)
assert.Equal(t, true, expireRunning)
fsCacheMu.Unlock()
time.Sleep(10 * time.Millisecond)
fsCacheMu.Lock()
assert.Equal(t, false, expireRunning)
assert.Equal(t, 0, len(fsCache))
fsCacheMu.Unlock()
}
func TestGetFsNamed(t *testing.T) {
defer mockNewFs(t)()
in := Params{
"potato": "/",
}
f, err := GetFsNamed(in, "potato")
require.NoError(t, err)
assert.NotNil(t, f)
in = Params{
"sausage": "/",
}
f, err = GetFsNamed(in, "potato")
require.Error(t, err)
assert.Nil(t, f)
}
func TestGetFs(t *testing.T) {
defer mockNewFs(t)()
in := Params{
"fs": "/",
}
f, err := GetFs(in)
require.NoError(t, err)
assert.NotNil(t, f)
}
func TestGetFsAndRemoteNamed(t *testing.T) {
defer mockNewFs(t)()
in := Params{
"fs": "/",
"remote": "hello",
}
f, remote, err := GetFsAndRemoteNamed(in, "fs", "remote")
require.NoError(t, err)
assert.NotNil(t, f)
assert.Equal(t, "hello", remote)
f, remote, err = GetFsAndRemoteNamed(in, "fsX", "remote")
require.Error(t, err)
assert.Nil(t, f)
f, remote, err = GetFsAndRemoteNamed(in, "fs", "remoteX")
require.Error(t, err)
assert.Nil(t, f)
}
func TestGetFsAndRemote(t *testing.T) {
defer mockNewFs(t)()
in := Params{
"fs": "/",
"remote": "hello",
}
f, remote, err := GetFsAndRemote(in)
require.NoError(t, err)
assert.NotNil(t, f)
assert.Equal(t, "hello", remote)
}

fs/rc/config.go (new file, 95 lines)

@@ -0,0 +1,95 @@
// Implement config options reading and writing
//
// This is done here rather than in fs/fs.go so we don't cause a circular dependency
package rc
import (
"github.com/pkg/errors"
)
var optionBlock = map[string]interface{}{}
// AddOption adds an option set
func AddOption(name string, option interface{}) {
optionBlock[name] = option
}
func init() {
Add(Call{
Path: "options/blocks",
Fn: rcOptionsBlocks,
Title: "List all the option blocks",
Help: `Returns
- options - a list of the options block names`,
})
}
// Show the list of all the option blocks
func rcOptionsBlocks(in Params) (out Params, err error) {
options := []string{}
for name := range optionBlock {
options = append(options, name)
}
out = make(Params)
out["options"] = options
return out, nil
}
func init() {
Add(Call{
Path: "options/get",
Fn: rcOptionsGet,
Title: "Get all the options",
Help: `Returns an object where keys are option block names and values are an
object with the current option values in.
This shows the internal names of the option within rclone which should
map to the external options very easily with a few exceptions.
`,
})
}
// Show the current option values for all the option blocks
func rcOptionsGet(in Params) (out Params, err error) {
out = make(Params)
for name, options := range optionBlock {
out[name] = options
}
return out, nil
}
func init() {
Add(Call{
Path: "options/set",
Fn: rcOptionsSet,
Title: "Set an option",
Help: `Parameters
- option block name containing an object with
- key: value
Repeated as often as required.
Only supply the options you wish to change. If an option is unknown
it will be silently ignored. Not all options will have an effect when
changed like this.
`,
})
}
// Set an option in an option block
func rcOptionsSet(in Params) (out Params, err error) {
for name, options := range in {
current := optionBlock[name]
if current == nil {
return nil, errors.Errorf("unknown option block %q", name)
}
err := Reshape(current, options)
if err != nil {
return nil, errors.Wrapf(err, "failed to write options from block %q", name)
}
}
return out, nil
}
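
For illustration, here is a small self-contained sketch of options/set; the "example" block and its fields are stand-ins registered on the spot, the same way filterflags registers the real "filter" block with rc.AddOption:

package main

import (
	"fmt"
	"log"

	"github.com/ncw/rclone/fs/rc"
)

// exampleOpts stands in for a real option block such as the one
// filterflags registers under "filter".
var exampleOpts = struct {
	DeleteExcluded bool
	MaxDepth       int
}{MaxDepth: -1}

func main() {
	// Register the block, as AddFlags does for the real option sets.
	rc.AddOption("example", &exampleOpts)

	call := rc.Calls.Get("options/set")
	// Keys inside a block are matched against the struct's field names
	// via Reshape, ie a JSON round trip.
	_, err := call.Fn(rc.Params{
		"example": rc.Params{"DeleteExcluded": true},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", exampleOpts) // {DeleteExcluded:true MaxDepth:-1}
}
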

fs/rc/config_test.go (new file, 88 lines)

@@ -0,0 +1,88 @@
package rc
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func clearOptionBlock() {
optionBlock = map[string]interface{}{}
}
var testOptions = struct {
String string
Int int
}{
String: "hello",
Int: 42,
}
func TestAddOption(t *testing.T) {
defer clearOptionBlock()
assert.Equal(t, len(optionBlock), 0)
AddOption("potato", &testOptions)
assert.Equal(t, len(optionBlock), 1)
assert.Equal(t, &testOptions, optionBlock["potato"])
}
func TestOptionsBlocks(t *testing.T) {
defer clearOptionBlock()
AddOption("potato", &testOptions)
call := Calls.Get("options/blocks")
require.NotNil(t, call)
in := Params{}
out, err := call.Fn(in)
require.NoError(t, err)
require.NotNil(t, out)
assert.Equal(t, Params{"options": []string{"potato"}}, out)
}
func TestOptionsGet(t *testing.T) {
defer clearOptionBlock()
AddOption("potato", &testOptions)
call := Calls.Get("options/get")
require.NotNil(t, call)
in := Params{}
out, err := call.Fn(in)
require.NoError(t, err)
require.NotNil(t, out)
assert.Equal(t, Params{"potato": &testOptions}, out)
}
func TestOptionsSet(t *testing.T) {
defer clearOptionBlock()
AddOption("potato", &testOptions)
call := Calls.Get("options/set")
require.NotNil(t, call)
in := Params{
"potato": Params{
"Int": 50,
},
}
out, err := call.Fn(in)
require.NoError(t, err)
require.Nil(t, out)
assert.Equal(t, 50, testOptions.Int)
assert.Equal(t, "hello", testOptions.String)
// unknown option block
in = Params{
"sausage": Params{
"Int": 50,
},
}
out, err = call.Fn(in)
require.Error(t, err)
assert.Contains(t, err.Error(), "unknown option block")
// bad shape
in = Params{
"potato": []string{"a", "b"},
}
out, err = call.Fn(in)
require.Error(t, err)
assert.Contains(t, err.Error(), "failed to write options")
}

View File

@@ -6,10 +6,23 @@ import (
"os"
"runtime"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/version"
"github.com/pkg/errors"
)
func init() {
Add(Call{
Path: "rc/noopauth",
AuthRequired: true,
Fn: rcNoop,
Title: "Echo the input to the output parameters requiring auth",
Help: `
This echoes the input parameters to the output parameters for testing
purposes. It can be used to check that rclone is still alive and to
check that parameter passing is working properly.`,
})
Add(Call{
Path: "rc/noop",
Fn: rcNoop,
@@ -19,6 +32,14 @@ This echoes the input parameters to the output parameters for testing
purposes. It can be used to check that rclone is still alive and to
check that parameter passing is working properly.`,
})
}
// Echo the input to the output parameters
func rcNoop(in Params) (out Params, err error) {
return in, nil
}
func init() {
Add(Call{
Path: "rc/error",
Fn: rcError,
@@ -27,6 +48,14 @@ check that parameter passing is working properly.`,
This returns an error with the input as part of its error string.
Useful for testing error handling.`,
})
}
// Return an error regardless
func rcError(in Params) (out Params, err error) {
return nil, errors.Errorf("arbitrary error on input %+v", in)
}
func init() {
Add(Call{
Path: "rc/list",
Fn: rcList,
@@ -35,6 +64,16 @@ Useful for testing error handling.`,
This lists all the registered remote control commands as a JSON map in
the commands response.`,
})
}
// List the registered commands
func rcList(in Params) (out Params, err error) {
out = make(Params)
out["commands"] = Calls.List()
return out, nil
}
func init() {
Add(Call{
Path: "core/pid",
Fn: rcPid,
@@ -43,6 +82,16 @@ the commands response.`,
This returns PID of current process.
Useful for stopping rclone process.`,
})
}
// Return PID of current process
func rcPid(in Params) (out Params, err error) {
out = make(Params)
out["pid"] = os.Getpid()
return out, nil
}
func init() {
Add(Call{
Path: "core/memstats",
Fn: rcMemStats,
@@ -59,40 +108,6 @@ The most interesting values for most people are:
* It is virtual memory so may include unused memory
`,
})
Add(Call{
Path: "core/gc",
Fn: rcGc,
Title: "Runs a garbage collection.",
Help: `
This tells the go runtime to do a garbage collection run. It isn't
necessary to call this normally, but it can be useful for debugging
memory problems.
`,
})
}
// Echo the input to the output parameters
func rcNoop(in Params) (out Params, err error) {
return in, nil
}
// Return an error regardless
func rcError(in Params) (out Params, err error) {
return nil, errors.Errorf("arbitrary error on input %+v", in)
}
// List the registered commands
func rcList(in Params) (out Params, err error) {
out = make(Params)
out["commands"] = registry.list()
return out, nil
}
// Return PID of current process
func rcPid(in Params) (out Params, err error) {
out = make(Params)
out["pid"] = os.Getpid()
return out, nil
}
// Return the memory statistics
@@ -123,9 +138,88 @@ func rcMemStats(in Params) (out Params, err error) {
return out, nil
}
func init() {
Add(Call{
Path: "core/gc",
Fn: rcGc,
Title: "Runs a garbage collection.",
Help: `
This tells the go runtime to do a garbage collection run. It isn't
necessary to call this normally, but it can be useful for debugging
memory problems.
`,
})
}
// Do a garbage collection run
func rcGc(in Params) (out Params, err error) {
out = make(Params)
runtime.GC()
return nil, nil
}
func init() {
Add(Call{
Path: "core/version",
Fn: rcVersion,
Title: "Shows the current version of rclone and the go runtime.",
Help: `
This shows the current version of rclone and the go runtime
- version - rclone version, eg "v1.44"
- decomposed - version number as [major, minor, patch, subpatch]
- note patch and subpatch will be 999 for a git compiled version
- isGit - boolean - true if this was compiled from the git version
- os - OS in use as according to Go
- arch - cpu architecture in use according to Go
- goVersion - version of Go runtime in use
`,
})
}
// Return version info
func rcVersion(in Params) (out Params, err error) {
decomposed, err := version.New(fs.Version)
if err != nil {
return nil, err
}
out = Params{
"version": fs.Version,
"decomposed": decomposed,
"isGit": decomposed.IsGit(),
"os": runtime.GOOS,
"arch": runtime.GOARCH,
"goVersion": runtime.Version(),
}
return out, nil
}
func init() {
Add(Call{
Path: "core/obscure",
Fn: rcObscure,
Title: "Obscures a string passed in.",
Help: `
Pass a clear string and rclone will obscure it for the config file:
- clear - string
Returns
- obscured - string
`,
})
}
// Return obscured string
func rcObscure(in Params) (out Params, err error) {
clear, err := in.GetString("clear")
if err != nil {
return nil, err
}
obscured, err := obscure.Obscure(clear)
if err != nil {
return nil, err
}
out = Params{
"obscured": obscured,
}
return out, nil
}
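
core/obscure pairs naturally with the config/* calls earlier in this changeset: obscure a secret first, then store it as a parameter of a new remote. A rough in-process sketch; the remote name, type and parameter key are placeholders, and whether a given backend expects an obscured value depends on that backend:

package main

import (
	"log"

	"github.com/ncw/rclone/fs/rc"
)

func main() {
	// Obscure a secret for storing in the config file.
	obscureCall := rc.Calls.Get("core/obscure")
	out, err := obscureCall.Fn(rc.Params{"clear": "potato"})
	if err != nil {
		log.Fatal(err)
	}

	// Create a remote using the obscured value as a parameter.
	// "myremote", "local" and "test_pass" are placeholders.
	createCall := rc.Calls.Get("config/create")
	_, err = createCall.Fn(rc.Params{
		"name": "myremote",
		"type": "local",
		"parameters": rc.Params{
			"test_pass": out["obscured"],
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
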

fs/rc/internal_test.go (new file, 108 lines)

@@ -0,0 +1,108 @@
package rc
import (
"runtime"
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/version"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestInternalNoop(t *testing.T) {
call := Calls.Get("rc/noop")
assert.NotNil(t, call)
in := Params{
"String": "hello",
"Int": 42,
}
out, err := call.Fn(in)
require.NoError(t, err)
require.NotNil(t, out)
assert.Equal(t, in, out)
}
func TestInternalError(t *testing.T) {
call := Calls.Get("rc/error")
assert.NotNil(t, call)
in := Params{}
out, err := call.Fn(in)
require.Error(t, err)
require.Nil(t, out)
}
func TestInternalList(t *testing.T) {
call := Calls.Get("rc/list")
assert.NotNil(t, call)
in := Params{}
out, err := call.Fn(in)
require.NoError(t, err)
require.NotNil(t, out)
assert.Equal(t, Params{"commands": Calls.List()}, out)
}
func TestCorePid(t *testing.T) {
call := Calls.Get("core/pid")
assert.NotNil(t, call)
in := Params{}
out, err := call.Fn(in)
require.NoError(t, err)
require.NotNil(t, out)
pid := out["pid"]
assert.NotEqual(t, nil, pid)
_, ok := pid.(int)
assert.Equal(t, true, ok)
}
func TestCoreMemstats(t *testing.T) {
call := Calls.Get("core/memstats")
assert.NotNil(t, call)
in := Params{}
out, err := call.Fn(in)
require.NoError(t, err)
require.NotNil(t, out)
sys := out["Sys"]
assert.NotEqual(t, nil, sys)
_, ok := sys.(uint64)
assert.Equal(t, true, ok)
}
func TestCoreGC(t *testing.T) {
call := Calls.Get("core/gc")
assert.NotNil(t, call)
in := Params{}
out, err := call.Fn(in)
require.NoError(t, err)
require.Nil(t, out)
assert.Equal(t, Params(nil), out)
}
func TestCoreVersion(t *testing.T) {
call := Calls.Get("core/version")
assert.NotNil(t, call)
in := Params{}
out, err := call.Fn(in)
require.NoError(t, err)
require.NotNil(t, out)
assert.Equal(t, fs.Version, out["version"])
assert.Equal(t, runtime.GOOS, out["os"])
assert.Equal(t, runtime.GOARCH, out["arch"])
assert.Equal(t, runtime.Version(), out["goVersion"])
_ = out["isGit"].(bool)
v := out["decomposed"].(version.Version)
assert.True(t, len(v) >= 2)
}
func TestCoreObscure(t *testing.T) {
call := Calls.Get("core/obscure")
assert.NotNil(t, call)
in := Params{
"clear": "potato",
}
out, err := call.Fn(in)
require.NoError(t, err)
require.NotNil(t, out)
assert.Equal(t, in["clear"], obscure.MustReveal(out["obscured"].(string)))
}

fs/rc/job.go (new file, 215 lines)

@@ -0,0 +1,215 @@
// Manage background jobs that the rc is running
package rc
import (
"sync"
"sync/atomic"
"time"
"github.com/pkg/errors"
)
const (
// expire the job when it is finished and older than this
expireDuration = 60 * time.Second
// interval to run the expire cache
expireInterval = 10 * time.Second
)
// Job describes an asynchronous task started via the rc package
type Job struct {
mu sync.Mutex
ID int64 `json:"id"`
StartTime time.Time `json:"startTime"`
EndTime time.Time `json:"endTime"`
Error string `json:"error"`
Finished bool `json:"finished"`
Success bool `json:"success"`
Duration float64 `json:"duration"`
Output Params `json:"output"`
}
// Jobs describes a collection of running tasks
type Jobs struct {
mu sync.RWMutex
jobs map[int64]*Job
expireInterval time.Duration
expireRunning bool
}
var (
running = newJobs()
jobID = int64(0)
)
// newJobs makes a new Jobs structure
func newJobs() *Jobs {
return &Jobs{
jobs: map[int64]*Job{},
expireInterval: expireInterval,
}
}
// kickExpire makes sure Expire is running
func (jobs *Jobs) kickExpire() {
jobs.mu.Lock()
defer jobs.mu.Unlock()
if !jobs.expireRunning {
time.AfterFunc(jobs.expireInterval, jobs.Expire)
jobs.expireRunning = true
}
}
// Expire expires any jobs that haven't been collected
func (jobs *Jobs) Expire() {
jobs.mu.Lock()
defer jobs.mu.Unlock()
now := time.Now()
for ID, job := range jobs.jobs {
job.mu.Lock()
if job.Finished && now.Sub(job.EndTime) > expireDuration {
delete(jobs.jobs, ID)
}
job.mu.Unlock()
}
if len(jobs.jobs) != 0 {
time.AfterFunc(jobs.expireInterval, jobs.Expire)
jobs.expireRunning = true
} else {
jobs.expireRunning = false
}
}
// IDs returns the IDs of the running jobs
func (jobs *Jobs) IDs() (IDs []int64) {
jobs.mu.RLock()
defer jobs.mu.RUnlock()
IDs = []int64{}
for ID := range jobs.jobs {
IDs = append(IDs, ID)
}
return IDs
}
// Get a job with a given ID or nil if it doesn't exist
func (jobs *Jobs) Get(ID int64) *Job {
jobs.mu.RLock()
defer jobs.mu.RUnlock()
return jobs.jobs[ID]
}
// mark the job as finished
func (job *Job) finish(out Params, err error) {
job.mu.Lock()
job.EndTime = time.Now()
if out == nil {
out = make(Params)
}
job.Output = out
job.Duration = job.EndTime.Sub(job.StartTime).Seconds()
if err != nil {
job.Error = err.Error()
job.Success = false
} else {
job.Error = ""
job.Success = true
}
job.Finished = true
job.mu.Unlock()
running.kickExpire() // make sure this job gets expired
}
// run the job until completion writing the return status
func (job *Job) run(fn Func, in Params) {
defer func() {
if r := recover(); r != nil {
job.finish(nil, errors.Errorf("panic received: %v", r))
}
}()
job.finish(fn(in))
}
// NewJob starts a new Job off
func (jobs *Jobs) NewJob(fn Func, in Params) *Job {
job := &Job{
ID: atomic.AddInt64(&jobID, 1),
StartTime: time.Now(),
}
go job.run(fn, in)
jobs.mu.Lock()
jobs.jobs[job.ID] = job
jobs.mu.Unlock()
return job
}
// StartJob starts a new job and returns a Param suitable for output
func StartJob(fn Func, in Params) (Params, error) {
job := running.NewJob(fn, in)
out := make(Params)
out["jobid"] = job.ID
return out, nil
}
func init() {
Add(Call{
Path: "job/status",
Fn: rcJobStatus,
Title: "Reads the status of the job ID",
Help: `Parameters
- jobid - id of the job (integer)
Results
- duration - time in seconds that the job ran for
- endTime - time the job finished (eg "2018-10-26T18:50:20.528746884+01:00")
- error - error from the job or empty string for no error
- finished - boolean whether the job has finished or not
- id - as passed in above
- startTime - time the job started (eg "2018-10-26T18:50:20.528336039+01:00")
- success - boolean - true for success false otherwise
- output - output of the job as would have been returned if called synchronously
`,
})
}
// Returns the status of a job
func rcJobStatus(in Params) (out Params, err error) {
jobID, err := in.GetInt64("jobid")
if err != nil {
return nil, err
}
job := running.Get(jobID)
if job == nil {
return nil, errors.New("job not found")
}
job.mu.Lock()
defer job.mu.Unlock()
out = make(Params)
err = Reshape(&out, job)
if err != nil {
return nil, errors.New("Reshape failed in job status")
}
return out, nil
}
func init() {
Add(Call{
Path: "job/list",
Fn: rcJobList,
Title: "Lists the IDs of the running jobs",
Help: `Parameters - None
Results
- jobids - array of integer job ids
`,
})
}
// Returns the IDs of the running jobs
func rcJobList(in Params) (out Params, err error) {
out = make(Params)
out["jobids"] = running.IDs()
return out, nil
}

fs/rc/job_test.go (new file, 217 lines)

@@ -0,0 +1,217 @@
package rc
import (
"runtime"
"testing"
"time"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestNewJobs(t *testing.T) {
jobs := newJobs()
assert.Equal(t, 0, len(jobs.jobs))
}
func TestJobsKickExpire(t *testing.T) {
jobs := newJobs()
jobs.expireInterval = time.Millisecond
assert.Equal(t, false, jobs.expireRunning)
jobs.kickExpire()
jobs.mu.Lock()
assert.Equal(t, true, jobs.expireRunning)
jobs.mu.Unlock()
time.Sleep(10 * time.Millisecond)
jobs.mu.Lock()
assert.Equal(t, false, jobs.expireRunning)
jobs.mu.Unlock()
}
func TestJobsExpire(t *testing.T) {
wait := make(chan struct{})
jobs := newJobs()
jobs.expireInterval = time.Millisecond
assert.Equal(t, false, jobs.expireRunning)
job := jobs.NewJob(func(in Params) (Params, error) {
defer close(wait)
return in, nil
}, Params{})
<-wait
assert.Equal(t, 1, len(jobs.jobs))
jobs.Expire()
assert.Equal(t, 1, len(jobs.jobs))
jobs.mu.Lock()
job.EndTime = time.Now().Add(-expireDuration - 60*time.Second)
assert.Equal(t, true, jobs.expireRunning)
jobs.mu.Unlock()
time.Sleep(10 * time.Millisecond)
jobs.mu.Lock()
assert.Equal(t, false, jobs.expireRunning)
assert.Equal(t, 0, len(jobs.jobs))
jobs.mu.Unlock()
}
var noopFn = func(in Params) (Params, error) {
return nil, nil
}
func TestJobsIDs(t *testing.T) {
jobs := newJobs()
job1 := jobs.NewJob(noopFn, Params{})
job2 := jobs.NewJob(noopFn, Params{})
wantIDs := []int64{job1.ID, job2.ID}
gotIDs := jobs.IDs()
require.Equal(t, 2, len(gotIDs))
if gotIDs[0] != wantIDs[0] {
gotIDs[0], gotIDs[1] = gotIDs[1], gotIDs[0]
}
assert.Equal(t, wantIDs, gotIDs)
}
func TestJobsGet(t *testing.T) {
jobs := newJobs()
job := jobs.NewJob(noopFn, Params{})
assert.Equal(t, job, jobs.Get(job.ID))
assert.Nil(t, jobs.Get(123123123123))
}
var longFn = func(in Params) (Params, error) {
time.Sleep(1 * time.Hour)
return nil, nil
}
func TestJobFinish(t *testing.T) {
jobs := newJobs()
job := jobs.NewJob(longFn, Params{})
assert.Equal(t, true, job.EndTime.IsZero())
assert.Equal(t, Params(nil), job.Output)
assert.Equal(t, 0.0, job.Duration)
assert.Equal(t, "", job.Error)
assert.Equal(t, false, job.Success)
assert.Equal(t, false, job.Finished)
wantOut := Params{"a": 1}
job.finish(wantOut, nil)
assert.Equal(t, false, job.EndTime.IsZero())
assert.Equal(t, wantOut, job.Output)
assert.NotEqual(t, 0.0, job.Duration)
assert.Equal(t, "", job.Error)
assert.Equal(t, true, job.Success)
assert.Equal(t, true, job.Finished)
job = jobs.NewJob(longFn, Params{})
job.finish(nil, nil)
assert.Equal(t, false, job.EndTime.IsZero())
assert.Equal(t, Params{}, job.Output)
assert.NotEqual(t, 0.0, job.Duration)
assert.Equal(t, "", job.Error)
assert.Equal(t, true, job.Success)
assert.Equal(t, true, job.Finished)
job = jobs.NewJob(longFn, Params{})
job.finish(wantOut, errors.New("potato"))
assert.Equal(t, false, job.EndTime.IsZero())
assert.Equal(t, wantOut, job.Output)
assert.NotEqual(t, 0.0, job.Duration)
assert.Equal(t, "potato", job.Error)
assert.Equal(t, false, job.Success)
assert.Equal(t, true, job.Finished)
}
// We've tested the functionality of run() already as it is
// part of NewJob, now just test the panic catching
func TestJobRunPanic(t *testing.T) {
wait := make(chan struct{})
boom := func(in Params) (Params, error) {
defer close(wait)
panic("boom")
}
jobs := newJobs()
job := jobs.NewJob(boom, Params{})
<-wait
runtime.Gosched() // yield to make sure job is updated
// Wait a short time for the panic to propagate
for i := uint(0); i < 10; i++ {
job.mu.Lock()
e := job.Error
job.mu.Unlock()
if e != "" {
break
}
time.Sleep(time.Millisecond << i)
}
job.mu.Lock()
assert.Equal(t, false, job.EndTime.IsZero())
assert.Equal(t, Params{}, job.Output)
assert.NotEqual(t, 0.0, job.Duration)
assert.Equal(t, "panic received: boom", job.Error)
assert.Equal(t, false, job.Success)
assert.Equal(t, true, job.Finished)
job.mu.Unlock()
}
func TestJobsNewJob(t *testing.T) {
jobID = 0
jobs := newJobs()
job := jobs.NewJob(noopFn, Params{})
assert.Equal(t, int64(1), job.ID)
assert.Equal(t, job, jobs.Get(1))
}
func TestStartJob(t *testing.T) {
jobID = 0
out, err := StartJob(longFn, Params{})
assert.NoError(t, err)
assert.Equal(t, Params{"jobid": int64(1)}, out)
}
func TestRcJobStatus(t *testing.T) {
jobID = 0
_, err := StartJob(longFn, Params{})
assert.NoError(t, err)
call := Calls.Get("job/status")
assert.NotNil(t, call)
in := Params{"jobid": 1}
out, err := call.Fn(in)
require.NoError(t, err)
require.NotNil(t, out)
assert.Equal(t, float64(1), out["id"])
assert.Equal(t, "", out["error"])
assert.Equal(t, false, out["finished"])
assert.Equal(t, false, out["success"])
in = Params{"jobid": 123123123}
_, err = call.Fn(in)
require.Error(t, err)
assert.Contains(t, err.Error(), "job not found")
in = Params{"jobidx": 123123123}
_, err = call.Fn(in)
require.Error(t, err)
assert.Contains(t, err.Error(), "Didn't find key")
}
func TestRcJobList(t *testing.T) {
jobID = 0
_, err := StartJob(longFn, Params{})
assert.NoError(t, err)
call := Calls.Get("job/list")
assert.NotNil(t, call)
in := Params{}
out, err := call.Fn(in)
require.NoError(t, err)
require.NotNil(t, out)
assert.Equal(t, Params{"jobids": []int64{1}}, out)
}
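A minimal usage sketch (not part of the changeset above) of the async job flow exercised by these tests: start a function as a background job, then poll the job/status call with the returned jobid until it reports finished. The helper name and sleep interval are illustrative only; assumes imports of "time" and "github.com/ncw/rclone/fs/rc" when used outside package rc.
func waitForJob(fn rc.Func, in rc.Params) (rc.Params, error) {
	// Start fn in the background - the reply carries the job ID
	out, err := rc.StartJob(fn, in)
	if err != nil {
		return nil, err
	}
	statusCall := rc.Calls.Get("job/status")
	for {
		// Query the job until it reports finished
		status, err := statusCall.Fn(rc.Params{"jobid": out["jobid"]})
		if err != nil {
			return nil, err
		}
		if finished, _ := status["finished"].(bool); finished {
			return status, nil
		}
		time.Sleep(10 * time.Millisecond)
	}
}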

fs/rc/params.go Normal file

@@ -0,0 +1,204 @@
// Parameter parsing
package rc
import (
"encoding/json"
"fmt"
"math"
"strconv"
"github.com/pkg/errors"
)
// Params is the input and output type for the Func
type Params map[string]interface{}
// ErrParamNotFound - this is returned from the Get* functions if the
// parameter isn't found along with a zero value of the requested
// item.
//
// Returning an error of this type from an rc.Func will cause the http
// method to return http.StatusBadRequest
type ErrParamNotFound string
// Error turns this error into a string
func (e ErrParamNotFound) Error() string {
return fmt.Sprintf("Didn't find key %q in input", string(e))
}
// IsErrParamNotFound returns whether err is ErrParamNotFound
func IsErrParamNotFound(err error) bool {
_, isNotFound := err.(ErrParamNotFound)
return isNotFound
}
// NotErrParamNotFound returns true if err != nil and
// !IsErrParamNotFound(err)
//
// This is for checking error returns of the Get* functions to ignore
// error not found returns and take the default value.
func NotErrParamNotFound(err error) bool {
return err != nil && !IsErrParamNotFound(err)
}
// ErrParamInvalid - this is returned from the Get* functions if the
// parameter is invalid.
//
// Returning an error of this type from an rc.Func will cause the http
// method to return http.StatusBadRequest
type ErrParamInvalid struct {
error
}
// IsErrParamInvalid returns whether err is ErrParamInvalid
func IsErrParamInvalid(err error) bool {
_, isInvalid := err.(ErrParamInvalid)
return isInvalid
}
// Reshape reshapes one blob of data into another via json serialization
//
// out should be a pointer type
//
// This isn't a very efficient way of dealing with this!
func Reshape(out interface{}, in interface{}) error {
b, err := json.Marshal(in)
if err != nil {
return errors.Wrapf(err, "Reshape failed to Marshal")
}
err = json.Unmarshal(b, out)
if err != nil {
return errors.Wrapf(err, "Reshape failed to Unmarshal")
}
return nil
}
// Get gets a parameter from the input
//
// If the parameter isn't found then error will be of type
// ErrParamNotFound and the returned value will be nil.
func (p Params) Get(key string) (interface{}, error) {
value, ok := p[key]
if !ok {
return nil, ErrParamNotFound(key)
}
return value, nil
}
// GetString gets a string parameter from the input
//
// If the parameter isn't found then error will be of type
// ErrParamNotFound and the returned value will be "".
func (p Params) GetString(key string) (string, error) {
value, err := p.Get(key)
if err != nil {
return "", err
}
str, ok := value.(string)
if !ok {
return "", ErrParamInvalid{errors.Errorf("expecting string value for key %q (was %T)", key, value)}
}
return str, nil
}
// GetInt64 gets an int64 parameter from the input
//
// If the parameter isn't found then error will be of type
// ErrParamNotFound and the returned value will be 0.
func (p Params) GetInt64(key string) (int64, error) {
value, err := p.Get(key)
if err != nil {
return 0, err
}
switch x := value.(type) {
case int:
return int64(x), nil
case int64:
return x, nil
case float64:
if x > math.MaxInt64 || x < math.MinInt64 {
return 0, ErrParamInvalid{errors.Errorf("key %q (%v) overflows int64 ", key, value)}
}
return int64(x), nil
case string:
i, err := strconv.ParseInt(x, 10, 64)
if err != nil {
return 0, ErrParamInvalid{errors.Wrapf(err, "couldn't parse key %q (%v) as int64", key, value)}
}
return i, nil
}
return 0, ErrParamInvalid{errors.Errorf("expecting int64 value for key %q (was %T)", key, value)}
}
// GetFloat64 gets a float64 parameter from the input
//
// If the parameter isn't found then error will be of type
// ErrParamNotFound and the returned value will be 0.
func (p Params) GetFloat64(key string) (float64, error) {
value, err := p.Get(key)
if err != nil {
return 0, err
}
switch x := value.(type) {
case float64:
return x, nil
case int:
return float64(x), nil
case int64:
return float64(x), nil
case string:
f, err := strconv.ParseFloat(x, 64)
if err != nil {
return 0, ErrParamInvalid{errors.Wrapf(err, "couldn't parse key %q (%v) as float64", key, value)}
}
return f, nil
}
return 0, ErrParamInvalid{errors.Errorf("expecting float64 value for key %q (was %T)", key, value)}
}
// GetBool gets a boolean parameter from the input
//
// If the parameter isn't found then error will be of type
// ErrParamNotFound and the returned value will be false.
func (p Params) GetBool(key string) (bool, error) {
value, err := p.Get(key)
if err != nil {
return false, err
}
switch x := value.(type) {
case int:
return x != 0, nil
case int64:
return x != 0, nil
case float64:
return x != 0, nil
case bool:
return x, nil
case string:
b, err := strconv.ParseBool(x)
if err != nil {
return false, ErrParamInvalid{errors.Wrapf(err, "couldn't parse key %q (%v) as bool", key, value)}
}
return b, nil
}
return false, ErrParamInvalid{errors.Errorf("expecting bool value for key %q (was %T)", key, value)}
}
// GetStruct gets a struct from key from the input into the struct
// pointed to by out. out must be a pointer type.
//
// If the parameter isn't found then error will be of type
// ErrParamNotFound and out will be unchanged.
func (p Params) GetStruct(key string, out interface{}) error {
value, err := p.Get(key)
if err != nil {
return err
}
err = Reshape(out, value)
if err != nil {
return ErrParamInvalid{errors.Wrapf(err, "key %q", key)}
}
return nil
}
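A hedged sketch (not part of the file above) of how an rc.Func might consume these getters: a required parameter returns the Get* error directly (the server turns ErrParamNotFound/ErrParamInvalid into a 400), while an optional one uses NotErrParamNotFound so a missing key falls back to a default. The function and parameter names are illustrative; assumes an import of "github.com/ncw/rclone/fs/rc" when used from another package.
func exampleFn(in rc.Params) (rc.Params, error) {
	// "name" is required - propagate the error if missing or wrong type
	name, err := in.GetString("name")
	if err != nil {
		return nil, err
	}
	// "count" is optional and defaults to 1 when not supplied
	count := int64(1)
	if n, err := in.GetInt64("count"); err == nil {
		count = n
	} else if rc.NotErrParamNotFound(err) {
		return nil, err
	}
	return rc.Params{"name": name, "count": count}, nil
}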

fs/rc/params_test.go Normal file

@@ -0,0 +1,251 @@
package rc
import (
"fmt"
"testing"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestErrParamNotFoundError(t *testing.T) {
e := ErrParamNotFound("key")
assert.Equal(t, "Didn't find key \"key\" in input", e.Error())
}
func TestIsErrParamNotFound(t *testing.T) {
assert.Equal(t, true, IsErrParamNotFound(ErrParamNotFound("key")))
assert.Equal(t, false, IsErrParamNotFound(nil))
assert.Equal(t, false, IsErrParamNotFound(errors.New("potato")))
}
func TestNotErrParamNotFound(t *testing.T) {
assert.Equal(t, false, NotErrParamNotFound(ErrParamNotFound("key")))
assert.Equal(t, false, NotErrParamNotFound(nil))
assert.Equal(t, true, NotErrParamNotFound(errors.New("potato")))
}
func TestIsErrParamInvalid(t *testing.T) {
e := ErrParamInvalid{errors.New("potato")}
assert.Equal(t, true, IsErrParamInvalid(e))
assert.Equal(t, false, IsErrParamInvalid(nil))
assert.Equal(t, false, IsErrParamInvalid(errors.New("potato")))
}
func TestReshape(t *testing.T) {
in := Params{
"String": "hello",
"Float": 4.2,
}
var out struct {
String string
Float float64
}
require.NoError(t, Reshape(&out, in))
assert.Equal(t, "hello", out.String)
assert.Equal(t, 4.2, out.Float)
var inCopy = Params{}
require.NoError(t, Reshape(&inCopy, out))
assert.Equal(t, in, inCopy)
// Now a failure to marshal
var in2 func()
require.Error(t, Reshape(&inCopy, in2))
// Now a failure to unmarshal
require.Error(t, Reshape(&out, "string"))
}
func TestParamsGet(t *testing.T) {
in := Params{
"ok": 1,
}
v1, e1 := in.Get("ok")
assert.NoError(t, e1)
assert.Equal(t, 1, v1)
v2, e2 := in.Get("notOK")
assert.Error(t, e2)
assert.Equal(t, nil, v2)
assert.Equal(t, ErrParamNotFound("notOK"), e2)
}
func TestParamsGetString(t *testing.T) {
in := Params{
"string": "one",
"notString": 17,
}
v1, e1 := in.GetString("string")
assert.NoError(t, e1)
assert.Equal(t, "one", v1)
v2, e2 := in.GetString("notOK")
assert.Error(t, e2)
assert.Equal(t, "", v2)
assert.Equal(t, ErrParamNotFound("notOK"), e2)
v3, e3 := in.GetString("notString")
assert.Error(t, e3)
assert.Equal(t, "", v3)
assert.Equal(t, true, IsErrParamInvalid(e3), e3.Error())
}
func TestParamsGetInt64(t *testing.T) {
for _, test := range []struct {
value interface{}
result int64
errString string
}{
{"123", 123, ""},
{"123x", 0, "couldn't parse"},
{int(12), 12, ""},
{int64(13), 13, ""},
{float64(14), 14, ""},
{float64(9.3E18), 0, "overflows int64"},
{float64(-9.3E18), 0, "overflows int64"},
} {
t.Run(fmt.Sprintf("%T=%v", test.value, test.value), func(t *testing.T) {
in := Params{
"key": test.value,
}
v1, e1 := in.GetInt64("key")
if test.errString == "" {
require.NoError(t, e1)
assert.Equal(t, test.result, v1)
} else {
require.NotNil(t, e1)
require.Error(t, e1)
assert.Contains(t, e1.Error(), test.errString)
assert.Equal(t, int64(0), v1)
}
})
}
in := Params{
"notInt64": []string{"a", "b"},
}
v2, e2 := in.GetInt64("notOK")
assert.Error(t, e2)
assert.Equal(t, int64(0), v2)
assert.Equal(t, ErrParamNotFound("notOK"), e2)
v3, e3 := in.GetInt64("notInt64")
assert.Error(t, e3)
assert.Equal(t, int64(0), v3)
assert.Equal(t, true, IsErrParamInvalid(e3), e3.Error())
}
func TestParamsGetFloat64(t *testing.T) {
for _, test := range []struct {
value interface{}
result float64
errString string
}{
{"123.1", 123.1, ""},
{"123x1", 0, "couldn't parse"},
{int(12), 12, ""},
{int64(13), 13, ""},
{float64(14), 14, ""},
} {
t.Run(fmt.Sprintf("%T=%v", test.value, test.value), func(t *testing.T) {
in := Params{
"key": test.value,
}
v1, e1 := in.GetFloat64("key")
if test.errString == "" {
require.NoError(t, e1)
assert.Equal(t, test.result, v1)
} else {
require.NotNil(t, e1)
require.Error(t, e1)
assert.Contains(t, e1.Error(), test.errString)
assert.Equal(t, float64(0), v1)
}
})
}
in := Params{
"notFloat64": []string{"a", "b"},
}
v2, e2 := in.GetFloat64("notOK")
assert.Error(t, e2)
assert.Equal(t, float64(0), v2)
assert.Equal(t, ErrParamNotFound("notOK"), e2)
v3, e3 := in.GetFloat64("notFloat64")
assert.Error(t, e3)
assert.Equal(t, float64(0), v3)
assert.Equal(t, true, IsErrParamInvalid(e3), e3.Error())
}
func TestParamsGetBool(t *testing.T) {
for _, test := range []struct {
value interface{}
result bool
errString string
}{
{true, true, ""},
{false, false, ""},
{"true", true, ""},
{"false", false, ""},
{"fasle", false, "couldn't parse"},
{int(12), true, ""},
{int(0), false, ""},
{int64(13), true, ""},
{int64(0), false, ""},
{float64(14), true, ""},
{float64(0), false, ""},
} {
t.Run(fmt.Sprintf("%T=%v", test.value, test.value), func(t *testing.T) {
in := Params{
"key": test.value,
}
v1, e1 := in.GetBool("key")
if test.errString == "" {
require.NoError(t, e1)
assert.Equal(t, test.result, v1)
} else {
require.NotNil(t, e1)
require.Error(t, e1)
assert.Contains(t, e1.Error(), test.errString)
assert.Equal(t, false, v1)
}
})
}
in := Params{
"notBool": []string{"a", "b"},
}
v2, e2 := Params{}.GetBool("notOK")
assert.Error(t, e2)
assert.Equal(t, false, v2)
assert.Equal(t, ErrParamNotFound("notOK"), e2)
v3, e3 := in.GetBool("notBool")
assert.Error(t, e3)
assert.Equal(t, false, v3)
assert.Equal(t, true, IsErrParamInvalid(e3), e3.Error())
}
func TestParamsGetStruct(t *testing.T) {
in := Params{
"struct": Params{
"String": "one",
"Float": 4.2,
},
}
var out struct {
String string
Float float64
}
e1 := in.GetStruct("struct", &out)
assert.NoError(t, e1)
assert.Equal(t, "one", out.String)
assert.Equal(t, 4.2, out.Float)
e2 := in.GetStruct("notOK", &out)
assert.Error(t, e2)
assert.Equal(t, "one", out.String)
assert.Equal(t, 4.2, out.Float)
assert.Equal(t, ErrParamNotFound("notOK"), e2)
in["struct"] = "string"
e3 := in.GetStruct("struct", &out)
assert.Error(t, e3)
assert.Equal(t, "one", out.String)
assert.Equal(t, 4.2, out.Float)
assert.Equal(t, true, IsErrParamInvalid(e3), e3.Error())
}


@@ -10,19 +10,18 @@ package rc
import (
"encoding/json"
"io"
"net/http"
_ "net/http/pprof" // install the pprof http handlers
"strings"
"github.com/ncw/rclone/cmd/serve/httplib"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
// Options contains options for the remote control server
type Options struct {
HTTPOptions httplib.Options
Enabled bool
Enabled bool // set to enable the server
Serve bool // set to serve files from remotes
Files string // set to enable serving files locally
NoAuth bool // set to disable auth checks on AuthRequired methods
}
// DefaultOpt is the default values used for Options
@@ -35,140 +34,9 @@ func init() {
DefaultOpt.HTTPOptions.ListenAddr = "localhost:5572"
}
// Start the remote control server if configured
func Start(opt *Options) {
if opt.Enabled {
s := newServer(opt)
go s.serve()
}
}
// server contains everything to run the server
type server struct {
srv *httplib.Server
}
func newServer(opt *Options) *server {
// Serve on the DefaultServeMux so can have global registrations appear
mux := http.DefaultServeMux
s := &server{
srv: httplib.NewServer(mux, &opt.HTTPOptions),
}
mux.HandleFunc("/", s.handler)
return s
}
// serve runs the http server - doesn't return
func (s *server) serve() {
err := s.srv.Serve()
if err != nil {
fs.Errorf(nil, "Opening listener: %v", err)
}
fs.Logf(nil, "Serving remote control on %s", s.srv.URL())
s.srv.Wait()
}
// WriteJSON writes JSON in out to w
func WriteJSON(w io.Writer, out Params) error {
enc := json.NewEncoder(w)
enc.SetIndent("", "\t")
return enc.Encode(out)
}
// handler reads incoming requests and dispatches them
func (s *server) handler(w http.ResponseWriter, r *http.Request) {
path := strings.Trim(r.URL.Path, "/")
in := make(Params)
writeError := func(err error, status int) {
fs.Errorf(nil, "rc: %q: error: %v", path, err)
w.WriteHeader(status)
err = WriteJSON(w, Params{
"error": err.Error(),
"input": in,
})
if err != nil {
// can't return the error at this point
fs.Errorf(nil, "rc: failed to write JSON output: %v", err)
}
}
// Parse the POST and URL parameters into r.Form, for others r.Form will be empty value
err := r.ParseForm()
if err != nil {
writeError(errors.Wrap(err, "failed to parse form/URL parameters"), http.StatusBadRequest)
return
}
// Read the POST and URL parameters into in
for k, vs := range r.Form {
if len(vs) > 0 {
in[k] = vs[len(vs)-1]
}
}
// Parse a JSON blob from the input
if r.Header.Get("Content-Type") == "application/json" {
err := json.NewDecoder(r.Body).Decode(&in)
if err != nil {
writeError(errors.Wrap(err, "failed to read input JSON"), http.StatusBadRequest)
return
}
}
fs.Debugf(nil, "form = %+v", r.Form)
w.Header().Add("Access-Control-Allow-Origin", "*")
//echo back headers client needs
reqAccessHeaders := r.Header.Get("Access-Control-Request-Headers")
w.Header().Add("Access-Control-Allow-Headers", reqAccessHeaders)
switch r.Method {
case "POST":
s.handlePost(w, r, path, in)
case "OPTIONS":
s.handleOptions(w, r, in)
default:
writeError(errors.Errorf("method %q not allowed - POST or OPTIONS required", r.Method), http.StatusMethodNotAllowed)
return
}
}
func (s *server) handlePost(w http.ResponseWriter, r *http.Request, path string, in Params) {
writeError := func(err error, status int) {
fs.Errorf(nil, "rc: %q: error: %v", path, err)
w.WriteHeader(status)
err = WriteJSON(w, Params{
"error": err.Error(),
"input": in,
})
if err != nil {
// can't return the error at this point
fs.Errorf(nil, "rc: failed to write JSON output: %v", err)
}
}
// Find the call
call := registry.get(path)
if call == nil {
writeError(errors.Errorf("couldn't find method %q", path), http.StatusMethodNotAllowed)
return
}
fs.Debugf(nil, "rc: %q: with parameters %+v", path, in)
out, err := call.Fn(in)
if err != nil {
writeError(errors.Wrap(err, "remote control command failed"), http.StatusInternalServerError)
return
}
fs.Debugf(nil, "rc: %q: reply %+v: %v", path, out, err)
err = WriteJSON(w, out)
if err != nil {
// can't return the error at this point
fs.Errorf(nil, "rc: failed to write JSON output: %v", err)
}
}
func (s *server) handleOptions(w http.ResponseWriter, r *http.Request, in Params) {
w.WriteHeader(http.StatusOK)
}

fs/rc/rc_test.go Normal file

@@ -0,0 +1,23 @@
package rc
import (
"bytes"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestWriteJSON(t *testing.T) {
var buf bytes.Buffer
err := WriteJSON(&buf, Params{
"String": "hello",
"Int": 42,
})
require.NoError(t, err)
assert.Equal(t, `{
"Int": 42,
"String": "hello"
}
`, buf.String())
}


@@ -15,6 +15,10 @@ var (
// AddFlags adds the remote control flags to the flagSet
func AddFlags(flagSet *pflag.FlagSet) {
rc.AddOption("rc", &Opt)
flags.BoolVarP(flagSet, &Opt.Enabled, "rc", "", false, "Enable the remote control server.")
flags.StringVarP(flagSet, &Opt.Files, "rc-files", "", "", "Path to local files to serve on the HTTP server.")
flags.BoolVarP(flagSet, &Opt.Serve, "rc-serve", "", false, "Enable the serving of remote objects.")
flags.BoolVarP(flagSet, &Opt.NoAuth, "rc-no-auth", "", false, "Don't require auth for certain methods.")
httpflags.AddFlagsPrefix(flagSet, "rc-", &Opt.HTTPOptions)
}

fs/rc/rcserver/rcserver.go Normal file

@@ -0,0 +1,279 @@
// Package rcserver implements the HTTP endpoint to serve the remote control
package rcserver
import (
"encoding/json"
"mime"
"net/http"
"net/url"
"regexp"
"sort"
"strings"
"github.com/ncw/rclone/cmd/serve/httplib"
"github.com/ncw/rclone/cmd/serve/httplib/serve"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/list"
"github.com/ncw/rclone/fs/rc"
"github.com/pkg/errors"
"github.com/skratchdot/open-golang/open"
)
// Start the remote control server if configured
//
// If the server wasn't configured the *Server returned may be nil
func Start(opt *rc.Options) (*Server, error) {
if opt.Enabled {
// Serve on the DefaultServeMux so can have global registrations appear
s := newServer(opt, http.DefaultServeMux)
return s, s.Serve()
}
return nil, nil
}
// Server contains everything to run the rc server
type Server struct {
*httplib.Server
files http.Handler
opt *rc.Options
}
func newServer(opt *rc.Options, mux *http.ServeMux) *Server {
s := &Server{
Server: httplib.NewServer(mux, &opt.HTTPOptions),
opt: opt,
}
mux.HandleFunc("/", s.handler)
// Add some more mime types which are often missing
_ = mime.AddExtensionType(".wasm", "application/wasm")
_ = mime.AddExtensionType(".js", "application/javascript")
// File handling
if opt.Files != "" {
fs.Logf(nil, "Serving files from %q", opt.Files)
s.files = http.FileServer(http.Dir(opt.Files))
}
return s
}
// Serve runs the http server in the background.
//
// Use s.Close() and s.Wait() to shut down the server
func (s *Server) Serve() error {
err := s.Server.Serve()
if err != nil {
return err
}
fs.Logf(nil, "Serving remote control on %s", s.URL())
// Open the files in the browser if set
if s.files != nil {
openURL, err := url.Parse(s.URL())
if err != nil {
return errors.Wrap(err, "invalid serving URL")
}
// Add username, password into the URL if they are set
user, pass := s.opt.HTTPOptions.BasicUser, s.opt.HTTPOptions.BasicPass
if user != "" || pass != "" {
openURL.User = url.UserPassword(user, pass)
}
_ = open.Start(openURL.String())
}
return nil
}
// writeError writes a formatted error to the output
func writeError(path string, in rc.Params, w http.ResponseWriter, err error, status int) {
fs.Errorf(nil, "rc: %q: error: %v", path, err)
// Adjust the error return for some well known errors
errOrig := errors.Cause(err)
switch {
case errOrig == fs.ErrorDirNotFound || errOrig == fs.ErrorObjectNotFound:
status = http.StatusNotFound
case rc.IsErrParamInvalid(err) || rc.IsErrParamNotFound(err):
status = http.StatusBadRequest
}
w.WriteHeader(status)
err = rc.WriteJSON(w, rc.Params{
"status": status,
"error": err.Error(),
"input": in,
"path": path,
})
if err != nil {
// can't return the error at this point
fs.Errorf(nil, "rc: failed to write JSON output: %v", err)
}
}
// handler reads incoming requests and dispatches them
func (s *Server) handler(w http.ResponseWriter, r *http.Request) {
path := strings.TrimLeft(r.URL.Path, "/")
w.Header().Add("Access-Control-Allow-Origin", "*")
// echo back access control headers client needs
reqAccessHeaders := r.Header.Get("Access-Control-Request-Headers")
w.Header().Add("Access-Control-Allow-Headers", reqAccessHeaders)
switch r.Method {
case "POST":
s.handlePost(w, r, path)
case "OPTIONS":
s.handleOptions(w, r, path)
case "GET", "HEAD":
s.handleGet(w, r, path)
default:
writeError(path, nil, w, errors.Errorf("method %q not allowed", r.Method), http.StatusMethodNotAllowed)
return
}
}
func (s *Server) handlePost(w http.ResponseWriter, r *http.Request, path string) {
contentType := r.Header.Get("Content-Type")
values := r.URL.Query()
if contentType == "application/x-www-form-urlencoded" {
// Parse the POST and URL parameters into r.Form, for others r.Form will be empty value
err := r.ParseForm()
if err != nil {
writeError(path, nil, w, errors.Wrap(err, "failed to parse form/URL parameters"), http.StatusBadRequest)
return
}
values = r.Form
}
// Read the POST and URL parameters into in
in := make(rc.Params)
for k, vs := range values {
if len(vs) > 0 {
in[k] = vs[len(vs)-1]
}
}
// Parse a JSON blob from the input
if contentType == "application/json" {
err := json.NewDecoder(r.Body).Decode(&in)
if err != nil {
writeError(path, in, w, errors.Wrap(err, "failed to read input JSON"), http.StatusBadRequest)
return
}
}
// Find the call
call := rc.Calls.Get(path)
if call == nil {
writeError(path, in, w, errors.Errorf("couldn't find method %q", path), http.StatusNotFound)
return
}
// Check to see if it requires authorisation
if !s.opt.NoAuth && call.AuthRequired && !s.UsingAuth() {
writeError(path, in, w, errors.Errorf("authentication must be set up on the rc server to use %q or the --rc-no-auth flag must be in use", path), http.StatusForbidden)
return
}
// Check to see if it is async or not
isAsync, err := in.GetBool("_async")
if rc.NotErrParamNotFound(err) {
writeError(path, in, w, err, http.StatusBadRequest)
return
}
fs.Debugf(nil, "rc: %q: with parameters %+v", path, in)
var out rc.Params
if isAsync {
out, err = rc.StartJob(call.Fn, in)
} else {
out, err = call.Fn(in)
}
if err != nil {
writeError(path, in, w, err, http.StatusInternalServerError)
return
}
if out == nil {
out = make(rc.Params)
}
fs.Debugf(nil, "rc: %q: reply %+v: %v", path, out, err)
err = rc.WriteJSON(w, out)
if err != nil {
// can't return the error at this point
fs.Errorf(nil, "rc: failed to write JSON output: %v", err)
}
}
func (s *Server) handleOptions(w http.ResponseWriter, r *http.Request, path string) {
w.WriteHeader(http.StatusOK)
}
func (s *Server) serveRoot(w http.ResponseWriter, r *http.Request) {
remotes := config.FileSections()
sort.Strings(remotes)
directory := serve.NewDirectory("")
directory.Title = "List of all rclone remotes."
q := url.Values{}
for _, remote := range remotes {
q.Set("fs", remote)
directory.AddEntry("["+remote+":]", true)
}
directory.Serve(w, r)
}
func (s *Server) serveRemote(w http.ResponseWriter, r *http.Request, path string, fsName string) {
f, err := rc.GetCachedFs(fsName)
if err != nil {
writeError(path, nil, w, errors.Wrap(err, "failed to make Fs"), http.StatusInternalServerError)
return
}
if path == "" || strings.HasSuffix(path, "/") {
path = strings.Trim(path, "/")
entries, err := list.DirSorted(f, false, path)
if err != nil {
writeError(path, nil, w, errors.Wrap(err, "failed to list directory"), http.StatusInternalServerError)
return
}
// Make the entries for display
directory := serve.NewDirectory(path)
for _, entry := range entries {
_, isDir := entry.(fs.Directory)
directory.AddEntry(entry.Remote(), isDir)
}
directory.Serve(w, r)
} else {
o, err := f.NewObject(path)
if err != nil {
writeError(path, nil, w, errors.Wrap(err, "failed to find object"), http.StatusInternalServerError)
return
}
serve.Object(w, r, o)
}
}
// Match URLS of the form [fs]/remote
var fsMatch = regexp.MustCompile(`^\[(.*?)\](.*)$`)
func (s *Server) handleGet(w http.ResponseWriter, r *http.Request, path string) {
// Look to see if this has an fs in the path
match := fsMatch.FindStringSubmatch(path)
switch {
case match != nil && s.opt.Serve:
// Serve /[fs]/remote files
s.serveRemote(w, r, match[2], match[1])
return
case path == "*" && s.opt.Serve:
// Serve /* as the remote listing
s.serveRoot(w, r)
return
case s.files != nil:
// Serve the files
s.files.ServeHTTP(w, r)
return
case path == "" && s.opt.Serve:
// Serve the root as a remote listing
s.serveRoot(w, r)
return
}
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
}
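A minimal sketch (not part of the file above) of fetching an object through this GET handler when the server runs with --rc-serve. The remote name, path and listen address are placeholders (the default address comes from DefaultOpt, "localhost:5572"); assumes imports of "fmt", "io/ioutil", "log" and "net/http".
// URLs of the form /[remote:path]/file are routed to serveRemote above
resp, err := http.Get("http://localhost:5572/[remote:]/path/to/file.txt")
if err != nil {
	log.Fatal(err)
}
defer func() { _ = resp.Body.Close() }()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
	log.Fatal(err)
}
fmt.Printf("status %d, %d bytes\n", resp.StatusCode, len(body))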


@@ -0,0 +1,632 @@
// +build go1.8
package rcserver
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"regexp"
"testing"
"time"
_ "github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fs/rc"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
testBindAddress = "localhost:51781"
testURL = "http://" + testBindAddress + "/"
testFs = "testdata/files"
remoteURL = "[" + testFs + "]/" // initial URL path to fetch from that remote
)
// Test the RC server runs and we can do HTTP fetches from it.
// We'll do the majority of the testing with the httptest framework
func TestRcServer(t *testing.T) {
opt := rc.DefaultOpt
opt.HTTPOptions.ListenAddr = testBindAddress
opt.Enabled = true
opt.Serve = true
opt.Files = testFs
mux := http.NewServeMux()
rcServer := newServer(&opt, mux)
assert.NoError(t, rcServer.Serve())
defer func() {
rcServer.Close()
rcServer.Wait()
}()
// Do the simplest possible test to check the server is alive
// Do it a few times to wait for the server to start
var resp *http.Response
var err error
for i := 0; i < 10; i++ {
resp, err = http.Get(testURL + "file.txt")
if err == nil {
break
}
time.Sleep(10 * time.Millisecond)
}
require.NoError(t, err)
body, err := ioutil.ReadAll(resp.Body)
require.NoError(t, err)
require.NoError(t, resp.Body.Close())
assert.Equal(t, http.StatusOK, resp.StatusCode)
assert.Equal(t, "this is file1.txt\n", string(body))
}
type testRun struct {
Name string
URL string
Status int
Method string
Range string
Body string
ContentType string
Expected string
Contains *regexp.Regexp
Headers map[string]string
}
// Run a suite of tests
func testServer(t *testing.T, tests []testRun, opt *rc.Options) {
mux := http.NewServeMux()
rcServer := newServer(opt, mux)
for _, test := range tests {
t.Run(test.Name, func(t *testing.T) {
method := test.Method
if method == "" {
method = "GET"
}
var inBody io.Reader
if test.Body != "" {
buf := bytes.NewBufferString(test.Body)
inBody = buf
}
req, err := http.NewRequest(method, "http://1.2.3.4/"+test.URL, inBody)
require.NoError(t, err)
if test.Range != "" {
req.Header.Add("Range", test.Range)
}
if test.ContentType != "" {
req.Header.Add("Content-Type", test.ContentType)
}
w := httptest.NewRecorder()
rcServer.handler(w, req)
resp := w.Result()
assert.Equal(t, test.Status, resp.StatusCode)
body, err := ioutil.ReadAll(resp.Body)
require.NoError(t, err)
if test.Contains == nil {
assert.Equal(t, test.Expected, string(body))
} else {
assert.True(t, test.Contains.Match(body), fmt.Sprintf("body didn't match: %v: %v", test.Contains, string(body)))
}
for k, v := range test.Headers {
assert.Equal(t, v, resp.Header.Get(k), k)
}
})
}
}
// return an enabled rc
func newTestOpt() rc.Options {
opt := rc.DefaultOpt
opt.Enabled = true
return opt
}
func TestFileServing(t *testing.T) {
tests := []testRun{{
Name: "index",
URL: "",
Status: http.StatusOK,
Expected: `<pre>
<a href="dir/">dir/</a>
<a href="file.txt">file.txt</a>
</pre>
`,
}, {
Name: "notfound",
URL: "notfound",
Status: http.StatusNotFound,
Expected: "404 page not found\n",
}, {
Name: "dirnotfound",
URL: "dirnotfound/",
Status: http.StatusNotFound,
Expected: "404 page not found\n",
}, {
Name: "dir",
URL: "dir/",
Status: http.StatusOK,
Expected: `<pre>
<a href="file2.txt">file2.txt</a>
</pre>
`,
}, {
Name: "file",
URL: "file.txt",
Status: http.StatusOK,
Expected: "this is file1.txt\n",
Headers: map[string]string{
"Content-Length": "18",
},
}, {
Name: "file2",
URL: "dir/file2.txt",
Status: http.StatusOK,
Expected: "this is dir/file2.txt\n",
}, {
Name: "file-head",
URL: "file.txt",
Method: "HEAD",
Status: http.StatusOK,
Expected: ``,
Headers: map[string]string{
"Content-Length": "18",
},
}, {
Name: "file-range",
URL: "file.txt",
Status: http.StatusPartialContent,
Range: "bytes=8-12",
Expected: `file1`,
}}
opt := newTestOpt()
opt.Serve = true
opt.Files = testFs
testServer(t, tests, &opt)
}
func TestRemoteServing(t *testing.T) {
tests := []testRun{
// Test serving files from the test remote
{
Name: "index",
URL: remoteURL + "",
Status: http.StatusOK,
Expected: `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Directory listing of /</title>
</head>
<body>
<h1>Directory listing of /</h1>
<a href="dir/">dir/</a><br />
<a href="file.txt">file.txt</a><br />
</body>
</html>
`,
}, {
Name: "notfound-index",
URL: "[notfound]/",
Status: http.StatusNotFound,
Expected: `{
"error": "failed to list directory: directory not found",
"input": null,
"path": "",
"status": 404
}
`,
}, {
Name: "notfound",
URL: remoteURL + "notfound",
Status: http.StatusNotFound,
Expected: `{
"error": "failed to find object: object not found",
"input": null,
"path": "/notfound",
"status": 404
}
`,
}, {
Name: "dirnotfound",
URL: remoteURL + "dirnotfound/",
Status: http.StatusNotFound,
Expected: `{
"error": "failed to list directory: directory not found",
"input": null,
"path": "dirnotfound",
"status": 404
}
`,
}, {
Name: "dir",
URL: remoteURL + "dir/",
Status: http.StatusOK,
Expected: `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Directory listing of /dir</title>
</head>
<body>
<h1>Directory listing of /dir</h1>
<a href="file2.txt">file2.txt</a><br />
</body>
</html>
`,
}, {
Name: "file",
URL: remoteURL + "file.txt",
Status: http.StatusOK,
Expected: "this is file1.txt\n",
Headers: map[string]string{
"Content-Length": "18",
},
}, {
Name: "file2",
URL: remoteURL + "dir/file2.txt",
Status: http.StatusOK,
Expected: "this is dir/file2.txt\n",
}, {
Name: "file-head",
URL: remoteURL + "file.txt",
Method: "HEAD",
Status: http.StatusOK,
Expected: ``,
Headers: map[string]string{
"Content-Length": "18",
},
}, {
Name: "file-range",
URL: remoteURL + "file.txt",
Status: http.StatusPartialContent,
Range: "bytes=8-12",
Expected: `file1`,
}, {
Name: "bad-remote",
URL: "[notfoundremote:]/",
Status: http.StatusInternalServerError,
Expected: `{
"error": "failed to make Fs: didn't find section in config file",
"input": null,
"path": "/",
"status": 500
}
`,
}}
opt := newTestOpt()
opt.Serve = true
opt.Files = testFs
testServer(t, tests, &opt)
}
func TestRC(t *testing.T) {
tests := []testRun{{
Name: "rc-root",
URL: "",
Method: "POST",
Status: http.StatusNotFound,
Expected: `{
"error": "couldn't find method \"\"",
"input": {},
"path": "",
"status": 404
}
`,
}, {
Name: "rc-noop",
URL: "rc/noop",
Method: "POST",
Status: http.StatusOK,
Expected: "{}\n",
}, {
Name: "rc-error",
URL: "rc/error",
Method: "POST",
Status: http.StatusInternalServerError,
Expected: `{
"error": "arbitrary error on input map[]",
"input": {},
"path": "rc/error",
"status": 500
}
`,
}, {
Name: "core-gc",
URL: "core/gc", // returns nil, nil so check it is made into {}
Method: "POST",
Status: http.StatusOK,
Expected: "{}\n",
}, {
Name: "url-params",
URL: "rc/noop?param1=potato&param2=sausage",
Method: "POST",
Status: http.StatusOK,
Expected: `{
"param1": "potato",
"param2": "sausage"
}
`,
}, {
Name: "json",
URL: "rc/noop",
Method: "POST",
Body: `{ "param1":"string", "param2":true }`,
ContentType: "application/json",
Status: http.StatusOK,
Expected: `{
"param1": "string",
"param2": true
}
`,
}, {
Name: "json-and-url-params",
URL: "rc/noop?param1=potato&param2=sausage",
Method: "POST",
Body: `{ "param1":"string", "param3":true }`,
ContentType: "application/json",
Status: http.StatusOK,
Expected: `{
"param1": "string",
"param2": "sausage",
"param3": true
}
`,
}, {
Name: "json-bad",
URL: "rc/noop?param1=potato&param2=sausage",
Method: "POST",
Body: `{ param1":"string", "param3":true }`,
ContentType: "application/json",
Status: http.StatusBadRequest,
Expected: `{
"error": "failed to read input JSON: invalid character 'p' looking for beginning of object key string",
"input": {
"param1": "potato",
"param2": "sausage"
},
"path": "rc/noop",
"status": 400
}
`,
}, {
Name: "form",
URL: "rc/noop",
Method: "POST",
Body: `param1=string&param2=true`,
ContentType: "application/x-www-form-urlencoded",
Status: http.StatusOK,
Expected: `{
"param1": "string",
"param2": "true"
}
`,
}, {
Name: "form-and-url-params",
URL: "rc/noop?param1=potato&param2=sausage",
Method: "POST",
Body: `param1=string&param3=true`,
ContentType: "application/x-www-form-urlencoded",
Status: http.StatusOK,
Expected: `{
"param1": "potato",
"param2": "sausage",
"param3": "true"
}
`,
}, {
Name: "form-bad",
URL: "rc/noop?param1=potato&param2=sausage",
Method: "POST",
Body: `%zz`,
ContentType: "application/x-www-form-urlencoded",
Status: http.StatusBadRequest,
Expected: `{
"error": "failed to parse form/URL parameters: invalid URL escape \"%zz\"",
"input": null,
"path": "rc/noop",
"status": 400
}
`,
}}
opt := newTestOpt()
opt.Serve = true
opt.Files = testFs
testServer(t, tests, &opt)
}
func TestMethods(t *testing.T) {
tests := []testRun{{
Name: "options",
URL: "",
Method: "OPTIONS",
Status: http.StatusOK,
Expected: "",
Headers: map[string]string{
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Headers": "",
},
}, {
Name: "bad",
URL: "",
Method: "POTATO",
Status: http.StatusMethodNotAllowed,
Expected: `{
"error": "method \"POTATO\" not allowed",
"input": null,
"path": "",
"status": 405
}
`,
}}
opt := newTestOpt()
opt.Serve = true
opt.Files = testFs
testServer(t, tests, &opt)
}
var matchRemoteDirListing = regexp.MustCompile(`<title>List of all rclone remotes.</title>`)
func TestServingRoot(t *testing.T) {
tests := []testRun{{
Name: "rootlist",
URL: "*",
Status: http.StatusOK,
Contains: matchRemoteDirListing,
}}
opt := newTestOpt()
opt.Serve = true
opt.Files = testFs
testServer(t, tests, &opt)
}
func TestServingRootNoFiles(t *testing.T) {
tests := []testRun{{
Name: "rootlist",
URL: "",
Status: http.StatusOK,
Contains: matchRemoteDirListing,
}}
opt := newTestOpt()
opt.Serve = true
opt.Files = ""
testServer(t, tests, &opt)
}
func TestNoFiles(t *testing.T) {
tests := []testRun{{
Name: "file",
URL: "file.txt",
Status: http.StatusNotFound,
Expected: "Not Found\n",
}, {
Name: "dir",
URL: "dir/",
Status: http.StatusNotFound,
Expected: "Not Found\n",
}}
opt := newTestOpt()
opt.Serve = true
opt.Files = ""
testServer(t, tests, &opt)
}
func TestNoServe(t *testing.T) {
tests := []testRun{{
Name: "file",
URL: remoteURL + "file.txt",
Status: http.StatusNotFound,
Expected: "404 page not found\n",
}, {
Name: "dir",
URL: remoteURL + "dir/",
Status: http.StatusNotFound,
Expected: "404 page not found\n",
}}
opt := newTestOpt()
opt.Serve = false
opt.Files = testFs
testServer(t, tests, &opt)
}
func TestAuthRequired(t *testing.T) {
tests := []testRun{{
Name: "auth",
URL: "rc/noopauth",
Method: "POST",
Body: `{}`,
ContentType: "application/javascript",
Status: http.StatusForbidden,
Expected: `{
"error": "authentication must be set up on the rc server to use \"rc/noopauth\" or the --rc-no-auth flag must be in use",
"input": {},
"path": "rc/noopauth",
"status": 403
}
`,
}}
opt := newTestOpt()
opt.Serve = false
opt.Files = ""
opt.NoAuth = false
testServer(t, tests, &opt)
}
func TestNoAuth(t *testing.T) {
tests := []testRun{{
Name: "auth",
URL: "rc/noopauth",
Method: "POST",
Body: `{}`,
ContentType: "application/javascript",
Status: http.StatusOK,
Expected: "{}\n",
}}
opt := newTestOpt()
opt.Serve = false
opt.Files = ""
opt.NoAuth = true
testServer(t, tests, &opt)
}
func TestWithUserPass(t *testing.T) {
tests := []testRun{{
Name: "auth",
URL: "rc/noopauth",
Method: "POST",
Body: `{}`,
ContentType: "application/javascript",
Status: http.StatusOK,
Expected: "{}\n",
}}
opt := newTestOpt()
opt.Serve = false
opt.Files = ""
opt.NoAuth = false
opt.HTTPOptions.BasicUser = "user"
opt.HTTPOptions.BasicPass = "pass"
testServer(t, tests, &opt)
}
func TestRCAsync(t *testing.T) {
tests := []testRun{{
Name: "ok",
URL: "rc/noop",
Method: "POST",
ContentType: "application/json",
Body: `{ "_async":true }`,
Status: http.StatusOK,
Expected: `{
"jobid": 1
}
`,
}, {
Name: "bad",
URL: "rc/noop",
Method: "POST",
ContentType: "application/json",
Body: `{ "_async":"truthy" }`,
Status: http.StatusBadRequest,
Expected: `{
"error": "couldn't parse key \"_async\" (truthy) as bool: strconv.ParseBool: parsing \"truthy\": invalid syntax",
"input": {
"_async": "truthy"
},
"path": "rc/noop",
"status": 400
}
`,
}}
opt := newTestOpt()
opt.Serve = true
opt.Files = ""
testServer(t, tests, &opt)
}


@@ -0,0 +1 @@
this is dir/file2.txt


@@ -0,0 +1 @@
this is file1.txt


@@ -10,19 +10,17 @@ import (
"github.com/ncw/rclone/fs"
)
// Params is the input and output type for the Func
type Params map[string]interface{}
// Func defines a type for a remote control function
type Func func(in Params) (out Params, err error)
// Call defines info about a remote control function and is used in
// the Add function to create new entry points.
type Call struct {
Path string // path to activate this RC
Fn Func `json:"-"` // function to call
Title string // help for the function
Help string // multi-line markdown formatted help
Path string // path to activate this RC
Fn Func `json:"-"` // function to call
Title string // help for the function
AuthRequired bool // if set then this call requires authorisation to be set
Help string // multi-line markdown formatted help
}
// Registry holds the list of all the registered remote control functions
@@ -39,7 +37,7 @@ func NewRegistry() *Registry {
}
// Add a call to the registry
func (r *Registry) add(call Call) {
func (r *Registry) Add(call Call) {
r.mu.Lock()
defer r.mu.Unlock()
call.Path = strings.Trim(call.Path, "/")
@@ -48,15 +46,15 @@ func (r *Registry) add(call Call) {
r.call[call.Path] = &call
}
// get a Call from a path or nil
func (r *Registry) get(path string) *Call {
// Get a Call from a path or nil
func (r *Registry) Get(path string) *Call {
r.mu.RLock()
defer r.mu.RUnlock()
return r.call[path]
}
// get a list of all calls in alphabetical order
func (r *Registry) list() (out []*Call) {
// List of all calls in alphabetical order
func (r *Registry) List() (out []*Call) {
r.mu.RLock()
defer r.mu.RUnlock()
var keys []string
@@ -70,10 +68,10 @@ func (r *Registry) list() (out []*Call) {
return out
}
// The global registry
var registry = NewRegistry()
// Calls is the global registry of Call objects
var Calls = NewRegistry()
// Add a function to the global registry
func Add(call Call) {
registry.add(call)
Calls.Add(call)
}
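A hedged sketch (not part of the diff above) of registering a call against the now exported registry; the path, function and help text are illustrative only. Setting AuthRequired makes the server refuse the call unless auth is configured or --rc-no-auth is used. Assumes an import of "github.com/ncw/rclone/fs/rc".
func init() {
	rc.Add(rc.Call{
		Path:         "example/echo",
		AuthRequired: false,
		Fn: func(in rc.Params) (rc.Params, error) {
			// Simply echo the input back to the caller
			return in, nil
		},
		Title: "Echo the input parameters",
		Help:  "Returns whatever parameters were passed in.",
	})
}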

fs/sync/rc.go Normal file

@@ -0,0 +1,57 @@
package sync
import (
"github.com/ncw/rclone/fs/rc"
)
func init() {
for _, name := range []string{"sync", "copy", "move"} {
name := name
moveHelp := ""
if name == "move" {
moveHelp = "- deleteEmptySrcDirs - delete empty src directories if set\n"
}
rc.Add(rc.Call{
Path: "sync/" + name,
AuthRequired: true,
Fn: func(in rc.Params) (rc.Params, error) {
return rcSyncCopyMove(in, name)
},
Title: name + " a directory from source remote to destination remote",
Help: `This takes the following parameters
- srcFs - a remote name string eg "drive:src" for the source
- dstFs - a remote name string eg "drive:dst" for the destination
` + moveHelp + `
This returns
- jobid - ID of async job to query with job/status
See the [` + name + ` command](/commands/rclone_` + name + `/) for more information on the above.`,
})
}
}
// Sync/Copy/Move a file
func rcSyncCopyMove(in rc.Params, name string) (out rc.Params, err error) {
srcFs, err := rc.GetFsNamed(in, "srcFs")
if err != nil {
return nil, err
}
dstFs, err := rc.GetFsNamed(in, "dstFs")
if err != nil {
return nil, err
}
switch name {
case "sync":
return nil, Sync(dstFs, srcFs)
case "copy":
return nil, CopyDir(dstFs, srcFs)
case "move":
deleteEmptySrcDirs, err := in.GetBool("deleteEmptySrcDirs")
if rc.NotErrParamNotFound(err) {
return nil, err
}
return nil, MoveDir(dstFs, srcFs, deleteEmptySrcDirs)
}
panic("unknown rcSyncCopyMove type")
}
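A minimal sketch (not part of the file above) of driving sync/copy over HTTP with _async set, so the reply carries a jobid to poll via job/status. The remote names and address are placeholders, and since the call is AuthRequired the server must have auth configured (or be run with --rc-no-auth). Assumes imports of "bytes", "encoding/json", "fmt", "log" and "net/http".
body := bytes.NewBufferString(`{"srcFs": "drive:src", "dstFs": "drive:dst", "_async": true}`)
resp, err := http.Post("http://localhost:5572/sync/copy", "application/json", body)
if err != nil {
	log.Fatal(err)
}
defer func() { _ = resp.Body.Close() }()
var out map[string]interface{}
if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
	log.Fatal(err)
}
// On success the server replies with {"jobid": N}
fmt.Println("jobid:", out["jobid"])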

fs/sync/rc_test.go Normal file

@@ -0,0 +1,97 @@
package sync
import (
"testing"
"github.com/ncw/rclone/fs/rc"
"github.com/ncw/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func rcNewRun(t *testing.T, method string) (*fstest.Run, *rc.Call) {
if *fstest.RemoteName != "" {
t.Skip("Skipping test on non local remote")
}
r := fstest.NewRun(t)
call := rc.Calls.Get(method)
assert.NotNil(t, call)
rc.PutCachedFs(r.LocalName, r.Flocal)
rc.PutCachedFs(r.FremoteName, r.Fremote)
return r, call
}
// sync/copy: copy a directory from source remote to destination remote
func TestRcCopy(t *testing.T) {
r, call := rcNewRun(t, "sync/copy")
defer r.Finalise()
r.Mkdir(r.Fremote)
file1 := r.WriteBoth("file1", "file1 contents", t1)
file2 := r.WriteFile("subdir/file2", "file2 contents", t2)
file3 := r.WriteObject("subdir/subsubdir/file3", "file3 contents", t3)
fstest.CheckItems(t, r.Flocal, file1, file2)
fstest.CheckItems(t, r.Fremote, file1, file3)
in := rc.Params{
"srcFs": r.LocalName,
"dstFs": r.FremoteName,
}
out, err := call.Fn(in)
require.NoError(t, err)
assert.Equal(t, rc.Params(nil), out)
fstest.CheckItems(t, r.Flocal, file1, file2)
fstest.CheckItems(t, r.Fremote, file1, file2, file3)
}
// sync/move: move a directory from source remote to destination remote
func TestRcMove(t *testing.T) {
r, call := rcNewRun(t, "sync/move")
defer r.Finalise()
r.Mkdir(r.Fremote)
file1 := r.WriteBoth("file1", "file1 contents", t1)
file2 := r.WriteFile("subdir/file2", "file2 contents", t2)
file3 := r.WriteObject("subdir/subsubdir/file3", "file3 contents", t3)
fstest.CheckItems(t, r.Flocal, file1, file2)
fstest.CheckItems(t, r.Fremote, file1, file3)
in := rc.Params{
"srcFs": r.LocalName,
"dstFs": r.FremoteName,
}
out, err := call.Fn(in)
require.NoError(t, err)
assert.Equal(t, rc.Params(nil), out)
fstest.CheckItems(t, r.Flocal)
fstest.CheckItems(t, r.Fremote, file1, file2, file3)
}
// sync/sync: sync a directory from source remote to destination remote
func TestRcSync(t *testing.T) {
r, call := rcNewRun(t, "sync/sync")
defer r.Finalise()
r.Mkdir(r.Fremote)
file1 := r.WriteBoth("file1", "file1 contents", t1)
file2 := r.WriteFile("subdir/file2", "file2 contents", t2)
file3 := r.WriteObject("subdir/subsubdir/file3", "file3 contents", t3)
fstest.CheckItems(t, r.Flocal, file1, file2)
fstest.CheckItems(t, r.Fremote, file1, file3)
in := rc.Params{
"srcFs": r.LocalName,
"dstFs": r.FremoteName,
}
out, err := call.Fn(in)
require.NoError(t, err)
assert.Equal(t, rc.Params(nil), out)
fstest.CheckItems(t, r.Flocal, file1, file2)
fstest.CheckItems(t, r.Fremote, file1, file2)
}


@@ -79,6 +79,35 @@ func TestCopyWithDepth(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file2)
}
// Test copy with files from
func TestCopyWithFilesFrom(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
file1 := r.WriteFile("potato2", "hello world", t1)
file2 := r.WriteFile("hello world2", "hello world2", t2)
// Set the --files-from equivalent
f, err := filter.NewFilter(nil)
require.NoError(t, err)
require.NoError(t, f.AddFile("potato2"))
require.NoError(t, f.AddFile("notfound"))
// Monkey patch the active filter
oldFilter := filter.Active
filter.Active = f
unpatch := func() {
filter.Active = oldFilter
}
defer unpatch()
err = CopyDir(r.Fremote, r.Flocal)
require.NoError(t, err)
unpatch()
fstest.CheckItems(t, r.Flocal, file1, file2)
fstest.CheckItems(t, r.Fremote, file1)
}
// Test copy empty directories
func TestCopyEmptyDirectories(t *testing.T) {
r := fstest.NewRun(t)


@@ -1,4 +1,4 @@
package fs
// Version of rclone
var Version = "v1.44"
var Version = "v1.44-DEV"

fs/version/version.go Normal file

@@ -0,0 +1,86 @@
package version
import (
"fmt"
"regexp"
"strconv"
"strings"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)
// Version represents a parsed rclone version number
type Version []int
var parseVersion = regexp.MustCompile(`^(?:rclone )?v(\d+)\.(\d+)(?:\.(\d+))?(?:-(\d+)(?:-(g[\wβ-]+))?)?$`)
// New parses a version number from a string
//
// This will be returned with up to 4 elements for major, minor,
// patch, subpatch release.
//
// If the version number represents a version compiled from git, then
// it will be returned as major, minor, 999, 999
func New(in string) (v Version, err error) {
isGit := strings.HasSuffix(in, "-DEV")
if isGit {
in = in[:len(in)-4]
}
r := parseVersion.FindStringSubmatch(in)
if r == nil {
return v, errors.Errorf("failed to match version string %q", in)
}
atoi := func(s string) int {
i, err := strconv.Atoi(s)
if err != nil {
fs.Errorf(nil, "Failed to parse %q as int from %q: %v", s, in, err)
}
return i
}
v = Version{
atoi(r[1]), // major
atoi(r[2]), // minor
}
if r[3] != "" {
v = append(v, atoi(r[3])) // patch
} else if r[4] != "" {
v = append(v, 0) // patch
}
if r[4] != "" {
v = append(v, atoi(r[4])) // dev
}
if isGit {
v = append(v, 999, 999)
}
return v, nil
}
// String converts v to a string
func (v Version) String() string {
var out []string
for _, vv := range v {
out = append(out, fmt.Sprint(vv))
}
return strings.Join(out, ".")
}
// Cmp compares two versions returning >0, <0 or 0
func (v Version) Cmp(o Version) (d int) {
n := len(v)
if n > len(o) {
n = len(o)
}
for i := 0; i < n; i++ {
d = v[i] - o[i]
if d != 0 {
return d
}
}
return len(v) - len(o)
}
// IsGit returns true if the current version was compiled from git
func (v Version) IsGit() bool {
return len(v) >= 4 && v[2] == 999 && v[3] == 999
}
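A hedged sketch (not part of the file above) of the intended use: parse a version string and compare it against a minimum. The version strings are illustrative; assumes imports of "log" and "github.com/ncw/rclone/fs/version".
v, err := version.New("rclone v1.44-012-gabcdef12")
if err != nil {
	log.Fatal(err)
}
minimum := version.Version{1, 42}
if v.Cmp(minimum) < 0 {
	log.Fatalf("version %v is older than required %v", v, minimum)
}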


@@ -0,0 +1,89 @@
package version
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func TestNew(t *testing.T) {
for _, test := range []struct {
in string
want Version
wantErr bool
}{
{"v1.41", Version{1, 41}, false},
{"rclone v1.41", Version{1, 41}, false},
{"rclone v1.41.23", Version{1, 41, 23}, false},
{"rclone v1.41.23-100", Version{1, 41, 23, 100}, false},
{"rclone v1.41-100", Version{1, 41, 0, 100}, false},
{"rclone v1.41.23-100-g12312a", Version{1, 41, 23, 100}, false},
{"rclone v1.41-100-g12312a", Version{1, 41, 0, 100}, false},
{"rclone v1.42-005-g56e1e820β", Version{1, 42, 0, 5}, false},
{"rclone v1.42-005-g56e1e820-feature-branchβ", Version{1, 42, 0, 5}, false},
{"v1.41s", nil, true},
{"rclone v1-41", nil, true},
{"rclone v1.41.2c3", nil, true},
{"rclone v1.41.23-100 potato", nil, true},
{"rclone 1.41-100", nil, true},
{"rclone v1.41.23-100-12312a", nil, true},
{"v1.41-DEV", Version{1, 41, 999, 999}, false},
} {
what := fmt.Sprintf("in=%q", test.in)
got, err := New(test.in)
if test.wantErr {
assert.Error(t, err, what)
} else {
assert.NoError(t, err, what)
}
assert.Equal(t, test.want, got, what)
}
}
func TestCmp(t *testing.T) {
for _, test := range []struct {
a, b Version
want int
}{
{Version{1}, Version{1}, 0},
{Version{1}, Version{2}, -1},
{Version{2}, Version{1}, 1},
{Version{2}, Version{2, 1}, -1},
{Version{2, 1}, Version{2}, 1},
{Version{2, 1}, Version{2, 1}, 0},
{Version{2, 1}, Version{2, 2}, -1},
{Version{2, 2}, Version{2, 1}, 1},
} {
got := test.a.Cmp(test.b)
if got < 0 {
got = -1
} else if got > 0 {
got = 1
}
assert.Equal(t, test.want, got, fmt.Sprintf("%v cmp %v", test.a, test.b))
// test the reverse
got = -test.b.Cmp(test.a)
assert.Equal(t, test.want, got, fmt.Sprintf("%v cmp %v", test.b, test.a))
}
}
func TestString(t *testing.T) {
v, err := New("v1.44.1-2")
assert.NoError(t, err)
assert.Equal(t, "1.44.1.2", v.String())
}
func TestIsGit(t *testing.T) {
v, err := New("v1.44")
assert.NoError(t, err)
assert.Equal(t, false, v.IsGit())
v, err = New("v1.44-DEV")
assert.NoError(t, err)
assert.Equal(t, true, v.IsGit())
}


@@ -1,7 +1,7 @@
//+build !go1.7
//+build !go1.8
package fs
// Upgrade to Go version 1.7 to compile rclone - latest stable go
// Upgrade to Go version 1.8 to compile rclone - latest stable go
// compiler recommended.
func init() { Go_version_1_7_required_for_compilation() }
func init() { Go_version_1_8_required_for_compilation() }


@@ -54,8 +54,14 @@ type Func func(path string, entries fs.DirEntries, err error) error
// This is implemented by WalkR if Config.UseRecursiveListing is true
// and f supports it and level > 1, or WalkN otherwise.
//
// If --files-from is set then a DirTree will be constructed with just
// those files in and then walked with WalkR
//
// NB (f, path) to be replaced by fs.Dir at some point
func Walk(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error {
if filter.Active.HaveFilesFrom() {
return walkR(f, path, includeAll, maxLevel, fn, filter.Active.MakeListR(f.NewObject))
}
if (maxLevel < 0 || maxLevel > 1) && fs.Config.UseListR && f.Features().ListR != nil {
return walkListR(f, path, includeAll, maxLevel, fn)
}
@@ -452,8 +458,14 @@ func walkNDirTree(f fs.Fs, path string, includeAll bool, maxLevel int, listDir l
// This is implemented by WalkR if Config.UseRecursiveListing is true
// and f supports it and level > 1, or WalkN otherwise.
//
// If --files-from is set then a DirTree will be constructed with just
// those files in.
//
// NB (f, path) to be replaced by fs.Dir at some point
func NewDirTree(f fs.Fs, path string, includeAll bool, maxLevel int) (DirTree, error) {
if filter.Active.HaveFilesFrom() {
return walkRDirTree(f, path, includeAll, maxLevel, filter.Active.MakeListR(f.NewObject))
}
if ListR := f.Features().ListR; (maxLevel < 0 || maxLevel > 1) && fs.Config.UseListR && ListR != nil {
return walkRDirTree(f, path, includeAll, maxLevel, ListR)
}


@@ -540,16 +540,13 @@ func Run(t *testing.T, opt *Opt) {
minChunkSize = opt.ChunkedUpload.CeilChunkSize(minChunkSize)
}
maxChunkSize := opt.ChunkedUpload.MaxChunkSize
if maxChunkSize < minChunkSize {
if minChunkSize <= fs.MebiByte {
maxChunkSize = 2 * fs.MebiByte
} else {
maxChunkSize = 2 * minChunkSize
}
} else if maxChunkSize >= 2*minChunkSize {
maxChunkSize := 2 * fs.MebiByte
if maxChunkSize < 2*minChunkSize {
maxChunkSize = 2 * minChunkSize
}
if opt.ChunkedUpload.MaxChunkSize > 0 && maxChunkSize > opt.ChunkedUpload.MaxChunkSize {
maxChunkSize = opt.ChunkedUpload.MaxChunkSize
}
if opt.ChunkedUpload.CeilChunkSize != nil {
maxChunkSize = opt.ChunkedUpload.CeilChunkSize(maxChunkSize)
}

fstest/mockfs/mockfs.go Normal file

@@ -0,0 +1,106 @@
package mockfs
import (
"errors"
"fmt"
"io"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/hash"
)
// Fs is a minimal mock Fs
type Fs struct {
name string // the name of the remote
root string // The root directory (OS path)
features *fs.Features // optional features
}
// ErrNotImplemented is returned by unimplemented methods
var ErrNotImplemented = errors.New("not implemented")
// NewFs returns a new mock Fs
func NewFs(name, root string) *Fs {
f := &Fs{
name: name,
root: root,
}
f.features = (&fs.Features{}).Fill(f)
return f
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("Mock file system at %s", f.root)
}
// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Hashes returns the supported hash types of the filesystem
func (f *Fs) Hashes() hash.Set {
return hash.NewHashSet()
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
return nil, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
return nil, fs.ErrorObjectNotFound
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return nil, ErrNotImplemented
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(dir string) error {
return ErrNotImplemented
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(dir string) error {
return ErrNotImplemented
}
// Assert it is the correct type
var _ fs.Fs = (*Fs)(nil)
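A minimal sketch (not part of the file above) of how this mock can be combined with the rc Fs cache from the commits above: register the mock under a name with rc.PutCachedFs so rc functions that call rc.GetCachedFs resolve the name to the mock instead of building a real backend. The remote name is illustrative; assumes imports of "fmt", "log", "github.com/ncw/rclone/fs/rc" and "github.com/ncw/rclone/fstest/mockfs".
f := mockfs.NewFs("mock", "mock:/")
rc.PutCachedFs("mock:/", f)
cached, err := rc.GetCachedFs("mock:/")
if err != nil {
	log.Fatal(err)
}
fmt.Println(cached.Name()) // should print "mock"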

fstest/test_all/clean.go Normal file

@@ -0,0 +1,62 @@
// Clean the left over test files
// +build go1.10
package main
import (
"log"
"regexp"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/list"
"github.com/ncw/rclone/fs/operations"
)
// MatchTestRemote matches the remote names used for testing (copied
// from fstest/fstest.go so we don't have to import that and get all
// its flags)
var MatchTestRemote = regexp.MustCompile(`^rclone-test-[abcdefghijklmnopqrstuvwxyz0123456789]{24}$`)
// cleanFs runs a single clean fs for left over directories
func cleanFs(remote string) error {
f, err := fs.NewFs(remote)
if err != nil {
return err
}
entries, err := list.DirSorted(f, true, "")
if err != nil {
return err
}
return entries.ForDirError(func(dir fs.Directory) error {
dirPath := dir.Remote()
fullPath := remote + dirPath
if MatchTestRemote.MatchString(dirPath) {
if *dryRun {
log.Printf("Not Purging %s - -dry-run", fullPath)
return nil
}
log.Printf("Purging %s", fullPath)
dir, err := fs.NewFs(fullPath)
if err != nil {
return err
}
return operations.Purge(dir, "")
}
return nil
})
}
// cleanRemotes cleans the list of remotes passed in
func cleanRemotes(remotes []string) error {
var lastError error
for _, remote := range remotes {
log.Printf("%q - Cleaning", remote)
err := cleanFs(remote)
if err != nil {
lastError = err
log.Printf("Failed to purge %q: %v", remote, err)
}
}
return lastError
}

fstest/test_all/config.go Normal file

@@ -0,0 +1,165 @@
// Config handling
// +build go1.10
package main
import (
"io/ioutil"
"log"
"path"
"github.com/pkg/errors"
"gopkg.in/yaml.v2"
)
// Test describes an integration test to run with `go test`
type Test struct {
Path string // path to the source directory
SubDir bool // if it is possible to add -sub-dir to tests
FastList bool // if it is possible to add -fast-list to tests
AddBackend bool // set if Path needs the current backend appending
NoRetries bool // set if no retries should be performed
NoBinary bool // set to not build a binary in advance
}
// Backend describes a backend test
//
// FIXME make bucket based remotes set sub-dir automatically???
type Backend struct {
Backend string // name of the backend directory
Remote string // name of the test remote
SubDir bool // set to test with -sub-dir
FastList bool // set to test with -fast-list
OneOnly bool // set to run only one backend test at once
}
// MakeRuns creates Run objects for the Backend and Test
//
// There can be several created, one for each combination of SubDir
// and FastList
func (b *Backend) MakeRuns(t *Test) (runs []*Run) {
subdirs := []bool{false}
if b.SubDir && t.SubDir {
subdirs = append(subdirs, true)
}
fastlists := []bool{false}
if b.FastList && t.FastList {
fastlists = append(fastlists, true)
}
for _, subdir := range subdirs {
for _, fastlist := range fastlists {
run := &Run{
Remote: b.Remote,
Backend: b.Backend,
Path: t.Path,
SubDir: subdir,
FastList: fastlist,
NoRetries: t.NoRetries,
OneOnly: b.OneOnly,
NoBinary: t.NoBinary,
}
if t.AddBackend {
run.Path = path.Join(run.Path, b.Backend)
}
runs = append(runs, run)
}
}
return runs
}
// Config describes the config for this program
type Config struct {
Tests []Test
Backends []Backend
}
// NewConfig reads the config file
func NewConfig(configFile string) (*Config, error) {
d, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, errors.Wrap(err, "failed to read config file")
}
config := &Config{}
err = yaml.Unmarshal(d, &config)
if err != nil {
return nil, errors.Wrap(err, "failed to parse config file")
}
// d, err = yaml.Marshal(&config)
// if err != nil {
// log.Fatalf("error: %v", err)
// }
// fmt.Printf("--- m dump:\n%s\n\n", string(d))
return config, nil
}
// MakeRuns makes Run objects for each combination of Backend and Test
// in the config
func (c *Config) MakeRuns() (runs Runs) {
for _, backend := range c.Backends {
for _, test := range c.Tests {
runs = append(runs, backend.MakeRuns(&test)...)
}
}
return runs
}
// Filter the Backends with the remotes passed in.
//
// If no backend is found for a remote then synthesize one
func (c *Config) filterBackendsByRemotes(remotes []string) {
var newBackends []Backend
for _, name := range remotes {
found := false
for i := range c.Backends {
if c.Backends[i].Remote == name {
newBackends = append(newBackends, c.Backends[i])
found = true
}
}
if !found {
log.Printf("Remote %q not found - inserting with default flags", name)
newBackends = append(newBackends, Backend{Remote: name})
}
}
c.Backends = newBackends
}
// Filter the Backends with the backendNames passed in
func (c *Config) filterBackendsByBackends(backendNames []string) {
var newBackends []Backend
for _, name := range backendNames {
for i := range c.Backends {
if c.Backends[i].Backend == name {
newBackends = append(newBackends, c.Backends[i])
}
}
}
c.Backends = newBackends
}
// Filter the Tests by the paths passed in
func (c *Config) filterTests(paths []string) {
var newTests []Test
for _, path := range paths {
for i := range c.Tests {
if c.Tests[i].Path == path {
newTests = append(newTests, c.Tests[i])
}
}
}
c.Tests = newTests
}
// Remotes returns the unique remotes
func (c *Config) Remotes() (remotes []string) {
found := map[string]struct{}{}
for _, backend := range c.Backends {
if _, ok := found[backend.Remote]; ok {
continue
}
remotes = append(remotes, backend.Remote)
found[backend.Remote] = struct{}{}
}
return remotes
}

fstest/test_all/config.yaml (new file, 109 lines added)

@@ -0,0 +1,109 @@
tests:
- path: backend
addbackend: true
noretries: true
nobinary: true
- path: fs/operations
subdir: true
fastlist: true
- path: fs/sync
subdir: true
fastlist: true
backends:
# - backend: "amazonclouddrive"
# remote: "TestAmazonCloudDrive:"
# subdir: false
# fastlist: false
- backend: "b2"
remote: "TestB2:"
subdir: true
fastlist: true
- backend: "crypt"
remote: "TestCryptDrive:"
subdir: false
fastlist: true
- backend: "crypt"
remote: "TestCryptSwift:"
subdir: false
fastlist: false
- backend: "drive"
remote: "TestDrive:"
subdir: false
fastlist: true
- backend: "dropbox"
remote: "TestDropbox:"
subdir: false
fastlist: false
- backend: "googlecloudstorage"
remote: "TestGoogleCloudStorage:"
subdir: true
fastlist: true
- backend: "hubic"
remote: "TestHubic:"
subdir: false
fastlist: false
- backend: "jottacloud"
remote: "TestJottacloud:"
subdir: false
fastlist: true
- backend: "onedrive"
remote: "TestOneDrive:"
subdir: false
fastlist: false
- backend: "s3"
remote: "TestS3:"
subdir: true
fastlist: true
- backend: "sftp"
remote: "TestSftp:"
subdir: false
fastlist: false
- backend: "swift"
remote: "TestSwift:"
subdir: true
fastlist: true
- backend: "yandex"
remote: "TestYandex:"
subdir: false
fastlist: false
- backend: "ftp"
remote: "TestFTP:"
subdir: false
fastlist: false
- backend: "box"
remote: "TestBox:"
subdir: false
fastlist: false
- backend: "qingstor"
remote: "TestQingStor:"
subdir: false
fastlist: false
oneonly: true
- backend: "azureblob"
remote: "TestAzureBlob:"
subdir: true
fastlist: true
- backend: "pcloud"
remote: "TestPcloud:"
subdir: false
fastlist: false
- backend: "webdav"
remote: "TestWebdav:"
subdir: false
fastlist: false
- backend: "cache"
remote: "TestCache:"
subdir: false
fastlist: false
- backend: "mega"
remote: "TestMega:"
subdir: false
fastlist: false
- backend: "opendrive"
remote: "TestOpenDrive:"
subdir: false
fastlist: false
- backend: "union"
remote: "TestUnion:"
subdir: false
fastlist: false
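The YAML keys above map onto the Test and Backend structs in config.go through yaml.v2's default handling, which lower cases the Go field names (Path becomes path, SubDir becomes subdir, and so on). A trimmed, self-contained sketch of that round trip, with the struct definitions copied from config.go:

package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

// Trimmed copies of the structs defined in config.go.
type Test struct {
	Path     string
	SubDir   bool
	FastList bool
}

type Backend struct {
	Backend  string
	Remote   string
	SubDir   bool
	FastList bool
}

type Config struct {
	Tests    []Test
	Backends []Backend
}

const sample = `
tests:
  - path: fs/sync
    subdir: true
    fastlist: true
backends:
  - backend: "s3"
    remote: "TestS3:"
    subdir: true
    fastlist: true
`

func main() {
	config := &Config{}
	if err := yaml.Unmarshal([]byte(sample), config); err != nil {
		log.Fatalf("failed to parse config: %v", err)
	}
	fmt.Printf("%d tests, %d backends, first remote %q\n",
		len(config.Tests), len(config.Backends), config.Backends[0].Remote)
}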

fstest/test_all/report.go (new file, 283 lines added)

@@ -0,0 +1,283 @@
// +build go1.10
package main
import (
"fmt"
"html/template"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
"sort"
"time"
"github.com/ncw/rclone/fs"
"github.com/skratchdot/open-golang/open"
)
const timeFormat = "2006-01-02-150405"
// Report holds the info to make a report on a series of test runs
type Report struct {
LogDir string // output directory for logs and report
StartTime time.Time // time started
DateTime string // directory name for output
Duration time.Duration // time the run took
Failed Runs // failed runs
Passed Runs // passed runs
Runs []ReportRun // runs to report
Version string // rclone version
Previous string // previous test name if known
IndexHTML string // path to the index.html file
URL string // online version
}
// ReportRun is used in the templates to report on a test run
type ReportRun struct {
Name string
Runs Runs
}
// NewReport initialises and returns a Report
func NewReport() *Report {
r := &Report{
StartTime: time.Now(),
Version: fs.Version,
}
r.DateTime = r.StartTime.Format(timeFormat)
// Find previous log directory if possible
names, err := ioutil.ReadDir(*outputDir)
if err == nil && len(names) > 0 {
r.Previous = names[len(names)-1].Name()
}
// Create output directory for logs and report
r.LogDir = path.Join(*outputDir, r.DateTime)
err = os.MkdirAll(r.LogDir, 0777)
if err != nil {
log.Fatalf("Failed to make log directory: %v", err)
}
// Online version
r.URL = *urlBase + r.DateTime + "/index.html"
return r
}
// End should be called when the tests are complete
func (r *Report) End() {
r.Duration = time.Since(r.StartTime)
sort.Sort(r.Failed)
sort.Sort(r.Passed)
r.Runs = []ReportRun{
{Name: "Failed", Runs: r.Failed},
{Name: "Passed", Runs: r.Passed},
}
}
// AllPassed returns true if there were no failed tests
func (r *Report) AllPassed() bool {
return len(r.Failed) == 0
}
// RecordResult should be called with a Run when it has finished so it
// can be recorded into the Report
func (r *Report) RecordResult(t *Run) {
if !t.passed() {
r.Failed = append(r.Failed, t)
} else {
r.Passed = append(r.Passed, t)
}
}
// Title returns a human readable summary title for the Report
func (r *Report) Title() string {
if r.AllPassed() {
return fmt.Sprintf("PASS: All tests finished OK in %v", r.Duration)
}
return fmt.Sprintf("FAIL: %d tests failed in %v", len(r.Failed), r.Duration)
}
// LogSummary writes the summary to the log file
func (r *Report) LogSummary() {
log.Printf("Logs in %q", r.LogDir)
// Summarise results
log.Printf("SUMMARY")
log.Println(r.Title())
if !r.AllPassed() {
for _, t := range r.Failed {
log.Printf(" * %s", toShell(t.nextCmdLine()))
log.Printf(" * Failed tests: %v", t.failedTests)
}
}
}
// LogHTML writes the summary to index.html in LogDir
func (r *Report) LogHTML() {
r.IndexHTML = path.Join(r.LogDir, "index.html")
out, err := os.Create(r.IndexHTML)
if err != nil {
log.Fatalf("Failed to open index.html: %v", err)
}
defer func() {
err := out.Close()
if err != nil {
log.Fatalf("Failed to close index.html: %v", err)
}
}()
err = reportTemplate.Execute(out, r)
if err != nil {
log.Fatalf("Failed to execute template: %v", err)
}
_ = open.Start("file://" + r.IndexHTML)
}
var reportHTML = `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>{{ .Title }}</title>
<style>
table {
border-collapse: collapse;
border-spacing: 0;
border: 1px solid #ddd;
}
table.tests {
width: 100%;
}
table, th, td {
border: 1px solid #264653;
}
.Failed {
color: #BE5B43;
}
.Passed {
color: #17564E;
}
.false {
font-weight: lighter;
}
.true {
font-weight: bold;
}
th, td {
text-align: left;
padding: 4px;
}
tr:nth-child(even) {
background-color: #f2f2f2;
}
a {
color: #5B1955;
text-decoration: none;
}
a:hover, a:focus {
color: #F4A261;
text-decoration:underline;
}
a:focus {
outline: thin dotted;
outline: 5px auto;
}
</style>
</head>
<body>
<h1>{{ .Title }}</h1>
<table>
<tr><th>Version</th><td>{{ .Version }}</td></tr>
<tr><th>Test</th><td><a href="{{ .URL }}">{{ .DateTime}}</a></td></tr>
<tr><th>Duration</th><td>{{ .Duration }}</td></tr>
{{ if .Previous}}<tr><th>Previous</th><td><a href="../{{ .Previous }}/index.html">{{ .Previous }}</a></td></tr>{{ end }}
<tr><th>Up</th><td><a href="../">Older Tests</a></td></tr>
</table>
{{ range .Runs }}
{{ if .Runs }}
<h2 class="{{ .Name }}">{{ .Name }}: {{ len .Runs }}</h2>
<table class="{{ .Name }} tests">
<tr>
<th>Backend</th>
<th>Remote</th>
<th>Test</th>
<th>SubDir</th>
<th>FastList</th>
<th>Failed</th>
<th>Logs</th>
</tr>
{{ $prevBackend := "" }}
{{ $prevRemote := "" }}
{{ range .Runs}}
<tr>
<td>{{ if ne $prevBackend .Backend }}{{ .Backend }}{{ end }}{{ $prevBackend = .Backend }}</td>
<td>{{ if ne $prevRemote .Remote }}{{ .Remote }}{{ end }}{{ $prevRemote = .Remote }}</td>
<td>{{ .Path }}</td>
<td><span class="{{ .SubDir }}">{{ .SubDir }}</span></td>
<td><span class="{{ .FastList }}">{{ .FastList }}</span></td>
<td>{{ .FailedTests }}</td>
<td>{{ range $i, $v := .Logs }}<a href="{{ $v }}">#{{ $i }}</a> {{ end }}</td>
</tr>
{{ end }}
</table>
{{ end }}
{{ end }}
</body>
</html>
`
var reportTemplate = template.Must(template.New("Report").Parse(reportHTML))
// EmailHTML sends the summary report to the email address supplied
func (r *Report) EmailHTML() {
if *emailReport == "" || r.IndexHTML == "" {
return
}
log.Printf("Sending email summary to %q", *emailReport)
cmdLine := []string{"mail", "-a", "Content-Type: text/html", *emailReport, "-s", "rclone integration tests: " + r.Title()}
cmd := exec.Command(cmdLine[0], cmdLine[1:]...)
in, err := os.Open(r.IndexHTML)
if err != nil {
log.Fatalf("Failed to open index.html: %v", err)
}
cmd.Stdin = in
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err = cmd.Run()
if err != nil {
log.Fatalf("Failed to send email: %v", err)
}
_ = in.Close()
}
// uploadTo uploads a copy of the report online to the dir given
func (r *Report) uploadTo(uploadDir string) {
dst := path.Join(*uploadPath, uploadDir)
log.Printf("Uploading results to %q", dst)
cmdLine := []string{"rclone", "sync", "--stats-log-level", "NOTICE", r.LogDir, dst}
cmd := exec.Command(cmdLine[0], cmdLine[1:]...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Run()
if err != nil {
log.Fatalf("Failed to upload results: %v", err)
}
}
// Upload uploads a copy of the report online
func (r *Report) Upload() {
if *uploadPath == "" || r.IndexHTML == "" {
return
}
// Upload into dated directory
r.uploadTo(r.DateTime)
// And again into current
r.uploadTo("current")
}

fstest/test_all/run.go (new file, 356 lines added)

@@ -0,0 +1,356 @@
// Run a test
// +build go1.10
package main
import (
"bytes"
"fmt"
"go/build"
"io"
"log"
"os"
"os/exec"
"path"
"regexp"
"runtime"
"strings"
"sync"
"time"
"github.com/ncw/rclone/fs"
)
const testBase = "github.com/ncw/rclone/"
// Control concurrency per backend if required
var (
oneOnlyMu sync.Mutex
oneOnly = map[string]*sync.Mutex{}
)
// Run holds info about a running test
//
// A run just runs one command line, but it can be run multiple times
// if retries are needed.
type Run struct {
// Config
Remote string // name of the test remote
Backend string // name of the backend
Path string // path to the source directory
SubDir bool // add -sub-dir to tests
FastList bool // add -fast-list to tests
NoRetries bool // don't retry if set
OneOnly bool // only run test for this backend at once
NoBinary bool // set to not build a binary
// Internals
cmdLine []string
cmdString string
try int
err error
output []byte
failedTests []string
runFlag string
logDir string // directory to place the logs
trialName string // name/log file name of current trial
trialNames []string // list of all the trials
}
// Runs records multiple Run objects
type Runs []*Run
// Sort interface
func (rs Runs) Len() int { return len(rs) }
func (rs Runs) Swap(i, j int) { rs[i], rs[j] = rs[j], rs[i] }
func (rs Runs) Less(i, j int) bool {
a, b := rs[i], rs[j]
if a.Backend < b.Backend {
return true
} else if a.Backend > b.Backend {
return false
}
if a.Remote < b.Remote {
return true
} else if a.Remote > b.Remote {
return false
}
if a.Path < b.Path {
return true
} else if a.Path > b.Path {
return false
}
if !a.SubDir && b.SubDir {
return true
} else if a.SubDir && !b.SubDir {
return false
}
if !a.FastList && b.FastList {
return true
} else if a.FastList && !b.FastList {
return false
}
return false
}
// dumpOutput prints the error output
func (r *Run) dumpOutput() {
log.Println("------------------------------------------------------------")
log.Printf("---- %q ----", r.cmdString)
log.Println(string(r.output))
log.Println("------------------------------------------------------------")
}
var failRe = regexp.MustCompile(`(?m)^\s*--- FAIL: (Test.*?) \(`)
// findFailures looks for all the tests which failed
func (r *Run) findFailures() {
oldFailedTests := r.failedTests
r.failedTests = nil
excludeParents := map[string]struct{}{}
for _, matches := range failRe.FindAllSubmatch(r.output, -1) {
failedTest := string(matches[1])
r.failedTests = append(r.failedTests, failedTest)
// Find all the parents of this test
parts := strings.Split(failedTest, "/")
for i := len(parts) - 1; i >= 1; i-- {
excludeParents[strings.Join(parts[:i], "/")] = struct{}{}
}
}
// Exclude the parents
var newTests = r.failedTests[:0]
for _, failedTest := range r.failedTests {
if _, excluded := excludeParents[failedTest]; !excluded {
newTests = append(newTests, failedTest)
}
}
r.failedTests = newTests
if len(r.failedTests) != 0 {
r.runFlag = "^(" + strings.Join(r.failedTests, "|") + ")$"
} else {
r.runFlag = ""
}
if r.passed() && len(r.failedTests) != 0 {
log.Printf("%q - Expecting no errors but got: %v", r.cmdString, r.failedTests)
r.dumpOutput()
} else if !r.passed() && len(r.failedTests) == 0 {
log.Printf("%q - Expecting errors but got none: %v", r.cmdString, r.failedTests)
r.dumpOutput()
r.failedTests = oldFailedTests
}
}
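To make the parent handling concrete: when a subtest fails, go test also marks its parent as failed, and retrying the parent would rerun every subtest. The sketch below runs the same regexp and exclusion over an invented fragment of go test output and shows that only the leaf name ends up in the -test.run pattern:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Same pattern as failRe above.
var failRe = regexp.MustCompile(`(?m)^\s*--- FAIL: (Test.*?) \(`)

func main() {
	// Invented go test output: a subtest fails, so its parent fails too.
	output := []byte(`
--- FAIL: TestSyncAfterRemovingAFileAndAddingAFile (1.20s)
    --- FAIL: TestSyncAfterRemovingAFileAndAddingAFile/subtest (0.60s)
`)
	var failed []string
	excludeParents := map[string]struct{}{}
	for _, matches := range failRe.FindAllSubmatch(output, -1) {
		name := string(matches[1])
		failed = append(failed, name)
		parts := strings.Split(name, "/")
		for i := len(parts) - 1; i >= 1; i-- {
			excludeParents[strings.Join(parts[:i], "/")] = struct{}{}
		}
	}
	var leaves []string
	for _, name := range failed {
		if _, excluded := excludeParents[name]; !excluded {
			leaves = append(leaves, name)
		}
	}
	// Only the leaf survives, so the retry targets just the failing subtest.
	fmt.Println("-test.run", "^("+strings.Join(leaves, "|")+")$")
}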
// nextCmdLine returns the next command line
func (r *Run) nextCmdLine() []string {
cmdLine := r.cmdLine
if r.runFlag != "" {
cmdLine = append(cmdLine, "-test.run", r.runFlag)
}
return cmdLine
}
// trial runs a single test
func (r *Run) trial() {
cmdLine := r.nextCmdLine()
cmdString := toShell(cmdLine)
msg := fmt.Sprintf("%q - Starting (try %d/%d)", cmdString, r.try, *maxTries)
log.Println(msg)
logName := path.Join(r.logDir, r.trialName)
out, err := os.Create(logName)
if err != nil {
log.Fatalf("Couldn't create log file: %v", err)
}
defer func() {
err := out.Close()
if err != nil {
log.Fatalf("Failed to close log file: %v", err)
}
}()
_, _ = fmt.Fprintln(out, msg)
// Early exit if --dry-run
if *dryRun {
log.Printf("Not executing as --dry-run: %v", cmdLine)
_, _ = fmt.Fprintln(out, "--dry-run is set - not running")
return
}
// Internal buffer
var b bytes.Buffer
multiOut := io.MultiWriter(out, &b)
cmd := exec.Command(cmdLine[0], cmdLine[1:]...)
cmd.Stderr = multiOut
cmd.Stdout = multiOut
cmd.Dir = r.Path
start := time.Now()
r.err = cmd.Run()
r.output = b.Bytes()
duration := time.Since(start)
r.findFailures()
if r.passed() {
msg = fmt.Sprintf("%q - Finished OK in %v (try %d/%d)", cmdString, duration, r.try, *maxTries)
} else {
msg = fmt.Sprintf("%q - Finished ERROR in %v (try %d/%d): %v: Failed %v", cmdString, duration, r.try, *maxTries, r.err, r.failedTests)
}
log.Println(msg)
_, _ = fmt.Fprintln(out, msg)
}
// passed returns true if the test passed
func (r *Run) passed() bool {
return r.err == nil
}
// GOPATH returns the current GOPATH
func GOPATH() string {
gopath := os.Getenv("GOPATH")
if gopath == "" {
gopath = build.Default.GOPATH
}
return gopath
}
// BinaryName turns a package name into a binary name
func (r *Run) BinaryName() string {
binary := path.Base(r.Path) + ".test"
if runtime.GOOS == "windows" {
binary += ".exe"
}
return binary
}
// BinaryPath turns a package name into a binary path
func (r *Run) BinaryPath() string {
return path.Join(r.Path, r.BinaryName())
}
// PackagePath returns the path to the package
func (r *Run) PackagePath() string {
return path.Join(GOPATH(), "src", r.Path)
}
// MakeTestBinary makes the binary we will run
func (r *Run) MakeTestBinary() {
binary := r.BinaryPath()
binaryName := r.BinaryName()
log.Printf("%s: Making test binary %q", r.Path, binaryName)
cmdLine := []string{"go", "test", "-c"}
if *dryRun {
log.Printf("Not executing: %v", cmdLine)
return
}
cmd := exec.Command(cmdLine[0], cmdLine[1:]...)
cmd.Dir = r.Path
err := cmd.Run()
if err != nil {
log.Fatalf("Failed to make test binary: %v", err)
}
if _, err := os.Stat(binary); err != nil {
log.Fatalf("Couldn't find test binary %q", binary)
}
}
// RemoveTestBinary removes the binary made in MakeTestBinary
func (r *Run) RemoveTestBinary() {
if *dryRun {
return
}
binary := r.BinaryPath()
err := os.Remove(binary) // Delete the binary when finished
if err != nil {
log.Printf("Error removing test binary %q: %v", binary, err)
}
}
// Name returns the run name as a file name friendly string
func (r *Run) Name() string {
ns := []string{
r.Backend,
strings.Replace(r.Path, "/", ".", -1),
r.Remote,
}
if r.SubDir {
ns = append(ns, "subdir")
}
if r.FastList {
ns = append(ns, "fastlist")
}
ns = append(ns, fmt.Sprintf("%d", r.try))
s := strings.Join(ns, "-")
s = strings.Replace(s, ":", "", -1)
return s
}
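As a concrete example of the naming scheme (with illustrative field values): an fs/sync run against TestS3: with -sub-dir on its first try becomes s3-fs.sync-TestS3-subdir-1, and each trial log just appends .txt:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Illustrative values: Backend "s3", Path "fs/sync", Remote "TestS3:",
	// SubDir true, FastList false, first try.
	ns := []string{"s3", strings.Replace("fs/sync", "/", ".", -1), "TestS3:", "subdir", "1"}
	name := strings.Replace(strings.Join(ns, "-"), ":", "", -1)
	fmt.Println(name + ".txt") // s3-fs.sync-TestS3-subdir-1.txt
}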
// Init the Run
func (r *Run) Init() {
prefix := "-test."
if r.NoBinary {
prefix = "-"
r.cmdLine = []string{"go", "test"}
} else {
r.cmdLine = []string{"./" + r.BinaryName()}
}
r.cmdLine = append(r.cmdLine, prefix+"v", prefix+"timeout", timeout.String(), "-remote", r.Remote)
r.try = 1
if *verbose {
r.cmdLine = append(r.cmdLine, "-verbose")
fs.Config.LogLevel = fs.LogLevelDebug
}
if *runOnly != "" {
r.cmdLine = append(r.cmdLine, prefix+"run", *runOnly)
}
if r.SubDir {
r.cmdLine = append(r.cmdLine, "-subdir")
}
if r.FastList {
r.cmdLine = append(r.cmdLine, "-fast-list")
}
r.cmdString = toShell(r.cmdLine)
}
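For reference, these are the two command line shapes Init produces, sketched with illustrative values ("2h0m0s" stands in for the timeout flag defined elsewhere in the package):

package main

import "fmt"

func main() {
	// Mirrors Run.Init for an fs/sync run on TestS3: with -sub-dir,
	// once with a prebuilt test binary and once with NoBinary set.
	for _, noBinary := range []bool{false, true} {
		prefix, cmdLine := "-test.", []string{"./sync.test"}
		if noBinary {
			prefix, cmdLine = "-", []string{"go", "test"}
		}
		cmdLine = append(cmdLine, prefix+"v", prefix+"timeout", "2h0m0s", "-remote", "TestS3:", "-subdir")
		fmt.Println(cmdLine)
	}
}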
// Logs returns all the log names
func (r *Run) Logs() []string {
return r.trialNames
}
// FailedTests returns the failed tests as a comma separated string, limiting the number
func (r *Run) FailedTests() string {
const maxTests = 5
ts := r.failedTests
if len(ts) > maxTests {
ts = ts[:maxTests:maxTests]
ts = append(ts, fmt.Sprintf("… (%d more)", len(r.failedTests)-maxTests))
}
return strings.Join(ts, ", ")
}
// Run runs all the trials for this test
func (r *Run) Run(logDir string, result chan<- *Run) {
if r.OneOnly {
oneOnlyMu.Lock()
mu := oneOnly[r.Backend]
if mu == nil {
mu = new(sync.Mutex)
oneOnly[r.Backend] = mu
}
oneOnlyMu.Unlock()
mu.Lock()
defer mu.Unlock()
}
r.Init()
r.logDir = logDir
for r.try = 1; r.try <= *maxTries; r.try++ {
r.trialName = r.Name() + ".txt"
r.trialNames = append(r.trialNames, r.trialName)
log.Printf("Starting run with log %q", r.trialName)
r.trial()
if r.passed() || r.NoRetries {
break
}
}
if !r.passed() {
r.dumpOutput()
}
result <- r
}
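The OneOnly handling deserves a note: runs normally execute concurrently, but a backend marked oneonly in the config (QingStor above) gets a lazily created per-backend mutex so only one of its runs is in flight at a time. A standalone sketch of that pattern:

package main

import (
	"fmt"
	"sync"
	"time"
)

var (
	oneOnlyMu sync.Mutex
	oneOnly   = map[string]*sync.Mutex{}
)

// lockBackend returns a mutex shared by every run on the same backend,
// creating it on first use - the same pattern Run.Run uses above.
func lockBackend(backend string) *sync.Mutex {
	oneOnlyMu.Lock()
	defer oneOnlyMu.Unlock()
	mu := oneOnly[backend]
	if mu == nil {
		mu = new(sync.Mutex)
		oneOnly[backend] = mu
	}
	return mu
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			mu := lockBackend("qingstor")
			mu.Lock()
			defer mu.Unlock()
			fmt.Printf("run %d holds the qingstor lock\n", i)
			time.Sleep(10 * time.Millisecond) // stand-in for the test run
		}(i)
	}
	wg.Wait()
}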

Some files were not shown because too many files have changed in this diff.