Mirror of https://github.com/rclone/rclone.git, synced 2026-02-06 03:33:54 +00:00

Compare commits: 6 commits, fix-bitrix...fix-onedri
| Author | SHA1 | Date |
|---|---|---|
| | 6071db565c | |
| | d0f8b4f479 | |
| | 58d82a5c73 | |
| | c0c74003f2 | |
| | 60bc7a079a | |
| | 20c5ca08fb | |
@@ -897,6 +897,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
	// Top parameter asks for bigger pages of data
	// https://dev.onedrive.com/odata/optional-query-parameters.htm
	opts := f.newOptsCall(dirID, "GET", "/children?$top=1000")
	lastID := "\x00"
OUTER:
	for {
		var result api.ListChildrenResponse
@@ -911,6 +912,10 @@ OUTER:
		if len(result.Value) == 0 {
			break
		}
		if result.Value[0].ID == lastID {
			fs.Errorf(f, "Skipping duplicate entry %q in directory %q", lastID, dirID)
			result.Value = result.Value[1:]
		}
		for i := range result.Value {
			item := &result.Value[i]
			isFolder := item.GetFolder() != nil
@@ -937,6 +942,9 @@ OUTER:
		}
		opts.Path = ""
		opts.RootURL = result.NextLink
		if len(result.Value) > 0 {
			lastID = result.Value[len(result.Value)-1].ID
		}
	}
	return
}
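The new `lastID` bookkeeping above works around OneDrive sometimes repeating the final entry of one page as the first entry of the next. A self-contained sketch of the same skip-the-duplicate-at-the-page-boundary pattern (the `item` and `fetch` types here are made up for illustration, not rclone's API):

```go
package main

import "fmt"

// item stands in for a directory entry returned by a paged listing API.
type item struct{ ID, Name string }

// listAll pages through fetch, skipping an entry when a page starts with
// the same ID that ended the previous page (a duplicate at the boundary).
func listAll(fetch func(page int) []item) (out []item) {
	lastID := "\x00" // sentinel that cannot match a real ID
	for page := 0; ; page++ {
		items := fetch(page)
		if len(items) == 0 {
			break
		}
		if items[0].ID == lastID {
			fmt.Printf("skipping duplicate entry %q\n", lastID)
			items = items[1:]
		}
		out = append(out, items...)
		if len(items) > 0 {
			lastID = items[len(items)-1].ID
		}
	}
	return out
}

func main() {
	pages := [][]item{
		{{ID: "1", Name: "a"}, {ID: "2", Name: "b"}},
		{{ID: "2", Name: "b"}, {ID: "3", Name: "c"}}, // "2" repeated at the boundary
		{},
	}
	fmt.Println(listAll(func(p int) []item { return pages[p] }))
}
```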
@@ -1067,7 +1067,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
		Path:       addSlash(srcPath),
		NoResponse: true,
		ExtraHeaders: map[string]string{
			"Destination": destinationURL.String(),
			"Destination": addSlash(destinationURL.String()),
			"Overwrite":   "F",
		},
	}
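This hunk gives the `Destination` header the same trailing-slash treatment the source path already gets, since WebDAV servers generally expect collection (directory) URLs to end in `/` when a directory is moved. The `addSlash` helper itself is not shown in the diff; a plausible sketch of such a helper (an assumption, not necessarily rclone's exact implementation):

```go
package main

import (
	"fmt"
	"strings"
)

// addSlash makes sure s ends with a trailing "/", which WebDAV
// expects for collection (directory) URLs.
func addSlash(s string) string {
	if !strings.HasSuffix(s, "/") {
		s += "/"
	}
	return s
}

func main() {
	fmt.Println(addSlash("https://example.com/dav/dir"))  // adds the slash
	fmt.Println(addSlash("https://example.com/dav/dir/")) // unchanged
}
```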
@@ -787,6 +787,27 @@ triggering follow-on actions if data was copied, or skipping if not.
NB: Enabling this option turns a usually non-fatal error into a potentially
fatal one - please check and adjust your scripts accordingly!

### --fs-cache-expire-duration=TIME

When using rclone via the API rclone caches created remotes for 5
minutes by default in the "fs cache". This means that if you do
repeated actions on the same remote then rclone won't have to build it
again from scratch, which makes it more efficient.

This flag sets the time that the remotes are cached for. If you set it
to `0` (or negative) then rclone won't cache the remotes at all.

Note that if you use some flags, eg `--backup-dir`, and if this is set
to `0` then rclone may build two remotes (one for the source or
destination and one for the `--backup-dir`) where it may have only
built one before.

### --fs-cache-expire-interval=TIME

This controls how often rclone checks for cached remotes to expire.
See the `--fs-cache-expire-duration` documentation above for more
info. The default is 60s, set to 0 to disable expiry.

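The fs cache that these two flags configure is built on the `lib/cache` package changed later in this compare. A small illustrative sketch of how those settings behave, assuming only the `SetExpireDuration`/`SetExpireInterval`/`Get` API shown in that diff (the key and durations are made up, and this is not rclone's actual wiring):

```go
package main

import (
	"fmt"
	"time"

	"github.com/rclone/rclone/lib/cache"
)

func main() {
	// Mirrors what rclone now does at startup: the durations come from
	// --fs-cache-expire-duration and --fs-cache-expire-interval.
	c := cache.New()
	c.SetExpireDuration(5 * time.Minute)  // how long an unused entry lives
	c.SetExpireInterval(60 * time.Second) // how often expiry is checked

	// The first Get builds the value, later Gets reuse the cached one.
	value, err := c.Get("remote:", func(key string) (interface{}, bool, error) {
		fmt.Println("building", key)
		return "expensive remote object", true, nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(value, "- entries:", c.Entries())

	// With SetExpireDuration(0) nothing would be cached at all,
	// matching --fs-cache-expire-duration 0.
}
```
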
### --header ###

Add an HTTP header for all transactions. The flag can be repeated to
@@ -378,6 +378,55 @@ call and taken by the [options/set](#options-set) calls as well as the
- `BandwidthSpec` - this will be set and returned as a string, eg
  "1M".

## Specifying remotes to work on

Remotes are specified with the `fs=`, `srcFs=`, `dstFs=`
parameters depending on the command being used.

The parameters can be a string as per the rest of rclone, eg
`s3:bucket/path` or `:sftp:/my/dir`. They can also be specified as
JSON blobs.

If specifying a JSON blob it should be an object mapping strings to
strings. These values will be used to configure the remote. There are
3 special values which may be set:

- `type` - set to `type` to specify a remote called `:type:`
- `_name` - set to `name` to specify a remote called `name:`
- `_root` - sets the root of the remote - may be empty

One of `_name` or `type` should normally be set. If the `local`
backend is desired then `type` should be set to `local`. If `_root`
isn't specified then it defaults to the root of the remote.

For example this JSON is equivalent to `remote:/tmp`

```
{
    "_name": "remote",
    "_root": "/tmp"
}
```

And this is equivalent to `:sftp,host='example.com':/tmp`

```
{
    "type": "sftp",
    "host": "example.com",
    "_root": "/tmp"
}
```

And this is equivalent to `/tmp/dir`

```
{
    "type": "local",
    "_root": "/tmp/dir"
}
```

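To see how such a blob is consumed, the `GetFsNamed` path extended in this change converts it into a normal connection string before looking the remote up in the fs cache. A minimal sketch of driving that path directly from Go (the `local` type and `/tmp` root are illustrative only; real API callers would send the same blob as the `fs` parameter of an rc call):

```go
package main

import (
	"context"
	"fmt"

	_ "github.com/rclone/rclone/backend/local" // register the local backend
	"github.com/rclone/rclone/fs/rc"
)

func main() {
	// The same JSON blob an API caller would send as the "fs" parameter.
	in := rc.Params{
		"fs": rc.Params{
			"type":  "local",
			"_root": "/tmp",
		},
	}
	f, err := rc.GetFsNamed(context.Background(), in, "fs")
	if err != nil {
		panic(err)
	}
	fmt.Println("built remote:", f.Name(), f.Root()) // equivalent to /tmp
}
```
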
## Supported commands
{{< rem autogenerated start "- run make rcdocs - don't edit here" >}}
### backend/command: Runs a backend command. {#backend-command}

fs/cache/cache.go (vendored, 37 changed lines)
@@ -12,14 +12,26 @@ import (
)

var (
	c = cache.New()
	once sync.Once // creation
	c *cache.Cache
	mu sync.Mutex // mutex to protect remap
	remap = map[string]string{} // map user supplied names to canonical names
)

// Create the cache just once
func createOnFirstUse() {
	once.Do(func() {
		ci := fs.GetConfig(context.Background())
		c = cache.New()
		c.SetExpireDuration(ci.FsCacheExpireDuration)
		c.SetExpireInterval(ci.FsCacheExpireInterval)
	})
}

// Canonicalize looks up fsString in the mapping from user supplied
// names to canonical names and return the canonical form
func Canonicalize(fsString string) string {
	createOnFirstUse()
	mu.Lock()
	canonicalName, ok := remap[fsString]
	mu.Unlock()
@@ -43,10 +55,11 @@ func addMapping(fsString, canonicalName string) {
// GetFn gets an fs.Fs named fsString either from the cache or creates
// it afresh with the create function
func GetFn(ctx context.Context, fsString string, create func(ctx context.Context, fsString string) (fs.Fs, error)) (f fs.Fs, err error) {
	fsString = Canonicalize(fsString)
	createOnFirstUse()
	canonicalFsString := Canonicalize(fsString)
	created := false
	value, err := c.Get(fsString, func(fsString string) (f interface{}, ok bool, err error) {
		f, err = create(ctx, fsString)
	value, err := c.Get(canonicalFsString, func(canonicalFsString string) (f interface{}, ok bool, err error) {
		f, err = create(ctx, fsString) // always create the backend with the original non-canonicalised string
		ok = err == nil || err == fs.ErrorIsFile
		created = ok
		return f, ok, err
@@ -58,19 +71,19 @@ func GetFn(ctx context.Context, fsString string, create func(ctx context.Context
	// Check we stored the Fs at the canonical name
	if created {
		canonicalName := fs.ConfigString(f)
		if canonicalName != fsString {
		if canonicalName != canonicalFsString {
			// Note that if err == fs.ErrorIsFile at this moment
			// then we can't rename the remote as it will have the
			// wrong error status, we need to add a new one.
			if err == nil {
				fs.Debugf(nil, "fs cache: renaming cache item %q to be canonical %q", fsString, canonicalName)
				value, found := c.Rename(fsString, canonicalName)
				fs.Debugf(nil, "fs cache: renaming cache item %q to be canonical %q", canonicalFsString, canonicalName)
				value, found := c.Rename(canonicalFsString, canonicalName)
				if found {
					f = value.(fs.Fs)
				}
				addMapping(fsString, canonicalName)
				addMapping(canonicalFsString, canonicalName)
			} else {
				fs.Debugf(nil, "fs cache: adding new entry for parent of %q, %q", fsString, canonicalName)
				fs.Debugf(nil, "fs cache: adding new entry for parent of %q, %q", canonicalFsString, canonicalName)
				Put(canonicalName, f)
			}
		}
@@ -80,6 +93,7 @@ func GetFn(ctx context.Context, fsString string, create func(ctx context.Context

// Pin f into the cache until Unpin is called
func Pin(f fs.Fs) {
	createOnFirstUse()
	c.Pin(fs.ConfigString(f))
}

@@ -97,6 +111,7 @@ func PinUntilFinalized(f fs.Fs, x interface{}) {

// Unpin f from the cache
func Unpin(f fs.Fs) {
	createOnFirstUse()
	c.Pin(fs.ConfigString(f))
}

@@ -127,6 +142,7 @@ func GetArr(ctx context.Context, fsStrings []string) (f []fs.Fs, err error) {

// Put puts an fs.Fs named fsString into the cache
func Put(fsString string, f fs.Fs) {
	createOnFirstUse()
	canonicalName := fs.ConfigString(f)
	c.Put(canonicalName, f)
	addMapping(fsString, canonicalName)
@@ -136,15 +152,18 @@ func Put(fsString string, f fs.Fs) {
//
// Returns number of entries deleted
func ClearConfig(name string) (deleted int) {
	createOnFirstUse()
	return c.DeletePrefix(name + ":")
}

// Clear removes everything from the cache
func Clear() {
	createOnFirstUse()
	c.Clear()
}

// Entries returns the number of entries in the cache
func Entries() int {
	createOnFirstUse()
	return c.Entries()
}
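The fs cache is now built lazily via `sync.Once`, so its expiry settings can be read from the config (and therefore from the new flags) instead of being fixed at package initialisation. A generic sketch of that lazy-initialisation pattern, not rclone's code (the `settings` variable and `store` map are invented for illustration):

```go
package main

import (
	"fmt"
	"sync"
)

// settings stands in for values that are only known after flag parsing.
var settings = struct{ expireSeconds int }{expireSeconds: 300}

var (
	once  sync.Once
	store map[string]string // built lazily so it can see settings
)

// createOnFirstUse builds the store exactly once, on first use.
func createOnFirstUse() {
	once.Do(func() {
		fmt.Println("building store with expiry", settings.expireSeconds)
		store = make(map[string]string)
	})
}

func put(k, v string) {
	createOnFirstUse()
	store[k] = v
}

func main() {
	settings.expireSeconds = 60 // e.g. set from a flag before first use
	put("a", "1")
	put("b", "2") // once.Do runs the init func only the first time
	fmt.Println(len(store))
}
```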
fs/cache/cache_test.go (vendored, 32 changed lines)
@@ -33,7 +33,7 @@ func mockNewFs(t *testing.T) (func(), func(ctx context.Context, path string) (fs
		panic("unreachable")
	}
	cleanup := func() {
		c.Clear()
		Clear()
	}
	return cleanup, create
}
@@ -42,12 +42,12 @@ func TestGet(t *testing.T) {
	cleanup, create := mockNewFs(t)
	defer cleanup()

	assert.Equal(t, 0, c.Entries())
	assert.Equal(t, 0, Entries())

	f, err := GetFn(context.Background(), "mock:/", create)
	require.NoError(t, err)

	assert.Equal(t, 1, c.Entries())
	assert.Equal(t, 1, Entries())

	f2, err := GetFn(context.Background(), "mock:/", create)
	require.NoError(t, err)
@@ -59,13 +59,13 @@ func TestGetFile(t *testing.T) {
	cleanup, create := mockNewFs(t)
	defer cleanup()

	assert.Equal(t, 0, c.Entries())
	assert.Equal(t, 0, Entries())

	f, err := GetFn(context.Background(), "mock:/file.txt", create)
	require.Equal(t, fs.ErrorIsFile, err)
	require.NotNil(t, f)

	assert.Equal(t, 2, c.Entries())
	assert.Equal(t, 2, Entries())

	f2, err := GetFn(context.Background(), "mock:/file.txt", create)
	require.Equal(t, fs.ErrorIsFile, err)
@@ -85,13 +85,13 @@ func TestGetFile2(t *testing.T) {
	cleanup, create := mockNewFs(t)
	defer cleanup()

	assert.Equal(t, 0, c.Entries())
	assert.Equal(t, 0, Entries())

	f, err := GetFn(context.Background(), "mock:file.txt", create)
	require.Equal(t, fs.ErrorIsFile, err)
	require.NotNil(t, f)

	assert.Equal(t, 2, c.Entries())
	assert.Equal(t, 2, Entries())

	f2, err := GetFn(context.Background(), "mock:file.txt", create)
	require.Equal(t, fs.ErrorIsFile, err)
@@ -111,13 +111,13 @@ func TestGetError(t *testing.T) {
	cleanup, create := mockNewFs(t)
	defer cleanup()

	assert.Equal(t, 0, c.Entries())
	assert.Equal(t, 0, Entries())

	f, err := GetFn(context.Background(), "mock:/error", create)
	require.Equal(t, errSentinel, err)
	require.Equal(t, nil, f)

	assert.Equal(t, 0, c.Entries())
	assert.Equal(t, 0, Entries())
}

func TestPut(t *testing.T) {
@@ -126,17 +126,17 @@ func TestPut(t *testing.T) {

	f := mockfs.NewFs(context.Background(), "mock", "/alien")

	assert.Equal(t, 0, c.Entries())
	assert.Equal(t, 0, Entries())

	Put("mock:/alien", f)

	assert.Equal(t, 1, c.Entries())
	assert.Equal(t, 1, Entries())

	fNew, err := GetFn(context.Background(), "mock:/alien", create)
	require.NoError(t, err)
	require.Equal(t, f, fNew)

	assert.Equal(t, 1, c.Entries())
	assert.Equal(t, 1, Entries())

	// Check canonicalisation

@@ -146,7 +146,7 @@ func TestPut(t *testing.T) {
	require.NoError(t, err)
	require.Equal(t, f, fNew)

	assert.Equal(t, 1, c.Entries())
	assert.Equal(t, 1, Entries())

}

@@ -170,7 +170,7 @@ func TestClearConfig(t *testing.T) {
	cleanup, create := mockNewFs(t)
	defer cleanup()

	assert.Equal(t, 0, c.Entries())
	assert.Equal(t, 0, Entries())

	_, err := GetFn(context.Background(), "mock:/file.txt", create)
	require.Equal(t, fs.ErrorIsFile, err)
@@ -190,11 +190,11 @@ func TestClear(t *testing.T) {
	_, err := GetFn(context.Background(), "mock:/", create)
	require.NoError(t, err)

	assert.Equal(t, 1, c.Entries())
	assert.Equal(t, 1, Entries())

	Clear()

	assert.Equal(t, 0, c.Entries())
	assert.Equal(t, 0, Entries())
}

func TestEntries(t *testing.T) {
@@ -123,6 +123,8 @@ type ConfigInfo struct {
	RefreshTimes          bool
	NoConsole             bool
	TrafficClass          uint8
	FsCacheExpireDuration time.Duration
	FsCacheExpireInterval time.Duration
}

// NewConfig creates a new config with everything set to the default
@@ -160,6 +162,8 @@ func NewConfig() *ConfigInfo {
	c.MultiThreadStreams = 4

	c.TrackRenamesStrategy = "hash"
	c.FsCacheExpireDuration = 300 * time.Second
	c.FsCacheExpireInterval = 60 * time.Second

	return c
}
@@ -128,6 +128,8 @@ func AddFlags(ci *fs.ConfigInfo, flagSet *pflag.FlagSet) {
	flags.BoolVarP(flagSet, &ci.RefreshTimes, "refresh-times", "", ci.RefreshTimes, "Refresh the modtime of remote files.")
	flags.BoolVarP(flagSet, &ci.NoConsole, "no-console", "", ci.NoConsole, "Hide console window. Supported on Windows only.")
	flags.StringVarP(flagSet, &dscp, "dscp", "", "", "Set DSCP value to connections. Can be value or names, eg. CS1, LE, DF, AF21.")
	flags.DurationVarP(flagSet, &ci.FsCacheExpireDuration, "fs-cache-expire-duration", "", ci.FsCacheExpireDuration, "cache remotes for this long (0 to disable caching)")
	flags.DurationVarP(flagSet, &ci.FsCacheExpireInterval, "fs-cache-expire-interval", "", ci.FsCacheExpireInterval, "interval to check for expired remotes")
}

// ParseHeaders converts the strings passed in via the header flags into HTTPOptions

@@ -4,21 +4,64 @@ package rc

import (
	"context"
	"errors"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/cache"
	"github.com/rclone/rclone/fs/config/configmap"
)

// GetFsNamed gets an fs.Fs named fsName either from the cache or creates it afresh
func GetFsNamed(ctx context.Context, in Params, fsName string) (f fs.Fs, err error) {
	fsString, err := in.GetString(fsName)
	if err != nil {
		return nil, err
		if !IsErrParamInvalid(err) {
			return nil, err
		}
		fsString, err = getConfigMap(in, fsName)
		if err != nil {
			return nil, err
		}
	}

	return cache.Get(ctx, fsString)
}

// getConfigMap gets the config as a map from in and converts it to a
// config string
//
// It uses the special parameters _name to name the remote and _root
// to make the root of the remote.
func getConfigMap(in Params, fsName string) (fsString string, err error) {
	var m configmap.Simple
	err = in.GetStruct(fsName, &m)
	if err != nil {
		return fsString, err
	}
	pop := func(key string) string {
		value := m[key]
		delete(m, key)
		return value
	}
	Type := pop("type")
	name := pop("_name")
	root := pop("_root")
	if name != "" {
		fsString = name
	} else if Type != "" {
		fsString = ":" + Type
	} else {
		return fsString, errors.New(`couldn't find "type" or "_name" in JSON config definition`)
	}
	config := m.String()
	if config != "" {
		fsString += ","
		fsString += config
	}
	fsString += ":"
	fsString += root
	return fsString, nil
}

// GetFs gets an fs.Fs named "fs" either from the cache or creates it afresh
func GetFs(ctx context.Context, in Params) (f fs.Fs, err error) {
	return GetFsNamed(ctx, in, "fs")
@@ -2,6 +2,7 @@ package rc

import (
	"context"
	"fmt"
	"testing"

	"github.com/rclone/rclone/fs/cache"
@@ -13,6 +14,8 @@ import (
func mockNewFs(t *testing.T) func() {
	f := mockfs.NewFs(context.Background(), "mock", "mock")
	cache.Put("/", f)
	cache.Put("mock:/", f)
	cache.Put(":mock:/", f)
	return func() {
		cache.Clear()
	}
@@ -36,6 +39,98 @@ func TestGetFsNamed(t *testing.T) {
	assert.Nil(t, f)
}

func TestGetFsNamedStruct(t *testing.T) {
	defer mockNewFs(t)()

	in := Params{
		"potato": Params{
			"type":  "mock",
			"_root": "/",
		},
	}
	f, err := GetFsNamed(context.Background(), in, "potato")
	require.NoError(t, err)
	assert.NotNil(t, f)

	in = Params{
		"potato": Params{
			"_name": "mock",
			"_root": "/",
		},
	}
	f, err = GetFsNamed(context.Background(), in, "potato")
	require.NoError(t, err)
	assert.NotNil(t, f)
}

func TestGetConfigMap(t *testing.T) {
	for _, test := range []struct {
		in           Params
		fsName       string
		wantFsString string
		wantErr      string
	}{
		{
			in: Params{
				"Fs": Params{},
			},
			fsName:  "Fs",
			wantErr: `couldn't find "type" or "_name" in JSON config definition`,
		},
		{
			in: Params{
				"Fs": Params{
					"notastring": true,
				},
			},
			fsName:  "Fs",
			wantErr: `cannot unmarshal bool`,
		},
		{
			in: Params{
				"Fs": Params{
					"_name": "potato",
				},
			},
			fsName:       "Fs",
			wantFsString: "potato:",
		},
		{
			in: Params{
				"Fs": Params{
					"type": "potato",
				},
			},
			fsName:       "Fs",
			wantFsString: ":potato:",
		},
		{
			in: Params{
				"Fs": Params{
					"type":       "sftp",
					"_name":      "potato",
					"parameter":  "42",
					"parameter2": "true",
					"_root":      "/path/to/somewhere",
				},
			},
			fsName:       "Fs",
			wantFsString: "potato,parameter='42',parameter2='true':/path/to/somewhere",
		},
	} {
		gotFsString, gotErr := getConfigMap(test.in, test.fsName)
		what := fmt.Sprintf("%+v", test.in)
		assert.Equal(t, test.wantFsString, gotFsString, what)
		if test.wantErr == "" {
			assert.NoError(t, gotErr)
		} else {
			require.Error(t, gotErr)
			assert.Contains(t, gotErr.Error(), test.wantErr)

		}
	}
}

func TestGetFs(t *testing.T) {
	defer mockNewFs(t)()

@@ -279,3 +279,26 @@ func (p Params) GetDuration(key string) (time.Duration, error) {
	}
	return duration, nil
}

// Error creates the standard response for an errored rc call using an
// rc.Param from a path, input Params, error and a suggested HTTP
// response code.
//
// It returns a Params and an updated status code
func Error(path string, in Params, err error, status int) (Params, int) {
	// Adjust the status code for some well known errors
	errOrig := errors.Cause(err)
	switch {
	case errOrig == fs.ErrorDirNotFound || errOrig == fs.ErrorObjectNotFound:
		status = http.StatusNotFound
	case IsErrParamInvalid(err) || IsErrParamNotFound(err):
		status = http.StatusBadRequest
	}
	result := Params{
		"status": status,
		"error":  err.Error(),
		"input":  in,
		"path":   path,
	}
	return result, status
}
@@ -169,21 +169,9 @@ func (s *Server) Serve() error {
// writeError writes a formatted error to the output
func writeError(path string, in rc.Params, w http.ResponseWriter, err error, status int) {
	fs.Errorf(nil, "rc: %q: error: %v", path, err)
	// Adjust the error return for some well known errors
	errOrig := errors.Cause(err)
	switch {
	case errOrig == fs.ErrorDirNotFound || errOrig == fs.ErrorObjectNotFound:
		status = http.StatusNotFound
	case rc.IsErrParamInvalid(err) || rc.IsErrParamNotFound(err):
		status = http.StatusBadRequest
	}
	params, status := rc.Error(path, in, err, status)
	w.WriteHeader(status)
	err = rc.WriteJSON(w, rc.Params{
		"status": status,
		"error":  err.Error(),
		"input":  in,
		"path":   path,
	})
	err = rc.WriteJSON(w, params)
	if err != nil {
		// can't return the error at this point
		fs.Errorf(nil, "rc: writeError: failed to write JSON output from %#v: %v", in, err)
@@ -12,6 +12,7 @@ Make TesTrun have a []string of flags to try - that then makes it generic
*/

import (
	"context"
	"flag"
	"log"
	"math/rand"
@@ -22,6 +23,7 @@ import (
	"time"

	_ "github.com/rclone/rclone/backend/all" // import all fs
	"github.com/rclone/rclone/fs/config/configfile"
	"github.com/rclone/rclone/lib/pacer"
)

@@ -70,6 +72,7 @@ func main() {
		log.Println("test_all should be run from the root of the rclone source code")
		log.Fatal(err)
	}
	configfile.LoadConfig(context.Background())

	// Seed the random number generator
	rand.Seed(time.Now().UTC().UnixNano())
lib/cache/cache.go (vendored, 31 changed lines)
@@ -28,6 +28,30 @@ func New() *Cache {
	}
}

// SetExpireDuration sets the interval at which things expire
//
// If it is less than or equal to 0 then things are never cached
func (c *Cache) SetExpireDuration(d time.Duration) *Cache {
	c.expireDuration = d
	return c
}

// returns true if we aren't to cache anything
func (c *Cache) noCache() bool {
	return c.expireDuration <= 0
}

// SetExpireInterval sets the interval at which the cache expiry runs
//
// Set to 0 or a -ve number to disable
func (c *Cache) SetExpireInterval(d time.Duration) *Cache {
	if d <= 0 {
		d = 100 * 365 * 24 * time.Hour
	}
	c.expireInterval = d
	return c
}

// cacheEntry is stored in the cache
type cacheEntry struct {
	value interface{} // cached item
@@ -69,7 +93,9 @@ func (c *Cache) Get(key string, create CreateFunc) (value interface{}, err error
			err:   err,
		}
		c.mu.Lock()
		c.cache[key] = entry
		if !c.noCache() {
			c.cache[key] = entry
		}
	}
	defer c.mu.Unlock()
	c.used(entry)
@@ -100,6 +126,9 @@ func (c *Cache) Unpin(key string) {
func (c *Cache) Put(key string, value interface{}) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.noCache() {
		return
	}
	entry := &cacheEntry{
		value: value,
		key:   key,
lib/cache/cache_test.go (vendored, 27 changed lines)
@@ -100,7 +100,7 @@ func TestPut(t *testing.T) {
func TestCacheExpire(t *testing.T) {
	c, create := setup(t)

	c.expireInterval = time.Millisecond
	c.SetExpireInterval(time.Millisecond)
	assert.Equal(t, false, c.expireRunning)

	_, err := c.Get("/", create)
@@ -127,6 +127,31 @@ func TestCacheExpire(t *testing.T) {
	c.mu.Unlock()
}

func TestCacheNoExpire(t *testing.T) {
	c, create := setup(t)

	assert.False(t, c.noCache())

	c.SetExpireDuration(0)
	assert.Equal(t, false, c.expireRunning)

	assert.True(t, c.noCache())

	f, err := c.Get("/", create)
	require.NoError(t, err)
	require.NotNil(t, f)

	c.mu.Lock()
	assert.Equal(t, 0, len(c.cache))
	c.mu.Unlock()

	c.Put("/alien", "slime")

	c.mu.Lock()
	assert.Equal(t, 0, len(c.cache))
	c.mu.Unlock()
}

func TestCachePin(t *testing.T) {
	c, create := setup(t)