1
0
mirror of https://github.com/rclone/rclone.git synced 2026-02-06 03:33:54 +00:00

Compare commits

..

1 Commits

Author SHA1 Message Date
Nick Craig-Wood
9ea209a9ab webdav: fix DirMove for bitrix - FIXME DO NOT MERGE
This implements a workaround to enable DirMove for bitrix to work.

This is an apparently non-RFC-compliant change.

See: https://forum.rclone.org/t/cant-manage-folders-using-bitrix-webdav-under-windows/23073
2021-03-29 19:32:09 +01:00
15 changed files with 44 additions and 353 deletions

View File

@@ -897,7 +897,6 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
// Top parameter asks for bigger pages of data
// https://dev.onedrive.com/odata/optional-query-parameters.htm
opts := f.newOptsCall(dirID, "GET", "/children?$top=1000")
lastID := "\x00"
OUTER:
for {
var result api.ListChildrenResponse
@@ -912,10 +911,6 @@ OUTER:
if len(result.Value) == 0 {
break
}
if result.Value[0].ID == lastID {
fs.Errorf(f, "Skipping duplicate entry %q in directory %q", lastID, dirID)
result.Value = result.Value[1:]
}
for i := range result.Value {
item := &result.Value[i]
isFolder := item.GetFolder() != nil
@@ -942,9 +937,6 @@ OUTER:
}
opts.Path = ""
opts.RootURL = result.NextLink
if len(result.Value) > 0 {
lastID = result.Value[len(result.Value)-1].ID
}
}
return
}

View File

@@ -1067,7 +1067,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
Path: addSlash(srcPath),
NoResponse: true,
ExtraHeaders: map[string]string{
"Destination": addSlash(destinationURL.String()),
"Destination": destinationURL.String(),
"Overwrite": "F",
},
}

View File

@@ -787,27 +787,6 @@ triggering follow-on actions if data was copied, or skipping if not.
NB: Enabling this option turns a usually non-fatal error into a potentially
fatal one - please check and adjust your scripts accordingly!
### --fs-cache-expire-duration=TIME
When using rclone via the API rclone caches created remotes for 5
minutes by default in the "fs cache". This means that if you do
repeated actions on the same remote then rclone won't have to build it
again from scratch, which makes it more efficient.
This flag sets the time that the remotes are cached for. If you set it
to `0` (or negative) then rclone won't cache the remotes at all.
Note that if you use some flags, eg `--backup-dir` and if this is set
to `0` rclone may build two remotes (one for the source or destination
and one for the `--backup-dir`) where it may have only built one
before.
### --fs-cache-expire-interval=TIME
This controls how often rclone checks for cached remotes to expire.
See the `--fs-cache-expire-duration` documentation above for more
info. The default is 60s, set to 0 to disable expiry.
### --header ###
Add an HTTP header for all transactions. The flag can be repeated to

View File

@@ -378,55 +378,6 @@ call and taken by the [options/set](#options-set) calls as well as the
- `BandwidthSpec` - this will be set and returned as a string, eg
"1M".
## Specifying remotes to work on
Remotes are specified with the `fs=`, `srcFs=`, `dstFs=`
parameters depending on the command being used.
The parameters can be a string as per the rest of rclone, eg
`s3:bucket/path` or `:sftp:/my/dir`. They can also be specified as
JSON blobs.
If specifying a JSON blob it should be an object mapping strings to
strings. These values will be used to configure the remote. There are
3 special values which may be set:
- `type` - set to `type` to specify a remote called `:type:`
- `_name` - set to `name` to specify a remote called `name:`
- `_root` - sets the root of the remote - may be empty
One of `_name` or `type` should normally be set. If the `local`
backend is desired then `type` should be set to `local`. If `_root`
isn't specified then it defaults to the root of the remote.
For example this JSON is equivalent to `remote:/tmp`
```
{
"_name": "remote",
"_root": "/tmp"
}
```
And this is equivalent to `:sftp,host='example.com':/tmp`
```
{
"type": "sftp",
"host": "example.com",
"_root": "/tmp"
}
```
And this is equivalent to `/tmp/dir`
```
{
"type": "local",
"_root": "/tmp/dir"
}
```
## Supported commands
{{< rem autogenerated start "- run make rcdocs - don't edit here" >}}
### backend/command: Runs a backend command. {#backend-command}

37
fs/cache/cache.go vendored
View File

@@ -12,26 +12,14 @@ import (
)
var (
once sync.Once // creation
c *cache.Cache
c = cache.New()
mu sync.Mutex // mutex to protect remap
remap = map[string]string{} // map user supplied names to canonical names
)
// Create the cache just once
func createOnFirstUse() {
once.Do(func() {
ci := fs.GetConfig(context.Background())
c = cache.New()
c.SetExpireDuration(ci.FsCacheExpireDuration)
c.SetExpireInterval(ci.FsCacheExpireInterval)
})
}
// Canonicalize looks up fsString in the mapping from user supplied
// names to canonical names and return the canonical form
func Canonicalize(fsString string) string {
createOnFirstUse()
mu.Lock()
canonicalName, ok := remap[fsString]
mu.Unlock()
@@ -55,11 +43,10 @@ func addMapping(fsString, canonicalName string) {
// GetFn gets an fs.Fs named fsString either from the cache or creates
// it afresh with the create function
func GetFn(ctx context.Context, fsString string, create func(ctx context.Context, fsString string) (fs.Fs, error)) (f fs.Fs, err error) {
createOnFirstUse()
canonicalFsString := Canonicalize(fsString)
fsString = Canonicalize(fsString)
created := false
value, err := c.Get(canonicalFsString, func(canonicalFsString string) (f interface{}, ok bool, err error) {
f, err = create(ctx, fsString) // always create the backend with the original non-canonicalised string
value, err := c.Get(fsString, func(fsString string) (f interface{}, ok bool, err error) {
f, err = create(ctx, fsString)
ok = err == nil || err == fs.ErrorIsFile
created = ok
return f, ok, err
@@ -71,19 +58,19 @@ func GetFn(ctx context.Context, fsString string, create func(ctx context.Context
// Check we stored the Fs at the canonical name
if created {
canonicalName := fs.ConfigString(f)
if canonicalName != canonicalFsString {
if canonicalName != fsString {
// Note that if err == fs.ErrorIsFile at this moment
// then we can't rename the remote as it will have the
// wrong error status, we need to add a new one.
if err == nil {
fs.Debugf(nil, "fs cache: renaming cache item %q to be canonical %q", canonicalFsString, canonicalName)
value, found := c.Rename(canonicalFsString, canonicalName)
fs.Debugf(nil, "fs cache: renaming cache item %q to be canonical %q", fsString, canonicalName)
value, found := c.Rename(fsString, canonicalName)
if found {
f = value.(fs.Fs)
}
addMapping(canonicalFsString, canonicalName)
addMapping(fsString, canonicalName)
} else {
fs.Debugf(nil, "fs cache: adding new entry for parent of %q, %q", canonicalFsString, canonicalName)
fs.Debugf(nil, "fs cache: adding new entry for parent of %q, %q", fsString, canonicalName)
Put(canonicalName, f)
}
}
@@ -93,7 +80,6 @@ func GetFn(ctx context.Context, fsString string, create func(ctx context.Context
// Pin f into the cache until Unpin is called
func Pin(f fs.Fs) {
createOnFirstUse()
c.Pin(fs.ConfigString(f))
}
@@ -111,7 +97,6 @@ func PinUntilFinalized(f fs.Fs, x interface{}) {
// Unpin f from the cache
func Unpin(f fs.Fs) {
createOnFirstUse()
c.Pin(fs.ConfigString(f))
}
@@ -142,7 +127,6 @@ func GetArr(ctx context.Context, fsStrings []string) (f []fs.Fs, err error) {
// Put puts an fs.Fs named fsString into the cache
func Put(fsString string, f fs.Fs) {
createOnFirstUse()
canonicalName := fs.ConfigString(f)
c.Put(canonicalName, f)
addMapping(fsString, canonicalName)
@@ -152,18 +136,15 @@ func Put(fsString string, f fs.Fs) {
//
// Returns number of entries deleted
func ClearConfig(name string) (deleted int) {
createOnFirstUse()
return c.DeletePrefix(name + ":")
}
// Clear removes everything from the cache
func Clear() {
createOnFirstUse()
c.Clear()
}
// Entries returns the number of entries in the cache
func Entries() int {
createOnFirstUse()
return c.Entries()
}

View File

@@ -33,7 +33,7 @@ func mockNewFs(t *testing.T) (func(), func(ctx context.Context, path string) (fs
panic("unreachable")
}
cleanup := func() {
Clear()
c.Clear()
}
return cleanup, create
}
@@ -42,12 +42,12 @@ func TestGet(t *testing.T) {
cleanup, create := mockNewFs(t)
defer cleanup()
assert.Equal(t, 0, Entries())
assert.Equal(t, 0, c.Entries())
f, err := GetFn(context.Background(), "mock:/", create)
require.NoError(t, err)
assert.Equal(t, 1, Entries())
assert.Equal(t, 1, c.Entries())
f2, err := GetFn(context.Background(), "mock:/", create)
require.NoError(t, err)
@@ -59,13 +59,13 @@ func TestGetFile(t *testing.T) {
cleanup, create := mockNewFs(t)
defer cleanup()
assert.Equal(t, 0, Entries())
assert.Equal(t, 0, c.Entries())
f, err := GetFn(context.Background(), "mock:/file.txt", create)
require.Equal(t, fs.ErrorIsFile, err)
require.NotNil(t, f)
assert.Equal(t, 2, Entries())
assert.Equal(t, 2, c.Entries())
f2, err := GetFn(context.Background(), "mock:/file.txt", create)
require.Equal(t, fs.ErrorIsFile, err)
@@ -85,13 +85,13 @@ func TestGetFile2(t *testing.T) {
cleanup, create := mockNewFs(t)
defer cleanup()
assert.Equal(t, 0, Entries())
assert.Equal(t, 0, c.Entries())
f, err := GetFn(context.Background(), "mock:file.txt", create)
require.Equal(t, fs.ErrorIsFile, err)
require.NotNil(t, f)
assert.Equal(t, 2, Entries())
assert.Equal(t, 2, c.Entries())
f2, err := GetFn(context.Background(), "mock:file.txt", create)
require.Equal(t, fs.ErrorIsFile, err)
@@ -111,13 +111,13 @@ func TestGetError(t *testing.T) {
cleanup, create := mockNewFs(t)
defer cleanup()
assert.Equal(t, 0, Entries())
assert.Equal(t, 0, c.Entries())
f, err := GetFn(context.Background(), "mock:/error", create)
require.Equal(t, errSentinel, err)
require.Equal(t, nil, f)
assert.Equal(t, 0, Entries())
assert.Equal(t, 0, c.Entries())
}
func TestPut(t *testing.T) {
@@ -126,17 +126,17 @@ func TestPut(t *testing.T) {
f := mockfs.NewFs(context.Background(), "mock", "/alien")
assert.Equal(t, 0, Entries())
assert.Equal(t, 0, c.Entries())
Put("mock:/alien", f)
assert.Equal(t, 1, Entries())
assert.Equal(t, 1, c.Entries())
fNew, err := GetFn(context.Background(), "mock:/alien", create)
require.NoError(t, err)
require.Equal(t, f, fNew)
assert.Equal(t, 1, Entries())
assert.Equal(t, 1, c.Entries())
// Check canonicalisation
@@ -146,7 +146,7 @@ func TestPut(t *testing.T) {
require.NoError(t, err)
require.Equal(t, f, fNew)
assert.Equal(t, 1, Entries())
assert.Equal(t, 1, c.Entries())
}
@@ -170,7 +170,7 @@ func TestClearConfig(t *testing.T) {
cleanup, create := mockNewFs(t)
defer cleanup()
assert.Equal(t, 0, Entries())
assert.Equal(t, 0, c.Entries())
_, err := GetFn(context.Background(), "mock:/file.txt", create)
require.Equal(t, fs.ErrorIsFile, err)
@@ -190,11 +190,11 @@ func TestClear(t *testing.T) {
_, err := GetFn(context.Background(), "mock:/", create)
require.NoError(t, err)
assert.Equal(t, 1, Entries())
assert.Equal(t, 1, c.Entries())
Clear()
assert.Equal(t, 0, Entries())
assert.Equal(t, 0, c.Entries())
}
func TestEntries(t *testing.T) {

View File

@@ -123,8 +123,6 @@ type ConfigInfo struct {
RefreshTimes bool
NoConsole bool
TrafficClass uint8
FsCacheExpireDuration time.Duration
FsCacheExpireInterval time.Duration
}
// NewConfig creates a new config with everything set to the default
@@ -162,8 +160,6 @@ func NewConfig() *ConfigInfo {
c.MultiThreadStreams = 4
c.TrackRenamesStrategy = "hash"
c.FsCacheExpireDuration = 300 * time.Second
c.FsCacheExpireInterval = 60 * time.Second
return c
}

View File

@@ -128,8 +128,6 @@ func AddFlags(ci *fs.ConfigInfo, flagSet *pflag.FlagSet) {
flags.BoolVarP(flagSet, &ci.RefreshTimes, "refresh-times", "", ci.RefreshTimes, "Refresh the modtime of remote files.")
flags.BoolVarP(flagSet, &ci.NoConsole, "no-console", "", ci.NoConsole, "Hide console window. Supported on Windows only.")
flags.StringVarP(flagSet, &dscp, "dscp", "", "", "Set DSCP value to connections. Can be value or names, eg. CS1, LE, DF, AF21.")
flags.DurationVarP(flagSet, &ci.FsCacheExpireDuration, "fs-cache-expire-duration", "", ci.FsCacheExpireDuration, "cache remotes for this long (0 to disable caching)")
flags.DurationVarP(flagSet, &ci.FsCacheExpireInterval, "fs-cache-expire-interval", "", ci.FsCacheExpireInterval, "interval to check for expired remotes")
}
// ParseHeaders converts the strings passed in via the header flags into HTTPOptions

View File

@@ -4,62 +4,19 @@ package rc
import (
"context"
"errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
)
// GetFsNamed gets an fs.Fs named fsName either from the cache or creates it afresh
func GetFsNamed(ctx context.Context, in Params, fsName string) (f fs.Fs, err error) {
fsString, err := in.GetString(fsName)
if err != nil {
if !IsErrParamInvalid(err) {
return nil, err
}
fsString, err = getConfigMap(in, fsName)
if err != nil {
return nil, err
}
return nil, err
}
return cache.Get(ctx, fsString)
}
// getConfigMap gets the config as a map from in and converts it to a
// config string
//
// It uses the special parameters _name to name the remote and _root
// to make the root of the remote.
func getConfigMap(in Params, fsName string) (fsString string, err error) {
var m configmap.Simple
err = in.GetStruct(fsName, &m)
if err != nil {
return fsString, err
}
pop := func(key string) string {
value := m[key]
delete(m, key)
return value
}
Type := pop("type")
name := pop("_name")
root := pop("_root")
if name != "" {
fsString = name
} else if Type != "" {
fsString = ":" + Type
} else {
return fsString, errors.New(`couldn't find "type" or "_name" in JSON config definition`)
}
config := m.String()
if config != "" {
fsString += ","
fsString += config
}
fsString += ":"
fsString += root
return fsString, nil
return cache.Get(ctx, fsString)
}
// GetFs gets an fs.Fs named "fs" either from the cache or creates it afresh

View File

@@ -2,7 +2,6 @@ package rc
import (
"context"
"fmt"
"testing"
"github.com/rclone/rclone/fs/cache"
@@ -14,8 +13,6 @@ import (
func mockNewFs(t *testing.T) func() {
f := mockfs.NewFs(context.Background(), "mock", "mock")
cache.Put("/", f)
cache.Put("mock:/", f)
cache.Put(":mock:/", f)
return func() {
cache.Clear()
}
@@ -39,98 +36,6 @@ func TestGetFsNamed(t *testing.T) {
assert.Nil(t, f)
}
func TestGetFsNamedStruct(t *testing.T) {
defer mockNewFs(t)()
in := Params{
"potato": Params{
"type": "mock",
"_root": "/",
},
}
f, err := GetFsNamed(context.Background(), in, "potato")
require.NoError(t, err)
assert.NotNil(t, f)
in = Params{
"potato": Params{
"_name": "mock",
"_root": "/",
},
}
f, err = GetFsNamed(context.Background(), in, "potato")
require.NoError(t, err)
assert.NotNil(t, f)
}
func TestGetConfigMap(t *testing.T) {
for _, test := range []struct {
in Params
fsName string
wantFsString string
wantErr string
}{
{
in: Params{
"Fs": Params{},
},
fsName: "Fs",
wantErr: `couldn't find "type" or "_name" in JSON config definition`,
},
{
in: Params{
"Fs": Params{
"notastring": true,
},
},
fsName: "Fs",
wantErr: `cannot unmarshal bool`,
},
{
in: Params{
"Fs": Params{
"_name": "potato",
},
},
fsName: "Fs",
wantFsString: "potato:",
},
{
in: Params{
"Fs": Params{
"type": "potato",
},
},
fsName: "Fs",
wantFsString: ":potato:",
},
{
in: Params{
"Fs": Params{
"type": "sftp",
"_name": "potato",
"parameter": "42",
"parameter2": "true",
"_root": "/path/to/somewhere",
},
},
fsName: "Fs",
wantFsString: "potato,parameter='42',parameter2='true':/path/to/somewhere",
},
} {
gotFsString, gotErr := getConfigMap(test.in, test.fsName)
what := fmt.Sprintf("%+v", test.in)
assert.Equal(t, test.wantFsString, gotFsString, what)
if test.wantErr == "" {
assert.NoError(t, gotErr)
} else {
require.Error(t, gotErr)
assert.Contains(t, gotErr.Error(), test.wantErr)
}
}
}
func TestGetFs(t *testing.T) {
defer mockNewFs(t)()

View File

@@ -279,26 +279,3 @@ func (p Params) GetDuration(key string) (time.Duration, error) {
}
return duration, nil
}
// Error creates the standard response for an errored rc call using an
// rc.Param from a path, input Params, error and a suggested HTTP
// response code.
//
// It returns a Params and an updated status code
func Error(path string, in Params, err error, status int) (Params, int) {
// Adjust the status code for some well known errors
errOrig := errors.Cause(err)
switch {
case errOrig == fs.ErrorDirNotFound || errOrig == fs.ErrorObjectNotFound:
status = http.StatusNotFound
case IsErrParamInvalid(err) || IsErrParamNotFound(err):
status = http.StatusBadRequest
}
result := Params{
"status": status,
"error": err.Error(),
"input": in,
"path": path,
}
return result, status
}

View File

@@ -169,9 +169,21 @@ func (s *Server) Serve() error {
// writeError writes a formatted error to the output
func writeError(path string, in rc.Params, w http.ResponseWriter, err error, status int) {
fs.Errorf(nil, "rc: %q: error: %v", path, err)
params, status := rc.Error(path, in, err, status)
// Adjust the error return for some well known errors
errOrig := errors.Cause(err)
switch {
case errOrig == fs.ErrorDirNotFound || errOrig == fs.ErrorObjectNotFound:
status = http.StatusNotFound
case rc.IsErrParamInvalid(err) || rc.IsErrParamNotFound(err):
status = http.StatusBadRequest
}
w.WriteHeader(status)
err = rc.WriteJSON(w, params)
err = rc.WriteJSON(w, rc.Params{
"status": status,
"error": err.Error(),
"input": in,
"path": path,
})
if err != nil {
// can't return the error at this point
fs.Errorf(nil, "rc: writeError: failed to write JSON output from %#v: %v", in, err)

View File

@@ -12,7 +12,6 @@ Make TesTrun have a []string of flags to try - that then makes it generic
*/
import (
"context"
"flag"
"log"
"math/rand"
@@ -23,7 +22,6 @@ import (
"time"
_ "github.com/rclone/rclone/backend/all" // import all fs
"github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/lib/pacer"
)
@@ -72,7 +70,6 @@ func main() {
log.Println("test_all should be run from the root of the rclone source code")
log.Fatal(err)
}
configfile.LoadConfig(context.Background())
// Seed the random number generator
rand.Seed(time.Now().UTC().UnixNano())

31
lib/cache/cache.go vendored
View File

@@ -28,30 +28,6 @@ func New() *Cache {
}
}
// SetExpireDuration sets the interval at which things expire
//
// If it is less than or equal to 0 then things are never cached
func (c *Cache) SetExpireDuration(d time.Duration) *Cache {
c.expireDuration = d
return c
}
// returns true if we aren't to cache anything
func (c *Cache) noCache() bool {
return c.expireDuration <= 0
}
// SetExpireInterval sets the interval at which the cache expiry runs
//
// Set to 0 or a -ve number to disable
func (c *Cache) SetExpireInterval(d time.Duration) *Cache {
if d <= 0 {
d = 100 * 365 * 24 * time.Hour
}
c.expireInterval = d
return c
}
// cacheEntry is stored in the cache
type cacheEntry struct {
value interface{} // cached item
@@ -93,9 +69,7 @@ func (c *Cache) Get(key string, create CreateFunc) (value interface{}, err error
err: err,
}
c.mu.Lock()
if !c.noCache() {
c.cache[key] = entry
}
c.cache[key] = entry
}
defer c.mu.Unlock()
c.used(entry)
@@ -126,9 +100,6 @@ func (c *Cache) Unpin(key string) {
func (c *Cache) Put(key string, value interface{}) {
c.mu.Lock()
defer c.mu.Unlock()
if c.noCache() {
return
}
entry := &cacheEntry{
value: value,
key: key,

View File

@@ -100,7 +100,7 @@ func TestPut(t *testing.T) {
func TestCacheExpire(t *testing.T) {
c, create := setup(t)
c.SetExpireInterval(time.Millisecond)
c.expireInterval = time.Millisecond
assert.Equal(t, false, c.expireRunning)
_, err := c.Get("/", create)
@@ -127,31 +127,6 @@ func TestCacheExpire(t *testing.T) {
c.mu.Unlock()
}
func TestCacheNoExpire(t *testing.T) {
c, create := setup(t)
assert.False(t, c.noCache())
c.SetExpireDuration(0)
assert.Equal(t, false, c.expireRunning)
assert.True(t, c.noCache())
f, err := c.Get("/", create)
require.NoError(t, err)
require.NotNil(t, f)
c.mu.Lock()
assert.Equal(t, 0, len(c.cache))
c.mu.Unlock()
c.Put("/alien", "slime")
c.mu.Lock()
assert.Equal(t, 0, len(c.cache))
c.mu.Unlock()
}
func TestCachePin(t *testing.T) {
c, create := setup(t)