1
0
mirror of https://github.com/rclone/rclone.git synced 2026-02-23 16:13:00 +00:00

Compare commits

..

1 Commits

Author SHA1 Message Date
Nick Craig-Wood
4660f5f15d dropbox: make across config server side Move/Copy/DirMove fallback to copy
If we are doing a cross remote transfer then attempting a
Move/Copy/DirMove where we don't have permission gives
`from_lookup/not_found` errors.

This patch notices that error and, only if we are doing a cross remote
transfer, engages the fallback where the file is streamed.

See: https://forum.rclone.org/t/dropbox-io-error-movedir-failed-from-lookup-not-found-when-try-to-move-copy-works/34088
2022-11-15 12:39:17 +00:00
6 changed files with 37 additions and 63 deletions

View File

@@ -1078,6 +1078,15 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return shouldRetry(ctx, err)
})
if err != nil {
switch e := err.(type) {
case files.CopyV2APIError:
// If we are doing a cross remote transfer then from_lookup/not_found indicates we
// don't have permission to read the source, so engage the slow path
if srcObj.fs != f && e.EndpointError != nil && e.EndpointError.FromLookup != nil && e.EndpointError.FromLookup.Tag == files.LookupErrorNotFound {
fs.Debugf(srcObj, "Copy failed attempting fallback: %v", err)
return nil, fs.ErrorCantCopy
}
}
return nil, fmt.Errorf("copy failed: %w", err)
}
@@ -1139,6 +1148,15 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return shouldRetry(ctx, err)
})
if err != nil {
switch e := err.(type) {
case files.MoveV2APIError:
// If we are doing a cross remote transfer then from_lookup/not_found indicates we
// don't have permission to read the source, so engage the slow path
if srcObj.fs != f && e.EndpointError != nil && e.EndpointError.FromLookup != nil && e.EndpointError.FromLookup.Tag == files.LookupErrorNotFound {
fs.Debugf(srcObj, "Move failed attempting fallback: %v", err)
return nil, fs.ErrorCantMove
}
}
return nil, fmt.Errorf("move failed: %w", err)
}
@@ -1257,6 +1275,15 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return shouldRetry(ctx, err)
})
if err != nil {
switch e := err.(type) {
case files.MoveV2APIError:
// If we are doing a cross remote transfer then from_lookup/not_found indicates we
// don't have permission to read the source, so engage the slow path
if srcFs != f && e.EndpointError != nil && e.EndpointError.FromLookup != nil && e.EndpointError.FromLookup.Tag == files.LookupErrorNotFound {
fs.Debugf(srcFs, "DirMove failed attempting fallback: %v", err)
return fs.ErrorCantDirMove
}
}
return fmt.Errorf("MoveDir failed: %w", err)
}

View File

@@ -103,10 +103,6 @@ func (f *Fs) getSessions() int32 {
// Open a new connection to the SMB server.
func (f *Fs) newConnection(ctx context.Context, share string) (c *conn, err error) {
// As we are pooling these connections we need to decouple
// them from the current context
ctx = context.Background()
c, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port)
if err != nil {
return nil, fmt.Errorf("couldn't connect SMB: %w", err)

View File

@@ -663,4 +663,3 @@ put them back in again.` >}}
* techknowlogick <matti@mdranta.net>
* rkettelerij <richard@mindloops.nl>
* Kamui <fin-kamui@pm.me>
* asdffdsazqqq <90116442+asdffdsazqqq@users.noreply.github.com>

View File

@@ -108,14 +108,6 @@ possibilities. So, on Linux, you may end up with code similar to
export HTTP_PROXY=$http_proxy
export HTTPS_PROXY=$http_proxy
Note: If the proxy server requires a username and password, then use
export http_proxy=http://username:password@proxyserver:12345
export https_proxy=$http_proxy
export HTTP_PROXY=$http_proxy
export HTTPS_PROXY=$http_proxy
The `NO_PROXY` allows you to disable the proxy for specific hosts.
Hosts must be comma separated, and can contain domains or parts.
For instance "foo.com" also matches "bar.foo.com".

View File

@@ -618,11 +618,9 @@ func (c *Cache) purgeClean(quota int64) {
}
// Make a slice of clean cache files
now := time.Now()
for _, item := range c.item {
if !item.IsDirty() {
items = append(items, item)
item.updateScore(now)
}
}
@@ -711,11 +709,9 @@ func (c *Cache) purgeOverQuota(quota int64) {
var items Items
// Make a slice of unused files
now := time.Now()
for _, item := range c.item {
if !item.inUse() {
items = append(items, item)
item.updateScore(now)
}
}

View File

@@ -59,7 +59,6 @@ type Item struct {
mu sync.Mutex // protect the variables
cond sync.Cond // synchronize with cache cleaner
name string // name in the VFS
score float64 // score for likelihood of reaping from cache (bigger is more likely)
opens int // number of times file is open
downloaders *downloaders.Downloaders // a record of the downloaders in action - may be nil
o fs.Object // object we are caching - may be nil
@@ -76,7 +75,6 @@ type Info struct {
ModTime time.Time // last time file was modified
ATime time.Time // last time file was accessed
Size int64 // size of the file
Opens int64 // number of times the file has been opened
Rs ranges.Ranges // which parts of the file are present
Fingerprint string // fingerprint of remote object
Dirty bool // set if the backing file has been modified
@@ -105,8 +103,6 @@ func (rr ResetResult) String() string {
func (v Items) Len() int { return len(v) }
func (v Items) Swap(i, j int) { v[i], v[j] = v[j], v[i] }
// Less implements the caching strategy of the VFS cache.
func (v Items) Less(i, j int) bool {
if i == j {
return false
@@ -118,7 +114,7 @@ func (v Items) Less(i, j int) bool {
jItem.mu.Lock()
defer jItem.mu.Unlock()
return iItem.score > jItem.score
return iItem.info.ATime.Before(jItem.info.ATime)
}
// clean the item after its cache file has been deleted
@@ -183,32 +179,6 @@ func (item *Item) getATime() time.Time {
return item.info.ATime
}
// update the score for the item and return it
//
// Bigger scores mean more likely to be reaped
func (item *Item) updateScore(now time.Time) float64 {
item.mu.Lock()
defer item.mu.Unlock()
accessedAgo := now.Sub(item.info.ATime).Seconds()
// For LRU cache, score is just how long ago it was accessed
// item.score = accessedAgo
// For LRU-SP cache score is size * accessedAgo / opens
opens := float64(item.opens)
if opens <= 1 {
opens = 1
}
size := float64(item.info.Rs.Size())
if size < 4096 {
size = 4096 // minimum size is 1 disk block ish
}
item.score = size * accessedAgo / opens
return item.score
}
// getDiskSize returns the size on disk (approximately) of the item
//
// We return the sizes of the chunks we have fetched, however there is
@@ -556,7 +526,6 @@ func (item *Item) open(o fs.Object) (err error) {
defer item.mu.Unlock()
item.info.ATime = time.Now()
item.info.Opens++
osPath, err := item.c.createItemDir(item.name) // No locking in Cache
if err != nil {
@@ -637,25 +606,20 @@ func (item *Item) _store(ctx context.Context, storeFn StoreFn) (err error) {
item._updateFingerprint()
}
// Write the object back to the VFS layer before we mark it as
// clean, otherwise it will become eligible for removal which
// can cause a deadlock
if storeFn != nil && item.o != nil {
fs.Debugf(item.name, "vfs cache: writeback object to VFS layer")
// Write the object back to the VFS layer last with mutex unlocked
o := item.o
item.mu.Unlock()
storeFn(o)
item.mu.Lock()
}
// Show item is clean and is elegible for cache removal
item.info.Dirty = false
err = item._save()
if err != nil {
fs.Errorf(item.name, "vfs cache: failed to write metadata file: %v", err)
}
if storeFn != nil && item.o != nil {
fs.Debugf(item.name, "vfs cache: writeback object to VFS layer")
// Write the object back to the VFS layer as last
// thing we do with mutex unlocked
o := item.o
item.mu.Unlock()
storeFn(o)
item.mu.Lock()
}
return nil
}