1
0
mirror of https://github.com/rclone/rclone.git synced 2026-02-03 18:23:30 +00:00

Compare commits

..

6 Commits

Author SHA1 Message Date
Nick Craig-Wood
6f53463c31 vfs: implement LRU-SP cache for more intelligent cache replacement - FIXME WIP
FIXME this needs docs, and maybe needs to be configurable.
FIXME needs tests also

See: https://forum.rclone.org/t/rclone-vfs-cache-maximum-size-per-file/30037
Fixes: #4110
2022-11-16 11:12:31 +00:00
Nick Craig-Wood
705e8f2fe0 smb: fix Failed to sync: context canceled at the end of syncs
Before this change we were putting connections into the connection
pool which had a local context in.

This meant that when the operation had finished the context was
cancelled and the connection became unusable.

See: https://forum.rclone.org/t/failed-to-sync-context-canceled/34017/
2022-11-16 10:55:25 +00:00
Nick Craig-Wood
591fc3609a vfs: fix deadlock caused by cache cleaner and upload finishing
Before this patch a deadlock could occur if the cache cleaner was
running when an object upload finished.

This fixes the problem by delaying marking the object as clean until
we have notified the VFS layer. This means that the cache cleaner
won't consider the object until **after** the VFS layer has been
notified, thus avoiding the deadlock.

See: https://forum.rclone.org/t/rclone-mount-deadlock-when-dir-cache-time-strikes/33486/
2022-11-15 18:01:36 +00:00
Nick Craig-Wood
b4a3d1b9ed Add asdffdsazqqq to contributors 2022-11-15 18:01:36 +00:00
asdffdsazqqq
84219b95ab docs: faq: how to use a proxy server that requires a username and password - fixes #6565 2022-11-15 17:58:43 +00:00
Nick Craig-Wood
2c78f56d48 webdav: fix Move/Copy/DirMove when using -server-side-across-configs
Before this change, when using -server-side-across-configs rclone
would direct Move/Copy/DirMove to the destination server.

However this should be directed to the source server. This is a little
unclear in the RFC, but the name of the parameter "Destination:" seems
clear and this is how dCache and Rucio have implemented it.

See: https://forum.rclone.org/t/webdav-copy-request-implemented-incorrectly/34072/
2022-11-15 09:51:30 +00:00
7 changed files with 71 additions and 18 deletions

View File

@@ -103,6 +103,10 @@ func (f *Fs) getSessions() int32 {
// Open a new connection to the SMB server.
func (f *Fs) newConnection(ctx context.Context, share string) (c *conn, err error) {
// As we are pooling these connections we need to decouple
// them from the current context
ctx = context.Background()
c, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port)
if err != nil {
return nil, fmt.Errorf("couldn't connect SMB: %w", err)

View File

@@ -991,6 +991,7 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
}
return nil, fs.ErrorCantMove
}
srcFs := srcObj.fs
dstPath := f.filePath(remote)
err := f.mkParentDir(ctx, dstPath)
if err != nil {
@@ -1013,9 +1014,10 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
if f.useOCMtime {
opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", src.ModTime(ctx).Unix())
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return f.shouldRetry(ctx, resp, err)
// Direct the MOVE/COPY to the source server
err = srcFs.pacer.Call(func() (bool, error) {
resp, err = srcFs.srv.Call(ctx, &opts)
return srcFs.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("Copy call failed: %w", err)
@@ -1109,9 +1111,10 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
"Overwrite": "F",
},
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return f.shouldRetry(ctx, resp, err)
// Direct the MOVE/COPY to the source server
err = srcFs.pacer.Call(func() (bool, error) {
resp, err = srcFs.srv.Call(ctx, &opts)
return srcFs.shouldRetry(ctx, resp, err)
})
if err != nil {
return fmt.Errorf("DirMove MOVE call failed: %w", err)

View File

@@ -663,3 +663,4 @@ put them back in again.` >}}
* techknowlogick <matti@mdranta.net>
* rkettelerij <richard@mindloops.nl>
* Kamui <fin-kamui@pm.me>
* asdffdsazqqq <90116442+asdffdsazqqq@users.noreply.github.com>

View File

@@ -108,6 +108,14 @@ possibilities. So, on Linux, you may end up with code similar to
export HTTP_PROXY=$http_proxy
export HTTPS_PROXY=$http_proxy
Note: If the proxy server requires a username and password, then use
export http_proxy=http://username:password@proxyserver:12345
export https_proxy=$http_proxy
export HTTP_PROXY=$http_proxy
export HTTPS_PROXY=$http_proxy
The `NO_PROXY` variable allows you to disable the proxy for specific hosts.
Hosts must be comma separated, and can contain domains or parts.
For instance "foo.com" also matches "bar.foo.com".

View File

@@ -618,9 +618,11 @@ func (c *Cache) purgeClean(quota int64) {
}
// Make a slice of clean cache files
now := time.Now()
for _, item := range c.item {
if !item.IsDirty() {
items = append(items, item)
item.updateScore(now)
}
}
@@ -709,9 +711,11 @@ func (c *Cache) purgeOverQuota(quota int64) {
var items Items
// Make a slice of unused files
now := time.Now()
for _, item := range c.item {
if !item.inUse() {
items = append(items, item)
item.updateScore(now)
}
}

View File

@@ -288,10 +288,7 @@ func (dls *Downloaders) _ensureDownloader(r ranges.Range) (err error) {
// defer log.Trace(dls.src, "r=%v", r)("err=%v", &err)
// The window includes potentially unread data in the buffer
//window := int64(fs.GetConfig(context.TODO()).BufferSize)
// FIXME test disable --buffer-size window
window := int64(0)
window := int64(fs.GetConfig(context.TODO()).BufferSize)
// Increase the read range by the read ahead if set
if dls.opt.ReadAhead > 0 {

View File

@@ -59,6 +59,7 @@ type Item struct {
mu sync.Mutex // protect the variables
cond sync.Cond // synchronize with cache cleaner
name string // name in the VFS
score float64 // score for likelihood of reaping from cache (bigger is more likely)
opens int // number of times file is open
downloaders *downloaders.Downloaders // a record of the downloaders in action - may be nil
o fs.Object // object we are caching - may be nil
@@ -75,6 +76,7 @@ type Info struct {
ModTime time.Time // last time file was modified
ATime time.Time // last time file was accessed
Size int64 // size of the file
Opens int64 // number of times the file has been opened
Rs ranges.Ranges // which parts of the file are present
Fingerprint string // fingerprint of remote object
Dirty bool // set if the backing file has been modified
@@ -103,6 +105,8 @@ func (rr ResetResult) String() string {
func (v Items) Len() int { return len(v) }
func (v Items) Swap(i, j int) { v[i], v[j] = v[j], v[i] }
// Less implements the caching strategy of the VFS cache.
func (v Items) Less(i, j int) bool {
if i == j {
return false
@@ -114,7 +118,7 @@ func (v Items) Less(i, j int) bool {
jItem.mu.Lock()
defer jItem.mu.Unlock()
return iItem.info.ATime.Before(jItem.info.ATime)
return iItem.score > jItem.score
}
// clean the item after its cache file has been deleted
@@ -179,6 +183,32 @@ func (item *Item) getATime() time.Time {
return item.info.ATime
}
// update the score for the item and return it
//
// Bigger scores mean more likely to be reaped
func (item *Item) updateScore(now time.Time) float64 {
item.mu.Lock()
defer item.mu.Unlock()
accessedAgo := now.Sub(item.info.ATime).Seconds()
// For LRU cache, score is just how long ago it was accessed
// item.score = accessedAgo
// For LRU-SP cache score is size * accessedAgo / opens
opens := float64(item.opens)
if opens <= 1 {
opens = 1
}
size := float64(item.info.Rs.Size())
if size < 4096 {
size = 4096 // minimum size is 1 disk block ish
}
item.score = size * accessedAgo / opens
return item.score
}
// getDiskSize returns the size on disk (approximately) of the item
//
// We return the sizes of the chunks we have fetched, however there is
@@ -526,6 +556,7 @@ func (item *Item) open(o fs.Object) (err error) {
defer item.mu.Unlock()
item.info.ATime = time.Now()
item.info.Opens++
osPath, err := item.c.createItemDir(item.name) // No locking in Cache
if err != nil {
@@ -606,20 +637,25 @@ func (item *Item) _store(ctx context.Context, storeFn StoreFn) (err error) {
item._updateFingerprint()
}
item.info.Dirty = false
err = item._save()
if err != nil {
fs.Errorf(item.name, "vfs cache: failed to write metadata file: %v", err)
}
// Write the object back to the VFS layer before we mark it as
// clean, otherwise it will become eligible for removal which
// can cause a deadlock
if storeFn != nil && item.o != nil {
fs.Debugf(item.name, "vfs cache: writeback object to VFS layer")
// Write the object back to the VFS layer as last
// thing we do with mutex unlocked
// Write the object back to the VFS layer last with mutex unlocked
o := item.o
item.mu.Unlock()
storeFn(o)
item.mu.Lock()
}
// Show item is clean and is eligible for cache removal
item.info.Dirty = false
err = item._save()
if err != nil {
fs.Errorf(item.name, "vfs cache: failed to write metadata file: %v", err)
}
return nil
}