From d44957a09c9a6fe1e9bd5cdaf411b70ba652048f Mon Sep 17 00:00:00 2001
From: Nick Craig-Wood
Date: Wed, 24 Sep 2025 10:20:57 +0100
Subject: [PATCH] accounting: add SetMaxCompletedTransfers method to fix
 bisync race #8815

Before this change, bisync adjusted the global MaxCompletedTransfers
variable, which caused data races.

This change adds a SetMaxCompletedTransfers method and uses it in
bisync. The MaxCompletedTransfers global becomes the default. It can
still be changed externally if rclone is in use as a library, and the
commit history indicates that MaxCompletedTransfers was added for
exactly this purpose, so we try not to break it here.
---
 cmd/bisync/queue.go    |   6 +--
 fs/accounting/stats.go | 107 +++++++++++++++++++++++------------------
 2 files changed, 62 insertions(+), 51 deletions(-)

diff --git a/cmd/bisync/queue.go b/cmd/bisync/queue.go
index ed0257b5d..58588baef 100644
--- a/cmd/bisync/queue.go
+++ b/cmd/bisync/queue.go
@@ -245,10 +245,8 @@ func (b *bisyncRun) fastCopy(ctx context.Context, fsrc, fdst fs.Fs, files bilib.
 		}
 	}
 
-	b.SyncCI = fs.GetConfig(ctxCopy) // allows us to request graceful shutdown
-	if accounting.MaxCompletedTransfers != -1 {
-		accounting.MaxCompletedTransfers = -1 // we need a complete list in the event of graceful shutdown
-	}
+	b.SyncCI = fs.GetConfig(ctxCopy)                        // allows us to request graceful shutdown
+	accounting.Stats(ctxCopy).SetMaxCompletedTransfers(-1) // we need a complete list in the event of graceful shutdown
 	ctxCopy, b.CancelSync = context.WithCancel(ctxCopy)
 	b.testFn()
 	err := sync.Sync(ctxCopy, fdst, fsrc, b.opt.CreateEmptySrcDirs)
diff --git a/fs/accounting/stats.go b/fs/accounting/stats.go
index 6681764e5..fbc46bebb 100644
--- a/fs/accounting/stats.go
+++ b/fs/accounting/stats.go
@@ -22,48 +22,52 @@ const (
 	averageStopAfter = time.Minute
 )
 
-// MaxCompletedTransfers specifies maximum number of completed transfers in startedTransfers list
+// MaxCompletedTransfers specifies the default maximum number of
+// completed transfers in startedTransfers list. This can be adjusted
+// for a given StatsInfo by calling the SetMaxCompletedTransfers
+// method.
 var MaxCompletedTransfers = 100
 
 // StatsInfo accounts all transfers
 // N.B.: if this struct is modified, please remember to also update sum() function in stats_groups
 // to correctly count the updated fields
 type StatsInfo struct {
-	mu                  sync.RWMutex
-	ctx                 context.Context
-	ci                  *fs.ConfigInfo
-	bytes               int64
-	errors              int64
-	lastError           error
-	fatalError          bool
-	retryError          bool
-	retryAfter          time.Time
-	checks              int64
-	checking            *transferMap
-	checkQueue          int
-	checkQueueSize      int64
-	transfers           int64
-	transferring        *transferMap
-	transferQueue       int
-	transferQueueSize   int64
-	listed              int64
-	renames             int64
-	renameQueue         int
-	renameQueueSize     int64
-	deletes             int64
-	deletesSize         int64
-	deletedDirs         int64
-	inProgress          *inProgress
-	startedTransfers    []*Transfer   // currently active transfers
-	oldTimeRanges       timeRanges    // a merged list of time ranges for the transfers
-	oldDuration         time.Duration // duration of transfers we have culled
-	group               string
-	startTime           time.Time // the moment these stats were initialized or reset
-	average             averageValues
-	serverSideCopies    int64
-	serverSideCopyBytes int64
-	serverSideMoves     int64
-	serverSideMoveBytes int64
+	mu                    sync.RWMutex
+	ctx                   context.Context
+	ci                    *fs.ConfigInfo
+	bytes                 int64
+	errors                int64
+	lastError             error
+	fatalError            bool
+	retryError            bool
+	retryAfter            time.Time
+	checks                int64
+	checking              *transferMap
+	checkQueue            int
+	checkQueueSize        int64
+	transfers             int64
+	transferring          *transferMap
+	transferQueue         int
+	transferQueueSize     int64
+	listed                int64
+	renames               int64
+	renameQueue           int
+	renameQueueSize       int64
+	deletes               int64
+	deletesSize           int64
+	deletedDirs           int64
+	inProgress            *inProgress
+	startedTransfers      []*Transfer   // currently active transfers
+	oldTimeRanges         timeRanges    // a merged list of time ranges for the transfers
+	oldDuration           time.Duration // duration of transfers we have culled
+	group                 string
+	startTime             time.Time // the moment these stats were initialized or reset
+	average               averageValues
+	serverSideCopies      int64
+	serverSideCopyBytes   int64
+	serverSideMoves       int64
+	serverSideMoveBytes   int64
+	maxCompletedTransfers int
 }
 
 type averageValues struct {
@@ -81,17 +85,26 @@
 func NewStats(ctx context.Context) *StatsInfo {
 	ci := fs.GetConfig(ctx)
 	s := &StatsInfo{
-		ctx:          ctx,
-		ci:           ci,
-		checking:     newTransferMap(ci.Checkers, "checking"),
-		transferring: newTransferMap(ci.Transfers, "transferring"),
-		inProgress:   newInProgress(ctx),
-		startTime:    time.Now(),
-		average:      averageValues{},
+		ctx:                   ctx,
+		ci:                    ci,
+		checking:              newTransferMap(ci.Checkers, "checking"),
+		transferring:          newTransferMap(ci.Transfers, "transferring"),
+		inProgress:            newInProgress(ctx),
+		startTime:             time.Now(),
+		average:               averageValues{},
+		maxCompletedTransfers: MaxCompletedTransfers,
 	}
 	return s
 }
 
+// SetMaxCompletedTransfers sets the maximum number of completed transfers to keep.
+func (s *StatsInfo) SetMaxCompletedTransfers(n int) *StatsInfo {
+	s.mu.Lock()
+	s.maxCompletedTransfers = n
+	s.mu.Unlock()
+	return s
+}
+
 // RemoteStats returns stats for rc
 //
 // If short is true then the transfers and checkers won't be added.
@@ -914,13 +927,14 @@ func (s *StatsInfo) RemoveTransfer(transfer *Transfer) {
 // PruneTransfers makes sure there aren't too many old transfers by removing
 // a single finished transfer. Returns true if it removed a transfer.
 func (s *StatsInfo) PruneTransfers() bool {
-	if MaxCompletedTransfers < 0 {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.maxCompletedTransfers < 0 {
 		return false
 	}
 	removed := false
-	s.mu.Lock()
 	// remove a transfer from the start if we are over quota
-	if len(s.startedTransfers) > MaxCompletedTransfers+s.ci.Transfers {
+	if len(s.startedTransfers) > s.maxCompletedTransfers+s.ci.Transfers {
 		for i, tr := range s.startedTransfers {
 			if tr.IsDone() {
 				s._removeTransfer(tr, i)
@@ -929,7 +943,6 @@ func (s *StatsInfo) PruneTransfers() bool {
 			}
 		}
 	}
-	s.mu.Unlock()
 	return removed
 }
 
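Note for library users: after this patch there are two knobs. The
MaxCompletedTransfers global sets the default that every new StatsInfo
copies at construction time, and SetMaxCompletedTransfers overrides it
for one StatsInfo under its lock. A minimal sketch of how a program
embedding rclone might use both (accounting.Stats and the import path
are from the rclone source; the chosen values are illustrative only):

    package main

    import (
    	"context"

    	"github.com/rclone/rclone/fs/accounting"
    )

    func main() {
    	// Raise the process-wide default. Only StatsInfo values created
    	// after this point pick it up, since NewStats copies the global.
    	accounting.MaxCompletedTransfers = 500

    	// Override the limit for a single stats group without touching
    	// the global, so concurrent syncs are unaffected. -1 keeps every
    	// completed transfer, as bisync needs for graceful shutdown.
    	ctx := context.Background()
    	accounting.Stats(ctx).SetMaxCompletedTransfers(-1)
    }

Because the override is per StatsInfo rather than a write to the global,
two syncs running in the same process can no longer race on it.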