
bisync: full support for comparing checksum, size, modtime - fixes #5679 fixes #5683 fixes #5684 fixes #5675

Before this change, bisync could only detect changes based on modtime, and
would refuse to run if either path lacked modtime support. This made bisync
unavailable for many of rclone's backends. Additionally, bisync did not account
for the Fs's precision when comparing modtimes, meaning that they could only be
reliably compared within the same side -- not against the opposite side. Size
and checksum (even when available) were ignored completely for deltas.

After this change, bisync now fully supports comparing based on any combination
of size, modtime, and checksum, lifting the prior restriction on backends
without modtime support. The comparison logic considers the backend's
precision, hash types, and other features as appropriate.
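
As a rough sketch of the precision handling described above, a modtime comparison
that tolerates the coarser of the two backends' precisions might look like this
(modtimeEqual is a hypothetical helper, not the actual bisync code):

```go
package main

import (
	"fmt"
	"time"
)

// modtimeEqual is a hypothetical helper (not the actual bisync code): it
// treats two modtimes as equal when they differ by no more than the coarser
// of the two backends' reported precisions, so a 1s-precision remote can be
// compared against a nanosecond-precision one without false "changed" results.
func modtimeEqual(t1, t2 time.Time, prec1, prec2 time.Duration) bool {
	tolerance := prec1
	if prec2 > tolerance {
		tolerance = prec2
	}
	diff := t1.Sub(t2)
	if diff < 0 {
		diff = -diff
	}
	return diff <= tolerance
}

func main() {
	a := time.Date(2023, 11, 30, 19, 44, 38, 0, time.UTC)
	b := a.Add(400 * time.Millisecond)
	fmt.Println(modtimeEqual(a, b, time.Second, time.Nanosecond))     // true: within the 1s tolerance
	fmt.Println(modtimeEqual(a, b, time.Nanosecond, time.Nanosecond)) // false: both sides are precise
}
```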

The comparison features optionally use a new --compare flag (which takes any
combination of size,modtime,checksum) and even support some combinations not
otherwise supported in `sync` (like comparing all three at the same time). By
default (without the --compare flag), bisync inherits the same comparison
options as `sync` (that is: size and modtime by default, unless modified with
flags such as --checksum or --size-only). If the --compare flag is set, it
overrides these defaults.
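
For illustration only, that precedence could be resolved roughly like this
(CompareOpt and resolveCompare are hypothetical stand-ins, not bisync's real
option plumbing):

```go
package main

import (
	"fmt"
	"strings"
)

// CompareOpt is a hypothetical stand-in for bisync's comparison settings.
type CompareOpt struct {
	Size, Modtime, Checksum bool
}

func resolveCompare(compareFlag string, checksumFlag, sizeOnlyFlag bool) CompareOpt {
	// An explicit --compare value overrides everything else.
	if compareFlag != "" {
		var opt CompareOpt
		for _, item := range strings.Split(compareFlag, ",") {
			switch strings.TrimSpace(item) {
			case "size":
				opt.Size = true
			case "modtime":
				opt.Modtime = true
			case "checksum":
				opt.Checksum = true
			}
		}
		return opt
	}
	// Otherwise fall back to the usual sync-style defaults.
	switch {
	case checksumFlag:
		return CompareOpt{Size: true, Checksum: true}
	case sizeOnlyFlag:
		return CompareOpt{Size: true}
	default:
		return CompareOpt{Size: true, Modtime: true}
	}
}

func main() {
	fmt.Printf("%+v\n", resolveCompare("size,modtime,checksum", false, false)) // all three
	fmt.Printf("%+v\n", resolveCompare("", true, false))                       // --checksum defaults
}
```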

If --compare includes checksum and both remotes support checksums but have no
hash types in common with each other, checksums will be considered only for
comparisons within the same side (to determine what has changed since the prior
sync), but not for comparisons against the opposite side. If one side supports
checksums and the other does not, checksums will only be considered on the side
that supports them. When comparing with checksum and/or size without modtime,
bisync cannot determine whether a file is newer or older -- only whether it is
changed or unchanged. (If it is changed on both sides, bisync still does the
standard equality-check to avoid declaring a sync conflict unless it absolutely
has to.)
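
A much-simplified sketch of that hash-type negotiation (hashSet and commonHash
are hypothetical; rclone's real hash handling is richer):

```go
package main

import "fmt"

// hashSet is a hypothetical simplification of a backend's supported hash types.
type hashSet []string

// commonHash returns the first hash type both sides support, or "" if none.
func commonHash(side1, side2 hashSet) string {
	for _, h1 := range side1 {
		for _, h2 := range side2 {
			if h1 == h2 {
				return h1
			}
		}
	}
	return ""
}

func main() {
	path1 := hashSet{"md5"}  // e.g. a backend that only exposes MD5
	path2 := hashSet{"sha1"} // e.g. a backend that only exposes SHA-1
	if h := commonHash(path1, path2); h != "" {
		fmt.Println("compare checksums across sides using", h)
	} else {
		// No common hash type: each side still uses its own checksum to detect
		// changes since the prior sync, but the cross-side comparison has to
		// fall back to the other criteria (size and/or modtime).
		fmt.Println("checksums considered within each side only")
	}
}
```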

Also included are some new flags to customize the checksum comparison behavior
on backends where hashes are slow or unavailable. --no-slow-hash and
--slow-hash-sync-only allow selectively ignoring checksums on backends (such as
local) where they are slow. --download-hash allows computing checksums by
downloading when (and only when) they are otherwise unavailable. This probably
won't be practical with large files, but can be a good choice for syncing
small-but-important files with maximum accuracy (for example, a source code
repo on a crypt remote). An additional advantage over methods like cryptcheck
is that the original file is not required for comparison (for example,
--download-hash can be used to bisync two different crypt remotes with
different passwords).
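
Conceptually, a download-based hash is just the object's contents streamed
through a hasher. A minimal sketch using only the Go standard library
(downloadHash is a hypothetical helper; the real flag also has to open and read
the object from the remote):

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

// downloadHash streams an object's contents through an MD5 hasher, the way a
// checksum can be computed when the backend cannot supply one natively. For
// large files this costs a full download, which is why the approach suits
// small-but-important files best.
func downloadHash(r io.Reader) (string, error) {
	h := md5.New()
	if _, err := io.Copy(h, r); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	// Stand-in for an object opened from a remote.
	body := strings.NewReader("hello bisync")
	sum, err := downloadHash(body)
	if err != nil {
		panic(err)
	}
	fmt.Println(sum)
}
```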

Additionally, all of the above are now considered during the final --check-sync
for much-improved accuracy (before this change, it only compared filenames!)
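
The checkSync hunk in the diff below adds a fileInfoEqual check for exactly
this; a much-simplified, hypothetical version of that kind of per-file
comparison:

```go
package main

import "fmt"

// fileInfo and infoEqual are hypothetical simplifications of the kind of
// per-file check the new fileInfoEqual performs during --check-sync: instead
// of matching names only, the attributes actually in use for this run are
// compared as well.
type fileInfo struct {
	Size    int64
	Modtime int64 // unix seconds, for brevity
	Hash    string
}

func infoEqual(a, b fileInfo, checkSize, checkModtime, checkHash bool) bool {
	if checkSize && a.Size != b.Size {
		return false
	}
	if checkModtime && a.Modtime != b.Modtime {
		return false
	}
	if checkHash && a.Hash != b.Hash {
		return false
	}
	return true
}

func main() {
	f1 := fileInfo{Size: 42, Modtime: 1701391478, Hash: "abc"}
	f2 := fileInfo{Size: 42, Modtime: 1701391478, Hash: "def"}
	fmt.Println(infoEqual(f1, f2, true, true, false)) // true: hash not in use
	fmt.Println(infoEqual(f1, f2, true, true, true))  // false: hashes differ
}
```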

Many other details are explained in the included docs.
nielash committed 2023-11-30 19:44:38 -05:00
parent d8e07bfd8e
commit b4216648e4
308 changed files with 5469 additions and 3243 deletions

@@ -55,6 +55,7 @@ type queues struct {
 // Bisync handles lock file, performs bisync run and checks exit status
 func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
+	defer resetGlobals()
 	opt := *optArg // ensure that input is never changed
 	b := &bisyncRun{
 		fs1: fs1,
@@ -71,13 +72,9 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
 	ci := fs.GetConfig(ctx)
 	opt.OrigBackupDir = ci.BackupDir
-	if !opt.DryRun && !opt.Force {
-		if fs1.Precision() == fs.ModTimeNotSupported {
-			return errors.New("modification time support is missing on path1")
-		}
-		if fs2.Precision() == fs.ModTimeNotSupported {
-			return errors.New("modification time support is missing on path2")
-		}
-	}
+	err = b.setCompareDefaults(ctx)
+	if err != nil {
+		return err
+	}
 	if b.workDir, err = filepath.Abs(opt.Workdir); err != nil {
@@ -389,10 +386,12 @@ func (b *bisyncRun) resync(octx, fctx context.Context) error {
 	var ls2 = newFileList()
 	err = ls1.save(fctx, b.newListing1)
 	if err != nil {
+		b.handleErr(ls1, "error saving ls1 from resync", err, true, true)
 		b.abort = true
 	}
 	err = ls2.save(fctx, b.newListing2)
 	if err != nil {
+		b.handleErr(ls2, "error saving ls2 from resync", err, true, true)
 		b.abort = true
 	}
@@ -519,6 +518,10 @@ func (b *bisyncRun) checkSync(listing1, listing2 string) error {
 		if !files2.has(file) && !files2.has(b.aliases.Alias(file)) {
 			b.indent("ERROR", file, "Path1 file not found in Path2")
 			ok = false
-		}
+		} else {
+			if !b.fileInfoEqual(file, files2.getTryAlias(file, b.aliases.Alias(file)), files1, files2) {
+				ok = false
+			}
+		}
 	}
 	for _, file := range files2.list {
@@ -527,6 +530,7 @@ func (b *bisyncRun) checkSync(listing1, listing2 string) error {
 			ok = false
 		}
 	}
 	if !ok {
 		return errors.New("path1 and path2 are out of sync, run --resync to recover")
 	}
@@ -583,6 +587,7 @@ func (b *bisyncRun) handleErr(o interface{}, msg string, err error, critical, re
 	}
 	if critical {
 		b.critical = true
+		b.abort = true
 		fs.Errorf(o, "%s: %v", msg, err)
 	} else {
 		fs.Infof(o, "%s: %v", msg, err)
@@ -603,3 +608,25 @@ func (b *bisyncRun) setBackupDir(ctx context.Context, destPath int) context.Cont
 	fs.Debugf(ci.BackupDir, "updated backup-dir for Path%d", destPath)
 	return ctx
 }
+
+// mainly to make sure tests don't interfere with each other when running more than one
+func resetGlobals() {
+	downloadHash = false
+	logger = operations.NewLoggerOpt()
+	ignoreListingChecksum = false
+	ignoreListingModtime = false
+	hashTypes = nil
+	queueCI = nil
+	hashType = 0
+	fsrc, fdst = nil, nil
+	fcrypt = nil
+	Opt = Options{}
+	once = gosync.Once{}
+	downloadHashWarn = gosync.Once{}
+	firstDownloadHash = gosync.Once{}
+	ls1 = newFileList()
+	ls2 = newFileList()
+	err = nil
+	firstErr = nil
+	marchCtx = nil
+}