mirror of https://github.com/rclone/rclone.git
bisync: full support for comparing checksum, size, modtime - fixes #5679 fixes #5683 fixes #5684 fixes #5675
Before this change, bisync could only detect changes based on modtime, and would refuse to run if either path lacked modtime support. This made bisync unavailable for many of rclone's backends. Additionally, bisync did not account for the Fs's precision when comparing modtimes, meaning that they could only be reliably compared within the same side -- not against the opposite side. Size and checksum (even when available) were ignored completely for deltas.

After this change, bisync now fully supports comparing based on any combination of size, modtime, and checksum, lifting the prior restriction on backends without modtime support. The comparison logic considers the backend's precision, hash types, and other features as appropriate. The comparison options can be set with a new --compare flag (which takes any combination of size,modtime,checksum) and even support some combinations not otherwise supported in `sync` (like comparing all three at the same time.)

By default (without the --compare flag), bisync inherits the same comparison options as `sync` (that is: size and modtime by default, unless modified with flags such as --checksum or --size-only.) If the --compare flag is set, it will override these defaults.

If --compare includes checksum and both remotes support checksums but have no hash types in common with each other, checksums will be considered only for comparisons within the same side (to determine what has changed since the prior sync), but not for comparisons against the opposite side. If one side supports checksums and the other does not, checksums will only be considered on the side that supports them.

When comparing with checksum and/or size without modtime, bisync cannot determine whether a file is newer or older -- only whether it is changed or unchanged. (If it is changed on both sides, bisync still does the standard equality-check to avoid declaring a sync conflict unless it absolutely has to.)

Also included are some new flags to customize the checksum comparison behavior on backends where hashes are slow or unavailable. --no-slow-hash and --slow-hash-sync-only allow selectively ignoring checksums on backends such as local where they are slow. --download-hash allows computing them by downloading when (and only when) they're otherwise not available. Of course, this option probably won't be practical with large files, but may be a good option for syncing small-but-important files with maximum accuracy (for example, a source code repo on a crypt remote.) An additional advantage over methods like cryptcheck is that the original file is not required for comparison (for example, --download-hash can be used to bisync two different crypt remotes with different passwords.)

Additionally, all of the above are now considered during the final --check-sync for much-improved accuracy (before this change, it only compared filenames!)

Many other details are explained in the included docs.
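
As a quick illustration of how these flags might combine (command lines are illustrative only; remote names and paths are placeholders, not taken from this commit):

    rclone bisync remote1:path1 remote2:path2 --compare size,modtime,checksum
    rclone bisync remote1:path1 remote2:path2 --compare checksum --download-hash
    rclone bisync /local/path remote:path --compare size,checksum --slow-hash-sync-only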
@@ -13,6 +13,7 @@ import (
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/filter"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/lib/terminal"
	"golang.org/x/text/unicode/norm"
)

@@ -24,14 +25,17 @@ const (
	deltaNew delta = 1 << iota
	deltaNewer
	deltaOlder
	deltaSize
	deltaLarger
	deltaSmaller
	deltaHash
	deltaDeleted
)

const (
	deltaModified delta = deltaNewer | deltaOlder | deltaSize | deltaHash | deltaDeleted
	deltaOther delta = deltaNew | deltaNewer | deltaOlder
	deltaSize delta = deltaLarger | deltaSmaller
	deltaTime delta = deltaNewer | deltaOlder
	deltaModified delta = deltaTime | deltaSize | deltaHash
	deltaOther delta = deltaNew | deltaTime | deltaSize | deltaHash
)
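
To make the bitmask easier to follow, here is a small standalone sketch (not part of the diff) built from the new composite masks in the hunk above; the body of the is helper is assumed to be a simple bitwise test, since the diff only shows its signature:

	package main

	import "fmt"

	// delta mirrors the bitmask in the hunk above: one bit per kind of
	// change, plus composite masks built from those bits.
	type delta uint8

	const (
		deltaZero delta = 0
		deltaNew  delta = 1 << iota
		deltaNewer
		deltaOlder
		deltaLarger
		deltaSmaller
		deltaHash
		deltaDeleted
	)

	const (
		deltaSize     delta = deltaLarger | deltaSmaller
		deltaTime     delta = deltaNewer | deltaOlder
		deltaModified delta = deltaTime | deltaSize | deltaHash
	)

	// is reports whether d has any of the bits in cond set (assumed semantics).
	func (d delta) is(cond delta) bool {
		return d&cond != 0
	}

	func main() {
		d := deltaZero
		d |= deltaNewer  // modtime moved forward since the prior sync
		d |= deltaLarger // size grew since the prior sync
		fmt.Println(d.is(deltaTime), d.is(deltaSize), d.is(deltaHash), d.is(deltaModified))
		// prints: true true false true
	}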

func (d delta) is(cond delta) bool {

@@ -41,6 +45,8 @@ func (d delta) is(cond delta) bool {
// deltaSet
type deltaSet struct {
	deltas map[string]delta
	size map[string]int64
	hash map[string]string
	opt *Options
	fs fs.Fs // base filesystem
	msg string // filesystem name for logging

@@ -72,25 +78,71 @@ func (ds *deltaSet) printStats() {
	}
	nAll := len(ds.deltas)
	nNew := 0
	nMod := 0
	nTime := 0
	nNewer := 0
	nOlder := 0
	nSize := 0
	nLarger := 0
	nSmaller := 0
	nHash := 0
	nDeleted := 0
	for _, d := range ds.deltas {
		if d.is(deltaNew) {
			nNew++
		}
		if d.is(deltaModified) {
			nMod++
		}
		if d.is(deltaTime) {
			nTime++
		}
		if d.is(deltaNewer) {
			nNewer++
		}
		if d.is(deltaOlder) {
			nOlder++
		}
		if d.is(deltaSize) {
			nSize++
		}
		if d.is(deltaLarger) {
			nLarger++
		}
		if d.is(deltaSmaller) {
			nSmaller++
		}
		if d.is(deltaHash) {
			nHash++
		}
		if d.is(deltaDeleted) {
			nDeleted++
		}
	}
	fs.Infof(nil, "%s: %4d changes: %4d new, %4d newer, %4d older, %4d deleted",
		ds.msg, nAll, nNew, nNewer, nOlder, nDeleted)
	if nAll != nNew+nMod+nDeleted {
		fs.Errorf(nil, "something doesn't add up! %4d != %4d + %4d + %4d", nAll, nNew, nMod, nDeleted)
	}
	fs.Infof(nil, "%s: %4d changes: "+Color(terminal.GreenFg, "%4d new")+", "+Color(terminal.YellowFg, "%4d modified")+", "+Color(terminal.RedFg, "%4d deleted"),
		ds.msg, nAll, nNew, nMod, nDeleted)
	if nMod > 0 {
		details := []string{}
		if nTime > 0 {
			details = append(details, fmt.Sprintf(Color(terminal.CyanFg, "%4d newer"), nNewer))
			details = append(details, fmt.Sprintf(Color(terminal.BlueFg, "%4d older"), nOlder))
		}
		if nSize > 0 {
			details = append(details, fmt.Sprintf(Color(terminal.CyanFg, "%4d larger"), nLarger))
			details = append(details, fmt.Sprintf(Color(terminal.BlueFg, "%4d smaller"), nSmaller))
		}
		if nHash > 0 {
			details = append(details, fmt.Sprintf(Color(terminal.CyanFg, "%4d hash differs"), nHash))
		}
		if (nNewer+nOlder != nTime) || (nLarger+nSmaller != nSize) || (nMod > nTime+nSize+nHash) {
			fs.Errorf(nil, "something doesn't add up!")
		}

		fs.Infof(nil, "(%s: %s)", Color(terminal.YellowFg, "Modified"), strings.Join(details, ", "))
	}
}

// findDeltas

@@ -117,6 +169,8 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string,

ds = &deltaSet{
deltas: map[string]delta{},
size: map[string]int64{},
hash: map[string]string{},
fs: f,
msg: msg,
oldCount: len(old.list),

@@ -125,31 +179,70 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string,
}

for _, file := range old.list {
// REMEMBER: this section is only concerned with comparing listings from the same side (not different sides)
d := deltaZero
s := int64(0)
h := ""
if !now.has(file) {
b.indent(msg, file, "File was deleted")
b.indent(msg, file, Color(terminal.RedFg, "File was deleted"))
ds.deleted++
d |= deltaDeleted
} else {
// skip dirs here, as we only care if they are new/deleted, not newer/older
if !now.isDir(file) {
if old.getTime(file) != now.getTime(file) {
if old.beforeOther(now, file) {
fs.Debugf(file, "(old: %v current: %v", old.getTime(file), now.getTime(file))
b.indent(msg, file, "File is newer")
d |= deltaNewer
} else { // Current version is older than prior sync.
fs.Debugf(file, "(old: %v current: %v", old.getTime(file), now.getTime(file))
b.indent(msg, file, "File is OLDER")
d |= deltaOlder
whatchanged := []string{}
if b.opt.Compare.Size {
if sizeDiffers(old.getSize(file), now.getSize(file)) {
fs.Debugf(file, "(old: %v current: %v)", old.getSize(file), now.getSize(file))
if now.getSize(file) > old.getSize(file) {
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "size (larger)"))
d |= deltaLarger
} else {
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "size (smaller)"))
d |= deltaSmaller
}
s = now.getSize(file)
}
}
// TODO Compare sizes and hashes
if b.opt.Compare.Modtime {
if timeDiffers(fctx, old.getTime(file), now.getTime(file), f, f) {
if old.beforeOther(now, file) {
fs.Debugf(file, "(old: %v current: %v)", old.getTime(file), now.getTime(file))
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "time (newer)"))
d |= deltaNewer
} else { // Current version is older than prior sync.
fs.Debugf(file, "(old: %v current: %v)", old.getTime(file), now.getTime(file))
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "time (older)"))
d |= deltaOlder
}
}
}
if b.opt.Compare.Checksum {
if hashDiffers(old.getHash(file), now.getHash(file), old.hash, now.hash, old.getSize(file), now.getSize(file)) {
fs.Debugf(file, "(old: %v current: %v)", old.getHash(file), now.getHash(file))
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "hash"))
d |= deltaHash
h = now.getHash(file)
}
}
// concat changes and print log
if d.is(deltaModified) {
summary := fmt.Sprintf(Color(terminal.YellowFg, "File changed: %s"), strings.Join(whatchanged, ", "))
b.indent(msg, file, summary)
}
}
}

if d.is(deltaModified) {
ds.deltas[file] = d
if b.opt.Compare.Size {
ds.size[file] = s
}
if b.opt.Compare.Checksum {
ds.hash[file] = h
}
} else if d.is(deltaDeleted) {
ds.deltas[file] = d
} else {
// Once we've found at least one unchanged file,
// we know that not everything has changed,

@@ -160,8 +253,14 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string,

for _, file := range now.list {
if !old.has(file) {
b.indent(msg, file, "File is new")
b.indent(msg, file, Color(terminal.GreenFg, "File is new"))
ds.deltas[file] = deltaNew
if b.opt.Compare.Size {
ds.size[file] = now.getSize(file)
}
if b.opt.Compare.Checksum {
ds.hash[file] = now.getHash(file)
}
}
}
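
The sizeDiffers, timeDiffers, and hashDiffers helpers called above are defined elsewhere in this commit; the diff here only shows their call sites. As a rough standalone sketch of the idea described in the commit message (precision-aware modtime comparison, and treating unknown sizes or hashes as inconclusive), the names, signatures, and exact rules below are assumptions for illustration, not the real implementation:

	package main

	import (
		"fmt"
		"time"
	)

	// timeDiffersSketch: two modtimes only count as different if they differ by
	// more than the coarser of the two backends' precisions (assumed rule).
	func timeDiffersSketch(t1, t2 time.Time, prec1, prec2 time.Duration) bool {
		prec := prec1
		if prec2 > prec {
			prec = prec2
		}
		diff := t1.Sub(t2)
		if diff < 0 {
			diff = -diff
		}
		return diff > prec
	}

	// sizeDiffersSketch: an unknown size (negative, by rclone convention) proves nothing.
	func sizeDiffersSketch(s1, s2 int64) bool {
		if s1 < 0 || s2 < 0 {
			return false
		}
		return s1 != s2
	}

	// hashDiffersSketch: a missing hash on either side proves nothing.
	func hashDiffersSketch(h1, h2 string) bool {
		if h1 == "" || h2 == "" {
			return false
		}
		return h1 != h2
	}

	func main() {
		t1 := time.Date(2024, 1, 1, 12, 0, 0, 0, time.UTC)
		t2 := t1.Add(500 * time.Millisecond)
		// 500ms apart, but one side only has 1s precision: treated as equal.
		fmt.Println(timeDiffersSketch(t1, t2, time.Nanosecond, time.Second)) // false
		fmt.Println(sizeDiffersSketch(100, -1))                              // false (unknown size)
		fmt.Println(hashDiffersSketch("abc123", ""))                         // false (missing hash)
	}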

@@ -234,20 +333,28 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
d1 := ds1.deltas[file]
if d1.is(deltaOther) {
d2, in2 := ds2.deltas[file]
file2 := file
if !in2 && file != alias {
d2 = ds2.deltas[alias]
file2 = alias
}
if d2.is(deltaOther) {
checkit := func(filename string) {
if err := filterCheck.AddFile(filename); err != nil {
fs.Debugf(nil, "Non-critical error adding file to list of potential conflicts to check: %s", err)
} else {
fs.Debugf(nil, "Added file to list of potential conflicts to check: %s", filename)
// if size or hash differ, skip this, as we already know they're not equal
if (b.opt.Compare.Size && sizeDiffers(ds1.size[file], ds2.size[file2])) ||
(b.opt.Compare.Checksum && hashDiffers(ds1.hash[file], ds2.hash[file2], b.opt.Compare.HashType1, b.opt.Compare.HashType2, ds1.size[file], ds2.size[file2])) {
fs.Debugf(file, "skipping equality check as size/hash definitely differ")
} else {
checkit := func(filename string) {
if err := filterCheck.AddFile(filename); err != nil {
fs.Debugf(nil, "Non-critical error adding file to list of potential conflicts to check: %s", err)
} else {
fs.Debugf(nil, "Added file to list of potential conflicts to check: %s", filename)
}
}
checkit(file)
if file != alias {
checkit(alias)
}
}
checkit(file)
if file != alias {
checkit(alias)
}
}
}

@@ -294,6 +401,17 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
// the Path1 version is deemed "correct" in this scenario
fs.Infof(alias, "Files are equal but will copy anyway to fix case to %s", file)
copy1to2.Add(file)
} else if b.opt.Compare.Modtime && timeDiffers(ctx, ls1.getTime(ls1.getTryAlias(file, alias)), ls2.getTime(ls2.getTryAlias(file, alias)), b.fs1, b.fs2) {
fs.Infof(file, "Files are equal but will copy anyway to update modtime (will not rename)")
if ls1.getTime(ls1.getTryAlias(file, alias)).Before(ls2.getTime(ls2.getTryAlias(file, alias))) {
// Path2 is newer
b.indent("Path2", p1, "Queue copy to Path1")
copy2to1.Add(ls2.getTryAlias(file, alias))
} else {
// Path1 is newer
b.indent("Path1", p2, "Queue copy to Path2")
copy1to2.Add(ls1.getTryAlias(file, alias))
}
} else {
fs.Infof(nil, "Files are equal! Skipping: %s", file)
renameSkipped.Add(file)