diff --git a/cmd/bisync/bisync_test.go b/cmd/bisync/bisync_test.go index b4ed0fe93..57adbd988 100644 --- a/cmd/bisync/bisync_test.go +++ b/cmd/bisync/bisync_test.go @@ -176,6 +176,7 @@ var ( // Flag -refresh-times helps with Dropbox tests failing with message // "src and dst identical but can't set mod time without deleting and re-uploading" argRefreshTimes = flag.Bool("refresh-times", false, "Force refreshing the target modtime, useful for Dropbox (default: false)") + ignoreLogs = flag.Bool("ignore-logs", false, "skip comparing log lines but still compare listings") ) // bisyncTest keeps all test data in a single place @@ -264,6 +265,25 @@ func TestBisyncRemoteRemote(t *testing.T) { testBisync(t, remote, remote) } +// make sure rc can cope with running concurrent jobs +func TestBisyncConcurrent(t *testing.T) { + oldArgTestCase := argTestCase + *argTestCase = "basic" + *ignoreLogs = true // not useful to compare logs here because both runs will be logging at once + t.Cleanup(func() { + argTestCase = oldArgTestCase + *ignoreLogs = false + }) + + t.Run("test1", testParallel) + t.Run("test2", testParallel) +} + +func testParallel(t *testing.T) { + t.Parallel() + TestBisyncRemoteRemote(t) +} + // TestBisync is a test engine for bisync test cases. 
func testBisync(t *testing.T, path1, path2 string) { ctx := context.Background() @@ -1441,6 +1461,9 @@ func (b *bisyncTest) compareResults() int { resultText := b.mangleResult(b.workDir, file, false) if fileType(file) == "log" { + if *ignoreLogs { + continue + } // save mangled logs so difference is easier on eyes goldenFile := filepath.Join(b.logDir, "mangled.golden.log") resultFile := filepath.Join(b.logDir, "mangled.result.log") diff --git a/cmd/bisync/checkfn.go b/cmd/bisync/checkfn.go index 9b0b9f144..f2015cb11 100644 --- a/cmd/bisync/checkfn.go +++ b/cmd/bisync/checkfn.go @@ -16,15 +16,17 @@ import ( "github.com/rclone/rclone/fs/operations" ) -var hashType hash.Type -var fsrc, fdst fs.Fs -var fcrypt *crypt.Fs +type bisyncCheck = struct { + hashType hash.Type + fsrc, fdst fs.Fs + fcrypt *crypt.Fs +} // WhichCheck determines which CheckFn we should use based on the Fs types // It is more robust and accurate than Check because // it will fallback to CryptCheck or DownloadCheck instead of --size-only! // it returns the *operations.CheckOpt with the CheckFn set. -func WhichCheck(ctx context.Context, opt *operations.CheckOpt) *operations.CheckOpt { +func (b *bisyncRun) WhichCheck(ctx context.Context, opt *operations.CheckOpt) *operations.CheckOpt { ci := fs.GetConfig(ctx) common := opt.Fsrc.Hashes().Overlap(opt.Fdst.Hashes()) @@ -40,32 +42,32 @@ func WhichCheck(ctx context.Context, opt *operations.CheckOpt) *operations.Check if (srcIsCrypt && dstIsCrypt) || (!srcIsCrypt && dstIsCrypt) { // if both are crypt or only dst is crypt - hashType = FdstCrypt.UnWrap().Hashes().GetOne() - if hashType != hash.None { + b.check.hashType = FdstCrypt.UnWrap().Hashes().GetOne() + if b.check.hashType != hash.None { // use cryptcheck - fsrc = opt.Fsrc - fdst = opt.Fdst - fcrypt = FdstCrypt - fs.Infof(fdst, "Crypt detected! Using cryptcheck instead of check. 
(Use --size-only or --ignore-checksum to disable)") - opt.Check = CryptCheckFn + b.check.fsrc = opt.Fsrc + b.check.fdst = opt.Fdst + b.check.fcrypt = FdstCrypt + fs.Infof(b.check.fdst, "Crypt detected! Using cryptcheck instead of check. (Use --size-only or --ignore-checksum to disable)") + opt.Check = b.CryptCheckFn return opt } } else if srcIsCrypt && !dstIsCrypt { // if only src is crypt - hashType = FsrcCrypt.UnWrap().Hashes().GetOne() - if hashType != hash.None { + b.check.hashType = FsrcCrypt.UnWrap().Hashes().GetOne() + if b.check.hashType != hash.None { // use reverse cryptcheck - fsrc = opt.Fdst - fdst = opt.Fsrc - fcrypt = FsrcCrypt - fs.Infof(fdst, "Crypt detected! Using cryptcheck instead of check. (Use --size-only or --ignore-checksum to disable)") - opt.Check = ReverseCryptCheckFn + b.check.fsrc = opt.Fdst + b.check.fdst = opt.Fsrc + b.check.fcrypt = FsrcCrypt + fs.Infof(b.check.fdst, "Crypt detected! Using cryptcheck instead of check. (Use --size-only or --ignore-checksum to disable)") + opt.Check = b.ReverseCryptCheckFn return opt } } // if we've gotten this far, neither check or cryptcheck will work, so use --download - fs.Infof(fdst, "Can't compare hashes, so using check --download for safety. (Use --size-only or --ignore-checksum to disable)") + fs.Infof(b.check.fdst, "Can't compare hashes, so using check --download for safety. 
(Use --size-only or --ignore-checksum to disable)") opt.Check = DownloadCheckFn return opt } @@ -88,17 +90,17 @@ func CheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, } // CryptCheckFn is a slightly modified version of CryptCheck -func CryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) { +func (b *bisyncRun) CryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) { cryptDst := dst.(*crypt.Object) underlyingDst := cryptDst.UnWrap() - underlyingHash, err := underlyingDst.Hash(ctx, hashType) + underlyingHash, err := underlyingDst.Hash(ctx, b.check.hashType) if err != nil { return true, false, fmt.Errorf("error reading hash from underlying %v: %w", underlyingDst, err) } if underlyingHash == "" { return false, true, nil } - cryptHash, err := fcrypt.ComputeHash(ctx, cryptDst, src, hashType) + cryptHash, err := b.check.fcrypt.ComputeHash(ctx, cryptDst, src, b.check.hashType) if err != nil { return true, false, fmt.Errorf("error computing hash: %w", err) } @@ -106,10 +108,10 @@ func CryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash return false, true, nil } if cryptHash != underlyingHash { - err = fmt.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", fdst.Name(), fdst.Root(), cryptHash, fsrc.Name(), fsrc.Root(), underlyingHash) + err = fmt.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", b.check.fdst.Name(), b.check.fdst.Root(), cryptHash, b.check.fsrc.Name(), b.check.fsrc.Root(), underlyingHash) fs.Debugf(src, "%s", err.Error()) // using same error msg as CheckFn so integration tests match - err = fmt.Errorf("%v differ", hashType) + err = fmt.Errorf("%v differ", b.check.hashType) fs.Errorf(src, "%s", err.Error()) return true, false, nil } @@ -118,8 +120,8 @@ func CryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash // ReverseCryptCheckFn is like CryptCheckFn except src and dst are switched // result: src is crypt, dst is 
non-crypt -func ReverseCryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) { - return CryptCheckFn(ctx, src, dst) +func (b *bisyncRun) ReverseCryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) { + return b.CryptCheckFn(ctx, src, dst) } // DownloadCheckFn is a slightly modified version of Check with --download @@ -137,7 +139,7 @@ func (b *bisyncRun) checkconflicts(ctxCheck context.Context, filterCheck *filter if filterCheck.HaveFilesFrom() { fs.Debugf(nil, "There are potential conflicts to check.") - opt, close, checkopterr := check.GetCheckOpt(b.fs1, b.fs2) + opt, close, checkopterr := check.GetCheckOpt(fs1, fs2) if checkopterr != nil { b.critical = true b.retryable = true @@ -148,16 +150,16 @@ func (b *bisyncRun) checkconflicts(ctxCheck context.Context, filterCheck *filter opt.Match = new(bytes.Buffer) - opt = WhichCheck(ctxCheck, opt) + opt = b.WhichCheck(ctxCheck, opt) fs.Infof(nil, "Checking potential conflicts...") check := operations.CheckFn(ctxCheck, opt) fs.Infof(nil, "Finished checking the potential conflicts. %s", check) - //reset error count, because we don't want to count check errors as bisync errors + // reset error count, because we don't want to count check errors as bisync errors accounting.Stats(ctxCheck).ResetErrors() - //return the list of identical files to check against later + // return the list of identical files to check against later if len(fmt.Sprint(opt.Match)) > 0 { matches = bilib.ToNames(strings.Split(fmt.Sprint(opt.Match), "\n")) } @@ -173,14 +175,14 @@ func (b *bisyncRun) checkconflicts(ctxCheck context.Context, filterCheck *filter // WhichEqual is similar to WhichCheck, but checks a single object. 
// Returns true if the objects are equal, false if they differ or if we don't know -func WhichEqual(ctx context.Context, src, dst fs.Object, Fsrc, Fdst fs.Fs) bool { +func (b *bisyncRun) WhichEqual(ctx context.Context, src, dst fs.Object, Fsrc, Fdst fs.Fs) bool { opt, close, checkopterr := check.GetCheckOpt(Fsrc, Fdst) if checkopterr != nil { fs.Debugf(nil, "GetCheckOpt error: %v", checkopterr) } defer close() - opt = WhichCheck(ctx, opt) + opt = b.WhichCheck(ctx, opt) differ, noHash, err := opt.Check(ctx, dst, src) if err != nil { fs.Errorf(src, "failed to check: %v", err) @@ -217,7 +219,7 @@ func (b *bisyncRun) EqualFn(ctx context.Context) context.Context { equal, skipHash = timeSizeEqualFn() if equal && !skipHash { whichHashType := func(f fs.Info) hash.Type { - ht := getHashType(f.Name()) + ht := b.getHashType(f.Name()) if ht == hash.None && b.opt.Compare.SlowHashSyncOnly && !b.opt.Resync { ht = f.Hashes().GetOne() } @@ -225,9 +227,9 @@ func (b *bisyncRun) EqualFn(ctx context.Context) context.Context { } srcHash, _ := src.Hash(ctx, whichHashType(src.Fs())) dstHash, _ := dst.Hash(ctx, whichHashType(dst.Fs())) - srcHash, _ = tryDownloadHash(ctx, src, srcHash) - dstHash, _ = tryDownloadHash(ctx, dst, dstHash) - equal = !hashDiffers(srcHash, dstHash, whichHashType(src.Fs()), whichHashType(dst.Fs()), src.Size(), dst.Size()) + srcHash, _ = b.tryDownloadHash(ctx, src, srcHash) + dstHash, _ = b.tryDownloadHash(ctx, dst, dstHash) + equal = !b.hashDiffers(srcHash, dstHash, whichHashType(src.Fs()), whichHashType(dst.Fs()), src.Size(), dst.Size()) } if equal { logger(ctx, operations.Match, src, dst, nil) diff --git a/cmd/bisync/cmd.go b/cmd/bisync/cmd.go index 8ec2d393a..fd1f35e17 100644 --- a/cmd/bisync/cmd.go +++ b/cmd/bisync/cmd.go @@ -115,6 +115,7 @@ func (x *CheckSyncMode) Type() string { } // Opt keeps command line options +// internal functions should use b.opt instead var Opt Options func init() { diff --git a/cmd/bisync/compare.go b/cmd/bisync/compare.go index 
73ced57af..33079d5f7 100644 --- a/cmd/bisync/compare.go +++ b/cmd/bisync/compare.go @@ -28,7 +28,7 @@ type CompareOpt = struct { DownloadHash bool } -func (b *bisyncRun) setCompareDefaults(ctx context.Context) error { +func (b *bisyncRun) setCompareDefaults(ctx context.Context) (err error) { ci := fs.GetConfig(ctx) // defaults @@ -120,25 +120,25 @@ func sizeDiffers(a, b int64) bool { // returns true if the hashes are definitely different. // returns false if equal, or if either is unknown. -func hashDiffers(a, b string, ht1, ht2 hash.Type, size1, size2 int64) bool { - if a == "" || b == "" { +func (b *bisyncRun) hashDiffers(stringA, stringB string, ht1, ht2 hash.Type, size1, size2 int64) bool { + if stringA == "" || stringB == "" { if ht1 != hash.None && ht2 != hash.None && !(size1 <= 0 || size2 <= 0) { - fs.Logf(nil, Color(terminal.YellowFg, "WARNING: hash unexpectedly blank despite Fs support (%s, %s) (you may need to --resync!)"), a, b) + fs.Logf(nil, Color(terminal.YellowFg, "WARNING: hash unexpectedly blank despite Fs support (%s, %s) (you may need to --resync!)"), stringA, stringB) } return false } if ht1 != ht2 { - if !(downloadHash && ((ht1 == hash.MD5 && ht2 == hash.None) || (ht1 == hash.None && ht2 == hash.MD5))) { + if !(b.downloadHashOpt.downloadHash && ((ht1 == hash.MD5 && ht2 == hash.None) || (ht1 == hash.None && ht2 == hash.MD5))) { fs.Infof(nil, Color(terminal.YellowFg, "WARNING: Can't compare hashes of different types (%s, %s)"), ht1.String(), ht2.String()) return false } } - return a != b + return stringA != stringB } // chooses hash type, giving priority to types both sides have in common func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) { - downloadHash = b.opt.Compare.DownloadHash + b.downloadHashOpt.downloadHash = b.opt.Compare.DownloadHash if b.opt.Compare.NoSlowHash && b.opt.Compare.SlowHashDetected { fs.Infof(nil, "Not checking for common hash as at least one slow hash detected.") } else { @@ -268,13 +268,15 @@ func (b *bisyncRun) 
setFromCompareFlag(ctx context.Context) error { return nil } -// downloadHash is true if we should attempt to compute hash by downloading when otherwise unavailable -var downloadHash bool -var downloadHashWarn mutex.Once -var firstDownloadHash mutex.Once +// b.downloadHashOpt.downloadHash is true if we should attempt to compute hash by downloading when otherwise unavailable +type downloadHashOpt struct { + downloadHash bool + downloadHashWarn mutex.Once + firstDownloadHash mutex.Once +} -func tryDownloadHash(ctx context.Context, o fs.DirEntry, hashVal string) (string, error) { - if hashVal != "" || !downloadHash { +func (b *bisyncRun) tryDownloadHash(ctx context.Context, o fs.DirEntry, hashVal string) (string, error) { + if hashVal != "" || !b.downloadHashOpt.downloadHash { return hashVal, nil } obj, ok := o.(fs.Object) @@ -283,14 +285,14 @@ func tryDownloadHash(ctx context.Context, o fs.DirEntry, hashVal string) (string return hashVal, fs.ErrorObjectNotFound } if o.Size() < 0 { - downloadHashWarn.Do(func() { + b.downloadHashOpt.downloadHashWarn.Do(func() { fs.Log(o, Color(terminal.YellowFg, "Skipping hash download as checksum not reliable with files of unknown length.")) }) fs.Debugf(o, "Skipping hash download as checksum not reliable with files of unknown length.") return hashVal, hash.ErrUnsupported } - firstDownloadHash.Do(func() { + b.downloadHashOpt.firstDownloadHash.Do(func() { fs.Infoc(obj.Fs().Name(), Color(terminal.Dim, "Downloading hashes...")) }) tr := accounting.Stats(ctx).NewCheckingTransfer(o, "computing hash with --download-hash") diff --git a/cmd/bisync/deltas.go b/cmd/bisync/deltas.go index 2fba379c6..b882fb564 100644 --- a/cmd/bisync/deltas.go +++ b/cmd/bisync/deltas.go @@ -219,7 +219,7 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string, } } if b.opt.Compare.Checksum { - if hashDiffers(old.getHash(file), now.getHash(file), old.hash, now.hash, old.getSize(file), now.getSize(file)) { + if 
b.hashDiffers(old.getHash(file), now.getHash(file), old.hash, now.hash, old.getSize(file), now.getSize(file)) { fs.Debugf(file, "(old: %v current: %v)", old.getHash(file), now.getHash(file)) whatchanged = append(whatchanged, Color(terminal.MagentaFg, "hash")) d |= deltaHash @@ -346,7 +346,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (result if d2.is(deltaOther) { // if size or hash differ, skip this, as we already know they're not equal if (b.opt.Compare.Size && sizeDiffers(ds1.size[file], ds2.size[file2])) || - (b.opt.Compare.Checksum && hashDiffers(ds1.hash[file], ds2.hash[file2], b.opt.Compare.HashType1, b.opt.Compare.HashType2, ds1.size[file], ds2.size[file2])) { + (b.opt.Compare.Checksum && b.hashDiffers(ds1.hash[file], ds2.hash[file2], b.opt.Compare.HashType1, b.opt.Compare.HashType2, ds1.size[file], ds2.size[file2])) { fs.Debugf(file, "skipping equality check as size/hash definitely differ") } else { checkit := func(filename string) { @@ -393,10 +393,10 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (result // if files are identical, leave them alone instead of renaming if (dirs1.has(file) || dirs1.has(alias)) && (dirs2.has(file) || dirs2.has(alias)) { fs.Infof(nil, "This is a directory, not a file. 
Skipping equality check and will not rename: %s", file) - ls1.getPut(file, skippedDirs1) - ls2.getPut(file, skippedDirs2) + b.march.ls1.getPut(file, skippedDirs1) + b.march.ls2.getPut(file, skippedDirs2) b.debugFn(file, func() { - b.debug(file, fmt.Sprintf("deltas dir: %s, ls1 has name?: %v, ls2 has name?: %v", file, ls1.has(b.DebugName), ls2.has(b.DebugName))) + b.debug(file, fmt.Sprintf("deltas dir: %s, ls1 has name?: %v, b.march.ls2 has name?: %v", file, b.march.ls1.has(b.DebugName), b.march.ls2.has(b.DebugName))) }) } else { equal := matches.Has(file) @@ -409,16 +409,16 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (result // the Path1 version is deemed "correct" in this scenario fs.Infof(alias, "Files are equal but will copy anyway to fix case to %s", file) copy1to2.Add(file) - } else if b.opt.Compare.Modtime && timeDiffers(ctx, ls1.getTime(ls1.getTryAlias(file, alias)), ls2.getTime(ls2.getTryAlias(file, alias)), b.fs1, b.fs2) { + } else if b.opt.Compare.Modtime && timeDiffers(ctx, b.march.ls1.getTime(b.march.ls1.getTryAlias(file, alias)), b.march.ls2.getTime(b.march.ls2.getTryAlias(file, alias)), b.fs1, b.fs2) { fs.Infof(file, "Files are equal but will copy anyway to update modtime (will not rename)") - if ls1.getTime(ls1.getTryAlias(file, alias)).Before(ls2.getTime(ls2.getTryAlias(file, alias))) { + if b.march.ls1.getTime(b.march.ls1.getTryAlias(file, alias)).Before(b.march.ls2.getTime(b.march.ls2.getTryAlias(file, alias))) { // Path2 is newer b.indent("Path2", p1, "Queue copy to Path1") - copy2to1.Add(ls2.getTryAlias(file, alias)) + copy2to1.Add(b.march.ls2.getTryAlias(file, alias)) } else { // Path1 is newer b.indent("Path1", p2, "Queue copy to Path2") - copy1to2.Add(ls1.getTryAlias(file, alias)) + copy1to2.Add(b.march.ls1.getTryAlias(file, alias)) } } else { fs.Infof(nil, "Files are equal! 
Skipping: %s", file) @@ -590,10 +590,10 @@ func (b *bisyncRun) updateAliases(ctx context.Context, ds1, ds2 *deltaSet) { fullMap1 := map[string]string{} // [transformedname]originalname fullMap2 := map[string]string{} // [transformedname]originalname - for _, name := range ls1.list { + for _, name := range b.march.ls1.list { fullMap1[transform(name)] = name } - for _, name := range ls2.list { + for _, name := range b.march.ls2.list { fullMap2[transform(name)] = name } diff --git a/cmd/bisync/listing.go b/cmd/bisync/listing.go index 1aa1e9a62..02f332a77 100644 --- a/cmd/bisync/listing.go +++ b/cmd/bisync/listing.go @@ -202,8 +202,8 @@ func (b *bisyncRun) fileInfoEqual(file1, file2 string, ls1, ls2 *fileList) bool equal = false } } - if b.opt.Compare.Checksum && !ignoreListingChecksum { - if hashDiffers(ls1.getHash(file1), ls2.getHash(file2), b.opt.Compare.HashType1, b.opt.Compare.HashType2, ls1.getSize(file1), ls2.getSize(file2)) { + if b.opt.Compare.Checksum && !b.queueOpt.ignoreListingChecksum { + if b.hashDiffers(ls1.getHash(file1), ls2.getHash(file2), b.opt.Compare.HashType1, b.opt.Compare.HashType2, ls1.getSize(file1), ls2.getSize(file2)) { b.indent("ERROR", file1, fmt.Sprintf("Checksum not equal in listing. Path1: %v, Path2: %v", ls1.getHash(file1), ls2.getHash(file2))) equal = false } @@ -745,7 +745,7 @@ func (b *bisyncRun) recheck(ctxRecheck context.Context, src, dst fs.Fs, srcList, if hashType != hash.None { hashVal, _ = obj.Hash(ctxRecheck, hashType) } - hashVal, _ = tryDownloadHash(ctxRecheck, obj, hashVal) + hashVal, _ = b.tryDownloadHash(ctxRecheck, obj, hashVal) } var modtime time.Time if b.opt.Compare.Modtime { @@ -759,7 +759,7 @@ func (b *bisyncRun) recheck(ctxRecheck context.Context, src, dst fs.Fs, srcList, for _, dstObj := range dstObjs { if srcObj.Remote() == dstObj.Remote() || srcObj.Remote() == b.aliases.Alias(dstObj.Remote()) { // note: unlike Equal(), WhichEqual() does not update the modtime in dest if sums match but modtimes don't. 
- if b.opt.DryRun || WhichEqual(ctxRecheck, srcObj, dstObj, src, dst) { + if b.opt.DryRun || b.WhichEqual(ctxRecheck, srcObj, dstObj, src, dst) { putObj(srcObj, srcList) putObj(dstObj, dstList) resolved = append(resolved, srcObj.Remote()) @@ -773,7 +773,7 @@ func (b *bisyncRun) recheck(ctxRecheck context.Context, src, dst fs.Fs, srcList, // skip and error during --resync, as rollback is not possible if !slices.Contains(resolved, srcObj.Remote()) && !b.opt.DryRun { if b.opt.Resync { - err = errors.New("no dstObj match or files not equal") + err := errors.New("no dstObj match or files not equal") b.handleErr(srcObj, "Unable to rollback during --resync", err, true, false) } else { toRollback = append(toRollback, srcObj.Remote()) diff --git a/cmd/bisync/lockfile.go b/cmd/bisync/lockfile.go index 90e8c2166..e294ff0e8 100644 --- a/cmd/bisync/lockfile.go +++ b/cmd/bisync/lockfile.go @@ -16,16 +16,17 @@ import ( const basicallyforever = fs.Duration(200 * 365 * 24 * time.Hour) -var stopRenewal func() +type lockFileOpt struct { + stopRenewal func() + data struct { + Session string + PID string + TimeRenewed time.Time + TimeExpires time.Time + } +} -var data = struct { - Session string - PID string - TimeRenewed time.Time - TimeExpires time.Time -}{} - -func (b *bisyncRun) setLockFile() error { +func (b *bisyncRun) setLockFile() (err error) { b.lockFile = "" b.setLockFileExpiration() if !b.opt.DryRun { @@ -45,24 +46,23 @@ func (b *bisyncRun) setLockFile() error { } fs.Debugf(nil, "Lock file created: %s", b.lockFile) b.renewLockFile() - stopRenewal = b.startLockRenewal() + b.lockFileOpt.stopRenewal = b.startLockRenewal() } return nil } -func (b *bisyncRun) removeLockFile() { +func (b *bisyncRun) removeLockFile() (err error) { if b.lockFile != "" { - stopRenewal() - errUnlock := os.Remove(b.lockFile) - if errUnlock == nil { + b.lockFileOpt.stopRenewal() + err = os.Remove(b.lockFile) + if err == nil { fs.Debugf(nil, "Lock file removed: %s", b.lockFile) - } else if err == nil { - 
err = errUnlock } else { - fs.Errorf(nil, "cannot remove lockfile %s: %v", b.lockFile, errUnlock) + fs.Errorf(nil, "cannot remove lockfile %s: %v", b.lockFile, err) } b.lockFile = "" // block removing it again } + return err } func (b *bisyncRun) setLockFileExpiration() { @@ -77,18 +77,18 @@ func (b *bisyncRun) setLockFileExpiration() { func (b *bisyncRun) renewLockFile() { if b.lockFile != "" && bilib.FileExists(b.lockFile) { - data.Session = b.basePath - data.PID = strconv.Itoa(os.Getpid()) - data.TimeRenewed = time.Now() - data.TimeExpires = time.Now().Add(time.Duration(b.opt.MaxLock)) + b.lockFileOpt.data.Session = b.basePath + b.lockFileOpt.data.PID = strconv.Itoa(os.Getpid()) + b.lockFileOpt.data.TimeRenewed = time.Now() + b.lockFileOpt.data.TimeExpires = time.Now().Add(time.Duration(b.opt.MaxLock)) // save data file df, err := os.Create(b.lockFile) b.handleErr(b.lockFile, "error renewing lock file", err, true, true) - b.handleErr(b.lockFile, "error encoding JSON to lock file", json.NewEncoder(df).Encode(data), true, true) + b.handleErr(b.lockFile, "error encoding JSON to lock file", json.NewEncoder(df).Encode(b.lockFileOpt.data), true, true) b.handleErr(b.lockFile, "error closing lock file", df.Close(), true, true) if b.opt.MaxLock < basicallyforever { - fs.Infof(nil, Color(terminal.HiBlueFg, "lock file renewed for %v. New expiration: %v"), b.opt.MaxLock, data.TimeExpires) + fs.Infof(nil, Color(terminal.HiBlueFg, "lock file renewed for %v. 
New expiration: %v"), b.opt.MaxLock, b.lockFileOpt.data.TimeExpires) } } } @@ -99,7 +99,7 @@ func (b *bisyncRun) lockFileIsExpired() bool { b.handleErr(b.lockFile, "error reading lock file", err, true, true) dec := json.NewDecoder(rdf) for { - if err := dec.Decode(&data); err != nil { + if err := dec.Decode(&b.lockFileOpt.data); err != nil { if err != io.EOF { fs.Errorf(b.lockFile, "err: %v", err) } @@ -107,14 +107,14 @@ func (b *bisyncRun) lockFileIsExpired() bool { } } b.handleErr(b.lockFile, "error closing file", rdf.Close(), true, true) - if !data.TimeExpires.IsZero() && data.TimeExpires.Before(time.Now()) { - fs.Infof(b.lockFile, Color(terminal.GreenFg, "Lock file found, but it expired at %v. Will delete it and proceed."), data.TimeExpires) + if !b.lockFileOpt.data.TimeExpires.IsZero() && b.lockFileOpt.data.TimeExpires.Before(time.Now()) { + fs.Infof(b.lockFile, Color(terminal.GreenFg, "Lock file found, but it expired at %v. Will delete it and proceed."), b.lockFileOpt.data.TimeExpires) markFailed(b.listing1) // listing is untrusted so force revert to prior (if --recover) or create new ones (if --resync) markFailed(b.listing2) return true } - fs.Infof(b.lockFile, Color(terminal.RedFg, "Valid lock file found. Expires at %v. (%v from now)"), data.TimeExpires, time.Since(data.TimeExpires).Abs().Round(time.Second)) - prettyprint(data, "Lockfile info", fs.LogLevelInfo) + fs.Infof(b.lockFile, Color(terminal.RedFg, "Valid lock file found. Expires at %v. 
(%v from now)"), b.lockFileOpt.data.TimeExpires, time.Since(b.lockFileOpt.data.TimeExpires).Abs().Round(time.Second)) + prettyprint(b.lockFileOpt.data, "Lockfile info", fs.LogLevelInfo) } return false } diff --git a/cmd/bisync/march.go b/cmd/bisync/march.go index 60263e2d5..d2a3ecae9 100644 --- a/cmd/bisync/march.go +++ b/cmd/bisync/march.go @@ -12,18 +12,20 @@ import ( "github.com/rclone/rclone/fs/march" ) -var ls1 = newFileList() -var ls2 = newFileList() -var err error -var firstErr error -var marchAliasLock sync.Mutex -var marchLsLock sync.Mutex -var marchErrLock sync.Mutex -var marchCtx context.Context +type bisyncMarch struct { + ls1 *fileList + ls2 *fileList + err error + firstErr error + marchAliasLock sync.Mutex + marchLsLock sync.Mutex + marchErrLock sync.Mutex + marchCtx context.Context +} func (b *bisyncRun) makeMarchListing(ctx context.Context) (*fileList, *fileList, error) { ci := fs.GetConfig(ctx) - marchCtx = ctx + b.march.marchCtx = ctx b.setupListing() fs.Debugf(b, "starting to march!") @@ -39,31 +41,31 @@ func (b *bisyncRun) makeMarchListing(ctx context.Context) (*fileList, *fileList, NoCheckDest: false, NoUnicodeNormalization: ci.NoUnicodeNormalization, } - err = m.Run(ctx) + b.march.err = m.Run(ctx) - fs.Debugf(b, "march completed. err: %v", err) - if err == nil { - err = firstErr + fs.Debugf(b, "march completed. 
err: %v", b.march.err) + if b.march.err == nil { + b.march.err = b.march.firstErr } - if err != nil { - b.handleErr("march", "error during march", err, true, true) + if b.march.err != nil { + b.handleErr("march", "error during march", b.march.err, true, true) b.abort = true - return ls1, ls2, err + return b.march.ls1, b.march.ls2, b.march.err } // save files - if b.opt.Compare.DownloadHash && ls1.hash == hash.None { - ls1.hash = hash.MD5 + if b.opt.Compare.DownloadHash && b.march.ls1.hash == hash.None { + b.march.ls1.hash = hash.MD5 } - if b.opt.Compare.DownloadHash && ls2.hash == hash.None { - ls2.hash = hash.MD5 + if b.opt.Compare.DownloadHash && b.march.ls2.hash == hash.None { + b.march.ls2.hash = hash.MD5 } - err = ls1.save(ctx, b.newListing1) - b.handleErr(ls1, "error saving ls1 from march", err, true, true) - err = ls2.save(ctx, b.newListing2) - b.handleErr(ls2, "error saving ls2 from march", err, true, true) + b.march.err = b.march.ls1.save(ctx, b.newListing1) + b.handleErr(b.march.ls1, "error saving b.march.ls1 from march", b.march.err, true, true) + b.march.err = b.march.ls2.save(ctx, b.newListing2) + b.handleErr(b.march.ls2, "error saving b.march.ls2 from march", b.march.err, true, true) - return ls1, ls2, err + return b.march.ls1, b.march.ls2, b.march.err } // SrcOnly have an object which is on path1 only @@ -83,9 +85,9 @@ func (b *bisyncRun) DstOnly(o fs.DirEntry) (recurse bool) { // Match is called when object exists on both path1 and path2 (whether equal or not) func (b *bisyncRun) Match(ctx context.Context, o2, o1 fs.DirEntry) (recurse bool) { fs.Debugf(o1, "both path1 and path2") - marchAliasLock.Lock() + b.march.marchAliasLock.Lock() b.aliases.Add(o1.Remote(), o2.Remote()) - marchAliasLock.Unlock() + b.march.marchAliasLock.Unlock() b.parse(o1, true) b.parse(o2, false) return isDir(o1) @@ -119,76 +121,76 @@ func (b *bisyncRun) parse(e fs.DirEntry, isPath1 bool) { } func (b *bisyncRun) setupListing() { - ls1 = newFileList() - ls2 = newFileList() + 
b.march.ls1 = newFileList() + b.march.ls2 = newFileList() // note that --ignore-listing-checksum is different from --ignore-checksum // and we already checked it when we set b.opt.Compare.HashType1 and 2 - ls1.hash = b.opt.Compare.HashType1 - ls2.hash = b.opt.Compare.HashType2 + b.march.ls1.hash = b.opt.Compare.HashType1 + b.march.ls2.hash = b.opt.Compare.HashType2 } func (b *bisyncRun) ForObject(o fs.Object, isPath1 bool) { - tr := accounting.Stats(marchCtx).NewCheckingTransfer(o, "listing file - "+whichPath(isPath1)) + tr := accounting.Stats(b.march.marchCtx).NewCheckingTransfer(o, "listing file - "+whichPath(isPath1)) defer func() { - tr.Done(marchCtx, nil) + tr.Done(b.march.marchCtx, nil) }() var ( hashVal string hashErr error ) - ls := whichLs(isPath1) + ls := b.whichLs(isPath1) hashType := ls.hash if hashType != hash.None { - hashVal, hashErr = o.Hash(marchCtx, hashType) - marchErrLock.Lock() - if firstErr == nil { - firstErr = hashErr + hashVal, hashErr = o.Hash(b.march.marchCtx, hashType) + b.march.marchErrLock.Lock() + if b.march.firstErr == nil { + b.march.firstErr = hashErr } - marchErrLock.Unlock() + b.march.marchErrLock.Unlock() } - hashVal, hashErr = tryDownloadHash(marchCtx, o, hashVal) - marchErrLock.Lock() - if firstErr == nil { - firstErr = hashErr + hashVal, hashErr = b.tryDownloadHash(b.march.marchCtx, o, hashVal) + b.march.marchErrLock.Lock() + if b.march.firstErr == nil { + b.march.firstErr = hashErr } - if firstErr != nil { - b.handleErr(hashType, "error hashing during march", firstErr, false, true) + if b.march.firstErr != nil { + b.handleErr(hashType, "error hashing during march", b.march.firstErr, false, true) } - marchErrLock.Unlock() + b.march.marchErrLock.Unlock() var modtime time.Time if b.opt.Compare.Modtime { - modtime = o.ModTime(marchCtx).In(TZ) + modtime = o.ModTime(b.march.marchCtx).In(TZ) } id := "" // TODO: ID(o) flags := "-" // "-" for a file and "d" for a directory - marchLsLock.Lock() + b.march.marchLsLock.Lock() 
ls.put(o.Remote(), o.Size(), modtime, hashVal, id, flags) - marchLsLock.Unlock() + b.march.marchLsLock.Unlock() } func (b *bisyncRun) ForDir(o fs.Directory, isPath1 bool) { - tr := accounting.Stats(marchCtx).NewCheckingTransfer(o, "listing dir - "+whichPath(isPath1)) + tr := accounting.Stats(b.march.marchCtx).NewCheckingTransfer(o, "listing dir - "+whichPath(isPath1)) defer func() { - tr.Done(marchCtx, nil) + tr.Done(b.march.marchCtx, nil) }() - ls := whichLs(isPath1) + ls := b.whichLs(isPath1) var modtime time.Time if b.opt.Compare.Modtime { - modtime = o.ModTime(marchCtx).In(TZ) + modtime = o.ModTime(b.march.marchCtx).In(TZ) } id := "" // TODO flags := "d" // "-" for a file and "d" for a directory - marchLsLock.Lock() + b.march.marchLsLock.Lock() ls.put(o.Remote(), -1, modtime, "", id, flags) - marchLsLock.Unlock() + b.march.marchLsLock.Unlock() } -func whichLs(isPath1 bool) *fileList { - ls := ls1 +func (b *bisyncRun) whichLs(isPath1 bool) *fileList { + ls := b.march.ls1 if !isPath1 { - ls = ls2 + ls = b.march.ls2 } return ls } @@ -206,7 +208,7 @@ func (b *bisyncRun) findCheckFiles(ctx context.Context) (*fileList, *fileList, e b.handleErr(b.opt.CheckFilename, "error adding CheckFilename to filter", filterCheckFile.Add(true, b.opt.CheckFilename), true, true) b.handleErr(b.opt.CheckFilename, "error adding ** exclusion to filter", filterCheckFile.Add(false, "**"), true, true) ci := fs.GetConfig(ctxCheckFile) - marchCtx = ctxCheckFile + b.march.marchCtx = ctxCheckFile b.setupListing() fs.Debugf(b, "starting to march!") @@ -223,18 +225,18 @@ func (b *bisyncRun) findCheckFiles(ctx context.Context) (*fileList, *fileList, e NoCheckDest: false, NoUnicodeNormalization: ci.NoUnicodeNormalization, } - err = m.Run(ctxCheckFile) + b.march.err = m.Run(ctxCheckFile) - fs.Debugf(b, "march completed. err: %v", err) - if err == nil { - err = firstErr + fs.Debugf(b, "march completed. 
err: %v", b.march.err) + if b.march.err == nil { + b.march.err = b.march.firstErr } - if err != nil { - b.handleErr("march", "error during findCheckFiles", err, true, true) + if b.march.err != nil { + b.handleErr("march", "error during findCheckFiles", b.march.err, true, true) b.abort = true } - return ls1, ls2, err + return b.march.ls1, b.march.ls2, b.march.err } // ID returns the ID of the Object if known, or "" if not diff --git a/cmd/bisync/operations.go b/cmd/bisync/operations.go index 6af40a2e2..e458fff60 100644 --- a/cmd/bisync/operations.go +++ b/cmd/bisync/operations.go @@ -51,6 +51,11 @@ type bisyncRun struct { lockFile string renames renames resyncIs1to2 bool + march bisyncMarch + check bisyncCheck + queueOpt bisyncQueueOpt + downloadHashOpt downloadHashOpt + lockFileOpt lockFileOpt } type queues struct { @@ -64,7 +69,6 @@ type queues struct { // Bisync handles lock file, performs bisync run and checks exit status func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) { - defer resetGlobals() opt := *optArg // ensure that input is never changed b := &bisyncRun{ fs1: fs1, @@ -124,6 +128,8 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) { return err } + b.queueOpt.logger = operations.NewLoggerOpt() + // Handle SIGINT var finaliseOnce gosync.Once @@ -161,7 +167,7 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) { markFailed(b.listing1) markFailed(b.listing2) } - b.removeLockFile() + err = b.removeLockFile() } }) } @@ -171,7 +177,10 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) { // run bisync err = b.runLocked(ctx) - b.removeLockFile() + removeLockErr := b.removeLockFile() + if err == nil { + err = removeLockErr + } b.CleanupCompleted = true if b.InGracefulShutdown { @@ -297,7 +306,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) { } fs.Infof(nil, "Building Path1 and Path2 listings") - ls1, ls2, err = 
b.makeMarchListing(fctx) + b.march.ls1, b.march.ls2, err = b.makeMarchListing(fctx) if err != nil || accounting.Stats(fctx).Errored() { fs.Error(nil, Color(terminal.RedFg, "There were errors while building listings. Aborting as it is too dangerous to continue.")) b.critical = true @@ -307,7 +316,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) { // Check for Path1 deltas relative to the prior sync fs.Infof(nil, "Path1 checking for diffs") - ds1, err := b.findDeltas(fctx, b.fs1, b.listing1, ls1, "Path1") + ds1, err := b.findDeltas(fctx, b.fs1, b.listing1, b.march.ls1, "Path1") if err != nil { return err } @@ -315,7 +324,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) { // Check for Path2 deltas relative to the prior sync fs.Infof(nil, "Path2 checking for diffs") - ds2, err := b.findDeltas(fctx, b.fs2, b.listing2, ls2, "Path2") + ds2, err := b.findDeltas(fctx, b.fs2, b.listing2, b.march.ls2, "Path2") if err != nil { return err } @@ -389,7 +398,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) { newl1, _ := b.loadListing(b.newListing1) newl2, _ := b.loadListing(b.newListing2) b.debug(b.DebugName, fmt.Sprintf("pre-saveOldListings, ls1 has name?: %v, ls2 has name?: %v", l1.has(b.DebugName), l2.has(b.DebugName))) - b.debug(b.DebugName, fmt.Sprintf("pre-saveOldListings, newls1 has name?: %v, newls2 has name?: %v", newl1.has(b.DebugName), newl2.has(b.DebugName))) + b.debug(b.DebugName, fmt.Sprintf("pre-saveOldListings, newls1 has name?: %v, newls2 has name?: %v", newl1.has(b.DebugName), newl2.has(b.DebugName))) } b.saveOldListings() // save new listings @@ -553,7 +562,7 @@ func (b *bisyncRun) setBackupDir(ctx context.Context, destPath int) context.Cont return ctx } -func (b *bisyncRun) overlappingPathsCheck(fctx context.Context, fs1, fs2 fs.Fs) error { +func (b *bisyncRun) overlappingPathsCheck(fctx context.Context, fs1, fs2 fs.Fs) (err error) { if operations.OverlappingFilterCheck(fctx, fs2, fs1) { err = 
errors.New(Color(terminal.RedFg, "Overlapping paths detected. Cannot bisync between paths that overlap, unless excluded by filters.")) return err @@ -586,7 +595,7 @@ func (b *bisyncRun) overlappingPathsCheck(fctx context.Context, fs1, fs2 fs.Fs) return nil } -func (b *bisyncRun) checkSyntax() error { +func (b *bisyncRun) checkSyntax() (err error) { // check for odd number of quotes in path, usually indicating an escaping issue path1 := bilib.FsPath(b.fs1) path2 := bilib.FsPath(b.fs2) @@ -634,25 +643,3 @@ func waitFor(msg string, totalWait time.Duration, fn func() bool) (ok bool) { } return false } - -// mainly to make sure tests don't interfere with each other when running more than one -func resetGlobals() { - downloadHash = false - logger = operations.NewLoggerOpt() - ignoreListingChecksum = false - ignoreListingModtime = false - hashTypes = nil - queueCI = nil - hashType = 0 - fsrc, fdst = nil, nil - fcrypt = nil - Opt = Options{} - once = gosync.Once{} - downloadHashWarn = gosync.Once{} - firstDownloadHash = gosync.Once{} - ls1 = newFileList() - ls2 = newFileList() - err = nil - firstErr = nil - marchCtx = nil -} diff --git a/cmd/bisync/queue.go b/cmd/bisync/queue.go index e299f80f7..f618619ff 100644 --- a/cmd/bisync/queue.go +++ b/cmd/bisync/queue.go @@ -51,19 +51,19 @@ func (rs *ResultsSlice) has(name string) bool { return false } -var ( - logger = operations.NewLoggerOpt() +type bisyncQueueOpt struct { + logger operations.LoggerOpt lock mutex.Mutex once mutex.Once ignoreListingChecksum bool ignoreListingModtime bool hashTypes map[string]hash.Type queueCI *fs.ConfigInfo -) +} // allows us to get the right hashtype during the LoggerFn without knowing whether it's Path1/Path2 -func getHashType(fname string) hash.Type { - ht, ok := hashTypes[fname] +func (b *bisyncRun) getHashType(fname string) hash.Type { + ht, ok := b.queueOpt.hashTypes[fname] if ok { return ht } @@ -106,9 +106,9 @@ func altName(name string, src, dst fs.DirEntry) string { } // WriteResults is 
Bisync's LoggerFn -func WriteResults(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEntry, err error) { - lock.Lock() - defer lock.Unlock() +func (b *bisyncRun) WriteResults(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEntry, err error) { + b.queueOpt.lock.Lock() + defer b.queueOpt.lock.Unlock() opt := operations.GetLoggerOpt(ctx) result := Results{ @@ -131,14 +131,14 @@ func WriteResults(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEn result.Flags = "-" if side != nil { result.Size = side.Size() - if !ignoreListingModtime { + if !b.queueOpt.ignoreListingModtime { result.Modtime = side.ModTime(ctx).In(TZ) } - if !ignoreListingChecksum { + if !b.queueOpt.ignoreListingChecksum { sideObj, ok := side.(fs.ObjectInfo) if ok { - result.Hash, _ = sideObj.Hash(ctx, getHashType(sideObj.Fs().Name())) - result.Hash, _ = tryDownloadHash(ctx, sideObj, result.Hash) + result.Hash, _ = sideObj.Hash(ctx, b.getHashType(sideObj.Fs().Name())) + result.Hash, _ = b.tryDownloadHash(ctx, sideObj, result.Hash) } } @@ -159,8 +159,8 @@ func WriteResults(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEn } prettyprint(result, "writing result", fs.LogLevelDebug) - if result.Size < 0 && result.Flags != "d" && ((queueCI.CheckSum && !downloadHash) || queueCI.SizeOnly) { - once.Do(func() { + if result.Size < 0 && result.Flags != "d" && ((b.queueOpt.queueCI.CheckSum && !b.downloadHashOpt.downloadHash) || b.queueOpt.queueCI.SizeOnly) { + b.queueOpt.once.Do(func() { fs.Log(result.Name, Color(terminal.YellowFg, "Files of unknown size (such as Google Docs) do not sync reliably with --checksum or --size-only. 
Consider using modtime instead (the default) or --drive-skip-gdocs")) }) } @@ -189,14 +189,14 @@ func ReadResults(results io.Reader) []Results { // for setup code shared by both fastCopy and resyncDir func (b *bisyncRun) preCopy(ctx context.Context) context.Context { - queueCI = fs.GetConfig(ctx) - ignoreListingChecksum = b.opt.IgnoreListingChecksum - ignoreListingModtime = !b.opt.Compare.Modtime - hashTypes = map[string]hash.Type{ + b.queueOpt.queueCI = fs.GetConfig(ctx) + b.queueOpt.ignoreListingChecksum = b.opt.IgnoreListingChecksum + b.queueOpt.ignoreListingModtime = !b.opt.Compare.Modtime + b.queueOpt.hashTypes = map[string]hash.Type{ b.fs1.Name(): b.opt.Compare.HashType1, b.fs2.Name(): b.opt.Compare.HashType2, } - logger.LoggerFn = WriteResults + b.queueOpt.logger.LoggerFn = b.WriteResults overridingEqual := false if (b.opt.Compare.Modtime && b.opt.Compare.Checksum) || b.opt.Compare.DownloadHash { overridingEqual = true @@ -209,15 +209,15 @@ func (b *bisyncRun) preCopy(ctx context.Context) context.Context { fs.Debugf(nil, "overriding equal") ctx = b.EqualFn(ctx) } - ctxCopyLogger := operations.WithSyncLogger(ctx, logger) + ctxCopyLogger := operations.WithSyncLogger(ctx, b.queueOpt.logger) if b.opt.Compare.Checksum && (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.opt.Compare.SlowHashDetected { // set here in case !b.opt.Compare.Modtime - queueCI = fs.GetConfig(ctxCopyLogger) + b.queueOpt.queueCI = fs.GetConfig(ctxCopyLogger) if b.opt.Compare.NoSlowHash { - queueCI.CheckSum = false + b.queueOpt.queueCI.CheckSum = false } if b.opt.Compare.SlowHashSyncOnly && !overridingEqual { - queueCI.CheckSum = true + b.queueOpt.queueCI.CheckSum = true } } return ctxCopyLogger @@ -250,9 +250,9 @@ func (b *bisyncRun) fastCopy(ctx context.Context, fsrc, fdst fs.Fs, files bilib. 
ctxCopy, b.CancelSync = context.WithCancel(ctxCopy) b.testFn() err := sync.Sync(ctxCopy, fdst, fsrc, b.opt.CreateEmptySrcDirs) - prettyprint(logger, "logger", fs.LogLevelDebug) + prettyprint(b.queueOpt.logger, "b.queueOpt.logger", fs.LogLevelDebug) - getResults := ReadResults(logger.JSON) + getResults := ReadResults(b.queueOpt.logger.JSON) fs.Debugf(nil, "Got %v results for %v", len(getResults), queueName) lineFormat := "%s %8d %s %s %s %q\n" @@ -292,9 +292,9 @@ func (b *bisyncRun) resyncDir(ctx context.Context, fsrc, fdst fs.Fs) ([]Results, ctx = b.preCopy(ctx) err := sync.CopyDir(ctx, fdst, fsrc, b.opt.CreateEmptySrcDirs) - prettyprint(logger, "logger", fs.LogLevelDebug) + prettyprint(b.queueOpt.logger, "b.queueOpt.logger", fs.LogLevelDebug) - getResults := ReadResults(logger.JSON) + getResults := ReadResults(b.queueOpt.logger.JSON) fs.Debugf(nil, "Got %v results for %v", len(getResults), "resync") return getResults, err diff --git a/cmd/bisync/resolve.go b/cmd/bisync/resolve.go index 21624580d..d40ddb5e6 100644 --- a/cmd/bisync/resolve.go +++ b/cmd/bisync/resolve.go @@ -135,7 +135,7 @@ type namePair struct { newName string } -func (b *bisyncRun) resolve(ctxMove context.Context, path1, path2, file, alias string, renameSkipped, copy1to2, copy2to1 *bilib.Names, ds1, ds2 *deltaSet) error { +func (b *bisyncRun) resolve(ctxMove context.Context, path1, path2, file, alias string, renameSkipped, copy1to2, copy2to1 *bilib.Names, ds1, ds2 *deltaSet) (err error) { winningPath := 0 if b.opt.ConflictResolve != PreferNone { winningPath = b.conflictWinner(ds1, ds2, file, alias) @@ -261,15 +261,15 @@ func (ri *renamesInfo) getNames(is1to2 bool) (srcOldName, srcNewName, dstOldName func (b *bisyncRun) numerate(ctx context.Context, startnum int, file, alias string) int { for i := startnum; i < math.MaxInt; i++ { iStr := fmt.Sprint(i) - if !ls1.has(SuffixName(ctx, file, b.opt.ConflictSuffix1+iStr)) && - !ls1.has(SuffixName(ctx, alias, b.opt.ConflictSuffix1+iStr)) && - 
!ls2.has(SuffixName(ctx, file, b.opt.ConflictSuffix2+iStr)) && - !ls2.has(SuffixName(ctx, alias, b.opt.ConflictSuffix2+iStr)) { + if !b.march.ls1.has(SuffixName(ctx, file, b.opt.ConflictSuffix1+iStr)) && + !b.march.ls1.has(SuffixName(ctx, alias, b.opt.ConflictSuffix1+iStr)) && + !b.march.ls2.has(SuffixName(ctx, file, b.opt.ConflictSuffix2+iStr)) && + !b.march.ls2.has(SuffixName(ctx, alias, b.opt.ConflictSuffix2+iStr)) { // make sure it still holds true with suffixes switched (it should) - if !ls1.has(SuffixName(ctx, file, b.opt.ConflictSuffix2+iStr)) && - !ls1.has(SuffixName(ctx, alias, b.opt.ConflictSuffix2+iStr)) && - !ls2.has(SuffixName(ctx, file, b.opt.ConflictSuffix1+iStr)) && - !ls2.has(SuffixName(ctx, alias, b.opt.ConflictSuffix1+iStr)) { + if !b.march.ls1.has(SuffixName(ctx, file, b.opt.ConflictSuffix2+iStr)) && + !b.march.ls1.has(SuffixName(ctx, alias, b.opt.ConflictSuffix2+iStr)) && + !b.march.ls2.has(SuffixName(ctx, file, b.opt.ConflictSuffix1+iStr)) && + !b.march.ls2.has(SuffixName(ctx, alias, b.opt.ConflictSuffix1+iStr)) { fs.Debugf(file, "The first available suffix is: %s", iStr) return i } @@ -280,10 +280,10 @@ func (b *bisyncRun) numerate(ctx context.Context, startnum int, file, alias stri // like numerate, but consider only one side's suffix (for when suffixes are different) func (b *bisyncRun) numerateSingle(ctx context.Context, startnum int, file, alias string, path int) int { - lsA, lsB := ls1, ls2 + lsA, lsB := b.march.ls1, b.march.ls2 suffix := b.opt.ConflictSuffix1 if path == 2 { - lsA, lsB = ls2, ls1 + lsA, lsB = b.march.ls2, b.march.ls1 suffix = b.opt.ConflictSuffix2 } for i := startnum; i < math.MaxInt; i++ { @@ -299,7 +299,7 @@ func (b *bisyncRun) numerateSingle(ctx context.Context, startnum int, file, alia return 0 // not really possible, as no one has 9223372036854775807 conflicts, and if they do, they have bigger problems } -func (b *bisyncRun) rename(ctx context.Context, thisNamePair namePair, thisPath, thatPath string, thisFs fs.Fs, 
thisPathNum, thatPathNum, winningPath int, q, renameSkipped *bilib.Names) error { +func (b *bisyncRun) rename(ctx context.Context, thisNamePair namePair, thisPath, thatPath string, thisFs fs.Fs, thisPathNum, thatPathNum, winningPath int, q, renameSkipped *bilib.Names) (err error) { if winningPath == thisPathNum { b.indent(fmt.Sprintf("!Path%d", thisPathNum), thisPath+thisNamePair.newName, fmt.Sprintf("Not renaming Path%d copy, as it was determined the winner", thisPathNum)) } else { @@ -321,7 +321,7 @@ func (b *bisyncRun) rename(ctx context.Context, thisNamePair namePair, thisPath, return nil } -func (b *bisyncRun) delete(ctx context.Context, thisNamePair namePair, thisPath, thatPath string, thisFs fs.Fs, thisPathNum, thatPathNum int, renameSkipped *bilib.Names) error { +func (b *bisyncRun) delete(ctx context.Context, thisNamePair namePair, thisPath, thatPath string, thisFs fs.Fs, thisPathNum, thatPathNum int, renameSkipped *bilib.Names) (err error) { skip := operations.SkipDestructive(ctx, thisNamePair.oldName, "delete") if !skip { b.indent(fmt.Sprintf("!Path%d", thisPathNum), thisPath+thisNamePair.oldName, fmt.Sprintf("Deleting Path%d copy", thisPathNum)) diff --git a/cmd/bisync/resync.go b/cmd/bisync/resync.go index 4606a43e1..6df61089a 100644 --- a/cmd/bisync/resync.go +++ b/cmd/bisync/resync.go @@ -41,12 +41,12 @@ func (b *bisyncRun) setResyncDefaults() { // It will generate path1 and path2 listings, // copy any unique files to the opposite path, // and resolve any differing files according to the --resync-mode. 
-func (b *bisyncRun) resync(octx, fctx context.Context) error { +func (b *bisyncRun) resync(octx, fctx context.Context) (err error) { fs.Infof(nil, "Copying Path2 files to Path1") // Save blank filelists (will be filled from sync results) - var ls1 = newFileList() - var ls2 = newFileList() + ls1 := newFileList() + ls2 := newFileList() err = ls1.save(fctx, b.newListing1) if err != nil { b.handleErr(ls1, "error saving ls1 from resync", err, true, true) diff --git a/docs/content/bisync.md b/docs/content/bisync.md index e36988c37..957314f44 100644 --- a/docs/content/bisync.md +++ b/docs/content/bisync.md @@ -1815,6 +1815,9 @@ about _Unison_ and synchronization in general. ## Changelog +### `v1.71` +* Fixed an issue causing errors when running concurrent bisync runs through the `rc`. + ### `v1.69.1` * Fixed an issue causing listings to not capture concurrent modifications under certain conditions