mirror of https://github.com/rclone/rclone.git synced 2026-01-31 16:53:44 +00:00

Compare commits


1 Commit

Author | SHA1 | Message | Date
Nick Craig-Wood | dc78afb1b3 | oauthutil: warn if rclone authorize code can't be decoded #2732 | 2025-08-01 15:37:53 +01:00
147 changed files with 2605 additions and 3679 deletions

View File

@@ -29,12 +29,12 @@ jobs:
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.24']
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.23']
include:
- job_name: linux
os: ubuntu-latest
go: '>=1.25.0-rc.1'
go: '>=1.24.0-rc.1'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
@@ -45,14 +45,14 @@ jobs:
- job_name: linux_386
os: ubuntu-latest
go: '>=1.25.0-rc.1'
go: '>=1.24.0-rc.1'
goarch: 386
gotags: cmount
quicktest: true
- job_name: mac_amd64
os: macos-latest
go: '>=1.25.0-rc.1'
go: '>=1.24.0-rc.1'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
@@ -61,14 +61,14 @@ jobs:
- job_name: mac_arm64
os: macos-latest
go: '>=1.25.0-rc.1'
go: '>=1.24.0-rc.1'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows
os: windows-latest
go: '>=1.25.0-rc.1'
go: '>=1.24.0-rc.1'
gotags: cmount
cgo: '0'
build_flags: '-include "^windows/"'
@@ -78,14 +78,14 @@ jobs:
- job_name: other_os
os: ubuntu-latest
go: '>=1.25.0-rc.1'
go: '>=1.24.0-rc.1'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true
- job_name: go1.24
- job_name: go1.23
os: ubuntu-latest
go: '1.24'
go: '1.23'
quicktest: true
racequicktest: true
@@ -95,7 +95,7 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
fetch-depth: 0
@@ -216,7 +216,7 @@ jobs:
echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
fetch-depth: 0
@@ -224,7 +224,7 @@ jobs:
id: setup-go
uses: actions/setup-go@v5
with:
go-version: '1.24'
go-version: '>=1.23.0-rc.1'
check-latest: true
cache: false
@@ -304,7 +304,7 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
fetch-depth: 0
@@ -312,7 +312,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '>=1.25.0-rc.1'
go-version: '>=1.24.0-rc.1'
- name: Set global environment variables
run: |

View File

@@ -52,7 +52,7 @@ jobs:
df -h .
- name: Checkout Repository
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
fetch-depth: 0
@@ -198,7 +198,7 @@ jobs:
steps:
- name: Download Image Digests
uses: actions/download-artifact@v5
uses: actions/download-artifact@v4
with:
path: /tmp/digests
pattern: digests-*

View File

@@ -30,7 +30,7 @@ jobs:
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build and publish docker plugin

View File

@@ -633,7 +633,6 @@ You'll need to modify the following files
- `docs/content/s3.md`
- Add the provider at the top of the page.
- Add a section about the provider linked from there.
- Make sure this is in alphabetical order in the `Providers` section.
- Add a transcript of a trial `rclone config` session
- Edit the transcript to remove things which might change in subsequent versions
- **Do not** alter or add to the autogenerated parts of `s3.md`

View File

@@ -39,7 +39,6 @@ directories to and from different cloud storage providers.
- Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
- Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
- Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
- Exaba [:page_facing_up:](https://rclone.org/s3/#exaba)
- Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
- FileLu [:page_facing_up:](https://rclone.org/filelu/)
- Files.com [:page_facing_up:](https://rclone.org/filescom/)

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !solaris && !js && !wasm
//go:build !plan9 && !solaris && !js
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
package azureblob
@@ -51,7 +51,6 @@ import (
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/multipart"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/pool"
"golang.org/x/sync/errgroup"
)
@@ -2671,13 +2670,6 @@ func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
return -1, err
}
// Only account after the checksum reads have been done
if do, ok := reader.(pool.DelayAccountinger); ok {
// To figure out this number, do a transfer and if the accounted size is 0 or a
// multiple of what it should be, increase or decrease this number.
do.DelayAccounting(2)
}
// Upload the block, with MD5 for check
m := md5.New()
currentChunkSize, err := io.Copy(m, reader)
@@ -2765,8 +2757,6 @@ func (o *Object) clearUncommittedBlocks(ctx context.Context) (err error) {
blockList blockblob.GetBlockListResponse
properties *blob.GetPropertiesResponse
options *blockblob.CommitBlockListOptions
// Use temporary pacer as this can be called recursively which can cause a deadlock with --max-connections
pacer = fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
)
properties, err = o.readMetaDataAlways(ctx)
@@ -2778,7 +2768,7 @@ func (o *Object) clearUncommittedBlocks(ctx context.Context) (err error) {
if objectExists {
// Get the committed block list
err = pacer.Call(func() (bool, error) {
err = o.fs.pacer.Call(func() (bool, error) {
blockList, err = blockBlobSVC.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
return o.fs.shouldRetry(ctx, err)
})
@@ -2820,7 +2810,7 @@ func (o *Object) clearUncommittedBlocks(ctx context.Context) (err error) {
// Commit only the committed blocks
fs.Debugf(o, "Committing %d blocks to remove uncommitted blocks", len(blockIDs))
err = pacer.Call(func() (bool, error) {
err = o.fs.pacer.Call(func() (bool, error) {
_, err := blockBlobSVC.CommitBlockList(ctx, blockIDs, options)
return o.fs.shouldRetry(ctx, err)
})
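
A note on the `pool.DelayAccountinger` hunk above: the chunk is read through an MD5 hasher for the integrity check before the real upload read, so accounting has to skip the checksum passes or the transfer stats double-count the data. A minimal sketch of the pattern; the interface shape is inferred from the call site above, not quoted from `lib/pool`:

```go
package sketch

import "io"

// DelayAccountinger is the assumed shape of the interface used above: it
// lets a wrapped reader ignore the first i passes over the data.
type DelayAccountinger interface {
	DelayAccounting(i int)
}

// delayForChecksums skips accounting for the checksum read passes so the
// chunk is only counted once, on the final upload read.
func delayForChecksums(r io.Reader, checksumPasses int) {
	if do, ok := r.(DelayAccountinger); ok {
		do.DelayAccounting(checksumPasses)
	}
}
```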

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !solaris && !js && !wasm
//go:build !plan9 && !solaris && !js
package azureblob

View File

@@ -1,6 +1,6 @@
// Test AzureBlob filesystem interface
//go:build !plan9 && !solaris && !js && !wasm
//go:build !plan9 && !solaris && !js
package azureblob

View File

@@ -1,7 +1,7 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || solaris || js || wasm
//go:build plan9 || solaris || js
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
package azureblob

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm
//go:build !plan9 && !js
// Package azurefiles provides an interface to Microsoft Azure Files
package azurefiles
@@ -453,7 +453,7 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
return nil, fmt.Errorf("create new shared key credential failed: %w", err)
}
case opt.UseAZ:
options := azidentity.AzureCLICredentialOptions{}
var options = azidentity.AzureCLICredentialOptions{}
cred, err = azidentity.NewAzureCLICredential(&options)
fmt.Println(cred)
if err != nil {
@@ -550,7 +550,7 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
case opt.UseMSI:
// Specifying a user-assigned identity. Exactly one of the above IDs must be specified.
// Validate and ensure exactly one is set. (To do: better validation.)
b2i := map[bool]int{false: 0, true: 1}
var b2i = map[bool]int{false: 0, true: 1}
set := b2i[opt.MSIClientID != ""] + b2i[opt.MSIObjectID != ""] + b2i[opt.MSIResourceID != ""]
if set > 1 {
return nil, errors.New("more than one user-assigned identity ID is set")
@@ -583,6 +583,7 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
token, err := msiCred.GetToken(context.Background(), policy.TokenRequestOptions{
Scopes: []string{"api://AzureADTokenExchange"},
})
if err != nil {
return "", fmt.Errorf("failed to acquire MSI token: %w", err)
}
@@ -854,7 +855,7 @@ func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
return entries, err
}
opt := &directory.ListFilesAndDirectoriesOptions{
var opt = &directory.ListFilesAndDirectoriesOptions{
Include: directory.ListFilesInclude{
Timestamps: true,
},
@@ -1013,10 +1014,6 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
SMBProperties: &file.SMBProperties{
LastWriteTime: &t,
},
HTTPHeaders: &file.HTTPHeaders{
ContentMD5: o.md5,
ContentType: &o.contentType,
},
}
_, err := o.fileClient().SetHTTPHeaders(ctx, &opt)
if err != nil {

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm
//go:build !plan9 && !js
package azurefiles

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm
//go:build !plan9 && !js
package azurefiles

View File

@@ -1,7 +1,7 @@
// Build for azurefiles for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || js || wasm
//go:build plan9 || js
// Package azurefiles provides an interface to Microsoft Azure Files
package azurefiles

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm
//go:build !plan9 && !js
// Package cache implements a virtual provider to cache existing remotes.
package cache

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm && !race
//go:build !plan9 && !js && !race
package cache_test

View File

@@ -1,6 +1,6 @@
// Test Cache filesystem interface
//go:build !plan9 && !js && !wasm && !race
//go:build !plan9 && !js && !race
package cache_test

View File

@@ -1,7 +1,7 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || js || wasm
//go:build plan9 || js
// Package cache implements a virtual provider to cache existing remotes.
package cache

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm && !race
//go:build !plan9 && !js && !race
package cache_test

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm
//go:build !plan9 && !js
package cache

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm
//go:build !plan9 && !js
package cache

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm
//go:build !plan9 && !js
package cache

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm
//go:build !plan9 && !js
package cache

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm
//go:build !plan9 && !js
package cache

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm
//go:build !plan9 && !js
package cache

View File

@@ -1,4 +1,5 @@
//go:build !plan9 && !js && !wasm
//go:build !plan9 && !js
// +build !plan9,!js
package cache

View File

@@ -1339,7 +1339,7 @@ func quotePath(s string) string {
seg := strings.Split(s, "/")
newValues := []string{}
for _, v := range seg {
newValues = append(newValues, url.QueryEscape(v))
newValues = append(newValues, url.PathEscape(v))
}
return strings.Join(newValues, "/")
}
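
The escaping change in `quotePath` above matters because net/url's two escapers treat a path segment differently: `QueryEscape` encodes a space as `+`, which a server decoding a path will read back literally. A quick runnable comparison:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// QueryEscape targets query strings: space becomes "+", "+" becomes "%2B".
	fmt.Println(url.QueryEscape("dir name")) // dir+name
	// PathEscape targets path segments: space becomes "%20".
	fmt.Println(url.PathEscape("dir name")) // dir%20name
	// Both escape "/", which is why quotePath splits on "/" first
	// and joins the escaped segments back together.
	fmt.Println(url.PathEscape("a/b")) // a%2Fb
}
```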

View File

@@ -1,4 +1,4 @@
//go:build windows || plan9 || js || wasm || linux
//go:build windows || plan9 || js || linux
package local

View File

@@ -1,4 +1,4 @@
//go:build !windows && !plan9 && !js && !wasm && !linux
//go:build !windows && !plan9 && !js && !linux
package local

View File

@@ -1,4 +1,4 @@
//go:build plan9 || js || wasm
//go:build plan9 || js
package local

View File

@@ -1,4 +1,4 @@
//go:build !windows && !plan9 && !js && !wasm
//go:build !windows && !plan9 && !js
package local

View File

@@ -671,12 +671,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
name := fi.Name()
mode := fi.Mode()
newRemote := f.cleanRemote(dir, name)
symlinkFlag := os.ModeSymlink
if runtime.GOOS == "windows" {
symlinkFlag |= os.ModeIrregular
}
// Follow symlinks if required
if f.opt.FollowSymlinks && (mode&symlinkFlag) != 0 {
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
localPath := filepath.Join(fsDirPath, name)
fi, err = os.Stat(localPath)
// Quietly skip errors on excluded files and directories
@@ -698,13 +694,13 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if fi.IsDir() {
// Ignore directories which are symlinks. These are junction points under windows which
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
if (mode&symlinkFlag) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
d := f.newDirectory(newRemote, fi)
entries = append(entries, d)
}
} else {
// Check whether this link should be translated
if f.opt.TranslateSymlinks && fi.Mode()&symlinkFlag != 0 {
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
newRemote += fs.LinkSuffix
}
// Don't include non directory if not included

View File

@@ -1,4 +1,4 @@
//go:build dragonfly || plan9 || js || wasm
//go:build dragonfly || plan9 || js
package local

View File

@@ -1,4 +1,4 @@
//go:build !windows && !plan9 && !js && !wasm
//go:build !windows && !plan9 && !js
package local

View File

@@ -1,4 +1,4 @@
//go:build windows || plan9 || js || wasm
//go:build windows || plan9 || js
package local

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !solaris && !js && !wasm
//go:build !plan9 && !solaris && !js
package oracleobjectstorage

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !solaris && !js && !wasm
//go:build !plan9 && !solaris && !js
package oracleobjectstorage

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !solaris && !js && !wasm
//go:build !plan9 && !solaris && !js
package oracleobjectstorage

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !solaris && !js && !wasm
//go:build !plan9 && !solaris && !js
package oracleobjectstorage

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !solaris && !js && !wasm
//go:build !plan9 && !solaris && !js
package oracleobjectstorage

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !solaris && !js && !wasm
//go:build !plan9 && !solaris && !js
package oracleobjectstorage

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !solaris && !js && !wasm
//go:build !plan9 && !solaris && !js
package oracleobjectstorage

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !solaris && !js && !wasm
//go:build !plan9 && !solaris && !js
// Package oracleobjectstorage provides an interface to the OCI object storage system.
package oracleobjectstorage
@@ -12,7 +12,6 @@ import (
"strings"
"time"
"github.com/ncw/swift/v2"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
@@ -34,46 +33,9 @@ func init() {
NewFs: NewFs,
CommandHelp: commandHelp,
Options: newOptions(),
MetadataInfo: &fs.MetadataInfo{
System: systemMetadataInfo,
Help: `User metadata is stored as opc-meta- keys.`,
},
})
}
var systemMetadataInfo = map[string]fs.MetadataHelp{
"opc-meta-mode": {
Help: "File type and mode",
Type: "octal, unix style",
Example: "0100664",
},
"opc-meta-uid": {
Help: "User ID of owner",
Type: "decimal number",
Example: "500",
},
"opc-meta-gid": {
Help: "Group ID of owner",
Type: "decimal number",
Example: "500",
},
"opc-meta-atime": {
Help: "Time of last access",
Type: "ISO 8601",
Example: "2025-06-30T22:27:43-04:00",
},
"opc-meta-mtime": {
Help: "Time of last modification",
Type: "ISO 8601",
Example: "2025-06-30T22:27:43-04:00",
},
"opc-meta-btime": {
Help: "Time of file birth (creation)",
Type: "ISO 8601",
Example: "2025-06-30T22:27:43-04:00",
},
}
// Fs represents a remote object storage server
type Fs struct {
name string // name of this remote
@@ -120,7 +82,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
f.setRoot(root)
f.features = (&fs.Features{
ReadMetadata: true,
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
@@ -727,38 +688,6 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
return list.Flush()
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
err = o.readMetaData(ctx)
if err != nil {
return nil, err
}
metadata = make(fs.Metadata, len(o.meta)+7)
for k, v := range o.meta {
switch k {
case metaMtime:
if modTime, err := swift.FloatStringToTime(v); err == nil {
metadata["mtime"] = modTime.Format(time.RFC3339Nano)
}
case metaMD5Hash:
// don't write hash metadata
default:
metadata[k] = v
}
}
if o.mimeType != "" {
metadata["content-type"] = o.mimeType
}
if !o.lastModified.IsZero() {
metadata["btime"] = o.lastModified.Format(time.RFC3339Nano)
}
return metadata, nil
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !solaris && !js && !wasm
//go:build !plan9 && !solaris && !js
package oracleobjectstorage

View File

@@ -1,7 +1,7 @@
// Build for oracleobjectstorage for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || solaris || js || wasm
//go:build plan9 || solaris || js
// Package oracleobjectstorage provides an interface to the OCI object storage system.
package oracleobjectstorage

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !solaris && !js && !wasm
//go:build !plan9 && !solaris && !js
package oracleobjectstorage

View File

@@ -1508,30 +1508,8 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, gcid string,
}
if new.File == nil {
return nil, fmt.Errorf("invalid response: %+v", new)
}
defer atexit.OnError(&err, func() {
fs.Debugf(leaf, "canceling upload: %v", err)
if cancelErr := f.deleteObjects(ctx, []string{new.File.ID}, false); cancelErr != nil {
fs.Logf(leaf, "failed to cancel upload: %v", cancelErr)
}
if new.Task != nil {
if cancelErr := f.deleteTask(ctx, new.Task.ID, false); cancelErr != nil {
fs.Logf(leaf, "failed to cancel upload: %v", cancelErr)
}
fs.Debugf(leaf, "waiting %v for the cancellation to be effective", taskWaitTime)
time.Sleep(taskWaitTime)
}
})()
// Note: The API might automatically append a numbered suffix to the filename,
// even if a file with the same name does not exist in the target directory.
if upName := f.opt.Enc.ToStandardName(new.File.Name); leaf != upName {
return nil, fserrors.NoRetryError(fmt.Errorf("uploaded file name mismatch: expected %q, got %q", leaf, upName))
}
// early return; in case of zero-byte objects or uploaded by matched gcid
if new.File.Phase == api.PhaseTypeComplete {
} else if new.File.Phase == api.PhaseTypeComplete {
// early return; in case of zero-byte objects
if acc, ok := in.(*accounting.Account); ok && acc != nil {
// if `in io.Reader` is still in type of `*accounting.Account` (meaning that it is unused)
// it is considered as a server side copy as no incoming/outgoing traffic occur at all
@@ -1541,6 +1519,18 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, gcid string,
return new.File, nil
}
defer atexit.OnError(&err, func() {
fs.Debugf(leaf, "canceling upload: %v", err)
if cancelErr := f.deleteObjects(ctx, []string{new.File.ID}, false); cancelErr != nil {
fs.Logf(leaf, "failed to cancel upload: %v", cancelErr)
}
if cancelErr := f.deleteTask(ctx, new.Task.ID, false); cancelErr != nil {
fs.Logf(leaf, "failed to cancel upload: %v", cancelErr)
}
fs.Debugf(leaf, "waiting %v for the cancellation to be effective", taskWaitTime)
time.Sleep(taskWaitTime)
})()
if uploadType == api.UploadTypeForm && new.Form != nil {
err = f.uploadByForm(ctx, in, req.Name, size, new.Form, options...)
} else if uploadType == api.UploadTypeResumable && new.Resumable != nil {
@@ -1552,9 +1542,6 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, gcid string,
if err != nil {
return nil, fmt.Errorf("failed to upload: %w", err)
}
if new.Task == nil {
return new.File, nil
}
return new.File, f.waitTask(ctx, new.Task.ID)
}
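
The hunks above move when the cancel-on-error cleanup is registered relative to the early return for already-complete files. The `defer atexit.OnError(&err, func() {...})()` idiom runs the callback only when the surrounding function returns a non-nil error; a hedged sketch of the pattern, where `createFile`, `deleteFile` and `transfer` are placeholders rather than rclone APIs:

```go
// upload sketches the cancel-on-error idiom from the hunk above.
func upload(ctx context.Context) (err error) {
	id, err := createFile(ctx) // placeholder for the create call
	if err != nil {
		return err
	}
	// Registered only after creation succeeds; the callback fires solely
	// when upload returns err != nil, deleting the half-made object.
	defer atexit.OnError(&err, func() {
		if cancelErr := deleteFile(ctx, id); cancelErr != nil {
			fs.Logf(nil, "failed to cancel upload: %v", cancelErr)
		}
	})()
	return transfer(ctx, id) // placeholder for the data transfer
}
```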

View File

@@ -1,5 +1,3 @@
//go:build !wasm
// Package protondrive implements the Proton Drive backend
package protondrive

View File

@@ -1,5 +1,3 @@
//go:build !wasm
package protondrive_test
import (

View File

@@ -1,7 +0,0 @@
// Build for sftp for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build wasm
// Package protondrive implements the Proton Drive backend
package protondrive

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm
//go:build !plan9 && !js
// Package qingstor provides an interface to QingStor object storage
// Home: https://www.qingcloud.com/

View File

@@ -1,6 +1,6 @@
// Test QingStor filesystem interface
//go:build !plan9 && !js && !wasm
//go:build !plan9 && !js
package qingstor

View File

@@ -1,7 +1,7 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || js || wasm
//go:build plan9 || js
// Package qingstor provides an interface to QingStor object storage
// Home: https://www.qingcloud.com/

View File

@@ -1,6 +1,6 @@
// Upload object to QingStor
//go:build !plan9 && !js && !wasm
//go:build !plan9 && !js
package qingstor

View File

@@ -2549,11 +2549,6 @@ See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/lates
See: [AWS S3 Transfer acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration-examples.html)`,
Default: false,
Advanced: true,
}, {
Name: "use_arn_region",
Help: `If true, enables arn region support for the service.`,
Default: false,
Advanced: true,
}, {
Name: "leave_parts_on_error",
Provider: "AWS",
@@ -3101,7 +3096,6 @@ type Options struct {
ForcePathStyle bool `config:"force_path_style"`
V2Auth bool `config:"v2_auth"`
UseAccelerateEndpoint bool `config:"use_accelerate_endpoint"`
UseARNRegion bool `config:"use_arn_region"`
LeavePartsOnError bool `config:"leave_parts_on_error"`
ListChunk int32 `config:"list_chunk"`
ListVersion int `config:"list_version"`
@@ -3466,7 +3460,6 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Cli
options = append(options, func(s3Opt *s3.Options) {
s3Opt.UsePathStyle = opt.ForcePathStyle
s3Opt.UseAccelerate = opt.UseAccelerateEndpoint
s3Opt.UseARNRegion = opt.UseARNRegion
// FIXME maybe this should be a tristate so can default to DualStackEndpointStateUnset?
if opt.UseDualStack {
s3Opt.EndpointOptions.UseDualStackEndpoint = aws.DualStackEndpointStateEnabled
@@ -3635,7 +3628,7 @@ func setQuirks(opt *Options) {
useUnsignedPayload = false // AWS has trailer support which means it adds checksums in the trailer without seeking
case "Alibaba":
useMultipartEtag = false // Alibaba seems to calculate multipart Etags differently from AWS
useAlreadyExists = true // returns 200 OK
useAlreadyExists = false // returns BucketAlreadyExists
case "HuaweiOBS":
// Huawei OBS PFS is not support listObjectV2, and if turn on the urlEncodeListing, marker will not work and keep list same page forever.
urlEncodeListings = false
@@ -6093,7 +6086,7 @@ func (o *Object) setMetaData(resp *s3.HeadObjectOutput) {
o.storageClass = stringClone(string(resp.StorageClass))
o.cacheControl = stringClonePointer(resp.CacheControl)
o.contentDisposition = stringClonePointer(resp.ContentDisposition)
o.contentEncoding = stringClonePointer(removeAWSChunked(resp.ContentEncoding))
o.contentEncoding = stringClonePointer(resp.ContentEncoding)
o.contentLanguage = stringClonePointer(resp.ContentLanguage)
// If decompressing then size and md5sum are unknown
@@ -6161,36 +6154,6 @@ func (o *Object) Storable() bool {
return true
}
// removeAWSChunked removes the "aws-chunked" content-coding from a
// Content-Encoding field value (RFC 9110). Comparison is case-insensitive.
// Returns nil if encoding is empty after removal.
func removeAWSChunked(pv *string) *string {
if pv == nil {
return nil
}
v := *pv
if v == "" {
return nil
}
if !strings.Contains(strings.ToLower(v), "aws-chunked") {
return pv
}
parts := strings.Split(v, ",")
out := make([]string, 0, len(parts))
for _, p := range parts {
tok := strings.TrimSpace(p)
if tok == "" || strings.EqualFold(tok, "aws-chunked") {
continue
}
out = append(out, tok)
}
if len(out) == 0 {
return nil
}
v = strings.Join(out, ",")
return &v
}
func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options ...fs.OpenOption) (in io.ReadCloser, err error) {
url := o.fs.opt.DownloadURL + bucketPath
var resp *http.Response
@@ -6359,7 +6322,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
o.setMetaData(&head)
// Decompress body if necessary
if deref(removeAWSChunked(resp.ContentEncoding)) == "gzip" {
if deref(resp.ContentEncoding) == "gzip" {
if o.fs.opt.Decompress || (resp.ContentLength == nil && o.fs.opt.MightGzip.Value) {
return readers.NewGzipReader(resp.Body)
}
@@ -6783,7 +6746,7 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
case "content-disposition":
ui.req.ContentDisposition = pv
case "content-encoding":
ui.req.ContentEncoding = removeAWSChunked(pv)
ui.req.ContentEncoding = pv
case "content-language":
ui.req.ContentLanguage = pv
case "content-type":
@@ -6880,7 +6843,7 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
case "content-disposition":
ui.req.ContentDisposition = aws.String(value)
case "content-encoding":
ui.req.ContentEncoding = removeAWSChunked(aws.String(value))
ui.req.ContentEncoding = aws.String(value)
case "content-language":
ui.req.ContentLanguage = aws.String(value)
case "content-type":

View File

@@ -248,47 +248,6 @@ func TestMergeDeleteMarkers(t *testing.T) {
}
}
func TestRemoveAWSChunked(t *testing.T) {
ps := func(s string) *string {
return &s
}
tests := []struct {
name string
in *string
want *string
}{
{"nil", nil, nil},
{"empty", ps(""), nil},
{"only aws", ps("aws-chunked"), nil},
{"leading aws", ps("aws-chunked, gzip"), ps("gzip")},
{"trailing aws", ps("gzip, aws-chunked"), ps("gzip")},
{"middle aws", ps("gzip, aws-chunked, br"), ps("gzip,br")},
{"case insensitive", ps("GZip, AwS-ChUnKeD, Br"), ps("GZip,Br")},
{"duplicates", ps("aws-chunked , aws-chunked"), nil},
{"no aws normalize spaces", ps(" gzip , br "), ps(" gzip , br ")},
{"surrounding spaces", ps(" aws-chunked "), nil},
{"no change", ps("gzip, br"), ps("gzip, br")},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
got := removeAWSChunked(tc.in)
check := func(want, got *string) {
t.Helper()
if tc.want == nil {
assert.Nil(t, got)
} else {
require.NotNil(t, got)
assert.Equal(t, *tc.want, *got)
}
}
check(tc.want, got)
// Idempotent
got2 := removeAWSChunked(got)
check(got, got2)
})
}
}
func (f *Fs) InternalTestVersions(t *testing.T) {
ctx := context.Background()

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !wasm
//go:build !plan9
// Package sftp provides a filesystem interface using github.com/pkg/sftp
package sftp

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !wasm
//go:build !plan9
package sftp

View File

@@ -1,6 +1,6 @@
// Test Sftp filesystem interface
//go:build !plan9 && !wasm
//go:build !plan9
package sftp_test

View File

@@ -1,7 +1,7 @@
// Build for sftp for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || wasm
//go:build plan9
// Package sftp provides a filesystem interface using github.com/pkg/sftp
package sftp

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !wasm
//go:build !plan9
package sftp

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !wasm
//go:build !plan9
package sftp

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !wasm
//go:build !plan9
package sftp

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !wasm
//go:build !plan9
package sftp

View File

@@ -1,4 +1,4 @@
//go:build !plan9 && !wasm
//go:build !plan9
package sftp

View File

@@ -1,99 +0,0 @@
package smb
import (
"context"
"fmt"
"os"
"sync"
"github.com/cloudsoda/go-smb2"
"golang.org/x/sync/errgroup"
)
// FsInterface defines the methods that filePool needs from Fs
type FsInterface interface {
getConnection(ctx context.Context, share string) (*conn, error)
putConnection(pc **conn, err error)
removeSession()
}
type file struct {
*smb2.File
c *conn
}
type filePool struct {
ctx context.Context
fs FsInterface
share string
path string
mu sync.Mutex
pool []*file
}
func newFilePool(ctx context.Context, fs FsInterface, share, path string) *filePool {
return &filePool{
ctx: ctx,
fs: fs,
share: share,
path: path,
}
}
func (p *filePool) get() (*file, error) {
p.mu.Lock()
if len(p.pool) > 0 {
f := p.pool[len(p.pool)-1]
p.pool = p.pool[:len(p.pool)-1]
p.mu.Unlock()
return f, nil
}
p.mu.Unlock()
c, err := p.fs.getConnection(p.ctx, p.share)
if err != nil {
return nil, err
}
fl, err := c.smbShare.OpenFile(p.path, os.O_WRONLY, 0o644)
if err != nil {
p.fs.putConnection(&c, err)
return nil, fmt.Errorf("failed to open: %w", err)
}
return &file{File: fl, c: c}, nil
}
func (p *filePool) put(f *file, err error) {
if f == nil {
return
}
if err != nil {
_ = f.Close()
p.fs.putConnection(&f.c, err)
return
}
p.mu.Lock()
p.pool = append(p.pool, f)
p.mu.Unlock()
}
func (p *filePool) drain() error {
p.mu.Lock()
files := p.pool
p.pool = nil
p.mu.Unlock()
g, _ := errgroup.WithContext(p.ctx)
for _, f := range files {
g.Go(func() error {
err := f.Close()
p.fs.putConnection(&f.c, err)
return err
})
}
return g.Wait()
}
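
For reference, a hedged sketch of how a caller drives the pool above; the share and path literals are placeholders:

```go
// writeAtViaPool writes buf at off through the pool, recycling the handle.
func writeAtViaPool(ctx context.Context, fsys FsInterface, buf []byte, off int64) error {
	pool := newFilePool(ctx, fsys, "share", "dir/file.bin")
	defer func() { _ = pool.drain() }() // close idle handles when done

	f, err := pool.get() // reuse an idle handle or open a new one
	if err != nil {
		return err
	}
	_, werr := f.WriteAt(buf, off)
	pool.put(f, werr) // on error the handle is closed and its conn returned
	return werr
}
```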

View File

@@ -1,228 +0,0 @@
package smb
import (
"context"
"errors"
"sync"
"testing"
"github.com/cloudsoda/go-smb2"
"github.com/stretchr/testify/assert"
)
// Mock Fs that implements FsInterface
type mockFs struct {
mu sync.Mutex
putConnectionCalled bool
putConnectionErr error
getConnectionCalled bool
getConnectionErr error
getConnectionResult *conn
removeSessionCalled bool
}
func (m *mockFs) putConnection(pc **conn, err error) {
m.mu.Lock()
defer m.mu.Unlock()
m.putConnectionCalled = true
m.putConnectionErr = err
}
func (m *mockFs) getConnection(ctx context.Context, share string) (*conn, error) {
m.mu.Lock()
defer m.mu.Unlock()
m.getConnectionCalled = true
if m.getConnectionErr != nil {
return nil, m.getConnectionErr
}
if m.getConnectionResult != nil {
return m.getConnectionResult, nil
}
return &conn{}, nil
}
func (m *mockFs) removeSession() {
m.mu.Lock()
defer m.mu.Unlock()
m.removeSessionCalled = true
}
func (m *mockFs) isPutConnectionCalled() bool {
m.mu.Lock()
defer m.mu.Unlock()
return m.putConnectionCalled
}
func (m *mockFs) getPutConnectionErr() error {
m.mu.Lock()
defer m.mu.Unlock()
return m.putConnectionErr
}
func (m *mockFs) isGetConnectionCalled() bool {
m.mu.Lock()
defer m.mu.Unlock()
return m.getConnectionCalled
}
func newMockFs() *mockFs {
return &mockFs{}
}
// Helper function to create a mock file
func newMockFile() *file {
return &file{
File: &smb2.File{},
c: &conn{},
}
}
// Test filePool creation
func TestNewFilePool(t *testing.T) {
ctx := context.Background()
fs := newMockFs()
share := "testshare"
path := "/test/path"
pool := newFilePool(ctx, fs, share, path)
assert.NotNil(t, pool)
assert.Equal(t, ctx, pool.ctx)
assert.Equal(t, fs, pool.fs)
assert.Equal(t, share, pool.share)
assert.Equal(t, path, pool.path)
assert.Empty(t, pool.pool)
}
// Test getting file from pool when pool has files
func TestFilePool_Get_FromPool(t *testing.T) {
ctx := context.Background()
fs := newMockFs()
pool := newFilePool(ctx, fs, "testshare", "/test/path")
// Add a mock file to the pool
mockFile := newMockFile()
pool.pool = append(pool.pool, mockFile)
// Get file from pool
f, err := pool.get()
assert.NoError(t, err)
assert.NotNil(t, f)
assert.Equal(t, mockFile, f)
assert.Empty(t, pool.pool)
}
// Test getting file when pool is empty
func TestFilePool_Get_EmptyPool(t *testing.T) {
ctx := context.Background()
fs := newMockFs()
// Set up the mock to return an error from getConnection
// This tests that the pool calls getConnection when empty
fs.getConnectionErr = errors.New("connection failed")
pool := newFilePool(ctx, fs, "testshare", "test/path")
// This should call getConnection and return the error
f, err := pool.get()
assert.Error(t, err)
assert.Nil(t, f)
assert.True(t, fs.isGetConnectionCalled())
assert.Equal(t, "connection failed", err.Error())
}
// Test putting file successfully
func TestFilePool_Put_Success(t *testing.T) {
ctx := context.Background()
fs := newMockFs()
pool := newFilePool(ctx, fs, "testshare", "/test/path")
mockFile := newMockFile()
pool.put(mockFile, nil)
assert.Len(t, pool.pool, 1)
assert.Equal(t, mockFile, pool.pool[0])
}
// Test putting file with error
func TestFilePool_Put_WithError(t *testing.T) {
ctx := context.Background()
fs := newMockFs()
pool := newFilePool(ctx, fs, "testshare", "/test/path")
mockFile := newMockFile()
pool.put(mockFile, errors.New("write error"))
// Should call putConnection with error
assert.True(t, fs.isPutConnectionCalled())
assert.Equal(t, errors.New("write error"), fs.getPutConnectionErr())
assert.Empty(t, pool.pool)
}
// Test putting nil file
func TestFilePool_Put_NilFile(t *testing.T) {
ctx := context.Background()
fs := newMockFs()
pool := newFilePool(ctx, fs, "testshare", "/test/path")
// Should not panic
pool.put(nil, nil)
pool.put(nil, errors.New("some error"))
assert.Empty(t, pool.pool)
}
// Test draining pool with files
func TestFilePool_Drain_WithFiles(t *testing.T) {
ctx := context.Background()
fs := newMockFs()
pool := newFilePool(ctx, fs, "testshare", "/test/path")
// Add mock files to pool
mockFile1 := newMockFile()
mockFile2 := newMockFile()
pool.pool = append(pool.pool, mockFile1, mockFile2)
// Before draining
assert.Len(t, pool.pool, 2)
_ = pool.drain()
assert.Empty(t, pool.pool)
}
// Test concurrent access to pool
func TestFilePool_ConcurrentAccess(t *testing.T) {
ctx := context.Background()
fs := newMockFs()
pool := newFilePool(ctx, fs, "testshare", "/test/path")
const numGoroutines = 10
for i := 0; i < numGoroutines; i++ {
mockFile := newMockFile()
pool.pool = append(pool.pool, mockFile)
}
// Test concurrent get operations
done := make(chan bool, numGoroutines)
for i := 0; i < numGoroutines; i++ {
go func() {
defer func() { done <- true }()
f, err := pool.get()
if err == nil {
pool.put(f, nil)
}
}()
}
for i := 0; i < numGoroutines; i++ {
<-done
}
// Pool should be in a consistent state after the concurrent access
assert.Len(t, pool.pool, numGoroutines)
}

View File

@@ -3,7 +3,6 @@ package smb
import (
"context"
"errors"
"fmt"
"io"
"os"
@@ -504,73 +503,13 @@ func (f *Fs) About(ctx context.Context) (_ *fs.Usage, err error) {
return usage, nil
}
type smbWriterAt struct {
pool *filePool
closed bool
closeMu sync.Mutex
wg sync.WaitGroup
}
func (w *smbWriterAt) WriteAt(p []byte, off int64) (int, error) {
w.closeMu.Lock()
if w.closed {
w.closeMu.Unlock()
return 0, errors.New("writer already closed")
}
w.wg.Add(1)
w.closeMu.Unlock()
defer w.wg.Done()
f, err := w.pool.get()
if err != nil {
return 0, fmt.Errorf("failed to get file from pool: %w", err)
}
n, writeErr := f.WriteAt(p, off)
w.pool.put(f, writeErr)
if writeErr != nil {
return n, fmt.Errorf("failed to write at offset %d: %w", off, writeErr)
}
return n, writeErr
}
func (w *smbWriterAt) Close() error {
w.closeMu.Lock()
defer w.closeMu.Unlock()
if w.closed {
return nil
}
w.closed = true
// Wait for all pending writes to finish
w.wg.Wait()
var errs []error
// Drain the pool
if err := w.pool.drain(); err != nil {
errs = append(errs, fmt.Errorf("failed to drain file pool: %w", err))
}
// Remove session
w.pool.fs.removeSession()
if len(errs) > 0 {
return errors.Join(errs...)
}
return nil
}
// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
var err error
o := &Object{
fs: f,
remote: remote,
@@ -580,42 +519,27 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
return nil, fs.ErrorIsDir
}
err := o.fs.ensureDirectory(ctx, share, filename)
err = o.fs.ensureDirectory(ctx, share, filename)
if err != nil {
return nil, fmt.Errorf("failed to make parent directories: %w", err)
}
smbPath := o.fs.toSambaPath(filename)
filename = o.fs.toSambaPath(filename)
o.fs.addSession() // Show session in use
defer o.fs.removeSession()
// One-time truncate
cn, err := o.fs.getConnection(ctx, share)
if err != nil {
return nil, err
}
file, err := cn.smbShare.OpenFile(smbPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644)
fl, err := cn.smbShare.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644)
if err != nil {
o.fs.putConnection(&cn, err)
return nil, err
return nil, fmt.Errorf("failed to open: %w", err)
}
if size > 0 {
if truncateErr := file.Truncate(size); truncateErr != nil {
_ = file.Close()
o.fs.putConnection(&cn, truncateErr)
return nil, fmt.Errorf("failed to truncate file: %w", truncateErr)
}
}
if closeErr := file.Close(); closeErr != nil {
o.fs.putConnection(&cn, closeErr)
return nil, fmt.Errorf("failed to close file after truncate: %w", closeErr)
}
o.fs.putConnection(&cn, nil)
// Add a new session
o.fs.addSession()
return &smbWriterAt{
pool: newFilePool(ctx, o.fs, share, smbPath),
}, nil
return fl, nil
}
// Shutdown the backend, closing any background tasks and any
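
For context on the `OpenWriterAt` rewrite above, a hedged sketch of the caller's side of the contract: several goroutines write disjoint ranges through one `fs.WriterAtCloser`, then `Close` is called once. The `chunk` type and range splitting here are hypothetical, not rclone code:

```go
type chunk struct {
	off  int64
	data []byte
}

func writeChunks(ctx context.Context, f fs.Fs, remote string, size int64, chunks []chunk) error {
	openWriterAt := f.Features().OpenWriterAt
	if openWriterAt == nil {
		return errors.New("backend does not support OpenWriterAt")
	}
	w, err := openWriterAt(ctx, remote, size)
	if err != nil {
		return err
	}
	g, _ := errgroup.WithContext(ctx)
	for _, c := range chunks { // per-iteration c is safe on Go 1.22+
		g.Go(func() error {
			_, err := w.WriteAt(c.data, c.off)
			return err
		})
	}
	werr := g.Wait()
	if cerr := w.Close(); werr == nil {
		werr = cerr
	}
	return werr
}
```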

View File

@@ -73,8 +73,7 @@ var osarches = []string{
"plan9/386",
"plan9/amd64",
"solaris/amd64",
"js/wasm",
"wasip1/wasm",
// "js/wasm", // Rclone is too big for js/wasm until https://github.com/golang/go/issues/64856 is fixed
}
// Special environment flags for a given arch

View File

@@ -176,7 +176,6 @@ var (
// Flag -refresh-times helps with Dropbox tests failing with message
// "src and dst identical but can't set mod time without deleting and re-uploading"
argRefreshTimes = flag.Bool("refresh-times", false, "Force refreshing the target modtime, useful for Dropbox (default: false)")
ignoreLogs = flag.Bool("ignore-logs", false, "skip comparing log lines but still compare listings")
)
// bisyncTest keeps all test data in a single place
@@ -227,18 +226,6 @@ var color = bisync.Color
// TestMain drives the tests
func TestMain(m *testing.M) {
bisync.LogTZ = time.UTC
ci := fs.GetConfig(context.TODO())
ciSave := *ci
defer func() {
*ci = ciSave
}()
// need to set context.TODO() here as we cannot pass a ctx to fs.LogLevelPrintf
ci.LogLevel = fs.LogLevelInfo
if *argDebug {
ci.LogLevel = fs.LogLevelDebug
}
fstest.Initialise()
fstest.TestMain(m)
}
@@ -251,8 +238,7 @@ func TestBisyncRemoteLocal(t *testing.T) {
fs.Logf(nil, "remote: %v", remote)
require.NoError(t, err)
defer cleanup()
ctx, _ := fs.AddConfig(context.TODO())
testBisync(ctx, t, remote, *argRemote2)
testBisync(t, remote, *argRemote2)
}
// Path1 is local, Path2 is remote
@@ -264,8 +250,7 @@ func TestBisyncLocalRemote(t *testing.T) {
fs.Logf(nil, "remote: %v", remote)
require.NoError(t, err)
defer cleanup()
ctx, _ := fs.AddConfig(context.TODO())
testBisync(ctx, t, *argRemote2, remote)
testBisync(t, *argRemote2, remote)
}
// Path1 and Path2 are both different directories on remote
@@ -275,34 +260,14 @@ func TestBisyncRemoteRemote(t *testing.T) {
fs.Logf(nil, "remote: %v", remote)
require.NoError(t, err)
defer cleanup()
ctx, _ := fs.AddConfig(context.TODO())
testBisync(ctx, t, remote, remote)
}
// make sure rc can cope with running concurrent jobs
func TestBisyncConcurrent(t *testing.T) {
if !isLocal(*fstest.RemoteName) {
t.Skip("TestBisyncConcurrent is skipped on non-local")
}
oldArgTestCase := argTestCase
*argTestCase = "basic"
*ignoreLogs = true // not useful to compare logs here because both runs will be logging at once
t.Cleanup(func() {
argTestCase = oldArgTestCase
*ignoreLogs = false
})
t.Run("test1", testParallel)
t.Run("test2", testParallel)
}
func testParallel(t *testing.T) {
t.Parallel()
TestBisyncRemoteRemote(t)
testBisync(t, remote, remote)
}
// TestBisync is a test engine for bisync test cases.
func testBisync(ctx context.Context, t *testing.T, path1, path2 string) {
func testBisync(t *testing.T, path1, path2 string) {
ctx := context.Background()
fstest.Initialise()
ci := fs.GetConfig(ctx)
ciSave := *ci
defer func() {
@@ -311,9 +276,8 @@ func testBisync(ctx context.Context, t *testing.T, path1, path2 string) {
if *argRefreshTimes {
ci.RefreshTimes = true
}
bisync.ColorsLock.Lock()
bisync.Colors = true
bisync.ColorsLock.Unlock()
time.Local = bisync.TZ
ci.FsCacheExpireDuration = fs.Duration(5 * time.Hour)
baseDir, err := os.Getwd()
@@ -599,15 +563,11 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase str
}
}
func isLocal(remote string) bool {
return bilib.IsLocalPath(remote) && !strings.HasPrefix(remote, ":") && !strings.Contains(remote, ",")
}
// makeTempRemote creates temporary folder and makes a filesystem
// if a local path is provided, it's ignored (the test will run under system temp)
func (b *bisyncTest) makeTempRemote(ctx context.Context, remote, subdir string) (f, parent fs.Fs, path, canon string) {
var err error
if isLocal(remote) {
if bilib.IsLocalPath(remote) && !strings.HasPrefix(remote, ":") && !strings.Contains(remote, ",") {
if remote != "" && !strings.HasPrefix(remote, "local") && *fstest.RemoteName != "" {
b.t.Fatalf(`Missing ":" in remote %q. Use "local" to test with local filesystem.`, remote)
}
@@ -638,8 +598,13 @@ func (b *bisyncTest) makeTempRemote(ctx context.Context, remote, subdir string)
}
func (b *bisyncTest) cleanupCase(ctx context.Context) {
_ = operations.Purge(ctx, b.fs1, "")
_ = operations.Purge(ctx, b.fs2, "")
// Silence "directory not found" errors from the ftp backend
_ = bilib.CaptureOutput(func() {
_ = operations.Purge(ctx, b.fs1, "")
})
_ = bilib.CaptureOutput(func() {
_ = operations.Purge(ctx, b.fs2, "")
})
_ = os.RemoveAll(b.workDir)
accounting.Stats(ctx).ResetCounters()
}
@@ -654,6 +619,11 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
defer func() {
*ci = ciSave
}()
ci.LogLevel = fs.LogLevelInfo
if b.debug {
ci.LogLevel = fs.LogLevelDebug
}
testFunc := func() {
src := filepath.Join(b.dataDir, "file7.txt")
@@ -983,12 +953,6 @@ func (b *bisyncTest) checkPreReqs(ctx context.Context, opt *bisync.Options) (con
b.fs2.Features().Disable("Copy") // API has longstanding bug for conflictBehavior=replace https://github.com/rclone/rclone/issues/4590
b.fs2.Features().Disable("Move")
}
if strings.HasPrefix(b.fs1.String(), "sftp") {
b.fs1.Features().Disable("Copy") // disable --sftp-copy-is-hardlink as hardlinks are not truly copies
}
if strings.HasPrefix(b.fs2.String(), "sftp") {
b.fs2.Features().Disable("Copy") // disable --sftp-copy-is-hardlink as hardlinks are not truly copies
}
if strings.Contains(strings.ToLower(fs.ConfigString(b.fs1)), "mailru") || strings.Contains(strings.ToLower(fs.ConfigString(b.fs2)), "mailru") {
fs.GetConfig(ctx).TPSLimit = 10 // https://github.com/rclone/rclone/issues/7768#issuecomment-2060888980
}
@@ -1011,23 +975,17 @@ func (b *bisyncTest) checkPreReqs(ctx context.Context, opt *bisync.Options) (con
objinfo := object.NewStaticObjectInfo("modtime_write_test", initDate, int64(len("modtime_write_test")), true, nil, nil)
obj, err := f.Put(ctx, in, objinfo)
require.NoError(b.t, err)
if !f.Features().IsLocal {
time.Sleep(time.Second) // avoid GoogleCloudStorage Error 429 rateLimitExceeded
}
err = obj.SetModTime(ctx, initDate)
if err == fs.ErrorCantSetModTime {
b.t.Skip("skipping test as at least one remote does not support setting modtime")
}
if !f.Features().IsLocal {
time.Sleep(time.Second) // avoid GoogleCloudStorage Error 429 rateLimitExceeded
if b.testCase != "nomodtime" {
b.t.Skip("skipping test as at least one remote does not support setting modtime")
}
}
err = obj.Remove(ctx)
require.NoError(b.t, err)
}
if b.testCase != "nomodtime" {
testSetModtime(b.fs1)
testSetModtime(b.fs2)
}
testSetModtime(b.fs1)
testSetModtime(b.fs2)
if b.testCase == "normalization" || b.testCase == "extended_char_paths" || b.testCase == "extended_filenames" {
// test whether remote is capable of running test
@@ -1471,9 +1429,6 @@ func (b *bisyncTest) compareResults() int {
resultText := b.mangleResult(b.workDir, file, false)
if fileType(file) == "log" {
if *ignoreLogs {
continue
}
// save mangled logs so difference is easier on eyes
goldenFile := filepath.Join(b.logDir, "mangled.golden.log")
resultFile := filepath.Join(b.logDir, "mangled.result.log")

View File

@@ -16,17 +16,15 @@ import (
"github.com/rclone/rclone/fs/operations"
)
type bisyncCheck = struct {
hashType hash.Type
fsrc, fdst fs.Fs
fcrypt *crypt.Fs
}
var hashType hash.Type
var fsrc, fdst fs.Fs
var fcrypt *crypt.Fs
// WhichCheck determines which CheckFn we should use based on the Fs types
// It is more robust and accurate than Check because
// it will fallback to CryptCheck or DownloadCheck instead of --size-only!
// it returns the *operations.CheckOpt with the CheckFn set.
func (b *bisyncRun) WhichCheck(ctx context.Context, opt *operations.CheckOpt) *operations.CheckOpt {
func WhichCheck(ctx context.Context, opt *operations.CheckOpt) *operations.CheckOpt {
ci := fs.GetConfig(ctx)
common := opt.Fsrc.Hashes().Overlap(opt.Fdst.Hashes())
@@ -42,32 +40,32 @@ func (b *bisyncRun) WhichCheck(ctx context.Context, opt *operations.CheckOpt) *o
if (srcIsCrypt && dstIsCrypt) || (!srcIsCrypt && dstIsCrypt) {
// if both are crypt or only dst is crypt
b.check.hashType = FdstCrypt.UnWrap().Hashes().GetOne()
if b.check.hashType != hash.None {
hashType = FdstCrypt.UnWrap().Hashes().GetOne()
if hashType != hash.None {
// use cryptcheck
b.check.fsrc = opt.Fsrc
b.check.fdst = opt.Fdst
b.check.fcrypt = FdstCrypt
fs.Infof(b.check.fdst, "Crypt detected! Using cryptcheck instead of check. (Use --size-only or --ignore-checksum to disable)")
opt.Check = b.CryptCheckFn
fsrc = opt.Fsrc
fdst = opt.Fdst
fcrypt = FdstCrypt
fs.Infof(fdst, "Crypt detected! Using cryptcheck instead of check. (Use --size-only or --ignore-checksum to disable)")
opt.Check = CryptCheckFn
return opt
}
} else if srcIsCrypt && !dstIsCrypt {
// if only src is crypt
b.check.hashType = FsrcCrypt.UnWrap().Hashes().GetOne()
if b.check.hashType != hash.None {
hashType = FsrcCrypt.UnWrap().Hashes().GetOne()
if hashType != hash.None {
// use reverse cryptcheck
b.check.fsrc = opt.Fdst
b.check.fdst = opt.Fsrc
b.check.fcrypt = FsrcCrypt
fs.Infof(b.check.fdst, "Crypt detected! Using cryptcheck instead of check. (Use --size-only or --ignore-checksum to disable)")
opt.Check = b.ReverseCryptCheckFn
fsrc = opt.Fdst
fdst = opt.Fsrc
fcrypt = FsrcCrypt
fs.Infof(fdst, "Crypt detected! Using cryptcheck instead of check. (Use --size-only or --ignore-checksum to disable)")
opt.Check = ReverseCryptCheckFn
return opt
}
}
// if we've gotten this far, neither check or cryptcheck will work, so use --download
fs.Infof(b.check.fdst, "Can't compare hashes, so using check --download for safety. (Use --size-only or --ignore-checksum to disable)")
fs.Infof(fdst, "Can't compare hashes, so using check --download for safety. (Use --size-only or --ignore-checksum to disable)")
opt.Check = DownloadCheckFn
return opt
}
@@ -90,17 +88,17 @@ func CheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool,
}
// CryptCheckFn is a slightly modified version of CryptCheck
func (b *bisyncRun) CryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
func CryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
cryptDst := dst.(*crypt.Object)
underlyingDst := cryptDst.UnWrap()
underlyingHash, err := underlyingDst.Hash(ctx, b.check.hashType)
underlyingHash, err := underlyingDst.Hash(ctx, hashType)
if err != nil {
return true, false, fmt.Errorf("error reading hash from underlying %v: %w", underlyingDst, err)
}
if underlyingHash == "" {
return false, true, nil
}
cryptHash, err := b.check.fcrypt.ComputeHash(ctx, cryptDst, src, b.check.hashType)
cryptHash, err := fcrypt.ComputeHash(ctx, cryptDst, src, hashType)
if err != nil {
return true, false, fmt.Errorf("error computing hash: %w", err)
}
@@ -108,10 +106,10 @@ func (b *bisyncRun) CryptCheckFn(ctx context.Context, dst, src fs.Object) (diffe
return false, true, nil
}
if cryptHash != underlyingHash {
err = fmt.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", b.check.fdst.Name(), b.check.fdst.Root(), cryptHash, b.check.fsrc.Name(), b.check.fsrc.Root(), underlyingHash)
err = fmt.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", fdst.Name(), fdst.Root(), cryptHash, fsrc.Name(), fsrc.Root(), underlyingHash)
fs.Debugf(src, "%s", err.Error())
// using same error msg as CheckFn so integration tests match
err = fmt.Errorf("%v differ", b.check.hashType)
err = fmt.Errorf("%v differ", hashType)
fs.Errorf(src, "%s", err.Error())
return true, false, nil
}
@@ -120,8 +118,8 @@ func (b *bisyncRun) CryptCheckFn(ctx context.Context, dst, src fs.Object) (diffe
// ReverseCryptCheckFn is like CryptCheckFn except src and dst are switched
// result: src is crypt, dst is non-crypt
func (b *bisyncRun) ReverseCryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
return b.CryptCheckFn(ctx, src, dst)
func ReverseCryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
return CryptCheckFn(ctx, src, dst)
}
// DownloadCheckFn is a slightly modified version of Check with --download
@@ -139,7 +137,7 @@ func (b *bisyncRun) checkconflicts(ctxCheck context.Context, filterCheck *filter
if filterCheck.HaveFilesFrom() {
fs.Debugf(nil, "There are potential conflicts to check.")
opt, close, checkopterr := check.GetCheckOpt(fs1, fs2)
opt, close, checkopterr := check.GetCheckOpt(b.fs1, b.fs2)
if checkopterr != nil {
b.critical = true
b.retryable = true
@@ -150,16 +148,16 @@ func (b *bisyncRun) checkconflicts(ctxCheck context.Context, filterCheck *filter
opt.Match = new(bytes.Buffer)
opt = b.WhichCheck(ctxCheck, opt)
opt = WhichCheck(ctxCheck, opt)
fs.Infof(nil, "Checking potential conflicts...")
check := operations.CheckFn(ctxCheck, opt)
fs.Infof(nil, "Finished checking the potential conflicts. %s", check)
// reset error count, because we don't want to count check errors as bisync errors
//reset error count, because we don't want to count check errors as bisync errors
accounting.Stats(ctxCheck).ResetErrors()
// return the list of identical files to check against later
//return the list of identical files to check against later
if len(fmt.Sprint(opt.Match)) > 0 {
matches = bilib.ToNames(strings.Split(fmt.Sprint(opt.Match), "\n"))
}
@@ -175,14 +173,14 @@ func (b *bisyncRun) checkconflicts(ctxCheck context.Context, filterCheck *filter
// WhichEqual is similar to WhichCheck, but checks a single object.
// Returns true if the objects are equal, false if they differ or if we don't know
func (b *bisyncRun) WhichEqual(ctx context.Context, src, dst fs.Object, Fsrc, Fdst fs.Fs) bool {
func WhichEqual(ctx context.Context, src, dst fs.Object, Fsrc, Fdst fs.Fs) bool {
opt, close, checkopterr := check.GetCheckOpt(Fsrc, Fdst)
if checkopterr != nil {
fs.Debugf(nil, "GetCheckOpt error: %v", checkopterr)
}
defer close()
opt = b.WhichCheck(ctx, opt)
opt = WhichCheck(ctx, opt)
differ, noHash, err := opt.Check(ctx, dst, src)
if err != nil {
fs.Errorf(src, "failed to check: %v", err)
@@ -219,7 +217,7 @@ func (b *bisyncRun) EqualFn(ctx context.Context) context.Context {
equal, skipHash = timeSizeEqualFn()
if equal && !skipHash {
whichHashType := func(f fs.Info) hash.Type {
ht := b.getHashType(f.Name())
ht := getHashType(f.Name())
if ht == hash.None && b.opt.Compare.SlowHashSyncOnly && !b.opt.Resync {
ht = f.Hashes().GetOne()
}
@@ -227,9 +225,9 @@ func (b *bisyncRun) EqualFn(ctx context.Context) context.Context {
}
srcHash, _ := src.Hash(ctx, whichHashType(src.Fs()))
dstHash, _ := dst.Hash(ctx, whichHashType(dst.Fs()))
srcHash, _ = b.tryDownloadHash(ctx, src, srcHash)
dstHash, _ = b.tryDownloadHash(ctx, dst, dstHash)
equal = !b.hashDiffers(srcHash, dstHash, whichHashType(src.Fs()), whichHashType(dst.Fs()), src.Size(), dst.Size())
srcHash, _ = tryDownloadHash(ctx, src, srcHash)
dstHash, _ = tryDownloadHash(ctx, dst, dstHash)
equal = !hashDiffers(srcHash, dstHash, whichHashType(src.Fs()), whichHashType(dst.Fs()), src.Size(), dst.Size())
}
if equal {
logger(ctx, operations.Match, src, dst, nil)
@@ -249,7 +247,7 @@ func (b *bisyncRun) resyncTimeSizeEqual(ctxNoLogger context.Context, src fs.Obje
// note that arg order is path1, path2, regardless of src/dst
path1, path2 := b.resyncWhichIsWhich(src, dst)
if sizeDiffers(path1.Size(), path2.Size()) {
winningPath := b.resolveLargerSmaller(path1.Size(), path2.Size(), path1.Remote(), b.opt.ResyncMode)
winningPath := b.resolveLargerSmaller(path1.Size(), path2.Size(), path1.Remote(), path2.Remote(), b.opt.ResyncMode)
// don't need to check/update modtime here, as sizes definitely differ and something will be transferred
return b.resyncWinningPathToEqual(winningPath), b.resyncWinningPathToEqual(winningPath) // skip hash check if true
}
@@ -259,7 +257,7 @@ func (b *bisyncRun) resyncTimeSizeEqual(ctxNoLogger context.Context, src fs.Obje
// note that arg order is path1, path2, regardless of src/dst
path1, path2 := b.resyncWhichIsWhich(src, dst)
if timeDiffers(ctxNoLogger, path1.ModTime(ctxNoLogger), path2.ModTime(ctxNoLogger), path1.Fs(), path2.Fs()) {
winningPath := b.resolveNewerOlder(path1.ModTime(ctxNoLogger), path2.ModTime(ctxNoLogger), path1.Remote(), b.opt.ResyncMode)
winningPath := b.resolveNewerOlder(path1.ModTime(ctxNoLogger), path2.ModTime(ctxNoLogger), path1.Remote(), path2.Remote(), b.opt.ResyncMode)
// if src is winner, proceed with equal to check size/hash and possibly just update dest modtime instead of transferring
if !b.resyncWinningPathToEqual(winningPath) {
return operations.Equal(ctxNoLogger, src, dst), false // note we're back to src/dst, not path1/path2

View File

@@ -115,7 +115,6 @@ func (x *CheckSyncMode) Type() string {
}
// Opt keeps command line options
// internal functions should use b.opt instead
var Opt Options
func init() {

View File

@@ -28,7 +28,7 @@ type CompareOpt = struct {
DownloadHash bool
}
func (b *bisyncRun) setCompareDefaults(ctx context.Context) (err error) {
func (b *bisyncRun) setCompareDefaults(ctx context.Context) error {
ci := fs.GetConfig(ctx)
// defaults
@@ -120,25 +120,25 @@ func sizeDiffers(a, b int64) bool {
// returns true if the hashes are definitely different.
// returns false if equal, or if either is unknown.
func (b *bisyncRun) hashDiffers(stringA, stringB string, ht1, ht2 hash.Type, size1, size2 int64) bool {
if stringA == "" || stringB == "" {
func hashDiffers(a, b string, ht1, ht2 hash.Type, size1, size2 int64) bool {
if a == "" || b == "" {
if ht1 != hash.None && ht2 != hash.None && !(size1 <= 0 || size2 <= 0) {
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: hash unexpectedly blank despite Fs support (%s, %s) (you may need to --resync!)"), stringA, stringB)
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: hash unexpectedly blank despite Fs support (%s, %s) (you may need to --resync!)"), a, b)
}
return false
}
if ht1 != ht2 {
if !(b.downloadHashOpt.downloadHash && ((ht1 == hash.MD5 && ht2 == hash.None) || (ht1 == hash.None && ht2 == hash.MD5))) {
if !(downloadHash && ((ht1 == hash.MD5 && ht2 == hash.None) || (ht1 == hash.None && ht2 == hash.MD5))) {
fs.Infof(nil, Color(terminal.YellowFg, "WARNING: Can't compare hashes of different types (%s, %s)"), ht1.String(), ht2.String())
return false
}
}
return stringA != stringB
return a != b
}
// chooses hash type, giving priority to types both sides have in common
func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
b.downloadHashOpt.downloadHash = b.opt.Compare.DownloadHash
downloadHash = b.opt.Compare.DownloadHash
if b.opt.Compare.NoSlowHash && b.opt.Compare.SlowHashDetected {
fs.Infof(nil, "Not checking for common hash as at least one slow hash detected.")
} else {
@@ -268,15 +268,13 @@ func (b *bisyncRun) setFromCompareFlag(ctx context.Context) error {
return nil
}
// b.downloadHashOpt.downloadHash is true if we should attempt to compute hash by downloading when otherwise unavailable
type downloadHashOpt struct {
downloadHash bool
downloadHashWarn mutex.Once
firstDownloadHash mutex.Once
}
// downloadHash is true if we should attempt to compute hash by downloading when otherwise unavailable
var downloadHash bool
var downloadHashWarn mutex.Once
var firstDownloadHash mutex.Once
func (b *bisyncRun) tryDownloadHash(ctx context.Context, o fs.DirEntry, hashVal string) (string, error) {
if hashVal != "" || !b.downloadHashOpt.downloadHash {
func tryDownloadHash(ctx context.Context, o fs.DirEntry, hashVal string) (string, error) {
if hashVal != "" || !downloadHash {
return hashVal, nil
}
obj, ok := o.(fs.Object)
@@ -285,14 +283,14 @@ func (b *bisyncRun) tryDownloadHash(ctx context.Context, o fs.DirEntry, hashVal
return hashVal, fs.ErrorObjectNotFound
}
if o.Size() < 0 {
b.downloadHashOpt.downloadHashWarn.Do(func() {
downloadHashWarn.Do(func() {
fs.Log(o, Color(terminal.YellowFg, "Skipping hash download as checksum not reliable with files of unknown length."))
})
fs.Debugf(o, "Skipping hash download as checksum not reliable with files of unknown length.")
return hashVal, hash.ErrUnsupported
}
b.downloadHashOpt.firstDownloadHash.Do(func() {
firstDownloadHash.Do(func() {
fs.Infoc(obj.Fs().Name(), Color(terminal.Dim, "Downloading hashes..."))
})
tr := accounting.Stats(ctx).NewCheckingTransfer(o, "computing hash with --download-hash")


@@ -219,7 +219,7 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string,
}
}
if b.opt.Compare.Checksum {
if b.hashDiffers(old.getHash(file), now.getHash(file), old.hash, now.hash, old.getSize(file), now.getSize(file)) {
if hashDiffers(old.getHash(file), now.getHash(file), old.hash, now.hash, old.getSize(file), now.getSize(file)) {
fs.Debugf(file, "(old: %v current: %v)", old.getHash(file), now.getHash(file))
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "hash"))
d |= deltaHash
@@ -346,7 +346,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (result
if d2.is(deltaOther) {
// if size or hash differ, skip this, as we already know they're not equal
if (b.opt.Compare.Size && sizeDiffers(ds1.size[file], ds2.size[file2])) ||
(b.opt.Compare.Checksum && b.hashDiffers(ds1.hash[file], ds2.hash[file2], b.opt.Compare.HashType1, b.opt.Compare.HashType2, ds1.size[file], ds2.size[file2])) {
(b.opt.Compare.Checksum && hashDiffers(ds1.hash[file], ds2.hash[file2], b.opt.Compare.HashType1, b.opt.Compare.HashType2, ds1.size[file], ds2.size[file2])) {
fs.Debugf(file, "skipping equality check as size/hash definitely differ")
} else {
checkit := func(filename string) {
@@ -393,10 +393,10 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (result
// if files are identical, leave them alone instead of renaming
if (dirs1.has(file) || dirs1.has(alias)) && (dirs2.has(file) || dirs2.has(alias)) {
fs.Infof(nil, "This is a directory, not a file. Skipping equality check and will not rename: %s", file)
b.march.ls1.getPut(file, skippedDirs1)
b.march.ls2.getPut(file, skippedDirs2)
ls1.getPut(file, skippedDirs1)
ls2.getPut(file, skippedDirs2)
b.debugFn(file, func() {
b.debug(file, fmt.Sprintf("deltas dir: %s, ls1 has name?: %v, ls2 has name?: %v", file, b.march.ls1.has(b.DebugName), b.march.ls2.has(b.DebugName)))
b.debug(file, fmt.Sprintf("deltas dir: %s, ls1 has name?: %v, ls2 has name?: %v", file, ls1.has(b.DebugName), ls2.has(b.DebugName)))
})
} else {
equal := matches.Has(file)
@@ -409,16 +409,16 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (result
// the Path1 version is deemed "correct" in this scenario
fs.Infof(alias, "Files are equal but will copy anyway to fix case to %s", file)
copy1to2.Add(file)
} else if b.opt.Compare.Modtime && timeDiffers(ctx, b.march.ls1.getTime(b.march.ls1.getTryAlias(file, alias)), b.march.ls2.getTime(b.march.ls2.getTryAlias(file, alias)), b.fs1, b.fs2) {
} else if b.opt.Compare.Modtime && timeDiffers(ctx, ls1.getTime(ls1.getTryAlias(file, alias)), ls2.getTime(ls2.getTryAlias(file, alias)), b.fs1, b.fs2) {
fs.Infof(file, "Files are equal but will copy anyway to update modtime (will not rename)")
if b.march.ls1.getTime(b.march.ls1.getTryAlias(file, alias)).Before(b.march.ls2.getTime(b.march.ls2.getTryAlias(file, alias))) {
if ls1.getTime(ls1.getTryAlias(file, alias)).Before(ls2.getTime(ls2.getTryAlias(file, alias))) {
// Path2 is newer
b.indent("Path2", p1, "Queue copy to Path1")
copy2to1.Add(b.march.ls2.getTryAlias(file, alias))
copy2to1.Add(ls2.getTryAlias(file, alias))
} else {
// Path1 is newer
b.indent("Path1", p2, "Queue copy to Path2")
copy1to2.Add(b.march.ls1.getTryAlias(file, alias))
copy1to2.Add(ls1.getTryAlias(file, alias))
}
} else {
fs.Infof(nil, "Files are equal! Skipping: %s", file)
@@ -590,10 +590,10 @@ func (b *bisyncRun) updateAliases(ctx context.Context, ds1, ds2 *deltaSet) {
fullMap1 := map[string]string{} // [transformedname]originalname
fullMap2 := map[string]string{} // [transformedname]originalname
for _, name := range b.march.ls1.list {
for _, name := range ls1.list {
fullMap1[transform(name)] = name
}
for _, name := range b.march.ls2.list {
for _, name := range ls2.list {
fullMap2[transform(name)] = name
}
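findDeltas above records what changed per file as bit flags (d |= deltaHash) alongside the human-readable whatchanged list. The bitmask side of that, sketched with invented flag names:

package main

import "fmt"

type delta uint

const (
	deltaSize delta = 1 << iota
	deltaTime
	deltaHash
)

// is reports whether any of the given flag bits are set on d.
func (d delta) is(flags delta) bool { return d&flags != 0 }

func main() {
	var d delta
	d |= deltaHash // hash differed
	d |= deltaTime // modtime differed too
	fmt.Println(d.is(deltaHash))             // true
	fmt.Println(d.is(deltaSize))             // false
	fmt.Println(d.is(deltaSize | deltaTime)) // true: the time bit is set
}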


@@ -42,14 +42,10 @@ var lineRegex = regexp.MustCompile(`^(\S) +(-?\d+) (\S+) (\S+) (\d{4}-\d\d-\d\dT
// timeFormat defines time format used in listings
const timeFormat = "2006-01-02T15:04:05.000000000-0700"
// TZ defines time zone used in listings
var (
// TZ defines time zone used in listings
TZ = time.UTC
tzLocal = false
// LogTZ defines time zone used in logs (which may be different than that used in listings).
// time.Local by default, but we force UTC on tests to make them deterministic regardless of tester's location.
LogTZ = time.Local
)
// fileInfo describes a file
@@ -202,8 +198,8 @@ func (b *bisyncRun) fileInfoEqual(file1, file2 string, ls1, ls2 *fileList) bool
equal = false
}
}
if b.opt.Compare.Checksum && !b.queueOpt.ignoreListingChecksum {
if b.hashDiffers(ls1.getHash(file1), ls2.getHash(file2), b.opt.Compare.HashType1, b.opt.Compare.HashType2, ls1.getSize(file1), ls2.getSize(file2)) {
if b.opt.Compare.Checksum && !ignoreListingChecksum {
if hashDiffers(ls1.getHash(file1), ls2.getHash(file2), b.opt.Compare.HashType1, b.opt.Compare.HashType2, ls1.getSize(file1), ls2.getSize(file2)) {
b.indent("ERROR", file1, fmt.Sprintf("Checksum not equal in listing. Path1: %v, Path2: %v", ls1.getHash(file1), ls2.getHash(file2)))
equal = false
}
@@ -247,7 +243,7 @@ func (ls *fileList) sort() {
}
// save will save listing to a file.
func (ls *fileList) save(listing string) error {
func (ls *fileList) save(ctx context.Context, listing string) error {
file, err := os.Create(listing)
if err != nil {
return err
@@ -712,9 +708,9 @@ func (b *bisyncRun) modifyListing(ctx context.Context, src fs.Fs, dst fs.Fs, res
b.debug(b.DebugName, fmt.Sprintf("%s pre-save dstList has it?: %v", direction, dstList.has(b.DebugName)))
}
// update files
err = srcList.save(srcListing)
err = srcList.save(ctx, srcListing)
b.handleErr(srcList, "error saving srcList from modifyListing", err, true, true)
err = dstList.save(dstListing)
err = dstList.save(ctx, dstListing)
b.handleErr(dstList, "error saving dstList from modifyListing", err, true, true)
return err
@@ -745,7 +741,7 @@ func (b *bisyncRun) recheck(ctxRecheck context.Context, src, dst fs.Fs, srcList,
if hashType != hash.None {
hashVal, _ = obj.Hash(ctxRecheck, hashType)
}
hashVal, _ = b.tryDownloadHash(ctxRecheck, obj, hashVal)
hashVal, _ = tryDownloadHash(ctxRecheck, obj, hashVal)
}
var modtime time.Time
if b.opt.Compare.Modtime {
@@ -759,7 +755,7 @@ func (b *bisyncRun) recheck(ctxRecheck context.Context, src, dst fs.Fs, srcList,
for _, dstObj := range dstObjs {
if srcObj.Remote() == dstObj.Remote() || srcObj.Remote() == b.aliases.Alias(dstObj.Remote()) {
// note: unlike Equal(), WhichEqual() does not update the modtime in dest if sums match but modtimes don't.
if b.opt.DryRun || b.WhichEqual(ctxRecheck, srcObj, dstObj, src, dst) {
if b.opt.DryRun || WhichEqual(ctxRecheck, srcObj, dstObj, src, dst) {
putObj(srcObj, srcList)
putObj(dstObj, dstList)
resolved = append(resolved, srcObj.Remote())
@@ -773,7 +769,7 @@ func (b *bisyncRun) recheck(ctxRecheck context.Context, src, dst fs.Fs, srcList,
// skip and error during --resync, as rollback is not possible
if !slices.Contains(resolved, srcObj.Remote()) && !b.opt.DryRun {
if b.opt.Resync {
err := errors.New("no dstObj match or files not equal")
err = errors.New("no dstObj match or files not equal")
b.handleErr(srcObj, "Unable to rollback during --resync", err, true, false)
} else {
toRollback = append(toRollback, srcObj.Remote())


@@ -16,17 +16,16 @@ import (
const basicallyforever = fs.Duration(200 * 365 * 24 * time.Hour)
type lockFileOpt struct {
stopRenewal func()
data struct {
Session string
PID string
TimeRenewed time.Time
TimeExpires time.Time
}
}
var stopRenewal func()
func (b *bisyncRun) setLockFile() (err error) {
var data = struct {
Session string
PID string
TimeRenewed time.Time
TimeExpires time.Time
}{}
func (b *bisyncRun) setLockFile() error {
b.lockFile = ""
b.setLockFileExpiration()
if !b.opt.DryRun {
@@ -46,23 +45,24 @@ func (b *bisyncRun) setLockFile() (err error) {
}
fs.Debugf(nil, "Lock file created: %s", b.lockFile)
b.renewLockFile()
b.lockFileOpt.stopRenewal = b.startLockRenewal()
stopRenewal = b.startLockRenewal()
}
return nil
}
func (b *bisyncRun) removeLockFile() (err error) {
func (b *bisyncRun) removeLockFile() {
if b.lockFile != "" {
b.lockFileOpt.stopRenewal()
err = os.Remove(b.lockFile)
if err == nil {
stopRenewal()
errUnlock := os.Remove(b.lockFile)
if errUnlock == nil {
fs.Debugf(nil, "Lock file removed: %s", b.lockFile)
} else if err == nil {
err = errUnlock
} else {
fs.Errorf(nil, "cannot remove lockfile %s: %v", b.lockFile, err)
fs.Errorf(nil, "cannot remove lockfile %s: %v", b.lockFile, errUnlock)
}
b.lockFile = "" // block removing it again
}
return err
}
func (b *bisyncRun) setLockFileExpiration() {
@@ -77,18 +77,18 @@ func (b *bisyncRun) setLockFileExpiration() {
func (b *bisyncRun) renewLockFile() {
if b.lockFile != "" && bilib.FileExists(b.lockFile) {
b.lockFileOpt.data.Session = b.basePath
b.lockFileOpt.data.PID = strconv.Itoa(os.Getpid())
b.lockFileOpt.data.TimeRenewed = time.Now()
b.lockFileOpt.data.TimeExpires = time.Now().Add(time.Duration(b.opt.MaxLock))
data.Session = b.basePath
data.PID = strconv.Itoa(os.Getpid())
data.TimeRenewed = time.Now()
data.TimeExpires = time.Now().Add(time.Duration(b.opt.MaxLock))
// save data file
df, err := os.Create(b.lockFile)
b.handleErr(b.lockFile, "error renewing lock file", err, true, true)
b.handleErr(b.lockFile, "error encoding JSON to lock file", json.NewEncoder(df).Encode(b.lockFileOpt.data), true, true)
b.handleErr(b.lockFile, "error encoding JSON to lock file", json.NewEncoder(df).Encode(data), true, true)
b.handleErr(b.lockFile, "error closing lock file", df.Close(), true, true)
if b.opt.MaxLock < basicallyforever {
fs.Infof(nil, Color(terminal.HiBlueFg, "lock file renewed for %v. New expiration: %v"), b.opt.MaxLock, b.lockFileOpt.data.TimeExpires)
fs.Infof(nil, Color(terminal.HiBlueFg, "lock file renewed for %v. New expiration: %v"), b.opt.MaxLock, data.TimeExpires)
}
}
}
@@ -99,7 +99,7 @@ func (b *bisyncRun) lockFileIsExpired() bool {
b.handleErr(b.lockFile, "error reading lock file", err, true, true)
dec := json.NewDecoder(rdf)
for {
if err := dec.Decode(&b.lockFileOpt.data); err != nil {
if err := dec.Decode(&data); err != nil {
if err != io.EOF {
fs.Errorf(b.lockFile, "err: %v", err)
}
@@ -107,14 +107,14 @@ func (b *bisyncRun) lockFileIsExpired() bool {
}
}
b.handleErr(b.lockFile, "error closing file", rdf.Close(), true, true)
if !b.lockFileOpt.data.TimeExpires.IsZero() && b.lockFileOpt.data.TimeExpires.Before(time.Now()) {
fs.Infof(b.lockFile, Color(terminal.GreenFg, "Lock file found, but it expired at %v. Will delete it and proceed."), b.lockFileOpt.data.TimeExpires)
if !data.TimeExpires.IsZero() && data.TimeExpires.Before(time.Now()) {
fs.Infof(b.lockFile, Color(terminal.GreenFg, "Lock file found, but it expired at %v. Will delete it and proceed."), data.TimeExpires)
markFailed(b.listing1) // listing is untrusted so force revert to prior (if --recover) or create new ones (if --resync)
markFailed(b.listing2)
return true
}
fs.Infof(b.lockFile, Color(terminal.RedFg, "Valid lock file found. Expires at %v. (%v from now)"), b.lockFileOpt.data.TimeExpires, time.Since(b.lockFileOpt.data.TimeExpires).Abs().Round(time.Second))
prettyprint(b.lockFileOpt.data, "Lockfile info", fs.LogLevelInfo)
fs.Infof(b.lockFile, Color(terminal.RedFg, "Valid lock file found. Expires at %v. (%v from now)"), data.TimeExpires, time.Since(data.TimeExpires).Abs().Round(time.Second))
prettyprint(data, "Lockfile info", fs.LogLevelInfo)
}
return false
}
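The lock file logic above serializes a small JSON record (session, PID, renewal and expiry times) and treats a lock whose TimeExpires has passed as stale. A compact sketch of that write-then-check cycle, with invented names and a single JSON object per file assumed:

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"strconv"
	"time"
)

type lockData struct {
	Session     string
	PID         string
	TimeRenewed time.Time
	TimeExpires time.Time
}

// writeLock creates (or renews) the lock file at path with a fresh expiry.
func writeLock(path string, ttl time.Duration) error {
	d := lockData{
		Session:     "demo-session",
		PID:         strconv.Itoa(os.Getpid()),
		TimeRenewed: time.Now(),
		TimeExpires: time.Now().Add(ttl),
	}
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	return json.NewEncoder(f).Encode(d)
}

// lockExpired reports whether the lock file at path has passed its expiry.
func lockExpired(path string) (bool, error) {
	f, err := os.Open(path)
	if err != nil {
		return false, err
	}
	defer f.Close()
	var d lockData
	if err := json.NewDecoder(f).Decode(&d); err != nil {
		return false, err
	}
	return !d.TimeExpires.IsZero() && d.TimeExpires.Before(time.Now()), nil
}

func main() {
	_ = writeLock("demo.lck", time.Second)
	time.Sleep(2 * time.Second)
	expired, _ := lockExpired("demo.lck")
	fmt.Println("expired:", expired) // true once the TTL has elapsed
}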


@@ -6,7 +6,6 @@ import (
"runtime"
"strconv"
"strings"
"sync"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/encoder"
@@ -68,15 +67,10 @@ func quotePath(path string) string {
}
// Colors controls whether terminal colors are enabled
var (
Colors bool
ColorsLock sync.Mutex
)
var Colors bool
// Color handles terminal colors for bisync
func Color(style string, s string) string {
ColorsLock.Lock()
defer ColorsLock.Unlock()
if !Colors {
return s
}
@@ -86,8 +80,6 @@ func Color(style string, s string) string {
// ColorX handles terminal colors for bisync
func ColorX(style string, s string) string {
ColorsLock.Lock()
defer ColorsLock.Unlock()
if !Colors {
return s
}


@@ -12,20 +12,18 @@ import (
"github.com/rclone/rclone/fs/march"
)
type bisyncMarch struct {
ls1 *fileList
ls2 *fileList
err error
firstErr error
marchAliasLock sync.Mutex
marchLsLock sync.Mutex
marchErrLock sync.Mutex
marchCtx context.Context
}
var ls1 = newFileList()
var ls2 = newFileList()
var err error
var firstErr error
var marchAliasLock sync.Mutex
var marchLsLock sync.Mutex
var marchErrLock sync.Mutex
var marchCtx context.Context
func (b *bisyncRun) makeMarchListing(ctx context.Context) (*fileList, *fileList, error) {
ci := fs.GetConfig(ctx)
b.march.marchCtx = ctx
marchCtx = ctx
b.setupListing()
fs.Debugf(b, "starting to march!")
@@ -41,31 +39,31 @@ func (b *bisyncRun) makeMarchListing(ctx context.Context) (*fileList, *fileList,
NoCheckDest: false,
NoUnicodeNormalization: ci.NoUnicodeNormalization,
}
b.march.err = m.Run(ctx)
err = m.Run(ctx)
fs.Debugf(b, "march completed. err: %v", b.march.err)
if b.march.err == nil {
b.march.err = b.march.firstErr
fs.Debugf(b, "march completed. err: %v", err)
if err == nil {
err = firstErr
}
if b.march.err != nil {
b.handleErr("march", "error during march", b.march.err, true, true)
if err != nil {
b.handleErr("march", "error during march", err, true, true)
b.abort = true
return b.march.ls1, b.march.ls2, b.march.err
return ls1, ls2, err
}
// save files
if b.opt.Compare.DownloadHash && b.march.ls1.hash == hash.None {
b.march.ls1.hash = hash.MD5
if b.opt.Compare.DownloadHash && ls1.hash == hash.None {
ls1.hash = hash.MD5
}
if b.opt.Compare.DownloadHash && b.march.ls2.hash == hash.None {
b.march.ls2.hash = hash.MD5
if b.opt.Compare.DownloadHash && ls2.hash == hash.None {
ls2.hash = hash.MD5
}
b.march.err = b.march.ls1.save(b.newListing1)
b.handleErr(b.march.ls1, "error saving b.march.ls1 from march", b.march.err, true, true)
b.march.err = b.march.ls2.save(b.newListing2)
b.handleErr(b.march.ls2, "error saving b.march.ls2 from march", b.march.err, true, true)
err = ls1.save(ctx, b.newListing1)
b.handleErr(ls1, "error saving ls1 from march", err, true, true)
err = ls2.save(ctx, b.newListing2)
b.handleErr(ls2, "error saving ls2 from march", err, true, true)
return b.march.ls1, b.march.ls2, b.march.err
return ls1, ls2, err
}
// SrcOnly have an object which is on path1 only
@@ -85,9 +83,9 @@ func (b *bisyncRun) DstOnly(o fs.DirEntry) (recurse bool) {
// Match is called when object exists on both path1 and path2 (whether equal or not)
func (b *bisyncRun) Match(ctx context.Context, o2, o1 fs.DirEntry) (recurse bool) {
fs.Debugf(o1, "both path1 and path2")
b.march.marchAliasLock.Lock()
marchAliasLock.Lock()
b.aliases.Add(o1.Remote(), o2.Remote())
b.march.marchAliasLock.Unlock()
marchAliasLock.Unlock()
b.parse(o1, true)
b.parse(o2, false)
return isDir(o1)
@@ -121,76 +119,76 @@ func (b *bisyncRun) parse(e fs.DirEntry, isPath1 bool) {
}
func (b *bisyncRun) setupListing() {
b.march.ls1 = newFileList()
b.march.ls2 = newFileList()
ls1 = newFileList()
ls2 = newFileList()
// note that --ignore-listing-checksum is different from --ignore-checksum
// and we already checked it when we set b.opt.Compare.HashType1 and 2
b.march.ls1.hash = b.opt.Compare.HashType1
b.march.ls2.hash = b.opt.Compare.HashType2
ls1.hash = b.opt.Compare.HashType1
ls2.hash = b.opt.Compare.HashType2
}
func (b *bisyncRun) ForObject(o fs.Object, isPath1 bool) {
tr := accounting.Stats(b.march.marchCtx).NewCheckingTransfer(o, "listing file - "+whichPath(isPath1))
tr := accounting.Stats(marchCtx).NewCheckingTransfer(o, "listing file - "+whichPath(isPath1))
defer func() {
tr.Done(b.march.marchCtx, nil)
tr.Done(marchCtx, nil)
}()
var (
hashVal string
hashErr error
)
ls := b.whichLs(isPath1)
ls := whichLs(isPath1)
hashType := ls.hash
if hashType != hash.None {
hashVal, hashErr = o.Hash(b.march.marchCtx, hashType)
b.march.marchErrLock.Lock()
if b.march.firstErr == nil {
b.march.firstErr = hashErr
hashVal, hashErr = o.Hash(marchCtx, hashType)
marchErrLock.Lock()
if firstErr == nil {
firstErr = hashErr
}
b.march.marchErrLock.Unlock()
marchErrLock.Unlock()
}
hashVal, hashErr = b.tryDownloadHash(b.march.marchCtx, o, hashVal)
b.march.marchErrLock.Lock()
if b.march.firstErr == nil {
b.march.firstErr = hashErr
hashVal, hashErr = tryDownloadHash(marchCtx, o, hashVal)
marchErrLock.Lock()
if firstErr == nil {
firstErr = hashErr
}
if b.march.firstErr != nil {
b.handleErr(hashType, "error hashing during march", b.march.firstErr, false, true)
if firstErr != nil {
b.handleErr(hashType, "error hashing during march", firstErr, false, true)
}
b.march.marchErrLock.Unlock()
marchErrLock.Unlock()
var modtime time.Time
if b.opt.Compare.Modtime {
modtime = o.ModTime(b.march.marchCtx).In(TZ)
modtime = o.ModTime(marchCtx).In(TZ)
}
id := "" // TODO: ID(o)
flags := "-" // "-" for a file and "d" for a directory
b.march.marchLsLock.Lock()
marchLsLock.Lock()
ls.put(o.Remote(), o.Size(), modtime, hashVal, id, flags)
b.march.marchLsLock.Unlock()
marchLsLock.Unlock()
}
func (b *bisyncRun) ForDir(o fs.Directory, isPath1 bool) {
tr := accounting.Stats(b.march.marchCtx).NewCheckingTransfer(o, "listing dir - "+whichPath(isPath1))
tr := accounting.Stats(marchCtx).NewCheckingTransfer(o, "listing dir - "+whichPath(isPath1))
defer func() {
tr.Done(b.march.marchCtx, nil)
tr.Done(marchCtx, nil)
}()
ls := b.whichLs(isPath1)
ls := whichLs(isPath1)
var modtime time.Time
if b.opt.Compare.Modtime {
modtime = o.ModTime(b.march.marchCtx).In(TZ)
modtime = o.ModTime(marchCtx).In(TZ)
}
id := "" // TODO
flags := "d" // "-" for a file and "d" for a directory
b.march.marchLsLock.Lock()
marchLsLock.Lock()
ls.put(o.Remote(), -1, modtime, "", id, flags)
b.march.marchLsLock.Unlock()
marchLsLock.Unlock()
}
func (b *bisyncRun) whichLs(isPath1 bool) *fileList {
ls := b.march.ls1
func whichLs(isPath1 bool) *fileList {
ls := ls1
if !isPath1 {
ls = b.march.ls2
ls = ls2
}
return ls
}
@@ -208,7 +206,7 @@ func (b *bisyncRun) findCheckFiles(ctx context.Context) (*fileList, *fileList, e
b.handleErr(b.opt.CheckFilename, "error adding CheckFilename to filter", filterCheckFile.Add(true, b.opt.CheckFilename), true, true)
b.handleErr(b.opt.CheckFilename, "error adding ** exclusion to filter", filterCheckFile.Add(false, "**"), true, true)
ci := fs.GetConfig(ctxCheckFile)
b.march.marchCtx = ctxCheckFile
marchCtx = ctxCheckFile
b.setupListing()
fs.Debugf(b, "starting to march!")
@@ -225,18 +223,18 @@ func (b *bisyncRun) findCheckFiles(ctx context.Context) (*fileList, *fileList, e
NoCheckDest: false,
NoUnicodeNormalization: ci.NoUnicodeNormalization,
}
b.march.err = m.Run(ctxCheckFile)
err = m.Run(ctxCheckFile)
fs.Debugf(b, "march completed. err: %v", b.march.err)
if b.march.err == nil {
b.march.err = b.march.firstErr
fs.Debugf(b, "march completed. err: %v", err)
if err == nil {
err = firstErr
}
if b.march.err != nil {
b.handleErr("march", "error during findCheckFiles", b.march.err, true, true)
if err != nil {
b.handleErr("march", "error during findCheckFiles", err, true, true)
b.abort = true
}
return b.march.ls1, b.march.ls2, b.march.err
return ls1, ls2, err
}
// ID returns the ID of the Object if known, or "" if not


@@ -51,11 +51,6 @@ type bisyncRun struct {
lockFile string
renames renames
resyncIs1to2 bool
march bisyncMarch
check bisyncCheck
queueOpt bisyncQueueOpt
downloadHashOpt downloadHashOpt
lockFileOpt lockFileOpt
}
type queues struct {
@@ -69,6 +64,7 @@ type queues struct {
// Bisync handles lock file, performs bisync run and checks exit status
func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
defer resetGlobals()
opt := *optArg // ensure that input is never changed
b := &bisyncRun{
fs1: fs1,
@@ -87,9 +83,7 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
opt.OrigBackupDir = ci.BackupDir
if ci.TerminalColorMode == fs.TerminalColorModeAlways || (ci.TerminalColorMode == fs.TerminalColorModeAuto && !log.Redirected()) {
ColorsLock.Lock()
Colors = true
ColorsLock.Unlock()
}
err = b.setCompareDefaults(ctx)
@@ -99,7 +93,7 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
b.setResyncDefaults()
err = b.setResolveDefaults()
err = b.setResolveDefaults(ctx)
if err != nil {
return err
}
@@ -130,8 +124,6 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
return err
}
b.queueOpt.logger = operations.NewLoggerOpt()
// Handle SIGINT
var finaliseOnce gosync.Once
@@ -169,7 +161,7 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
markFailed(b.listing1)
markFailed(b.listing2)
}
err = b.removeLockFile()
b.removeLockFile()
}
})
}
@@ -179,10 +171,7 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
// run bisync
err = b.runLocked(ctx)
removeLockErr := b.removeLockFile()
if err == nil {
err = removeLockErr
}
b.removeLockFile()
b.CleanupCompleted = true
if b.InGracefulShutdown {
@@ -273,7 +262,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
// Generate Path1 and Path2 listings and copy any unique Path2 files to Path1
if opt.Resync {
return b.resync(fctx)
return b.resync(octx, fctx)
}
// Check for existence of prior Path1 and Path2 listings
@@ -308,7 +297,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
}
fs.Infof(nil, "Building Path1 and Path2 listings")
b.march.ls1, b.march.ls2, err = b.makeMarchListing(fctx)
ls1, ls2, err = b.makeMarchListing(fctx)
if err != nil || accounting.Stats(fctx).Errored() {
fs.Error(nil, Color(terminal.RedFg, "There were errors while building listings. Aborting as it is too dangerous to continue."))
b.critical = true
@@ -318,7 +307,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
// Check for Path1 deltas relative to the prior sync
fs.Infof(nil, "Path1 checking for diffs")
ds1, err := b.findDeltas(fctx, b.fs1, b.listing1, b.march.ls1, "Path1")
ds1, err := b.findDeltas(fctx, b.fs1, b.listing1, ls1, "Path1")
if err != nil {
return err
}
@@ -326,7 +315,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
// Check for Path2 deltas relative to the prior sync
fs.Infof(nil, "Path2 checking for diffs")
ds2, err := b.findDeltas(fctx, b.fs2, b.listing2, b.march.ls2, "Path2")
ds2, err := b.findDeltas(fctx, b.fs2, b.listing2, ls2, "Path2")
if err != nil {
return err
}
@@ -400,7 +389,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
newl1, _ := b.loadListing(b.newListing1)
newl2, _ := b.loadListing(b.newListing2)
b.debug(b.DebugName, fmt.Sprintf("pre-saveOldListings, ls1 has name?: %v, ls2 has name?: %v", l1.has(b.DebugName), l2.has(b.DebugName)))
b.debug(b.DebugName, fmt.Sprintf("pre-saveOldListings, newls1 has name?: %v, ls2 has name?: %v", newl1.has(b.DebugName), newl2.has(b.DebugName)))
b.debug(b.DebugName, fmt.Sprintf("pre-saveOldListings, newls1 has name?: %v, newls2 has name?: %v", newl1.has(b.DebugName), newl2.has(b.DebugName)))
}
b.saveOldListings()
// save new listings
@@ -564,7 +553,7 @@ func (b *bisyncRun) setBackupDir(ctx context.Context, destPath int) context.Cont
return ctx
}
func (b *bisyncRun) overlappingPathsCheck(fctx context.Context, fs1, fs2 fs.Fs) (err error) {
func (b *bisyncRun) overlappingPathsCheck(fctx context.Context, fs1, fs2 fs.Fs) error {
if operations.OverlappingFilterCheck(fctx, fs2, fs1) {
err = errors.New(Color(terminal.RedFg, "Overlapping paths detected. Cannot bisync between paths that overlap, unless excluded by filters."))
return err
@@ -597,7 +586,7 @@ func (b *bisyncRun) overlappingPathsCheck(fctx context.Context, fs1, fs2 fs.Fs)
return nil
}
func (b *bisyncRun) checkSyntax() (err error) {
func (b *bisyncRun) checkSyntax() error {
// check for odd number of quotes in path, usually indicating an escaping issue
path1 := bilib.FsPath(b.fs1)
path2 := bilib.FsPath(b.fs2)
@@ -645,3 +634,25 @@ func waitFor(msg string, totalWait time.Duration, fn func() bool) (ok bool) {
}
return false
}
// mainly to make sure tests don't interfere with each other when running more than one
func resetGlobals() {
downloadHash = false
logger = operations.NewLoggerOpt()
ignoreListingChecksum = false
ignoreListingModtime = false
hashTypes = nil
queueCI = nil
hashType = 0
fsrc, fdst = nil, nil
fcrypt = nil
Opt = Options{}
once = gosync.Once{}
downloadHashWarn = gosync.Once{}
firstDownloadHash = gosync.Once{}
ls1 = newFileList()
ls2 = newFileList()
err = nil
firstErr = nil
marchCtx = nil
}
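resetGlobals exists because the package keeps state in package-level variables, so consecutive test runs would otherwise leak state into each other. A hypothetical test-side counterpart using testing.T's Cleanup hook (resetForTest and the wiring are illustrative, not part of rclone):

package bisync_test

import "testing"

// resetForTest clears the shared globals before the test runs and
// registers the same reset to run again once the test finishes.
func resetForTest(t *testing.T, reset func()) {
	t.Helper()
	reset()          // clear anything a previous test left behind
	t.Cleanup(reset) // and clear again after this test completes
}

func TestExample(t *testing.T) {
	resetForTest(t, func() { /* e.g. call the package's resetGlobals */ })
	// ... test body operating on the package globals ...
}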


@@ -51,19 +51,19 @@ func (rs *ResultsSlice) has(name string) bool {
return false
}
type bisyncQueueOpt struct {
logger operations.LoggerOpt
var (
logger = operations.NewLoggerOpt()
lock mutex.Mutex
once mutex.Once
ignoreListingChecksum bool
ignoreListingModtime bool
hashTypes map[string]hash.Type
queueCI *fs.ConfigInfo
}
)
// allows us to get the right hashtype during the LoggerFn without knowing whether it's Path1/Path2
func (b *bisyncRun) getHashType(fname string) hash.Type {
ht, ok := b.queueOpt.hashTypes[fname]
func getHashType(fname string) hash.Type {
ht, ok := hashTypes[fname]
if ok {
return ht
}
@@ -106,9 +106,9 @@ func altName(name string, src, dst fs.DirEntry) string {
}
// WriteResults is Bisync's LoggerFn
func (b *bisyncRun) WriteResults(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEntry, err error) {
b.queueOpt.lock.Lock()
defer b.queueOpt.lock.Unlock()
func WriteResults(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEntry, err error) {
lock.Lock()
defer lock.Unlock()
opt := operations.GetLoggerOpt(ctx)
result := Results{
@@ -131,14 +131,14 @@ func (b *bisyncRun) WriteResults(ctx context.Context, sigil operations.Sigil, sr
result.Flags = "-"
if side != nil {
result.Size = side.Size()
if !b.queueOpt.ignoreListingModtime {
if !ignoreListingModtime {
result.Modtime = side.ModTime(ctx).In(TZ)
}
if !b.queueOpt.ignoreListingChecksum {
if !ignoreListingChecksum {
sideObj, ok := side.(fs.ObjectInfo)
if ok {
result.Hash, _ = sideObj.Hash(ctx, b.getHashType(sideObj.Fs().Name()))
result.Hash, _ = b.tryDownloadHash(ctx, sideObj, result.Hash)
result.Hash, _ = sideObj.Hash(ctx, getHashType(sideObj.Fs().Name()))
result.Hash, _ = tryDownloadHash(ctx, sideObj, result.Hash)
}
}
@@ -159,8 +159,8 @@ func (b *bisyncRun) WriteResults(ctx context.Context, sigil operations.Sigil, sr
}
prettyprint(result, "writing result", fs.LogLevelDebug)
if result.Size < 0 && result.Flags != "d" && ((b.queueOpt.queueCI.CheckSum && !b.downloadHashOpt.downloadHash) || b.queueOpt.queueCI.SizeOnly) {
b.queueOpt.once.Do(func() {
if result.Size < 0 && result.Flags != "d" && ((queueCI.CheckSum && !downloadHash) || queueCI.SizeOnly) {
once.Do(func() {
fs.Log(result.Name, Color(terminal.YellowFg, "Files of unknown size (such as Google Docs) do not sync reliably with --checksum or --size-only. Consider using modtime instead (the default) or --drive-skip-gdocs"))
})
}
@@ -189,14 +189,14 @@ func ReadResults(results io.Reader) []Results {
// for setup code shared by both fastCopy and resyncDir
func (b *bisyncRun) preCopy(ctx context.Context) context.Context {
b.queueOpt.queueCI = fs.GetConfig(ctx)
b.queueOpt.ignoreListingChecksum = b.opt.IgnoreListingChecksum
b.queueOpt.ignoreListingModtime = !b.opt.Compare.Modtime
b.queueOpt.hashTypes = map[string]hash.Type{
queueCI = fs.GetConfig(ctx)
ignoreListingChecksum = b.opt.IgnoreListingChecksum
ignoreListingModtime = !b.opt.Compare.Modtime
hashTypes = map[string]hash.Type{
b.fs1.Name(): b.opt.Compare.HashType1,
b.fs2.Name(): b.opt.Compare.HashType2,
}
b.queueOpt.logger.LoggerFn = b.WriteResults
logger.LoggerFn = WriteResults
overridingEqual := false
if (b.opt.Compare.Modtime && b.opt.Compare.Checksum) || b.opt.Compare.DownloadHash {
overridingEqual = true
@@ -209,15 +209,15 @@ func (b *bisyncRun) preCopy(ctx context.Context) context.Context {
fs.Debugf(nil, "overriding equal")
ctx = b.EqualFn(ctx)
}
ctxCopyLogger := operations.WithSyncLogger(ctx, b.queueOpt.logger)
ctxCopyLogger := operations.WithSyncLogger(ctx, logger)
if b.opt.Compare.Checksum && (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.opt.Compare.SlowHashDetected {
// set here in case !b.opt.Compare.Modtime
b.queueOpt.queueCI = fs.GetConfig(ctxCopyLogger)
queueCI = fs.GetConfig(ctxCopyLogger)
if b.opt.Compare.NoSlowHash {
b.queueOpt.queueCI.CheckSum = false
queueCI.CheckSum = false
}
if b.opt.Compare.SlowHashSyncOnly && !overridingEqual {
b.queueOpt.queueCI.CheckSum = true
queueCI.CheckSum = true
}
}
return ctxCopyLogger
@@ -245,16 +245,14 @@ func (b *bisyncRun) fastCopy(ctx context.Context, fsrc, fdst fs.Fs, files bilib.
}
}
b.SyncCI = fs.GetConfig(ctxCopy) // allows us to request graceful shutdown
if accounting.MaxCompletedTransfers != -1 {
accounting.MaxCompletedTransfers = -1 // we need a complete list in the event of graceful shutdown
}
b.SyncCI = fs.GetConfig(ctxCopy) // allows us to request graceful shutdown
accounting.MaxCompletedTransfers = -1 // we need a complete list in the event of graceful shutdown
ctxCopy, b.CancelSync = context.WithCancel(ctxCopy)
b.testFn()
err := sync.Sync(ctxCopy, fdst, fsrc, b.opt.CreateEmptySrcDirs)
prettyprint(b.queueOpt.logger, "b.queueOpt.logger", fs.LogLevelDebug)
prettyprint(logger, "logger", fs.LogLevelDebug)
getResults := ReadResults(b.queueOpt.logger.JSON)
getResults := ReadResults(logger.JSON)
fs.Debugf(nil, "Got %v results for %v", len(getResults), queueName)
lineFormat := "%s %8d %s %s %s %q\n"
@@ -294,9 +292,9 @@ func (b *bisyncRun) resyncDir(ctx context.Context, fsrc, fdst fs.Fs) ([]Results,
ctx = b.preCopy(ctx)
err := sync.CopyDir(ctx, fdst, fsrc, b.opt.CreateEmptySrcDirs)
prettyprint(b.queueOpt.logger, "b.queueOpt.logger", fs.LogLevelDebug)
prettyprint(logger, "logger", fs.LogLevelDebug)
getResults := ReadResults(b.queueOpt.logger.JSON)
getResults := ReadResults(logger.JSON)
fs.Debugf(nil, "Got %v results for %v", len(getResults), "resync")
return getResults, err


@@ -77,7 +77,7 @@ func (conflictLoserChoices) Type() string {
// ConflictLoserList is a list of --conflict-loser flag choices used in the help
var ConflictLoserList = Opt.ConflictLoser.Help()
func (b *bisyncRun) setResolveDefaults() error {
func (b *bisyncRun) setResolveDefaults(ctx context.Context) error {
if b.opt.ConflictLoser == ConflictLoserSkip {
b.opt.ConflictLoser = ConflictLoserNumber
}
@@ -135,7 +135,7 @@ type namePair struct {
newName string
}
func (b *bisyncRun) resolve(ctxMove context.Context, path1, path2, file, alias string, renameSkipped, copy1to2, copy2to1 *bilib.Names, ds1, ds2 *deltaSet) (err error) {
func (b *bisyncRun) resolve(ctxMove context.Context, path1, path2, file, alias string, renameSkipped, copy1to2, copy2to1 *bilib.Names, ds1, ds2 *deltaSet) error {
winningPath := 0
if b.opt.ConflictResolve != PreferNone {
winningPath = b.conflictWinner(ds1, ds2, file, alias)
@@ -197,7 +197,7 @@ func (b *bisyncRun) resolve(ctxMove context.Context, path1, path2, file, alias s
// note also that deletes and renames are mutually exclusive -- we never delete one path and rename the other.
if b.opt.ConflictLoser == ConflictLoserDelete && winningPath == 1 {
// delete 2, copy 1 to 2
err = b.delete(ctxMove, r.path2, path2, b.fs2, 2, renameSkipped)
err = b.delete(ctxMove, r.path2, path2, path1, b.fs2, 2, 1, renameSkipped)
if err != nil {
return err
}
@@ -207,7 +207,7 @@ func (b *bisyncRun) resolve(ctxMove context.Context, path1, path2, file, alias s
copy1to2.Add(r.path1.oldName)
} else if b.opt.ConflictLoser == ConflictLoserDelete && winningPath == 2 {
// delete 1, copy 2 to 1
err = b.delete(ctxMove, r.path1, path1, b.fs1, 1, renameSkipped)
err = b.delete(ctxMove, r.path1, path1, path2, b.fs1, 1, 2, renameSkipped)
if err != nil {
return err
}
@@ -261,15 +261,15 @@ func (ri *renamesInfo) getNames(is1to2 bool) (srcOldName, srcNewName, dstOldName
func (b *bisyncRun) numerate(ctx context.Context, startnum int, file, alias string) int {
for i := startnum; i < math.MaxInt; i++ {
iStr := fmt.Sprint(i)
if !b.march.ls1.has(SuffixName(ctx, file, b.opt.ConflictSuffix1+iStr)) &&
!b.march.ls1.has(SuffixName(ctx, alias, b.opt.ConflictSuffix1+iStr)) &&
!b.march.ls2.has(SuffixName(ctx, file, b.opt.ConflictSuffix2+iStr)) &&
!b.march.ls2.has(SuffixName(ctx, alias, b.opt.ConflictSuffix2+iStr)) {
if !ls1.has(SuffixName(ctx, file, b.opt.ConflictSuffix1+iStr)) &&
!ls1.has(SuffixName(ctx, alias, b.opt.ConflictSuffix1+iStr)) &&
!ls2.has(SuffixName(ctx, file, b.opt.ConflictSuffix2+iStr)) &&
!ls2.has(SuffixName(ctx, alias, b.opt.ConflictSuffix2+iStr)) {
// make sure it still holds true with suffixes switched (it should)
if !b.march.ls1.has(SuffixName(ctx, file, b.opt.ConflictSuffix2+iStr)) &&
!b.march.ls1.has(SuffixName(ctx, alias, b.opt.ConflictSuffix2+iStr)) &&
!b.march.ls2.has(SuffixName(ctx, file, b.opt.ConflictSuffix1+iStr)) &&
!b.march.ls2.has(SuffixName(ctx, alias, b.opt.ConflictSuffix1+iStr)) {
if !ls1.has(SuffixName(ctx, file, b.opt.ConflictSuffix2+iStr)) &&
!ls1.has(SuffixName(ctx, alias, b.opt.ConflictSuffix2+iStr)) &&
!ls2.has(SuffixName(ctx, file, b.opt.ConflictSuffix1+iStr)) &&
!ls2.has(SuffixName(ctx, alias, b.opt.ConflictSuffix1+iStr)) {
fs.Debugf(file, "The first available suffix is: %s", iStr)
return i
}
@@ -280,10 +280,10 @@ func (b *bisyncRun) numerate(ctx context.Context, startnum int, file, alias stri
// like numerate, but consider only one side's suffix (for when suffixes are different)
func (b *bisyncRun) numerateSingle(ctx context.Context, startnum int, file, alias string, path int) int {
lsA, lsB := b.march.ls1, b.march.ls2
lsA, lsB := ls1, ls2
suffix := b.opt.ConflictSuffix1
if path == 2 {
lsA, lsB = b.march.ls2, b.march.ls1
lsA, lsB = ls2, ls1
suffix = b.opt.ConflictSuffix2
}
for i := startnum; i < math.MaxInt; i++ {
@@ -299,7 +299,7 @@ func (b *bisyncRun) numerateSingle(ctx context.Context, startnum int, file, alia
return 0 // not really possible, as no one has 9223372036854775807 conflicts, and if they do, they have bigger problems
}
func (b *bisyncRun) rename(ctx context.Context, thisNamePair namePair, thisPath, thatPath string, thisFs fs.Fs, thisPathNum, thatPathNum, winningPath int, q, renameSkipped *bilib.Names) (err error) {
func (b *bisyncRun) rename(ctx context.Context, thisNamePair namePair, thisPath, thatPath string, thisFs fs.Fs, thisPathNum, thatPathNum, winningPath int, q, renameSkipped *bilib.Names) error {
if winningPath == thisPathNum {
b.indent(fmt.Sprintf("!Path%d", thisPathNum), thisPath+thisNamePair.newName, fmt.Sprintf("Not renaming Path%d copy, as it was determined the winner", thisPathNum))
} else {
@@ -321,7 +321,7 @@ func (b *bisyncRun) rename(ctx context.Context, thisNamePair namePair, thisPath,
return nil
}
func (b *bisyncRun) delete(ctx context.Context, thisNamePair namePair, thisPath string, thisFs fs.Fs, thisPathNum int, renameSkipped *bilib.Names) (err error) {
func (b *bisyncRun) delete(ctx context.Context, thisNamePair namePair, thisPath, thatPath string, thisFs fs.Fs, thisPathNum, thatPathNum int, renameSkipped *bilib.Names) error {
skip := operations.SkipDestructive(ctx, thisNamePair.oldName, "delete")
if !skip {
b.indent(fmt.Sprintf("!Path%d", thisPathNum), thisPath+thisNamePair.oldName, fmt.Sprintf("Deleting Path%d copy", thisPathNum))
@@ -359,17 +359,17 @@ func (b *bisyncRun) conflictWinner(ds1, ds2 *deltaSet, remote1, remote2 string)
return 2
case PreferNewer, PreferOlder:
t1, t2 := ds1.time[remote1], ds2.time[remote2]
return b.resolveNewerOlder(t1, t2, remote1, b.opt.ConflictResolve)
return b.resolveNewerOlder(t1, t2, remote1, remote2, b.opt.ConflictResolve)
case PreferLarger, PreferSmaller:
s1, s2 := ds1.size[remote1], ds2.size[remote2]
return b.resolveLargerSmaller(s1, s2, remote1, b.opt.ConflictResolve)
return b.resolveLargerSmaller(s1, s2, remote1, remote2, b.opt.ConflictResolve)
default:
return 0
}
}
// returns the winning path number, or 0 if winner can't be determined
func (b *bisyncRun) resolveNewerOlder(t1, t2 time.Time, remote1 string, prefer Prefer) int {
func (b *bisyncRun) resolveNewerOlder(t1, t2 time.Time, remote1, remote2 string, prefer Prefer) int {
if fs.GetModifyWindow(b.octx, b.fs1, b.fs2) == fs.ModTimeNotSupported {
fs.Infof(remote1, "Winner cannot be determined as at least one path lacks modtime support.")
return 0
@@ -380,31 +380,31 @@ func (b *bisyncRun) resolveNewerOlder(t1, t2 time.Time, remote1 string, prefer P
}
if t1.After(t2) {
if prefer == PreferNewer {
fs.Infof(remote1, "Path1 is newer. Path1: %v, Path2: %v, Difference: %s", t1.In(LogTZ), t2.In(LogTZ), t1.Sub(t2))
fs.Infof(remote1, "Path1 is newer. Path1: %v, Path2: %v, Difference: %s", t1.Local(), t2.Local(), t1.Sub(t2))
return 1
} else if prefer == PreferOlder {
fs.Infof(remote1, "Path2 is older. Path1: %v, Path2: %v, Difference: %s", t1.In(LogTZ), t2.In(LogTZ), t1.Sub(t2))
fs.Infof(remote1, "Path2 is older. Path1: %v, Path2: %v, Difference: %s", t1.Local(), t2.Local(), t1.Sub(t2))
return 2
}
} else if t1.Before(t2) {
if prefer == PreferNewer {
fs.Infof(remote1, "Path2 is newer. Path1: %v, Path2: %v, Difference: %s", t1.In(LogTZ), t2.In(LogTZ), t2.Sub(t1))
fs.Infof(remote1, "Path2 is newer. Path1: %v, Path2: %v, Difference: %s", t1.Local(), t2.Local(), t2.Sub(t1))
return 2
} else if prefer == PreferOlder {
fs.Infof(remote1, "Path1 is older. Path1: %v, Path2: %v, Difference: %s", t1.In(LogTZ), t2.In(LogTZ), t2.Sub(t1))
fs.Infof(remote1, "Path1 is older. Path1: %v, Path2: %v, Difference: %s", t1.Local(), t2.Local(), t2.Sub(t1))
return 1
}
}
if t1.Equal(t2) {
fs.Infof(remote1, "Winner cannot be determined as times are equal. Path1: %v, Path2: %v, Difference: %s", t1.In(LogTZ), t2.In(LogTZ), t2.Sub(t1))
fs.Infof(remote1, "Winner cannot be determined as times are equal. Path1: %v, Path2: %v, Difference: %s", t1.Local(), t2.Local(), t2.Sub(t1))
return 0
}
fs.Errorf(remote1, "Winner cannot be determined. Path1: %v, Path2: %v", t1.In(LogTZ), t2.In(LogTZ)) // shouldn't happen unless prefer is of wrong type
fs.Errorf(remote1, "Winner cannot be determined. Path1: %v, Path2: %v", t1.Local(), t2.Local()) // shouldn't happen unless prefer is of wrong type
return 0
}
// returns the winning path number, or 0 if winner can't be determined
func (b *bisyncRun) resolveLargerSmaller(s1, s2 int64, remote1 string, prefer Prefer) int {
func (b *bisyncRun) resolveLargerSmaller(s1, s2 int64, remote1, remote2 string, prefer Prefer) int {
if s1 < 0 || s2 < 0 {
fs.Infof(remote1, "Winner cannot be determined as at least one size is unknown. Path1: %v, Path2: %v", s1, s2)
return 0
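resolveNewerOlder above picks a winner from two modtimes and a Prefer direction, returning 0 whenever the times tie (or modtime support is missing). The core three-way decision, as a self-contained sketch with invented Prefer values:

package main

import (
	"fmt"
	"time"
)

type prefer int

const (
	preferNewer prefer = iota
	preferOlder
)

// winner returns 1 if path1 wins, 2 if path2 wins, or 0 if no
// winner can be determined because the timestamps are equal.
func winner(t1, t2 time.Time, p prefer) int {
	switch {
	case t1.Equal(t2):
		return 0
	case t1.After(t2) == (p == preferNewer):
		return 1
	default:
		return 2
	}
}

func main() {
	a := time.Now()
	b := a.Add(time.Minute)
	fmt.Println(winner(a, b, preferNewer)) // 2: path2 is newer
	fmt.Println(winner(a, b, preferOlder)) // 1: path1 is older
	fmt.Println(winner(a, a, preferNewer)) // 0: tie
}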


@@ -20,6 +20,7 @@ func (b *bisyncRun) setResyncDefaults() {
}
if b.opt.ResyncMode != PreferNone {
b.opt.Resync = true
Opt.Resync = true // shouldn't be using this one, but set to be safe
}
// checks and warnings
@@ -40,18 +41,18 @@ func (b *bisyncRun) setResyncDefaults() {
// It will generate path1 and path2 listings,
// copy any unique files to the opposite path,
// and resolve any differing files according to the --resync-mode.
func (b *bisyncRun) resync(fctx context.Context) (err error) {
func (b *bisyncRun) resync(octx, fctx context.Context) error {
fs.Infof(nil, "Copying Path2 files to Path1")
// Save blank filelists (will be filled from sync results)
ls1 := newFileList()
ls2 := newFileList()
err = ls1.save(b.newListing1)
var ls1 = newFileList()
var ls2 = newFileList()
err = ls1.save(fctx, b.newListing1)
if err != nil {
b.handleErr(ls1, "error saving ls1 from resync", err, true, true)
b.abort = true
}
err = ls2.save(b.newListing2)
err = ls2.save(fctx, b.newListing2)
if err != nil {
b.handleErr(ls2, "error saving ls2 from resync", err, true, true)
b.abort = true


@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm
//go:build !plan9 && !js
// Package cachestats provides the cachestats command.
package cachestats


@@ -1,7 +1,7 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || js || wasm
//go:build plan9 || js
// Package cachestats provides the cachestats command.
package cachestats


@@ -130,12 +130,6 @@ func (d *Dir) ReadDirAll(ctx context.Context) (dirents []fuse.Dirent, err error)
if node.IsDir() {
dirent.Type = fuse.DT_Dir
}
switch node := node.(type) {
case *vfs.File:
if node.IsSymlink() {
dirent.Type = fuse.DT_Link
}
}
dirents = append(dirents, dirent)
}
itemsRead = len(dirents)


@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm
//go:build !plan9 && !js
package mountlib


@@ -1,4 +1,4 @@
//go:build plan9 || js || wasm
//go:build plan9 || js
package mountlib


@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm
//go:build !plan9 && !js
// Package ncdu implements a text based user interface for exploring a remote
package ncdu


@@ -1,7 +1,7 @@
// Build for ncdu for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || js || wasm
//go:build plan9 || js
// Package ncdu implements a text based user interface for exploring a remote
package ncdu


@@ -1,4 +1,4 @@
//go:build !windows && !plan9 && !js && !wasm && !noselfupdate
//go:build !windows && !plan9 && !js && !noselfupdate
package selfupdate


@@ -1,4 +1,4 @@
//go:build (plan9 || js || wasm) && !noselfupdate
//go:build (plan9 || js) && !noselfupdate
package selfupdate


@@ -1,4 +1,4 @@
//go:build !plan9 && !wasm
//go:build !plan9
package sftp


@@ -1,4 +1,4 @@
//go:build !plan9 && !wasm
//go:build !plan9
package sftp


@@ -1,4 +1,4 @@
//go:build !plan9 && !wasm
//go:build !plan9
package sftp


@@ -1,4 +1,4 @@
//go:build !plan9 && !wasm
//go:build !plan9
package sftp


@@ -1,4 +1,4 @@
//go:build !plan9 && !wasm
//go:build !plan9
// Package sftp implements an SFTP server to serve an rclone VFS
package sftp


@@ -3,7 +3,7 @@
//
// We skip tests on platforms with troublesome character mappings
//go:build !windows && !darwin && !plan9 && !wasm
//go:build !windows && !darwin && !plan9
package sftp


@@ -1,7 +1,7 @@
// Build for sftp for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || wasm
//go:build plan9
// Package sftp implements an SFTP server to serve an rclone VFS
package sftp
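The build-constraint edits above all follow the same pairing: the real source files carry a //go:build line excluding unsupported platforms, and a stub file carries the inverse expression so the package still compiles everywhere. A minimal illustration with a hypothetical package:

// feature.go (built everywhere except plan9)

//go:build !plan9

package feature

// Supported reports that this build has the real implementation.
func Supported() bool { return true }

// feature_unsupported.go (the inverse constraint keeps plan9 builds green)

//go:build plan9

package feature

// Supported reports that this is the stub build.
func Supported() bool { return false }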


@@ -44,11 +44,9 @@ want to delete files from destination, use the
rclone sync --interactive SOURCE remote:DESTINATION
Files in the destination won't be deleted if there were any errors at any
point. Duplicate objects (files with the same name, on those providers that
support it) are not yet handled. Files that are excluded won't be deleted
unless |--delete-excluded| is used. Symlinks won't be transferred or
deleted from local file systems unless |--links| is used.
Note that files in the destination won't be deleted if there were any
errors at any point. Duplicate objects (files with the same name, on
those providers that support it) are also not yet handled.
It is always the contents of the directory that is synced, not the
directory itself. So when source:path is a directory, it's the contents of


@@ -122,7 +122,6 @@ WebDAV or S3, that work out of the box.)
{{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}}
{{< provider name="Dropbox" home="https://www.dropbox.com/" config="/dropbox/" >}}
{{< provider name="Enterprise File Fabric" home="https://storagemadeeasy.com/about/" config="/filefabric/" >}}
{{< provider name="Exaba" home="https://exaba.com/" config="/s3/#exaba" >}}
{{< provider name="Fastmail Files" home="https://www.fastmail.com/" config="/webdav/#fastmail-files" >}}
{{< provider name="FileLu Cloud Storage" home="https://filelu.com/" config="/filelu/" >}}
{{< provider name="Files.com" home="https://www.files.com/" config="/filescom/" >}}


@@ -998,8 +998,3 @@ put them back in again.` >}}
- houance <45840516+houance@users.noreply.github.com>
- Yu Xin <3188+is@users.noreply.github.com>
- Antonin Goude <antonin.goude@ovhcloud.com>
- Timothy Jacobs <timothy@ironbounddesigns.com>
- praveen-solanki-oracle <praveen.solanki@oracle.com>
- huangnauh <huanglibo2010@gmail.com>
- Lucas Bremgartner <breml@users.noreply.github.com>
- Binbin Qian <qianbinbin@hotmail.com>

Some files were not shown because too many files have changed in this diff.