mirror of https://github.com/rclone/rclone.git synced 2026-01-08 11:33:33 +00:00

Compare commits


1 Commit

Author: Nick Craig-Wood
SHA1: 279b48ccbd
Message: vfs: make --vfs-download-first flag WIP FIXME
Needs:
- tests
- docs
Date: 2025-07-28 12:20:58 +01:00
396 changed files with 40427 additions and 76293 deletions

View File

@@ -29,12 +29,12 @@ jobs:
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.24']
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.23']
include:
- job_name: linux
os: ubuntu-latest
go: '>=1.25.0-rc.1'
go: '>=1.24.0-rc.1'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
@@ -45,14 +45,14 @@ jobs:
- job_name: linux_386
os: ubuntu-latest
go: '>=1.25.0-rc.1'
go: '>=1.24.0-rc.1'
goarch: 386
gotags: cmount
quicktest: true
- job_name: mac_amd64
os: macos-latest
go: '>=1.25.0-rc.1'
go: '>=1.24.0-rc.1'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
@@ -61,14 +61,14 @@ jobs:
- job_name: mac_arm64
os: macos-latest
go: '>=1.25.0-rc.1'
go: '>=1.24.0-rc.1'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows
os: windows-latest
go: '>=1.25.0-rc.1'
go: '>=1.24.0-rc.1'
gotags: cmount
cgo: '0'
build_flags: '-include "^windows/"'
@@ -78,14 +78,14 @@ jobs:
- job_name: other_os
os: ubuntu-latest
go: '>=1.25.0-rc.1'
go: '>=1.24.0-rc.1'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true
- job_name: go1.24
- job_name: go1.23
os: ubuntu-latest
go: '1.24'
go: '1.23'
quicktest: true
racequicktest: true
@@ -95,12 +95,12 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Install Go
uses: actions/setup-go@v6
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go }}
check-latest: true
@@ -216,15 +216,15 @@ jobs:
echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Install Go
id: setup-go
uses: actions/setup-go@v6
uses: actions/setup-go@v5
with:
go-version: '>=1.24.0-rc.1'
go-version: '>=1.23.0-rc.1'
check-latest: true
cache: false
@@ -239,13 +239,13 @@ jobs:
restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-
- name: Code quality test (Linux)
uses: golangci/golangci-lint-action@v8
uses: golangci/golangci-lint-action@v6
with:
version: latest
skip-cache: true
- name: Code quality test (Windows)
uses: golangci/golangci-lint-action@v8
uses: golangci/golangci-lint-action@v6
env:
GOOS: "windows"
with:
@@ -253,7 +253,7 @@ jobs:
skip-cache: true
- name: Code quality test (macOS)
uses: golangci/golangci-lint-action@v8
uses: golangci/golangci-lint-action@v6
env:
GOOS: "darwin"
with:
@@ -261,7 +261,7 @@ jobs:
skip-cache: true
- name: Code quality test (FreeBSD)
uses: golangci/golangci-lint-action@v8
uses: golangci/golangci-lint-action@v6
env:
GOOS: "freebsd"
with:
@@ -269,7 +269,7 @@ jobs:
skip-cache: true
- name: Code quality test (OpenBSD)
uses: golangci/golangci-lint-action@v8
uses: golangci/golangci-lint-action@v6
env:
GOOS: "openbsd"
with:
@@ -290,8 +290,7 @@ jobs:
MAINTAINERS.md
README.md
RELEASE.md
CODE_OF_CONDUCT.md
docs/content/{authors,bugs,changelog,cluster,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md
docs/content/{authors,bugs,changelog,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md
- name: Scan edits of autogenerated files
run: bin/check_autogenerated_edits.py 'origin/${{ github.base_ref }}'
@@ -305,15 +304,15 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
fetch-depth: 0
# Upgrade together with NDK version
- name: Set up Go
uses: actions/setup-go@v6
uses: actions/setup-go@v5
with:
go-version: '>=1.25.0-rc.1'
go-version: '>=1.24.0-rc.1'
- name: Set global environment variables
run: |

View File

@@ -52,7 +52,7 @@ jobs:
df -h .
- name: Checkout Repository
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
fetch-depth: 0
@@ -92,7 +92,7 @@ jobs:
# There's no way around this, because "ImageOS" is only available to
# processes, but the setup-go action uses it in its key.
id: imageos
uses: actions/github-script@v8
uses: actions/github-script@v7
with:
result-encoding: string
script: |
@@ -198,7 +198,7 @@ jobs:
steps:
- name: Download Image Digests
uses: actions/download-artifact@v5
uses: actions/download-artifact@v4
with:
path: /tmp/digests
pattern: digests-*

View File

@@ -30,7 +30,7 @@ jobs:
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build and publish docker plugin

View File

@@ -1,146 +1,144 @@
version: "2"
# golangci-lint configuration options
linters:
# Configure the linter set. To avoid unexpected results the implicit default
# set is ignored and all the ones to use are explicitly enabled.
default: none
enable:
# Default
- errcheck
- govet
- ineffassign
- staticcheck
- unused
# Additional
- gocritic
- misspell
#- prealloc # TODO
- revive
- unconvert
# Configure checks. Mostly using defaults but with some commented exceptions.
settings:
staticcheck:
# With staticcheck there is only one setting, so to extend the implicit
# default value it must be explicitly included.
checks:
# Default
- all
- -ST1000
- -ST1003
- -ST1016
- -ST1020
- -ST1021
- -ST1022
# Disable quickfix checks
- -QF*
gocritic:
# With gocritic there are different settings, but since enabled-checks
# and disabled-checks cannot both be set, for full customization the
# alternative is to disable all defaults and explicitly enable the ones
# to use.
disable-all: true
enabled-checks:
#- appendAssign # Skip default
- argOrder
- assignOp
- badCall
- badCond
#- captLocal # Skip default
- caseOrder
- codegenComment
#- commentFormatting # Skip default
- defaultCaseOrder
- deprecatedComment
- dupArg
- dupBranchBody
- dupCase
- dupSubExpr
- elseif
#- exitAfterDefer # Skip default
- flagDeref
- flagName
#- ifElseChain # Skip default
- mapKey
- newDeref
- offBy1
- regexpMust
- ruleguard # Enable an additional check that is not enabled by default
#- singleCaseSwitch # Skip default
- sloppyLen
- sloppyTypeAssert
- switchTrue
- typeSwitchVar
- underef
- unlambda
- unslice
- valSwap
- wrapperFunc
settings:
ruleguard:
rules: ${base-path}/bin/rules.go
revive:
# With revive there is in reality only one setting, and when at least one
# rule is specified then only those rules will be considered; defaults
# and all others are then implicitly disabled, so all rules to be used
# must be explicitly enabled.
rules:
- name: blank-imports
disabled: false
- name: context-as-argument
disabled: false
- name: context-keys-type
disabled: false
- name: dot-imports
disabled: false
#- name: empty-block # Skip default
# disabled: true
- name: error-naming
disabled: false
- name: error-return
disabled: false
- name: error-strings
disabled: false
- name: errorf
disabled: false
- name: exported
disabled: false
#- name: increment-decrement # Skip default
# disabled: true
- name: indent-error-flow
disabled: false
- name: package-comments
disabled: false
- name: range
disabled: false
- name: receiver-naming
disabled: false
#- name: redefines-builtin-id # Skip default
# disabled: true
#- name: superfluous-else # Skip default
# disabled: true
- name: time-naming
disabled: false
- name: unexported-return
disabled: false
#- name: unreachable-code # Skip default
# disabled: true
#- name: unused-parameter # Skip default
# disabled: true
- name: var-declaration
disabled: false
- name: var-naming
disabled: false
formatters:
enable:
- goimports
- revive
- ineffassign
- govet
- unconvert
- staticcheck
- gosimple
- stylecheck
- unused
- misspell
- gocritic
#- prealloc
#- maligned
disable-all: true
issues:
# Enable some lints excluded by default
exclude-use-default: false
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
max-issues-per-linter: 0
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0
exclude-rules:
- linters:
- staticcheck
text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'
# don't disable the revive messages about comments on exported functions
include:
- EXC0012
- EXC0013
- EXC0014
- EXC0015
run:
# Timeout for total work, e.g. 30s, 5m, 5m30s. Default is 0 (disabled).
# timeout for analysis, e.g. 30s, 5m, default is 1m
timeout: 10m
linters-settings:
revive:
# setting rules seems to disable all the rules, so re-enable them here
rules:
- name: blank-imports
disabled: false
- name: context-as-argument
disabled: false
- name: context-keys-type
disabled: false
- name: dot-imports
disabled: false
- name: empty-block
disabled: true
- name: error-naming
disabled: false
- name: error-return
disabled: false
- name: error-strings
disabled: false
- name: errorf
disabled: false
- name: exported
disabled: false
- name: increment-decrement
disabled: true
- name: indent-error-flow
disabled: false
- name: package-comments
disabled: false
- name: range
disabled: false
- name: receiver-naming
disabled: false
- name: redefines-builtin-id
disabled: true
- name: superfluous-else
disabled: true
- name: time-naming
disabled: false
- name: unexported-return
disabled: false
- name: unreachable-code
disabled: true
- name: unused-parameter
disabled: true
- name: var-declaration
disabled: false
- name: var-naming
disabled: false
stylecheck:
# Only enable the checks performed by the staticcheck stand-alone tool,
# as documented here: https://staticcheck.io/docs/configuration/options/#checks
checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
gocritic:
# Enable all default checks with some exceptions and some additions (commented).
# Cannot use both enabled-checks and disabled-checks, so must specify all to be used.
disable-all: true
enabled-checks:
#- appendAssign # Enabled by default
- argOrder
- assignOp
- badCall
- badCond
#- captLocal # Enabled by default
- caseOrder
- codegenComment
#- commentFormatting # Enabled by default
- defaultCaseOrder
- deprecatedComment
- dupArg
- dupBranchBody
- dupCase
- dupSubExpr
- elseif
#- exitAfterDefer # Enabled by default
- flagDeref
- flagName
#- ifElseChain # Enabled by default
- mapKey
- newDeref
- offBy1
- regexpMust
- ruleguard # Not enabled by default
#- singleCaseSwitch # Enabled by default
- sloppyLen
- sloppyTypeAssert
- switchTrue
- typeSwitchVar
- underef
- unlambda
- unslice
- valSwap
- wrapperFunc
settings:
ruleguard:
rules: "${configDir}/bin/rules.go"

View File

@@ -1,80 +0,0 @@
# Rclone Code of Conduct
Like the technical community as a whole, the Rclone team and community
is made up of a mixture of professionals and volunteers from all over
the world, working on every aspect of the mission - including
mentorship, teaching, and connecting people.
Diversity is one of our huge strengths, but it can also lead to
communication issues and unhappiness. To that end, we have a few
ground rules that we ask people to adhere to. This code applies
equally to founders, mentors and those seeking help and guidance.
This isn't an exhaustive list of things that you can't do. Rather,
take it in the spirit in which it's intended - a guide to make it
easier to enrich all of us and the technical communities in which we
participate.
This code of conduct applies to all spaces managed by the Rclone
project or Rclone Services Ltd. This includes the issue tracker, the
forum, the GitHub site, the wiki, any other online services or
in-person events. In addition, violations of this code outside these
spaces may affect a person's ability to participate within them.
- **Be friendly and patient.**
- **Be welcoming.** We strive to be a community that welcomes and
supports people of all backgrounds and identities. This includes,
but is not limited to members of any race, ethnicity, culture,
national origin, colour, immigration status, social and economic
class, educational level, sex, sexual orientation, gender identity
and expression, age, size, family status, political belief,
religion, and mental and physical ability.
- **Be considerate.** Your work will be used by other people, and you
in turn will depend on the work of others. Any decision you take
will affect users and colleagues, and you should take those
consequences into account when making decisions. Remember that we're
a world-wide community, so you might not be communicating in someone
else's primary language.
- **Be respectful.** Not all of us will agree all the time, but
disagreement is no excuse for poor behavior and poor manners. We
might all experience some frustration now and then, but we cannot
allow that frustration to turn into a personal attack. It's
important to remember that a community where people feel
uncomfortable or threatened is not a productive one. Members of the
Rclone community should be respectful when dealing with other
members as well as with people outside the Rclone community.
- **Be careful in the words that you choose.** We are a community of
professionals, and we conduct ourselves professionally. Be kind to
others. Do not insult or put down other participants. Harassment and
other exclusionary behavior aren't acceptable. This includes, but is
not limited to:
- Violent threats or language directed against another person.
- Discriminatory jokes and language.
- Posting sexually explicit or violent material.
- Posting (or threatening to post) other people's personally
identifying information ("doxing").
- Personal insults, especially those using racist or sexist terms.
- Unwelcome sexual attention.
- Advocating for, or encouraging, any of the above behavior.
- Repeated harassment of others. In general, if someone asks you to
stop, then stop.
- **When we disagree, try to understand why.** Disagreements, both
social and technical, happen all the time and Rclone is no
exception. It is important that we resolve disagreements and
differing views constructively. Remember that we're different. The
strength of Rclone comes from its varied community, people from a
wide range of backgrounds. Different people have different
perspectives on issues. Being unable to understand why someone holds
a viewpoint doesn't mean that they're wrong. Don't forget that it is
human to err and blaming each other doesn't get us anywhere.
Instead, focus on helping to resolve issues and learning from
mistakes.
If you believe someone is violating the code of conduct, we ask that
you report it by emailing [info@rclone.com](mailto:info@rclone.com).
Original text courtesy of the [Speak Up! project](http://web.archive.org/web/20141109123859/http://speakup.io/coc.html).
## Questions?
If you have questions, please feel free to [contact us](mailto:info@rclone.com).

View File

@@ -628,12 +628,11 @@ You'll need to modify the following files
- `backend/s3/s3.go`
- Add the provider to `providerOption` at the top of the file
- Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
- Exclude your provider from generic config questions (eg `region` and `endpoint`).
- Exclude your provider from generic config questions (eg `region` and `endpoint).
- Add the provider to the `setQuirks` function - see the documentation there.
- `docs/content/s3.md`
- Add the provider at the top of the page.
- Add a section about the provider linked from there.
- Make sure this is in alphabetical order in the `Providers` section.
- Add a transcript of a trial `rclone config` session
- Edit the transcript to remove things which might change in subsequent versions
- **Do not** alter or add to the autogenerated parts of `s3.md`
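
The `setQuirks` step in the list above is where provider-specific behaviour gets switched on and off. A minimal sketch of the pattern, assuming a hypothetical `AcmeS3` provider and invented quirk fields (the real `setQuirks` in `backend/s3/s3.go` carries many more knobs):

package main

import "fmt"

// Sketch only: these types are illustrative stand-ins, not the real
// rclone types.
type Options struct {
    Provider string
}

type quirks struct {
    listObjectsV2     bool // provider supports the ListObjectsV2 API
    virtualHostStyle  bool // provider supports virtual-host-style URLs
    urlEncodeListings bool // provider URL-encodes keys in listings
}

// setQuirks switches provider-specific behaviour on or off, starting
// from capable defaults and degrading per provider.
func setQuirks(opt *Options) quirks {
    q := quirks{
        listObjectsV2:     true,
        virtualHostStyle:  true,
        urlEncodeListings: true,
    }
    switch opt.Provider {
    case "AWS":
        // full feature set - keep the defaults
    case "AcmeS3": // hypothetical provider
        q.listObjectsV2 = false // only supports the v1 listing API
        q.virtualHostStyle = false
    }
    return q
}

func main() {
    fmt.Printf("%+v\n", setQuirks(&Options{Provider: "AcmeS3"}))
}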

46416
MANUAL.html generated

File diff suppressed because it is too large

20435
MANUAL.md generated

File diff suppressed because it is too large

5941
MANUAL.txt generated

File diff suppressed because it is too large

View File

@@ -100,7 +100,6 @@ compiletest:
check: rclone
@echo "-- START CODE QUALITY REPORT -------------------------------"
@golangci-lint run $(LINTTAGS) ./...
@bin/markdown-lint
@echo "-- END CODE QUALITY REPORT ---------------------------------"
# Get the build dependencies
@@ -114,21 +113,21 @@ release_dep_linux:
# Update dependencies
showupdates:
@echo "*** Direct dependencies that could be updated ***"
@go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null
@GO111MODULE=on go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null
# Update direct dependencies only
updatedirect:
go get $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
go mod tidy
GO111MODULE=on go get -d $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
GO111MODULE=on go mod tidy
# Update direct and indirect dependencies and test dependencies
update:
go get -u -t ./...
go mod tidy
GO111MODULE=on go get -d -u -t ./...
GO111MODULE=on go mod tidy
# Tidy the module dependencies
tidy:
go mod tidy
GO111MODULE=on go mod tidy
doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
@@ -145,11 +144,9 @@ MANUAL.txt: MANUAL.md
pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt
commanddocs: rclone
go generate ./lib/transform
-@rmdir -p '$$HOME/.config/rclone'
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
go run bin/make_bisync_docs.go ./docs/content/
backenddocs: rclone bin/make_backend_docs.py
-@rmdir -p '$$HOME/.config/rclone'
@@ -246,7 +243,7 @@ fetch_binaries:
rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/
serve: website
cd docs && hugo server --logLevel info -w --disableFastRender --ignoreCache
cd docs && hugo server --logLevel info -w --disableFastRender
tag: retag doc
bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new

View File

@@ -39,7 +39,6 @@ directories to and from different cloud storage providers.
- Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
- Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
- Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
- Exaba [:page_facing_up:](https://rclone.org/s3/#exaba)
- Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
- FileLu [:page_facing_up:](https://rclone.org/filelu/)
- Files.com [:page_facing_up:](https://rclone.org/filescom/)
@@ -50,7 +49,6 @@ directories to and from different cloud storage providers.
- Google Drive [:page_facing_up:](https://rclone.org/drive/)
- Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
- HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
- Hetzner Object Storage [:page_facing_up:](https://rclone.org/s3/#hetzner)
- Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
- HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
- HTTP [:page_facing_up:](https://rclone.org/http/)
@@ -60,7 +58,6 @@ directories to and from different cloud storage providers.
- Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
- Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
- IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
- Intercolo Object Storage [:page_facing_up:](https://rclone.org/s3/#intercolo)
- IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
- Koofr [:page_facing_up:](https://rclone.org/koofr/)
- Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
@@ -96,7 +93,6 @@ directories to and from different cloud storage providers.
- Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
- QingStor [:page_facing_up:](https://rclone.org/qingstor/)
- Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
- Rabata Cloud Storage [:page_facing_up:](https://rclone.org/s3/#Rabata)
- Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
- Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
- RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
@@ -108,7 +104,6 @@ directories to and from different cloud storage providers.
- Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
- SFTP [:page_facing_up:](https://rclone.org/sftp/)
- SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
- Spectra Logic [:page_facing_up:](https://rclone.org/s3/#spectralogic)
- StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
- Storj [:page_facing_up:](https://rclone.org/storj/)
- SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)

View File

@@ -1 +1 @@
v1.72.0
v1.71.0

View File

@@ -51,7 +51,6 @@ import (
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/multipart"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/pool"
"golang.org/x/sync/errgroup"
)
@@ -1338,9 +1337,9 @@ func (f *Fs) containerOK(container string) bool {
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, containerName, directory, prefix string, addContainer bool, callback func(fs.DirEntry) error) (err error) {
func (f *Fs) listDir(ctx context.Context, containerName, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
if !f.containerOK(containerName) {
return fs.ErrorDirNotFound
return nil, fs.ErrorDirNotFound
}
err = f.list(ctx, containerName, directory, prefix, addContainer, false, int32(f.opt.ListChunkSize), func(remote string, object *container.BlobItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
@@ -1348,16 +1347,16 @@ func (f *Fs) listDir(ctx context.Context, containerName, directory, prefix strin
return err
}
if entry != nil {
return callback(entry)
entries = append(entries, entry)
}
return nil
})
if err != nil {
return err
return nil, err
}
// container must be present if listing succeeded
f.cache.MarkOK(containerName)
return nil
return entries, nil
}
// listContainers returns all the containers to out
@@ -1393,47 +1392,14 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
container, directory := f.split(dir)
if container == "" {
if directory != "" {
return fs.ErrorListBucketRequired
return nil, fs.ErrorListBucketRequired
}
entries, err := f.listContainers(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
} else {
err := f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "", list.Add)
if err != nil {
return err
}
return f.listContainers(ctx)
}
return list.Flush()
return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "")
}
// ListR lists the objects and directories of the Fs starting
@@ -2152,6 +2118,7 @@ func (o *Object) getMetadata() (metadata map[string]*string) {
}
metadata = make(map[string]*string, len(o.meta))
for k, v := range o.meta {
v := v
metadata[k] = &v
}
return metadata
@@ -2703,13 +2670,6 @@ func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
return -1, err
}
// Only account after the checksum reads have been done
if do, ok := reader.(pool.DelayAccountinger); ok {
// To figure out this number, do a transfer and if the accounted size is 0 or a
// multiple of what it should be, increase or decrease this number.
do.DelayAccounting(2)
}
// Upload the block, with MD5 for check
m := md5.New()
currentChunkSize, err := io.Copy(m, reader)
@@ -3186,7 +3146,6 @@ var (
_ fs.PutStreamer = &Fs{}
_ fs.Purger = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.OpenChunkWriter = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
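
The azureblob hunks above contrast two listing styles: a `List` that accumulates a complete `fs.DirEntries` slice, and a streaming `ListP` that hands entries to a callback in tranches via `list.NewHelper`, so callers can start work before the listing finishes. A rough sketch of the streaming pattern with stand-in types (the real code uses `fs.DirEntry`, `fs.ListRCallback` and `list.NewHelper`); the same List/ListP swap appears in the b2 and Google Cloud Storage hunks below:

package main

import "fmt"

type entry string

// callback receives tranches of entries as they are produced.
type callback func([]entry) error

// helper batches single entries into tranches for the callback,
// roughly what list.NewHelper does.
type helper struct {
    cb    callback
    batch []entry
}

// Add queues one entry, flushing a tranche when the batch is full.
func (h *helper) Add(e entry) error {
    h.batch = append(h.batch, e)
    if len(h.batch) >= 2 { // tiny batch size for demonstration
        return h.Flush()
    }
    return nil
}

// Flush delivers any buffered entries to the callback.
func (h *helper) Flush() error {
    if len(h.batch) == 0 {
        return nil
    }
    err := h.cb(h.batch)
    h.batch = nil
    return err
}

func main() {
    h := &helper{cb: func(batch []entry) error {
        fmt.Println("tranche:", batch)
        return nil
    }}
    for _, e := range []entry{"a", "b", "c"} {
        if err := h.Add(e); err != nil {
            return
        }
    }
    _ = h.Flush() // deliver the final partial tranche
}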

View File

@@ -453,7 +453,7 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
return nil, fmt.Errorf("create new shared key credential failed: %w", err)
}
case opt.UseAZ:
options := azidentity.AzureCLICredentialOptions{}
var options = azidentity.AzureCLICredentialOptions{}
cred, err = azidentity.NewAzureCLICredential(&options)
fmt.Println(cred)
if err != nil {
@@ -550,7 +550,7 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
case opt.UseMSI:
// Specifying a user-assigned identity. Exactly one of the above IDs must be specified.
// Validate and ensure exactly one is set. (To do: better validation.)
b2i := map[bool]int{false: 0, true: 1}
var b2i = map[bool]int{false: 0, true: 1}
set := b2i[opt.MSIClientID != ""] + b2i[opt.MSIObjectID != ""] + b2i[opt.MSIResourceID != ""]
if set > 1 {
return nil, errors.New("more than one user-assigned identity ID is set")
@@ -583,6 +583,7 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
token, err := msiCred.GetToken(context.Background(), policy.TokenRequestOptions{
Scopes: []string{"api://AzureADTokenExchange"},
})
if err != nil {
return "", fmt.Errorf("failed to acquire MSI token: %w", err)
}
@@ -854,7 +855,7 @@ func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
return entries, err
}
opt := &directory.ListFilesAndDirectoriesOptions{
var opt = &directory.ListFilesAndDirectoriesOptions{
Include: directory.ListFilesInclude{
Timestamps: true,
},
@@ -1013,10 +1014,6 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
SMBProperties: &file.SMBProperties{
LastWriteTime: &t,
},
HTTPHeaders: &file.HTTPHeaders{
ContentMD5: o.md5,
ContentType: &o.contentType,
},
}
_, err := o.fileClient().SetHTTPHeaders(ctx, &opt)
if err != nil {
@@ -1313,29 +1310,10 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
srcURL := srcObj.fileClient().URL()
fc := f.fileClient(remote)
startCopy, err := fc.StartCopyFromURL(ctx, srcURL, &opt)
_, err = fc.StartCopyFromURL(ctx, srcURL, &opt)
if err != nil {
return nil, fmt.Errorf("Copy failed: %w", err)
}
// Poll for completion if necessary
//
// The for loop is never executed for same storage account copies.
copyStatus := startCopy.CopyStatus
var properties file.GetPropertiesResponse
pollTime := 100 * time.Millisecond
for copyStatus != nil && string(*copyStatus) == string(file.CopyStatusTypePending) {
time.Sleep(pollTime)
properties, err = fc.GetProperties(ctx, &file.GetPropertiesOptions{})
if err != nil {
return nil, err
}
copyStatus = properties.CopyStatus
pollTime = min(2*pollTime, time.Second)
}
dstObj, err := f.NewObject(ctx, remote)
if err != nil {
return nil, fmt.Errorf("Copy: NewObject failed: %w", err)

View File

@@ -847,7 +847,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.File
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
last := ""
err = f.list(ctx, bucket, directory, prefix, f.rootBucket == "", false, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
@@ -855,16 +855,16 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
return err
}
if entry != nil {
return callback(entry)
entries = append(entries, entry)
}
return nil
})
if err != nil {
return err
return nil, err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
return nil
return entries, nil
}
// listBuckets returns all the buckets to out
@@ -890,46 +890,14 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
bucket, directory := f.split(dir)
if bucket == "" {
if directory != "" {
return fs.ErrorListBucketRequired
}
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
} else {
err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
if err != nil {
return err
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
}
return list.Flush()
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
}
// ListR lists the objects and directories of the Fs starting
@@ -2224,17 +2192,13 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
return info, nil, err
}
up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
if err != nil {
return info, nil, err
}
info = fs.ChunkWriterInfo{
ChunkSize: up.chunkSize,
ChunkSize: int64(f.opt.ChunkSize),
Concurrency: o.fs.opt.UploadConcurrency,
//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
}
return info, up, nil
up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
return info, up, err
}
// Remove an object
@@ -2464,7 +2428,6 @@ var (
_ fs.PutStreamer = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.PublicLinker = &Fs{}
_ fs.OpenChunkWriter = &Fs{}
_ fs.Commander = &Fs{}
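
A small detail in the `OpenChunkWriter` hunk above: one variant reads `up.chunkSize` and so must check the error from `newLargeUpload` first, because on failure the returned pointer is nil; the other builds `ChunkWriterInfo` from `f.opt.ChunkSize` and returns the error directly. A tiny sketch of why the check must come before the use (names invented):

package main

import (
    "errors"
    "fmt"
)

type upload struct{ chunkSize int64 }

func newLargeUpload(fail bool) (*upload, error) {
    if fail {
        return nil, errors.New("upload setup failed")
    }
    return &upload{chunkSize: 96 << 20}, nil
}

func main() {
    up, err := newLargeUpload(true)
    if err != nil {
        fmt.Println("handled:", err)
        return
    }
    // Only safe to read up.chunkSize once err is known to be nil;
    // on error up is nil and this would panic.
    fmt.Println(up.chunkSize)
}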

View File

@@ -125,21 +125,10 @@ type FolderItems struct {
Offset int `json:"offset"`
Limit int `json:"limit"`
NextMarker *string `json:"next_marker,omitempty"`
// There is some confusion about how this is actually
// returned. The []struct has worked for many years, but in
// https://github.com/rclone/rclone/issues/8776 box was
// returning it not as a list. We don't actually use
// this so comment it out.
//
// Order struct {
// By string `json:"by"`
// Direction string `json:"direction"`
// } `json:"order"`
//
// Order []struct {
// By string `json:"by"`
// Direction string `json:"direction"`
// } `json:"order"`
Order []struct {
By string `json:"by"`
Direction string `json:"direction"`
} `json:"order"`
}
// Parent defined the ID of the parent directory
@@ -282,9 +271,9 @@ type User struct {
ModifiedAt time.Time `json:"modified_at"`
Language string `json:"language"`
Timezone string `json:"timezone"`
SpaceAmount float64 `json:"space_amount"`
SpaceUsed float64 `json:"space_used"`
MaxUploadSize float64 `json:"max_upload_size"`
SpaceAmount int64 `json:"space_amount"`
SpaceUsed int64 `json:"space_used"`
MaxUploadSize int64 `json:"max_upload_size"`
Status string `json:"status"`
JobTitle string `json:"job_title"`
Phone string `json:"phone"`
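
The commented-out `Order` field above records that box has returned this field in two shapes (see issue #8776), and rclone's answer is simply to stop decoding it. Purely for illustration, one tolerant alternative is to defer decoding with `json.RawMessage` and accept either shape; the types below are hypothetical, not the rclone code:

package main

import (
    "encoding/json"
    "fmt"
)

type orderItem struct {
    By        string `json:"by"`
    Direction string `json:"direction"`
}

type folderItems struct {
    Order json.RawMessage `json:"order"` // decoded lazily below
}

// orders accepts the field as either a list or a single object.
func (f *folderItems) orders() ([]orderItem, error) {
    if len(f.Order) == 0 {
        return nil, nil
    }
    var list []orderItem
    if err := json.Unmarshal(f.Order, &list); err == nil {
        return list, nil
    }
    var single orderItem
    if err := json.Unmarshal(f.Order, &single); err != nil {
        return nil, err
    }
    return []orderItem{single}, nil
}

func main() {
    for _, body := range []string{
        `{"order": [{"by":"type","direction":"ASC"}]}`,
        `{"order": {"by":"type","direction":"ASC"}}`,
    } {
        var fi folderItems
        _ = json.Unmarshal([]byte(body), &fi)
        o, err := fi.orders()
        fmt.Println(o, err)
    }
}

The `space_amount` lines in the same hunk differ between `float64` and `int64`; JSON integers above 2^53 cannot be represented exactly in a float64, which is worth keeping in mind for byte quotas.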

View File

@@ -684,7 +684,7 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
start, end int64
}
parseChunks := func(ranges string) (crs []chunkRange, err error) {
for part := range strings.SplitSeq(ranges, ",") {
for _, part := range strings.Split(ranges, ",") {
var start, end int64 = 0, math.MaxInt64
switch ints := strings.Split(part, ":"); len(ints) {
case 1:
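
The pair above swaps between `strings.Split`, which allocates the whole slice up front, and `strings.SplitSeq`, the iterator form added in Go 1.24 that yields one piece at a time. A side-by-side sketch:

package main

import (
    "fmt"
    "strings"
)

func main() {
    const ranges = "0:9,10:19,20:"

    // strings.Split allocates the whole []string up front.
    for _, part := range strings.Split(ranges, ",") {
        fmt.Println("split:", part)
    }

    // strings.SplitSeq (Go 1.24+) yields one piece at a time via an
    // iterator, avoiding the intermediate slice.
    for part := range strings.SplitSeq(ranges, ",") {
        fmt.Println("seq:", part)
    }
}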

View File

@@ -187,6 +187,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
g, gCtx := errgroup.WithContext(ctx)
var mu sync.Mutex
for _, upstream := range opt.Upstreams {
upstream := upstream
g.Go(func() (err error) {
equal := strings.IndexRune(upstream, '=')
if equal < 0 {
@@ -240,22 +241,18 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
DirModTimeUpdatesOnWrite: true,
PartialUploads: true,
}).Fill(ctx, f)
canMove, slowHash := true, false
canMove := true
for _, u := range f.upstreams {
features = features.Mask(ctx, u.f) // Mask all upstream fs
if !operations.CanServerSideMove(u.f) {
canMove = false
}
slowHash = slowHash || u.f.Features().SlowHash
}
// We can move if all remotes support Move or Copy
if canMove {
features.Move = f.Move
}
// If any of upstreams are SlowHash, propagate it
features.SlowHash = slowHash
// Enable ListR when upstreams either support ListR or are local,
// but not when all upstreams are local
if features.ListR == nil {
@@ -369,6 +366,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream) error) error {
g, gCtx := errgroup.WithContext(ctx)
for _, u := range f.upstreams {
u := u
g.Go(func() (err error) {
return fn(gCtx, u)
})
@@ -635,6 +633,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
var uChans []chan time.Duration
for _, u := range f.upstreams {
u := u
if do := u.f.Features().ChangeNotify; do != nil {
ch := make(chan time.Duration)
uChans = append(uChans, ch)
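
The `upstream := upstream` and `u := u` lines above are the classic pre-Go-1.22 idiom for giving each goroutine its own copy of the loop variable; since Go 1.22 every iteration declares a fresh variable, so the copies are redundant. A small illustration:

package main

import (
    "fmt"
    "sync"
)

func main() {
    var wg sync.WaitGroup
    for _, name := range []string{"a", "b", "c"} {
        // Before Go 1.22 this closure captured a single shared
        // variable and needed `name := name` here; from Go 1.22 on,
        // each iteration has its own `name`, so the copy is redundant.
        wg.Add(1)
        go func() {
            defer wg.Done()
            fmt.Println(name)
        }()
    }
    wg.Wait()
}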

View File

@@ -598,7 +598,7 @@ It doesn't return anything.
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "metadata":
return f.ShowMetadata(ctx)
@@ -625,7 +625,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
}
// ShowMetadata returns some metadata about the corresponding DOI
func (f *Fs) ShowMetadata(ctx context.Context) (metadata any, err error) {
func (f *Fs) ShowMetadata(ctx context.Context) (metadata interface{}, err error) {
doiURL, err := url.Parse("https://doi.org/" + f.opt.Doi)
if err != nil {
return nil, err

View File

@@ -18,7 +18,7 @@ type headerLink struct {
}
func parseLinkHeader(header string) (links []headerLink) {
for link := range strings.SplitSeq(header, ",") {
for _, link := range strings.Split(header, ",") {
link = strings.TrimSpace(link)
parsed := parseLink(link)
if parsed != nil {
@@ -30,7 +30,7 @@ func parseLinkHeader(header string) (links []headerLink) {
func parseLink(link string) (parsedLink *headerLink) {
var parts []string
for part := range strings.SplitSeq(link, ";") {
for _, part := range strings.Split(link, ";") {
parts = append(parts, strings.TrimSpace(part))
}
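
`parseLinkHeader` above splits an HTTP `Link` header (RFC 8288) on commas and each link on semicolons. A sketch of what a parsed segment looks like, using an invented `parseOne` helper rather than the backend's `parseLink`:

package main

import (
    "fmt"
    "strings"
)

// parseOne extracts the target URL and the rel parameter from one
// Link header segment like `<https://example.com/?page=2>; rel="next"`.
func parseOne(link string) (target, rel string) {
    parts := strings.Split(link, ";")
    target = strings.Trim(strings.TrimSpace(parts[0]), "<>")
    for _, p := range parts[1:] {
        if k, v, ok := strings.Cut(strings.TrimSpace(p), "="); ok && k == "rel" {
            rel = strings.Trim(v, `"`)
        }
    }
    return target, rel
}

func main() {
    header := `<https://example.com/?page=2>; rel="next", <https://example.com/?page=10>; rel="last"`
    for _, link := range strings.Split(header, ",") {
        t, r := parseOne(strings.TrimSpace(link))
        fmt.Printf("%s -> %s\n", r, t)
    }
}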

View File

@@ -191,7 +191,7 @@ func driveScopes(scopesString string) (scopes []string) {
if scopesString == "" {
scopesString = defaultScope
}
for scope := range strings.SplitSeq(scopesString, ",") {
for _, scope := range strings.Split(scopesString, ",") {
scope = strings.TrimSpace(scope)
scopes = append(scopes, scopePrefix+scope)
}
@@ -1220,7 +1220,7 @@ func isLinkMimeType(mimeType string) bool {
// into a list of unique extensions with leading "." and a list of associated MIME types
func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, err error) {
for _, extensionText := range extensionsIn {
for extension := range strings.SplitSeq(extensionText, ",") {
for _, extension := range strings.Split(extensionText, ",") {
extension = strings.ToLower(strings.TrimSpace(extension))
if extension == "" {
continue

View File

@@ -386,6 +386,7 @@ func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err e
g.SetLimit(o.fs.ci.Checkers)
var mu sync.Mutex // protect the info.Permissions from concurrent writes
for _, permissionID := range info.PermissionIds {
permissionID := permissionID
g.Go(func() error {
// must fetch the team drive ones individually to check the inherited flag
perm, inherited, err := o.fs.getPermission(gCtx, actualID(info.Id), permissionID, !o.fs.isTeamDrive)
@@ -519,6 +520,7 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
}
// merge metadata into request and user metadata
for k, v := range meta {
k, v := k, v
// parse a boolean from v and write into out
parseBool := func(out *bool) error {
b, err := strconv.ParseBool(v)

View File

@@ -1446,9 +1446,9 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
}
}
usage = &fs.Usage{
Total: fs.NewUsageValue(total), // quota of bytes that can be used
Used: fs.NewUsageValue(used), // bytes in use
Free: fs.NewUsageValue(total - used), // bytes which can be uploaded before reaching the quota
Total: fs.NewUsageValue(int64(total)), // quota of bytes that can be used
Used: fs.NewUsageValue(int64(used)), // bytes in use
Free: fs.NewUsageValue(int64(total - used)), // bytes which can be uploaded before reaching the quota
}
return usage, nil
}

View File

@@ -8,7 +8,7 @@ type CreateFolderResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
Result struct {
FldID any `json:"fld_id"`
FldID interface{} `json:"fld_id"`
} `json:"result"`
}
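
The `fld_id` pair above is purely cosmetic: `any` has been an alias for `interface{}` since Go 1.18. A field gets typed this loosely when the server may return either a number or a string; a sketch of consuming such a field (hypothetical JSON, and note that `encoding/json` decodes numbers into an `any` as `float64`):

package main

import (
    "encoding/json"
    "fmt"
)

type result struct {
    FldID any `json:"fld_id"` // server may send a number or a string
}

func main() {
    for _, body := range []string{`{"fld_id": 12345}`, `{"fld_id": "12345"}`} {
        var r result
        _ = json.Unmarshal([]byte(body), &r)
        switch v := r.FldID.(type) {
        case float64: // JSON numbers decode to float64 in an any
            fmt.Printf("numeric id: %.0f\n", v)
        case string:
            fmt.Println("string id:", v)
        }
    }
}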

View File

@@ -14,7 +14,7 @@ import (
)
// errFileNotFound represent file not found error
var errFileNotFound = errors.New("file not found")
var errFileNotFound error = errors.New("file not found")
// getFileCode retrieves the file code for a given file path
func (f *Fs) getFileCode(ctx context.Context, filePath string) (string, error) {

View File

@@ -163,16 +163,6 @@ Enabled by default. Use 0 to disable.`,
Help: "Disable TLS 1.3 (workaround for FTP servers with buggy TLS)",
Default: false,
Advanced: true,
}, {
Name: "allow_insecure_tls_ciphers",
Help: `Allow insecure TLS ciphers
Setting this flag will allow the usage of the following TLS ciphers in addition to the secure defaults:
- TLS_RSA_WITH_AES_128_GCM_SHA256
`,
Default: false,
Advanced: true,
}, {
Name: "shut_timeout",
Help: "Maximum time to wait for data connection closing status.",
@@ -246,30 +236,29 @@ a write only folder.
// Options defines the configuration for this backend
type Options struct {
Host string `config:"host"`
User string `config:"user"`
Pass string `config:"pass"`
Port string `config:"port"`
TLS bool `config:"tls"`
ExplicitTLS bool `config:"explicit_tls"`
TLSCacheSize int `config:"tls_cache_size"`
DisableTLS13 bool `config:"disable_tls13"`
AllowInsecureTLSCiphers bool `config:"allow_insecure_tls_ciphers"`
Concurrency int `config:"concurrency"`
SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"`
DisableMLSD bool `config:"disable_mlsd"`
DisableUTF8 bool `config:"disable_utf8"`
WritingMDTM bool `config:"writing_mdtm"`
ForceListHidden bool `config:"force_list_hidden"`
IdleTimeout fs.Duration `config:"idle_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"`
ShutTimeout fs.Duration `config:"shut_timeout"`
AskPassword bool `config:"ask_password"`
Enc encoder.MultiEncoder `config:"encoding"`
SocksProxy string `config:"socks_proxy"`
HTTPProxy string `config:"http_proxy"`
NoCheckUpload bool `config:"no_check_upload"`
Host string `config:"host"`
User string `config:"user"`
Pass string `config:"pass"`
Port string `config:"port"`
TLS bool `config:"tls"`
ExplicitTLS bool `config:"explicit_tls"`
TLSCacheSize int `config:"tls_cache_size"`
DisableTLS13 bool `config:"disable_tls13"`
Concurrency int `config:"concurrency"`
SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"`
DisableMLSD bool `config:"disable_mlsd"`
DisableUTF8 bool `config:"disable_utf8"`
WritingMDTM bool `config:"writing_mdtm"`
ForceListHidden bool `config:"force_list_hidden"`
IdleTimeout fs.Duration `config:"idle_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"`
ShutTimeout fs.Duration `config:"shut_timeout"`
AskPassword bool `config:"ask_password"`
Enc encoder.MultiEncoder `config:"encoding"`
SocksProxy string `config:"socks_proxy"`
HTTPProxy string `config:"http_proxy"`
NoCheckUpload bool `config:"no_check_upload"`
}
// Fs represents a remote FTP server
@@ -283,7 +272,6 @@ type Fs struct {
user string
pass string
dialAddr string
tlsConf *tls.Config // default TLS client config
poolMu sync.Mutex
pool []*ftp.ServerConn
drain *time.Timer // used to drain the pool when we stop using the connections
@@ -409,14 +397,9 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
func (f *Fs) tlsConfig() *tls.Config {
var tlsConfig *tls.Config
if f.opt.TLS || f.opt.ExplicitTLS {
if f.tlsConf != nil {
tlsConfig = f.tlsConf.Clone()
} else {
tlsConfig = new(tls.Config)
}
tlsConfig.ServerName = f.opt.Host
if f.opt.SkipVerifyTLSCert {
tlsConfig.InsecureSkipVerify = true
tlsConfig = &tls.Config{
ServerName: f.opt.Host,
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
}
if f.opt.TLSCacheSize > 0 {
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(f.opt.TLSCacheSize)
@@ -424,14 +407,6 @@ func (f *Fs) tlsConfig() *tls.Config {
if f.opt.DisableTLS13 {
tlsConfig.MaxVersion = tls.VersionTLS12
}
if f.opt.AllowInsecureTLSCiphers {
var ids []uint16
// Read default ciphers
for _, cs := range tls.CipherSuites() {
ids = append(ids, cs.ID)
}
tlsConfig.CipherSuites = append(ids, tls.TLS_RSA_WITH_AES_128_GCM_SHA256)
}
}
return tlsConfig
}
@@ -677,7 +652,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
dialAddr: dialAddr,
tokens: pacer.NewTokenDispenser(opt.Concurrency),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
tlsConf: fshttp.NewTransport(ctx).TLSClientConfig,
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
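
The cipher-suite hunk above reads the secure default suites from `tls.CipherSuites()` and appends `TLS_RSA_WITH_AES_128_GCM_SHA256` only when the user opts in, rather than replacing the whole list. A standalone sketch of that pattern (the hostname is a placeholder):

package main

import (
    "crypto/tls"
    "fmt"
)

func main() {
    // Start from the default secure cipher suites and append one
    // legacy suite only when the user explicitly opts in.
    var ids []uint16
    for _, cs := range tls.CipherSuites() { // secure defaults only
        ids = append(ids, cs.ID)
    }
    ids = append(ids, tls.TLS_RSA_WITH_AES_128_GCM_SHA256)

    cfg := &tls.Config{
        ServerName:   "ftp.example.com", // placeholder host
        CipherSuites: ids,
    }
    fmt.Println(len(cfg.CipherSuites), "cipher suites enabled")
}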

View File

@@ -252,9 +252,6 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
}, {
Value: "us-east4",
Help: "Northern Virginia",
}, {
Value: "us-east5",
Help: "Ohio",
}, {
Value: "us-west1",
Help: "Oregon",
@@ -763,7 +760,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *storage.
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
// List the objects
err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
@@ -771,16 +768,16 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
return err
}
if entry != nil {
return callback(entry)
entries = append(entries, entry)
}
return nil
})
if err != nil {
return err
return nil, err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
return err
return entries, err
}
// listBuckets lists the buckets
@@ -823,46 +820,14 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
bucket, directory := f.split(dir)
if bucket == "" {
if directory != "" {
return fs.ErrorListBucketRequired
}
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
} else {
err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
if err != nil {
return err
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
}
return list.Flush()
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
}
// ListR lists the objects and directories of the Fs starting
@@ -1497,7 +1462,6 @@ var (
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
)

View File

@@ -371,9 +371,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return nil, err
}
return &fs.Usage{
Total: fs.NewUsageValue(info.Capacity),
Used: fs.NewUsageValue(info.Used),
Free: fs.NewUsageValue(info.Remaining),
Total: fs.NewUsageValue(int64(info.Capacity)),
Used: fs.NewUsageValue(int64(info.Used)),
Free: fs.NewUsageValue(int64(info.Remaining)),
}, nil
}

View File

@@ -590,7 +590,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return "", err
}
bucket, bucketPath := f.split(remote)
return path.Join(f.opt.FrontEndpoint, "/download/", bucket, rest.URLPathEscapeAll(bucketPath)), nil
return path.Join(f.opt.FrontEndpoint, "/download/", bucket, quotePath(bucketPath)), nil
}
// Copy src to this remote using server-side copy operations.
@@ -622,7 +622,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (_ fs.Objec
"x-archive-auto-make-bucket": "1",
"x-archive-queue-derive": "0",
"x-archive-keep-old-version": "0",
"x-amz-copy-source": rest.URLPathEscapeAll(path.Join("/", srcBucket, srcPath)),
"x-amz-copy-source": quotePath(path.Join("/", srcBucket, srcPath)),
"x-amz-metadata-directive": "COPY",
"x-archive-filemeta-sha1": srcObj.sha1,
"x-archive-filemeta-md5": srcObj.md5,
@@ -778,7 +778,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// make a GET request to (frontend)/download/:item/:path
opts := rest.Opts{
Method: "GET",
Path: path.Join("/download/", o.fs.root, rest.URLPathEscapeAll(o.fs.opt.Enc.FromStandardPath(o.remote))),
Path: path.Join("/download/", o.fs.root, quotePath(o.fs.opt.Enc.FromStandardPath(o.remote))),
Options: optionsFixed,
}
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1334,6 +1334,16 @@ func trimPathPrefix(s, prefix string, enc encoder.MultiEncoder) string {
return enc.ToStandardPath(strings.TrimPrefix(s, prefix+"/"))
}
// mimics urllib.parse.quote() on Python; exclude / from url.PathEscape
func quotePath(s string) string {
seg := strings.Split(s, "/")
newValues := []string{}
for _, v := range seg {
newValues = append(newValues, url.PathEscape(v))
}
return strings.Join(newValues, "/")
}
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
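
`quotePath` above escapes each path segment but keeps the `/` separators, unlike a bare `url.PathEscape` over the whole string. A quick demonstration (reusing the function as shown):

package main

import (
    "fmt"
    "net/url"
    "strings"
)

// quotePath, as added above: escape each path segment but keep the
// "/" separators, like Python's urllib.parse.quote().
func quotePath(s string) string {
    seg := strings.Split(s, "/")
    newValues := []string{}
    for _, v := range seg {
        newValues = append(newValues, url.PathEscape(v))
    }
    return strings.Join(newValues, "/")
}

func main() {
    p := "dir with spaces/file #1.txt"
    fmt.Println(url.PathEscape(p)) // escapes the "/" too
    fmt.Println(quotePath(p))      // keeps "/" as separator
}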

View File

@@ -17,7 +17,6 @@ import (
"net/url"
"os"
"path"
"slices"
"strconv"
"strings"
"time"
@@ -60,43 +59,31 @@ const (
configVersion = 1
defaultTokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
defaultClientID = "jottacli" // Identified as "Jottacloud CLI" in "My logged in devices"
defaultClientID = "jottacli"
legacyTokenURL = "https://api.jottacloud.com/auth/v1/token"
legacyRegisterURL = "https://api.jottacloud.com/auth/v1/register"
legacyClientID = "nibfk8biu12ju7hpqomr8b1e40"
legacyEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
legacyConfigVersion = 0
teliaseCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
teliaseCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
teliaseCloudClientID = "desktop"
telianoCloudTokenURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/token"
telianoCloudAuthURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/auth"
telianoCloudClientID = "desktop"
tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token"
tele2CloudAuthURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth"
tele2CloudClientID = "desktop"
onlimeCloudTokenURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/token"
onlimeCloudAuthURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/auth"
onlimeCloudClientID = "desktop"
)
type service struct {
key string
name string
domain string
realm string
clientID string
scopes []string
}
// The list of services and their settings for supporting traditional OAuth.
// Please keep these in alphabetical order, but with jottacloud first.
func getServices() []service {
return []service{
{"jottacloud", "Jottacloud", "id.jottacloud.com", "jottacloud", "desktop", []string{"openid", "jotta-default", "offline_access"}}, // Chose client id "desktop" here, will be identified as "Jottacloud for Desktop" in "My logged in devices", but could have used "jottacli" here as well.
{"elgiganten_dk", "Elgiganten Cloud (Denmark)", "cloud.elgiganten.dk", "elgiganten", "desktop", []string{"openid", "jotta-default", "offline_access"}},
{"elgiganten_se", "Elgiganten Cloud (Sweden)", "cloud.elgiganten.se", "elgiganten", "desktop", []string{"openid", "jotta-default", "offline_access"}},
{"elkjop", "Elkjøp Cloud (Norway)", "cloud.elkjop.no", "elkjop", "desktop", []string{"openid", "jotta-default", "offline_access"}},
{"elko", "ELKO Cloud (Iceland)", "cloud.elko.is", "elko", "desktop", []string{"openid", "jotta-default", "offline_access"}},
{"gigantti", "Gigantti Cloud (Finland)", "cloud.gigantti.fi", "gigantti", "desktop", []string{"openid", "jotta-default", "offline_access"}},
{"letsgo", "Let's Go Cloud (Germany)", "letsgo.jotta.cloud", "letsgo", "desktop-win", []string{"openid", "offline_access"}},
{"mediamarkt", "MediaMarkt Cloud (Multiregional)", "mediamarkt.jottacloud.com", "mediamarkt", "desktop", []string{"openid", "jotta-default", "offline_access"}},
{"onlime", "Onlime (Denmark)", "cloud-auth.onlime.dk", "onlime_wl", "desktop", []string{"openid", "jotta-default", "offline_access"}},
{"tele2", "Tele2 Cloud (Sweden)", "mittcloud-auth.tele2.se", "comhem", "desktop", []string{"openid", "jotta-default", "offline_access"}},
{"telia_no", "Telia Sky (Norway)", "sky-auth.telia.no", "get", "desktop", []string{"openid", "jotta-default", "offline_access"}},
{"telia_se", "Telia Cloud (Sweden)", "cloud-auth.telia.se", "telia_se", "desktop", []string{"openid", "jotta-default", "offline_access"}},
}
}
// Register with Fs
func init() {
// needs to be done early so we can use oauth during config
@@ -172,44 +159,36 @@ func init() {
}
// Config runs the backend configuration protocol
func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.ConfigIn) (*fs.ConfigOut, error) {
switch conf.State {
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
switch config.State {
case "":
if isAuthorize, _ := m.Get(config.ConfigAuthorize); isAuthorize == "true" {
return nil, errors.New("not supported by this backend")
}
return fs.ConfigChooseExclusiveFixed("auth_type_done", "config_type", `Type of authentication.`, []fs.OptionExample{{
return fs.ConfigChooseExclusiveFixed("auth_type_done", "config_type", `Select authentication type.`, []fs.OptionExample{{
Value: "standard",
Help: `Standard authentication.
This is primarily supported by the official service, but may also be
supported by some white-label services. It is designed for command-line
applications, and you will be asked to enter a single-use personal login
token which you must manually generate from the account security settings
in the web interface of your service.`,
}, {
Value: "traditional",
Help: `Traditional authentication.
This is supported by the official service and all white-label services
that rclone knows about. You will be asked which service to connect to.
It has a limitation of only a single active authentication at a time. You
need to be on, or have access to, a machine with an internet-connected
web browser.`,
Help: "Standard authentication.\nUse this if you're a normal Jottacloud user.",
}, {
Value: "legacy",
Help: `Legacy authentication.
This is no longer supported by any known services and its use is not
recommended. You will be asked for your account's username and password.`,
Help: "Legacy authentication.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
}, {
Value: "telia_se",
Help: "Telia Cloud authentication.\nUse this if you are using Telia Cloud (Sweden).",
}, {
Value: "telia_no",
Help: "Telia Sky authentication.\nUse this if you are using Telia Sky (Norway).",
}, {
Value: "tele2",
Help: "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.",
}, {
Value: "onlime",
Help: "Onlime Cloud authentication.\nUse this if you are using Onlime Cloud.",
}})
case "auth_type_done":
// Jump to next state according to config chosen
return fs.ConfigGoto(conf.Result)
return fs.ConfigGoto(config.Result)
case "standard": // configure a jottacloud backend using the modern JottaCli token based authentication
m.Set("configVersion", fmt.Sprint(configVersion))
return fs.ConfigInput("standard_token", "config_login_token", `Personal login token.
Generate it from the account security settings in the web interface of your
service, for the official service on https://www.jottacloud.com/web/secure.`)
return fs.ConfigInput("standard_token", "config_login_token", "Personal login token.\nGenerate here: https://www.jottacloud.com/web/secure")
case "standard_token":
loginToken := conf.Result
loginToken := config.Result
m.Set(configClientID, defaultClientID)
m.Set(configClientSecret, "")
@@ -224,50 +203,10 @@ service, for the official service on https://www.jottacloud.com/web/secure.`)
return nil, fmt.Errorf("error while saving token: %w", err)
}
return fs.ConfigGoto("choose_device")
case "traditional":
services := getServices()
options := make([]fs.OptionExample, 0, len(services))
for _, service := range services {
options = append(options, fs.OptionExample{
Value: service.key,
Help: service.name,
})
}
return fs.ConfigChooseExclusiveFixed("traditional_type", "config_traditional",
"White-label service. This decides the domain name to connect to and\nthe authentication configuration to use.",
options)
case "traditional_type":
services := getServices()
i := slices.IndexFunc(services, func(s service) bool { return s.key == conf.Result })
if i == -1 {
return nil, fmt.Errorf("unexpected service %q", conf.Result)
}
service := services[i]
opts := rest.Opts{
Method: "GET",
RootURL: "https://" + service.domain + "/auth/realms/" + service.realm + "/.well-known/openid-configuration",
}
var wellKnown api.WellKnown
srv := rest.NewClient(fshttp.NewClient(ctx))
_, err := srv.CallJSON(ctx, &opts, nil, &wellKnown)
if err != nil {
return nil, fmt.Errorf("failed to get authentication provider configuration: %w", err)
}
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, service.clientID)
m.Set(configTokenURL, wellKnown.TokenEndpoint)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauthutil.Config{
AuthURL: wellKnown.AuthorizationEndpoint,
TokenURL: wellKnown.TokenEndpoint,
ClientID: service.clientID,
Scopes: service.scopes,
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "legacy": // configure a jottacloud backend using legacy authentication
m.Set("configVersion", fmt.Sprint(legacyConfigVersion))
return fs.ConfigConfirm("legacy_api", false, "config_machine_specific", `Do you want to create a machine specific API key?
Rclone has its own Jottacloud API key which works fine as long as one
only uses rclone on a single machine. When you want to use rclone with
this account on more than one machine it's recommended to create a
@@ -275,7 +214,7 @@ machine specific API key. These keys can NOT be shared between
machines.`)
case "legacy_api":
srv := rest.NewClient(fshttp.NewClient(ctx))
if conf.Result == "true" {
if config.Result == "true" {
deviceRegistration, err := registerDevice(ctx, srv)
if err != nil {
return nil, fmt.Errorf("failed to register device: %w", err)
@@ -284,16 +223,16 @@ machines.`)
m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
fs.Debugf(nil, "Got clientID %q and clientSecret %q", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
}
return fs.ConfigInput("legacy_username", "config_username", "Username (e-mail address) of your account.")
return fs.ConfigInput("legacy_username", "config_username", "Username (e-mail address)")
case "legacy_username":
m.Set(configUsername, conf.Result)
return fs.ConfigPassword("legacy_password", "config_password", "Password of your account. This is only used in setup, it will not be stored.")
m.Set(configUsername, config.Result)
return fs.ConfigPassword("legacy_password", "config_password", "Password (only used in setup, will not be stored)")
case "legacy_password":
m.Set("password", conf.Result)
m.Set("password", config.Result)
m.Set("auth_code", "")
return fs.ConfigGoto("legacy_do_auth")
case "legacy_auth_code":
authCode := strings.ReplaceAll(conf.Result, "-", "") // remove any "-" contained in the code so we have a 6 digit number
authCode := strings.ReplaceAll(config.Result, "-", "") // remove any "-" contained in the code so we have a 6 digit number
m.Set("auth_code", authCode)
return fs.ConfigGoto("legacy_do_auth")
case "legacy_do_auth":
@@ -303,12 +242,12 @@ machines.`)
authCode, _ := m.Get("auth_code")
srv := rest.NewClient(fshttp.NewClient(ctx))
clientID, _ := m.Get(configClientID)
if clientID == "" {
clientID, ok := m.Get(configClientID)
if !ok {
clientID = legacyClientID
}
clientSecret, _ := m.Get(configClientSecret)
if clientSecret == "" {
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = legacyEncryptedClientSecret
}
@@ -321,7 +260,7 @@ machines.`)
}
token, err := doLegacyAuth(ctx, srv, oauthConfig, username, password, authCode)
if err == errAuthCodeRequired {
return fs.ConfigInput("legacy_auth_code", "config_auth_code", "Verification code.\nThis account uses 2 factor authentication you will receive a verification code via SMS.")
return fs.ConfigInput("legacy_auth_code", "config_auth_code", "Verification Code\nThis account uses 2 factor authentication you will receive a verification code via SMS.")
}
m.Set("password", "")
m.Set("auth_code", "")
@@ -333,6 +272,58 @@ machines.`)
return nil, fmt.Errorf("error while saving token: %w", err)
}
return fs.ConfigGoto("choose_device")
case "telia_se": // telia_se cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, teliaseCloudClientID)
m.Set(configTokenURL, teliaseCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauthutil.Config{
AuthURL: teliaseCloudAuthURL,
TokenURL: teliaseCloudTokenURL,
ClientID: teliaseCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "telia_no": // telia_no cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, telianoCloudClientID)
m.Set(configTokenURL, telianoCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauthutil.Config{
AuthURL: telianoCloudAuthURL,
TokenURL: telianoCloudTokenURL,
ClientID: telianoCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "tele2": // tele2 cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, tele2CloudClientID)
m.Set(configTokenURL, tele2CloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauthutil.Config{
AuthURL: tele2CloudAuthURL,
TokenURL: tele2CloudTokenURL,
ClientID: tele2CloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "onlime": // onlime cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, onlimeCloudClientID)
m.Set(configTokenURL, onlimeCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauthutil.Config{
AuthURL: onlimeCloudAuthURL,
TokenURL: onlimeCloudTokenURL,
ClientID: onlimeCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "choose_device":
return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", `Use a non-standard device/mountpoint?
Choosing no, the default, will let you access the storage used for the archive
@@ -340,7 +331,7 @@ section of the official Jottacloud client. If you instead want to access the
sync or the backup section, for example, you must choose yes.`)
case "choose_device_query":
if conf.Result != "true" {
if config.Result != "true" {
m.Set(configDevice, "")
m.Set(configMountpoint, "")
return fs.ConfigGoto("end")
@@ -381,7 +372,7 @@ a new by entering a unique name.`, defaultDevice)
return deviceNames[i], ""
})
case "choose_device_result":
device := conf.Result
device := config.Result
oAuthClient, _, err := getOAuthClient(ctx, name, m)
if err != nil {
@@ -441,7 +432,7 @@ You may create a new by entering a unique name.`, device)
return dev.MountPoints[i].Name, ""
})
case "choose_device_mountpoint":
mountpoint := conf.Result
mountpoint := config.Result
oAuthClient, _, err := getOAuthClient(ctx, name, m)
if err != nil {
@@ -472,7 +463,7 @@ You may create a new by entering a unique name.`, device)
if isNew {
if device == defaultDevice {
return nil, fmt.Errorf("custom mountpoints not supported on built-in %s device", defaultDevice)
return nil, fmt.Errorf("custom mountpoints not supported on built-in %s device: %w", defaultDevice, err)
}
fs.Debugf(nil, "Creating new mountpoint: %s", mountpoint)
_, err := createMountPoint(ctx, jfsSrv, path.Join(cust.Username, device, mountpoint))
@@ -487,7 +478,7 @@ You may create a new by entering a unique name.`, device)
// All the config flows end up here in case we need to carry on with something
return nil, nil
}
return nil, fmt.Errorf("unknown state %q", conf.State)
return nil, fmt.Errorf("unknown state %q", config.State)
}
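The state machine above is easier to follow in isolation. Below is a minimal, hypothetical sketch in the same style: the state names, option key and help text are invented, but the `fs.ConfigInput`/`fs.ConfigIn` helpers and the `nil, nil` terminator are the ones used by the real flow.

```go
package example

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
)

// Config sketches a two-state config flow: state "" asks a question,
// state "token_done" receives the answer in conf.Result and stores it.
func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.ConfigIn) (*fs.ConfigOut, error) {
	switch conf.State {
	case "":
		// The answer arrives when we are called again in state "token_done".
		return fs.ConfigInput("token_done", "config_token", "Personal login token.")
	case "token_done":
		m.Set("token", conf.Result) // "token" is a hypothetical option key
		return nil, nil             // nil, nil ends the config flow
	}
	return nil, fmt.Errorf("unknown state %q", conf.State)
}
```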
// Options defines the configuration for this backend
@@ -938,12 +929,12 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
oauthConfig.AuthURL = tokenURL
}
} else if ver == legacyConfigVersion {
clientID, _ := m.Get(configClientID)
if clientID == "" {
clientID, ok := m.Get(configClientID)
if !ok {
clientID = legacyClientID
}
clientSecret, _ := m.Get(configClientSecret)
if clientSecret == "" {
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = legacyEncryptedClientSecret
}
oauthConfig.ClientID = clientID
@@ -1009,13 +1000,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.features.ListR = nil
}
cust, err := getCustomerInfo(ctx, f.apiSrv)
if err != nil {
return nil, err
}
f.user = cust.Username
f.setEndpoints()
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath(ctx, "")
@@ -1025,6 +1009,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return err
})
cust, err := getCustomerInfo(ctx, f.apiSrv)
if err != nil {
return nil, err
}
f.user = cust.Username
f.setEndpoints()
if root != "" && !rootIsDir {
// Check to see if the root is actually an existing file
remote := path.Base(root)


@@ -7,7 +7,6 @@ import (
"errors"
"fmt"
"io"
iofs "io/fs"
"os"
"path"
"path/filepath"
@@ -672,12 +671,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
name := fi.Name()
mode := fi.Mode()
newRemote := f.cleanRemote(dir, name)
symlinkFlag := os.ModeSymlink
if runtime.GOOS == "windows" {
symlinkFlag |= os.ModeIrregular
}
// Follow symlinks if required
if f.opt.FollowSymlinks && (mode&symlinkFlag) != 0 {
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
localPath := filepath.Join(fsDirPath, name)
fi, err = os.Stat(localPath)
// Quietly skip errors on excluded files and directories
@@ -699,13 +694,13 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if fi.IsDir() {
// Ignore directories which are symlinks. These are junction points under windows which
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
if (mode&symlinkFlag) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
d := f.newDirectory(newRemote, fi)
entries = append(entries, d)
}
} else {
// Check whether this link should be translated
if f.opt.TranslateSymlinks && fi.Mode()&symlinkFlag != 0 {
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
newRemote += fs.LinkSuffix
}
// Don't include non directory if not included
@@ -842,13 +837,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
} else if !fi.IsDir() {
return fs.ErrorIsFile
}
err := os.Remove(localPath)
if runtime.GOOS == "windows" && errors.Is(err, iofs.ErrPermission) { // https://github.com/golang/go/issues/26295
if os.Chmod(localPath, 0o600) == nil {
err = os.Remove(localPath)
}
}
return err
return os.Remove(localPath)
}
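For context on the `symlinkFlag` lines being removed above: on Windows, Go reports some reparse points as `os.ModeIrregular` rather than `os.ModeSymlink`, which is (as far as I can tell) why the newer code widens the mask there. A minimal sketch of that check:

```go
package example

import (
	"os"
	"runtime"
)

// symlinkFlags returns the FileMode bits treated as "symlink-like".
// On Windows some reparse points surface as os.ModeIrregular, so
// that bit is included there as well.
func symlinkFlags() os.FileMode {
	flags := os.ModeSymlink
	if runtime.GOOS == "windows" {
		flags |= os.ModeIrregular
	}
	return flags
}

// isSymlinkLike reports whether fi should be treated as a symlink.
func isSymlinkLike(fi os.FileInfo) bool {
	return fi.Mode()&symlinkFlags() != 0
}
```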
// Precision of the file system


@@ -334,7 +334,7 @@ func TestMetadata(t *testing.T) {
func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
ctx := context.Background()
whenRFC := when.Local().Format(time.RFC3339Nano)
whenRFC := when.Format(time.RFC3339Nano)
const dayLength = len("2001-01-01")
f := r.Flocal.(*Fs)


@@ -1,40 +0,0 @@
//go:build windows
package local
import (
"context"
"path/filepath"
"runtime"
"syscall"
"testing"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestRmdirWindows tests that FILE_ATTRIBUTE_READONLY does not block Rmdir on windows.
// Microsoft docs indicate that "This attribute is not honored on directories."
// See https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants#file_attribute_readonly
// and https://github.com/golang/go/issues/26295
func TestRmdirWindows(t *testing.T) {
if runtime.GOOS != "windows" {
t.Skipf("windows only")
}
r := fstest.NewRun(t)
defer r.Finalise()
err := operations.Mkdir(context.Background(), r.Flocal, "testdir")
require.NoError(t, err)
ptr, err := syscall.UTF16PtrFromString(filepath.Join(r.Flocal.Root(), "testdir"))
require.NoError(t, err)
err = syscall.SetFileAttributes(ptr, uint32(syscall.FILE_ATTRIBUTE_DIRECTORY+syscall.FILE_ATTRIBUTE_READONLY))
require.NoError(t, err)
err = operations.Rmdir(context.Background(), r.Flocal, "testdir")
assert.NoError(t, err)
}


@@ -400,7 +400,7 @@ type quirks struct {
}
func (q *quirks) parseQuirks(option string) {
for flag := range strings.SplitSeq(option, ",") {
for _, flag := range strings.Split(option, ",") {
switch strings.ToLower(strings.TrimSpace(flag)) {
case "binlist":
// The official client sometimes uses a so called "bin" protocol,
@@ -1770,7 +1770,7 @@ func (f *Fs) parseSpeedupPatterns(patternString string) (err error) {
f.speedupAny = false
uniqueValidPatterns := make(map[string]any)
for pattern := range strings.SplitSeq(patternString, ",") {
for _, pattern := range strings.Split(patternString, ",") {
pattern = strings.ToLower(strings.TrimSpace(pattern))
if pattern == "" {
continue

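Both hunks in this file swap `strings.Split` for `strings.SplitSeq`. Assuming Go 1.24, `SplitSeq` returns an iterator rather than an allocated slice, which is why the loop loses its index variable. A small standalone example:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// strings.SplitSeq (Go 1.24) yields substrings lazily instead of
	// allocating a []string, so the range loop has no index variable.
	for pattern := range strings.SplitSeq("*.jpg, *.png ,", ",") {
		pattern = strings.TrimSpace(pattern)
		if pattern == "" {
			continue
		}
		fmt.Println(pattern)
	}
}
```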

@@ -946,9 +946,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return nil, fmt.Errorf("failed to get Mega Quota: %w", err)
}
usage := &fs.Usage{
Total: fs.NewUsageValue(q.Mstrg), // quota of bytes that can be used
Used: fs.NewUsageValue(q.Cstrg), // bytes in use
Free: fs.NewUsageValue(q.Mstrg - q.Cstrg), // bytes which can be uploaded before reaching the quota
Total: fs.NewUsageValue(int64(q.Mstrg)), // quota of bytes that can be used
Used: fs.NewUsageValue(int64(q.Cstrg)), // bytes in use
Free: fs.NewUsageValue(int64(q.Mstrg - q.Cstrg)), // bytes which can be uploaded before reaching the quota
}
return usage, nil
}
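Several hunks in this comparison only add or remove `int64(...)` conversions around `fs.NewUsageValue`. One plausible reading is that `fs.NewUsageValue` previously took an `int64` while the SDK quota counters are unsigned; the explicit-conversion form below compiles either way (`usageFromQuota` and its parameters are invented for illustration):

```go
package example

import "github.com/rclone/rclone/fs"

// usageFromQuota builds an fs.Usage from unsigned quota counters.
// The explicit int64 conversions are the ones this hunk toggles.
func usageFromQuota(total, used uint64) *fs.Usage {
	return &fs.Usage{
		Total: fs.NewUsageValue(int64(total)),        // quota of bytes that can be used
		Used:  fs.NewUsageValue(int64(used)),         // bytes in use
		Free:  fs.NewUsageValue(int64(total - used)), // bytes left before the quota
	}
}
```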


@@ -325,12 +325,13 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
// listDir lists the bucket to the entries
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
// List the objects and directories
err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, entry fs.DirEntry, isDirectory bool) error {
return callback(entry)
entries = append(entries, entry)
return nil
})
return err
return entries, err
}
// listBuckets lists the buckets to entries
@@ -353,46 +354,15 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
// defer fslog.Trace(dir, "")("entries = %q, err = %v", &entries, &err)
bucket, directory := f.split(dir)
if bucket == "" {
if directory != "" {
return fs.ErrorListBucketRequired
}
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
} else {
err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
if err != nil {
return err
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
}
return list.Flush()
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
}
// ListR lists the objects and directories of the Fs starting
@@ -659,7 +629,6 @@ var (
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
)
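The `List`/`ListP` rework above follows a pattern that recurs in the swift hunks later on: `List` delegates to `ListP` via `list.WithListP`, and `ListP` streams entries through a `list.NewHelper`. A minimal sketch, assuming a stand-in backend with a hypothetical `fetchEntries` helper:

```go
package example

import (
	"context"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/list"
)

// Fs is a stand-in backend; fetchEntries is a hypothetical helper.
type Fs struct{}

func (f *Fs) fetchEntries(dir string) fs.DirEntries { return nil }

// List delegates to ListP through the list helper, as the hunk above does.
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
	return list.WithListP(ctx, dir, f)
}

// ListP streams entries to callback in batches via the list helper.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
	helper := list.NewHelper(callback)
	for _, entry := range f.fetchEntries(dir) {
		if err := helper.Add(entry); err != nil {
			return err
		}
	}
	return helper.Flush() // send any remaining buffered entries
}
```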


@@ -243,6 +243,7 @@ func (m *Metadata) Get(ctx context.Context) (metadata fs.Metadata, err error) {
func (m *Metadata) Set(ctx context.Context, metadata fs.Metadata) (numSet int, err error) {
numSet = 0
for k, v := range metadata {
k, v := k, v
switch k {
case "mtime":
t, err := time.Parse(timeFormatIn, v)
@@ -421,7 +422,12 @@ func (m *Metadata) orderPermissions(xs []*api.PermissionsType) {
if hasUserIdentity(p.GetGrantedTo(m.fs.driveType)) {
return true
}
return slices.ContainsFunc(p.GetGrantedToIdentities(m.fs.driveType), hasUserIdentity)
for _, identity := range p.GetGrantedToIdentities(m.fs.driveType) {
if hasUserIdentity(identity) {
return true
}
}
return false
}
// Put Permissions with a user first, leaving unsorted otherwise
slices.SortStableFunc(xs, func(a, b *api.PermissionsType) int {

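The permissions hunk above replaces a hand-written loop with `slices.ContainsFunc` (standard library since Go 1.21). A standalone example of the same transformation, with invented data:

```go
package main

import (
	"fmt"
	"slices"
	"strings"
)

func main() {
	identities := []string{"group:staff", "user:alice", "app:backup"}
	// slices.ContainsFunc reports whether any element satisfies the
	// predicate, replacing the manual for-loop-with-return pattern.
	hasUser := slices.ContainsFunc(identities, func(s string) bool {
		return strings.HasPrefix(s, "user:")
	})
	fmt.Println(hasUser) // true
}
```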

@@ -172,8 +172,8 @@ func BenchmarkQuickXorHash(b *testing.B) {
require.NoError(b, err)
require.Equal(b, len(buf), n)
h := New()
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
h.Reset()
h.Write(buf)
h.Sum(nil)

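The benchmark change above swaps the classic `b.ResetTimer` + `b.N` loop for `b.Loop`, available from Go 1.24. A minimal sketch of the newer form, using `crypto/sha256` as a stand-in workload:

```go
package example

import (
	"crypto/sha256"
	"testing"
)

// BenchmarkSum uses b.Loop (Go 1.24), which replaces the classic
// b.ResetTimer + for i := 0; i < b.N; i++ pattern seen above and
// keeps setup work outside the measured region automatically.
func BenchmarkSum(b *testing.B) {
	buf := make([]byte, 1<<20) // setup, not timed by b.Loop
	for b.Loop() {
		sha256.Sum256(buf)
	}
}
```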

@@ -12,7 +12,6 @@ import (
"strings"
"time"
"github.com/ncw/swift/v2"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
@@ -34,46 +33,9 @@ func init() {
NewFs: NewFs,
CommandHelp: commandHelp,
Options: newOptions(),
MetadataInfo: &fs.MetadataInfo{
System: systemMetadataInfo,
Help: `User metadata is stored as opc-meta- keys.`,
},
})
}
var systemMetadataInfo = map[string]fs.MetadataHelp{
"opc-meta-mode": {
Help: "File type and mode",
Type: "octal, unix style",
Example: "0100664",
},
"opc-meta-uid": {
Help: "User ID of owner",
Type: "decimal number",
Example: "500",
},
"opc-meta-gid": {
Help: "Group ID of owner",
Type: "decimal number",
Example: "500",
},
"opc-meta-atime": {
Help: "Time of last access",
Type: "ISO 8601",
Example: "2025-06-30T22:27:43-04:00",
},
"opc-meta-mtime": {
Help: "Time of last modification",
Type: "ISO 8601",
Example: "2025-06-30T22:27:43-04:00",
},
"opc-meta-btime": {
Help: "Time of file birth (creation)",
Type: "ISO 8601",
Example: "2025-06-30T22:27:43-04:00",
},
}
// Fs represents a remote object storage server
type Fs struct {
name string // name of this remote
@@ -120,7 +82,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
f.setRoot(root)
f.features = (&fs.Features{
ReadMetadata: true,
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
@@ -254,47 +215,15 @@ func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
bucketName, directory := f.split(dir)
fs.Debugf(f, "listing: bucket : %v, directory: %v", bucketName, dir)
if bucketName == "" {
if directory != "" {
return fs.ErrorListBucketRequired
}
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
} else {
err := f.listDir(ctx, bucketName, directory, f.rootDirectory, f.rootBucket == "", list.Add)
if err != nil {
return err
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
}
return list.Flush()
return f.listDir(ctx, bucketName, directory, f.rootDirectory, f.rootBucket == "")
}
// listFn is called from list to handle an object.
@@ -443,24 +372,24 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *objectst
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
fn := func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
if err != nil {
return err
}
if entry != nil {
return callback(entry)
entries = append(entries, entry)
}
return nil
}
err = f.list(ctx, bucket, directory, prefix, addBucket, false, 0, fn)
if err != nil {
return err
return nil, err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
return nil
return entries, nil
}
// listBuckets returns all the buckets to out
@@ -759,45 +688,12 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
return list.Flush()
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
err = o.readMetaData(ctx)
if err != nil {
return nil, err
}
metadata = make(fs.Metadata, len(o.meta)+7)
for k, v := range o.meta {
switch k {
case metaMtime:
if modTime, err := swift.FloatStringToTime(v); err == nil {
metadata["mtime"] = modTime.Format(time.RFC3339Nano)
}
case metaMD5Hash:
// don't write hash metadata
default:
metadata[k] = v
}
}
if o.mimeType != "" {
metadata["content-type"] = o.mimeType
}
if !o.lastModified.IsZero() {
metadata["btime"] = o.lastModified.Format(time.RFC3339Nano)
}
return metadata, nil
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.Commander = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.OpenChunkWriter = &Fs{}


@@ -5,7 +5,6 @@ package api
import (
"fmt"
"net/url"
"reflect"
"strconv"
"time"
@@ -137,25 +136,8 @@ type Link struct {
}
// Valid reports whether l is non-nil, has a URL, and is not expired.
// It primarily checks the URL's expire query parameter, falling back to the Expire field.
func (l *Link) Valid() bool {
if l == nil || l.URL == "" {
return false
}
// Primary validation: check URL's expire query parameter
if u, err := url.Parse(l.URL); err == nil {
if expireStr := u.Query().Get("expire"); expireStr != "" {
// Try parsing as Unix timestamp (seconds)
if expireInt, err := strconv.ParseInt(expireStr, 10, 64); err == nil {
expireTime := time.Unix(expireInt, 0)
return time.Now().Add(10 * time.Second).Before(expireTime)
}
}
}
// Fallback validation: use the Expire field if URL parsing didn't work
return time.Now().Add(10 * time.Second).Before(time.Time(l.Expire))
return l != nil && l.URL != "" && time.Now().Add(10*time.Second).Before(time.Time(l.Expire))
}
// URL is a basic form of URL


@@ -1,99 +0,0 @@
package api
import (
"fmt"
"testing"
"time"
)
// TestLinkValid tests the Link.Valid method for various scenarios
func TestLinkValid(t *testing.T) {
tests := []struct {
name string
link *Link
expected bool
desc string
}{
{
name: "nil link",
link: nil,
expected: false,
desc: "nil link should be invalid",
},
{
name: "empty URL",
link: &Link{URL: ""},
expected: false,
desc: "empty URL should be invalid",
},
{
name: "valid URL with future expire parameter",
link: &Link{
URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(time.Hour).Unix()),
},
expected: true,
desc: "URL with future expire parameter should be valid",
},
{
name: "expired URL with past expire parameter",
link: &Link{
URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(-time.Hour).Unix()),
},
expected: false,
desc: "URL with past expire parameter should be invalid",
},
{
name: "URL expire parameter takes precedence over Expire field",
link: &Link{
URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(time.Hour).Unix()),
Expire: Time(time.Now().Add(-time.Hour)), // Fallback is expired
},
expected: true,
desc: "URL expire parameter should take precedence over Expire field",
},
{
name: "URL expire parameter within 10 second buffer should be invalid",
link: &Link{
URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(5*time.Second).Unix()),
},
expected: false,
desc: "URL expire parameter within 10 second buffer should be invalid",
},
{
name: "fallback to Expire field when no URL expire parameter",
link: &Link{
URL: "https://example.com/file",
Expire: Time(time.Now().Add(time.Hour)),
},
expected: true,
desc: "should fallback to Expire field when URL has no expire parameter",
},
{
name: "fallback to Expire field when URL expire parameter is invalid",
link: &Link{
URL: "https://example.com/file?expire=invalid",
Expire: Time(time.Now().Add(time.Hour)),
},
expected: true,
desc: "should fallback to Expire field when URL expire parameter is unparseable",
},
{
name: "invalid when both URL expire and Expire field are expired",
link: &Link{
URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(-time.Hour).Unix()),
Expire: Time(time.Now().Add(-time.Hour)),
},
expected: false,
desc: "should be invalid when both URL expire and Expire field are expired",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := tt.link.Valid()
if result != tt.expected {
t.Errorf("Link.Valid() = %v, expected %v. %s", result, tt.expected, tt.desc)
}
})
}
}


@@ -1508,30 +1508,8 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, gcid string,
}
if new.File == nil {
return nil, fmt.Errorf("invalid response: %+v", new)
}
defer atexit.OnError(&err, func() {
fs.Debugf(leaf, "canceling upload: %v", err)
if cancelErr := f.deleteObjects(ctx, []string{new.File.ID}, false); cancelErr != nil {
fs.Logf(leaf, "failed to cancel upload: %v", cancelErr)
}
if new.Task != nil {
if cancelErr := f.deleteTask(ctx, new.Task.ID, false); cancelErr != nil {
fs.Logf(leaf, "failed to cancel upload: %v", cancelErr)
}
fs.Debugf(leaf, "waiting %v for the cancellation to be effective", taskWaitTime)
time.Sleep(taskWaitTime)
}
})()
// Note: The API might automatically append a numbered suffix to the filename,
// even if a file with the same name does not exist in the target directory.
if upName := f.opt.Enc.ToStandardName(new.File.Name); leaf != upName {
return nil, fserrors.NoRetryError(fmt.Errorf("uploaded file name mismatch: expected %q, got %q", leaf, upName))
}
// early return; in case of zero-byte objects or uploaded by matched gcid
if new.File.Phase == api.PhaseTypeComplete {
} else if new.File.Phase == api.PhaseTypeComplete {
// early return; in case of zero-byte objects
if acc, ok := in.(*accounting.Account); ok && acc != nil {
// if `in io.Reader` is still in type of `*accounting.Account` (meaning that it is unused)
// it is considered as a server side copy as no incoming/outgoing traffic occur at all
@@ -1541,6 +1519,18 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, gcid string,
return new.File, nil
}
defer atexit.OnError(&err, func() {
fs.Debugf(leaf, "canceling upload: %v", err)
if cancelErr := f.deleteObjects(ctx, []string{new.File.ID}, false); cancelErr != nil {
fs.Logf(leaf, "failed to cancel upload: %v", cancelErr)
}
if cancelErr := f.deleteTask(ctx, new.Task.ID, false); cancelErr != nil {
fs.Logf(leaf, "failed to cancel upload: %v", cancelErr)
}
fs.Debugf(leaf, "waiting %v for the cancellation to be effective", taskWaitTime)
time.Sleep(taskWaitTime)
})()
if uploadType == api.UploadTypeForm && new.Form != nil {
err = f.uploadByForm(ctx, in, req.Name, size, new.Form, options...)
} else if uploadType == api.UploadTypeResumable && new.Resumable != nil {
@@ -1552,9 +1542,6 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, gcid string,
if err != nil {
return nil, fmt.Errorf("failed to upload: %w", err)
}
if new.Task == nil {
return new.File, nil
}
return new.File, f.waitTask(ctx, new.Task.ID)
}
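The upload rework above leans on rclone's `atexit.OnError` defer pattern: the cleanup callback runs only if the named return error is non-nil when the function unwinds. A minimal sketch with a hypothetical cleanup:

```go
package example

import (
	"errors"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/atexit"
)

// upload sketches the cancel-on-error pattern above: atexit.OnError
// runs the callback only if *err is non-nil when the returned func is
// invoked by the defer, so a failed upload is cancelled but a
// successful one is left alone.
func upload() (err error) {
	defer atexit.OnError(&err, func() {
		fs.Debugf(nil, "cancelling upload: %v", err)
		// hypothetical cleanup (delete objects, cancel tasks) goes here
	})()
	return errors.New("simulated failure") // triggers the cleanup
}
```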


@@ -793,7 +793,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return nil, err
}
usage = &fs.Usage{
Used: fs.NewUsageValue(info.SpaceUsed),
Used: fs.NewUsageValue(int64(info.SpaceUsed)),
}
return usage, nil
}


@@ -13,8 +13,6 @@ import (
protonDriveAPI "github.com/henrybear327/Proton-API-Bridge"
"github.com/henrybear327/go-proton-api"
"github.com/pquerna/otp/totp"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -89,17 +87,6 @@ The value can also be provided with --protondrive-2fa=000000
The 2FA code of your proton drive account if the account is set up with
two-factor authentication`,
Required: false,
}, {
Name: "otp_secret_key",
Help: `The OTP secret key
The value can also be provided with --protondrive-otp-secret-key=ABCDEFGHIJKLMNOPQRSTUVWXYZ234567
The OTP secret key of your proton drive account if the account is set up with
two-factor authentication`,
Required: false,
Sensitive: true,
IsPassword: true,
}, {
Name: clientUIDKey,
Help: "Client uid key (internal use only)",
@@ -204,7 +191,6 @@ type Options struct {
Password string `config:"password"`
MailboxPassword string `config:"mailbox_password"`
TwoFA string `config:"2fa"`
OtpSecretKey string `config:"otp_secret_key"`
// advanced
Enc encoder.MultiEncoder `config:"encoding"`
@@ -370,15 +356,7 @@ func newProtonDrive(ctx context.Context, f *Fs, opt *Options, m configmap.Mapper
config.FirstLoginCredential.Username = opt.Username
config.FirstLoginCredential.Password = opt.Password
config.FirstLoginCredential.MailboxPassword = opt.MailboxPassword
// if 2FA code is provided, use it; otherwise, generate one using the OTP secret key if provided
config.FirstLoginCredential.TwoFA = opt.TwoFA
if opt.TwoFA == "" && opt.OtpSecretKey != "" {
code, err := totp.GenerateCode(opt.OtpSecretKey, time.Now())
if err != nil {
return nil, fmt.Errorf("couldn't generate 2FA code: %w", err)
}
config.FirstLoginCredential.TwoFA = code
}
protonDrive, auth, err := protonDriveAPI.NewProtonDrive(ctx, config, authHandler, deAuthHandler)
if err != nil {
return nil, fmt.Errorf("couldn't initialize a new proton drive instance: %w", err)
@@ -417,14 +395,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
}
if opt.OtpSecretKey != "" {
var err error
opt.OtpSecretKey, err = obscure.Reveal(opt.OtpSecretKey)
if err != nil {
return nil, fmt.Errorf("couldn't decrypt OtpSecretKey: %w", err)
}
}
ci := fs.GetConfig(ctx)
root = strings.Trim(root, "/")

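The removed `otp_secret_key` option generated the 2FA code with `totp.GenerateCode` from `github.com/pquerna/otp/totp`, as the deleted hunk shows. A standalone sketch of that call (the secret is a made-up example value):

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/pquerna/otp/totp"
)

func main() {
	// Example base32 secret; a real one comes from the account's 2FA setup.
	secret := "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"
	code, err := totp.GenerateCode(secret, time.Now())
	if err != nil {
		log.Fatalf("couldn't generate 2FA code: %v", err)
	}
	fmt.Println("current TOTP code:", code)
}
```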

@@ -59,7 +59,11 @@ func (u *UploadMemoryManager) Consume(fileID string, neededMemory int64, speed f
defer func() { u.fileUsage[fileID] = borrowed }()
effectiveChunkSize := min(neededMemory, max(int64(speed*u.effectiveTime.Seconds()), u.reserved))
effectiveChunkSize := max(int64(speed*u.effectiveTime.Seconds()), u.reserved)
if neededMemory < effectiveChunkSize {
effectiveChunkSize = neededMemory
}
if effectiveChunkSize <= u.reserved {
return effectiveChunkSize

File diff suppressed because it is too large


@@ -248,47 +248,6 @@ func TestMergeDeleteMarkers(t *testing.T) {
}
}
func TestRemoveAWSChunked(t *testing.T) {
ps := func(s string) *string {
return &s
}
tests := []struct {
name string
in *string
want *string
}{
{"nil", nil, nil},
{"empty", ps(""), nil},
{"only aws", ps("aws-chunked"), nil},
{"leading aws", ps("aws-chunked, gzip"), ps("gzip")},
{"trailing aws", ps("gzip, aws-chunked"), ps("gzip")},
{"middle aws", ps("gzip, aws-chunked, br"), ps("gzip,br")},
{"case insensitive", ps("GZip, AwS-ChUnKeD, Br"), ps("GZip,Br")},
{"duplicates", ps("aws-chunked , aws-chunked"), nil},
{"no aws normalize spaces", ps(" gzip , br "), ps(" gzip , br ")},
{"surrounding spaces", ps(" aws-chunked "), nil},
{"no change", ps("gzip, br"), ps("gzip, br")},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
got := removeAWSChunked(tc.in)
check := func(want, got *string) {
t.Helper()
if tc.want == nil {
assert.Nil(t, got)
} else {
require.NotNil(t, got)
assert.Equal(t, *tc.want, *got)
}
}
check(tc.want, got)
// Idempotent
got2 := removeAWSChunked(got)
check(got, got2)
})
}
}
func (f *Fs) InternalTestVersions(t *testing.T) {
ctx := context.Background()

View File

@@ -1863,9 +1863,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
free := vfsStats.FreeSpace()
used := total - free
return &fs.Usage{
Total: fs.NewUsageValue(total),
Used: fs.NewUsageValue(used),
Free: fs.NewUsageValue(free),
Total: fs.NewUsageValue(int64(total)),
Used: fs.NewUsageValue(int64(used)),
Free: fs.NewUsageValue(int64(free)),
}, nil
} else if err != nil {
if errors.Is(err, os.ErrNotExist) {


@@ -1,99 +0,0 @@
package smb
import (
"context"
"fmt"
"os"
"sync"
"github.com/cloudsoda/go-smb2"
"golang.org/x/sync/errgroup"
)
// FsInterface defines the methods that filePool needs from Fs
type FsInterface interface {
getConnection(ctx context.Context, share string) (*conn, error)
putConnection(pc **conn, err error)
removeSession()
}
type file struct {
*smb2.File
c *conn
}
type filePool struct {
ctx context.Context
fs FsInterface
share string
path string
mu sync.Mutex
pool []*file
}
func newFilePool(ctx context.Context, fs FsInterface, share, path string) *filePool {
return &filePool{
ctx: ctx,
fs: fs,
share: share,
path: path,
}
}
func (p *filePool) get() (*file, error) {
p.mu.Lock()
if len(p.pool) > 0 {
f := p.pool[len(p.pool)-1]
p.pool = p.pool[:len(p.pool)-1]
p.mu.Unlock()
return f, nil
}
p.mu.Unlock()
c, err := p.fs.getConnection(p.ctx, p.share)
if err != nil {
return nil, err
}
fl, err := c.smbShare.OpenFile(p.path, os.O_WRONLY, 0o644)
if err != nil {
p.fs.putConnection(&c, err)
return nil, fmt.Errorf("failed to open: %w", err)
}
return &file{File: fl, c: c}, nil
}
func (p *filePool) put(f *file, err error) {
if f == nil {
return
}
if err != nil {
_ = f.Close()
p.fs.putConnection(&f.c, err)
return
}
p.mu.Lock()
p.pool = append(p.pool, f)
p.mu.Unlock()
}
func (p *filePool) drain() error {
p.mu.Lock()
files := p.pool
p.pool = nil
p.mu.Unlock()
g, _ := errgroup.WithContext(p.ctx)
for _, f := range files {
g.Go(func() error {
err := f.Close()
p.fs.putConnection(&f.c, err)
return err
})
}
return g.Wait()
}


@@ -1,228 +0,0 @@
package smb
import (
"context"
"errors"
"sync"
"testing"
"github.com/cloudsoda/go-smb2"
"github.com/stretchr/testify/assert"
)
// Mock Fs that implements FsInterface
type mockFs struct {
mu sync.Mutex
putConnectionCalled bool
putConnectionErr error
getConnectionCalled bool
getConnectionErr error
getConnectionResult *conn
removeSessionCalled bool
}
func (m *mockFs) putConnection(pc **conn, err error) {
m.mu.Lock()
defer m.mu.Unlock()
m.putConnectionCalled = true
m.putConnectionErr = err
}
func (m *mockFs) getConnection(ctx context.Context, share string) (*conn, error) {
m.mu.Lock()
defer m.mu.Unlock()
m.getConnectionCalled = true
if m.getConnectionErr != nil {
return nil, m.getConnectionErr
}
if m.getConnectionResult != nil {
return m.getConnectionResult, nil
}
return &conn{}, nil
}
func (m *mockFs) removeSession() {
m.mu.Lock()
defer m.mu.Unlock()
m.removeSessionCalled = true
}
func (m *mockFs) isPutConnectionCalled() bool {
m.mu.Lock()
defer m.mu.Unlock()
return m.putConnectionCalled
}
func (m *mockFs) getPutConnectionErr() error {
m.mu.Lock()
defer m.mu.Unlock()
return m.putConnectionErr
}
func (m *mockFs) isGetConnectionCalled() bool {
m.mu.Lock()
defer m.mu.Unlock()
return m.getConnectionCalled
}
func newMockFs() *mockFs {
return &mockFs{}
}
// Helper function to create a mock file
func newMockFile() *file {
return &file{
File: &smb2.File{},
c: &conn{},
}
}
// Test filePool creation
func TestNewFilePool(t *testing.T) {
ctx := context.Background()
fs := newMockFs()
share := "testshare"
path := "/test/path"
pool := newFilePool(ctx, fs, share, path)
assert.NotNil(t, pool)
assert.Equal(t, ctx, pool.ctx)
assert.Equal(t, fs, pool.fs)
assert.Equal(t, share, pool.share)
assert.Equal(t, path, pool.path)
assert.Empty(t, pool.pool)
}
// Test getting file from pool when pool has files
func TestFilePool_Get_FromPool(t *testing.T) {
ctx := context.Background()
fs := newMockFs()
pool := newFilePool(ctx, fs, "testshare", "/test/path")
// Add a mock file to the pool
mockFile := newMockFile()
pool.pool = append(pool.pool, mockFile)
// Get file from pool
f, err := pool.get()
assert.NoError(t, err)
assert.NotNil(t, f)
assert.Equal(t, mockFile, f)
assert.Empty(t, pool.pool)
}
// Test getting file when pool is empty
func TestFilePool_Get_EmptyPool(t *testing.T) {
ctx := context.Background()
fs := newMockFs()
// Set up the mock to return an error from getConnection
// This tests that the pool calls getConnection when empty
fs.getConnectionErr = errors.New("connection failed")
pool := newFilePool(ctx, fs, "testshare", "test/path")
// This should call getConnection and return the error
f, err := pool.get()
assert.Error(t, err)
assert.Nil(t, f)
assert.True(t, fs.isGetConnectionCalled())
assert.Equal(t, "connection failed", err.Error())
}
// Test putting file successfully
func TestFilePool_Put_Success(t *testing.T) {
ctx := context.Background()
fs := newMockFs()
pool := newFilePool(ctx, fs, "testshare", "/test/path")
mockFile := newMockFile()
pool.put(mockFile, nil)
assert.Len(t, pool.pool, 1)
assert.Equal(t, mockFile, pool.pool[0])
}
// Test putting file with error
func TestFilePool_Put_WithError(t *testing.T) {
ctx := context.Background()
fs := newMockFs()
pool := newFilePool(ctx, fs, "testshare", "/test/path")
mockFile := newMockFile()
pool.put(mockFile, errors.New("write error"))
// Should call putConnection with error
assert.True(t, fs.isPutConnectionCalled())
assert.Equal(t, errors.New("write error"), fs.getPutConnectionErr())
assert.Empty(t, pool.pool)
}
// Test putting nil file
func TestFilePool_Put_NilFile(t *testing.T) {
ctx := context.Background()
fs := newMockFs()
pool := newFilePool(ctx, fs, "testshare", "/test/path")
// Should not panic
pool.put(nil, nil)
pool.put(nil, errors.New("some error"))
assert.Empty(t, pool.pool)
}
// Test draining pool with files
func TestFilePool_Drain_WithFiles(t *testing.T) {
ctx := context.Background()
fs := newMockFs()
pool := newFilePool(ctx, fs, "testshare", "/test/path")
// Add mock files to pool
mockFile1 := newMockFile()
mockFile2 := newMockFile()
pool.pool = append(pool.pool, mockFile1, mockFile2)
// Before draining
assert.Len(t, pool.pool, 2)
_ = pool.drain()
assert.Empty(t, pool.pool)
}
// Test concurrent access to pool
func TestFilePool_ConcurrentAccess(t *testing.T) {
ctx := context.Background()
fs := newMockFs()
pool := newFilePool(ctx, fs, "testshare", "/test/path")
const numGoroutines = 10
for range numGoroutines {
mockFile := newMockFile()
pool.pool = append(pool.pool, mockFile)
}
// Test concurrent get operations
done := make(chan bool, numGoroutines)
for range numGoroutines {
go func() {
defer func() { done <- true }()
f, err := pool.get()
if err == nil {
pool.put(f, nil)
}
}()
}
for range numGoroutines {
<-done
}
// Pool should be in a consistent state after the concurrent access
assert.Len(t, pool.pool, numGoroutines)
}


@@ -3,7 +3,6 @@ package smb
import (
"context"
"errors"
"fmt"
"io"
"os"
@@ -192,9 +191,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, err
}
// if root is empty or ends with / (must be a directory)
isRootDir := isPathDir(root)
root = strings.Trim(root, "/")
f := &Fs{
@@ -221,11 +217,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if share == "" || dir == "" {
return f, nil
}
// Skip stat check if root is already a directory
if isRootDir {
return f, nil
}
cn, err := f.getConnection(ctx, share)
if err != nil {
return nil, err
@@ -503,82 +494,22 @@ func (f *Fs) About(ctx context.Context) (_ *fs.Usage, err error) {
return nil, err
}
bs := stat.BlockSize()
bs := int64(stat.BlockSize())
usage := &fs.Usage{
Total: fs.NewUsageValue(bs * stat.TotalBlockCount()),
Used: fs.NewUsageValue(bs * (stat.TotalBlockCount() - stat.FreeBlockCount())),
Free: fs.NewUsageValue(bs * stat.AvailableBlockCount()),
Total: fs.NewUsageValue(bs * int64(stat.TotalBlockCount())),
Used: fs.NewUsageValue(bs * int64(stat.TotalBlockCount()-stat.FreeBlockCount())),
Free: fs.NewUsageValue(bs * int64(stat.AvailableBlockCount())),
}
return usage, nil
}
type smbWriterAt struct {
pool *filePool
closed bool
closeMu sync.Mutex
wg sync.WaitGroup
}
func (w *smbWriterAt) WriteAt(p []byte, off int64) (int, error) {
w.closeMu.Lock()
if w.closed {
w.closeMu.Unlock()
return 0, errors.New("writer already closed")
}
w.wg.Add(1)
w.closeMu.Unlock()
defer w.wg.Done()
f, err := w.pool.get()
if err != nil {
return 0, fmt.Errorf("failed to get file from pool: %w", err)
}
n, writeErr := f.WriteAt(p, off)
w.pool.put(f, writeErr)
if writeErr != nil {
return n, fmt.Errorf("failed to write at offset %d: %w", off, writeErr)
}
return n, writeErr
}
func (w *smbWriterAt) Close() error {
w.closeMu.Lock()
defer w.closeMu.Unlock()
if w.closed {
return nil
}
w.closed = true
// Wait for all pending writes to finish
w.wg.Wait()
var errs []error
// Drain the pool
if err := w.pool.drain(); err != nil {
errs = append(errs, fmt.Errorf("failed to drain file pool: %w", err))
}
// Remove session
w.pool.fs.removeSession()
if len(errs) > 0 {
return errors.Join(errs...)
}
return nil
}
// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
var err error
o := &Object{
fs: f,
remote: remote,
@@ -588,42 +519,27 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
return nil, fs.ErrorIsDir
}
err := o.fs.ensureDirectory(ctx, share, filename)
err = o.fs.ensureDirectory(ctx, share, filename)
if err != nil {
return nil, fmt.Errorf("failed to make parent directories: %w", err)
}
smbPath := o.fs.toSambaPath(filename)
filename = o.fs.toSambaPath(filename)
o.fs.addSession() // Show session in use
defer o.fs.removeSession()
// One-time truncate
cn, err := o.fs.getConnection(ctx, share)
if err != nil {
return nil, err
}
file, err := cn.smbShare.OpenFile(smbPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644)
fl, err := cn.smbShare.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644)
if err != nil {
o.fs.putConnection(&cn, err)
return nil, err
return nil, fmt.Errorf("failed to open: %w", err)
}
if size > 0 {
if truncateErr := file.Truncate(size); truncateErr != nil {
_ = file.Close()
o.fs.putConnection(&cn, truncateErr)
return nil, fmt.Errorf("failed to truncate file: %w", truncateErr)
}
}
if closeErr := file.Close(); closeErr != nil {
o.fs.putConnection(&cn, closeErr)
return nil, fmt.Errorf("failed to close file after truncate: %w", closeErr)
}
o.fs.putConnection(&cn, nil)
// Add a new session
o.fs.addSession()
return &smbWriterAt{
pool: newFilePool(ctx, o.fs, share, smbPath),
}, nil
return fl, nil
}
// Shutdown the backend, closing any background tasks and any
@@ -902,11 +818,6 @@ func ensureSuffix(s, suffix string) string {
return s + suffix
}
// isPathDir determines if a path represents a directory based on trailing slash
func isPathDir(path string) bool {
return path == "" || strings.HasSuffix(path, "/")
}
func trimPathPrefix(s, prefix string) string {
// we need to clean the paths to make tests pass!
s = betterPathClean(s)


@@ -1,41 +0,0 @@
// Unit tests for internal SMB functions
package smb
import "testing"
// TestIsPathDir tests the isPathDir function logic
func TestIsPathDir(t *testing.T) {
tests := []struct {
path string
expected bool
}{
// Empty path should be considered a directory
{"", true},
// Paths with trailing slash should be directories
{"/", true},
{"share/", true},
{"share/dir/", true},
{"share/dir/subdir/", true},
// Paths without trailing slash should not be directories
{"share", false},
{"share/dir", false},
{"share/dir/file", false},
{"share/dir/subdir/file", false},
// Edge cases
{"share//", true},
{"share///", true},
{"share/dir//", true},
}
for _, tt := range tests {
t.Run(tt.path, func(t *testing.T) {
result := isPathDir(tt.path)
if result != tt.expected {
t.Errorf("isPathDir(%q) = %v, want %v", tt.path, result, tt.expected)
}
})
}
}


@@ -561,21 +561,6 @@ func (f *Fs) setRoot(root string) {
f.rootContainer, f.rootDirectory = bucket.Split(f.root)
}
// Fetch the base container's policy to be used if/when we need to create a
// segments container to ensure we use the same policy.
func (f *Fs) fetchStoragePolicy(ctx context.Context, container string) (fs.Fs, error) {
err := f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
_, rxHeaders, err := f.c.Container(ctx, container)
f.opt.StoragePolicy = rxHeaders["X-Storage-Policy"]
fs.Debugf(f, "Auto set StoragePolicy to %s", f.opt.StoragePolicy)
return shouldRetryHeaders(ctx, rxHeaders, err)
})
return nil, err
}
// NewFsWithConnection constructs an Fs from the path, container:path
// and authenticated connection.
//
@@ -605,7 +590,6 @@ func NewFsWithConnection(ctx context.Context, opt *Options, name, root string, c
f.opt.UseSegmentsContainer.Valid = true
fs.Debugf(f, "Auto set use_segments_container to %v", f.opt.UseSegmentsContainer.Value)
}
if f.rootContainer != "" && f.rootDirectory != "" {
// Check to see if the object exists - ignoring directory markers
var info swift.Object
@@ -789,20 +773,21 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool, callback func(fs.DirEntry) error) (err error) {
func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
if container == "" {
return fs.ErrorListBucketRequired
return nil, fs.ErrorListBucketRequired
}
// List the objects
err = f.list(ctx, container, directory, prefix, addContainer, false, false, func(entry fs.DirEntry) error {
return callback(entry)
entries = append(entries, entry)
return nil
})
if err != nil {
return err
return nil, err
}
// container must be present if listing succeeded
f.cache.MarkOK(container)
return nil
return entries, nil
}
// listContainers lists the containers
@@ -833,46 +818,14 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
container, directory := f.split(dir)
if container == "" {
if directory != "" {
return fs.ErrorListBucketRequired
}
entries, err := f.listContainers(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
} else {
err := f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "", list.Add)
if err != nil {
return err
return nil, fs.ErrorListBucketRequired
}
return f.listContainers(ctx)
}
return list.Flush()
return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "")
}
// ListR lists the objects and directories of the Fs starting
@@ -1148,13 +1101,6 @@ func (f *Fs) newSegmentedUpload(ctx context.Context, dstContainer string, dstPat
container: dstContainer,
}
if f.opt.UseSegmentsContainer.Value {
if f.opt.StoragePolicy == "" {
_, err = f.fetchStoragePolicy(ctx, dstContainer)
if err != nil {
return nil, err
}
}
su.container += segmentsContainerSuffix
err = f.makeContainer(ctx, su.container)
if err != nil {
@@ -1704,7 +1650,6 @@ var (
_ fs.PutStreamer = &Fs{}
_ fs.Copier = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
)


@@ -76,7 +76,6 @@ func (f *Fs) testNoChunk(t *testing.T) {
// Additional tests that aren't in the framework
func (f *Fs) InternalTest(t *testing.T) {
t.Run("PolicyDiscovery", f.testPolicyDiscovery)
t.Run("NoChunk", f.testNoChunk)
t.Run("WithChunk", f.testWithChunk)
t.Run("WithChunkFail", f.testWithChunkFail)
@@ -196,50 +195,4 @@ func (f *Fs) testCopyLargeObject(t *testing.T) {
require.Equal(t, obj.Size(), objTarget.Size())
}
func (f *Fs) testPolicyDiscovery(t *testing.T) {
ctx := context.TODO()
container := "testPolicyDiscovery-1"
// Reset the policy so we can test if it is populated.
f.opt.StoragePolicy = ""
err := f.makeContainer(ctx, container)
require.NoError(t, err)
_, err = f.fetchStoragePolicy(ctx, container)
require.NoError(t, err)
// Default policy for SAIO image is 1replica.
assert.Equal(t, "1replica", f.opt.StoragePolicy)
// Create a container using a non-default policy, and check to ensure
// that the created segments container uses the same non-default policy.
policy := "Policy-1"
container = "testPolicyDiscovery-2"
f.opt.StoragePolicy = policy
err = f.makeContainer(ctx, container)
require.NoError(t, err)
// Reset the policy so we can test if it is populated, and set to the
// non-default policy.
f.opt.StoragePolicy = ""
_, err = f.fetchStoragePolicy(ctx, container)
require.NoError(t, err)
assert.Equal(t, policy, f.opt.StoragePolicy)
// Test that when a segmented upload container is made, the newly
// created container inherits the non-default policy of the base
// container.
f.opt.StoragePolicy = ""
f.opt.UseSegmentsContainer.Value = true
su, err := f.newSegmentedUpload(ctx, container, "")
require.NoError(t, err)
// The container name we expected?
segmentsContainer := container + segmentsContainerSuffix
assert.Equal(t, segmentsContainer, su.container)
// The policy we expected?
f.opt.StoragePolicy = ""
_, err = f.fetchStoragePolicy(ctx, su.container)
require.NoError(t, err)
assert.Equal(t, policy, f.opt.StoragePolicy)
}
var _ fstests.InternalTester = (*Fs)(nil)


@@ -1,7 +1,7 @@
// Package common defines code common to the union and the policies
//
// These need to be defined in a separate package to avoid import loops
package common //nolint:revive // Don't include revive when running golangci-lint because this triggers var-naming: avoid meaningless package names
package common
import "github.com/rclone/rclone/fs"


@@ -21,6 +21,7 @@ func (p *EpFF) epff(ctx context.Context, upstreams []*upstream.Fs, filePath stri
ctx, cancel := context.WithCancel(ctx)
defer cancel()
for _, u := range upstreams {
u := u // Closure
go func() {
rfs := u.RootFs
remote := path.Join(u.RootPath, filePath)

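The removed `u := u // Closure` line is the pre-Go-1.22 idiom for capturing a loop variable in a goroutine; since Go 1.22 each iteration gets its own variable, so the copy is redundant. A small demonstration:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for _, s := range []string{"a", "b", "c"} {
		// Since Go 1.22 each iteration has its own s, so the old
		// `s := s` shadow copy (removed in the hunk above) is redundant.
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(s)
		}()
	}
	wg.Wait()
}
```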

@@ -123,7 +123,7 @@ func (p *Prop) Hashes() (hashes map[hash.Type]string) {
hashes = make(map[hash.Type]string)
for _, checksums := range p.Checksums {
checksums = strings.ToLower(checksums)
for checksum := range strings.SplitSeq(checksums, " ") {
for _, checksum := range strings.Split(checksums, " ") {
switch {
case strings.HasPrefix(checksum, "sha1:"):
hashes[hash.SHA1] = checksum[5:]


@@ -1,119 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Create test TLS certificates for use with rclone.
OUT_DIR="${OUT_DIR:-./tls-test}"
CA_SUBJ="${CA_SUBJ:-/C=US/ST=Test/L=Test/O=Test Org/OU=Test Unit/CN=Test Root CA}"
SERVER_CN="${SERVER_CN:-localhost}"
CLIENT_CN="${CLIENT_CN:-Test Client}"
CLIENT_KEY_PASS="${CLIENT_KEY_PASS:-testpassword}"
CA_DAYS=${CA_DAYS:-3650}
SERVER_DAYS=${SERVER_DAYS:-825}
CLIENT_DAYS=${CLIENT_DAYS:-825}
mkdir -p "$OUT_DIR"
cd "$OUT_DIR"
# Create OpenSSL config
# CA extensions
cat > ca_openssl.cnf <<'EOF'
[ ca_ext ]
basicConstraints = critical, CA:true, pathlen:1
keyUsage = critical, keyCertSign, cRLSign
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always,issuer
EOF
# Server extensions (SAN includes localhost + loopback IP)
cat > server_openssl.cnf <<EOF
[ server_ext ]
basicConstraints = critical, CA:false
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
subjectAltName = @alt_names
[ alt_names ]
DNS.1 = ${SERVER_CN}
IP.1 = 127.0.0.1
EOF
# Client extensions (for mTLS client auth)
cat > client_openssl.cnf <<'EOF'
[ client_ext ]
basicConstraints = critical, CA:false
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
EOF
echo "Create CA key, CSR, and self-signed CA cert"
if [ ! -f ca.key.pem ]; then
openssl genrsa -out ca.key.pem 4096
chmod 600 ca.key.pem
fi
openssl req -new -key ca.key.pem -subj "$CA_SUBJ" -out ca.csr.pem
openssl x509 -req -in ca.csr.pem -signkey ca.key.pem \
-sha256 -days "$CA_DAYS" \
-extfile ca_openssl.cnf -extensions ca_ext \
-out ca.cert.pem
echo "Create server key (NO PASSWORD) and cert signed by CA"
openssl genrsa -out server.key.pem 2048
chmod 600 server.key.pem
openssl req -new -key server.key.pem -subj "/CN=${SERVER_CN}" -out server.csr.pem
openssl x509 -req -in server.csr.pem \
-CA ca.cert.pem -CAkey ca.key.pem -CAcreateserial \
-out server.cert.pem -days "$SERVER_DAYS" -sha256 \
-extfile server_openssl.cnf -extensions server_ext
echo "Create client key (PASSWORD-PROTECTED), CSR, and cert"
openssl genrsa -aes256 -passout pass:"$CLIENT_KEY_PASS" -out client.key.pem 2048
chmod 600 client.key.pem
openssl req -new -key client.key.pem -passin pass:"$CLIENT_KEY_PASS" \
-subj "/CN=${CLIENT_CN}" -out client.csr.pem
openssl x509 -req -in client.csr.pem \
-CA ca.cert.pem -CAkey ca.key.pem -CAcreateserial \
-out client.cert.pem -days "$CLIENT_DAYS" -sha256 \
-extfile client_openssl.cnf -extensions client_ext
echo "Verify chain"
openssl verify -CAfile ca.cert.pem server.cert.pem client.cert.pem
echo "Done"
echo
echo "Summary"
echo "-------"
printf "%-22s %s\n" \
"CA key:" "ca.key.pem" \
"CA cert:" "ca.cert.pem" \
"Server key:" "server.key.pem (no password)" \
"Server CSR:" "server.csr.pem" \
"Server cert:" "server.cert.pem (SAN: ${SERVER_CN}, 127.0.0.1)" \
"Client key:" "client.key.pem (encrypted)" \
"Client CSR:" "client.csr.pem" \
"Client cert:" "client.cert.pem" \
"Client key password:" "$CLIENT_KEY_PASS"
echo
echo "Test rclone server"
echo
echo "rclone serve http -vv --addr :8080 --cert ${OUT_DIR}/server.cert.pem --key ${OUT_DIR}/server.key.pem --client-ca ${OUT_DIR}/ca.cert.pem ."
echo
echo "Test rclone client"
echo
echo "rclone lsf :http: --http-url 'https://localhost:8080' --ca-cert ${OUT_DIR}/ca.cert.pem --client-cert ${OUT_DIR}/client.cert.pem --client-key ${OUT_DIR}/client.key.pem --client-pass \$(rclone obscure $CLIENT_KEY_PASS)"
echo


@@ -1,159 +0,0 @@
//go:build ignore
package main
import (
"bytes"
"cmp"
"context"
"encoding/json"
"flag"
"fmt"
"os"
"path/filepath"
"slices"
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest/runs"
"github.com/stretchr/testify/assert/yaml"
)
var path = flag.String("path", "./docs/content/", "root path")
const (
configFile = "fstest/test_all/config.yaml"
startListIgnores = "<!--- start list_ignores - DO NOT EDIT THIS SECTION - use make commanddocs --->"
endListIgnores = "<!--- end list_ignores - DO NOT EDIT THIS SECTION - use make commanddocs --->"
startListFailures = "<!--- start list_failures - DO NOT EDIT THIS SECTION - use make commanddocs --->"
endListFailures = "<!--- end list_failures - DO NOT EDIT THIS SECTION - use make commanddocs --->"
integrationTestsJSONURL = "https://pub.rclone.org/integration-tests/current/index.json"
integrationTestsHTMLURL = "https://pub.rclone.org/integration-tests/current/"
)
func main() {
err := replaceBetween(*path, startListIgnores, endListIgnores, getIgnores)
if err != nil {
fs.Errorf(*path, "error replacing ignores: %v", err)
}
err = replaceBetween(*path, startListFailures, endListFailures, getFailures)
if err != nil {
fs.Errorf(*path, "error replacing failures: %v", err)
}
}
// replaceBetween replaces the text between startSep and endSep with fn()
func replaceBetween(path, startSep, endSep string, fn func() (string, error)) error {
b, err := os.ReadFile(filepath.Join(path, "bisync.md"))
if err != nil {
return err
}
doc := string(b)
before, after, found := strings.Cut(doc, startSep)
if !found {
return fmt.Errorf("could not find: %v", startSep)
}
_, after, found = strings.Cut(after, endSep)
if !found {
return fmt.Errorf("could not find: %v", endSep)
}
replaceSection, err := fn()
if err != nil {
return err
}
newDoc := before + startSep + "\n" + strings.TrimSpace(replaceSection) + "\n" + endSep + after
err = os.WriteFile(filepath.Join(path, "bisync.md"), []byte(newDoc), 0777)
if err != nil {
return err
}
return nil
}
// getIgnores updates the list of ignores from config.yaml
func getIgnores() (string, error) {
config, err := parseConfig()
if err != nil {
return "", fmt.Errorf("failed to parse config: %v", err)
}
s := ""
slices.SortFunc(config.Backends, func(a, b runs.Backend) int {
return cmp.Compare(a.Remote, b.Remote)
})
for _, backend := range config.Backends {
include := false
if slices.Contains(backend.IgnoreTests, "cmd/bisync") {
include = true
s += fmt.Sprintf("- `%s` (`%s`)\n", strings.TrimSuffix(backend.Remote, ":"), backend.Backend)
}
for _, ignore := range backend.Ignore {
if strings.Contains(strings.ToLower(ignore), "bisync") {
if !include { // don't have header row yet
s += fmt.Sprintf("- `%s` (`%s`)\n", strings.TrimSuffix(backend.Remote, ":"), backend.Backend)
}
include = true
s += fmt.Sprintf(" - `%s`\n", ignore)
// TODO: might be neat to add a "reason" param displaying the reason the test is ignored
}
}
}
return s, nil
}
// getFailures updates the list of currently failing tests from the integration tests server
func getFailures() (string, error) {
var buf bytes.Buffer
err := operations.CopyURLToWriter(context.Background(), integrationTestsJSONURL, &buf)
if err != nil {
return "", err
}
r := runs.Report{}
err = json.Unmarshal(buf.Bytes(), &r)
if err != nil {
return "", fmt.Errorf("failed to unmarshal json: %v", err)
}
s := ""
for _, run := range r.Failed {
for i, t := range run.FailedTests {
if strings.Contains(strings.ToLower(t), "bisync") {
if i == 0 { // don't have header row yet
s += fmt.Sprintf("- `%s` (`%s`)\n", strings.TrimSuffix(run.Remote, ":"), run.Backend)
}
url := integrationTestsHTMLURL + run.TrialName
url = url[:len(url)-5] + "1.txt" // numbers higher than 1 could change from night to night
s += fmt.Sprintf(" - [`%s`](%v)\n", t, url)
if i == 4 && len(run.FailedTests) > 5 { // stop after 5
s += fmt.Sprintf(" - [%v more](%v)\n", len(run.FailedTests)-5, integrationTestsHTMLURL)
break
}
}
}
}
s += fmt.Sprintf("- Updated: %v", r.DateTime)
return s, nil
}
// parseConfig reads and parses the config.yaml file
func parseConfig() (*runs.Config, error) {
d, err := os.ReadFile(configFile)
if err != nil {
return nil, fmt.Errorf("failed to read config file: %w", err)
}
config := &runs.Config{}
err = yaml.Unmarshal(d, &config)
if err != nil {
return nil, fmt.Errorf("failed to parse config file: %w", err)
}
return config, nil
}

View File

@@ -1,17 +0,0 @@
#!/usr/bin/env bash
#
# Run markdown linting locally
set -e
# Workflow
build=.github/workflows/build.yml
# Globs read from $build
globs=$(awk '/- name: Check Markdown format/{f=1;next} f && /globs:/{f=2;next} f==2 && NF{if($1=="-"){exit} print $0}' $build)
if [ -z "$globs" ]; then
echo "Error: No globs found in Check Markdown step in $build" >&2
exit 1
fi
docker run -v $PWD:/workdir --user $(id -u):$(id -g) davidanson/markdownlint-cli2 $globs

View File

@@ -33,7 +33,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
}
logMap = map[string]string{}
logs = []string{}
for line := range bytes.SplitSeq(out, []byte{'\n'}) {
for _, line := range bytes.Split(out, []byte{'\n'}) {
if len(line) == 0 {
continue
}

View File

@@ -51,52 +51,47 @@ output. The output is typically used, free, quota and trash contents.
E.g. Typical output from ` + "`rclone about remote:`" + ` is:
` + "```text" + `
Total: 17 GiB
Used: 7.444 GiB
Free: 1.315 GiB
Trashed: 100.000 MiB
Other: 8.241 GiB
` + "```" + `
Total: 17 GiB
Used: 7.444 GiB
Free: 1.315 GiB
Trashed: 100.000 MiB
Other: 8.241 GiB
Where the fields are:
- Total: Total size available.
- Used: Total size used.
- Free: Total space available to this user.
- Trashed: Total space used by trash.
- Other: Total amount in other storage (e.g. Gmail, Google Photos).
- Objects: Total number of objects in the storage.
* Total: Total size available.
* Used: Total size used.
* Free: Total space available to this user.
* Trashed: Total space used by trash.
* Other: Total amount in other storage (e.g. Gmail, Google Photos).
* Objects: Total number of objects in the storage.
All sizes are in bytes.
Applying a ` + "`--full`" + ` flag to the command prints the bytes in full, e.g.
` + "```text" + `
Total: 18253611008
Used: 7993453766
Free: 1411001220
Trashed: 104857602
Other: 8849156022
` + "```" + `
Total: 18253611008
Used: 7993453766
Free: 1411001220
Trashed: 104857602
Other: 8849156022
A ` + "`--json`" + ` flag generates conveniently machine-readable output, e.g.
` + "```json" + `
{
"total": 18253611008,
"used": 7993453766,
"trashed": 104857602,
"other": 8849156022,
"free": 1411001220
}
` + "```" + `
{
"total": 18253611008,
"used": 7993453766,
"trashed": 104857602,
"other": 8849156022,
"free": 1411001220
}
Not all backends print all fields. Information is not included if it is not
provided by a backend. Where the value is unlimited it is omitted.
Some backends do not support the ` + "`rclone about`" + ` command at all,
see complete list in [documentation](https://rclone.org/overview/#optional-features).`,
see complete list in [documentation](https://rclone.org/overview/#optional-features).
`,
Annotations: map[string]string{
"versionIntroduced": "v1.41",
// "groups": "",

View File

@@ -23,23 +23,21 @@ func init() {
}
var commandDefinition = &cobra.Command{
Use: "authorize <backendname> [base64_json_blob | client_id client_secret]",
Use: "authorize <fs name> [base64_json_blob | client_id client_secret]",
Short: `Remote authorization.`,
Long: `Remote authorization. Used to authorize a remote or headless
rclone from a machine with a browser. Use as instructed by rclone config.
See also the [remote setup documentation](/remote_setup).
rclone from a machine with a browser - use as instructed by
rclone config.
The command requires 1-3 arguments:
- Name of a backend (e.g. "drive", "s3")
- Either a base64 encoded JSON blob obtained from a previous rclone config session
- Or a client_id and client_secret pair obtained from the remote service
- fs name (e.g., "drive", "s3", etc.)
- Either a base64 encoded JSON blob obtained from a previous rclone config session
- Or a client_id and client_secret pair obtained from the remote service
Use --auth-no-open-browser to prevent rclone from opening the auth
link in the default browser automatically.
Use --template to generate HTML output via a custom Go template. If a blank
string is provided as an argument to this flag, the default template is used.`,
Use --template to generate HTML output via a custom Go template. If a blank string is provided as an argument to this flag, the default template is used.`,
Annotations: map[string]string{
"versionIntroduced": "v1.27",
},
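A hypothetical helper showing the simplest invocation form above (backend name only, with the documented --auth-no-open-browser flag); running the rclone binary through os/exec is an assumption.

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Prints the auth URL and, once authorization completes, the token
	// to paste back into the headless "rclone config" session.
	out, err := exec.Command("rclone", "authorize", "drive", "--auth-no-open-browser").CombinedOutput()
	fmt.Print(string(out))
	if err != nil {
		panic(err)
	}
}
```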

View File

@@ -10,7 +10,7 @@ import (
func TestAuthorizeCommand(t *testing.T) {
// Test that the Use string is correctly formatted
if commandDefinition.Use != "authorize <backendname> [base64_json_blob | client_id client_secret]" {
if commandDefinition.Use != "authorize <fs name> [base64_json_blob | client_id client_secret]" {
t.Errorf("Command Use string doesn't match expected format: %s", commandDefinition.Use)
}
@@ -26,7 +26,7 @@ func TestAuthorizeCommand(t *testing.T) {
}
helpOutput := buf.String()
if !strings.Contains(helpOutput, "authorize <backendname>") {
if !strings.Contains(helpOutput, "authorize <fs name>") {
t.Errorf("Help output doesn't contain correct usage information")
}
}

View File

@@ -37,33 +37,26 @@ see the backend docs for definitions.
You can discover what commands a backend implements by using
` + "```sh" + `
rclone backend help remote:
rclone backend help <backendname>
` + "```" + `
rclone backend help remote:
rclone backend help <backendname>
You can also discover information about the backend with the following
command (see [operations/fsinfo](/rc/#operations-fsinfo) in the remote
control docs for more info).
` + "```sh" + `
rclone backend features remote:
` + "```" + `
rclone backend features remote:
Pass options to the backend command with -o. This should be key=value or key, e.g.:
` + "```sh" + `
rclone backend stats remote:path stats -o format=json -o long
` + "```" + `
rclone backend stats remote:path stats -o format=json -o long
Pass arguments to the backend by placing them at the end of the line
` + "```sh" + `
rclone backend cleanup remote:path file1 file2 file3
` + "```" + `
rclone backend cleanup remote:path file1 file2 file3
Note: to run these commands on a running backend, see
[backend/command](/rc/#backend-command) in the rc docs.`,
[backend/command](/rc/#backend-command) in the rc docs.
`,
Annotations: map[string]string{
"versionIntroduced": "v1.52",
"groups": "Important",

View File

@@ -4,19 +4,15 @@ package bilib
import (
"bytes"
"log/slog"
"sync"
"github.com/rclone/rclone/fs/log"
)
// CaptureOutput runs a function capturing its output at log level INFO.
func CaptureOutput(fun func()) []byte {
var mu sync.Mutex
buf := &bytes.Buffer{}
oldLevel := log.Handler.SetLevel(slog.LevelInfo)
log.Handler.SetOutput(func(level slog.Level, text string) {
mu.Lock()
defer mu.Unlock()
buf.WriteString(text)
})
defer func() {
@@ -24,7 +20,5 @@ func CaptureOutput(fun func()) []byte {
log.Handler.SetLevel(oldLevel)
}()
fun()
mu.Lock()
defer mu.Unlock()
return buf.Bytes()
}
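A tiny usage sketch of CaptureOutput as defined above; fs.Infof is simply a convenient producer of INFO-level output.

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/cmd/bisync/bilib"
	"github.com/rclone/rclone/fs"
)

func main() {
	// Everything logged at INFO while fun() runs lands in the returned buffer.
	out := bilib.CaptureOutput(func() {
		fs.Infof(nil, "hello from the captured region")
	})
	fmt.Printf("captured: %s", out)
}
```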

View File

@@ -176,8 +176,6 @@ var (
// Flag -refresh-times helps with Dropbox tests failing with message
// "src and dst identical but can't set mod time without deleting and re-uploading"
argRefreshTimes = flag.Bool("refresh-times", false, "Force refreshing the target modtime, useful for Dropbox (default: false)")
ignoreLogs = flag.Bool("ignore-logs", false, "skip comparing log lines but still compare listings")
argPCount = flag.Int("pcount", 2, "number of parallel subtests to run for TestBisyncConcurrent") // go test ./cmd/bisync -race -pcount 10
)
// bisyncTest keeps all test data in a single place
@@ -228,18 +226,6 @@ var color = bisync.Color
// TestMain drives the tests
func TestMain(m *testing.M) {
bisync.LogTZ = time.UTC
ci := fs.GetConfig(context.TODO())
ciSave := *ci
defer func() {
*ci = ciSave
}()
// need to set context.TODO() here as we cannot pass a ctx to fs.LogLevelPrintf
ci.LogLevel = fs.LogLevelInfo
if *argDebug {
ci.LogLevel = fs.LogLevelDebug
}
fstest.Initialise()
fstest.TestMain(m)
}
@@ -252,8 +238,7 @@ func TestBisyncRemoteLocal(t *testing.T) {
fs.Logf(nil, "remote: %v", remote)
require.NoError(t, err)
defer cleanup()
ctx, _ := fs.AddConfig(context.TODO())
testBisync(ctx, t, remote, *argRemote2)
testBisync(t, remote, *argRemote2)
}
// Path1 is local, Path2 is remote
@@ -265,8 +250,7 @@ func TestBisyncLocalRemote(t *testing.T) {
fs.Logf(nil, "remote: %v", remote)
require.NoError(t, err)
defer cleanup()
ctx, _ := fs.AddConfig(context.TODO())
testBisync(ctx, t, *argRemote2, remote)
testBisync(t, *argRemote2, remote)
}
// Path1 and Path2 are both different directories on remote
@@ -276,44 +260,14 @@ func TestBisyncRemoteRemote(t *testing.T) {
fs.Logf(nil, "remote: %v", remote)
require.NoError(t, err)
defer cleanup()
ctx, _ := fs.AddConfig(context.TODO())
testBisync(ctx, t, remote, remote)
}
// make sure rc can cope with running concurrent jobs
func TestBisyncConcurrent(t *testing.T) {
if !isLocal(*fstest.RemoteName) {
t.Skip("TestBisyncConcurrent is skipped on non-local")
}
if *argTestCase != "" && *argTestCase != "basic" {
t.Skip("TestBisyncConcurrent only tests 'basic'")
}
if *argPCount < 2 {
t.Skip("TestBisyncConcurrent is pointless with -pcount < 2")
}
if *argGolden {
t.Skip("skip TestBisyncConcurrent when goldenizing")
}
oldArgTestCase := argTestCase
*argTestCase = "basic"
*ignoreLogs = true // not useful to compare logs here because both runs will be logging at once
t.Cleanup(func() {
argTestCase = oldArgTestCase
*ignoreLogs = false
})
for i := 0; i < *argPCount; i++ {
t.Run(fmt.Sprintf("test%v", i), testParallel)
}
}
func testParallel(t *testing.T) {
t.Parallel()
TestBisyncRemoteRemote(t)
testBisync(t, remote, remote)
}
// TestBisync is a test engine for bisync test cases.
func testBisync(ctx context.Context, t *testing.T, path1, path2 string) {
func testBisync(t *testing.T, path1, path2 string) {
ctx := context.Background()
fstest.Initialise()
ci := fs.GetConfig(ctx)
ciSave := *ci
defer func() {
@@ -322,9 +276,8 @@ func testBisync(ctx context.Context, t *testing.T, path1, path2 string) {
if *argRefreshTimes {
ci.RefreshTimes = true
}
bisync.ColorsLock.Lock()
bisync.Colors = true
bisync.ColorsLock.Unlock()
time.Local = bisync.TZ
ci.FsCacheExpireDuration = fs.Duration(5 * time.Hour)
baseDir, err := os.Getwd()
@@ -476,7 +429,6 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase str
// Prepare initial content
b.cleanupCase(ctx)
ctx = accounting.WithStatsGroup(ctx, random.String(8))
fstest.CheckListingWithPrecision(b.t, b.fs1, []fstest.Item{}, []string{}, b.fs1.Precision()) // verify starting from empty
fstest.CheckListingWithPrecision(b.t, b.fs2, []fstest.Item{}, []string{}, b.fs2.Precision())
initFs, err := cache.Get(ctx, b.initDir)
@@ -522,7 +474,7 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase str
require.NoError(b.t, err)
b.step = 0
b.stopped = false
for line := range strings.SplitSeq(string(scenBuf), "\n") {
for _, line := range strings.Split(string(scenBuf), "\n") {
comment := strings.Index(line, "#")
if comment != -1 {
line = line[:comment]
@@ -611,15 +563,11 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase str
}
}
func isLocal(remote string) bool {
return bilib.IsLocalPath(remote) && !strings.HasPrefix(remote, ":") && !strings.Contains(remote, ",")
}
// makeTempRemote creates a temporary folder and makes a filesystem
// if a local path is provided, it's ignored (the test will run under system temp)
func (b *bisyncTest) makeTempRemote(ctx context.Context, remote, subdir string) (f, parent fs.Fs, path, canon string) {
var err error
if isLocal(remote) {
if bilib.IsLocalPath(remote) && !strings.HasPrefix(remote, ":") && !strings.Contains(remote, ",") {
if remote != "" && !strings.HasPrefix(remote, "local") && *fstest.RemoteName != "" {
b.t.Fatalf(`Missing ":" in remote %q. Use "local" to test with local filesystem.`, remote)
}
@@ -650,14 +598,20 @@ func (b *bisyncTest) makeTempRemote(ctx context.Context, remote, subdir string)
}
func (b *bisyncTest) cleanupCase(ctx context.Context) {
_ = operations.Purge(ctx, b.fs1, "")
_ = operations.Purge(ctx, b.fs2, "")
// Silence "directory not found" errors from the ftp backend
_ = bilib.CaptureOutput(func() {
_ = operations.Purge(ctx, b.fs1, "")
})
_ = bilib.CaptureOutput(func() {
_ = operations.Purge(ctx, b.fs2, "")
})
_ = os.RemoveAll(b.workDir)
accounting.Stats(ctx).ResetCounters()
}
func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
var fsrc, fdst fs.Fs
ctx = accounting.WithStatsGroup(ctx, random.String(8))
accounting.Stats(ctx).ResetErrors()
b.logPrintf("%s %s", color(terminal.CyanFg, b.stepStr), color(terminal.BlueFg, line))
ci := fs.GetConfig(ctx)
@@ -665,6 +619,11 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
defer func() {
*ci = ciSave
}()
ci.LogLevel = fs.LogLevelInfo
if b.debug {
ci.LogLevel = fs.LogLevelDebug
}
testFunc := func() {
src := filepath.Join(b.dataDir, "file7.txt")
@@ -936,7 +895,7 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
// splitLine splits scenario line into tokens and performs
// substitutions that involve whitespace or control chars.
func splitLine(line string) (args []string) {
for s := range strings.FieldsSeq(line) {
for _, s := range strings.Fields(line) {
b := []byte(whitespaceReplacer.Replace(s))
b = regexChar.ReplaceAllFunc(b, func(b []byte) []byte {
c, _ := strconv.ParseUint(string(b[5:7]), 16, 8)
@@ -994,12 +953,6 @@ func (b *bisyncTest) checkPreReqs(ctx context.Context, opt *bisync.Options) (con
b.fs2.Features().Disable("Copy") // API has longstanding bug for conflictBehavior=replace https://github.com/rclone/rclone/issues/4590
b.fs2.Features().Disable("Move")
}
if strings.HasPrefix(b.fs1.String(), "sftp") {
b.fs1.Features().Disable("Copy") // disable --sftp-copy-is-hardlink as hardlinks are not truly copies
}
if strings.HasPrefix(b.fs2.String(), "sftp") {
b.fs2.Features().Disable("Copy") // disable --sftp-copy-is-hardlink as hardlinks are not truly copies
}
if strings.Contains(strings.ToLower(fs.ConfigString(b.fs1)), "mailru") || strings.Contains(strings.ToLower(fs.ConfigString(b.fs2)), "mailru") {
fs.GetConfig(ctx).TPSLimit = 10 // https://github.com/rclone/rclone/issues/7768#issuecomment-2060888980
}
@@ -1018,33 +971,21 @@ func (b *bisyncTest) checkPreReqs(ctx context.Context, opt *bisync.Options) (con
}
// test if modtimes are writeable
testSetModtime := func(f fs.Fs) {
ctx := accounting.WithStatsGroup(ctx, random.String(8)) // keep stats separate
in := bytes.NewBufferString("modtime_write_test")
objinfo := object.NewStaticObjectInfo("modtime_write_test", initDate, int64(len("modtime_write_test")), true, nil, nil)
obj, err := f.Put(ctx, in, objinfo)
require.NoError(b.t, err)
if !f.Features().IsLocal {
time.Sleep(time.Second) // avoid GoogleCloudStorage Error 429 rateLimitExceeded
}
err = obj.SetModTime(ctx, initDate)
if err == fs.ErrorCantSetModTime {
b.t.Skip("skipping test as at least one remote does not support setting modtime")
}
if err == fs.ErrorCantSetModTimeWithoutDelete { // transfers stats expected to differ on this backend
logReplacements = append(logReplacements, `^.*There was nothing to transfer.*$`, dropMe)
} else {
require.NoError(b.t, err)
}
if !f.Features().IsLocal {
time.Sleep(time.Second) // avoid GoogleCloudStorage Error 429 rateLimitExceeded
if b.testCase != "nomodtime" {
b.t.Skip("skipping test as at least one remote does not support setting modtime")
}
}
err = obj.Remove(ctx)
require.NoError(b.t, err)
}
if b.testCase != "nomodtime" {
testSetModtime(b.fs1)
testSetModtime(b.fs2)
}
testSetModtime(b.fs1)
testSetModtime(b.fs2)
if b.testCase == "normalization" || b.testCase == "extended_char_paths" || b.testCase == "extended_filenames" {
// test whether remote is capable of running test
@@ -1488,9 +1429,6 @@ func (b *bisyncTest) compareResults() int {
resultText := b.mangleResult(b.workDir, file, false)
if fileType(file) == "log" {
if *ignoreLogs {
continue
}
// save mangled logs so difference is easier on eyes
goldenFile := filepath.Join(b.logDir, "mangled.golden.log")
resultFile := filepath.Join(b.logDir, "mangled.result.log")
@@ -1513,7 +1451,7 @@ func (b *bisyncTest) compareResults() int {
fs.Log(nil, divider)
fs.Logf(nil, color(terminal.RedFg, "| MISCOMPARE -Golden vs +Results for %s"), file)
for line := range strings.SplitSeq(strings.TrimSpace(text), "\n") {
for _, line := range strings.Split(strings.TrimSpace(text), "\n") {
fs.Logf(nil, "| %s", strings.TrimSpace(line))
}
}
@@ -1636,14 +1574,6 @@ func (b *bisyncTest) mangleResult(dir, file string, golden bool) string {
`^.*not equal on recheck.*$`, dropMe,
)
}
if b.ignoreBlankHash || !b.fs1.Hashes().Contains(hash.MD5) || !b.fs2.Hashes().Contains(hash.MD5) {
// if either side lacks support for md5, need to ignore the "nothing to transfer" log,
// as sync may in fact need to transfer, where it would otherwise skip based on hash or just update modtime.
// transfer stats will also differ in fs.ErrorCantSetModTimeWithoutDelete scenario, and where --download-hash is needed.
logReplacements = append(logReplacements,
`^.*There was nothing to transfer.*$`, dropMe,
)
}
rep := logReplacements
if b.testCase == "dry_run" {
rep = append(rep, dryrunReplacements...)

View File

@@ -16,17 +16,15 @@ import (
"github.com/rclone/rclone/fs/operations"
)
type bisyncCheck = struct {
hashType hash.Type
fsrc, fdst fs.Fs
fcrypt *crypt.Fs
}
var hashType hash.Type
var fsrc, fdst fs.Fs
var fcrypt *crypt.Fs
// WhichCheck determines which CheckFn we should use based on the Fs types
// It is more robust and accurate than Check because
// it will fall back to CryptCheck or DownloadCheck instead of --size-only!
// it returns the *operations.CheckOpt with the CheckFn set.
func (b *bisyncRun) WhichCheck(ctx context.Context, opt *operations.CheckOpt) *operations.CheckOpt {
func WhichCheck(ctx context.Context, opt *operations.CheckOpt) *operations.CheckOpt {
ci := fs.GetConfig(ctx)
common := opt.Fsrc.Hashes().Overlap(opt.Fdst.Hashes())
@@ -42,32 +40,32 @@ func (b *bisyncRun) WhichCheck(ctx context.Context, opt *operations.CheckOpt) *o
if (srcIsCrypt && dstIsCrypt) || (!srcIsCrypt && dstIsCrypt) {
// if both are crypt or only dst is crypt
b.check.hashType = FdstCrypt.UnWrap().Hashes().GetOne()
if b.check.hashType != hash.None {
hashType = FdstCrypt.UnWrap().Hashes().GetOne()
if hashType != hash.None {
// use cryptcheck
b.check.fsrc = opt.Fsrc
b.check.fdst = opt.Fdst
b.check.fcrypt = FdstCrypt
fs.Infof(b.check.fdst, "Crypt detected! Using cryptcheck instead of check. (Use --size-only or --ignore-checksum to disable)")
opt.Check = b.CryptCheckFn
fsrc = opt.Fsrc
fdst = opt.Fdst
fcrypt = FdstCrypt
fs.Infof(fdst, "Crypt detected! Using cryptcheck instead of check. (Use --size-only or --ignore-checksum to disable)")
opt.Check = CryptCheckFn
return opt
}
} else if srcIsCrypt && !dstIsCrypt {
// if only src is crypt
b.check.hashType = FsrcCrypt.UnWrap().Hashes().GetOne()
if b.check.hashType != hash.None {
hashType = FsrcCrypt.UnWrap().Hashes().GetOne()
if hashType != hash.None {
// use reverse cryptcheck
b.check.fsrc = opt.Fdst
b.check.fdst = opt.Fsrc
b.check.fcrypt = FsrcCrypt
fs.Infof(b.check.fdst, "Crypt detected! Using cryptcheck instead of check. (Use --size-only or --ignore-checksum to disable)")
opt.Check = b.ReverseCryptCheckFn
fsrc = opt.Fdst
fdst = opt.Fsrc
fcrypt = FsrcCrypt
fs.Infof(fdst, "Crypt detected! Using cryptcheck instead of check. (Use --size-only or --ignore-checksum to disable)")
opt.Check = ReverseCryptCheckFn
return opt
}
}
// if we've gotten this far, neither check or cryptcheck will work, so use --download
fs.Infof(b.check.fdst, "Can't compare hashes, so using check --download for safety. (Use --size-only or --ignore-checksum to disable)")
fs.Infof(fdst, "Can't compare hashes, so using check --download for safety. (Use --size-only or --ignore-checksum to disable)")
opt.Check = DownloadCheckFn
return opt
}
@@ -90,17 +88,17 @@ func CheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool,
}
// CryptCheckFn is a slightly modified version of CryptCheck
func (b *bisyncRun) CryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
func CryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
cryptDst := dst.(*crypt.Object)
underlyingDst := cryptDst.UnWrap()
underlyingHash, err := underlyingDst.Hash(ctx, b.check.hashType)
underlyingHash, err := underlyingDst.Hash(ctx, hashType)
if err != nil {
return true, false, fmt.Errorf("error reading hash from underlying %v: %w", underlyingDst, err)
}
if underlyingHash == "" {
return false, true, nil
}
cryptHash, err := b.check.fcrypt.ComputeHash(ctx, cryptDst, src, b.check.hashType)
cryptHash, err := fcrypt.ComputeHash(ctx, cryptDst, src, hashType)
if err != nil {
return true, false, fmt.Errorf("error computing hash: %w", err)
}
@@ -108,10 +106,10 @@ func (b *bisyncRun) CryptCheckFn(ctx context.Context, dst, src fs.Object) (diffe
return false, true, nil
}
if cryptHash != underlyingHash {
err = fmt.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", b.check.fdst.Name(), b.check.fdst.Root(), cryptHash, b.check.fsrc.Name(), b.check.fsrc.Root(), underlyingHash)
err = fmt.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", fdst.Name(), fdst.Root(), cryptHash, fsrc.Name(), fsrc.Root(), underlyingHash)
fs.Debugf(src, "%s", err.Error())
// using same error msg as CheckFn so integration tests match
err = fmt.Errorf("%v differ", b.check.hashType)
err = fmt.Errorf("%v differ", hashType)
fs.Errorf(src, "%s", err.Error())
return true, false, nil
}
@@ -120,8 +118,8 @@ func (b *bisyncRun) CryptCheckFn(ctx context.Context, dst, src fs.Object) (diffe
// ReverseCryptCheckFn is like CryptCheckFn except src and dst are switched
// result: src is crypt, dst is non-crypt
func (b *bisyncRun) ReverseCryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
return b.CryptCheckFn(ctx, src, dst)
func ReverseCryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
return CryptCheckFn(ctx, src, dst)
}
// DownloadCheckFn is a slightly modified version of Check with --download
@@ -139,7 +137,7 @@ func (b *bisyncRun) checkconflicts(ctxCheck context.Context, filterCheck *filter
if filterCheck.HaveFilesFrom() {
fs.Debugf(nil, "There are potential conflicts to check.")
opt, close, checkopterr := check.GetCheckOpt(fs1, fs2)
opt, close, checkopterr := check.GetCheckOpt(b.fs1, b.fs2)
if checkopterr != nil {
b.critical = true
b.retryable = true
@@ -150,16 +148,16 @@ func (b *bisyncRun) checkconflicts(ctxCheck context.Context, filterCheck *filter
opt.Match = new(bytes.Buffer)
opt = b.WhichCheck(ctxCheck, opt)
opt = WhichCheck(ctxCheck, opt)
fs.Infof(nil, "Checking potential conflicts...")
check := operations.CheckFn(ctxCheck, opt)
fs.Infof(nil, "Finished checking the potential conflicts. %s", check)
// reset error count, because we don't want to count check errors as bisync errors
//reset error count, because we don't want to count check errors as bisync errors
accounting.Stats(ctxCheck).ResetErrors()
// return the list of identical files to check against later
//return the list of identical files to check against later
if len(fmt.Sprint(opt.Match)) > 0 {
matches = bilib.ToNames(strings.Split(fmt.Sprint(opt.Match), "\n"))
}
@@ -175,14 +173,14 @@ func (b *bisyncRun) checkconflicts(ctxCheck context.Context, filterCheck *filter
// WhichEqual is similar to WhichCheck, but checks a single object.
// Returns true if the objects are equal, false if they differ or if we don't know
func (b *bisyncRun) WhichEqual(ctx context.Context, src, dst fs.Object, Fsrc, Fdst fs.Fs) bool {
func WhichEqual(ctx context.Context, src, dst fs.Object, Fsrc, Fdst fs.Fs) bool {
opt, close, checkopterr := check.GetCheckOpt(Fsrc, Fdst)
if checkopterr != nil {
fs.Debugf(nil, "GetCheckOpt error: %v", checkopterr)
}
defer close()
opt = b.WhichCheck(ctx, opt)
opt = WhichCheck(ctx, opt)
differ, noHash, err := opt.Check(ctx, dst, src)
if err != nil {
fs.Errorf(src, "failed to check: %v", err)
@@ -219,7 +217,7 @@ func (b *bisyncRun) EqualFn(ctx context.Context) context.Context {
equal, skipHash = timeSizeEqualFn()
if equal && !skipHash {
whichHashType := func(f fs.Info) hash.Type {
ht := b.getHashType(f.Name())
ht := getHashType(f.Name())
if ht == hash.None && b.opt.Compare.SlowHashSyncOnly && !b.opt.Resync {
ht = f.Hashes().GetOne()
}
@@ -227,9 +225,9 @@ func (b *bisyncRun) EqualFn(ctx context.Context) context.Context {
}
srcHash, _ := src.Hash(ctx, whichHashType(src.Fs()))
dstHash, _ := dst.Hash(ctx, whichHashType(dst.Fs()))
srcHash, _ = b.tryDownloadHash(ctx, src, srcHash)
dstHash, _ = b.tryDownloadHash(ctx, dst, dstHash)
equal = !b.hashDiffers(srcHash, dstHash, whichHashType(src.Fs()), whichHashType(dst.Fs()), src.Size(), dst.Size())
srcHash, _ = tryDownloadHash(ctx, src, srcHash)
dstHash, _ = tryDownloadHash(ctx, dst, dstHash)
equal = !hashDiffers(srcHash, dstHash, whichHashType(src.Fs()), whichHashType(dst.Fs()), src.Size(), dst.Size())
}
if equal {
logger(ctx, operations.Match, src, dst, nil)
@@ -249,7 +247,7 @@ func (b *bisyncRun) resyncTimeSizeEqual(ctxNoLogger context.Context, src fs.Obje
// note that arg order is path1, path2, regardless of src/dst
path1, path2 := b.resyncWhichIsWhich(src, dst)
if sizeDiffers(path1.Size(), path2.Size()) {
winningPath := b.resolveLargerSmaller(path1.Size(), path2.Size(), path1.Remote(), b.opt.ResyncMode)
winningPath := b.resolveLargerSmaller(path1.Size(), path2.Size(), path1.Remote(), path2.Remote(), b.opt.ResyncMode)
// don't need to check/update modtime here, as sizes definitely differ and something will be transferred
return b.resyncWinningPathToEqual(winningPath), b.resyncWinningPathToEqual(winningPath) // skip hash check if true
}
@@ -259,7 +257,7 @@ func (b *bisyncRun) resyncTimeSizeEqual(ctxNoLogger context.Context, src fs.Obje
// note that arg order is path1, path2, regardless of src/dst
path1, path2 := b.resyncWhichIsWhich(src, dst)
if timeDiffers(ctxNoLogger, path1.ModTime(ctxNoLogger), path2.ModTime(ctxNoLogger), path1.Fs(), path2.Fs()) {
winningPath := b.resolveNewerOlder(path1.ModTime(ctxNoLogger), path2.ModTime(ctxNoLogger), path1.Remote(), b.opt.ResyncMode)
winningPath := b.resolveNewerOlder(path1.ModTime(ctxNoLogger), path2.ModTime(ctxNoLogger), path1.Remote(), path2.Remote(), b.opt.ResyncMode)
// if src is winner, proceed with equal to check size/hash and possibly just update dest modtime instead of transferring
if !b.resyncWinningPathToEqual(winningPath) {
return operations.Equal(ctxNoLogger, src, dst), false // note we're back to src/dst, not path1/path2

View File

@@ -115,7 +115,6 @@ func (x *CheckSyncMode) Type() string {
}
// Opt keeps command line options
// internal functions should use b.opt instead
var Opt Options
func init() {
@@ -141,7 +140,7 @@ func init() {
flags.BoolVarP(cmdFlags, &tzLocal, "localtime", "", tzLocal, "Use local time in listings (default: UTC)", "")
flags.BoolVarP(cmdFlags, &Opt.NoCleanup, "no-cleanup", "", Opt.NoCleanup, "Retain working files (useful for troubleshooting and testing).", "")
flags.BoolVarP(cmdFlags, &Opt.IgnoreListingChecksum, "ignore-listing-checksum", "", Opt.IgnoreListingChecksum, "Do not use checksums for listings (add --ignore-checksum to additionally skip post-copy checksum checks)", "")
flags.BoolVarP(cmdFlags, &Opt.Resilient, "resilient", "", Opt.Resilient, "Allow future runs to retry after certain less-serious errors, instead of requiring --resync.", "")
flags.BoolVarP(cmdFlags, &Opt.Resilient, "resilient", "", Opt.Resilient, "Allow future runs to retry after certain less-serious errors, instead of requiring --resync. Use at your own risk!", "")
flags.BoolVarP(cmdFlags, &Opt.Recover, "recover", "", Opt.Recover, "Automatically recover from interruptions without requiring --resync.", "")
flags.StringVarP(cmdFlags, &Opt.CompareFlag, "compare", "", Opt.CompareFlag, "Comma-separated list of bisync-specific compare options ex. 'size,modtime,checksum' (default: 'size,modtime')", "")
flags.BoolVarP(cmdFlags, &Opt.Compare.NoSlowHash, "no-slow-hash", "", Opt.Compare.NoSlowHash, "Ignore listing checksums only on backends where they are slow", "")
@@ -163,6 +162,7 @@ var commandDefinition = &cobra.Command{
Annotations: map[string]string{
"versionIntroduced": "v1.58",
"groups": "Filter,Copy,Important",
"status": "Beta",
},
RunE: func(command *cobra.Command, args []string) error {
// NOTE: avoid putting too much handling here, as it won't apply to the rc.
@@ -190,6 +190,7 @@ var commandDefinition = &cobra.Command{
}
}
fs.Logf(nil, "bisync is IN BETA. Don't use in production!")
cmd.Run(false, true, command, func() error {
err := Bisync(ctx, fs1, fs2, &opt)
if err == ErrBisyncAborted {

View File

@@ -28,7 +28,7 @@ type CompareOpt = struct {
DownloadHash bool
}
func (b *bisyncRun) setCompareDefaults(ctx context.Context) (err error) {
func (b *bisyncRun) setCompareDefaults(ctx context.Context) error {
ci := fs.GetConfig(ctx)
// defaults
@@ -120,25 +120,25 @@ func sizeDiffers(a, b int64) bool {
// returns true if the hashes are definitely different.
// returns false if equal, or if either is unknown.
func (b *bisyncRun) hashDiffers(stringA, stringB string, ht1, ht2 hash.Type, size1, size2 int64) bool {
if stringA == "" || stringB == "" {
func hashDiffers(a, b string, ht1, ht2 hash.Type, size1, size2 int64) bool {
if a == "" || b == "" {
if ht1 != hash.None && ht2 != hash.None && !(size1 <= 0 || size2 <= 0) {
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: hash unexpectedly blank despite Fs support (%s, %s) (you may need to --resync!)"), stringA, stringB)
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: hash unexpectedly blank despite Fs support (%s, %s) (you may need to --resync!)"), a, b)
}
return false
}
if ht1 != ht2 {
if !(b.downloadHashOpt.downloadHash && ((ht1 == hash.MD5 && ht2 == hash.None) || (ht1 == hash.None && ht2 == hash.MD5))) {
if !(downloadHash && ((ht1 == hash.MD5 && ht2 == hash.None) || (ht1 == hash.None && ht2 == hash.MD5))) {
fs.Infof(nil, Color(terminal.YellowFg, "WARNING: Can't compare hashes of different types (%s, %s)"), ht1.String(), ht2.String())
return false
}
}
return stringA != stringB
return a != b
}
// chooses hash type, giving priority to types both sides have in common
func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
b.downloadHashOpt.downloadHash = b.opt.Compare.DownloadHash
downloadHash = b.opt.Compare.DownloadHash
if b.opt.Compare.NoSlowHash && b.opt.Compare.SlowHashDetected {
fs.Infof(nil, "Not checking for common hash as at least one slow hash detected.")
} else {
@@ -177,7 +177,7 @@ func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
}
if (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.fs2.Features().SlowHash {
fs.Infoc(nil, Color(terminal.YellowFg, "Slow hash detected on Path2. Will ignore checksum due to slow-hash settings"))
b.opt.Compare.HashType2 = hash.None
b.opt.Compare.HashType1 = hash.None
} else {
b.opt.Compare.HashType2 = b.fs2.Hashes().GetOne()
if b.opt.Compare.HashType2 != hash.None {
@@ -219,8 +219,8 @@ func (b *bisyncRun) setFromCompareFlag(ctx context.Context) error {
return nil
}
var CompareFlag CompareOpt // for exclusions
opts := strings.SplitSeq(b.opt.CompareFlag, ",")
for opt := range opts {
opts := strings.Split(b.opt.CompareFlag, ",")
for _, opt := range opts {
switch strings.ToLower(strings.TrimSpace(opt)) {
case "size":
b.opt.Compare.Size = true
@@ -268,15 +268,13 @@ func (b *bisyncRun) setFromCompareFlag(ctx context.Context) error {
return nil
}
// b.downloadHashOpt.downloadHash is true if we should attempt to compute hash by downloading when otherwise unavailable
type downloadHashOpt struct {
downloadHash bool
downloadHashWarn mutex.Once
firstDownloadHash mutex.Once
}
// downloadHash is true if we should attempt to compute hash by downloading when otherwise unavailable
var downloadHash bool
var downloadHashWarn mutex.Once
var firstDownloadHash mutex.Once
func (b *bisyncRun) tryDownloadHash(ctx context.Context, o fs.DirEntry, hashVal string) (string, error) {
if hashVal != "" || !b.downloadHashOpt.downloadHash {
func tryDownloadHash(ctx context.Context, o fs.DirEntry, hashVal string) (string, error) {
if hashVal != "" || !downloadHash {
return hashVal, nil
}
obj, ok := o.(fs.Object)
@@ -285,14 +283,14 @@ func (b *bisyncRun) tryDownloadHash(ctx context.Context, o fs.DirEntry, hashVal
return hashVal, fs.ErrorObjectNotFound
}
if o.Size() < 0 {
b.downloadHashOpt.downloadHashWarn.Do(func() {
downloadHashWarn.Do(func() {
fs.Log(o, Color(terminal.YellowFg, "Skipping hash download as checksum not reliable with files of unknown length."))
})
fs.Debugf(o, "Skipping hash download as checksum not reliable with files of unknown length.")
return hashVal, hash.ErrUnsupported
}
b.downloadHashOpt.firstDownloadHash.Do(func() {
firstDownloadHash.Do(func() {
fs.Infoc(obj.Fs().Name(), Color(terminal.Dim, "Downloading hashes..."))
})
tr := accounting.Stats(ctx).NewCheckingTransfer(o, "computing hash with --download-hash")

View File

@@ -219,7 +219,7 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string,
}
}
if b.opt.Compare.Checksum {
if b.hashDiffers(old.getHash(file), now.getHash(file), old.hash, now.hash, old.getSize(file), now.getSize(file)) {
if hashDiffers(old.getHash(file), now.getHash(file), old.hash, now.hash, old.getSize(file), now.getSize(file)) {
fs.Debugf(file, "(old: %v current: %v)", old.getHash(file), now.getHash(file))
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "hash"))
d |= deltaHash
@@ -346,7 +346,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (result
if d2.is(deltaOther) {
// if size or hash differ, skip this, as we already know they're not equal
if (b.opt.Compare.Size && sizeDiffers(ds1.size[file], ds2.size[file2])) ||
(b.opt.Compare.Checksum && b.hashDiffers(ds1.hash[file], ds2.hash[file2], b.opt.Compare.HashType1, b.opt.Compare.HashType2, ds1.size[file], ds2.size[file2])) {
(b.opt.Compare.Checksum && hashDiffers(ds1.hash[file], ds2.hash[file2], b.opt.Compare.HashType1, b.opt.Compare.HashType2, ds1.size[file], ds2.size[file2])) {
fs.Debugf(file, "skipping equality check as size/hash definitely differ")
} else {
checkit := func(filename string) {
@@ -393,10 +393,10 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (result
// if files are identical, leave them alone instead of renaming
if (dirs1.has(file) || dirs1.has(alias)) && (dirs2.has(file) || dirs2.has(alias)) {
fs.Infof(nil, "This is a directory, not a file. Skipping equality check and will not rename: %s", file)
b.march.ls1.getPut(file, skippedDirs1)
b.march.ls2.getPut(file, skippedDirs2)
ls1.getPut(file, skippedDirs1)
ls2.getPut(file, skippedDirs2)
b.debugFn(file, func() {
b.debug(file, fmt.Sprintf("deltas dir: %s, ls1 has name?: %v, ls2 has name?: %v", file, b.march.ls1.has(b.DebugName), b.march.ls2.has(b.DebugName)))
b.debug(file, fmt.Sprintf("deltas dir: %s, ls1 has name?: %v, ls2 has name?: %v", file, ls1.has(b.DebugName), ls2.has(b.DebugName)))
})
} else {
equal := matches.Has(file)
@@ -409,16 +409,16 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (result
// the Path1 version is deemed "correct" in this scenario
fs.Infof(alias, "Files are equal but will copy anyway to fix case to %s", file)
copy1to2.Add(file)
} else if b.opt.Compare.Modtime && timeDiffers(ctx, b.march.ls1.getTime(b.march.ls1.getTryAlias(file, alias)), b.march.ls2.getTime(b.march.ls2.getTryAlias(file, alias)), b.fs1, b.fs2) {
} else if b.opt.Compare.Modtime && timeDiffers(ctx, ls1.getTime(ls1.getTryAlias(file, alias)), ls2.getTime(ls2.getTryAlias(file, alias)), b.fs1, b.fs2) {
fs.Infof(file, "Files are equal but will copy anyway to update modtime (will not rename)")
if b.march.ls1.getTime(b.march.ls1.getTryAlias(file, alias)).Before(b.march.ls2.getTime(b.march.ls2.getTryAlias(file, alias))) {
if ls1.getTime(ls1.getTryAlias(file, alias)).Before(ls2.getTime(ls2.getTryAlias(file, alias))) {
// Path2 is newer
b.indent("Path2", p1, "Queue copy to Path1")
copy2to1.Add(b.march.ls2.getTryAlias(file, alias))
copy2to1.Add(ls2.getTryAlias(file, alias))
} else {
// Path1 is newer
b.indent("Path1", p2, "Queue copy to Path2")
copy1to2.Add(b.march.ls1.getTryAlias(file, alias))
copy1to2.Add(ls1.getTryAlias(file, alias))
}
} else {
fs.Infof(nil, "Files are equal! Skipping: %s", file)
@@ -590,10 +590,10 @@ func (b *bisyncRun) updateAliases(ctx context.Context, ds1, ds2 *deltaSet) {
fullMap1 := map[string]string{} // [transformedname]originalname
fullMap2 := map[string]string{} // [transformedname]originalname
for _, name := range b.march.ls1.list {
for _, name := range ls1.list {
fullMap1[transform(name)] = name
}
for _, name := range b.march.ls2.list {
for _, name := range ls2.list {
fullMap2[transform(name)] = name
}

View File

@@ -35,7 +35,8 @@ var rcHelp = makeHelp(`This takes the following parameters
- removeEmptyDirs - remove empty directories at the final cleanup step
- filtersFile - read filtering patterns from a file
- ignoreListingChecksum - Do not use checksums for listings
- resilient - Allow future runs to retry after certain less-serious errors, instead of requiring resync.
- resilient - Allow future runs to retry after certain less-serious errors, instead of requiring resync.
Use at your own risk!
- workdir - server directory for history files (default: |~/.cache/rclone/bisync|)
- backupdir1 - --backup-dir for Path1. Must be a non-overlapping path on the same remote.
- backupdir2 - --backup-dir for Path2. Must be a non-overlapping path on the same remote.
@@ -51,15 +52,14 @@ var longHelp = shortHelp + makeHelp(`
bidirectional cloud sync solution in rclone.
It retains the Path1 and Path2 filesystem listings from the prior run.
On each successive run it will:
- list files on Path1 and Path2, and check for changes on each side.
Changes include |New|, |Newer|, |Older|, and |Deleted| files.
- Propagate changes on Path1 to Path2, and vice-versa.
Bisync is considered an **advanced command**, so use with care.
Bisync is **in beta** and is considered an **advanced command**, so use with care.
Make sure you have read and understood the entire [manual](https://rclone.org/bisync)
(especially the [Limitations](https://rclone.org/bisync/#limitations) section)
before using, or data loss can result. Questions can be asked in the
[Rclone Forum](https://forum.rclone.org/).
(especially the [Limitations](https://rclone.org/bisync/#limitations) section) before using,
or data loss can result. Questions can be asked in the [Rclone Forum](https://forum.rclone.org/).
See [full bisync description](https://rclone.org/bisync/) for details.`)
See [full bisync description](https://rclone.org/bisync/) for details.
`)
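For context, a sketch of invoking bisync over the rc interface whose parameters are listed above; the localhost:5572 address and the path1/path2 parameter names are assumptions based on the surrounding help text.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

func main() {
	payload, _ := json.Marshal(map[string]any{
		"path1":  "/tmp/path1",
		"path2":  "remote:path2",
		"resync": true, // first run only
	})
	resp, err := http.Post("http://localhost:5572/sync/bisync", "application/json", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```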

View File

@@ -42,14 +42,10 @@ var lineRegex = regexp.MustCompile(`^(\S) +(-?\d+) (\S+) (\S+) (\d{4}-\d\d-\d\dT
// timeFormat defines time format used in listings
const timeFormat = "2006-01-02T15:04:05.000000000-0700"
// TZ defines time zone used in listings
var (
// TZ defines time zone used in listings
TZ = time.UTC
tzLocal = false
// LogTZ defines time zone used in logs (which may be different than that used in listings).
// time.Local by default, but we force UTC on tests to make them deterministic regardless of tester's location.
LogTZ = time.Local
)
// fileInfo describes a file
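An aside on the timeFormat constant above: it is a standard Go reference layout, so listing timestamps round-trip through time.Parse and Format, e.g.:

```go
package main

import (
	"fmt"
	"time"
)

const timeFormat = "2006-01-02T15:04:05.000000000-0700"

func main() {
	t, err := time.Parse(timeFormat, "2024-01-02T03:04:05.000000000+0000")
	if err != nil {
		panic(err)
	}
	fmt.Println(t.In(time.UTC).Format(timeFormat))
}
```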
@@ -202,8 +198,8 @@ func (b *bisyncRun) fileInfoEqual(file1, file2 string, ls1, ls2 *fileList) bool
equal = false
}
}
if b.opt.Compare.Checksum && !b.queueOpt.ignoreListingChecksum {
if b.hashDiffers(ls1.getHash(file1), ls2.getHash(file2), b.opt.Compare.HashType1, b.opt.Compare.HashType2, ls1.getSize(file1), ls2.getSize(file2)) {
if b.opt.Compare.Checksum && !ignoreListingChecksum {
if hashDiffers(ls1.getHash(file1), ls2.getHash(file2), b.opt.Compare.HashType1, b.opt.Compare.HashType2, ls1.getSize(file1), ls2.getSize(file2)) {
b.indent("ERROR", file1, fmt.Sprintf("Checksum not equal in listing. Path1: %v, Path2: %v", ls1.getHash(file1), ls2.getHash(file2)))
equal = false
}
@@ -247,7 +243,7 @@ func (ls *fileList) sort() {
}
// save will save listing to a file.
func (ls *fileList) save(listing string) error {
func (ls *fileList) save(ctx context.Context, listing string) error {
file, err := os.Create(listing)
if err != nil {
return err
@@ -434,6 +430,7 @@ func (b *bisyncRun) listDirsOnly(listingNum int) (*fileList, error) {
}
fulllisting, err = b.loadListingNum(listingNum)
if err != nil {
b.critical = true
b.retryable = true
@@ -609,11 +606,6 @@ func (b *bisyncRun) modifyListing(ctx context.Context, src fs.Fs, dst fs.Fs, res
}
}
if srcNewName != "" { // if it was renamed and not deleted
if new == nil { // should not happen. log error and debug info
b.handleErr(b.renames, "internal error", fmt.Errorf("missing info for %q. Please report a bug at https://github.com/rclone/rclone/issues", srcNewName), true, true)
fs.PrettyPrint(srcList, "srcList for debugging", fs.LogLevelNotice)
continue
}
srcList.put(srcNewName, new.size, new.time, new.hash, new.id, new.flags)
dstList.put(srcNewName, new.size, new.time, new.hash, new.id, new.flags)
}
@@ -707,7 +699,8 @@ func (b *bisyncRun) modifyListing(ctx context.Context, src fs.Fs, dst fs.Fs, res
prettyprint(dstList.list, "dstList", fs.LogLevelDebug)
// clear stats so we only do this once
accounting.Stats(ctx).RemoveDoneTransfers()
accounting.MaxCompletedTransfers = 0
accounting.Stats(ctx).PruneTransfers()
}
if b.DebugName != "" {
@@ -715,9 +708,9 @@ func (b *bisyncRun) modifyListing(ctx context.Context, src fs.Fs, dst fs.Fs, res
b.debug(b.DebugName, fmt.Sprintf("%s pre-save dstList has it?: %v", direction, dstList.has(b.DebugName)))
}
// update files
err = srcList.save(srcListing)
err = srcList.save(ctx, srcListing)
b.handleErr(srcList, "error saving srcList from modifyListing", err, true, true)
err = dstList.save(dstListing)
err = dstList.save(ctx, dstListing)
b.handleErr(dstList, "error saving dstList from modifyListing", err, true, true)
return err
@@ -748,7 +741,7 @@ func (b *bisyncRun) recheck(ctxRecheck context.Context, src, dst fs.Fs, srcList,
if hashType != hash.None {
hashVal, _ = obj.Hash(ctxRecheck, hashType)
}
hashVal, _ = b.tryDownloadHash(ctxRecheck, obj, hashVal)
hashVal, _ = tryDownloadHash(ctxRecheck, obj, hashVal)
}
var modtime time.Time
if b.opt.Compare.Modtime {
@@ -762,7 +755,7 @@ func (b *bisyncRun) recheck(ctxRecheck context.Context, src, dst fs.Fs, srcList,
for _, dstObj := range dstObjs {
if srcObj.Remote() == dstObj.Remote() || srcObj.Remote() == b.aliases.Alias(dstObj.Remote()) {
// note: unlike Equal(), WhichEqual() does not update the modtime in dest if sums match but modtimes don't.
if b.opt.DryRun || b.WhichEqual(ctxRecheck, srcObj, dstObj, src, dst) {
if b.opt.DryRun || WhichEqual(ctxRecheck, srcObj, dstObj, src, dst) {
putObj(srcObj, srcList)
putObj(dstObj, dstList)
resolved = append(resolved, srcObj.Remote())
@@ -776,7 +769,7 @@ func (b *bisyncRun) recheck(ctxRecheck context.Context, src, dst fs.Fs, srcList,
// skip and error during --resync, as rollback is not possible
if !slices.Contains(resolved, srcObj.Remote()) && !b.opt.DryRun {
if b.opt.Resync {
err := errors.New("no dstObj match or files not equal")
err = errors.New("no dstObj match or files not equal")
b.handleErr(srcObj, "Unable to rollback during --resync", err, true, false)
} else {
toRollback = append(toRollback, srcObj.Remote())

View File

@@ -16,17 +16,16 @@ import (
const basicallyforever = fs.Duration(200 * 365 * 24 * time.Hour)
type lockFileOpt struct {
stopRenewal func()
data struct {
Session string
PID string
TimeRenewed time.Time
TimeExpires time.Time
}
}
var stopRenewal func()
func (b *bisyncRun) setLockFile() (err error) {
var data = struct {
Session string
PID string
TimeRenewed time.Time
TimeExpires time.Time
}{}
func (b *bisyncRun) setLockFile() error {
b.lockFile = ""
b.setLockFileExpiration()
if !b.opt.DryRun {
@@ -46,23 +45,24 @@ func (b *bisyncRun) setLockFile() (err error) {
}
fs.Debugf(nil, "Lock file created: %s", b.lockFile)
b.renewLockFile()
b.lockFileOpt.stopRenewal = b.startLockRenewal()
stopRenewal = b.startLockRenewal()
}
return nil
}
func (b *bisyncRun) removeLockFile() (err error) {
func (b *bisyncRun) removeLockFile() {
if b.lockFile != "" {
b.lockFileOpt.stopRenewal()
err = os.Remove(b.lockFile)
if err == nil {
stopRenewal()
errUnlock := os.Remove(b.lockFile)
if errUnlock == nil {
fs.Debugf(nil, "Lock file removed: %s", b.lockFile)
} else if err == nil {
err = errUnlock
} else {
fs.Errorf(nil, "cannot remove lockfile %s: %v", b.lockFile, err)
fs.Errorf(nil, "cannot remove lockfile %s: %v", b.lockFile, errUnlock)
}
b.lockFile = "" // block removing it again
}
return err
}
func (b *bisyncRun) setLockFileExpiration() {
@@ -77,18 +77,18 @@ func (b *bisyncRun) setLockFileExpiration() {
func (b *bisyncRun) renewLockFile() {
if b.lockFile != "" && bilib.FileExists(b.lockFile) {
b.lockFileOpt.data.Session = b.basePath
b.lockFileOpt.data.PID = strconv.Itoa(os.Getpid())
b.lockFileOpt.data.TimeRenewed = time.Now()
b.lockFileOpt.data.TimeExpires = time.Now().Add(time.Duration(b.opt.MaxLock))
data.Session = b.basePath
data.PID = strconv.Itoa(os.Getpid())
data.TimeRenewed = time.Now()
data.TimeExpires = time.Now().Add(time.Duration(b.opt.MaxLock))
// save data file
df, err := os.Create(b.lockFile)
b.handleErr(b.lockFile, "error renewing lock file", err, true, true)
b.handleErr(b.lockFile, "error encoding JSON to lock file", json.NewEncoder(df).Encode(b.lockFileOpt.data), true, true)
b.handleErr(b.lockFile, "error encoding JSON to lock file", json.NewEncoder(df).Encode(data), true, true)
b.handleErr(b.lockFile, "error closing lock file", df.Close(), true, true)
if b.opt.MaxLock < basicallyforever {
fs.Infof(nil, Color(terminal.HiBlueFg, "lock file renewed for %v. New expiration: %v"), b.opt.MaxLock, b.lockFileOpt.data.TimeExpires)
fs.Infof(nil, Color(terminal.HiBlueFg, "lock file renewed for %v. New expiration: %v"), b.opt.MaxLock, data.TimeExpires)
}
}
}
@@ -99,7 +99,7 @@ func (b *bisyncRun) lockFileIsExpired() bool {
b.handleErr(b.lockFile, "error reading lock file", err, true, true)
dec := json.NewDecoder(rdf)
for {
if err := dec.Decode(&b.lockFileOpt.data); err != nil {
if err := dec.Decode(&data); err != nil {
if err != io.EOF {
fs.Errorf(b.lockFile, "err: %v", err)
}
@@ -107,14 +107,14 @@ func (b *bisyncRun) lockFileIsExpired() bool {
}
}
b.handleErr(b.lockFile, "error closing file", rdf.Close(), true, true)
if !b.lockFileOpt.data.TimeExpires.IsZero() && b.lockFileOpt.data.TimeExpires.Before(time.Now()) {
fs.Infof(b.lockFile, Color(terminal.GreenFg, "Lock file found, but it expired at %v. Will delete it and proceed."), b.lockFileOpt.data.TimeExpires)
if !data.TimeExpires.IsZero() && data.TimeExpires.Before(time.Now()) {
fs.Infof(b.lockFile, Color(terminal.GreenFg, "Lock file found, but it expired at %v. Will delete it and proceed."), data.TimeExpires)
markFailed(b.listing1) // listing is untrusted so force revert to prior (if --recover) or create new ones (if --resync)
markFailed(b.listing2)
return true
}
fs.Infof(b.lockFile, Color(terminal.RedFg, "Valid lock file found. Expires at %v. (%v from now)"), b.lockFileOpt.data.TimeExpires, time.Since(b.lockFileOpt.data.TimeExpires).Abs().Round(time.Second))
prettyprint(b.lockFileOpt.data, "Lockfile info", fs.LogLevelInfo)
fs.Infof(b.lockFile, Color(terminal.RedFg, "Valid lock file found. Expires at %v. (%v from now)"), data.TimeExpires, time.Since(data.TimeExpires).Abs().Round(time.Second))
prettyprint(data, "Lockfile info", fs.LogLevelInfo)
}
return false
}
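For orientation, a sketch that reads a lock file using only the fields visible in this diff (Session, PID, TimeRenewed, TimeExpires) and reports whether it has expired; the file path is hypothetical.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"time"
)

type lockData struct {
	Session     string
	PID         string
	TimeRenewed time.Time
	TimeExpires time.Time
}

func main() {
	f, err := os.Open("/tmp/example.lck") // hypothetical path
	if err != nil {
		panic(err)
	}
	defer f.Close()
	var d lockData
	if err := json.NewDecoder(f).Decode(&d); err != nil {
		panic(err)
	}
	expired := !d.TimeExpires.IsZero() && d.TimeExpires.Before(time.Now())
	fmt.Printf("session %s (pid %s) expired: %v\n", d.Session, d.PID, expired)
}
```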

View File

@@ -6,7 +6,6 @@ import (
"runtime"
"strconv"
"strings"
"sync"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/encoder"
@@ -68,15 +67,10 @@ func quotePath(path string) string {
}
// Colors controls whether terminal colors are enabled
var (
Colors bool
ColorsLock sync.Mutex
)
var Colors bool
// Color handles terminal colors for bisync
func Color(style string, s string) string {
ColorsLock.Lock()
defer ColorsLock.Unlock()
if !Colors {
return s
}
@@ -86,8 +80,6 @@ func Color(style string, s string) string {
// ColorX handles terminal colors for bisync
func ColorX(style string, s string) string {
ColorsLock.Lock()
defer ColorsLock.Unlock()
if !Colors {
return s
}

View File

@@ -12,20 +12,18 @@ import (
"github.com/rclone/rclone/fs/march"
)
type bisyncMarch struct {
ls1 *fileList
ls2 *fileList
err error
firstErr error
marchAliasLock sync.Mutex
marchLsLock sync.Mutex
marchErrLock sync.Mutex
marchCtx context.Context
}
var ls1 = newFileList()
var ls2 = newFileList()
var err error
var firstErr error
var marchAliasLock sync.Mutex
var marchLsLock sync.Mutex
var marchErrLock sync.Mutex
var marchCtx context.Context
func (b *bisyncRun) makeMarchListing(ctx context.Context) (*fileList, *fileList, error) {
ci := fs.GetConfig(ctx)
b.march.marchCtx = ctx
marchCtx = ctx
b.setupListing()
fs.Debugf(b, "starting to march!")
@@ -41,31 +39,31 @@ func (b *bisyncRun) makeMarchListing(ctx context.Context) (*fileList, *fileList,
NoCheckDest: false,
NoUnicodeNormalization: ci.NoUnicodeNormalization,
}
b.march.err = m.Run(ctx)
err = m.Run(ctx)
fs.Debugf(b, "march completed. err: %v", b.march.err)
if b.march.err == nil {
b.march.err = b.march.firstErr
fs.Debugf(b, "march completed. err: %v", err)
if err == nil {
err = firstErr
}
if b.march.err != nil {
b.handleErr("march", "error during march", b.march.err, true, true)
if err != nil {
b.handleErr("march", "error during march", err, true, true)
b.abort = true
return b.march.ls1, b.march.ls2, b.march.err
return ls1, ls2, err
}
// save files
if b.opt.Compare.DownloadHash && b.march.ls1.hash == hash.None {
b.march.ls1.hash = hash.MD5
if b.opt.Compare.DownloadHash && ls1.hash == hash.None {
ls1.hash = hash.MD5
}
if b.opt.Compare.DownloadHash && b.march.ls2.hash == hash.None {
b.march.ls2.hash = hash.MD5
if b.opt.Compare.DownloadHash && ls2.hash == hash.None {
ls2.hash = hash.MD5
}
b.march.err = b.march.ls1.save(b.newListing1)
b.handleErr(b.march.ls1, "error saving b.march.ls1 from march", b.march.err, true, true)
b.march.err = b.march.ls2.save(b.newListing2)
b.handleErr(b.march.ls2, "error saving b.march.ls2 from march", b.march.err, true, true)
err = ls1.save(ctx, b.newListing1)
b.handleErr(ls1, "error saving ls1 from march", err, true, true)
err = ls2.save(ctx, b.newListing2)
b.handleErr(ls2, "error saving ls2 from march", err, true, true)
return b.march.ls1, b.march.ls2, b.march.err
return ls1, ls2, err
}
// SrcOnly have an object which is on path1 only
@@ -85,9 +83,9 @@ func (b *bisyncRun) DstOnly(o fs.DirEntry) (recurse bool) {
// Match is called when object exists on both path1 and path2 (whether equal or not)
func (b *bisyncRun) Match(ctx context.Context, o2, o1 fs.DirEntry) (recurse bool) {
fs.Debugf(o1, "both path1 and path2")
b.march.marchAliasLock.Lock()
marchAliasLock.Lock()
b.aliases.Add(o1.Remote(), o2.Remote())
b.march.marchAliasLock.Unlock()
marchAliasLock.Unlock()
b.parse(o1, true)
b.parse(o2, false)
return isDir(o1)
@@ -121,76 +119,76 @@ func (b *bisyncRun) parse(e fs.DirEntry, isPath1 bool) {
}
func (b *bisyncRun) setupListing() {
b.march.ls1 = newFileList()
b.march.ls2 = newFileList()
ls1 = newFileList()
ls2 = newFileList()
// note that --ignore-listing-checksum is different from --ignore-checksum
// and we already checked it when we set b.opt.Compare.HashType1 and 2
b.march.ls1.hash = b.opt.Compare.HashType1
b.march.ls2.hash = b.opt.Compare.HashType2
ls1.hash = b.opt.Compare.HashType1
ls2.hash = b.opt.Compare.HashType2
}
func (b *bisyncRun) ForObject(o fs.Object, isPath1 bool) {
tr := accounting.Stats(b.march.marchCtx).NewCheckingTransfer(o, "listing file - "+whichPath(isPath1))
tr := accounting.Stats(marchCtx).NewCheckingTransfer(o, "listing file - "+whichPath(isPath1))
defer func() {
tr.Done(b.march.marchCtx, nil)
tr.Done(marchCtx, nil)
}()
var (
hashVal string
hashErr error
)
ls := b.whichLs(isPath1)
ls := whichLs(isPath1)
hashType := ls.hash
if hashType != hash.None {
hashVal, hashErr = o.Hash(b.march.marchCtx, hashType)
b.march.marchErrLock.Lock()
if b.march.firstErr == nil {
b.march.firstErr = hashErr
hashVal, hashErr = o.Hash(marchCtx, hashType)
marchErrLock.Lock()
if firstErr == nil {
firstErr = hashErr
}
b.march.marchErrLock.Unlock()
marchErrLock.Unlock()
}
hashVal, hashErr = b.tryDownloadHash(b.march.marchCtx, o, hashVal)
b.march.marchErrLock.Lock()
if b.march.firstErr == nil {
b.march.firstErr = hashErr
hashVal, hashErr = tryDownloadHash(marchCtx, o, hashVal)
marchErrLock.Lock()
if firstErr == nil {
firstErr = hashErr
}
if b.march.firstErr != nil {
b.handleErr(hashType, "error hashing during march", b.march.firstErr, false, true)
if firstErr != nil {
b.handleErr(hashType, "error hashing during march", firstErr, false, true)
}
b.march.marchErrLock.Unlock()
marchErrLock.Unlock()
var modtime time.Time
if b.opt.Compare.Modtime {
modtime = o.ModTime(b.march.marchCtx).In(TZ)
modtime = o.ModTime(marchCtx).In(TZ)
}
id := "" // TODO: ID(o)
flags := "-" // "-" for a file and "d" for a directory
b.march.marchLsLock.Lock()
marchLsLock.Lock()
ls.put(o.Remote(), o.Size(), modtime, hashVal, id, flags)
b.march.marchLsLock.Unlock()
marchLsLock.Unlock()
}
func (b *bisyncRun) ForDir(o fs.Directory, isPath1 bool) {
tr := accounting.Stats(b.march.marchCtx).NewCheckingTransfer(o, "listing dir - "+whichPath(isPath1))
tr := accounting.Stats(marchCtx).NewCheckingTransfer(o, "listing dir - "+whichPath(isPath1))
defer func() {
tr.Done(b.march.marchCtx, nil)
tr.Done(marchCtx, nil)
}()
ls := b.whichLs(isPath1)
ls := whichLs(isPath1)
var modtime time.Time
if b.opt.Compare.Modtime {
modtime = o.ModTime(b.march.marchCtx).In(TZ)
modtime = o.ModTime(marchCtx).In(TZ)
}
id := "" // TODO
flags := "d" // "-" for a file and "d" for a directory
b.march.marchLsLock.Lock()
marchLsLock.Lock()
ls.put(o.Remote(), -1, modtime, "", id, flags)
b.march.marchLsLock.Unlock()
marchLsLock.Unlock()
}
func (b *bisyncRun) whichLs(isPath1 bool) *fileList {
ls := b.march.ls1
func whichLs(isPath1 bool) *fileList {
ls := ls1
if !isPath1 {
ls = b.march.ls2
ls = ls2
}
return ls
}
@@ -208,7 +206,7 @@ func (b *bisyncRun) findCheckFiles(ctx context.Context) (*fileList, *fileList, e
b.handleErr(b.opt.CheckFilename, "error adding CheckFilename to filter", filterCheckFile.Add(true, b.opt.CheckFilename), true, true)
b.handleErr(b.opt.CheckFilename, "error adding ** exclusion to filter", filterCheckFile.Add(false, "**"), true, true)
ci := fs.GetConfig(ctxCheckFile)
b.march.marchCtx = ctxCheckFile
marchCtx = ctxCheckFile
b.setupListing()
fs.Debugf(b, "starting to march!")
@@ -225,18 +223,18 @@ func (b *bisyncRun) findCheckFiles(ctx context.Context) (*fileList, *fileList, e
NoCheckDest: false,
NoUnicodeNormalization: ci.NoUnicodeNormalization,
}
b.march.err = m.Run(ctxCheckFile)
err = m.Run(ctxCheckFile)
fs.Debugf(b, "march completed. err: %v", b.march.err)
if b.march.err == nil {
b.march.err = b.march.firstErr
fs.Debugf(b, "march completed. err: %v", err)
if err == nil {
err = firstErr
}
if b.march.err != nil {
b.handleErr("march", "error during findCheckFiles", b.march.err, true, true)
if err != nil {
b.handleErr("march", "error during findCheckFiles", err, true, true)
b.abort = true
}
return b.march.ls1, b.march.ls2, b.march.err
return ls1, ls2, err
}
// ID returns the ID of the Object if known, or "" if not

View File

@@ -51,11 +51,6 @@ type bisyncRun struct {
lockFile string
renames renames
resyncIs1to2 bool
- march bisyncMarch
- check bisyncCheck
- queueOpt bisyncQueueOpt
- downloadHashOpt downloadHashOpt
- lockFileOpt lockFileOpt
}
type queues struct {
@@ -69,6 +64,7 @@ type queues struct {
// Bisync handles lock file, performs bisync run and checks exit status
func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
+ defer resetGlobals()
opt := *optArg // ensure that input is never changed
b := &bisyncRun{
fs1: fs1,
@@ -87,9 +83,7 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
opt.OrigBackupDir = ci.BackupDir
if ci.TerminalColorMode == fs.TerminalColorModeAlways || (ci.TerminalColorMode == fs.TerminalColorModeAuto && !log.Redirected()) {
- ColorsLock.Lock()
Colors = true
- ColorsLock.Unlock()
}
err = b.setCompareDefaults(ctx)
@@ -99,7 +93,7 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
b.setResyncDefaults()
- err = b.setResolveDefaults()
+ err = b.setResolveDefaults(ctx)
if err != nil {
return err
}
@@ -130,8 +124,6 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
return err
}
- b.queueOpt.logger = operations.NewLoggerOpt()
// Handle SIGINT
var finaliseOnce gosync.Once
@@ -169,7 +161,7 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
markFailed(b.listing1)
markFailed(b.listing2)
}
- err = b.removeLockFile()
+ b.removeLockFile()
}
})
}
@@ -179,10 +171,7 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
// run bisync
err = b.runLocked(ctx)
- removeLockErr := b.removeLockFile()
- if err == nil {
- err = removeLockErr
- }
+ b.removeLockFile()
b.CleanupCompleted = true
if b.InGracefulShutdown {
@@ -273,7 +262,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
// Generate Path1 and Path2 listings and copy any unique Path2 files to Path1
if opt.Resync {
- return b.resync(fctx)
+ return b.resync(octx, fctx)
}
// Check for existence of prior Path1 and Path2 listings
@@ -308,7 +297,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
}
fs.Infof(nil, "Building Path1 and Path2 listings")
- b.march.ls1, b.march.ls2, err = b.makeMarchListing(fctx)
+ ls1, ls2, err = b.makeMarchListing(fctx)
if err != nil || accounting.Stats(fctx).Errored() {
fs.Error(nil, Color(terminal.RedFg, "There were errors while building listings. Aborting as it is too dangerous to continue."))
b.critical = true
@@ -318,7 +307,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
// Check for Path1 deltas relative to the prior sync
fs.Infof(nil, "Path1 checking for diffs")
- ds1, err := b.findDeltas(fctx, b.fs1, b.listing1, b.march.ls1, "Path1")
+ ds1, err := b.findDeltas(fctx, b.fs1, b.listing1, ls1, "Path1")
if err != nil {
return err
}
@@ -326,7 +315,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
// Check for Path2 deltas relative to the prior sync
fs.Infof(nil, "Path2 checking for diffs")
- ds2, err := b.findDeltas(fctx, b.fs2, b.listing2, b.march.ls2, "Path2")
+ ds2, err := b.findDeltas(fctx, b.fs2, b.listing2, ls2, "Path2")
if err != nil {
return err
}
@@ -400,7 +389,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
newl1, _ := b.loadListing(b.newListing1)
newl2, _ := b.loadListing(b.newListing2)
b.debug(b.DebugName, fmt.Sprintf("pre-saveOldListings, ls1 has name?: %v, ls2 has name?: %v", l1.has(b.DebugName), l2.has(b.DebugName)))
- b.debug(b.DebugName, fmt.Sprintf("pre-saveOldListings, newls1 has name?: %v, ls2 has name?: %v", newl1.has(b.DebugName), newl2.has(b.DebugName)))
+ b.debug(b.DebugName, fmt.Sprintf("pre-saveOldListings, newls1 has name?: %v, newls2 has name?: %v", newl1.has(b.DebugName), newl2.has(b.DebugName)))
}
b.saveOldListings()
// save new listings
@@ -564,7 +553,7 @@ func (b *bisyncRun) setBackupDir(ctx context.Context, destPath int) context.Cont
return ctx
}
- func (b *bisyncRun) overlappingPathsCheck(fctx context.Context, fs1, fs2 fs.Fs) (err error) {
+ func (b *bisyncRun) overlappingPathsCheck(fctx context.Context, fs1, fs2 fs.Fs) error {
if operations.OverlappingFilterCheck(fctx, fs2, fs1) {
err = errors.New(Color(terminal.RedFg, "Overlapping paths detected. Cannot bisync between paths that overlap, unless excluded by filters."))
return err
@@ -597,7 +586,7 @@ func (b *bisyncRun) overlappingPathsCheck(fctx context.Context, fs1, fs2 fs.Fs)
return nil
}
- func (b *bisyncRun) checkSyntax() (err error) {
+ func (b *bisyncRun) checkSyntax() error {
// check for odd number of quotes in path, usually indicating an escaping issue
path1 := bilib.FsPath(b.fs1)
path2 := bilib.FsPath(b.fs2)
@@ -645,3 +634,25 @@ func waitFor(msg string, totalWait time.Duration, fn func() bool) (ok bool) {
}
return false
}
+ // mainly to make sure tests don't interfere with each other when running more than one
+ func resetGlobals() {
+ downloadHash = false
+ logger = operations.NewLoggerOpt()
+ ignoreListingChecksum = false
+ ignoreListingModtime = false
+ hashTypes = nil
+ queueCI = nil
+ hashType = 0
+ fsrc, fdst = nil, nil
+ fcrypt = nil
+ Opt = Options{}
+ once = gosync.Once{}
+ downloadHashWarn = gosync.Once{}
+ firstDownloadHash = gosync.Once{}
+ ls1 = newFileList()
+ ls2 = newFileList()
+ err = nil
+ firstErr = nil
+ marchCtx = nil
+ }


@@ -51,19 +51,19 @@ func (rs *ResultsSlice) has(name string) bool {
return false
}
- type bisyncQueueOpt struct {
- logger operations.LoggerOpt
+ var (
+ logger = operations.NewLoggerOpt()
lock mutex.Mutex
once mutex.Once
ignoreListingChecksum bool
ignoreListingModtime bool
hashTypes map[string]hash.Type
queueCI *fs.ConfigInfo
- }
+ )
// allows us to get the right hashtype during the LoggerFn without knowing whether it's Path1/Path2
- func (b *bisyncRun) getHashType(fname string) hash.Type {
- ht, ok := b.queueOpt.hashTypes[fname]
+ func getHashType(fname string) hash.Type {
+ ht, ok := hashTypes[fname]
if ok {
return ht
}
@@ -106,9 +106,9 @@ func altName(name string, src, dst fs.DirEntry) string {
}
// WriteResults is Bisync's LoggerFn
- func (b *bisyncRun) WriteResults(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEntry, err error) {
- b.queueOpt.lock.Lock()
- defer b.queueOpt.lock.Unlock()
+ func WriteResults(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEntry, err error) {
+ lock.Lock()
+ defer lock.Unlock()
opt := operations.GetLoggerOpt(ctx)
result := Results{
@@ -131,14 +131,14 @@ func (b *bisyncRun) WriteResults(ctx context.Context, sigil operations.Sigil, sr
result.Flags = "-"
if side != nil {
result.Size = side.Size()
- if !b.queueOpt.ignoreListingModtime {
+ if !ignoreListingModtime {
result.Modtime = side.ModTime(ctx).In(TZ)
}
- if !b.queueOpt.ignoreListingChecksum {
+ if !ignoreListingChecksum {
sideObj, ok := side.(fs.ObjectInfo)
if ok {
- result.Hash, _ = sideObj.Hash(ctx, b.getHashType(sideObj.Fs().Name()))
- result.Hash, _ = b.tryDownloadHash(ctx, sideObj, result.Hash)
+ result.Hash, _ = sideObj.Hash(ctx, getHashType(sideObj.Fs().Name()))
+ result.Hash, _ = tryDownloadHash(ctx, sideObj, result.Hash)
}
}
@@ -159,8 +159,8 @@ func (b *bisyncRun) WriteResults(ctx context.Context, sigil operations.Sigil, sr
}
prettyprint(result, "writing result", fs.LogLevelDebug)
- if result.Size < 0 && result.Flags != "d" && ((b.queueOpt.queueCI.CheckSum && !b.downloadHashOpt.downloadHash) || b.queueOpt.queueCI.SizeOnly) {
- b.queueOpt.once.Do(func() {
+ if result.Size < 0 && result.Flags != "d" && ((queueCI.CheckSum && !downloadHash) || queueCI.SizeOnly) {
+ once.Do(func() {
fs.Log(result.Name, Color(terminal.YellowFg, "Files of unknown size (such as Google Docs) do not sync reliably with --checksum or --size-only. Consider using modtime instead (the default) or --drive-skip-gdocs"))
})
}
@@ -189,14 +189,14 @@ func ReadResults(results io.Reader) []Results {
// for setup code shared by both fastCopy and resyncDir
func (b *bisyncRun) preCopy(ctx context.Context) context.Context {
- b.queueOpt.queueCI = fs.GetConfig(ctx)
- b.queueOpt.ignoreListingChecksum = b.opt.IgnoreListingChecksum
- b.queueOpt.ignoreListingModtime = !b.opt.Compare.Modtime
- b.queueOpt.hashTypes = map[string]hash.Type{
+ queueCI = fs.GetConfig(ctx)
+ ignoreListingChecksum = b.opt.IgnoreListingChecksum
+ ignoreListingModtime = !b.opt.Compare.Modtime
+ hashTypes = map[string]hash.Type{
b.fs1.Name(): b.opt.Compare.HashType1,
b.fs2.Name(): b.opt.Compare.HashType2,
}
- b.queueOpt.logger.LoggerFn = b.WriteResults
+ logger.LoggerFn = WriteResults
overridingEqual := false
if (b.opt.Compare.Modtime && b.opt.Compare.Checksum) || b.opt.Compare.DownloadHash {
overridingEqual = true
@@ -209,15 +209,15 @@ func (b *bisyncRun) preCopy(ctx context.Context) context.Context {
fs.Debugf(nil, "overriding equal")
ctx = b.EqualFn(ctx)
}
- ctxCopyLogger := operations.WithSyncLogger(ctx, b.queueOpt.logger)
+ ctxCopyLogger := operations.WithSyncLogger(ctx, logger)
if b.opt.Compare.Checksum && (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.opt.Compare.SlowHashDetected {
// set here in case !b.opt.Compare.Modtime
- b.queueOpt.queueCI = fs.GetConfig(ctxCopyLogger)
+ queueCI = fs.GetConfig(ctxCopyLogger)
if b.opt.Compare.NoSlowHash {
- b.queueOpt.queueCI.CheckSum = false
+ queueCI.CheckSum = false
}
if b.opt.Compare.SlowHashSyncOnly && !overridingEqual {
- b.queueOpt.queueCI.CheckSum = true
+ queueCI.CheckSum = true
}
}
return ctxCopyLogger
@@ -245,14 +245,14 @@ func (b *bisyncRun) fastCopy(ctx context.Context, fsrc, fdst fs.Fs, files bilib.
}
}
- b.SyncCI = fs.GetConfig(ctxCopy) // allows us to request graceful shutdown
- accounting.Stats(ctxCopy).SetMaxCompletedTransfers(-1) // we need a complete list in the event of graceful shutdown
+ b.SyncCI = fs.GetConfig(ctxCopy) // allows us to request graceful shutdown
+ accounting.MaxCompletedTransfers = -1 // we need a complete list in the event of graceful shutdown
ctxCopy, b.CancelSync = context.WithCancel(ctxCopy)
b.testFn()
err := sync.Sync(ctxCopy, fdst, fsrc, b.opt.CreateEmptySrcDirs)
- prettyprint(b.queueOpt.logger, "b.queueOpt.logger", fs.LogLevelDebug)
+ prettyprint(logger, "logger", fs.LogLevelDebug)
- getResults := ReadResults(b.queueOpt.logger.JSON)
+ getResults := ReadResults(logger.JSON)
fs.Debugf(nil, "Got %v results for %v", len(getResults), queueName)
lineFormat := "%s %8d %s %s %s %q\n"
@@ -292,9 +292,9 @@ func (b *bisyncRun) resyncDir(ctx context.Context, fsrc, fdst fs.Fs) ([]Results,
ctx = b.preCopy(ctx)
err := sync.CopyDir(ctx, fdst, fsrc, b.opt.CreateEmptySrcDirs)
- prettyprint(b.queueOpt.logger, "b.queueOpt.logger", fs.LogLevelDebug)
+ prettyprint(logger, "logger", fs.LogLevelDebug)
- getResults := ReadResults(b.queueOpt.logger.JSON)
+ getResults := ReadResults(logger.JSON)
fs.Debugf(nil, "Got %v results for %v", len(getResults), "resync")
return getResults, err


@@ -77,7 +77,7 @@ func (conflictLoserChoices) Type() string {
// ConflictLoserList is a list of --conflict-loser flag choices used in the help
var ConflictLoserList = Opt.ConflictLoser.Help()
- func (b *bisyncRun) setResolveDefaults() error {
+ func (b *bisyncRun) setResolveDefaults(ctx context.Context) error {
if b.opt.ConflictLoser == ConflictLoserSkip {
b.opt.ConflictLoser = ConflictLoserNumber
}
@@ -135,7 +135,7 @@ type namePair struct {
newName string
}
- func (b *bisyncRun) resolve(ctxMove context.Context, path1, path2, file, alias string, renameSkipped, copy1to2, copy2to1 *bilib.Names, ds1, ds2 *deltaSet) (err error) {
+ func (b *bisyncRun) resolve(ctxMove context.Context, path1, path2, file, alias string, renameSkipped, copy1to2, copy2to1 *bilib.Names, ds1, ds2 *deltaSet) error {
winningPath := 0
if b.opt.ConflictResolve != PreferNone {
winningPath = b.conflictWinner(ds1, ds2, file, alias)
@@ -197,7 +197,7 @@ func (b *bisyncRun) resolve(ctxMove context.Context, path1, path2, file, alias s
// note also that deletes and renames are mutually exclusive -- we never delete one path and rename the other.
if b.opt.ConflictLoser == ConflictLoserDelete && winningPath == 1 {
// delete 2, copy 1 to 2
- err = b.delete(ctxMove, r.path2, path2, b.fs2, 2, renameSkipped)
+ err = b.delete(ctxMove, r.path2, path2, path1, b.fs2, 2, 1, renameSkipped)
if err != nil {
return err
}
@@ -207,7 +207,7 @@ func (b *bisyncRun) resolve(ctxMove context.Context, path1, path2, file, alias s
copy1to2.Add(r.path1.oldName)
} else if b.opt.ConflictLoser == ConflictLoserDelete && winningPath == 2 {
// delete 1, copy 2 to 1
- err = b.delete(ctxMove, r.path1, path1, b.fs1, 1, renameSkipped)
+ err = b.delete(ctxMove, r.path1, path1, path2, b.fs1, 1, 2, renameSkipped)
if err != nil {
return err
}
@@ -261,15 +261,15 @@ func (ri *renamesInfo) getNames(is1to2 bool) (srcOldName, srcNewName, dstOldName
func (b *bisyncRun) numerate(ctx context.Context, startnum int, file, alias string) int {
for i := startnum; i < math.MaxInt; i++ {
iStr := fmt.Sprint(i)
- if !b.march.ls1.has(SuffixName(ctx, file, b.opt.ConflictSuffix1+iStr)) &&
- !b.march.ls1.has(SuffixName(ctx, alias, b.opt.ConflictSuffix1+iStr)) &&
- !b.march.ls2.has(SuffixName(ctx, file, b.opt.ConflictSuffix2+iStr)) &&
- !b.march.ls2.has(SuffixName(ctx, alias, b.opt.ConflictSuffix2+iStr)) {
+ if !ls1.has(SuffixName(ctx, file, b.opt.ConflictSuffix1+iStr)) &&
+ !ls1.has(SuffixName(ctx, alias, b.opt.ConflictSuffix1+iStr)) &&
+ !ls2.has(SuffixName(ctx, file, b.opt.ConflictSuffix2+iStr)) &&
+ !ls2.has(SuffixName(ctx, alias, b.opt.ConflictSuffix2+iStr)) {
// make sure it still holds true with suffixes switched (it should)
- if !b.march.ls1.has(SuffixName(ctx, file, b.opt.ConflictSuffix2+iStr)) &&
- !b.march.ls1.has(SuffixName(ctx, alias, b.opt.ConflictSuffix2+iStr)) &&
- !b.march.ls2.has(SuffixName(ctx, file, b.opt.ConflictSuffix1+iStr)) &&
- !b.march.ls2.has(SuffixName(ctx, alias, b.opt.ConflictSuffix1+iStr)) {
+ if !ls1.has(SuffixName(ctx, file, b.opt.ConflictSuffix2+iStr)) &&
+ !ls1.has(SuffixName(ctx, alias, b.opt.ConflictSuffix2+iStr)) &&
+ !ls2.has(SuffixName(ctx, file, b.opt.ConflictSuffix1+iStr)) &&
+ !ls2.has(SuffixName(ctx, alias, b.opt.ConflictSuffix1+iStr)) {
fs.Debugf(file, "The first available suffix is: %s", iStr)
return i
}
@@ -280,10 +280,10 @@ func (b *bisyncRun) numerate(ctx context.Context, startnum int, file, alias stri
// like numerate, but consider only one side's suffix (for when suffixes are different)
func (b *bisyncRun) numerateSingle(ctx context.Context, startnum int, file, alias string, path int) int {
- lsA, lsB := b.march.ls1, b.march.ls2
+ lsA, lsB := ls1, ls2
suffix := b.opt.ConflictSuffix1
if path == 2 {
- lsA, lsB = b.march.ls2, b.march.ls1
+ lsA, lsB = ls2, ls1
suffix = b.opt.ConflictSuffix2
}
for i := startnum; i < math.MaxInt; i++ {
@@ -299,7 +299,7 @@ func (b *bisyncRun) numerateSingle(ctx context.Context, startnum int, file, alia
return 0 // not really possible, as no one has 9223372036854775807 conflicts, and if they do, they have bigger problems
}
- func (b *bisyncRun) rename(ctx context.Context, thisNamePair namePair, thisPath, thatPath string, thisFs fs.Fs, thisPathNum, thatPathNum, winningPath int, q, renameSkipped *bilib.Names) (err error) {
+ func (b *bisyncRun) rename(ctx context.Context, thisNamePair namePair, thisPath, thatPath string, thisFs fs.Fs, thisPathNum, thatPathNum, winningPath int, q, renameSkipped *bilib.Names) error {
if winningPath == thisPathNum {
b.indent(fmt.Sprintf("!Path%d", thisPathNum), thisPath+thisNamePair.newName, fmt.Sprintf("Not renaming Path%d copy, as it was determined the winner", thisPathNum))
} else {
@@ -321,7 +321,7 @@ func (b *bisyncRun) rename(ctx context.Context, thisNamePair namePair, thisPath,
return nil
}
- func (b *bisyncRun) delete(ctx context.Context, thisNamePair namePair, thisPath string, thisFs fs.Fs, thisPathNum int, renameSkipped *bilib.Names) (err error) {
+ func (b *bisyncRun) delete(ctx context.Context, thisNamePair namePair, thisPath, thatPath string, thisFs fs.Fs, thisPathNum, thatPathNum int, renameSkipped *bilib.Names) error {
skip := operations.SkipDestructive(ctx, thisNamePair.oldName, "delete")
if !skip {
b.indent(fmt.Sprintf("!Path%d", thisPathNum), thisPath+thisNamePair.oldName, fmt.Sprintf("Deleting Path%d copy", thisPathNum))
@@ -359,17 +359,17 @@ func (b *bisyncRun) conflictWinner(ds1, ds2 *deltaSet, remote1, remote2 string)
return 2
case PreferNewer, PreferOlder:
t1, t2 := ds1.time[remote1], ds2.time[remote2]
- return b.resolveNewerOlder(t1, t2, remote1, b.opt.ConflictResolve)
+ return b.resolveNewerOlder(t1, t2, remote1, remote2, b.opt.ConflictResolve)
case PreferLarger, PreferSmaller:
s1, s2 := ds1.size[remote1], ds2.size[remote2]
- return b.resolveLargerSmaller(s1, s2, remote1, b.opt.ConflictResolve)
+ return b.resolveLargerSmaller(s1, s2, remote1, remote2, b.opt.ConflictResolve)
default:
return 0
}
}
// returns the winning path number, or 0 if winner can't be determined
- func (b *bisyncRun) resolveNewerOlder(t1, t2 time.Time, remote1 string, prefer Prefer) int {
+ func (b *bisyncRun) resolveNewerOlder(t1, t2 time.Time, remote1, remote2 string, prefer Prefer) int {
if fs.GetModifyWindow(b.octx, b.fs1, b.fs2) == fs.ModTimeNotSupported {
fs.Infof(remote1, "Winner cannot be determined as at least one path lacks modtime support.")
return 0
@@ -380,31 +380,31 @@ func (b *bisyncRun) resolveNewerOlder(t1, t2 time.Time, remote1 string, prefer P
}
if t1.After(t2) {
if prefer == PreferNewer {
fs.Infof(remote1, "Path1 is newer. Path1: %v, Path2: %v, Difference: %s", t1.In(LogTZ), t2.In(LogTZ), t1.Sub(t2))
fs.Infof(remote1, "Path1 is newer. Path1: %v, Path2: %v, Difference: %s", t1.Local(), t2.Local(), t1.Sub(t2))
return 1
} else if prefer == PreferOlder {
fs.Infof(remote1, "Path2 is older. Path1: %v, Path2: %v, Difference: %s", t1.In(LogTZ), t2.In(LogTZ), t1.Sub(t2))
fs.Infof(remote1, "Path2 is older. Path1: %v, Path2: %v, Difference: %s", t1.Local(), t2.Local(), t1.Sub(t2))
return 2
}
} else if t1.Before(t2) {
if prefer == PreferNewer {
fs.Infof(remote1, "Path2 is newer. Path1: %v, Path2: %v, Difference: %s", t1.In(LogTZ), t2.In(LogTZ), t2.Sub(t1))
fs.Infof(remote1, "Path2 is newer. Path1: %v, Path2: %v, Difference: %s", t1.Local(), t2.Local(), t2.Sub(t1))
return 2
} else if prefer == PreferOlder {
fs.Infof(remote1, "Path1 is older. Path1: %v, Path2: %v, Difference: %s", t1.In(LogTZ), t2.In(LogTZ), t2.Sub(t1))
fs.Infof(remote1, "Path1 is older. Path1: %v, Path2: %v, Difference: %s", t1.Local(), t2.Local(), t2.Sub(t1))
return 1
}
}
if t1.Equal(t2) {
fs.Infof(remote1, "Winner cannot be determined as times are equal. Path1: %v, Path2: %v, Difference: %s", t1.In(LogTZ), t2.In(LogTZ), t2.Sub(t1))
fs.Infof(remote1, "Winner cannot be determined as times are equal. Path1: %v, Path2: %v, Difference: %s", t1.Local(), t2.Local(), t2.Sub(t1))
return 0
}
- fs.Errorf(remote1, "Winner cannot be determined. Path1: %v, Path2: %v", t1.In(LogTZ), t2.In(LogTZ)) // shouldn't happen unless prefer is of wrong type
+ fs.Errorf(remote1, "Winner cannot be determined. Path1: %v, Path2: %v", t1.Local(), t2.Local()) // shouldn't happen unless prefer is of wrong type
return 0
}
// returns the winning path number, or 0 if winner can't be determined
- func (b *bisyncRun) resolveLargerSmaller(s1, s2 int64, remote1 string, prefer Prefer) int {
+ func (b *bisyncRun) resolveLargerSmaller(s1, s2 int64, remote1, remote2 string, prefer Prefer) int {
if s1 < 0 || s2 < 0 {
fs.Infof(remote1, "Winner cannot be determined as at least one size is unknown. Path1: %v, Path2: %v", s1, s2)
return 0


@@ -20,6 +20,7 @@ func (b *bisyncRun) setResyncDefaults() {
}
if b.opt.ResyncMode != PreferNone {
b.opt.Resync = true
+ Opt.Resync = true // shouldn't be using this one, but set to be safe
}
// checks and warnings
@@ -40,18 +41,18 @@ func (b *bisyncRun) setResyncDefaults() {
// It will generate path1 and path2 listings,
// copy any unique files to the opposite path,
// and resolve any differing files according to the --resync-mode.
- func (b *bisyncRun) resync(fctx context.Context) (err error) {
+ func (b *bisyncRun) resync(octx, fctx context.Context) error {
fs.Infof(nil, "Copying Path2 files to Path1")
// Save blank filelists (will be filled from sync results)
- ls1 := newFileList()
- ls2 := newFileList()
- err = ls1.save(b.newListing1)
+ var ls1 = newFileList()
+ var ls2 = newFileList()
+ err = ls1.save(fctx, b.newListing1)
if err != nil {
b.handleErr(ls1, "error saving ls1 from resync", err, true, true)
b.abort = true
}
- err = ls2.save(b.newListing2)
+ err = ls2.save(fctx, b.newListing2)
if err != nil {
b.handleErr(ls2, "error saving ls2 from resync", err, true, true)
b.abort = true


@@ -16,9 +16,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
- INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
- INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -61,7 +59,6 @@ INFO : - Path1 Queue copy to Path2 - {
INFO : - Path1 Queue copy to Path2 - {path2/}file1.txt
INFO : - Path1 Queue copy to Path2 - {path2/}subdir/file20.txt
INFO : - Path1 Do queued copies to - Path2
- INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -136,7 +133,6 @@ INFO : - Path1 Queue copy to Path2 - {
INFO : - Path1 Queue copy to Path2 - {path2/}file1.txt
INFO : - Path1 Queue copy to Path2 - {path2/}subdir/file20.txt
INFO : - Path1 Do queued copies to - Path2
- INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful


@@ -16,9 +16,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
- INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
- INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful


@@ -16,9 +16,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
- INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
- INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful


@@ -16,9 +16,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
- INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
- INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful


@@ -16,9 +16,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
- INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
- INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -89,7 +87,6 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
- INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"


@@ -21,9 +21,7 @@ INFO : Using filters file {workdir/}exclude-other-filtersfile.txt
INFO : Storing filters file hash to {workdir/}exclude-other-filtersfile.txt.{hashtype}
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
- INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
- INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -138,9 +136,7 @@ INFO : Using filters file {workdir/}include-other-filtersfile.txt
INFO : Storing filters file hash to {workdir/}include-other-filtersfile.txt.{hashtype}
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
- INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
- INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful


@@ -16,9 +16,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
- INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
- INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -92,9 +90,7 @@ INFO : Copying Path2 files to Path1
INFO : Checking access health
INFO : Found 2 matching ".chk_file" files on both paths
INFO : - Path2 Resync is copying files to - Path1
- INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
- INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful


@@ -16,9 +16,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
- INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
- INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -104,9 +102,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
- INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
- INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful


@@ -15,9 +15,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
- INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
- INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful


@@ -15,9 +15,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
- INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
- INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful


@@ -23,9 +23,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
- INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
- INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -82,7 +80,7 @@ INFO : Path2 checking for diffs
INFO : Applying changes
INFO : - Path1 Queue copy to Path2 - {path2/}subdir
INFO : - Path1 Do queued copies to - Path2
- INFO : There was nothing to transfer
+ INFO : subdir: Making directory
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -126,7 +124,6 @@ INFO : Path2: 1 changes:  0 new,  0 modified, 
INFO : Applying changes
INFO : - Path2 Queue delete - {path2/}RCLONE_TEST
INFO : - Path1 Do queued copies to - Path2
- INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -151,9 +148,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
- INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
- INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -193,7 +188,6 @@ INFO : Path2 checking for diffs
INFO : Applying changes
INFO : - Path2 Queue delete - {path2/}subdir
INFO : - Path1 Do queued copies to - Path2
- INFO : There was nothing to transfer
INFO : subdir: Removing directory
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"


@@ -16,9 +16,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
- INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
- INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful


@@ -16,9 +16,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
- INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
- INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful


@@ -27,9 +27,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}測試Русский ěáñ/" with Path2 "{path2/}測試Русский ěáñ/"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
- INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
- INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}測試Русский ěáñ/" vs Path2 "{path2/}測試Русский ěáñ/"
INFO : Bisync successful
@@ -86,9 +84,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
- INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
- INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -178,9 +174,7 @@ INFO : Using filters file {workdir/}測試_filtersfile.txt
INFO : Storing filters file hash to {workdir/}測試_filtersfile.txt.{hashtype}
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
- INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
- INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful


@@ -20,9 +20,7 @@ INFO : Using filters file {workdir/}filtersfile.flt
INFO : Storing filters file hash to {workdir/}filtersfile.flt.{hashtype}
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
- INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
- INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful


@@ -16,9 +16,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
- INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
- INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -83,9 +81,7 @@ INFO : Using filters file {workdir/}filtersfile.txt
INFO : Storing filters file hash to {workdir/}filtersfile.txt.{hashtype}
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
- INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
- INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -150,9 +146,7 @@ INFO : Using filters file {workdir/}filtersfile.txt
INFO : Skipped storing filters file hash to {workdir/}filtersfile.txt.{hashtype} as --dry-run is set
INFO : Copying Path2 files to Path1
NOTICE: - Path2 Resync is copying files to - Path1
- INFO : There was nothing to transfer
NOTICE: - Path1 Resync is copying files to - Path2
- INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Bisync successful


@@ -16,9 +16,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
- INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
- INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -35,9 +33,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
- INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
- INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful


@@ -16,9 +16,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
- INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
- INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -86,7 +84,6 @@ INFO : - Path2 Queue delete - {
INFO : - Path2 Queue delete - {path2/}file4.txt
INFO : - Path2 Queue delete - {path2/}file5.txt
INFO : - Path1 Do queued copies to - Path2
- INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful


@@ -16,9 +16,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
- INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
- INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -86,7 +84,6 @@ INFO : - Path1 Queue delete - {
INFO : - Path1 Queue delete - {path1/}file4.txt
INFO : - Path1 Queue delete - {path1/}file5.txt
INFO : - Path2 Do queued copies to - Path1
- INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful


@@ -15,9 +15,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
- INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
- INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

Some files were not shown because too many files have changed in this diff.