Mirror of https://github.com/rclone/rclone.git
Synced 2025-12-11 22:03:17 +00:00

Compare commits: feat/cache...v1.71-stab (175 commits)
(Commit list: 175 commits, a2b292b31d through 7b61084891; author, date, and message details not included.)
`.github/workflows/build.yml` (vendored): 49 changed lines

@@ -29,12 +29,12 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.23']
+        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.24']

         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '>=1.24.0-rc.1'
+            go: '>=1.25.0-rc.1'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -45,14 +45,14 @@ jobs:

           - job_name: linux_386
             os: ubuntu-latest
-            go: '>=1.24.0-rc.1'
+            go: '>=1.25.0-rc.1'
             goarch: 386
             gotags: cmount
             quicktest: true

           - job_name: mac_amd64
             os: macos-latest
-            go: '>=1.24.0-rc.1'
+            go: '>=1.25.0-rc.1'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -61,14 +61,14 @@ jobs:

           - job_name: mac_arm64
             os: macos-latest
-            go: '>=1.24.0-rc.1'
+            go: '>=1.25.0-rc.1'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true

           - job_name: windows
             os: windows-latest
-            go: '>=1.24.0-rc.1'
+            go: '>=1.25.0-rc.1'
             gotags: cmount
             cgo: '0'
             build_flags: '-include "^windows/"'
@@ -78,14 +78,14 @@ jobs:

           - job_name: other_os
             os: ubuntu-latest
-            go: '>=1.24.0-rc.1'
+            go: '>=1.25.0-rc.1'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true

-          - job_name: go1.23
+          - job_name: go1.24
             os: ubuntu-latest
-            go: '1.23'
+            go: '1.24'
             quicktest: true
             racequicktest: true

@@ -95,7 +95,7 @@ jobs:

     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           fetch-depth: 0

@@ -216,7 +216,7 @@ jobs:
          echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT

      - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          fetch-depth: 0

@@ -224,7 +224,7 @@ jobs:
        id: setup-go
        uses: actions/setup-go@v5
        with:
-          go-version: '>=1.23.0-rc.1'
+          go-version: '>=1.24.0-rc.1'
          check-latest: true
          cache: false

@@ -239,13 +239,13 @@ jobs:
          restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-

      - name: Code quality test (Linux)
-        uses: golangci/golangci-lint-action@v6
+        uses: golangci/golangci-lint-action@v8
        with:
          version: latest
          skip-cache: true

      - name: Code quality test (Windows)
-        uses: golangci/golangci-lint-action@v6
+        uses: golangci/golangci-lint-action@v8
        env:
          GOOS: "windows"
        with:
@@ -253,7 +253,7 @@ jobs:
          skip-cache: true

      - name: Code quality test (macOS)
-        uses: golangci/golangci-lint-action@v6
+        uses: golangci/golangci-lint-action@v8
        env:
          GOOS: "darwin"
        with:
@@ -261,7 +261,7 @@ jobs:
          skip-cache: true

      - name: Code quality test (FreeBSD)
-        uses: golangci/golangci-lint-action@v6
+        uses: golangci/golangci-lint-action@v8
        env:
          GOOS: "freebsd"
        with:
@@ -269,7 +269,7 @@ jobs:
          skip-cache: true

      - name: Code quality test (OpenBSD)
-        uses: golangci/golangci-lint-action@v6
+        uses: golangci/golangci-lint-action@v8
        env:
          GOOS: "openbsd"
        with:
@@ -282,6 +282,17 @@ jobs:
      - name: Scan for vulnerabilities
        run: govulncheck ./...

+      - name: Check Markdown format
+        uses: DavidAnson/markdownlint-cli2-action@v20
+        with:
+          globs: |
+            CONTRIBUTING.md
+            MAINTAINERS.md
+            README.md
+            RELEASE.md
+            CODE_OF_CONDUCT.md
+            docs/content/{authors,bugs,changelog,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md
+
      - name: Scan edits of autogenerated files
        run: bin/check_autogenerated_edits.py 'origin/${{ github.base_ref }}'
        if: github.event_name == 'pull_request'
@@ -294,7 +305,7 @@ jobs:

    steps:
      - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          fetch-depth: 0

@@ -302,7 +313,7 @@ jobs:
      - name: Set up Go
        uses: actions/setup-go@v5
        with:
-          go-version: '>=1.24.0-rc.1'
+          go-version: '>=1.25.0-rc.1'

      - name: Set global environment variables
        run: |
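The workflow's vulnerability scan can be reproduced locally before pushing. A
minimal sketch, assuming a Go toolchain at least as new as the bumped test
matrix (go1.24):

```sh
# Install the same scanner the "Scan for vulnerabilities" step invokes,
# then check every package in the repository.
go install golang.org/x/vuln/cmd/govulncheck@latest
govulncheck ./...
```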
Another changed workflow file (name not shown in this view):

@@ -52,7 +52,7 @@ jobs:
          df -h .

      - name: Checkout Repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          fetch-depth: 0

@@ -198,7 +198,7 @@ jobs:

    steps:
      - name: Download Image Digests
-        uses: actions/download-artifact@v4
+        uses: actions/download-artifact@v5
        with:
          path: /tmp/digests
          pattern: digests-*

Another changed workflow file (name not shown in this view):

@@ -30,7 +30,7 @@ jobs:
          sudo rm -rf /usr/share/dotnet || true
          df -h .
      - name: Checkout master
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          fetch-depth: 0
      - name: Build and publish docker plugin
`.golangci.yml`: 256 changed lines

@@ -1,144 +1,146 @@
-# golangci-lint configuration options
+version: "2"

 linters:
+  # Configure the linter set. To avoid unexpected results the implicit default
+  # set is ignored and all the ones to use are explicitly enabled.
+  default: none
   enable:
+    # Default
     - errcheck
-    - goimports
-    - revive
-    - ineffassign
     - govet
-    - unconvert
+    - ineffassign
     - staticcheck
-    - gosimple
-    - stylecheck
     - unused
-    - misspell
+    # Additional
     - gocritic
-    #- prealloc
-    #- maligned
-  disable-all: true
+    - misspell
+    #- prealloc # TODO
+    - revive
+    - unconvert
+  # Configure checks. Mostly using defaults but with some commented exceptions.
+  settings:
+    staticcheck:
+      # With staticcheck there is only one setting, so to extend the implicit
+      # default value it must be explicitly included.
+      checks:
+        # Default
+        - all
+        - -ST1000
+        - -ST1003
+        - -ST1016
+        - -ST1020
+        - -ST1021
+        - -ST1022
+        # Disable quickfix checks
+        - -QF*
+    gocritic:
+      # With gocritic there are different settings, but since enabled-checks
+      # and disabled-checks cannot both be set, for full customization the
+      # alternative is to disable all defaults and explicitly enable the ones
+      # to use.
+      disable-all: true
+      enabled-checks:
+        #- appendAssign # Skip default
+        - argOrder
+        - assignOp
+        - badCall
+        - badCond
+        #- captLocal # Skip default
+        - caseOrder
+        - codegenComment
+        #- commentFormatting # Skip default
+        - defaultCaseOrder
+        - deprecatedComment
+        - dupArg
+        - dupBranchBody
+        - dupCase
+        - dupSubExpr
+        - elseif
+        #- exitAfterDefer # Skip default
+        - flagDeref
+        - flagName
+        #- ifElseChain # Skip default
+        - mapKey
+        - newDeref
+        - offBy1
+        - regexpMust
+        - ruleguard # Enable additional check that are not enabled by default
+        #- singleCaseSwitch # Skip default
+        - sloppyLen
+        - sloppyTypeAssert
+        - switchTrue
+        - typeSwitchVar
+        - underef
+        - unlambda
+        - unslice
+        - valSwap
+        - wrapperFunc
+      settings:
+        ruleguard:
+          rules: ${base-path}/bin/rules.go
+    revive:
+      # With revive there is in reality only one setting, and when at least one
+      # rule are specified then only these rules will be considered, defaults
+      # and all others are then implicitly disabled, so must explicitly enable
+      # all rules to be used.
+      rules:
+        - name: blank-imports
+          disabled: false
+        - name: context-as-argument
+          disabled: false
+        - name: context-keys-type
+          disabled: false
+        - name: dot-imports
+          disabled: false
+        #- name: empty-block # Skip default
+        #  disabled: true
+        - name: error-naming
+          disabled: false
+        - name: error-return
+          disabled: false
+        - name: error-strings
+          disabled: false
+        - name: errorf
+          disabled: false
+        - name: exported
+          disabled: false
+        #- name: increment-decrement # Skip default
+        #  disabled: true
+        - name: indent-error-flow
+          disabled: false
+        - name: package-comments
+          disabled: false
+        - name: range
+          disabled: false
+        - name: receiver-naming
+          disabled: false
+        #- name: redefines-builtin-id # Skip default
+        #  disabled: true
+        #- name: superfluous-else # Skip default
+        #  disabled: true
+        - name: time-naming
+          disabled: false
+        - name: unexported-return
+          disabled: false
+        #- name: unreachable-code # Skip default
+        #  disabled: true
+        #- name: unused-parameter # Skip default
+        #  disabled: true
+        - name: var-declaration
+          disabled: false
+        - name: var-naming
+          disabled: false
+
+formatters:
+  enable:
+    - goimports

 issues:
-  # Enable some lints excluded by default
-  exclude-use-default: false
-
   # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
   max-issues-per-linter: 0

   # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
   max-same-issues: 0

-  exclude-rules:
-
-    - linters:
-      - staticcheck
-      text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'
-
-  # don't disable the revive messages about comments on exported functions
-  include:
-    - EXC0012
-    - EXC0013
-    - EXC0014
-    - EXC0015
-
 run:
-  # timeout for analysis, e.g. 30s, 5m, default is 1m
+  # Timeout for total work, e.g. 30s, 5m, 5m30s. Default is 0 (disabled).
   timeout: 10m
-
-linters-settings:
-  revive:
-    # setting rules seems to disable all the rules, so re-enable them here
-    rules:
-      - name: blank-imports
-        disabled: false
-      - name: context-as-argument
-        disabled: false
-      - name: context-keys-type
-        disabled: false
-      - name: dot-imports
-        disabled: false
-      - name: empty-block
-        disabled: true
-      - name: error-naming
-        disabled: false
-      - name: error-return
-        disabled: false
-      - name: error-strings
-        disabled: false
-      - name: errorf
-        disabled: false
-      - name: exported
-        disabled: false
-      - name: increment-decrement
-        disabled: true
-      - name: indent-error-flow
-        disabled: false
-      - name: package-comments
-        disabled: false
-      - name: range
-        disabled: false
-      - name: receiver-naming
-        disabled: false
-      - name: redefines-builtin-id
-        disabled: true
-      - name: superfluous-else
-        disabled: true
-      - name: time-naming
-        disabled: false
-      - name: unexported-return
-        disabled: false
-      - name: unreachable-code
-        disabled: true
-      - name: unused-parameter
-        disabled: true
-      - name: var-declaration
-        disabled: false
-      - name: var-naming
-        disabled: false
-  stylecheck:
-    # Only enable the checks performed by the staticcheck stand-alone tool,
-    # as documented here: https://staticcheck.io/docs/configuration/options/#checks
-    checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
-  gocritic:
-    # Enable all default checks with some exceptions and some additions (commented).
-    # Cannot use both enabled-checks and disabled-checks, so must specify all to be used.
-    disable-all: true
-    enabled-checks:
-      #- appendAssign # Enabled by default
-      - argOrder
-      - assignOp
-      - badCall
-      - badCond
-      #- captLocal # Enabled by default
-      - caseOrder
-      - codegenComment
-      #- commentFormatting # Enabled by default
-      - defaultCaseOrder
-      - deprecatedComment
-      - dupArg
-      - dupBranchBody
-      - dupCase
-      - dupSubExpr
-      - elseif
-      #- exitAfterDefer # Enabled by default
-      - flagDeref
-      - flagName
-      #- ifElseChain # Enabled by default
-      - mapKey
-      - newDeref
-      - offBy1
-      - regexpMust
-      - ruleguard # Not enabled by default
-      #- singleCaseSwitch # Enabled by default
-      - sloppyLen
-      - sloppyTypeAssert
-      - switchTrue
-      - typeSwitchVar
-      - underef
-      - unlambda
-      - unslice
-      - valSwap
-      - wrapperFunc
-    settings:
-      ruleguard:
-        rules: "${configDir}/bin/rules.go"
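The rewritten configuration uses the golangci-lint v2 schema, so a v1 binary
will reject it. A quick local sanity check, as a sketch (the `config verify`
subcommand is assumed to be available, as in recent golangci-lint releases):

```sh
golangci-lint --version     # should report a v2.x release
golangci-lint config verify # validate .golangci.yml against the v2 schema
golangci-lint run ./...     # the same lint run the CI performs
```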
`.markdownlint.yml` (new file): 43 lines

default: true

# Use specific styles, to be consistent accross all documents.
# Default is to accept any as long as it is consistent within the same document.
heading-style: # MD003
  style: atx
ul-style: # MD004
  style: dash
hr-style: # MD035
  style: ---
code-block-style: # MD046
  style: fenced
code-fence-style: # MD048
  style: backtick
emphasis-style: # MD049
  style: asterisk
strong-style: # MD050
  style: asterisk

# Allow multiple headers with same text as long as they are not siblings.
no-duplicate-heading: # MD024
  siblings_only: true

# Allow long lines in code blocks and tables.
line-length: # MD013
  code_blocks: false
  tables: false

# The Markdown files used to generated docs with Hugo contain a top level
# header, even though the YAML front matter has a title property (which is
# used for the HTML document title only). Suppress Markdownlint warning:
# Multiple top-level headings in the same document.
single-title: # MD025
  level: 1
  front_matter_title:

# The HTML docs generated by Hugo from Markdown files may have slightly
# different header anchors than GitHub rendered Markdown, e.g. Hugo trims
# leading dashes so "--config string" becomes "#config-string" while it is
# "#--config-string" in GitHub preview. When writing links to headers in the
# Markdown files we must use whatever works in the final HTML generated docs.
# Suppress Markdownlint warning: Link fragments should be valid.
link-fragments: false # MD051
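These rules are enforced in CI through markdownlint-cli2 and can be applied
locally as well; a minimal sketch, assuming an npm installation:

```sh
# markdownlint-cli2 picks up .markdownlint.yml automatically from the
# directory tree of the files being checked.
npm install --global markdownlint-cli2
markdownlint-cli2 CONTRIBUTING.md README.md "docs/content/*.md"
```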
`CODE_OF_CONDUCT.md` (new file): 80 lines

# Rclone Code of Conduct

Like the technical community as a whole, the Rclone team and community
is made up of a mixture of professionals and volunteers from all over
the world, working on every aspect of the mission - including
mentorship, teaching, and connecting people.

Diversity is one of our huge strengths, but it can also lead to
communication issues and unhappiness. To that end, we have a few
ground rules that we ask people to adhere to. This code applies
equally to founders, mentors and those seeking help and guidance.

This isn't an exhaustive list of things that you can't do. Rather,
take it in the spirit in which it's intended - a guide to make it
easier to enrich all of us and the technical communities in which we
participate.

This code of conduct applies to all spaces managed by the Rclone
project or Rclone Services Ltd. This includes the issue tracker, the
forum, the GitHub site, the wiki, any other online services or
in-person events. In addition, violations of this code outside these
spaces may affect a person's ability to participate within them.

- **Be friendly and patient.**
- **Be welcoming.** We strive to be a community that welcomes and
  supports people of all backgrounds and identities. This includes,
  but is not limited to members of any race, ethnicity, culture,
  national origin, colour, immigration status, social and economic
  class, educational level, sex, sexual orientation, gender identity
  and expression, age, size, family status, political belief,
  religion, and mental and physical ability.
- **Be considerate.** Your work will be used by other people, and you
  in turn will depend on the work of others. Any decision you take
  will affect users and colleagues, and you should take those
  consequences into account when making decisions. Remember that we're
  a world-wide community, so you might not be communicating in someone
  else's primary language.
- **Be respectful.** Not all of us will agree all the time, but
  disagreement is no excuse for poor behavior and poor manners. We
  might all experience some frustration now and then, but we cannot
  allow that frustration to turn into a personal attack. It's
  important to remember that a community where people feel
  uncomfortable or threatened is not a productive one. Members of the
  Rclone community should be respectful when dealing with other
  members as well as with people outside the Rclone community.
- **Be careful in the words that you choose.** We are a community of
  professionals, and we conduct ourselves professionally. Be kind to
  others. Do not insult or put down other participants. Harassment and
  other exclusionary behavior aren't acceptable. This includes, but is
  not limited to:
  - Violent threats or language directed against another person.
  - Discriminatory jokes and language.
  - Posting sexually explicit or violent material.
  - Posting (or threatening to post) other people's personally
    identifying information ("doxing").
  - Personal insults, especially those using racist or sexist terms.
  - Unwelcome sexual attention.
  - Advocating for, or encouraging, any of the above behavior.
  - Repeated harassment of others. In general, if someone asks you to
    stop, then stop.
- **When we disagree, try to understand why.** Disagreements, both
  social and technical, happen all the time and Rclone is no
  exception. It is important that we resolve disagreements and
  differing views constructively. Remember that we're different. The
  strength of Rclone comes from its varied community, people from a
  wide range of backgrounds. Different people have different
  perspectives on issues. Being unable to understand why someone holds
  a viewpoint doesn't mean that they're wrong. Don't forget that it is
  human to err and blaming each other doesn't get us anywhere.
  Instead, focus on helping to resolve issues and learning from
  mistakes.

If you believe someone is violating the code of conduct, we ask that
you report it by emailing [info@rclone.com](mailto:info@rclone.com).

Original text courtesy of the [Speak Up! project](http://web.archive.org/web/20141109123859/http://speakup.io/coc.html).

## Questions?

If you have questions, please feel free to [contact us](mailto:info@rclone.com).
`CONTRIBUTING.md`: 504 changed lines

@@ -15,61 +15,81 @@ with the [latest beta of rclone](https://beta.rclone.org/):
 - Rclone version (e.g. output from `rclone version`)
 - Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
 - The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
-- A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
-  - if the log contains secrets then edit the file with a text editor first to obscure them
+- A log of the command with the `-vv` flag (e.g. output from
+  `rclone -vv copy /tmp remote:tmp`)
+  - if the log contains secrets then edit the file with a text editor first to
+    obscure them

 ## Submitting a new feature or bug fix

 If you find a bug that you'd like to fix, or a new feature that you'd
 like to implement then please submit a pull request via GitHub.

-If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues) first so it can be discussed.
+If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues)
+first so it can be discussed.

 To prepare your pull request first press the fork button on [rclone's GitHub
 page](https://github.com/rclone/rclone).

-Then [install Git](https://git-scm.com/downloads) and set your public contribution [name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git) and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).
+Then [install Git](https://git-scm.com/downloads) and set your public contribution
+[name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git)
+and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).

-Next open your terminal, change directory to your preferred folder and initialise your local rclone project:
+Next open your terminal, change directory to your preferred folder and initialise
+your local rclone project:

-    git clone https://github.com/rclone/rclone.git
-    cd rclone
-    git remote rename origin upstream
-    # if you have SSH keys setup in your GitHub account:
-    git remote add origin git@github.com:YOURUSER/rclone.git
-    # otherwise:
-    git remote add origin https://github.com/YOURUSER/rclone.git
+```sh
+git clone https://github.com/rclone/rclone.git
+cd rclone
+git remote rename origin upstream
+# if you have SSH keys setup in your GitHub account:
+git remote add origin git@github.com:YOURUSER/rclone.git
+# otherwise:
+git remote add origin https://github.com/YOURUSER/rclone.git
+```

-Note that most of the terminal commands in the rest of this guide must be executed from the rclone folder created above.
+Note that most of the terminal commands in the rest of this guide must be
+executed from the rclone folder created above.

 Now [install Go](https://golang.org/doc/install) and verify your installation:

-    go version
+```sh
+go version
+```

 Great, you can now compile and execute your own version of rclone:

-    go build
-    ./rclone version
+```sh
+go build
+./rclone version
+```

 (Note that you can also replace `go build` with `make`, which will include a
 more accurate version number in the executable as well as enable you to specify
 more build options.) Finally make a branch to add your new feature

-    git checkout -b my-new-feature
+```sh
+git checkout -b my-new-feature
+```

 And get hacking.

-You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins) and a quick view on the rclone [code organisation](#code-organisation).
+You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins)
+and a quick view on the rclone [code organisation](#code-organisation).

-When ready - test the affected functionality and run the unit tests for the code you changed
+When ready - test the affected functionality and run the unit tests for the
+code you changed

-    cd folder/with/changed/files
-    go test -v
+```sh
+cd folder/with/changed/files
+go test -v
+```

 Note that you may need to make a test remote, e.g. `TestSwift` for some
 of the unit tests.

-This is typically enough if you made a simple bug fix, otherwise please read the rclone [testing](#testing) section too.
+This is typically enough if you made a simple bug fix, otherwise please read
+the rclone [testing](#testing) section too.

 Make sure you
@@ -79,14 +99,19 @@ Make sure you

 When you are done with that push your changes to GitHub:

-    git push -u origin my-new-feature
+```sh
+git push -u origin my-new-feature
+```

 and open the GitHub website to [create your pull
 request](https://help.github.com/articles/creating-a-pull-request/).

-Your changes will then get reviewed and you might get asked to fix some stuff. If so, then make the changes in the same branch, commit and push your updates to GitHub.
+Your changes will then get reviewed and you might get asked to fix some stuff.
+If so, then make the changes in the same branch, commit and push your updates to
+GitHub.

-You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
+You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master)
+or [squash your commits](#squashing-your-commits).

 ## Using Git and GitHub
@@ -94,87 +119,118 @@ You may sometimes be asked to [base your changes on the latest master](#basing-y

 Follow the guideline for [commit messages](#commit-messages) and then:

-    git checkout my-new-feature # To switch to your branch
-    git status # To see the new and changed files
-    git add FILENAME # To select FILENAME for the commit
-    git status # To verify the changes to be committed
-    git commit # To do the commit
-    git log # To verify the commit. Use q to quit the log
+```sh
+git checkout my-new-feature # To switch to your branch
+git status # To see the new and changed files
+git add FILENAME # To select FILENAME for the commit
+git status # To verify the changes to be committed
+git commit # To do the commit
+git log # To verify the commit. Use q to quit the log
+```

 You can modify the message or changes in the latest commit using:

-    git commit --amend
+```sh
+git commit --amend
+```

-If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
+If you amend to commits that have been pushed to GitHub, then you will have to
+[replace your previously pushed commits](#replacing-your-previously-pushed-commits).

 ### Replacing your previously pushed commits

-Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.
+Note that you are about to rewrite the GitHub history of your branch. It is good
+practice to involve your collaborators before modifying commits that have been
+pushed to GitHub.

 Your previously pushed commits are replaced by:

-    git push --force origin my-new-feature
+```sh
+git push --force origin my-new-feature
+```

 ### Basing your changes on the latest master

-To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
+To base your changes on the latest version of the
+[rclone master](https://github.com/rclone/rclone/tree/master) (upstream):

-    git checkout master
-    git fetch upstream
-    git merge --ff-only
-    git push origin --follow-tags # optional update of your fork in GitHub
-    git checkout my-new-feature
-    git rebase master
+```sh
+git checkout master
+git fetch upstream
+git merge --ff-only
+git push origin --follow-tags # optional update of your fork in GitHub
+git checkout my-new-feature
+git rebase master
+```

-If you rebase commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
+If you rebase commits that have been pushed to GitHub, then you will have to
+[replace your previously pushed commits](#replacing-your-previously-pushed-commits).

-### Squashing your commits ###
+### Squashing your commits

 To combine your commits into one commit:

-    git log # To count the commits to squash, e.g. the last 2
-    git reset --soft HEAD~2 # To undo the 2 latest commits
-    git status # To check everything is as expected
+```sh
+git log # To count the commits to squash, e.g. the last 2
+git reset --soft HEAD~2 # To undo the 2 latest commits
+git status # To check everything is as expected
+```

 If everything is fine, then make the new combined commit:

-    git commit # To commit the undone commits as one
+```sh
+git commit # To commit the undone commits as one
+```

 otherwise, you may roll back using:

-    git reflog # To check that HEAD{1} is your previous state
-    git reset --soft 'HEAD@{1}' # To roll back to your previous state
+```sh
+git reflog # To check that HEAD{1} is your previous state
+git reset --soft 'HEAD@{1}' # To roll back to your previous state
+```

-If you squash commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
+If you squash commits that have been pushed to GitHub, then you will have to
+[replace your previously pushed commits](#replacing-your-previously-pushed-commits).

-Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.
+Tip: You may like to use `git rebase -i master` if you are experienced or have a
+more complex situation.

 ### GitHub Continuous Integration

-rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
+rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions)
+to build and test the project, which should be automatically available for your
+fork too from the `Actions` tab in your repository.

 ## Testing

 ### Code quality tests

-If you install [golangci-lint](https://github.com/golangci/golangci-lint) then you can run the same tests as get run in the CI which can be very helpful.
+If you install [golangci-lint](https://github.com/golangci/golangci-lint) then
+you can run the same tests as get run in the CI which can be very helpful.

 You can run them with `make check` or with `golangci-lint run ./...`.

-Using these tests ensures that the rclone codebase all uses the same coding standards. These tests also check for easy mistakes to make (like forgetting to check an error return).
+Using these tests ensures that the rclone codebase all uses the same coding
+standards. These tests also check for easy mistakes to make (like forgetting
+to check an error return).

 ### Quick testing

 rclone's tests are run from the go testing framework, so at the top
 level you can run this to run all the tests.

-    go test -v ./...
+```sh
+go test -v ./...
+```

 You can also use `make`, if supported by your platform

-    make quicktest
+```sh
+make quicktest
+```

-The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.
+The quicktest is [automatically run by GitHub](#github-continuous-integration)
+when you push your branch to GitHub.

 ### Backend testing
@@ -190,41 +246,51 @@ need to make a remote called `TestDrive`.
 You can then run the unit tests in the drive directory. These tests
 are skipped if `TestDrive:` isn't defined.

-    cd backend/drive
-    go test -v
+```sh
+cd backend/drive
+go test -v
+```

 You can then run the integration tests which test all of rclone's
 operations. Normally these get run against the local file system,
 but they can be run against any of the remotes.

-    cd fs/sync
-    go test -v -remote TestDrive:
-    go test -v -remote TestDrive: -fast-list
-
-    cd fs/operations
-    go test -v -remote TestDrive:
+```sh
+cd fs/sync
+go test -v -remote TestDrive:
+go test -v -remote TestDrive: -fast-list
+
+cd fs/operations
+go test -v -remote TestDrive:
+```

 If you want to use the integration test framework to run these tests
 altogether with an HTML report and test retries then from the
 project root:

-    go install github.com/rclone/rclone/fstest/test_all
-    test_all -backends drive
+```sh
+go install github.com/rclone/rclone/fstest/test_all
+test_all -backends drive
+```

 ### Full integration testing

 If you want to run all the integration tests against all the remotes,
 then change into the project root and run

-    make check
-    make test
+```sh
+make check
+make test
+```

 The commands may require some extra go packages which you can install with

-    make build_dep
+```sh
+make build_dep
+```

 The full integration tests are run daily on the integration test server. You can
-find the results at https://pub.rclone.org/integration-tests/
+find the results at <https://pub.rclone.org/integration-tests/>

 ## Code Organisation
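Before running the backend tests in the hunk above, the `TestDrive:` remote
must exist in your rclone config. A hypothetical sketch (the `drive` backend
normally needs real credentials and interactive authorization via
`rclone config`, so the values here are placeholders):

```sh
# Create a remote named TestDrive using the drive backend, then confirm
# that it is registered.
rclone config create TestDrive drive scope drive
rclone listremotes
```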
@@ -232,46 +298,48 @@ Rclone code is organised into a small number of top level directories
 with modules beneath.

 - backend - the rclone backends for interfacing to cloud providers -
   - all - import this to load all the cloud providers
   - ...providers
 - bin - scripts for use while building or maintaining rclone
 - cmd - the rclone commands
   - all - import this to load all the commands
   - ...commands
 - cmdtest - end-to-end tests of commands, flags, environment variables,...
 - docs - the documentation and website
-  - content - adjust these docs only - everything else is autogenerated
-    - command - these are auto-generated - edit the corresponding .go file
+  - content - adjust these docs only, except those marked autogenerated
+    or portions marked autogenerated where the corresponding .go file must be
+    edited instead, and everything else is autogenerated
+    - commands - these are auto-generated, edit the corresponding .go file
 - fs - main rclone definitions - minimal amount of code
   - accounting - bandwidth limiting and statistics
   - asyncreader - an io.Reader which reads ahead
   - config - manage the config file and flags
   - driveletter - detect if a name is a drive letter
   - filter - implements include/exclude filtering
   - fserrors - rclone specific error handling
   - fshttp - http handling for rclone
   - fspath - path handling for rclone
   - hash - defines rclone's hash types and functions
   - list - list a remote
   - log - logging facilities
   - march - iterates directories in lock step
   - object - in memory Fs objects
   - operations - primitives for sync, e.g. Copy, Move
   - sync - sync directories
   - walk - walk a directory
 - fstest - provides integration test framework
   - fstests - integration tests for the backends
   - mockdir - mocks an fs.Directory
   - mockobject - mocks an fs.Object
   - test_all - Runs integration tests for everything
 - graphics - the images used in the website, etc.
 - lib - libraries used by the backend
   - atexit - register functions to run when rclone exits
   - dircache - directory ID to name caching
   - oauthutil - helpers for using oauth
   - pacer - retries with backoff and paces operations
   - readers - a selection of useful io.Readers
   - rest - a thin abstraction over net/http for REST
 - librclone - in memory interface to rclone's API for embedding rclone
 - vfs - Virtual FileSystem layer for implementing rclone mount and similar
@@ -279,6 +347,36 @@ with modules beneath.

 If you are adding a new feature then please update the documentation.

+The documentation sources are generally in Markdown format, in conformance
+with the CommonMark specification and compatible with GitHub Flavored
+Markdown (GFM). The markdown format is checked as part of the lint operation
+that runs automatically on pull requests, to enforce standards and consistency.
+This is based on the [markdownlint](https://github.com/DavidAnson/markdownlint)
+tool, which can also be integrated into editors so you can perform the same
+checks while writing.
+
+HTML pages, served as website <rclone.org>, are generated from the Markdown,
+using [Hugo](https://gohugo.io). Note that when generating the HTML pages,
+there is currently used a different algorithm for generating header anchors
+than what GitHub uses for its Markdown rendering. For example, in the HTML docs
+generated by Hugo any leading `-` characters are ignored, which means when
+linking to a header with text `--config string` we therefore need to use the
+link `#config-string` in our Markdown source, which will not work in GitHub's
+preview where `#--config-string` would be the correct link.
+
+Most of the documentation are written directly in text files with extension
+`.md`, mainly within folder `docs/content`. Note that several of such files
+are autogenerated (e.g. the command documentation, and `docs/content/flags.md`),
+or contain autogenerated portions (e.g. the backend documentation under
+`docs/content/commands`). These are marked with an `autogenerated` comment.
+The sources of the autogenerated text are usually Markdown formatted text
+embedded as string values in the Go source code, so you need to locate these
+and edit the `.go` file instead. The `MANUAL.*`, `rclone.1` and other text
+files in the root of the repository are also autogenerated. The autogeneration
+of files, and the website, will be done during the release process. See the
+`make doc` and `make website` targets in the Makefile if you are interested in
+how. You don't need to run these when adding a feature.
+
 If you add a new general flag (not for a backend), then document it in
 `docs/content/docs.md` - the flags there are supposed to be in
 alphabetical order.
@@ -287,39 +385,40 @@ If you add a new backend option/flag, then it should be documented in
|
|||||||
the source file in the `Help:` field.
|
the source file in the `Help:` field.
|
||||||

- Start with the most important information about the option,
  as a single sentence on a single line.
  - This text will be used for the command-line flag help.
  - It will be combined with other information, such as any default value,
    and the result will look odd if not written as a single sentence.
  - It should end with a period/full stop character, which will be shown
    in docs but automatically removed when producing the flag help.
  - Try to keep it below 80 characters, to reduce text wrapping in the terminal.
- More details can be added in a new paragraph, after an empty line (`"\n\n"`).
  - Like with docs generated from Markdown, a single line break is ignored
    and two line breaks create a new paragraph.
  - This text will be shown to the user in `rclone config`
    and in the docs (where it will be added by `make backenddocs`,
    normally run some time before the next release).
- To create options of enumeration type use the `Examples:` field.
  - Each example value has its own `Help:` field, but they are treated
    a bit differently than the main option help text. They will be shown
    as an unordered list, therefore a single line break is enough to
    create a new list item. Also, for enumeration texts like names of
    countries, it looks better without an ending period/full stop character.
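
To make the shape of this concrete, here is a minimal, hypothetical sketch of
how these fields fit together in a backend's registration - the backend name,
option name and example values are invented, but `fs.RegInfo`, `fs.Option` and
`fs.OptionExample` are the real types backends use:

```go
// Hypothetical option definition - illustrative only.
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "remote", // invented backend name
		Description: "Example Remote",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name: "region",
			// Single-sentence summary first, then "\n\n", then the details.
			Help: "Region to connect to.\n\nThis paragraph only appears in " +
				"rclone config and the docs, not in the flag help.",
			Examples: []fs.OptionExample{{
				Value: "eu",
				Help:  "Europe", // no trailing full stop for enumeration text
			}, {
				Value: "us",
				Help:  "United States",
			}},
		}},
	})
}
```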

When writing documentation for an entirely new backend,
see [backend documentation](#backend-documentation).

If you are updating documentation for a command, you must do that in the
command source code, e.g. `cmd/ls/ls.go`. Write flag help strings as a single
sentence on a single line, without a period/full stop character at the end,
as it will be combined unmodified with other information (such as any default
value).

Note that you can use
[GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
for small changes in the docs, which makes it very easy. Just remember the
caveat when linking to header anchors, noted above, which means that GitHub's
Markdown preview may not be an entirely reliable verification of the results.

## Making a release

@@ -350,13 +449,13 @@ change will get linked into the issue.

Here is an example of a short commit message:

```text
drive: add team drive support - fixes #885
```

And here is an example of a longer one:

```text
mount: fix hang on errored upload

In certain circumstances, if an upload failed then the mount could hang
```

@@ -379,7 +478,9 @@ To add a dependency `github.com/ncw/new_dependency` see the
instructions below. These will fetch the dependency and add it to
`go.mod` and `go.sum`.

```sh
go get github.com/ncw/new_dependency
```

You can add constraints on that package when doing `go get` (see the
go docs linked above), but don't unless you really need to.
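
If you do need a constraint, the standard `go get` version syntax applies
(the version below is just a placeholder):

```sh
go get github.com/ncw/new_dependency@v1.2.3
```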

@@ -391,7 +492,9 @@ and `go.sum` in the same commit as your other changes.

If you need to update a dependency then run

```sh
go get golang.org/x/crypto
```

Check in a single commit as above.

@@ -434,25 +537,38 @@ remote or an fs.

### Getting going

- Create `backend/remote/remote.go` (copy this from a similar remote)
  - box is a good one to start from if you have a directory-based remote (and
    shows how to use the directory cache)
  - b2 is a good one to start from if you have a bucket-based remote
- Add your remote to the imports in `backend/all/all.go` (see the sketch
  after this list)
- HTTP based remotes are easiest to maintain if they use rclone's
  [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but
  if there is a really good Go SDK from the provider then use that instead.
- Try to implement as many optional methods as possible as it makes the remote
  more usable.
- Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to
  make sure we can encode any path name and `rclone info` to help determine the
  encodings needed
  - `rclone purge -v TestRemote:rclone-info`
  - `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
  - `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
  - open `remote.csv` in a spreadsheet and examine
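
The `backend/all/all.go` change is just a blank import, which runs the
backend's `init()` and therefore its `fs.Register` call - a sketch with an
invented backend name:

```go
// In backend/all/all.go - the blank import registers the backend.
import (
	_ "github.com/rclone/rclone/backend/remote" // "remote" is a placeholder
)
```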

### Guidelines for a speedy merge

- **Do** use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest)
  if you are implementing a REST like backend and parsing XML/JSON in the backend.
- **Do** use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp)
  if your backend is HTTP based - this adds features like `--dump bodies`,
  `--tpslimit`, `--user-agent` without you having to code anything!
  (See the sketch after this list.)
- **Do** follow your example backend exactly - use the same code order, function
  names, layout, structure. **Don't** move stuff around and **Don't** delete the
  comments.
- **Do not** split your backend up into `fs.go` and `object.go` (there are a few
  backends like that - don't follow them!)
- **Do** put your API type definitions in a separate file - by preference `api/types.go`
- **Remember** we have >50 backends to maintain so keeping them as similar as
  possible to each other is a high priority!
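
As a rough sketch of that pattern inside a backend's `NewFs` (assuming the
usual `fshttp.NewClient`/`rest.NewClient` signatures; `f.srv` and `rootURL`
are illustrative names):

```go
// The http.Client from fs/fshttp respects --dump bodies, --tpslimit
// and --user-agent with no extra backend code.
client := fshttp.NewClient(ctx)
f.srv = rest.NewClient(client).SetRoot(rootURL)
```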

### Unit tests

@@ -463,19 +579,20 @@ remote or an fs.

### Integration tests

- Add your backend to `fstest/test_all/config.yaml` (a sketch of an entry
  follows below)
- Once you've done that then you can use the integration test framework from
  the project root:
  - go install ./...
  - test_all -backends remote

Or if you want to run the integration tests manually:

- Make sure integration tests pass with
  - `cd fs/operations`
  - `go test -v -remote TestRemote:`
  - `cd fs/sync`
  - `go test -v -remote TestRemote:`
- If your remote defines `ListR` check with this also
  - `go test -v -remote TestRemote: -fast-list`

See the [testing](#testing) section for more information on integration tests.
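
An entry in `fstest/test_all/config.yaml` looks roughly like this - copy an
existing entry from that file rather than this sketch, which assumes the
common `backend`/`remote`/`fastlist` fields:

```yaml
# Hypothetical entry for a new backend
- backend:  "remote"
  remote:   "TestRemote:"
  fastlist: false
```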

@@ -487,10 +604,13 @@ alphabetical order of full name of remote (e.g. `drive` is ordered as
`Google Drive`) but with the local file system last.

- `README.md` - main GitHub page
- `docs/content/remote.md` - main docs page (note the backend options are
  automatically added to this file with `make backenddocs`)
  - make sure this has the `autogenerated options` comments in (see your
    reference backend docs)
  - update them in your backend with `bin/make_backend_docs.py remote`
- `docs/content/overview.md` - overview docs - add an entry into the Features
  table and the Optional Features table.
- `docs/content/docs.md` - list of remotes in config section
- `docs/content/_index.md` - front page of rclone.org
- `docs/layouts/chrome/navbar.html` - add it to the website navigation

@@ -506,21 +626,22 @@ It is quite easy to add a new S3 provider to rclone.
You'll need to modify the following files

- `backend/s3/s3.go`
  - Add the provider to `providerOption` at the top of the file (a sketch
    follows this list)
  - Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
  - Exclude your provider from generic config questions (e.g. `region` and `endpoint`).
  - Add the provider to the `setQuirks` function - see the documentation there.
- `docs/content/s3.md`
  - Add the provider at the top of the page.
  - Add a section about the provider linked from there.
  - Make sure this is in alphabetical order in the `Providers` section.
  - Add a transcript of a trial `rclone config` session
  - Edit the transcript to remove things which might change in subsequent versions
  - **Do not** alter or add to the autogenerated parts of `s3.md`
  - **Do not** run `make backenddocs` or `bin/make_backend_docs.py s3`
- `README.md` - this is the home page in GitHub
  - Add the provider and a link to the section you wrote in `docs/content/s3.md`
- `docs/content/_index.md` - this is the home page of rclone.org
  - Add the provider and a link to the section you wrote in `docs/content/s3.md`
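
For the `providerOption` change, the new entry is one more example value in
the provider list - a hypothetical sketch (copy the shape of an existing
entry; the provider name is invented):

```go
// Added to providerOption's Examples in backend/s3/s3.go,
// keeping the list alphabetical.
{
	Value: "MyProvider",
	Help:  "MyProvider Object Storage",
},
```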

When adding the provider, endpoints, quirks, docs, etc. keep them in
alphabetical order by `Provider` name, but with `AWS` first and

@@ -541,31 +662,34 @@ For an example of adding an s3 provider see [eb3082a1](https://github.com/rclone

## Writing a plugin

New features (backends, commands) can also be added "out-of-tree", through Go
plugins. Changes will be kept in a dynamically loaded file instead of being
compiled into the main binary. This is useful if you can't merge your changes
upstream or don't want to maintain a fork of rclone.

### Usage

- Naming
  - Plugin names must have the pattern `librcloneplugin_KIND_NAME.so`.
  - `KIND` should be one of `backend`, `command` or `bundle`.
  - Example: A plugin with backend support for PiFS would be called
    `librcloneplugin_backend_pifs.so`.
- Loading
  - Supported on macOS & Linux as of now. ([Go issue for Windows support](https://github.com/golang/go/issues/19282))
  - Supported on rclone v1.50 or greater.
  - All plugins in the folder specified by variable `$RCLONE_PLUGIN_PATH` are loaded.
  - If this variable doesn't exist, plugin support is disabled.
  - Plugins must be compiled against the exact version of rclone to work.
    (The rclone used during building the plugin must be the same as the source
    of rclone)

### Building

To turn your existing additions into a Go plugin, move them to an external repository
and change the top-level package name to `main`.

Check `rclone --version` and make sure that the plugin's rclone dependency and
host Go version match.

Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.
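
Putting the naming and loading rules together, a build-and-load session might
look like this (using the PiFS example name from above; paths are
illustrative):

```sh
# Name must follow the librcloneplugin_KIND_NAME.so pattern
go build -buildmode=plugin -o librcloneplugin_backend_pifs.so .

# rclone loads all plugins from the folder in $RCLONE_PLUGIN_PATH
export RCLONE_PLUGIN_PATH=$PWD
rclone config   # the pifs backend should now be offered
```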

@@ -583,6 +707,6 @@ add them out of tree.

This may be easier than using a plugin and is supported on all
platforms, not just macOS and Linux.

This is explained further in <https://github.com/rclone/rclone_out_of_tree_example>
which has an example of an out of tree backend `ram` (which is a
renamed version of the `memory` backend).

MAINTAINERS.md

@@ -1,4 +1,4 @@
# Maintainers guide for rclone

Current active maintainers of rclone are:

@@ -24,80 +24,108 @@ Current active maintainers of rclone are:
| Dan McArdle | @dmcardle | gitannex |
| Sam Harrison | @childish-sambino | filescom |

## This is a work in progress draft

This is a guide for how to be an rclone maintainer. This is mostly a write-up
of what I (@ncw) attempt to do.

## Triaging Tickets

When a ticket comes in it should be triaged. This means it should be classified
by adding labels and placed into a milestone. Quite a lot of tickets need a bit
of back and forth to determine whether it is a valid ticket so tickets may
remain without labels or milestone for a while.

Rclone uses labels like this:

- `bug` - a definitely verified bug
- `can't reproduce` - a problem which we can't reproduce
- `doc fix` - a bug in the documentation - if users need help understanding the
  docs add this label
- `duplicate` - normally close these and ask the user to subscribe to the original
- `enhancement: new remote` - a new rclone backend
- `enhancement` - a new feature
- `FUSE` - to do with `rclone mount` command
- `good first issue` - mark these if you find a small self-contained issue -
  these get shown to new visitors to the project
- `help wanted` - mark these if you find a self-contained issue - these get
  shown to new visitors to the project
- `IMPORTANT` - note to maintainers not to forget to fix this for the release
- `maintenance` - internal enhancement, code re-organisation, etc.
- `Needs Go 1.XX` - waiting for that version of Go to be released
- `question` - not a `bug` or `enhancement` - direct to the forum for next time
- `Remote: XXX` - which rclone backend this affects
- `thinking` - not decided on the course of action yet

If it turns out to be a bug or an enhancement it should be tagged as such, with
the appropriate other tags. Don't forget the "good first issue" tag to give new
contributors something easy to do to get going.

When a ticket is tagged it should be added to a milestone, either the next
release, the one after, Soon or Help Wanted. Bugs can be added to the
"Known Bugs" milestone if they aren't planned to be fixed or need to wait for
something (e.g. the next Go release).

The milestones have these meanings:

- v1.XX - stuff we would like to fit into this release
- v1.XX+1 - stuff we are leaving until the next release
- Soon - stuff we think is a good idea - waiting to be scheduled for a release
- Help wanted - blue sky stuff that might get moved up, or someone could help with
- Known bugs - bugs waiting on external factors or we aren't going to fix for
  the moment

Tickets [with no milestone](https://github.com/rclone/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile)
are good candidates for ones that have slipped between the gaps and need
following up.

## Closing Tickets

Close tickets as soon as you can - make sure they are tagged with a release.
Post a link to a beta in the ticket with the fix in, asking for feedback.

## Pull requests

Try to process pull requests promptly!

Merging pull requests on GitHub itself works quite well nowadays so you can
squash and rebase or rebase pull requests. rclone doesn't use merge commits.
Use the squash and rebase option if you need to edit the commit message.

After merging the commit, in your local master branch, do `git pull` then run
`bin/update-authors.py` to update the authors file then `git push`.
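
Spelled out, that post-merge sequence is:

```sh
git pull                # update local master with the merged commit
bin/update-authors.py   # regenerate the authors file
git push
```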

Sometimes pull requests need to be left open for a while - this is especially
true of contributions of new backends which take a long time to get right.

## Merges

If you are merging a branch locally then do `git merge --ff-only branch-name` to
avoid a merge commit. You'll need to rebase the branch if it doesn't merge cleanly.

## Release cycle

Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer
if there is something big to merge that didn't stabilize properly or for personal
reasons.

High impact regressions should be fixed before the next release.

Near the start of the release cycle, the dependencies should be updated with
`make update` to give time for bugs to surface.

Towards the end of the release cycle try not to merge anything too big, to let
things settle down.

Follow the instructions in RELEASE.md for making the release. Note that the
testing part is the most time-consuming, often needing several rounds of test
and fix depending on exactly how many new features rclone has gained.

## Mailing list

There is now an invite-only mailing list for rclone developers `rclone-dev` on
Google Groups.

## TODO

I should probably make a <dev@rclone.org> to register with cloud providers.

MANUAL.html (generated) - diff suppressed because it is too large

MANUAL.txt (generated) - diff suppressed because it is too large

Makefile

@@ -144,6 +144,7 @@ MANUAL.txt: MANUAL.md
	pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt

commanddocs: rclone
	go generate ./lib/transform
	-@rmdir -p '$$HOME/.config/rclone'
	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
	@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)

@@ -243,7 +244,7 @@ fetch_binaries:
	rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/

serve: website
	cd docs && hugo server --logLevel info -w --disableFastRender --ignoreCache

tag: retag doc
	bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new

README.md

@@ -1,6 +1,6 @@
<!-- markdownlint-disable-next-line first-line-heading no-inline-html -->
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
<!-- markdownlint-disable-next-line no-inline-html -->
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)

[Website](https://rclone.org) |

@@ -18,102 +18,105 @@

# Rclone

Rclone *("rsync for cloud storage")* is a command-line program to sync files and
directories to and from different cloud storage providers.

## Storage providers

- 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
- Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
- Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
- Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
- ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
- Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
- Box [:page_facing_up:](https://rclone.org/box/)
- Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
- China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
- Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
- Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
- DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
- Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
- Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
- Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
- Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
- Exaba [:page_facing_up:](https://rclone.org/s3/#exaba)
- Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
- FileLu [:page_facing_up:](https://rclone.org/filelu/)
- Files.com [:page_facing_up:](https://rclone.org/filescom/)
- FlashBlade [:page_facing_up:](https://rclone.org/s3/#pure-storage-flashblade)
- FTP [:page_facing_up:](https://rclone.org/ftp/)
- GoFile [:page_facing_up:](https://rclone.org/gofile/)
- Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
- Google Drive [:page_facing_up:](https://rclone.org/drive/)
- Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
- HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
- Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
- HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
- HTTP [:page_facing_up:](https://rclone.org/http/)
- Huawei Cloud Object Storage Service (OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
- iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/)
- ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
- Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
- Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
- IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
- IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
- Koofr [:page_facing_up:](https://rclone.org/koofr/)
- Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
- Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
- Linkbox [:page_facing_up:](https://rclone.org/linkbox)
- Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode)
- Magalu Object Storage [:page_facing_up:](https://rclone.org/s3/#magalu)
- Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
- Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
- MEGA [:page_facing_up:](https://rclone.org/mega/)
- MEGA S4 Object Storage [:page_facing_up:](https://rclone.org/s3/#mega)
- Memory [:page_facing_up:](https://rclone.org/memory/)
- Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
- Microsoft Azure Files Storage [:page_facing_up:](https://rclone.org/azurefiles/)
- Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
- Minio [:page_facing_up:](https://rclone.org/s3/#minio)
- Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
- Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
- OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
- OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
- Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
- Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
- Outscale [:page_facing_up:](https://rclone.org/s3/#outscale)
- OVHcloud Object Storage (Swift) [:page_facing_up:](https://rclone.org/swift/)
- OVHcloud Object Storage (S3-compatible) [:page_facing_up:](https://rclone.org/s3/#ovhcloud)
- ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
- pCloud [:page_facing_up:](https://rclone.org/pcloud/)
- Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
- PikPak [:page_facing_up:](https://rclone.org/pikpak/)
- Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/)
- premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
- put.io [:page_facing_up:](https://rclone.org/putio/)
- Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
- QingStor [:page_facing_up:](https://rclone.org/qingstor/)
- Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
- Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
- Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
- RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
- rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net)
- Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
- Seafile [:page_facing_up:](https://rclone.org/seafile/)
- Seagate Lyve Cloud [:page_facing_up:](https://rclone.org/s3/#lyve)
- SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
- Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
- SFTP [:page_facing_up:](https://rclone.org/sftp/)
- SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
- StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
- Storj [:page_facing_up:](https://rclone.org/storj/)
- SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
- Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
- Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
- Uloz.to [:page_facing_up:](https://rclone.org/ulozto/)
- Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
- WebDAV [:page_facing_up:](https://rclone.org/webdav/)
- Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
- Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
- Zata.ai [:page_facing_up:](https://rclone.org/s3/#Zata)
- The local filesystem [:page_facing_up:](https://rclone.org/local/)

Please see [the full list of all storage providers and their features](https://rclone.org/overview/)

@@ -121,50 +124,54 @@ Please see [the full list of all storage providers and their features](https://r

These backends adapt or modify other storage providers

- Alias: rename existing remotes [:page_facing_up:](https://rclone.org/alias/)
- Cache: cache remotes (DEPRECATED) [:page_facing_up:](https://rclone.org/cache/)
- Chunker: split large files [:page_facing_up:](https://rclone.org/chunker/)
- Combine: combine multiple remotes into a directory tree [:page_facing_up:](https://rclone.org/combine/)
- Compress: compress files [:page_facing_up:](https://rclone.org/compress/)
- Crypt: encrypt files [:page_facing_up:](https://rclone.org/crypt/)
- Hasher: hash files [:page_facing_up:](https://rclone.org/hasher/)
- Union: join multiple remotes to work together [:page_facing_up:](https://rclone.org/union/)

## Features

- MD5/SHA-1 hashes checked at all times for file integrity
- Timestamps preserved on files
- Partial syncs supported on a whole file basis
- [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed
  files
- [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory
  identical
- [Bisync](https://rclone.org/bisync/) (two way) to keep two directories in sync
  bidirectionally
- [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash
  equality
- Can sync to and from network, e.g. two different cloud accounts
- Optional large file chunking ([Chunker](https://rclone.org/chunker/))
- Optional transparent compression ([Compress](https://rclone.org/compress/))
- Optional encryption ([Crypt](https://rclone.org/crypt/))
- Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
- Multi-threaded downloads to local disk
- Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files
  over HTTP/WebDAV/FTP/SFTP/DLNA

## Installation & documentation

Please see the [rclone website](https://rclone.org/) for:

- [Installation](https://rclone.org/install/)
- [Documentation & configuration](https://rclone.org/docs/)
- [Changelog](https://rclone.org/changelog/)
- [FAQ](https://rclone.org/faq/)
- [Storage providers](https://rclone.org/overview/)
- [Forum](https://forum.rclone.org/)
- ...and more

## Downloads

- <https://rclone.org/downloads/>

## License

This is free software under the terms of the MIT license (check the
[COPYING file](/COPYING) included in this package).

RELEASE.md

@@ -4,52 +4,55 @@ This file describes how to make the various kinds of releases

## Extra required software for making a release

- [gh the github cli](https://github.com/cli/cli) for uploading packages
- pandoc for making the html and man pages

## Making a release

- git checkout master # see below for stable branch
- git pull # IMPORTANT
- git status - make sure everything is checked in
- Check GitHub actions build for master is Green
- make test # see integration test server or run locally
- make tag
- edit docs/content/changelog.md # make sure to remove duplicate logs from point
  releases
- make tidy
- make doc
- git status - to check for new man pages - git add them
- git commit -a -v -m "Version v1.XX.0"
- make retag
- git push origin # without --follow-tags so it doesn't push the tag if it fails
- git push --follow-tags origin
- \# Wait for the GitHub builds to complete then...
- make fetch_binaries
- make tarball
- make vendorball
- make sign_upload
- make check_sign
- make upload
- make upload_website
- make upload_github
- make startdev # make startstable for stable branch
- \# announce with forum post, twitter post, patreon post

## Update dependencies

Early in the next release cycle update the dependencies.

- Review any pinned packages in go.mod and remove if possible
- `make updatedirect`
- `make GOTAGS=cmount`
- `make compiletest`
- Fix anything which doesn't compile at this point and commit changes here
- `git commit -a -v -m "build: update all dependencies"`

If the `make updatedirect` upgrades the version of go in the `go.mod`

```text
go 1.22.0
```

then go to manual mode. `go1.22` here is the lowest supported version
in the `go.mod`.

@@ -57,7 +60,7 @@ If `make updatedirect` added a `toolchain` directive then remove it.
We don't want to force a toolchain on our users. Linux packagers are
often using a version of Go that is a few versions out of date.

```sh
go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
go get -d $(cat /tmp/potential-upgrades)
go mod tidy -go=1.22 -compat=1.22
```

@@ -67,7 +70,7 @@ If the `go mod tidy` fails use the output from it to remove the
package which can't be upgraded from `/tmp/potential-upgrades` when
done

```sh
git co go.mod go.sum
```
Optionally upgrade the direct and indirect dependencies. This is very
likely to fail if the manual method was used above - in that case
ignore it as it is too time consuming to fix.

- `make update`
- `make GOTAGS=cmount`
- `make compiletest`
- roll back any updates which didn't compile (see the sketch below)
- `git commit -a -v --amend`
- **NB** watch out for this changing the default go version in `go.mod`
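
Rolling back a single dependency that broke the build is just re-pinning
it; a minimal sketch, with a hypothetical module path and version:

```sh
# hypothetical module path and version - substitute the one that failed to compile
go get github.com/example/dependency@v1.2.3
go mod tidy
make compiletest
```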

Note that `make update` updates all direct and indirect dependencies
and there can occasionally be forwards compatibility problems with
doing that, so it may be necessary to roll dependencies back to the
versions chosen by `make updatedirect` using the method above.

The above procedure will not upgrade major versions, so v2 to v3.
However this tool can show which major versions might need to be
upgraded:

```sh
go run github.com/icholy/gomajor@latest list -major
```

Expect API breakage when updating major versions.
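
Major version upgrades have to be done by hand, since the import path
changes with the major version. A minimal sketch, assuming a
hypothetical module reported by gomajor:

```sh
# hypothetical module - substitute one from the gomajor output
go get github.com/example/sdk/v3@latest
# update the import paths from .../v2 to .../v3, fix any API breakage, then
go mod tidy
make compiletest
```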

At some point after the release run

```sh
bin/tidy-beta v1.55
```

where the version number is that of a couple of releases ago, to remove
old beta binaries.
If rclone needs a point release due to some horrendous bug:

Set vars

- BASE_TAG=v1.XX # e.g. v1.52
- NEW_TAG=${BASE_TAG}.Y # e.g. v1.52.1
- echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1

First make the release branch. If this is a second point release then
this will be done already.

- git co -b ${BASE_TAG}-stable ${BASE_TAG}.0
- make startstable

Now

- git co ${BASE_TAG}-stable
- git cherry-pick any fixes (see the sketch after this list)
- make startstable
- Do the steps as above
- git co master
- `#` cherry pick the changes to the changelog - check the diff to make sure it is correct
- git checkout ${BASE_TAG}-stable docs/content/changelog.md
- git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
- git push
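
The cherry-picking itself is plain git; a minimal sketch with
placeholder commit hashes:

```sh
git co ${BASE_TAG}-stable
# placeholder hashes - take the real ones from git log master
git cherry-pick -x 1a2b3c4
git cherry-pick -x 5d6e7f8
```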

## Sponsor logos

If updating the website note that the sponsor logos have been moved out of the
main repository.

You will need to checkout `/docs/static/img/logos` from <https://github.com/rclone/third-party-logos>
which is a private repo containing artwork from sponsors.

## Update the website between releases

Create an update website branch based off the last release

```sh
git co -b update-website
```

If the branch already exists, double check there are no commits that need saving.

Now reset the branch to the last release

```sh
git reset --hard v1.64.0
```

Create the changes, check them in, test with `make serve` then

```sh
make upload_test_website
```

Check out <https://test.rclone.org> and when happy

```sh
make upload_website
```

Cherry pick any changes back to master and the stable branch if it is active.

To do a basic build of rclone's docker image to debug builds locally:

```sh
docker buildx build --load -t rclone/rclone:testing --progress=plain .
docker run --rm rclone/rclone:testing version
```

To test the multiplatform build

```sh
docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 .
```

To make a full build then set the tags correctly and add `--push`

Note that you can't only build one architecture - you need to build them all.

```sh
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
```
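
To confirm that the pushed image really contains every architecture,
inspect the manifest list:

```sh
docker buildx imagetools inspect rclone/rclone:latest
```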

@@ -51,6 +51,7 @@ import (
     "github.com/rclone/rclone/lib/env"
     "github.com/rclone/rclone/lib/multipart"
     "github.com/rclone/rclone/lib/pacer"
+    "github.com/rclone/rclone/lib/pool"
     "golang.org/x/sync/errgroup"
 )
@@ -2670,6 +2671,13 @@ func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
         return -1, err
     }
 
+    // Only account after the checksum reads have been done
+    if do, ok := reader.(pool.DelayAccountinger); ok {
+        // To figure out this number, do a transfer and if the accounted size is 0 or a
+        // multiple of what it should be, increase or decrease this number.
+        do.DelayAccounting(2)
+    }
+
     // Upload the block, with MD5 for check
     m := md5.New()
     currentChunkSize, err := io.Copy(m, reader)
@@ -453,7 +453,7 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
             return nil, fmt.Errorf("create new shared key credential failed: %w", err)
         }
     case opt.UseAZ:
-        var options = azidentity.AzureCLICredentialOptions{}
+        options := azidentity.AzureCLICredentialOptions{}
         cred, err = azidentity.NewAzureCLICredential(&options)
         fmt.Println(cred)
         if err != nil {
@@ -550,7 +550,7 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
     case opt.UseMSI:
         // Specifying a user-assigned identity. Exactly one of the above IDs must be specified.
         // Validate and ensure exactly one is set. (To do: better validation.)
-        var b2i = map[bool]int{false: 0, true: 1}
+        b2i := map[bool]int{false: 0, true: 1}
         set := b2i[opt.MSIClientID != ""] + b2i[opt.MSIObjectID != ""] + b2i[opt.MSIResourceID != ""]
         if set > 1 {
             return nil, errors.New("more than one user-assigned identity ID is set")
@@ -583,7 +583,6 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
     token, err := msiCred.GetToken(context.Background(), policy.TokenRequestOptions{
         Scopes: []string{"api://AzureADTokenExchange"},
     })
-
     if err != nil {
         return "", fmt.Errorf("failed to acquire MSI token: %w", err)
     }
@@ -855,7 +854,7 @@ func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
         return entries, err
     }
 
-    var opt = &directory.ListFilesAndDirectoriesOptions{
+    opt := &directory.ListFilesAndDirectoriesOptions{
         Include: directory.ListFilesInclude{
             Timestamps: true,
         },
@@ -1014,6 +1013,10 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
         SMBProperties: &file.SMBProperties{
             LastWriteTime: &t,
         },
+        HTTPHeaders: &file.HTTPHeaders{
+            ContentMD5:  o.md5,
+            ContentType: &o.contentType,
+        },
     }
     _, err := o.fileClient().SetHTTPHeaders(ctx, &opt)
     if err != nil {
@@ -1310,10 +1313,29 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
     }
     srcURL := srcObj.fileClient().URL()
     fc := f.fileClient(remote)
-    _, err = fc.StartCopyFromURL(ctx, srcURL, &opt)
+    startCopy, err := fc.StartCopyFromURL(ctx, srcURL, &opt)
     if err != nil {
         return nil, fmt.Errorf("Copy failed: %w", err)
     }
+
+    // Poll for completion if necessary
+    //
+    // The for loop is never executed for same storage account copies.
+    copyStatus := startCopy.CopyStatus
+    var properties file.GetPropertiesResponse
+    pollTime := 100 * time.Millisecond
+    for copyStatus != nil && string(*copyStatus) == string(file.CopyStatusTypePending) {
+        time.Sleep(pollTime)
+        properties, err = fc.GetProperties(ctx, &file.GetPropertiesOptions{})
+        if err != nil {
+            return nil, err
+        }
+        copyStatus = properties.CopyStatus
+        pollTime = min(2*pollTime, time.Second)
+    }
+
     dstObj, err := f.NewObject(ctx, remote)
     if err != nil {
         return nil, fmt.Errorf("Copy: NewObject failed: %w", err)
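
The polling loop above backs off exponentially and caps the interval at
one second via Go's built-in `min` (Go 1.21+). A self-contained sketch
of the resulting schedule:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	pollTime := 100 * time.Millisecond
	for i := 0; i < 6; i++ {
		fmt.Println(pollTime) // 100ms 200ms 400ms 800ms 1s 1s
		pollTime = min(2*pollTime, time.Second)
	}
}
```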

@@ -2192,13 +2192,17 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
         return info, nil, err
     }
 
+    up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
+    if err != nil {
+        return info, nil, err
+    }
+
     info = fs.ChunkWriterInfo{
-        ChunkSize:   int64(f.opt.ChunkSize),
+        ChunkSize:   up.chunkSize,
         Concurrency: o.fs.opt.UploadConcurrency,
         //LeavePartsOnError: o.fs.opt.LeavePartsOnError,
     }
-    up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
-    return info, up, err
+    return info, up, nil
 }
 
 // Remove an object
@@ -125,10 +125,21 @@ type FolderItems struct {
     Offset     int     `json:"offset"`
     Limit      int     `json:"limit"`
     NextMarker *string `json:"next_marker,omitempty"`
-    Order      []struct {
-        By        string `json:"by"`
-        Direction string `json:"direction"`
-    } `json:"order"`
+    // There is some confusion about how this is actually
+    // returned. The []struct has worked for many years, but in
+    // https://github.com/rclone/rclone/issues/8776 box was
+    // returning it not as a list. We don't actually use
+    // this so comment it out.
+    //
+    // Order struct {
+    //     By        string `json:"by"`
+    //     Direction string `json:"direction"`
+    // } `json:"order"`
+    //
+    // Order []struct {
+    //     By        string `json:"by"`
+    //     Direction string `json:"direction"`
+    // } `json:"order"`
 }
 
 // Parent defined the ID of the parent directory
@@ -271,9 +282,9 @@ type User struct {
     ModifiedAt    time.Time `json:"modified_at"`
     Language      string    `json:"language"`
     Timezone      string    `json:"timezone"`
-    SpaceAmount   int64     `json:"space_amount"`
-    SpaceUsed     int64     `json:"space_used"`
-    MaxUploadSize int64     `json:"max_upload_size"`
+    SpaceAmount   float64   `json:"space_amount"`
+    SpaceUsed     float64   `json:"space_used"`
+    MaxUploadSize float64   `json:"max_upload_size"`
     Status        string    `json:"status"`
     JobTitle      string    `json:"job_title"`
     Phone         string    `json:"phone"`
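
The `float64` switch matters because `encoding/json` will not decode a
number written in exponent notation into an integer field, and very
large quotas can plausibly come back that way. A minimal sketch with an
illustrative payload (not a captured Box response):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type user struct {
	SpaceAmount float64 `json:"space_amount"`
}

func main() {
	// decoding 1.0E+15 into an int64 field would fail;
	// into a float64 field it succeeds
	var u user
	err := json.Unmarshal([]byte(`{"space_amount": 1.0E+15}`), &u)
	fmt.Println(u.SpaceAmount, err) // 1e+15 <nil>
}
```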

@@ -241,18 +241,22 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
         DirModTimeUpdatesOnWrite: true,
         PartialUploads:           true,
     }).Fill(ctx, f)
-    canMove := true
+    canMove, slowHash := true, false
     for _, u := range f.upstreams {
         features = features.Mask(ctx, u.f) // Mask all upstream fs
         if !operations.CanServerSideMove(u.f) {
             canMove = false
         }
+        slowHash = slowHash || u.f.Features().SlowHash
     }
     // We can move if all remotes support Move or Copy
     if canMove {
         features.Move = f.Move
     }
 
+    // If any of upstreams are SlowHash, propagate it
+    features.SlowHash = slowHash
+
     // Enable ListR when upstreams either support ListR or is local
     // But not when all upstreams are local
     if features.ListR == nil {

@@ -1446,9 +1446,9 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
         }
     }
     usage = &fs.Usage{
-        Total: fs.NewUsageValue(int64(total)),        // quota of bytes that can be used
-        Used:  fs.NewUsageValue(int64(used)),         // bytes in use
-        Free:  fs.NewUsageValue(int64(total - used)), // bytes which can be uploaded before reaching the quota
+        Total: fs.NewUsageValue(total),        // quota of bytes that can be used
+        Used:  fs.NewUsageValue(used),         // bytes in use
+        Free:  fs.NewUsageValue(total - used), // bytes which can be uploaded before reaching the quota
     }
     return usage, nil
 }
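
These `int64(...)` casts can only be dropped if `fs.NewUsageValue` now
accepts other integer types; a generic helper along the following lines
would allow that. This is a sketch of the idea, not rclone's actual
signature:

```go
package main

import "fmt"

// newUsageValue returns a pointer to the value converted to int64,
// accepting any integer type - the shape a generic fs.NewUsageValue
// would presumably have.
func newUsageValue[T ~int | ~int32 | ~int64 | ~uint32 | ~uint64](value T) *int64 {
	i := int64(value)
	return &i
}

func main() {
	var capacity uint64 = 1 << 40
	fmt.Println(*newUsageValue(capacity)) // 1099511627776
}
```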

@@ -14,7 +14,7 @@ import (
 )
 
 // errFileNotFound represents the file not found error
-var errFileNotFound error = errors.New("file not found")
+var errFileNotFound = errors.New("file not found")
 
 // getFileCode retrieves the file code for a given file path
 func (f *Fs) getFileCode(ctx context.Context, filePath string) (string, error) {

@@ -163,6 +163,16 @@ Enabled by default. Use 0 to disable.`,
             Help:     "Disable TLS 1.3 (workaround for FTP servers with buggy TLS)",
             Default:  false,
             Advanced: true,
+        }, {
+            Name: "allow_insecure_tls_ciphers",
+            Help: `Allow insecure TLS ciphers
+
+Setting this flag will allow the usage of the following TLS ciphers in addition to the secure defaults:
+
+- TLS_RSA_WITH_AES_128_GCM_SHA256
+`,
+            Default:  false,
+            Advanced: true,
         }, {
             Name: "shut_timeout",
             Help: "Maximum time to wait for data connection closing status.",
@@ -236,29 +246,30 @@ a write only folder.
 
 // Options defines the configuration for this backend
 type Options struct {
     Host                    string               `config:"host"`
     User                    string               `config:"user"`
     Pass                    string               `config:"pass"`
     Port                    string               `config:"port"`
     TLS                     bool                 `config:"tls"`
     ExplicitTLS             bool                 `config:"explicit_tls"`
     TLSCacheSize            int                  `config:"tls_cache_size"`
     DisableTLS13            bool                 `config:"disable_tls13"`
+    AllowInsecureTLSCiphers bool                 `config:"allow_insecure_tls_ciphers"`
     Concurrency             int                  `config:"concurrency"`
     SkipVerifyTLSCert       bool                 `config:"no_check_certificate"`
     DisableEPSV             bool                 `config:"disable_epsv"`
     DisableMLSD             bool                 `config:"disable_mlsd"`
     DisableUTF8             bool                 `config:"disable_utf8"`
     WritingMDTM             bool                 `config:"writing_mdtm"`
     ForceListHidden         bool                 `config:"force_list_hidden"`
     IdleTimeout             fs.Duration          `config:"idle_timeout"`
     CloseTimeout            fs.Duration          `config:"close_timeout"`
     ShutTimeout             fs.Duration          `config:"shut_timeout"`
     AskPassword             bool                 `config:"ask_password"`
     Enc                     encoder.MultiEncoder `config:"encoding"`
     SocksProxy              string               `config:"socks_proxy"`
     HTTPProxy               string               `config:"http_proxy"`
     NoCheckUpload           bool                 `config:"no_check_upload"`
 }
 
 // Fs represents a remote FTP server
@@ -272,6 +283,7 @@ type Fs struct {
     user     string
     pass     string
     dialAddr string
+    tlsConf  *tls.Config // default TLS client config
     poolMu   sync.Mutex
     pool     []*ftp.ServerConn
     drain    *time.Timer // used to drain the pool when we stop using the connections
@@ -397,9 +409,14 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
 func (f *Fs) tlsConfig() *tls.Config {
     var tlsConfig *tls.Config
     if f.opt.TLS || f.opt.ExplicitTLS {
-        tlsConfig = &tls.Config{
-            ServerName:         f.opt.Host,
-            InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
+        if f.tlsConf != nil {
+            tlsConfig = f.tlsConf.Clone()
+        } else {
+            tlsConfig = new(tls.Config)
+        }
+        tlsConfig.ServerName = f.opt.Host
+        if f.opt.SkipVerifyTLSCert {
+            tlsConfig.InsecureSkipVerify = true
         }
         if f.opt.TLSCacheSize > 0 {
             tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(f.opt.TLSCacheSize)
@@ -407,6 +424,14 @@ func (f *Fs) tlsConfig() *tls.Config {
         if f.opt.DisableTLS13 {
             tlsConfig.MaxVersion = tls.VersionTLS12
         }
+        if f.opt.AllowInsecureTLSCiphers {
+            var ids []uint16
+            // Read default ciphers
+            for _, cs := range tls.CipherSuites() {
+                ids = append(ids, cs.ID)
+            }
+            tlsConfig.CipherSuites = append(ids, tls.TLS_RSA_WITH_AES_128_GCM_SHA256)
+        }
     }
     return tlsConfig
 }
@@ -652,6 +677,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
         dialAddr: dialAddr,
         tokens:   pacer.NewTokenDispenser(opt.Concurrency),
         pacer:    fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+        tlsConf:  fshttp.NewTransport(ctx).TLSClientConfig,
     }
     f.features = (&fs.Features{
         CanHaveEmptyDirectories: true,
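
Appending the legacy cipher to the defaults, rather than replacing the
list, keeps the secure suites preferred. The pattern in isolation:

```go
package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	var ids []uint16
	// start from Go's secure default cipher suites
	for _, cs := range tls.CipherSuites() {
		ids = append(ids, cs.ID)
	}
	// then additionally allow the one insecure suite the backend option names
	cfg := &tls.Config{CipherSuites: append(ids, tls.TLS_RSA_WITH_AES_128_GCM_SHA256)}
	fmt.Println(len(cfg.CipherSuites))
}
```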
@@ -252,6 +252,9 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
         }, {
             Value: "us-east4",
             Help:  "Northern Virginia",
+        }, {
+            Value: "us-east5",
+            Help:  "Ohio",
         }, {
             Value: "us-west1",
             Help:  "Oregon",
@@ -371,9 +371,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
         return nil, err
     }
     return &fs.Usage{
-        Total: fs.NewUsageValue(int64(info.Capacity)),
-        Used:  fs.NewUsageValue(int64(info.Used)),
-        Free:  fs.NewUsageValue(int64(info.Remaining)),
+        Total: fs.NewUsageValue(info.Capacity),
+        Used:  fs.NewUsageValue(info.Used),
+        Free:  fs.NewUsageValue(info.Remaining),
     }, nil
 }
@@ -590,7 +590,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
         return "", err
     }
     bucket, bucketPath := f.split(remote)
-    return path.Join(f.opt.FrontEndpoint, "/download/", bucket, quotePath(bucketPath)), nil
+    return path.Join(f.opt.FrontEndpoint, "/download/", bucket, rest.URLPathEscapeAll(bucketPath)), nil
 }
 
 // Copy src to this remote using server-side copy operations.
@@ -622,7 +622,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (_ fs.Objec
         "x-archive-auto-make-bucket": "1",
         "x-archive-queue-derive":     "0",
         "x-archive-keep-old-version": "0",
-        "x-amz-copy-source":          quotePath(path.Join("/", srcBucket, srcPath)),
+        "x-amz-copy-source":          rest.URLPathEscapeAll(path.Join("/", srcBucket, srcPath)),
         "x-amz-metadata-directive":   "COPY",
         "x-archive-filemeta-sha1":    srcObj.sha1,
         "x-archive-filemeta-md5":     srcObj.md5,
@@ -778,7 +778,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
     // make a GET request to (frontend)/download/:item/:path
     opts := rest.Opts{
         Method:  "GET",
-        Path:    path.Join("/download/", o.fs.root, quotePath(o.fs.opt.Enc.FromStandardPath(o.remote))),
+        Path:    path.Join("/download/", o.fs.root, rest.URLPathEscapeAll(o.fs.opt.Enc.FromStandardPath(o.remote))),
         Options: optionsFixed,
     }
     err = o.fs.pacer.Call(func() (bool, error) {
@@ -1334,16 +1334,6 @@ func trimPathPrefix(s, prefix string, enc encoder.MultiEncoder) string {
     return enc.ToStandardPath(strings.TrimPrefix(s, prefix+"/"))
 }
 
-// mimics urllib.parse.quote() on Python; exclude / from url.PathEscape
-func quotePath(s string) string {
-    seg := strings.Split(s, "/")
-    newValues := []string{}
-    for _, v := range seg {
-        newValues = append(newValues, url.PathEscape(v))
-    }
-    return strings.Join(newValues, "/")
-}
-
 var (
     _ fs.Fs     = &Fs{}
     _ fs.Copier = &Fs{}
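
For reference, the removed helper escaped each segment with
`url.PathEscape` while keeping the `/` separators; `rest.URLPathEscapeAll`
presumably escapes a stricter set of characters in the whole path. A
standalone sketch of the old behaviour:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// quoteSegments reproduces the removed quotePath helper: escape each
// path segment but leave the "/" separators alone.
func quoteSegments(s string) string {
	seg := strings.Split(s, "/")
	for i, v := range seg {
		seg[i] = url.PathEscape(v)
	}
	return strings.Join(seg, "/")
}

func main() {
	fmt.Println(quoteSegments("dir name/file#1.txt")) // dir%20name/file%231.txt
}
```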

@@ -7,6 +7,7 @@ import (
     "errors"
     "fmt"
     "io"
+    iofs "io/fs"
     "os"
     "path"
     "path/filepath"
@@ -671,8 +672,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
         name := fi.Name()
         mode := fi.Mode()
         newRemote := f.cleanRemote(dir, name)
+        symlinkFlag := os.ModeSymlink
+        if runtime.GOOS == "windows" {
+            symlinkFlag |= os.ModeIrregular
+        }
         // Follow symlinks if required
-        if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
+        if f.opt.FollowSymlinks && (mode&symlinkFlag) != 0 {
             localPath := filepath.Join(fsDirPath, name)
             fi, err = os.Stat(localPath)
             // Quietly skip errors on excluded files and directories
@@ -694,13 +699,13 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
         if fi.IsDir() {
             // Ignore directories which are symlinks. These are junction points under windows which
             // are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
-            if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
+            if (mode&symlinkFlag) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
                 d := f.newDirectory(newRemote, fi)
                 entries = append(entries, d)
             }
         } else {
             // Check whether this link should be translated
-            if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
+            if f.opt.TranslateSymlinks && fi.Mode()&symlinkFlag != 0 {
                 newRemote += fs.LinkSuffix
             }
             // Don't include non directory if not included
@@ -837,7 +842,13 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
     } else if !fi.IsDir() {
         return fs.ErrorIsFile
     }
-    return os.Remove(localPath)
+    err := os.Remove(localPath)
+    if runtime.GOOS == "windows" && errors.Is(err, iofs.ErrPermission) { // https://github.com/golang/go/issues/26295
+        if os.Chmod(localPath, 0o600) == nil {
+            err = os.Remove(localPath)
+        }
+    }
+    return err
 }
 
 // Precision of the file system
backend/local/local_internal_windows_test.go (new file, 40 lines)
@@ -0,0 +1,40 @@
+//go:build windows
+
+package local
+
+import (
+    "context"
+    "path/filepath"
+    "runtime"
+    "syscall"
+    "testing"
+
+    "github.com/rclone/rclone/fs/operations"
+    "github.com/rclone/rclone/fstest"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+)
+
+// TestRmdirWindows tests that FILE_ATTRIBUTE_READONLY does not block Rmdir on windows.
+// Microsoft docs indicate that "This attribute is not honored on directories."
+// See https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants#file_attribute_readonly
+// and https://github.com/golang/go/issues/26295
+func TestRmdirWindows(t *testing.T) {
+    if runtime.GOOS != "windows" {
+        t.Skipf("windows only")
+    }
+    r := fstest.NewRun(t)
+    defer r.Finalise()
+
+    err := operations.Mkdir(context.Background(), r.Flocal, "testdir")
+    require.NoError(t, err)
+
+    ptr, err := syscall.UTF16PtrFromString(filepath.Join(r.Flocal.Root(), "testdir"))
+    require.NoError(t, err)
+
+    err = syscall.SetFileAttributes(ptr, uint32(syscall.FILE_ATTRIBUTE_DIRECTORY+syscall.FILE_ATTRIBUTE_READONLY))
+    require.NoError(t, err)
+
+    err = operations.Rmdir(context.Background(), r.Flocal, "testdir")
+    assert.NoError(t, err)
+}
@@ -946,9 +946,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
         return nil, fmt.Errorf("failed to get Mega Quota: %w", err)
     }
     usage := &fs.Usage{
-        Total: fs.NewUsageValue(int64(q.Mstrg)),           // quota of bytes that can be used
-        Used:  fs.NewUsageValue(int64(q.Cstrg)),           // bytes in use
-        Free:  fs.NewUsageValue(int64(q.Mstrg - q.Cstrg)), // bytes which can be uploaded before reaching the quota
+        Total: fs.NewUsageValue(q.Mstrg),           // quota of bytes that can be used
+        Used:  fs.NewUsageValue(q.Cstrg),           // bytes in use
+        Free:  fs.NewUsageValue(q.Mstrg - q.Cstrg), // bytes which can be uploaded before reaching the quota
    }
     return usage, nil
 }
@@ -12,6 +12,7 @@ import (
     "strings"
     "time"
 
+    "github.com/ncw/swift/v2"
     "github.com/oracle/oci-go-sdk/v65/common"
     "github.com/oracle/oci-go-sdk/v65/objectstorage"
     "github.com/rclone/rclone/fs"
@@ -33,9 +34,46 @@ func init() {
         NewFs:       NewFs,
         CommandHelp: commandHelp,
         Options:     newOptions(),
+        MetadataInfo: &fs.MetadataInfo{
+            System: systemMetadataInfo,
+            Help:   `User metadata is stored as opc-meta- keys.`,
+        },
     })
 }
 
+var systemMetadataInfo = map[string]fs.MetadataHelp{
+    "opc-meta-mode": {
+        Help:    "File type and mode",
+        Type:    "octal, unix style",
+        Example: "0100664",
+    },
+    "opc-meta-uid": {
+        Help:    "User ID of owner",
+        Type:    "decimal number",
+        Example: "500",
+    },
+    "opc-meta-gid": {
+        Help:    "Group ID of owner",
+        Type:    "decimal number",
+        Example: "500",
+    },
+    "opc-meta-atime": {
+        Help:    "Time of last access",
+        Type:    "ISO 8601",
+        Example: "2025-06-30T22:27:43-04:00",
+    },
+    "opc-meta-mtime": {
+        Help:    "Time of last modification",
+        Type:    "ISO 8601",
+        Example: "2025-06-30T22:27:43-04:00",
+    },
+    "opc-meta-btime": {
+        Help:    "Time of file birth (creation)",
+        Type:    "ISO 8601",
+        Example: "2025-06-30T22:27:43-04:00",
+    },
+}
+
 // Fs represents a remote object storage server
 type Fs struct {
     name string // name of this remote
@@ -82,6 +120,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
     }
     f.setRoot(root)
     f.features = (&fs.Features{
+        ReadMetadata:  true,
         ReadMimeType:  true,
         WriteMimeType: true,
         BucketBased:   true,
@@ -688,6 +727,38 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
     return list.Flush()
 }
 
+// Metadata returns metadata for an object
+//
+// It should return nil if there is no Metadata
+func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
+    err = o.readMetaData(ctx)
+    if err != nil {
+        return nil, err
+    }
+    metadata = make(fs.Metadata, len(o.meta)+7)
+    for k, v := range o.meta {
+        switch k {
+        case metaMtime:
+            if modTime, err := swift.FloatStringToTime(v); err == nil {
+                metadata["mtime"] = modTime.Format(time.RFC3339Nano)
+            }
+        case metaMD5Hash:
+            // don't write hash metadata
+        default:
+            metadata[k] = v
+        }
+    }
+    if o.mimeType != "" {
+        metadata["content-type"] = o.mimeType
+    }
+
+    if !o.lastModified.IsZero() {
+        metadata["btime"] = o.lastModified.Format(time.RFC3339Nano)
+    }
+
+    return metadata, nil
+}
+
 // Check the interfaces are satisfied
 var (
     _ fs.Fs = &Fs{}
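
With `ReadMetadata` advertised, the new keys surface through the
standard tooling, e.g. (assuming a configured remote named `remote:`):

```sh
rclone lsjson --metadata remote:bucket/path/to/object
```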

@@ -5,6 +5,7 @@ package api
 
 import (
     "fmt"
+    "net/url"
     "reflect"
     "strconv"
     "time"
@@ -136,8 +137,25 @@ type Link struct {
 }
 
 // Valid reports whether l is non-nil, has an URL, and is not expired.
+// It primarily checks the URL's expire query parameter, falling back to the Expire field.
 func (l *Link) Valid() bool {
-    return l != nil && l.URL != "" && time.Now().Add(10*time.Second).Before(time.Time(l.Expire))
+    if l == nil || l.URL == "" {
+        return false
+    }
+
+    // Primary validation: check URL's expire query parameter
+    if u, err := url.Parse(l.URL); err == nil {
+        if expireStr := u.Query().Get("expire"); expireStr != "" {
+            // Try parsing as Unix timestamp (seconds)
+            if expireInt, err := strconv.ParseInt(expireStr, 10, 64); err == nil {
+                expireTime := time.Unix(expireInt, 0)
+                return time.Now().Add(10 * time.Second).Before(expireTime)
+            }
+        }
+    }
+
+    // Fallback validation: use the Expire field if URL parsing didn't work
+    return time.Now().Add(10 * time.Second).Before(time.Time(l.Expire))
 }
 
 // URL is a basic form of URL
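
The `expire` query parameter is a plain Unix timestamp, so the check
reduces to standard library calls. A self-contained sketch:

```go
package main

import (
	"fmt"
	"net/url"
	"strconv"
	"time"
)

func main() {
	// illustrative link - the timestamp is 2025-01-01T00:00:00Z
	u, err := url.Parse("https://example.com/file?expire=1735689600")
	if err != nil {
		panic(err)
	}
	if s := u.Query().Get("expire"); s != "" {
		if sec, err := strconv.ParseInt(s, 10, 64); err == nil {
			expire := time.Unix(sec, 0)
			// same 10 second safety margin as Link.Valid
			fmt.Println("valid:", time.Now().Add(10*time.Second).Before(expire))
		}
	}
}
```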

backend/pikpak/api/types_test.go (new file, 99 lines)
@@ -0,0 +1,99 @@
+package api
+
+import (
+    "fmt"
+    "testing"
+    "time"
+)
+
+// TestLinkValid tests the Link.Valid method for various scenarios
+func TestLinkValid(t *testing.T) {
+    tests := []struct {
+        name     string
+        link     *Link
+        expected bool
+        desc     string
+    }{
+        {
+            name:     "nil link",
+            link:     nil,
+            expected: false,
+            desc:     "nil link should be invalid",
+        },
+        {
+            name:     "empty URL",
+            link:     &Link{URL: ""},
+            expected: false,
+            desc:     "empty URL should be invalid",
+        },
+        {
+            name: "valid URL with future expire parameter",
+            link: &Link{
+                URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(time.Hour).Unix()),
+            },
+            expected: true,
+            desc:     "URL with future expire parameter should be valid",
+        },
+        {
+            name: "expired URL with past expire parameter",
+            link: &Link{
+                URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(-time.Hour).Unix()),
+            },
+            expected: false,
+            desc:     "URL with past expire parameter should be invalid",
+        },
+        {
+            name: "URL expire parameter takes precedence over Expire field",
+            link: &Link{
+                URL:    fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(time.Hour).Unix()),
+                Expire: Time(time.Now().Add(-time.Hour)), // Fallback is expired
+            },
+            expected: true,
+            desc:     "URL expire parameter should take precedence over Expire field",
+        },
+        {
+            name: "URL expire parameter within 10 second buffer should be invalid",
+            link: &Link{
+                URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(5*time.Second).Unix()),
+            },
+            expected: false,
+            desc:     "URL expire parameter within 10 second buffer should be invalid",
+        },
+        {
+            name: "fallback to Expire field when no URL expire parameter",
+            link: &Link{
+                URL:    "https://example.com/file",
+                Expire: Time(time.Now().Add(time.Hour)),
+            },
+            expected: true,
+            desc:     "should fallback to Expire field when URL has no expire parameter",
+        },
+        {
+            name: "fallback to Expire field when URL expire parameter is invalid",
+            link: &Link{
+                URL:    "https://example.com/file?expire=invalid",
+                Expire: Time(time.Now().Add(time.Hour)),
+            },
+            expected: true,
+            desc:     "should fallback to Expire field when URL expire parameter is unparseable",
+        },
+        {
+            name: "invalid when both URL expire and Expire field are expired",
+            link: &Link{
+                URL:    fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(-time.Hour).Unix()),
+                Expire: Time(time.Now().Add(-time.Hour)),
+            },
+            expected: false,
+            desc:     "should be invalid when both URL expire and Expire field are expired",
+        },
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            result := tt.link.Valid()
+            if result != tt.expected {
+                t.Errorf("Link.Valid() = %v, expected %v. %s", result, tt.expected, tt.desc)
+            }
+        })
+    }
+}
@@ -979,6 +979,24 @@ func (f *Fs) deleteObjects(ctx context.Context, IDs []string, useTrash bool) (er
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// untrash a file or directory by ID
|
||||||
|
//
|
||||||
|
// If a name collision occurs in the destination folder, PikPak might automatically
|
||||||
|
// rename the restored item(s) by appending a numbered suffix. For example,
|
||||||
|
// foo.txt -> foo(1).txt or foo(2).txt if foo(1).txt already exists
|
||||||
|
func (f *Fs) untrashObjects(ctx context.Context, IDs []string) (err error) {
|
||||||
|
if len(IDs) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
req := api.RequestBatch{
|
||||||
|
IDs: IDs,
|
||||||
|
}
|
||||||
|
if err := f.requestBatchAction(ctx, "batchUntrash", &req); err != nil {
|
||||||
|
return fmt.Errorf("untrash object failed: %w", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// purgeCheck removes the root directory, if check is set then it
|
// purgeCheck removes the root directory, if check is set then it
|
||||||
// refuses to do so if it has anything in
|
// refuses to do so if it has anything in
|
||||||
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
||||||
@@ -1063,7 +1081,14 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
|
|||||||
return f.waitTask(ctx, info.TaskID)
|
return f.waitTask(ctx, info.TaskID)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Move the object
|
// Move the object to a new parent folder
|
||||||
|
//
|
||||||
|
// Objects cannot be moved to their current folder.
|
||||||
|
// "file_move_or_copy_to_cur" (9): Please don't move or copy to current folder or sub folder
|
||||||
|
//
|
||||||
|
// If a name collision occurs in the destination folder, PikPak might automatically
|
||||||
|
// rename the moved item(s) by appending a numbered suffix. For example,
|
||||||
|
// foo.txt -> foo(1).txt or foo(2).txt if foo(1).txt already exists
|
||||||
func (f *Fs) moveObjects(ctx context.Context, IDs []string, dirID string) (err error) {
|
func (f *Fs) moveObjects(ctx context.Context, IDs []string, dirID string) (err error) {
|
||||||
if len(IDs) == 0 {
|
if len(IDs) == 0 {
|
||||||
return nil
|
return nil
|
||||||
@@ -1079,6 +1104,12 @@ func (f *Fs) moveObjects(ctx context.Context, IDs []string, dirID string) (err e
|
|||||||
}
|
}
|
||||||
|
|
||||||
// renames the object
|
// renames the object
|
||||||
|
//
|
||||||
|
// The new name must be different from the current name.
|
||||||
|
// "file_rename_to_same_name" (3): Name of file or folder is not changed
|
||||||
|
//
|
||||||
|
// Within the same folder, object names must be unique.
|
||||||
|
// "file_duplicated_name" (3): File name cannot be repeated
|
||||||
func (f *Fs) renameObject(ctx context.Context, ID, newName string) (info *api.File, err error) {
|
func (f *Fs) renameObject(ctx context.Context, ID, newName string) (info *api.File, err error) {
|
||||||
req := api.File{
|
req := api.File{
|
||||||
Name: f.opt.Enc.FromStandardName(newName),
|
Name: f.opt.Enc.FromStandardName(newName),
|
||||||
@@ -1163,18 +1194,13 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
|
|||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
// If it isn't possible then return fs.ErrorCantMove
|
// If it isn't possible then return fs.ErrorCantMove
|
||||||
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
|
||||||
srcObj, ok := src.(*Object)
|
srcObj, ok := src.(*Object)
|
||||||
if !ok {
|
if !ok {
|
||||||
fs.Debugf(src, "Can't move - not same remote type")
|
fs.Debugf(src, "Can't move - not same remote type")
|
||||||
return nil, fs.ErrorCantMove
|
return nil, fs.ErrorCantMove
|
||||||
}
|
}
|
||||||
err := srcObj.readMetaData(ctx)
|
err = srcObj.readMetaData(ctx)
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
srcLeaf, srcParentID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -1185,31 +1211,74 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if srcParentID != dstParentID {
|
if srcObj.parent != dstParentID {
|
||||||
// Do the move
|
// Perform the move. A numbered copy might be generated upon name collision.
|
||||||
if err = f.moveObjects(ctx, []string{srcObj.id}, dstParentID); err != nil {
|
if err = f.moveObjects(ctx, []string{srcObj.id}, dstParentID); err != nil {
|
||||||
return nil, err
|
return nil, fmt.Errorf("move: failed to move object %s to new parent %s: %w", srcObj.id, dstParentID, err)
|
||||||
}
|
}
|
||||||
|
defer func() {
|
||||||
|
if err != nil {
|
||||||
|
// FIXME: Restored file might have a numbered name if a conflict occurs
|
||||||
|
if mvErr := f.moveObjects(ctx, []string{srcObj.id}, srcObj.parent); mvErr != nil {
|
||||||
|
fs.Logf(f, "move: couldn't restore original object %q to %q after move failure: %v", dstObj.id, src.Remote(), mvErr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
}
|
}
|
||||||
// Manually update info of moved object to save API calls
|
|
||||||
dstObj.id = srcObj.id
|
|
||||||
dstObj.mimeType = srcObj.mimeType
|
|
||||||
dstObj.gcid = srcObj.gcid
|
|
||||||
dstObj.md5sum = srcObj.md5sum
|
|
||||||
dstObj.hasMetaData = true
|
|
||||||
|
|
||||||
if srcLeaf != dstLeaf {
|
// Find the moved object and any conflict object with the same name.
|
||||||
// Rename
|
var moved, conflict *api.File
|
||||||
info, err := f.renameObject(ctx, srcObj.id, dstLeaf)
|
_, err = f.listAll(ctx, dstParentID, api.KindOfFile, "false", func(item *api.File) bool {
|
||||||
if err != nil {
|
if item.ID == srcObj.id {
|
||||||
return nil, fmt.Errorf("move: couldn't rename moved file: %w", err)
|
moved = item
|
||||||
|
if item.Name == dstLeaf {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
} else if item.Name == dstLeaf {
|
||||||
|
conflict = item
|
||||||
}
|
}
|
||||||
return dstObj, dstObj.setMetaData(info)
|
// Stop early if both found
|
||||||
|
return moved != nil && conflict != nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("move: couldn't locate moved file %q in destination directory %q: %w", srcObj.id, dstParentID, err)
|
||||||
}
|
}
|
||||||
return dstObj, nil
|
if moved == nil {
|
||||||
|
return nil, fmt.Errorf("move: moved file %q not found in destination", srcObj.id)
|
||||||
|
}
|
||||||
|
|
||||||
|
// If moved object already has the correct name, return
|
||||||
|
if moved.Name == dstLeaf {
|
||||||
|
return dstObj, dstObj.setMetaData(moved)
|
||||||
|
}
|
||||||
|
// If name collision, delete conflicting file first
|
||||||
|
if conflict != nil {
|
||||||
|
if err = f.deleteObjects(ctx, []string{conflict.ID}, true); err != nil {
|
||||||
|
return nil, fmt.Errorf("move: couldn't delete conflicting file: %w", err)
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if err != nil {
|
||||||
|
if restoreErr := f.untrashObjects(ctx, []string{conflict.ID}); restoreErr != nil {
|
||||||
|
fs.Logf(f, "move: couldn't restore conflicting file: %v", restoreErr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
info, err := f.renameObject(ctx, srcObj.id, dstLeaf)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("move: couldn't rename moved file %q to %q: %w", dstObj.id, dstLeaf, err)
|
||||||
|
}
|
||||||
|
return dstObj, dstObj.setMetaData(info)
|
||||||
}
|
}
|
||||||
|
|
||||||
// copy objects
|
// copy objects
|
||||||
|
//
|
||||||
|
// Objects cannot be copied to their current folder.
|
||||||
|
// "file_move_or_copy_to_cur" (9): Please don't move or copy to current folder or sub folder
|
||||||
|
//
|
||||||
|
// If a name collision occurs in the destination folder, PikPak might automatically
|
||||||
|
// rename the copied item(s) by appending a numbered suffix. For example,
|
||||||
|
// foo.txt -> foo(1).txt or foo(2).txt if foo(1).txt already exists
|
||||||
func (f *Fs) copyObjects(ctx context.Context, IDs []string, dirID string) (err error) {
|
func (f *Fs) copyObjects(ctx context.Context, IDs []string, dirID string) (err error) {
|
||||||
if len(IDs) == 0 {
|
if len(IDs) == 0 {
|
||||||
return nil
|
return nil
|
||||||
@@ -1233,13 +1302,13 @@ func (f *Fs) copyObjects(ctx context.Context, IDs []string, dirID string) (err e
|
|||||||
// Will only be called if src.Fs().Name() == f.Name()
|
// Will only be called if src.Fs().Name() == f.Name()
|
||||||
//
|
//
|
||||||
// If it isn't possible then return fs.ErrorCantCopy
|
// If it isn't possible then return fs.ErrorCantCopy
|
||||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
|
||||||
srcObj, ok := src.(*Object)
|
srcObj, ok := src.(*Object)
|
||||||
if !ok {
|
if !ok {
|
||||||
fs.Debugf(src, "Can't copy - not same remote type")
|
fs.Debugf(src, "Can't copy - not same remote type")
|
||||||
return nil, fs.ErrorCantCopy
|
return nil, fs.ErrorCantCopy
|
||||||
}
|
}
|
||||||
err := srcObj.readMetaData(ctx)
 	err = srcObj.readMetaData(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -1254,31 +1323,55 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		fs.Debugf(src, "Can't copy - same parent")
 		return nil, fs.ErrorCantCopy
 	}
 
+	// Check for possible conflicts: Pikpak creates numbered copies on name collision.
+	var conflict *api.File
+	_, srcLeaf := dircache.SplitPath(srcObj.remote)
+	if srcLeaf == dstLeaf {
+		if conflict, err = f.readMetaDataForPath(ctx, remote); err == nil {
+			// delete conflicting file
+			if err = f.deleteObjects(ctx, []string{conflict.ID}, true); err != nil {
+				return nil, fmt.Errorf("copy: couldn't delete conflicting file: %w", err)
+			}
+			defer func() {
+				if err != nil {
+					if restoreErr := f.untrashObjects(ctx, []string{conflict.ID}); restoreErr != nil {
+						fs.Logf(f, "copy: couldn't restore conflicting file: %v", restoreErr)
+					}
+				}
+			}()
+		} else if err != fs.ErrorObjectNotFound {
+			return nil, err
+		}
+	} else {
+		dstDir, _ := dircache.SplitPath(remote)
+		dstObj.remote = path.Join(dstDir, srcLeaf)
+		if conflict, err = f.readMetaDataForPath(ctx, dstObj.remote); err == nil {
+			tmpName := conflict.Name + "-rclone-copy-" + random.String(8)
+			if _, err = f.renameObject(ctx, conflict.ID, tmpName); err != nil {
+				return nil, fmt.Errorf("copy: couldn't rename conflicting file: %w", err)
+			}
+			defer func() {
+				if _, renameErr := f.renameObject(ctx, conflict.ID, conflict.Name); renameErr != nil {
+					fs.Logf(f, "copy: couldn't rename conflicting file back to original: %v", renameErr)
+				}
+			}()
+		} else if err != fs.ErrorObjectNotFound {
+			return nil, err
+		}
+	}
+
 	// Copy the object
 	if err := f.copyObjects(ctx, []string{srcObj.id}, dstParentID); err != nil {
 		return nil, fmt.Errorf("couldn't copy file: %w", err)
 	}
-	// Update info of the copied object with new parent but source name
-	if info, err := dstObj.fs.readMetaDataForPath(ctx, srcObj.remote); err != nil {
-		return nil, fmt.Errorf("copy: couldn't locate copied file: %w", err)
-	} else if err = dstObj.setMetaData(info); err != nil {
-		return nil, err
-	}
-
-	// Can't copy and change name in one step so we have to check if we have
-	// the correct name after copy
-	srcLeaf, _, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
+	err = dstObj.readMetaData(ctx)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("copy: couldn't locate copied file: %w", err)
 	}
 
 	if srcLeaf != dstLeaf {
-		// Rename
-		info, err := f.renameObject(ctx, dstObj.id, dstLeaf)
-		if err != nil {
-			return nil, fmt.Errorf("copy: couldn't rename copied file: %w", err)
-		}
-		return dstObj, dstObj.setMetaData(info)
+		return f.Move(ctx, dstObj, remote)
 	}
 	return dstObj, nil
 }
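
The conflict handling above leans on a Go detail worth calling out: the deferred closures read the named return value err, so the cleanup (restoring or renaming back the conflicting file) only runs when Copy ultimately fails. A minimal, self-contained sketch of the same rollback pattern, where deleteFile and untrashFile are hypothetical stand-ins for the backend calls shown in the hunk:

package main

import (
	"errors"
	"fmt"
)

// Hypothetical stand-ins for the Pikpak API calls used in Copy above.
func deleteFile(id string) error  { fmt.Println("trashing", id); return nil }
func untrashFile(id string) error { fmt.Println("restoring", id); return nil }

// copyWithRollback trashes the conflicting file up front; the deferred
// closure inspects the named return err and restores the file on failure.
func copyWithRollback(conflictID string, fail bool) (err error) {
	if err = deleteFile(conflictID); err != nil {
		return fmt.Errorf("couldn't delete conflicting file: %w", err)
	}
	defer func() {
		if err != nil {
			if restoreErr := untrashFile(conflictID); restoreErr != nil {
				fmt.Println("couldn't restore conflicting file:", restoreErr)
			}
		}
	}()
	if fail {
		return errors.New("copy failed") // triggers the restore
	}
	return nil
}

func main() {
	_ = copyWithRollback("file-id-1", true)
}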
@@ -1415,8 +1508,30 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, gcid string,
 	}
 	if new.File == nil {
 		return nil, fmt.Errorf("invalid response: %+v", new)
-	} else if new.File.Phase == api.PhaseTypeComplete {
-		// early return; in case of zero-byte objects
+	}
+
+	defer atexit.OnError(&err, func() {
+		fs.Debugf(leaf, "canceling upload: %v", err)
+		if cancelErr := f.deleteObjects(ctx, []string{new.File.ID}, false); cancelErr != nil {
+			fs.Logf(leaf, "failed to cancel upload: %v", cancelErr)
+		}
+		if new.Task != nil {
+			if cancelErr := f.deleteTask(ctx, new.Task.ID, false); cancelErr != nil {
+				fs.Logf(leaf, "failed to cancel upload: %v", cancelErr)
+			}
+			fs.Debugf(leaf, "waiting %v for the cancellation to be effective", taskWaitTime)
+			time.Sleep(taskWaitTime)
+		}
+	})()
+
+	// Note: The API might automatically append a numbered suffix to the filename,
+	// even if a file with the same name does not exist in the target directory.
+	if upName := f.opt.Enc.ToStandardName(new.File.Name); leaf != upName {
+		return nil, fserrors.NoRetryError(fmt.Errorf("uploaded file name mismatch: expected %q, got %q", leaf, upName))
+	}
+
+	// early return; in case of zero-byte objects or uploaded by matched gcid
+	if new.File.Phase == api.PhaseTypeComplete {
 		if acc, ok := in.(*accounting.Account); ok && acc != nil {
 			// if `in io.Reader` is still in type of `*accounting.Account` (meaning that it is unused)
 			// it is considered as a server side copy as no incoming/outgoing traffic occur at all
@@ -1426,18 +1541,6 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, gcid string,
 		return new.File, nil
 	}
 
-	defer atexit.OnError(&err, func() {
-		fs.Debugf(leaf, "canceling upload: %v", err)
-		if cancelErr := f.deleteObjects(ctx, []string{new.File.ID}, false); cancelErr != nil {
-			fs.Logf(leaf, "failed to cancel upload: %v", cancelErr)
-		}
-		if cancelErr := f.deleteTask(ctx, new.Task.ID, false); cancelErr != nil {
-			fs.Logf(leaf, "failed to cancel upload: %v", cancelErr)
-		}
-		fs.Debugf(leaf, "waiting %v for the cancellation to be effective", taskWaitTime)
-		time.Sleep(taskWaitTime)
-	})()
-
 	if uploadType == api.UploadTypeForm && new.Form != nil {
 		err = f.uploadByForm(ctx, in, req.Name, size, new.Form, options...)
 	} else if uploadType == api.UploadTypeResumable && new.Resumable != nil {
@@ -1449,6 +1552,9 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, gcid string,
 	if err != nil {
 		return nil, fmt.Errorf("failed to upload: %w", err)
 	}
+	if new.Task == nil {
+		return new.File, nil
+	}
 	return new.File, f.waitTask(ctx, new.Task.ID)
 }
 
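
The cancellation cleanup moved earlier so it also covers the new name-mismatch bail-out, and it now guards the task deletion behind a nil check. It relies on rclone's lib/atexit helper; as a rough sketch of the semantics the hunk depends on (an assumption from the usage shown above; the real OnError additionally fires the cleanup if rclone is interrupted):

package main

import "fmt"

// onError approximates lib/atexit.OnError as used above: it returns a
// function to defer, and cleanup runs only if *perr is non-nil when the
// deferred function executes.
func onError(perr *error, cleanup func()) func() {
	return func() {
		if *perr != nil {
			cleanup()
		}
	}
}

func upload(fail bool) (err error) {
	defer onError(&err, func() {
		// e.g. delete the half-created file and its task
		fmt.Println("canceling upload:", err)
	})()
	if fail {
		return fmt.Errorf("simulated upload failure")
	}
	return nil
}

func main() {
	_ = upload(true)
}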
@@ -793,7 +793,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 		return nil, err
 	}
 	usage = &fs.Usage{
-		Used: fs.NewUsageValue(int64(info.SpaceUsed)),
+		Used: fs.NewUsageValue(info.SpaceUsed),
 	}
 	return usage, nil
 }
176 backend/s3/s3.go
@@ -149,6 +149,9 @@ var providerOption = fs.Option{
 	}, {
 		Value: "Outscale",
 		Help:  "OUTSCALE Object Storage (OOS)",
+	}, {
+		Value: "OVHcloud",
+		Help:  "OVHcloud Object Storage",
 	}, {
 		Value: "Petabox",
 		Help:  "Petabox Object Storage",
@@ -535,6 +538,59 @@ func init() {
 			Value: "ap-northeast-1",
 			Help:  "Tokyo, Japan",
 		}},
+	}, {
+		// References:
+		// https://help.ovhcloud.com/csm/en-public-cloud-storage-s3-location?id=kb_article_view&sysparm_article=KB0047384
+		// https://support.us.ovhcloud.com/hc/en-us/articles/10667991081107-Endpoints-and-Object-Storage-Geoavailability
+		Name:     "region",
+		Help:     "Region where your bucket will be created and your data stored.\n",
+		Provider: "OVHcloud",
+		Examples: []fs.OptionExample{{
+			Value: "gra",
+			Help:  "Gravelines, France",
+		}, {
+			Value: "rbx",
+			Help:  "Roubaix, France",
+		}, {
+			Value: "sbg",
+			Help:  "Strasbourg, France",
+		}, {
+			Value: "eu-west-par",
+			Help:  "Paris, France (3AZ)",
+		}, {
+			Value: "de",
+			Help:  "Frankfurt, Germany",
+		}, {
+			Value: "uk",
+			Help:  "London, United Kingdom",
+		}, {
+			Value: "waw",
+			Help:  "Warsaw, Poland",
+		}, {
+			Value: "bhs",
+			Help:  "Beauharnois, Canada",
+		}, {
+			Value: "ca-east-tor",
+			Help:  "Toronto, Canada",
+		}, {
+			Value: "sgp",
+			Help:  "Singapore",
+		}, {
+			Value: "ap-southeast-syd",
+			Help:  "Sydney, Australia",
+		}, {
+			Value: "ap-south-mum",
+			Help:  "Mumbai, India",
+		}, {
+			Value: "us-east-va",
+			Help:  "Vint Hill, Virginia, USA",
+		}, {
+			Value: "us-west-or",
+			Help:  "Hillsboro, Oregon, USA",
+		}, {
+			Value: "rbx-archive",
+			Help:  "Roubaix, France (Cold Archive)",
+		}},
 	}, {
 		Name:     "region",
 		Help:     "Region where your bucket will be created and your data stored.\n",
@@ -587,7 +643,7 @@ func init() {
 	}, {
 		Name:     "region",
 		Help:     "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
-		Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,FlashBlade,IONOS,Petabox,Liara,Linode,Magalu,Qiniu,RackCorp,Scaleway,Selectel,Storj,Synology,TencentCOS,HuaweiOBS,IDrive,Mega,Zata",
+		Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,FlashBlade,IONOS,Petabox,Liara,Linode,Magalu,OVHcloud,Qiniu,RackCorp,Scaleway,Selectel,Storj,Synology,TencentCOS,HuaweiOBS,IDrive,Mega,Zata",
 		Examples: []fs.OptionExample{{
 			Value: "",
 			Help:  "Use this if unsure.\nWill use v4 signatures and an empty region.",
@@ -1174,6 +1230,71 @@ func init() {
 			Value: "obs.ru-northwest-2.myhuaweicloud.com",
 			Help:  "RU-Moscow2",
 		}},
+	}, {
+		Name:     "endpoint",
+		Help:     "Endpoint for OVHcloud Object Storage.",
+		Provider: "OVHcloud",
+		Examples: []fs.OptionExample{{
+			Value:    "s3.gra.io.cloud.ovh.net",
+			Help:     "OVHcloud Gravelines, France",
+			Provider: "OVHcloud",
+		}, {
+			Value:    "s3.rbx.io.cloud.ovh.net",
+			Help:     "OVHcloud Roubaix, France",
+			Provider: "OVHcloud",
+		}, {
+			Value:    "s3.sbg.io.cloud.ovh.net",
+			Help:     "OVHcloud Strasbourg, France",
+			Provider: "OVHcloud",
+		}, {
+			Value:    "s3.eu-west-par.io.cloud.ovh.net",
+			Help:     "OVHcloud Paris, France (3AZ)",
+			Provider: "OVHcloud",
+		}, {
+			Value:    "s3.de.io.cloud.ovh.net",
+			Help:     "OVHcloud Frankfurt, Germany",
+			Provider: "OVHcloud",
+		}, {
+			Value:    "s3.uk.io.cloud.ovh.net",
+			Help:     "OVHcloud London, United Kingdom",
+			Provider: "OVHcloud",
+		}, {
+			Value:    "s3.waw.io.cloud.ovh.net",
+			Help:     "OVHcloud Warsaw, Poland",
+			Provider: "OVHcloud",
+		}, {
+			Value:    "s3.bhs.io.cloud.ovh.net",
+			Help:     "OVHcloud Beauharnois, Canada",
+			Provider: "OVHcloud",
+		}, {
+			Value:    "s3.ca-east-tor.io.cloud.ovh.net",
+			Help:     "OVHcloud Toronto, Canada",
+			Provider: "OVHcloud",
+		}, {
+			Value:    "s3.sgp.io.cloud.ovh.net",
+			Help:     "OVHcloud Singapore",
+			Provider: "OVHcloud",
+		}, {
+			Value:    "s3.ap-southeast-syd.io.cloud.ovh.net",
+			Help:     "OVHcloud Sydney, Australia",
+			Provider: "OVHcloud",
+		}, {
+			Value:    "s3.ap-south-mum.io.cloud.ovh.net",
+			Help:     "OVHcloud Mumbai, India",
+			Provider: "OVHcloud",
+		}, {
+			Value:    "s3.us-east-va.io.cloud.ovh.us",
+			Help:     "OVHcloud Vint Hill, Virginia, USA",
+			Provider: "OVHcloud",
+		}, {
+			Value:    "s3.us-west-or.io.cloud.ovh.us",
+			Help:     "OVHcloud Hillsboro, Oregon, USA",
+			Provider: "OVHcloud",
+		}, {
+			Value:    "s3.rbx-archive.io.cloud.ovh.net",
+			Help:     "OVHcloud Roubaix, France (Cold Archive)",
+			Provider: "OVHcloud",
+		}},
 	}, {
 		Name: "endpoint",
 		Help: "Endpoint for Scaleway Object Storage.",
@@ -1411,7 +1532,7 @@ func init() {
 	}, {
 		Name:     "endpoint",
 		Help:     "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
-		Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,LyveCloud,Magalu,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox,Zata",
+		Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,LyveCloud,Magalu,OVHcloud,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox,Zata",
 		Examples: []fs.OptionExample{{
 			Value: "objects-us-east-1.dream.io",
 			Help:  "Dream Objects endpoint",
@@ -1946,7 +2067,7 @@ func init() {
 	}, {
 		Name:     "location_constraint",
 		Help:     "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
-		Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,FlashBlade,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Outscale,Qiniu,RackCorp,Scaleway,Selectel,StackPath,Storj,TencentCOS,Petabox,Mega",
+		Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,FlashBlade,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Outscale,OVHcloud,Qiniu,RackCorp,Scaleway,Selectel,StackPath,Storj,TencentCOS,Petabox,Mega",
 	}, {
 		Name: "acl",
 		Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -2428,6 +2549,11 @@ See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/lates
 See: [AWS S3 Transfer acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration-examples.html)`,
 		Default:  false,
 		Advanced: true,
+	}, {
+		Name:     "use_arn_region",
+		Help:     `If true, enables arn region support for the service.`,
+		Default:  false,
+		Advanced: true,
 	}, {
 		Name:     "leave_parts_on_error",
 		Provider: "AWS",
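
For context, UseARNRegion is a standard aws-sdk-go-v2 client option: when enabled, S3 access-point ARNs may resolve to a region other than the client's configured one. A sketch of the underlying SDK call this option ultimately toggles (independent of rclone's plumbing in the hunks below):

package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// newClient shows the SDK knob the new rclone option maps onto.
func newClient(cfg aws.Config, useARNRegion bool) *s3.Client {
	return s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.UseARNRegion = useARNRegion
	})
}

func main() {
	_ = newClient(aws.Config{Region: "us-east-1"}, true)
}

By rclone's usual naming conventions this config key should surface as --s3-use-arn-region / RCLONE_S3_USE_ARN_REGION; that flag name is inferred from convention, not shown in the diff.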
@@ -2975,6 +3101,7 @@ type Options struct {
 	ForcePathStyle        bool   `config:"force_path_style"`
 	V2Auth                bool   `config:"v2_auth"`
 	UseAccelerateEndpoint bool   `config:"use_accelerate_endpoint"`
+	UseARNRegion          bool   `config:"use_arn_region"`
 	LeavePartsOnError     bool   `config:"leave_parts_on_error"`
 	ListChunk             int32  `config:"list_chunk"`
 	ListVersion           int    `config:"list_version"`
@@ -3339,6 +3466,7 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Cli
 	options = append(options, func(s3Opt *s3.Options) {
 		s3Opt.UsePathStyle = opt.ForcePathStyle
 		s3Opt.UseAccelerate = opt.UseAccelerateEndpoint
+		s3Opt.UseARNRegion = opt.UseARNRegion
 		// FIXME maybe this should be a tristate so can default to DualStackEndpointStateUnset?
 		if opt.UseDualStack {
 			s3Opt.EndpointOptions.UseDualStackEndpoint = aws.DualStackEndpointStateEnabled
@@ -3589,6 +3717,8 @@ func setQuirks(opt *Options) {
 		useAlreadyExists = false // untested
 	case "Outscale":
 		virtualHostStyle = false
+	case "OVHcloud":
+		// No quirks
 	case "RackCorp":
 		// No quirks
 		useMultipartEtag = false // untested
@@ -5963,7 +6093,7 @@ func (o *Object) setMetaData(resp *s3.HeadObjectOutput) {
 	o.storageClass = stringClone(string(resp.StorageClass))
 	o.cacheControl = stringClonePointer(resp.CacheControl)
 	o.contentDisposition = stringClonePointer(resp.ContentDisposition)
-	o.contentEncoding = stringClonePointer(resp.ContentEncoding)
+	o.contentEncoding = stringClonePointer(removeAWSChunked(resp.ContentEncoding))
 	o.contentLanguage = stringClonePointer(resp.ContentLanguage)
 
 	// If decompressing then size and md5sum are unknown
@@ -6031,6 +6161,36 @@ func (o *Object) Storable() bool {
 	return true
 }
 
+// removeAWSChunked removes the "aws-chunked" content-coding from a
+// Content-Encoding field value (RFC 9110). Comparison is case-insensitive.
+// Returns nil if encoding is empty after removal.
+func removeAWSChunked(pv *string) *string {
+	if pv == nil {
+		return nil
+	}
+	v := *pv
+	if v == "" {
+		return nil
+	}
+	if !strings.Contains(strings.ToLower(v), "aws-chunked") {
+		return pv
+	}
+	parts := strings.Split(v, ",")
+	out := make([]string, 0, len(parts))
+	for _, p := range parts {
+		tok := strings.TrimSpace(p)
+		if tok == "" || strings.EqualFold(tok, "aws-chunked") {
+			continue
+		}
+		out = append(out, tok)
+	}
+	if len(out) == 0 {
+		return nil
+	}
+	v = strings.Join(out, ",")
+	return &v
+}
+
 func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	url := o.fs.opt.DownloadURL + bucketPath
 	var resp *http.Response
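
The helper treats Content-Encoding as an RFC 9110 comma-separated list and drops only the aws-chunked token, which SigV4 streaming uploads can prepend (for example "aws-chunked,gzip"). A scratch sketch of the same token handling on plain strings, for experimenting outside the backend (stripToken is a hypothetical name, not part of the patch):

package main

import (
	"fmt"
	"strings"
)

// stripToken removes one content-coding token, case-insensitively,
// mirroring the loop in removeAWSChunked above.
func stripToken(v, token string) string {
	parts := strings.Split(v, ",")
	out := make([]string, 0, len(parts))
	for _, p := range parts {
		if t := strings.TrimSpace(p); t != "" && !strings.EqualFold(t, token) {
			out = append(out, t)
		}
	}
	return strings.Join(out, ",")
}

func main() {
	fmt.Println(stripToken("aws-chunked, gzip", "aws-chunked"))     // gzip
	fmt.Println(stripToken("GZip, AwS-ChUnKeD, Br", "aws-chunked")) // GZip,Br
}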
@@ -6199,7 +6359,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	o.setMetaData(&head)
 
 	// Decompress body if necessary
-	if deref(resp.ContentEncoding) == "gzip" {
+	if deref(removeAWSChunked(resp.ContentEncoding)) == "gzip" {
 		if o.fs.opt.Decompress || (resp.ContentLength == nil && o.fs.opt.MightGzip.Value) {
 			return readers.NewGzipReader(resp.Body)
 		}
@@ -6481,7 +6641,7 @@ func (o *Object) uploadMultipart(ctx context.Context, src fs.ObjectInfo, in io.R
 		return wantETag, gotETag, versionID, ui, err
 	}
 
-	var s3cw *s3ChunkWriter = chunkWriter.(*s3ChunkWriter)
+	s3cw := chunkWriter.(*s3ChunkWriter)
 	gotETag = *stringClone(s3cw.eTag)
 	versionID = stringClone(s3cw.versionID)
 
@@ -6623,7 +6783,7 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
 		case "content-disposition":
 			ui.req.ContentDisposition = pv
 		case "content-encoding":
-			ui.req.ContentEncoding = pv
+			ui.req.ContentEncoding = removeAWSChunked(pv)
 		case "content-language":
 			ui.req.ContentLanguage = pv
 		case "content-type":
@@ -6720,7 +6880,7 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
 		case "content-disposition":
 			ui.req.ContentDisposition = aws.String(value)
 		case "content-encoding":
-			ui.req.ContentEncoding = aws.String(value)
+			ui.req.ContentEncoding = removeAWSChunked(aws.String(value))
 		case "content-language":
 			ui.req.ContentLanguage = aws.String(value)
 		case "content-type":
@@ -248,6 +248,47 @@ func TestMergeDeleteMarkers(t *testing.T) {
 	}
 }
 
+func TestRemoveAWSChunked(t *testing.T) {
+	ps := func(s string) *string {
+		return &s
+	}
+	tests := []struct {
+		name string
+		in   *string
+		want *string
+	}{
+		{"nil", nil, nil},
+		{"empty", ps(""), nil},
+		{"only aws", ps("aws-chunked"), nil},
+		{"leading aws", ps("aws-chunked, gzip"), ps("gzip")},
+		{"trailing aws", ps("gzip, aws-chunked"), ps("gzip")},
+		{"middle aws", ps("gzip, aws-chunked, br"), ps("gzip,br")},
+		{"case insensitive", ps("GZip, AwS-ChUnKeD, Br"), ps("GZip,Br")},
+		{"duplicates", ps("aws-chunked , aws-chunked"), nil},
+		{"no aws normalize spaces", ps(" gzip , br "), ps(" gzip , br ")},
+		{"surrounding spaces", ps(" aws-chunked "), nil},
+		{"no change", ps("gzip, br"), ps("gzip, br")},
+	}
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			got := removeAWSChunked(tc.in)
+			check := func(want, got *string) {
+				t.Helper()
+				if tc.want == nil {
+					assert.Nil(t, got)
+				} else {
+					require.NotNil(t, got)
+					assert.Equal(t, *tc.want, *got)
+				}
+			}
+			check(tc.want, got)
+			// Idempotent
+			got2 := removeAWSChunked(got)
+			check(got, got2)
+		})
+	}
+}
+
 func (f *Fs) InternalTestVersions(t *testing.T) {
 	ctx := context.Background()
@@ -1863,9 +1863,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 		free := vfsStats.FreeSpace()
 		used := total - free
 		return &fs.Usage{
-			Total: fs.NewUsageValue(int64(total)),
-			Used:  fs.NewUsageValue(int64(used)),
-			Free:  fs.NewUsageValue(int64(free)),
+			Total: fs.NewUsageValue(total),
+			Used:  fs.NewUsageValue(used),
+			Free:  fs.NewUsageValue(free),
 		}, nil
 	} else if err != nil {
 		if errors.Is(err, os.ErrNotExist) {
99 backend/smb/filepool.go Normal file
@@ -0,0 +1,99 @@
+package smb
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"sync"
+
+	"github.com/cloudsoda/go-smb2"
+	"golang.org/x/sync/errgroup"
+)
+
+// FsInterface defines the methods that filePool needs from Fs
+type FsInterface interface {
+	getConnection(ctx context.Context, share string) (*conn, error)
+	putConnection(pc **conn, err error)
+	removeSession()
+}
+
+type file struct {
+	*smb2.File
+	c *conn
+}
+
+type filePool struct {
+	ctx   context.Context
+	fs    FsInterface
+	share string
+	path  string
+
+	mu   sync.Mutex
+	pool []*file
+}
+
+func newFilePool(ctx context.Context, fs FsInterface, share, path string) *filePool {
+	return &filePool{
+		ctx:   ctx,
+		fs:    fs,
+		share: share,
+		path:  path,
+	}
+}
+
+func (p *filePool) get() (*file, error) {
+	p.mu.Lock()
+	if len(p.pool) > 0 {
+		f := p.pool[len(p.pool)-1]
+		p.pool = p.pool[:len(p.pool)-1]
+		p.mu.Unlock()
+		return f, nil
+	}
+	p.mu.Unlock()
+
+	c, err := p.fs.getConnection(p.ctx, p.share)
+	if err != nil {
+		return nil, err
+	}
+
+	fl, err := c.smbShare.OpenFile(p.path, os.O_WRONLY, 0o644)
+	if err != nil {
+		p.fs.putConnection(&c, err)
+		return nil, fmt.Errorf("failed to open: %w", err)
+	}
+
+	return &file{File: fl, c: c}, nil
+}
+
+func (p *filePool) put(f *file, err error) {
+	if f == nil {
+		return
+	}
+
+	if err != nil {
+		_ = f.Close()
+		p.fs.putConnection(&f.c, err)
+		return
+	}
+
+	p.mu.Lock()
+	p.pool = append(p.pool, f)
+	p.mu.Unlock()
+}
+
+func (p *filePool) drain() error {
+	p.mu.Lock()
+	files := p.pool
+	p.pool = nil
+	p.mu.Unlock()
+
+	g, _ := errgroup.WithContext(p.ctx)
+	for _, f := range files {
+		g.Go(func() error {
+			err := f.Close()
+			p.fs.putConnection(&f.c, err)
+			return err
+		})
+	}
+	return g.Wait()
+}
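
The pool's contract is simple: get reuses an idle handle or opens a fresh one on a new connection, put recycles handles only on success and discards them (returning the connection together with the error) otherwise, and drain closes everything in parallel. A generic, self-contained sketch of that shape, with the smb2 plumbing replaced by hypothetical open/discard callbacks:

package main

import (
	"fmt"
	"sync"
)

// pool captures the get/put discipline used by filePool above.
type pool[T any] struct {
	mu      sync.Mutex
	free    []T
	open    func() (T, error)
	discard func(T, error)
}

func (p *pool[T]) get() (T, error) {
	p.mu.Lock()
	if n := len(p.free); n > 0 {
		v := p.free[n-1]
		p.free = p.free[:n-1]
		p.mu.Unlock()
		return v, nil
	}
	p.mu.Unlock()
	return p.open()
}

func (p *pool[T]) put(v T, err error) {
	if err != nil {
		p.discard(v, err) // broken handles are dropped, not recycled
		return
	}
	p.mu.Lock()
	p.free = append(p.free, v)
	p.mu.Unlock()
}

func main() {
	p := &pool[int]{
		open:    func() (int, error) { return 42, nil },
		discard: func(v int, err error) { fmt.Println("dropping", v, "after", err) },
	}
	v, _ := p.get()
	p.put(v, nil)
	fmt.Println("idle handles:", len(p.free))
}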
228 backend/smb/filepool_test.go Normal file
@@ -0,0 +1,228 @@
+package smb
+
+import (
+	"context"
+	"errors"
+	"sync"
+	"testing"
+
+	"github.com/cloudsoda/go-smb2"
+	"github.com/stretchr/testify/assert"
+)
+
+// Mock Fs that implements FsInterface
+type mockFs struct {
+	mu                  sync.Mutex
+	putConnectionCalled bool
+	putConnectionErr    error
+	getConnectionCalled bool
+	getConnectionErr    error
+	getConnectionResult *conn
+	removeSessionCalled bool
+}
+
+func (m *mockFs) putConnection(pc **conn, err error) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	m.putConnectionCalled = true
+	m.putConnectionErr = err
+}
+
+func (m *mockFs) getConnection(ctx context.Context, share string) (*conn, error) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	m.getConnectionCalled = true
+	if m.getConnectionErr != nil {
+		return nil, m.getConnectionErr
+	}
+	if m.getConnectionResult != nil {
+		return m.getConnectionResult, nil
+	}
+	return &conn{}, nil
+}
+
+func (m *mockFs) removeSession() {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	m.removeSessionCalled = true
+}
+
+func (m *mockFs) isPutConnectionCalled() bool {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	return m.putConnectionCalled
+}
+
+func (m *mockFs) getPutConnectionErr() error {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	return m.putConnectionErr
+}
+
+func (m *mockFs) isGetConnectionCalled() bool {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	return m.getConnectionCalled
+}
+
+func newMockFs() *mockFs {
+	return &mockFs{}
+}
+
+// Helper function to create a mock file
+func newMockFile() *file {
+	return &file{
+		File: &smb2.File{},
+		c:    &conn{},
+	}
+}
+
+// Test filePool creation
+func TestNewFilePool(t *testing.T) {
+	ctx := context.Background()
+	fs := newMockFs()
+	share := "testshare"
+	path := "/test/path"
+
+	pool := newFilePool(ctx, fs, share, path)
+
+	assert.NotNil(t, pool)
+	assert.Equal(t, ctx, pool.ctx)
+	assert.Equal(t, fs, pool.fs)
+	assert.Equal(t, share, pool.share)
+	assert.Equal(t, path, pool.path)
+	assert.Empty(t, pool.pool)
+}
+
+// Test getting file from pool when pool has files
+func TestFilePool_Get_FromPool(t *testing.T) {
+	ctx := context.Background()
+	fs := newMockFs()
+	pool := newFilePool(ctx, fs, "testshare", "/test/path")
+
+	// Add a mock file to the pool
+	mockFile := newMockFile()
+	pool.pool = append(pool.pool, mockFile)
+
+	// Get file from pool
+	f, err := pool.get()
+
+	assert.NoError(t, err)
+	assert.NotNil(t, f)
+	assert.Equal(t, mockFile, f)
+	assert.Empty(t, pool.pool)
+}
+
+// Test getting file when pool is empty
+func TestFilePool_Get_EmptyPool(t *testing.T) {
+	ctx := context.Background()
+	fs := newMockFs()
+
+	// Set up the mock to return an error from getConnection
+	// This tests that the pool calls getConnection when empty
+	fs.getConnectionErr = errors.New("connection failed")
+
+	pool := newFilePool(ctx, fs, "testshare", "test/path")
+
+	// This should call getConnection and return the error
+	f, err := pool.get()
+	assert.Error(t, err)
+	assert.Nil(t, f)
+	assert.True(t, fs.isGetConnectionCalled())
+	assert.Equal(t, "connection failed", err.Error())
+}
+
+// Test putting file successfully
+func TestFilePool_Put_Success(t *testing.T) {
+	ctx := context.Background()
+	fs := newMockFs()
+	pool := newFilePool(ctx, fs, "testshare", "/test/path")
+
+	mockFile := newMockFile()
+
+	pool.put(mockFile, nil)
+
+	assert.Len(t, pool.pool, 1)
+	assert.Equal(t, mockFile, pool.pool[0])
+}
+
+// Test putting file with error
+func TestFilePool_Put_WithError(t *testing.T) {
+	ctx := context.Background()
+	fs := newMockFs()
+	pool := newFilePool(ctx, fs, "testshare", "/test/path")
+
+	mockFile := newMockFile()
+
+	pool.put(mockFile, errors.New("write error"))
+
+	// Should call putConnection with error
+	assert.True(t, fs.isPutConnectionCalled())
+	assert.Equal(t, errors.New("write error"), fs.getPutConnectionErr())
+	assert.Empty(t, pool.pool)
+}
+
+// Test putting nil file
+func TestFilePool_Put_NilFile(t *testing.T) {
+	ctx := context.Background()
+	fs := newMockFs()
+	pool := newFilePool(ctx, fs, "testshare", "/test/path")
+
+	// Should not panic
+	pool.put(nil, nil)
+	pool.put(nil, errors.New("some error"))
+
+	assert.Empty(t, pool.pool)
+}
+
+// Test draining pool with files
+func TestFilePool_Drain_WithFiles(t *testing.T) {
+	ctx := context.Background()
+	fs := newMockFs()
+	pool := newFilePool(ctx, fs, "testshare", "/test/path")
+
+	// Add mock files to pool
+	mockFile1 := newMockFile()
+	mockFile2 := newMockFile()
+	pool.pool = append(pool.pool, mockFile1, mockFile2)
+
+	// Before draining
+	assert.Len(t, pool.pool, 2)
+
+	_ = pool.drain()
+	assert.Empty(t, pool.pool)
+}
+
+// Test concurrent access to pool
+func TestFilePool_ConcurrentAccess(t *testing.T) {
+	ctx := context.Background()
+	fs := newMockFs()
+	pool := newFilePool(ctx, fs, "testshare", "/test/path")
+
+	const numGoroutines = 10
+	for i := 0; i < numGoroutines; i++ {
+		mockFile := newMockFile()
+		pool.pool = append(pool.pool, mockFile)
+	}
+
+	// Test concurrent get operations
+	done := make(chan bool, numGoroutines)
+
+	for i := 0; i < numGoroutines; i++ {
+		go func() {
+			defer func() { done <- true }()
+
+			f, err := pool.get()
+			if err == nil {
+				pool.put(f, nil)
+			}
+		}()
+	}
+
+	for i := 0; i < numGoroutines; i++ {
+		<-done
+	}
+
+	// Pool should be in a consistent state after the concurrent access
+	assert.Len(t, pool.pool, numGoroutines)
+}
@@ -3,6 +3,7 @@ package smb
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"os"
@@ -494,22 +495,82 @@ func (f *Fs) About(ctx context.Context) (_ *fs.Usage, err error) {
 		return nil, err
 	}
 
-	bs := int64(stat.BlockSize())
+	bs := stat.BlockSize()
 	usage := &fs.Usage{
-		Total: fs.NewUsageValue(bs * int64(stat.TotalBlockCount())),
-		Used:  fs.NewUsageValue(bs * int64(stat.TotalBlockCount()-stat.FreeBlockCount())),
-		Free:  fs.NewUsageValue(bs * int64(stat.AvailableBlockCount())),
+		Total: fs.NewUsageValue(bs * stat.TotalBlockCount()),
+		Used:  fs.NewUsageValue(bs * (stat.TotalBlockCount() - stat.FreeBlockCount())),
+		Free:  fs.NewUsageValue(bs * stat.AvailableBlockCount()),
 	}
 	return usage, nil
 }
 
+type smbWriterAt struct {
+	pool    *filePool
+	closed  bool
+	closeMu sync.Mutex
+	wg      sync.WaitGroup
+}
+
+func (w *smbWriterAt) WriteAt(p []byte, off int64) (int, error) {
+	w.closeMu.Lock()
+	if w.closed {
+		w.closeMu.Unlock()
+		return 0, errors.New("writer already closed")
+	}
+	w.wg.Add(1)
+	w.closeMu.Unlock()
+	defer w.wg.Done()
+
+	f, err := w.pool.get()
+	if err != nil {
+		return 0, fmt.Errorf("failed to get file from pool: %w", err)
+	}
+
+	n, writeErr := f.WriteAt(p, off)
+	w.pool.put(f, writeErr)
+
+	if writeErr != nil {
+		return n, fmt.Errorf("failed to write at offset %d: %w", off, writeErr)
+	}
+
+	return n, writeErr
+}
+
+func (w *smbWriterAt) Close() error {
+	w.closeMu.Lock()
+	defer w.closeMu.Unlock()
+
+	if w.closed {
+		return nil
+	}
+	w.closed = true
+
+	// Wait for all pending writes to finish
+	w.wg.Wait()
+
+	var errs []error
+
+	// Drain the pool
+	if err := w.pool.drain(); err != nil {
+		errs = append(errs, fmt.Errorf("failed to drain file pool: %w", err))
+	}
+
+	// Remove session
+	w.pool.fs.removeSession()
+
+	if len(errs) > 0 {
+		return errors.Join(errs...)
+	}
+
+	return nil
+}
+
 // OpenWriterAt opens with a handle for random access writes
 //
 // Pass in the remote desired and the size if known.
 //
 // It truncates any existing object
 func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
-	var err error
 	o := &Object{
 		fs:     f,
 		remote: remote,
@@ -519,27 +580,42 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
 		return nil, fs.ErrorIsDir
 	}
 
-	err = o.fs.ensureDirectory(ctx, share, filename)
+	err := o.fs.ensureDirectory(ctx, share, filename)
 	if err != nil {
 		return nil, fmt.Errorf("failed to make parent directories: %w", err)
 	}
 
-	filename = o.fs.toSambaPath(filename)
+	smbPath := o.fs.toSambaPath(filename)
 
-	o.fs.addSession() // Show session in use
-	defer o.fs.removeSession()
-
+	// One-time truncate
 	cn, err := o.fs.getConnection(ctx, share)
 	if err != nil {
 		return nil, err
 	}
-	fl, err := cn.smbShare.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644)
+	file, err := cn.smbShare.OpenFile(smbPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644)
 	if err != nil {
-		return nil, fmt.Errorf("failed to open: %w", err)
+		o.fs.putConnection(&cn, err)
+		return nil, err
 	}
+	if size > 0 {
+		if truncateErr := file.Truncate(size); truncateErr != nil {
+			_ = file.Close()
+			o.fs.putConnection(&cn, truncateErr)
+			return nil, fmt.Errorf("failed to truncate file: %w", truncateErr)
+		}
+	}
+	if closeErr := file.Close(); closeErr != nil {
+		o.fs.putConnection(&cn, closeErr)
+		return nil, fmt.Errorf("failed to close file after truncate: %w", closeErr)
+	}
+	o.fs.putConnection(&cn, nil)
 
-	return fl, nil
+	// Add a new session
+	o.fs.addSession()
+
+	return &smbWriterAt{
+		pool: newFilePool(ctx, o.fs, share, smbPath),
+	}, nil
 }
 
 // Shutdown the backend, closing any background tasks and any
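
OpenWriterAt now returns a pooled writer instead of a single file handle, so rclone's multi-thread copy can issue WriteAt calls from several goroutines at once, each on its own SMB file handle. A stand-alone sketch of the calling pattern the writer must tolerate, where memWriter is a hypothetical in-memory stand-in for *smbWriterAt:

package main

import (
	"fmt"
	"sync"
)

// memWriter is a toy io.WriterAt: each WriteAt lands at its own offset,
// so concurrent writers never overlap, just like chunked multi-thread copies.
type memWriter struct {
	mu  sync.Mutex
	buf []byte
}

func (w *memWriter) WriteAt(p []byte, off int64) (int, error) {
	w.mu.Lock()
	defer w.mu.Unlock()
	if need := int(off) + len(p); need > len(w.buf) {
		w.buf = append(w.buf, make([]byte, need-len(w.buf))...)
	}
	return copy(w.buf[off:], p), nil
}

func main() {
	w := &memWriter{}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(chunk int64) {
			defer wg.Done()
			_, _ = w.WriteAt([]byte("data"), chunk*4) // one goroutine per chunk
		}(int64(i))
	}
	wg.Wait()
	fmt.Println(string(w.buf))
}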
@@ -1,7 +1,7 @@
 // Package common defines code common to the union and the policies
 //
 // These need to be defined in a separate package to avoid import loops
-package common
+package common //nolint:revive // Don't include revive when running golangci-lint because this triggers var-naming: avoid meaningless package names
 
 import "github.com/rclone/rclone/fs"
 
119 bin/make-test-certs.sh Executable file
@@ -0,0 +1,119 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+# Create test TLS certificates for use with rclone.
+
+OUT_DIR="${OUT_DIR:-./tls-test}"
+CA_SUBJ="${CA_SUBJ:-/C=US/ST=Test/L=Test/O=Test Org/OU=Test Unit/CN=Test Root CA}"
+SERVER_CN="${SERVER_CN:-localhost}"
+CLIENT_CN="${CLIENT_CN:-Test Client}"
+CLIENT_KEY_PASS="${CLIENT_KEY_PASS:-testpassword}"
+
+CA_DAYS=${CA_DAYS:-3650}
+SERVER_DAYS=${SERVER_DAYS:-825}
+CLIENT_DAYS=${CLIENT_DAYS:-825}
+
+mkdir -p "$OUT_DIR"
+cd "$OUT_DIR"
+
+# Create OpenSSL config
+
+# CA extensions
+cat > ca_openssl.cnf <<'EOF'
+[ ca_ext ]
+basicConstraints = critical, CA:true, pathlen:1
+keyUsage = critical, keyCertSign, cRLSign
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid:always,issuer
+EOF
+
+# Server extensions (SAN includes localhost + loopback IP)
+cat > server_openssl.cnf <<EOF
+[ server_ext ]
+basicConstraints = critical, CA:false
+keyUsage = critical, digitalSignature, keyEncipherment
+extendedKeyUsage = serverAuth
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid,issuer
+subjectAltName = @alt_names
+
+[ alt_names ]
+DNS.1 = ${SERVER_CN}
+IP.1 = 127.0.0.1
+EOF
+
+# Client extensions (for mTLS client auth)
+cat > client_openssl.cnf <<'EOF'
+[ client_ext ]
+basicConstraints = critical, CA:false
+keyUsage = critical, digitalSignature, keyEncipherment
+extendedKeyUsage = clientAuth
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid,issuer
+EOF
+
+echo "Create CA key, CSR, and self-signed CA cert"
+if [ ! -f ca.key.pem ]; then
+    openssl genrsa -out ca.key.pem 4096
+    chmod 600 ca.key.pem
+fi
+
+openssl req -new -key ca.key.pem -subj "$CA_SUBJ" -out ca.csr.pem
+
+openssl x509 -req -in ca.csr.pem -signkey ca.key.pem \
+    -sha256 -days "$CA_DAYS" \
+    -extfile ca_openssl.cnf -extensions ca_ext \
+    -out ca.cert.pem
+
+echo "Create server key (NO PASSWORD) and cert signed by CA"
+openssl genrsa -out server.key.pem 2048
+chmod 600 server.key.pem
+
+openssl req -new -key server.key.pem -subj "/CN=${SERVER_CN}" -out server.csr.pem
+
+openssl x509 -req -in server.csr.pem \
+    -CA ca.cert.pem -CAkey ca.key.pem -CAcreateserial \
+    -out server.cert.pem -days "$SERVER_DAYS" -sha256 \
+    -extfile server_openssl.cnf -extensions server_ext
+
+echo "Create client key (PASSWORD-PROTECTED), CSR, and cert"
+openssl genrsa -aes256 -passout pass:"$CLIENT_KEY_PASS" -out client.key.pem 2048
+chmod 600 client.key.pem
+
+openssl req -new -key client.key.pem -passin pass:"$CLIENT_KEY_PASS" \
+    -subj "/CN=${CLIENT_CN}" -out client.csr.pem
+
+openssl x509 -req -in client.csr.pem \
+    -CA ca.cert.pem -CAkey ca.key.pem -CAcreateserial \
+    -out client.cert.pem -days "$CLIENT_DAYS" -sha256 \
+    -extfile client_openssl.cnf -extensions client_ext
+
+echo "Verify chain"
+openssl verify -CAfile ca.cert.pem server.cert.pem client.cert.pem
+
+echo "Done"
+
+echo
+echo "Summary"
+echo "-------"
+printf "%-22s %s\n" \
+    "CA key:" "ca.key.pem" \
+    "CA cert:" "ca.cert.pem" \
+    "Server key:" "server.key.pem (no password)" \
+    "Server CSR:" "server.csr.pem" \
+    "Server cert:" "server.cert.pem (SAN: ${SERVER_CN}, 127.0.0.1)" \
+    "Client key:" "client.key.pem (encrypted)" \
+    "Client CSR:" "client.csr.pem" \
+    "Client cert:" "client.cert.pem" \
+    "Client key password:" "$CLIENT_KEY_PASS"
+
+echo
+echo "Test rclone server"
+echo
+echo "rclone serve http -vv --addr :8080 --cert ${OUT_DIR}/server.cert.pem --key ${OUT_DIR}/server.key.pem --client-ca ${OUT_DIR}/ca.cert.pem ."
+
+echo
+echo "Test rclone client"
+echo
+echo "rclone lsf :http: --http-url 'https://localhost:8080' --ca-cert ${OUT_DIR}/ca.cert.pem --client-cert ${OUT_DIR}/client.cert.pem --client-key ${OUT_DIR}/client.key.pem --client-pass \$(rclone obscure $CLIENT_KEY_PASS)"
+echo
@@ -57,11 +57,11 @@ def make_out(data, indent=""):
         return
     del(data[category])
     if indent != "" and len(lines) == 1:
-        out_lines.append(indent+"* " + title+": " + lines[0])
+        out_lines.append(indent+"- " + title+": " + lines[0])
         return
-    out_lines.append(indent+"* " + title)
+    out_lines.append(indent+"- " + title)
     for line in lines:
-        out_lines.append(indent+"  * " + line)
+        out_lines.append(indent+"  - " + line)
     return out, out_lines
@@ -129,12 +129,12 @@ def main():
             new_features[name].append(message)
 
     # Output new features
     out, new_features_lines = make_out(new_features, indent="  ")
     for name in sorted(new_features.keys()):
         out(name)
 
     # Output bugfixes
     out, bugfix_lines = make_out(bugfixes, indent="  ")
     for name in sorted(bugfixes.keys()):
         out(name)
 
@@ -163,15 +163,15 @@ def main():
 
 [See commits](https://github.com/rclone/rclone/compare/%(version)s...%(next_version)s)
 
-* New backends
-* New commands
-* New Features
+- New backends
+- New commands
+- New Features
 %(new_features)s
-* Bug Fixes
+- Bug Fixes
 %(bugfixes)s
 %(backend_changes)s""" % locals())
     sys.stdout.write(old_tail)
 
 
 if __name__ == "__main__":
     main()
@@ -23,7 +23,7 @@ def add_email(name, email):
     """
     print("Adding %s <%s>" % (name, email))
     with open(AUTHORS, "a+") as fd:
-        print("  * %s <%s>" % (name, email), file=fd)
+        print("- %s <%s>" % (name, email), file=fd)
     subprocess.check_call(["git", "commit", "-m", "Add %s to contributors" % name, AUTHORS])
 
 def main():
@@ -51,47 +51,52 @@ output. The output is typically used, free, quota and trash contents.
 
 E.g. Typical output from ` + "`rclone about remote:`" + ` is:
 
-    Total:   17 GiB
-    Used:    7.444 GiB
-    Free:    1.315 GiB
-    Trashed: 100.000 MiB
-    Other:   8.241 GiB
+` + "```text" + `
+Total:   17 GiB
+Used:    7.444 GiB
+Free:    1.315 GiB
+Trashed: 100.000 MiB
+Other:   8.241 GiB
+` + "```" + `
 
 Where the fields are:
 
-* Total: Total size available.
-* Used: Total size used.
-* Free: Total space available to this user.
-* Trashed: Total space used by trash.
-* Other: Total amount in other storage (e.g. Gmail, Google Photos).
-* Objects: Total number of objects in the storage.
+- Total: Total size available.
+- Used: Total size used.
+- Free: Total space available to this user.
+- Trashed: Total space used by trash.
+- Other: Total amount in other storage (e.g. Gmail, Google Photos).
+- Objects: Total number of objects in the storage.
 
 All sizes are in number of bytes.
 
 Applying a ` + "`--full`" + ` flag to the command prints the bytes in full, e.g.
 
-    Total:   18253611008
-    Used:    7993453766
-    Free:    1411001220
-    Trashed: 104857602
-    Other:   8849156022
+` + "```text" + `
+Total:   18253611008
+Used:    7993453766
+Free:    1411001220
+Trashed: 104857602
+Other:   8849156022
+` + "```" + `
 
 A ` + "`--json`" + ` flag generates conveniently machine-readable output, e.g.
 
-    {
-        "total": 18253611008,
-        "used": 7993453766,
-        "trashed": 104857602,
-        "other": 8849156022,
-        "free": 1411001220
-    }
+` + "```json" + `
+{
+  "total": 18253611008,
+  "used": 7993453766,
+  "trashed": 104857602,
+  "other": 8849156022,
+  "free": 1411001220
+}
+` + "```" + `
 
 Not all backends print all fields. Information is not included if it is not
 provided by a backend. Where the value is unlimited it is omitted.
 
 Some backends does not support the ` + "`rclone about`" + ` command at all,
-see complete list in [documentation](https://rclone.org/overview/#optional-features).
-`,
+see complete list in [documentation](https://rclone.org/overview/#optional-features).`,
 	Annotations: map[string]string{
 		"versionIntroduced": "v1.41",
 		// "groups": "",
@@ -23,21 +23,23 @@ func init() {
 }
 
 var commandDefinition = &cobra.Command{
-	Use:   "authorize <fs name> [base64_json_blob | client_id client_secret]",
+	Use:   "authorize <backendname> [base64_json_blob | client_id client_secret]",
 	Short: `Remote authorization.`,
 	Long: `Remote authorization. Used to authorize a remote or headless
-rclone from a machine with a browser - use as instructed by
-rclone config.
+rclone from a machine with a browser. Use as instructed by rclone config.
+See also the [remote setup documentation](/remote_setup).
 
 The command requires 1-3 arguments:
-  - fs name (e.g., "drive", "s3", etc.)
-  - Either a base64 encoded JSON blob obtained from a previous rclone config session
-  - Or a client_id and client_secret pair obtained from the remote service
+
+- Name of a backend (e.g. "drive", "s3")
+- Either a base64 encoded JSON blob obtained from a previous rclone config session
+- Or a client_id and client_secret pair obtained from the remote service
 
 Use --auth-no-open-browser to prevent rclone to open auth
 link in default browser automatically.
 
-Use --template to generate HTML output via a custom Go template. If a blank string is provided as an argument to this flag, the default template is used.`,
+Use --template to generate HTML output via a custom Go template. If a blank
+string is provided as an argument to this flag, the default template is used.`,
 	Annotations: map[string]string{
 		"versionIntroduced": "v1.27",
 	},
@@ -10,7 +10,7 @@ import (
 
 func TestAuthorizeCommand(t *testing.T) {
 	// Test that the Use string is correctly formatted
-	if commandDefinition.Use != "authorize <fs name> [base64_json_blob | client_id client_secret]" {
+	if commandDefinition.Use != "authorize <backendname> [base64_json_blob | client_id client_secret]" {
 		t.Errorf("Command Use string doesn't match expected format: %s", commandDefinition.Use)
 	}
 
@@ -26,7 +26,7 @@ func TestAuthorizeCommand(t *testing.T) {
 	}
 
 	helpOutput := buf.String()
-	if !strings.Contains(helpOutput, "authorize <fs name>") {
+	if !strings.Contains(helpOutput, "authorize <backendname>") {
 		t.Errorf("Help output doesn't contain correct usage information")
 	}
 }
@@ -37,26 +37,33 @@ see the backend docs for definitions.
 
 You can discover what commands a backend implements by using
 
-    rclone backend help remote:
-    rclone backend help <backendname>
+` + "```sh" + `
+rclone backend help remote:
+rclone backend help <backendname>
+` + "```" + `
 
 You can also discover information about the backend using (see
 [operations/fsinfo](/rc/#operations-fsinfo) in the remote control docs
 for more info).
 
-    rclone backend features remote:
+` + "```sh" + `
+rclone backend features remote:
+` + "```" + `
 
 Pass options to the backend command with -o. This should be key=value or key, e.g.:
 
-    rclone backend stats remote:path stats -o format=json -o long
+` + "```sh" + `
+rclone backend stats remote:path stats -o format=json -o long
+` + "```" + `
 
 Pass arguments to the backend by placing them on the end of the line
 
-    rclone backend cleanup remote:path file1 file2 file3
+` + "```sh" + `
+rclone backend cleanup remote:path file1 file2 file3
+` + "```" + `
 
 Note to run these commands on a running backend then see
-[backend/command](/rc/#backend-command) in the rc docs.
-`,
+[backend/command](/rc/#backend-command) in the rc docs.`,
 	Annotations: map[string]string{
 		"versionIntroduced": "v1.52",
 		"groups":            "Important",
@@ -4,15 +4,19 @@ package bilib
 import (
 	"bytes"
 	"log/slog"
+	"sync"
 
 	"github.com/rclone/rclone/fs/log"
 )
 
 // CaptureOutput runs a function capturing its output at log level INFO.
 func CaptureOutput(fun func()) []byte {
+	var mu sync.Mutex
 	buf := &bytes.Buffer{}
 	oldLevel := log.Handler.SetLevel(slog.LevelInfo)
 	log.Handler.SetOutput(func(level slog.Level, text string) {
+		mu.Lock()
+		defer mu.Unlock()
 		buf.WriteString(text)
 	})
 	defer func() {
@@ -20,5 +24,7 @@ func CaptureOutput(fun func()) []byte {
 		log.Handler.SetLevel(oldLevel)
 	}()
 	fun()
+	mu.Lock()
+	defer mu.Unlock()
 	return buf.Bytes()
 }
|
|||||||
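The added mutex matters because the function under capture may log from several goroutines at once, and bytes.Buffer is not safe for concurrent writers; note that the final read takes the same lock. A minimal, self-contained sketch of the same locking pattern (hypothetical captureOutput helper and emit callback, not rclone's log.Handler API):

```go
package main

import (
	"bytes"
	"fmt"
	"sync"
)

// captureOutput mirrors the pattern in the hunk above: a callback that may
// fire on any goroutine appends to a shared buffer, so every access to the
// buffer, including the final read, takes the same mutex.
func captureOutput(fun func(emit func(string))) []byte {
	var mu sync.Mutex
	buf := &bytes.Buffer{}
	emit := func(text string) {
		mu.Lock()
		defer mu.Unlock()
		buf.WriteString(text)
	}
	fun(emit)
	mu.Lock()
	defer mu.Unlock()
	return buf.Bytes()
}

func main() {
	out := captureOutput(func(emit func(string)) {
		var wg sync.WaitGroup
		for i := 0; i < 4; i++ {
			wg.Add(1)
			go func(n int) {
				defer wg.Done()
				emit(fmt.Sprintf("worker %d\n", n)) // concurrent writers, now safe
			}(i)
		}
		wg.Wait()
	})
	fmt.Printf("captured %d bytes\n", len(out))
}
```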
@@ -176,6 +176,7 @@ var (
 	// Flag -refresh-times helps with Dropbox tests failing with message
 	// "src and dst identical but can't set mod time without deleting and re-uploading"
 	argRefreshTimes = flag.Bool("refresh-times", false, "Force refreshing the target modtime, useful for Dropbox (default: false)")
+	ignoreLogs      = flag.Bool("ignore-logs", false, "skip comparing log lines but still compare listings")
 )

 // bisyncTest keeps all test data in a single place
@@ -226,6 +227,18 @@ var color = bisync.Color

 // TestMain drives the tests
 func TestMain(m *testing.M) {
+	bisync.LogTZ = time.UTC
+	ci := fs.GetConfig(context.TODO())
+	ciSave := *ci
+	defer func() {
+		*ci = ciSave
+	}()
+	// need to set context.TODO() here as we cannot pass a ctx to fs.LogLevelPrintf
+	ci.LogLevel = fs.LogLevelInfo
+	if *argDebug {
+		ci.LogLevel = fs.LogLevelDebug
+	}
+	fstest.Initialise()
 	fstest.TestMain(m)
 }

@@ -238,7 +251,8 @@ func TestBisyncRemoteLocal(t *testing.T) {
 	fs.Logf(nil, "remote: %v", remote)
 	require.NoError(t, err)
 	defer cleanup()
-	testBisync(t, remote, *argRemote2)
+	ctx, _ := fs.AddConfig(context.TODO())
+	testBisync(ctx, t, remote, *argRemote2)
 }

 // Path1 is local, Path2 is remote
@@ -250,7 +264,8 @@ func TestBisyncLocalRemote(t *testing.T) {
 	fs.Logf(nil, "remote: %v", remote)
 	require.NoError(t, err)
 	defer cleanup()
-	testBisync(t, *argRemote2, remote)
+	ctx, _ := fs.AddConfig(context.TODO())
+	testBisync(ctx, t, *argRemote2, remote)
 }

 // Path1 and Path2 are both different directories on remote
@@ -260,14 +275,34 @@ func TestBisyncRemoteRemote(t *testing.T) {
 	fs.Logf(nil, "remote: %v", remote)
 	require.NoError(t, err)
 	defer cleanup()
-	testBisync(t, remote, remote)
+	ctx, _ := fs.AddConfig(context.TODO())
+	testBisync(ctx, t, remote, remote)
+}
+
+// make sure rc can cope with running concurrent jobs
+func TestBisyncConcurrent(t *testing.T) {
+	if !isLocal(*fstest.RemoteName) {
+		t.Skip("TestBisyncConcurrent is skipped on non-local")
+	}
+	oldArgTestCase := argTestCase
+	*argTestCase = "basic"
+	*ignoreLogs = true // not useful to compare logs here because both runs will be logging at once
+	t.Cleanup(func() {
+		argTestCase = oldArgTestCase
+		*ignoreLogs = false
+	})
+
+	t.Run("test1", testParallel)
+	t.Run("test2", testParallel)
+}
+
+func testParallel(t *testing.T) {
+	t.Parallel()
+	TestBisyncRemoteRemote(t)
 }

 // TestBisync is a test engine for bisync test cases.
-func testBisync(t *testing.T, path1, path2 string) {
-	ctx := context.Background()
-	fstest.Initialise()
-
+func testBisync(ctx context.Context, t *testing.T, path1, path2 string) {
 	ci := fs.GetConfig(ctx)
 	ciSave := *ci
 	defer func() {

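For context, the t.Run plus t.Parallel shape used by TestBisyncConcurrent above: each subtest marks itself parallel, so the two bodies genuinely overlap, and the parent test only completes when both finish. A stand-alone, illustrative sketch (hypothetical test, not part of the diff):

```go
package example

import (
	"sync/atomic"
	"testing"
)

var active int32 // hypothetical shared state the suite must tolerate concurrently

// TestConcurrentRuns shows the pattern: t.Parallel releases each subtest to
// run alongside its sibling, and the counter proves they really overlap.
func TestConcurrentRuns(t *testing.T) {
	run := func(t *testing.T) {
		t.Parallel() // run alongside the other subtest
		n := atomic.AddInt32(&active, 1)
		defer atomic.AddInt32(&active, -1)
		if n > 2 {
			t.Fatalf("unexpected concurrency level %d", n)
		}
	}
	t.Run("test1", run)
	t.Run("test2", run)
}
```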
@@ -276,8 +311,9 @@ func testBisync(t *testing.T, path1, path2 string) {
 	if *argRefreshTimes {
 		ci.RefreshTimes = true
 	}
+	bisync.ColorsLock.Lock()
 	bisync.Colors = true
-	time.Local = bisync.TZ
+	bisync.ColorsLock.Unlock()
 	ci.FsCacheExpireDuration = fs.Duration(5 * time.Hour)

 	baseDir, err := os.Getwd()

@@ -563,11 +599,15 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase str
 	}
 }

+func isLocal(remote string) bool {
+	return bilib.IsLocalPath(remote) && !strings.HasPrefix(remote, ":") && !strings.Contains(remote, ",")
+}
+
 // makeTempRemote creates temporary folder and makes a filesystem
 // if a local path is provided, it's ignored (the test will run under system temp)
 func (b *bisyncTest) makeTempRemote(ctx context.Context, remote, subdir string) (f, parent fs.Fs, path, canon string) {
 	var err error
-	if bilib.IsLocalPath(remote) && !strings.HasPrefix(remote, ":") && !strings.Contains(remote, ",") {
+	if isLocal(remote) {
 		if remote != "" && !strings.HasPrefix(remote, "local") && *fstest.RemoteName != "" {
 			b.t.Fatalf(`Missing ":" in remote %q. Use "local" to test with local filesystem.`, remote)
 		}

@@ -598,13 +638,8 @@ func (b *bisyncTest) makeTempRemote(ctx context.Context, remote, subdir string)
 }

 func (b *bisyncTest) cleanupCase(ctx context.Context) {
-	// Silence "directory not found" errors from the ftp backend
-	_ = bilib.CaptureOutput(func() {
-		_ = operations.Purge(ctx, b.fs1, "")
-	})
-	_ = bilib.CaptureOutput(func() {
-		_ = operations.Purge(ctx, b.fs2, "")
-	})
+	_ = operations.Purge(ctx, b.fs1, "")
+	_ = operations.Purge(ctx, b.fs2, "")
 	_ = os.RemoveAll(b.workDir)
 	accounting.Stats(ctx).ResetCounters()
 }

@@ -619,11 +654,6 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
 	defer func() {
 		*ci = ciSave
 	}()
-	ci.LogLevel = fs.LogLevelInfo
-	if b.debug {
-		ci.LogLevel = fs.LogLevelDebug
-	}

 	testFunc := func() {
 		src := filepath.Join(b.dataDir, "file7.txt")

@@ -953,6 +983,12 @@ func (b *bisyncTest) checkPreReqs(ctx context.Context, opt *bisync.Options) (con
 		b.fs2.Features().Disable("Copy") // API has longstanding bug for conflictBehavior=replace https://github.com/rclone/rclone/issues/4590
 		b.fs2.Features().Disable("Move")
 	}
+	if strings.HasPrefix(b.fs1.String(), "sftp") {
+		b.fs1.Features().Disable("Copy") // disable --sftp-copy-is-hardlink as hardlinks are not truly copies
+	}
+	if strings.HasPrefix(b.fs2.String(), "sftp") {
+		b.fs2.Features().Disable("Copy") // disable --sftp-copy-is-hardlink as hardlinks are not truly copies
+	}
 	if strings.Contains(strings.ToLower(fs.ConfigString(b.fs1)), "mailru") || strings.Contains(strings.ToLower(fs.ConfigString(b.fs2)), "mailru") {
 		fs.GetConfig(ctx).TPSLimit = 10 // https://github.com/rclone/rclone/issues/7768#issuecomment-2060888980
 	}

@@ -975,17 +1011,23 @@ func (b *bisyncTest) checkPreReqs(ctx context.Context, opt *bisync.Options) (con
 		objinfo := object.NewStaticObjectInfo("modtime_write_test", initDate, int64(len("modtime_write_test")), true, nil, nil)
 		obj, err := f.Put(ctx, in, objinfo)
 		require.NoError(b.t, err)
+		if !f.Features().IsLocal {
+			time.Sleep(time.Second) // avoid GoogleCloudStorage Error 429 rateLimitExceeded
+		}
 		err = obj.SetModTime(ctx, initDate)
 		if err == fs.ErrorCantSetModTime {
-			b.t.Skip("skipping test as at least one remote does not support setting modtime")
-		}
-		if !f.Features().IsLocal {
-			time.Sleep(time.Second) // avoid GoogleCloudStorage Error 429 rateLimitExceeded
+			if b.testCase != "nomodtime" {
+				b.t.Skip("skipping test as at least one remote does not support setting modtime")
+			}
 		}
 		err = obj.Remove(ctx)
 		require.NoError(b.t, err)
 	}
-	testSetModtime(b.fs1)
-	testSetModtime(b.fs2)
+	if b.testCase != "nomodtime" {
+		testSetModtime(b.fs1)
+		testSetModtime(b.fs2)
+	}

 	if b.testCase == "normalization" || b.testCase == "extended_char_paths" || b.testCase == "extended_filenames" {
 		// test whether remote is capable of running test

@@ -1429,6 +1471,9 @@ func (b *bisyncTest) compareResults() int {
 		resultText := b.mangleResult(b.workDir, file, false)

 		if fileType(file) == "log" {
+			if *ignoreLogs {
+				continue
+			}
 			// save mangled logs so difference is easier on eyes
 			goldenFile := filepath.Join(b.logDir, "mangled.golden.log")
 			resultFile := filepath.Join(b.logDir, "mangled.result.log")

@@ -16,15 +16,17 @@ import (
 	"github.com/rclone/rclone/fs/operations"
 )

-var hashType hash.Type
-var fsrc, fdst fs.Fs
-var fcrypt *crypt.Fs
+type bisyncCheck = struct {
+	hashType   hash.Type
+	fsrc, fdst fs.Fs
+	fcrypt     *crypt.Fs
+}

 // WhichCheck determines which CheckFn we should use based on the Fs types
 // It is more robust and accurate than Check because
 // it will fallback to CryptCheck or DownloadCheck instead of --size-only!
 // it returns the *operations.CheckOpt with the CheckFn set.
-func WhichCheck(ctx context.Context, opt *operations.CheckOpt) *operations.CheckOpt {
+func (b *bisyncRun) WhichCheck(ctx context.Context, opt *operations.CheckOpt) *operations.CheckOpt {
 	ci := fs.GetConfig(ctx)
 	common := opt.Fsrc.Hashes().Overlap(opt.Fdst.Hashes())

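The pattern behind this hunk and those that follow: package-level variables become fields on the run object, and free functions become methods, so two bisync runs can execute concurrently without stomping on each other's scratch state. A hypothetical, trimmed-down sketch of that refactor (stand-in types and names, not rclone's):

```go
package example

// Before the refactor, state like this lived at package scope:
//
//	var hashType string
//	var fsrc, fdst string
//
// After, it rides on the run value, so concurrent runs each see their own copy.

type checkState struct {
	hashType string // stand-in for hash.Type
	fsrc     string // stand-in for fs.Fs
	fdst     string // stand-in for fs.Fs
}

type run struct {
	check checkState
}

// whichCheck records its decision on b rather than in package globals,
// which is what makes the method-receiver conversion above worthwhile.
func (b *run) whichCheck(src, dst string) {
	b.check.fsrc = src
	b.check.fdst = dst
	b.check.hashType = "md5"
}
```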
@@ -40,32 +42,32 @@ func WhichCheck(ctx context.Context, opt *operations.CheckOpt) *operations.Check

 	if (srcIsCrypt && dstIsCrypt) || (!srcIsCrypt && dstIsCrypt) {
 		// if both are crypt or only dst is crypt
-		hashType = FdstCrypt.UnWrap().Hashes().GetOne()
-		if hashType != hash.None {
+		b.check.hashType = FdstCrypt.UnWrap().Hashes().GetOne()
+		if b.check.hashType != hash.None {
 			// use cryptcheck
-			fsrc = opt.Fsrc
-			fdst = opt.Fdst
-			fcrypt = FdstCrypt
-			fs.Infof(fdst, "Crypt detected! Using cryptcheck instead of check. (Use --size-only or --ignore-checksum to disable)")
-			opt.Check = CryptCheckFn
+			b.check.fsrc = opt.Fsrc
+			b.check.fdst = opt.Fdst
+			b.check.fcrypt = FdstCrypt
+			fs.Infof(b.check.fdst, "Crypt detected! Using cryptcheck instead of check. (Use --size-only or --ignore-checksum to disable)")
+			opt.Check = b.CryptCheckFn
 			return opt
 		}
 	} else if srcIsCrypt && !dstIsCrypt {
 		// if only src is crypt
-		hashType = FsrcCrypt.UnWrap().Hashes().GetOne()
-		if hashType != hash.None {
+		b.check.hashType = FsrcCrypt.UnWrap().Hashes().GetOne()
+		if b.check.hashType != hash.None {
 			// use reverse cryptcheck
-			fsrc = opt.Fdst
-			fdst = opt.Fsrc
-			fcrypt = FsrcCrypt
-			fs.Infof(fdst, "Crypt detected! Using cryptcheck instead of check. (Use --size-only or --ignore-checksum to disable)")
-			opt.Check = ReverseCryptCheckFn
+			b.check.fsrc = opt.Fdst
+			b.check.fdst = opt.Fsrc
+			b.check.fcrypt = FsrcCrypt
+			fs.Infof(b.check.fdst, "Crypt detected! Using cryptcheck instead of check. (Use --size-only or --ignore-checksum to disable)")
+			opt.Check = b.ReverseCryptCheckFn
 			return opt
 		}
 	}

 	// if we've gotten this far, neither check or cryptcheck will work, so use --download
-	fs.Infof(fdst, "Can't compare hashes, so using check --download for safety. (Use --size-only or --ignore-checksum to disable)")
+	fs.Infof(b.check.fdst, "Can't compare hashes, so using check --download for safety. (Use --size-only or --ignore-checksum to disable)")
 	opt.Check = DownloadCheckFn
 	return opt
 }

@@ -88,17 +90,17 @@ func CheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool,
 }

 // CryptCheckFn is a slightly modified version of CryptCheck
-func CryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
+func (b *bisyncRun) CryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
 	cryptDst := dst.(*crypt.Object)
 	underlyingDst := cryptDst.UnWrap()
-	underlyingHash, err := underlyingDst.Hash(ctx, hashType)
+	underlyingHash, err := underlyingDst.Hash(ctx, b.check.hashType)
 	if err != nil {
 		return true, false, fmt.Errorf("error reading hash from underlying %v: %w", underlyingDst, err)
 	}
 	if underlyingHash == "" {
 		return false, true, nil
 	}
-	cryptHash, err := fcrypt.ComputeHash(ctx, cryptDst, src, hashType)
+	cryptHash, err := b.check.fcrypt.ComputeHash(ctx, cryptDst, src, b.check.hashType)
 	if err != nil {
 		return true, false, fmt.Errorf("error computing hash: %w", err)
 	}
@@ -106,10 +108,10 @@ func CryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash
 		return false, true, nil
 	}
 	if cryptHash != underlyingHash {
-		err = fmt.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", fdst.Name(), fdst.Root(), cryptHash, fsrc.Name(), fsrc.Root(), underlyingHash)
+		err = fmt.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", b.check.fdst.Name(), b.check.fdst.Root(), cryptHash, b.check.fsrc.Name(), b.check.fsrc.Root(), underlyingHash)
 		fs.Debugf(src, "%s", err.Error())
 		// using same error msg as CheckFn so integration tests match
-		err = fmt.Errorf("%v differ", hashType)
+		err = fmt.Errorf("%v differ", b.check.hashType)
 		fs.Errorf(src, "%s", err.Error())
 		return true, false, nil
 	}
@@ -118,8 +120,8 @@ func CryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash

 // ReverseCryptCheckFn is like CryptCheckFn except src and dst are switched
 // result: src is crypt, dst is non-crypt
-func ReverseCryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
-	return CryptCheckFn(ctx, src, dst)
+func (b *bisyncRun) ReverseCryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
+	return b.CryptCheckFn(ctx, src, dst)
 }

 // DownloadCheckFn is a slightly modified version of Check with --download

@@ -137,7 +139,7 @@ func (b *bisyncRun) checkconflicts(ctxCheck context.Context, filterCheck *filter
 	if filterCheck.HaveFilesFrom() {
 		fs.Debugf(nil, "There are potential conflicts to check.")

-		opt, close, checkopterr := check.GetCheckOpt(b.fs1, b.fs2)
+		opt, close, checkopterr := check.GetCheckOpt(fs1, fs2)
 		if checkopterr != nil {
 			b.critical = true
 			b.retryable = true
@@ -148,16 +150,16 @@ func (b *bisyncRun) checkconflicts(ctxCheck context.Context, filterCheck *filter

 		opt.Match = new(bytes.Buffer)

-		opt = WhichCheck(ctxCheck, opt)
+		opt = b.WhichCheck(ctxCheck, opt)

 		fs.Infof(nil, "Checking potential conflicts...")
 		check := operations.CheckFn(ctxCheck, opt)
 		fs.Infof(nil, "Finished checking the potential conflicts. %s", check)

-		//reset error count, because we don't want to count check errors as bisync errors
+		// reset error count, because we don't want to count check errors as bisync errors
 		accounting.Stats(ctxCheck).ResetErrors()

-		//return the list of identical files to check against later
+		// return the list of identical files to check against later
 		if len(fmt.Sprint(opt.Match)) > 0 {
 			matches = bilib.ToNames(strings.Split(fmt.Sprint(opt.Match), "\n"))
 		}
@@ -173,14 +175,14 @@ func (b *bisyncRun) checkconflicts(ctxCheck context.Context, filterCheck *filter

 // WhichEqual is similar to WhichCheck, but checks a single object.
 // Returns true if the objects are equal, false if they differ or if we don't know
-func WhichEqual(ctx context.Context, src, dst fs.Object, Fsrc, Fdst fs.Fs) bool {
+func (b *bisyncRun) WhichEqual(ctx context.Context, src, dst fs.Object, Fsrc, Fdst fs.Fs) bool {
 	opt, close, checkopterr := check.GetCheckOpt(Fsrc, Fdst)
 	if checkopterr != nil {
 		fs.Debugf(nil, "GetCheckOpt error: %v", checkopterr)
 	}
 	defer close()

-	opt = WhichCheck(ctx, opt)
+	opt = b.WhichCheck(ctx, opt)
 	differ, noHash, err := opt.Check(ctx, dst, src)
 	if err != nil {
 		fs.Errorf(src, "failed to check: %v", err)
@@ -217,7 +219,7 @@ func (b *bisyncRun) EqualFn(ctx context.Context) context.Context {
 			equal, skipHash = timeSizeEqualFn()
 			if equal && !skipHash {
 				whichHashType := func(f fs.Info) hash.Type {
-					ht := getHashType(f.Name())
+					ht := b.getHashType(f.Name())
 					if ht == hash.None && b.opt.Compare.SlowHashSyncOnly && !b.opt.Resync {
 						ht = f.Hashes().GetOne()
 					}
@@ -225,9 +227,9 @@ func (b *bisyncRun) EqualFn(ctx context.Context) context.Context {
 				}
 				srcHash, _ := src.Hash(ctx, whichHashType(src.Fs()))
 				dstHash, _ := dst.Hash(ctx, whichHashType(dst.Fs()))
-				srcHash, _ = tryDownloadHash(ctx, src, srcHash)
-				dstHash, _ = tryDownloadHash(ctx, dst, dstHash)
-				equal = !hashDiffers(srcHash, dstHash, whichHashType(src.Fs()), whichHashType(dst.Fs()), src.Size(), dst.Size())
+				srcHash, _ = b.tryDownloadHash(ctx, src, srcHash)
+				dstHash, _ = b.tryDownloadHash(ctx, dst, dstHash)
+				equal = !b.hashDiffers(srcHash, dstHash, whichHashType(src.Fs()), whichHashType(dst.Fs()), src.Size(), dst.Size())
 			}
 			if equal {
 				logger(ctx, operations.Match, src, dst, nil)
@@ -247,7 +249,7 @@ func (b *bisyncRun) resyncTimeSizeEqual(ctxNoLogger context.Context, src fs.Obje
 	// note that arg order is path1, path2, regardless of src/dst
 	path1, path2 := b.resyncWhichIsWhich(src, dst)
 	if sizeDiffers(path1.Size(), path2.Size()) {
-		winningPath := b.resolveLargerSmaller(path1.Size(), path2.Size(), path1.Remote(), path2.Remote(), b.opt.ResyncMode)
+		winningPath := b.resolveLargerSmaller(path1.Size(), path2.Size(), path1.Remote(), b.opt.ResyncMode)
 		// don't need to check/update modtime here, as sizes definitely differ and something will be transferred
 		return b.resyncWinningPathToEqual(winningPath), b.resyncWinningPathToEqual(winningPath) // skip hash check if true
 	}
@@ -257,7 +259,7 @@ func (b *bisyncRun) resyncTimeSizeEqual(ctxNoLogger context.Context, src fs.Obje
 	// note that arg order is path1, path2, regardless of src/dst
 	path1, path2 := b.resyncWhichIsWhich(src, dst)
 	if timeDiffers(ctxNoLogger, path1.ModTime(ctxNoLogger), path2.ModTime(ctxNoLogger), path1.Fs(), path2.Fs()) {
-		winningPath := b.resolveNewerOlder(path1.ModTime(ctxNoLogger), path2.ModTime(ctxNoLogger), path1.Remote(), path2.Remote(), b.opt.ResyncMode)
+		winningPath := b.resolveNewerOlder(path1.ModTime(ctxNoLogger), path2.ModTime(ctxNoLogger), path1.Remote(), b.opt.ResyncMode)
 		// if src is winner, proceed with equal to check size/hash and possibly just update dest modtime instead of transferring
 		if !b.resyncWinningPathToEqual(winningPath) {
 			return operations.Equal(ctxNoLogger, src, dst), false // note we're back to src/dst, not path1/path2

@@ -115,6 +115,7 @@ func (x *CheckSyncMode) Type() string {
 }

 // Opt keeps command line options
+// internal functions should use b.opt instead
 var Opt Options

 func init() {
@@ -140,7 +141,7 @@ func init() {
 	flags.BoolVarP(cmdFlags, &tzLocal, "localtime", "", tzLocal, "Use local time in listings (default: UTC)", "")
 	flags.BoolVarP(cmdFlags, &Opt.NoCleanup, "no-cleanup", "", Opt.NoCleanup, "Retain working files (useful for troubleshooting and testing).", "")
 	flags.BoolVarP(cmdFlags, &Opt.IgnoreListingChecksum, "ignore-listing-checksum", "", Opt.IgnoreListingChecksum, "Do not use checksums for listings (add --ignore-checksum to additionally skip post-copy checksum checks)", "")
-	flags.BoolVarP(cmdFlags, &Opt.Resilient, "resilient", "", Opt.Resilient, "Allow future runs to retry after certain less-serious errors, instead of requiring --resync. Use at your own risk!", "")
+	flags.BoolVarP(cmdFlags, &Opt.Resilient, "resilient", "", Opt.Resilient, "Allow future runs to retry after certain less-serious errors, instead of requiring --resync.", "")
 	flags.BoolVarP(cmdFlags, &Opt.Recover, "recover", "", Opt.Recover, "Automatically recover from interruptions without requiring --resync.", "")
 	flags.StringVarP(cmdFlags, &Opt.CompareFlag, "compare", "", Opt.CompareFlag, "Comma-separated list of bisync-specific compare options ex. 'size,modtime,checksum' (default: 'size,modtime')", "")
 	flags.BoolVarP(cmdFlags, &Opt.Compare.NoSlowHash, "no-slow-hash", "", Opt.Compare.NoSlowHash, "Ignore listing checksums only on backends where they are slow", "")
@@ -162,7 +163,6 @@ var commandDefinition = &cobra.Command{
 	Annotations: map[string]string{
 		"versionIntroduced": "v1.58",
 		"groups":            "Filter,Copy,Important",
-		"status":            "Beta",
 	},
 	RunE: func(command *cobra.Command, args []string) error {
 		// NOTE: avoid putting too much handling here, as it won't apply to the rc.
@@ -190,7 +190,6 @@ var commandDefinition = &cobra.Command{
 		}
 	}

-	fs.Logf(nil, "bisync is IN BETA. Don't use in production!")
 	cmd.Run(false, true, command, func() error {
 		err := Bisync(ctx, fs1, fs2, &opt)
 		if err == ErrBisyncAborted {

@@ -28,7 +28,7 @@ type CompareOpt = struct {
 	DownloadHash bool
 }

-func (b *bisyncRun) setCompareDefaults(ctx context.Context) error {
+func (b *bisyncRun) setCompareDefaults(ctx context.Context) (err error) {
 	ci := fs.GetConfig(ctx)

 	// defaults
@@ -120,25 +120,25 @@ func sizeDiffers(a, b int64) bool {

 // returns true if the hashes are definitely different.
 // returns false if equal, or if either is unknown.
-func hashDiffers(a, b string, ht1, ht2 hash.Type, size1, size2 int64) bool {
-	if a == "" || b == "" {
+func (b *bisyncRun) hashDiffers(stringA, stringB string, ht1, ht2 hash.Type, size1, size2 int64) bool {
+	if stringA == "" || stringB == "" {
 		if ht1 != hash.None && ht2 != hash.None && !(size1 <= 0 || size2 <= 0) {
-			fs.Logf(nil, Color(terminal.YellowFg, "WARNING: hash unexpectedly blank despite Fs support (%s, %s) (you may need to --resync!)"), a, b)
+			fs.Logf(nil, Color(terminal.YellowFg, "WARNING: hash unexpectedly blank despite Fs support (%s, %s) (you may need to --resync!)"), stringA, stringB)
 		}
 		return false
 	}
 	if ht1 != ht2 {
-		if !(downloadHash && ((ht1 == hash.MD5 && ht2 == hash.None) || (ht1 == hash.None && ht2 == hash.MD5))) {
+		if !(b.downloadHashOpt.downloadHash && ((ht1 == hash.MD5 && ht2 == hash.None) || (ht1 == hash.None && ht2 == hash.MD5))) {
 			fs.Infof(nil, Color(terminal.YellowFg, "WARNING: Can't compare hashes of different types (%s, %s)"), ht1.String(), ht2.String())
 			return false
 		}
 	}
-	return a != b
+	return stringA != stringB
 }

 // chooses hash type, giving priority to types both sides have in common
 func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
-	downloadHash = b.opt.Compare.DownloadHash
+	b.downloadHashOpt.downloadHash = b.opt.Compare.DownloadHash
 	if b.opt.Compare.NoSlowHash && b.opt.Compare.SlowHashDetected {
 		fs.Infof(nil, "Not checking for common hash as at least one slow hash detected.")
 	} else {
@@ -177,7 +177,7 @@ func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
 	}
 	if (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.fs2.Features().SlowHash {
 		fs.Infoc(nil, Color(terminal.YellowFg, "Slow hash detected on Path2. Will ignore checksum due to slow-hash settings"))
-		b.opt.Compare.HashType1 = hash.None
+		b.opt.Compare.HashType2 = hash.None
 	} else {
 		b.opt.Compare.HashType2 = b.fs2.Hashes().GetOne()
 		if b.opt.Compare.HashType2 != hash.None {
@@ -268,13 +268,15 @@ func (b *bisyncRun) setFromCompareFlag(ctx context.Context) error {
 	return nil
 }

-// downloadHash is true if we should attempt to compute hash by downloading when otherwise unavailable
-var downloadHash bool
-var downloadHashWarn sync.Once
-var firstDownloadHash sync.Once
+// b.downloadHashOpt.downloadHash is true if we should attempt to compute hash by downloading when otherwise unavailable
+type downloadHashOpt struct {
+	downloadHash      bool
+	downloadHashWarn  sync.Once
+	firstDownloadHash sync.Once
+}

-func tryDownloadHash(ctx context.Context, o fs.DirEntry, hashVal string) (string, error) {
-	if hashVal != "" || !downloadHash {
+func (b *bisyncRun) tryDownloadHash(ctx context.Context, o fs.DirEntry, hashVal string) (string, error) {
+	if hashVal != "" || !b.downloadHashOpt.downloadHash {
 		return hashVal, nil
 	}
 	obj, ok := o.(fs.Object)
@@ -283,14 +285,14 @@ func tryDownloadHash(ctx context.Context, o fs.DirEntry, hashVal string) (string
 		return hashVal, fs.ErrorObjectNotFound
 	}
 	if o.Size() < 0 {
-		downloadHashWarn.Do(func() {
+		b.downloadHashOpt.downloadHashWarn.Do(func() {
 			fs.Log(o, Color(terminal.YellowFg, "Skipping hash download as checksum not reliable with files of unknown length."))
 		})
 		fs.Debugf(o, "Skipping hash download as checksum not reliable with files of unknown length.")
 		return hashVal, hash.ErrUnsupported
 	}

-	firstDownloadHash.Do(func() {
+	b.downloadHashOpt.firstDownloadHash.Do(func() {
 		fs.Infoc(obj.Fs().Name(), Color(terminal.Dim, "Downloading hashes..."))
 	})
 	tr := accounting.Stats(ctx).NewCheckingTransfer(o, "computing hash with --download-hash")

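Moving the once-guards into a per-run struct also changes when one-shot messages fire: once per run rather than once per process lifetime. A small, self-contained sketch of that effect (hypothetical names, stdlib sync.Once):

```go
package main

import (
	"fmt"
	"sync"
)

// downloadState mirrors the downloadHashOpt idea above: the once-guard lives
// on the run value, so each run warns at most once.
type downloadState struct {
	warnOnce sync.Once
}

type run struct {
	download downloadState
}

func (b *run) maybeWarn() {
	b.download.warnOnce.Do(func() {
		fmt.Println("Downloading hashes...") // printed at most once per run
	})
}

func main() {
	r1, r2 := &run{}, &run{}
	for i := 0; i < 3; i++ {
		r1.maybeWarn()
		r2.maybeWarn() // the second run still gets its own single message
	}
}
```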
@@ -219,7 +219,7 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string,
 			}
 		}
 		if b.opt.Compare.Checksum {
-			if hashDiffers(old.getHash(file), now.getHash(file), old.hash, now.hash, old.getSize(file), now.getSize(file)) {
+			if b.hashDiffers(old.getHash(file), now.getHash(file), old.hash, now.hash, old.getSize(file), now.getSize(file)) {
 				fs.Debugf(file, "(old: %v current: %v)", old.getHash(file), now.getHash(file))
 				whatchanged = append(whatchanged, Color(terminal.MagentaFg, "hash"))
 				d |= deltaHash
@@ -346,7 +346,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (result
 			if d2.is(deltaOther) {
 				// if size or hash differ, skip this, as we already know they're not equal
 				if (b.opt.Compare.Size && sizeDiffers(ds1.size[file], ds2.size[file2])) ||
-					(b.opt.Compare.Checksum && hashDiffers(ds1.hash[file], ds2.hash[file2], b.opt.Compare.HashType1, b.opt.Compare.HashType2, ds1.size[file], ds2.size[file2])) {
+					(b.opt.Compare.Checksum && b.hashDiffers(ds1.hash[file], ds2.hash[file2], b.opt.Compare.HashType1, b.opt.Compare.HashType2, ds1.size[file], ds2.size[file2])) {
 					fs.Debugf(file, "skipping equality check as size/hash definitely differ")
 				} else {
 					checkit := func(filename string) {
@@ -393,10 +393,10 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (result
 			// if files are identical, leave them alone instead of renaming
 			if (dirs1.has(file) || dirs1.has(alias)) && (dirs2.has(file) || dirs2.has(alias)) {
 				fs.Infof(nil, "This is a directory, not a file. Skipping equality check and will not rename: %s", file)
-				ls1.getPut(file, skippedDirs1)
-				ls2.getPut(file, skippedDirs2)
+				b.march.ls1.getPut(file, skippedDirs1)
+				b.march.ls2.getPut(file, skippedDirs2)
 				b.debugFn(file, func() {
-					b.debug(file, fmt.Sprintf("deltas dir: %s, ls1 has name?: %v, ls2 has name?: %v", file, ls1.has(b.DebugName), ls2.has(b.DebugName)))
+					b.debug(file, fmt.Sprintf("deltas dir: %s, ls1 has name?: %v, ls2 has name?: %v", file, b.march.ls1.has(b.DebugName), b.march.ls2.has(b.DebugName)))
 				})
 			} else {
 				equal := matches.Has(file)
@@ -409,16 +409,16 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (result
 					// the Path1 version is deemed "correct" in this scenario
 					fs.Infof(alias, "Files are equal but will copy anyway to fix case to %s", file)
 					copy1to2.Add(file)
-				} else if b.opt.Compare.Modtime && timeDiffers(ctx, ls1.getTime(ls1.getTryAlias(file, alias)), ls2.getTime(ls2.getTryAlias(file, alias)), b.fs1, b.fs2) {
+				} else if b.opt.Compare.Modtime && timeDiffers(ctx, b.march.ls1.getTime(b.march.ls1.getTryAlias(file, alias)), b.march.ls2.getTime(b.march.ls2.getTryAlias(file, alias)), b.fs1, b.fs2) {
 					fs.Infof(file, "Files are equal but will copy anyway to update modtime (will not rename)")
-					if ls1.getTime(ls1.getTryAlias(file, alias)).Before(ls2.getTime(ls2.getTryAlias(file, alias))) {
+					if b.march.ls1.getTime(b.march.ls1.getTryAlias(file, alias)).Before(b.march.ls2.getTime(b.march.ls2.getTryAlias(file, alias))) {
 						// Path2 is newer
 						b.indent("Path2", p1, "Queue copy to Path1")
-						copy2to1.Add(ls2.getTryAlias(file, alias))
+						copy2to1.Add(b.march.ls2.getTryAlias(file, alias))
 					} else {
 						// Path1 is newer
 						b.indent("Path1", p2, "Queue copy to Path2")
-						copy1to2.Add(ls1.getTryAlias(file, alias))
+						copy1to2.Add(b.march.ls1.getTryAlias(file, alias))
 					}
 				} else {
 					fs.Infof(nil, "Files are equal! Skipping: %s", file)
@@ -590,10 +590,10 @@ func (b *bisyncRun) updateAliases(ctx context.Context, ds1, ds2 *deltaSet) {
 	fullMap1 := map[string]string{} // [transformedname]originalname
 	fullMap2 := map[string]string{} // [transformedname]originalname

-	for _, name := range ls1.list {
+	for _, name := range b.march.ls1.list {
 		fullMap1[transform(name)] = name
 	}
-	for _, name := range ls2.list {
+	for _, name := range b.march.ls2.list {
 		fullMap2[transform(name)] = name
 	}

@@ -35,8 +35,7 @@ var rcHelp = makeHelp(`This takes the following parameters
 - removeEmptyDirs - remove empty directories at the final cleanup step
 - filtersFile - read filtering patterns from a file
 - ignoreListingChecksum - Do not use checksums for listings
 - resilient - Allow future runs to retry after certain less-serious errors, instead of requiring resync.
-Use at your own risk!
 - workdir - server directory for history files (default: |~/.cache/rclone/bisync|)
 - backupdir1 - --backup-dir for Path1. Must be a non-overlapping path on the same remote.
 - backupdir2 - --backup-dir for Path2. Must be a non-overlapping path on the same remote.
@@ -52,14 +51,15 @@ var longHelp = shortHelp + makeHelp(`
 bidirectional cloud sync solution in rclone.
 It retains the Path1 and Path2 filesystem listings from the prior run.
 On each successive run it will:

 - list files on Path1 and Path2, and check for changes on each side.
   Changes include |New|, |Newer|, |Older|, and |Deleted| files.
 - Propagate changes on Path1 to Path2, and vice-versa.

-Bisync is **in beta** and is considered an **advanced command**, so use with care.
+Bisync is considered an **advanced command**, so use with care.
 Make sure you have read and understood the entire [manual](https://rclone.org/bisync)
-(especially the [Limitations](https://rclone.org/bisync/#limitations) section) before using,
-or data loss can result. Questions can be asked in the [Rclone Forum](https://forum.rclone.org/).
+(especially the [Limitations](https://rclone.org/bisync/#limitations) section)
+before using, or data loss can result. Questions can be asked in the
+[Rclone Forum](https://forum.rclone.org/).

-See [full bisync description](https://rclone.org/bisync/) for details.
-`)
+See [full bisync description](https://rclone.org/bisync/) for details.`)

@@ -42,10 +42,14 @@ var lineRegex = regexp.MustCompile(`^(\S) +(-?\d+) (\S+) (\S+) (\d{4}-\d\d-\d\dT
 // timeFormat defines time format used in listings
 const timeFormat = "2006-01-02T15:04:05.000000000-0700"

-// TZ defines time zone used in listings
 var (
+	// TZ defines time zone used in listings
 	TZ      = time.UTC
 	tzLocal = false
+
+	// LogTZ defines time zone used in logs (which may be different than that used in listings).
+	// time.Local by default, but we force UTC on tests to make them deterministic regardless of tester's location.
+	LogTZ = time.Local
 )

 // fileInfo describes a file
@@ -198,8 +202,8 @@ func (b *bisyncRun) fileInfoEqual(file1, file2 string, ls1, ls2 *fileList) bool
 			equal = false
 		}
 	}
-	if b.opt.Compare.Checksum && !ignoreListingChecksum {
-		if hashDiffers(ls1.getHash(file1), ls2.getHash(file2), b.opt.Compare.HashType1, b.opt.Compare.HashType2, ls1.getSize(file1), ls2.getSize(file2)) {
+	if b.opt.Compare.Checksum && !b.queueOpt.ignoreListingChecksum {
+		if b.hashDiffers(ls1.getHash(file1), ls2.getHash(file2), b.opt.Compare.HashType1, b.opt.Compare.HashType2, ls1.getSize(file1), ls2.getSize(file2)) {
 			b.indent("ERROR", file1, fmt.Sprintf("Checksum not equal in listing. Path1: %v, Path2: %v", ls1.getHash(file1), ls2.getHash(file2)))
 			equal = false
 		}
@@ -243,7 +247,7 @@ func (ls *fileList) sort() {
 }

 // save will save listing to a file.
-func (ls *fileList) save(ctx context.Context, listing string) error {
+func (ls *fileList) save(listing string) error {
 	file, err := os.Create(listing)
 	if err != nil {
 		return err
@@ -430,7 +434,6 @@ func (b *bisyncRun) listDirsOnly(listingNum int) (*fileList, error) {
 	}

 	fulllisting, err = b.loadListingNum(listingNum)
-
 	if err != nil {
 		b.critical = true
 		b.retryable = true
@@ -606,6 +609,11 @@ func (b *bisyncRun) modifyListing(ctx context.Context, src fs.Fs, dst fs.Fs, res
 			}
 		}
 		if srcNewName != "" { // if it was renamed and not deleted
+			if new == nil { // should not happen. log error and debug info
+				b.handleErr(b.renames, "internal error", fmt.Errorf("missing info for %q. Please report a bug at https://github.com/rclone/rclone/issues", srcNewName), true, true)
+				fs.PrettyPrint(srcList, "srcList for debugging", fs.LogLevelNotice)
+				continue
+			}
 			srcList.put(srcNewName, new.size, new.time, new.hash, new.id, new.flags)
 			dstList.put(srcNewName, new.size, new.time, new.hash, new.id, new.flags)
 		}
@@ -708,9 +716,9 @@ func (b *bisyncRun) modifyListing(ctx context.Context, src fs.Fs, dst fs.Fs, res
 		b.debug(b.DebugName, fmt.Sprintf("%s pre-save dstList has it?: %v", direction, dstList.has(b.DebugName)))
 	}
 	// update files
-	err = srcList.save(ctx, srcListing)
+	err = srcList.save(srcListing)
 	b.handleErr(srcList, "error saving srcList from modifyListing", err, true, true)
-	err = dstList.save(ctx, dstListing)
+	err = dstList.save(dstListing)
 	b.handleErr(dstList, "error saving dstList from modifyListing", err, true, true)

 	return err
@@ -741,7 +749,7 @@ func (b *bisyncRun) recheck(ctxRecheck context.Context, src, dst fs.Fs, srcList,
 			if hashType != hash.None {
 				hashVal, _ = obj.Hash(ctxRecheck, hashType)
 			}
-			hashVal, _ = tryDownloadHash(ctxRecheck, obj, hashVal)
+			hashVal, _ = b.tryDownloadHash(ctxRecheck, obj, hashVal)
 		}
 		var modtime time.Time
 		if b.opt.Compare.Modtime {
@@ -755,7 +763,7 @@ func (b *bisyncRun) recheck(ctxRecheck context.Context, src, dst fs.Fs, srcList,
 		for _, dstObj := range dstObjs {
 			if srcObj.Remote() == dstObj.Remote() || srcObj.Remote() == b.aliases.Alias(dstObj.Remote()) {
 				// note: unlike Equal(), WhichEqual() does not update the modtime in dest if sums match but modtimes don't.
-				if b.opt.DryRun || WhichEqual(ctxRecheck, srcObj, dstObj, src, dst) {
+				if b.opt.DryRun || b.WhichEqual(ctxRecheck, srcObj, dstObj, src, dst) {
 					putObj(srcObj, srcList)
 					putObj(dstObj, dstList)
 					resolved = append(resolved, srcObj.Remote())
@@ -769,7 +777,7 @@ func (b *bisyncRun) recheck(ctxRecheck context.Context, src, dst fs.Fs, srcList,
 			// skip and error during --resync, as rollback is not possible
 			if !slices.Contains(resolved, srcObj.Remote()) && !b.opt.DryRun {
 				if b.opt.Resync {
-					err = errors.New("no dstObj match or files not equal")
+					err := errors.New("no dstObj match or files not equal")
 					b.handleErr(srcObj, "Unable to rollback during --resync", err, true, false)
 				} else {
 					toRollback = append(toRollback, srcObj.Remote())

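The new LogTZ variable exists so tests can pin log timestamps to UTC and compare them against golden files regardless of the machine's local zone. A minimal sketch of that idea (hypothetical stamp helper, not rclone's logging code):

```go
package main

import (
	"fmt"
	"time"
)

// logTZ plays the role of LogTZ above: logs format times in this zone, and
// tests pin it to UTC so golden-log comparisons don't depend on where the
// test machine happens to be.
var logTZ = time.Local

func stamp(t time.Time) string {
	return t.In(logTZ).Format("2006/01/02 15:04:05")
}

func main() {
	logTZ = time.UTC                    // what TestMain does for the whole test binary
	fmt.Println(stamp(time.Unix(0, 0))) // always "1970/01/01 00:00:00"
}
```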
@@ -16,16 +16,17 @@ import (
|
|||||||
|
|
||||||
const basicallyforever = fs.Duration(200 * 365 * 24 * time.Hour)
|
const basicallyforever = fs.Duration(200 * 365 * 24 * time.Hour)
|
||||||
|
|
||||||
var stopRenewal func()
|
type lockFileOpt struct {
|
||||||
|
stopRenewal func()
|
||||||
|
data struct {
|
||||||
|
Session string
|
||||||
|
PID string
|
||||||
|
TimeRenewed time.Time
|
||||||
|
TimeExpires time.Time
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
var data = struct {
|
func (b *bisyncRun) setLockFile() (err error) {
|
||||||
Session string
|
|
||||||
PID string
|
|
||||||
TimeRenewed time.Time
|
|
||||||
TimeExpires time.Time
|
|
||||||
}{}
|
|
||||||
|
|
||||||
func (b *bisyncRun) setLockFile() error {
|
|
||||||
b.lockFile = ""
|
b.lockFile = ""
|
||||||
     b.setLockFileExpiration()
     if !b.opt.DryRun {

@@ -45,24 +46,23 @@ func (b *bisyncRun) setLockFile() error {
         }
         fs.Debugf(nil, "Lock file created: %s", b.lockFile)
         b.renewLockFile()
-        stopRenewal = b.startLockRenewal()
+        b.lockFileOpt.stopRenewal = b.startLockRenewal()
     }
     return nil
 }

-func (b *bisyncRun) removeLockFile() {
+func (b *bisyncRun) removeLockFile() (err error) {
     if b.lockFile != "" {
-        stopRenewal()
-        errUnlock := os.Remove(b.lockFile)
-        if errUnlock == nil {
+        b.lockFileOpt.stopRenewal()
+        err = os.Remove(b.lockFile)
+        if err == nil {
             fs.Debugf(nil, "Lock file removed: %s", b.lockFile)
-        } else if err == nil {
-            err = errUnlock
         } else {
-            fs.Errorf(nil, "cannot remove lockfile %s: %v", b.lockFile, errUnlock)
+            fs.Errorf(nil, "cannot remove lockfile %s: %v", b.lockFile, err)
         }
         b.lockFile = "" // block removing it again
     }
+    return err
 }

 func (b *bisyncRun) setLockFileExpiration() {

@@ -77,18 +77,18 @@ func (b *bisyncRun) setLockFileExpiration() {
 func (b *bisyncRun) renewLockFile() {
     if b.lockFile != "" && bilib.FileExists(b.lockFile) {

-        data.Session = b.basePath
-        data.PID = strconv.Itoa(os.Getpid())
-        data.TimeRenewed = time.Now()
-        data.TimeExpires = time.Now().Add(time.Duration(b.opt.MaxLock))
+        b.lockFileOpt.data.Session = b.basePath
+        b.lockFileOpt.data.PID = strconv.Itoa(os.Getpid())
+        b.lockFileOpt.data.TimeRenewed = time.Now()
+        b.lockFileOpt.data.TimeExpires = time.Now().Add(time.Duration(b.opt.MaxLock))

         // save data file
         df, err := os.Create(b.lockFile)
         b.handleErr(b.lockFile, "error renewing lock file", err, true, true)
-        b.handleErr(b.lockFile, "error encoding JSON to lock file", json.NewEncoder(df).Encode(data), true, true)
+        b.handleErr(b.lockFile, "error encoding JSON to lock file", json.NewEncoder(df).Encode(b.lockFileOpt.data), true, true)
         b.handleErr(b.lockFile, "error closing lock file", df.Close(), true, true)
         if b.opt.MaxLock < basicallyforever {
-            fs.Infof(nil, Color(terminal.HiBlueFg, "lock file renewed for %v. New expiration: %v"), b.opt.MaxLock, data.TimeExpires)
+            fs.Infof(nil, Color(terminal.HiBlueFg, "lock file renewed for %v. New expiration: %v"), b.opt.MaxLock, b.lockFileOpt.data.TimeExpires)
         }
     }
 }

@@ -99,7 +99,7 @@ func (b *bisyncRun) lockFileIsExpired() bool {
         b.handleErr(b.lockFile, "error reading lock file", err, true, true)
         dec := json.NewDecoder(rdf)
         for {
-            if err := dec.Decode(&data); err != nil {
+            if err := dec.Decode(&b.lockFileOpt.data); err != nil {
                 if err != io.EOF {
                     fs.Errorf(b.lockFile, "err: %v", err)
                 }

@@ -107,14 +107,14 @@ func (b *bisyncRun) lockFileIsExpired() bool {
             }
         }
         b.handleErr(b.lockFile, "error closing file", rdf.Close(), true, true)
-        if !data.TimeExpires.IsZero() && data.TimeExpires.Before(time.Now()) {
-            fs.Infof(b.lockFile, Color(terminal.GreenFg, "Lock file found, but it expired at %v. Will delete it and proceed."), data.TimeExpires)
+        if !b.lockFileOpt.data.TimeExpires.IsZero() && b.lockFileOpt.data.TimeExpires.Before(time.Now()) {
+            fs.Infof(b.lockFile, Color(terminal.GreenFg, "Lock file found, but it expired at %v. Will delete it and proceed."), b.lockFileOpt.data.TimeExpires)
             markFailed(b.listing1) // listing is untrusted so force revert to prior (if --recover) or create new ones (if --resync)
             markFailed(b.listing2)
             return true
         }
-        fs.Infof(b.lockFile, Color(terminal.RedFg, "Valid lock file found. Expires at %v. (%v from now)"), data.TimeExpires, time.Since(data.TimeExpires).Abs().Round(time.Second))
-        prettyprint(data, "Lockfile info", fs.LogLevelInfo)
+        fs.Infof(b.lockFile, Color(terminal.RedFg, "Valid lock file found. Expires at %v. (%v from now)"), b.lockFileOpt.data.TimeExpires, time.Since(b.lockFileOpt.data.TimeExpires).Abs().Round(time.Second))
+        prettyprint(b.lockFileOpt.data, "Lockfile info", fs.LogLevelInfo)
     }
     return false
 }
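The lockfile hunks above move the package-level `data` payload and `stopRenewal` callback into fields on `b.lockFileOpt`, and change `removeLockFile` to return the `os.Remove` error rather than only logging it. Here is a minimal, self-contained sketch of that pattern, using hypothetical `run`/`lockState` types rather than rclone's actual ones:

```go
package main

import (
	"fmt"
	"os"
	"time"
)

// lockState is a hypothetical stand-in for per-run lock bookkeeping:
// keeping it on the run struct (rather than in package globals) means
// two concurrent runs, or two tests, cannot trample each other's state.
type lockState struct {
	lockFile    string
	stopRenewal func()
}

type run struct {
	lock lockState
}

// startLockRenewal refreshes the lock file periodically and returns a
// stop function, mirroring the stopRenewal callback stored above.
func (r *run) startLockRenewal(interval time.Duration) func() {
	ticker := time.NewTicker(interval)
	done := make(chan struct{})
	go func() {
		for {
			select {
			case <-ticker.C:
				// renew: in this sketch, just touch the file's mtime
				now := time.Now()
				_ = os.Chtimes(r.lock.lockFile, now, now)
			case <-done:
				ticker.Stop()
				return
			}
		}
	}()
	return func() { close(done) }
}

// removeLockFile returns the os.Remove error so the caller can decide
// whether the run as a whole should fail, as in the diff above.
func (r *run) removeLockFile() (err error) {
	if r.lock.lockFile == "" {
		return nil
	}
	r.lock.stopRenewal()
	err = os.Remove(r.lock.lockFile)
	r.lock.lockFile = "" // block removing it again
	return err
}

func main() {
	f, err := os.CreateTemp("", "demo.lck")
	if err != nil {
		panic(err)
	}
	f.Close()
	r := &run{lock: lockState{lockFile: f.Name()}}
	r.lock.stopRenewal = r.startLockRenewal(50 * time.Millisecond)
	time.Sleep(120 * time.Millisecond)
	fmt.Println("remove err:", r.removeLockFile())
}
```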
@@ -6,6 +6,7 @@ import (
     "runtime"
     "strconv"
     "strings"
+    "sync"

     "github.com/rclone/rclone/fs"
     "github.com/rclone/rclone/lib/encoder"

@@ -67,10 +68,15 @@ func quotePath(path string) string {
 }

 // Colors controls whether terminal colors are enabled
-var Colors bool
+var (
+    Colors     bool
+    ColorsLock sync.Mutex
+)

 // Color handles terminal colors for bisync
 func Color(style string, s string) string {
+    ColorsLock.Lock()
+    defer ColorsLock.Unlock()
     if !Colors {
         return s
     }

@@ -80,6 +86,8 @@ func Color(style string, s string) string {

 // ColorX handles terminal colors for bisync
 func ColorX(style string, s string) string {
+    ColorsLock.Lock()
+    defer ColorsLock.Unlock()
     if !Colors {
         return s
     }
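These hunks wrap every read and write of the package-level `Colors` flag in the new `ColorsLock` mutex, so the writer in `Bisync` cannot race the readers in `Color`/`ColorX`. A small hedged illustration of the same guard, runnable under `go run -race`:

```go
package main

import (
	"fmt"
	"sync"
)

// The flag and its lock live together, mirroring Colors/ColorsLock in the diff.
var (
	colors     bool
	colorsLock sync.Mutex
)

// color returns s unchanged when colors are off; the lock makes the read
// safe against a concurrent write from another goroutine.
func color(style, s string) string {
	colorsLock.Lock()
	defer colorsLock.Unlock()
	if !colors {
		return s
	}
	return style + s + "\x1b[0m"
}

func main() {
	var wg sync.WaitGroup
	wg.Add(2)
	go func() { // writer, like Bisync() deciding the terminal color mode
		defer wg.Done()
		colorsLock.Lock()
		colors = true
		colorsLock.Unlock()
	}()
	go func() { // reader, like a log call during the run
		defer wg.Done()
		fmt.Println(color("\x1b[34m", "hello"))
	}()
	wg.Wait()
}
```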
@@ -12,18 +12,20 @@ import (
     "github.com/rclone/rclone/fs/march"
 )

-var ls1 = newFileList()
-var ls2 = newFileList()
-var err error
-var firstErr error
-var marchAliasLock sync.Mutex
-var marchLsLock sync.Mutex
-var marchErrLock sync.Mutex
-var marchCtx context.Context
+type bisyncMarch struct {
+    ls1            *fileList
+    ls2            *fileList
+    err            error
+    firstErr       error
+    marchAliasLock sync.Mutex
+    marchLsLock    sync.Mutex
+    marchErrLock   sync.Mutex
+    marchCtx       context.Context
+}

 func (b *bisyncRun) makeMarchListing(ctx context.Context) (*fileList, *fileList, error) {
     ci := fs.GetConfig(ctx)
-    marchCtx = ctx
+    b.march.marchCtx = ctx
     b.setupListing()
     fs.Debugf(b, "starting to march!")

@@ -39,31 +41,31 @@ func (b *bisyncRun) makeMarchListing(ctx context.Context) (*fileList, *fileList,
         NoCheckDest:            false,
         NoUnicodeNormalization: ci.NoUnicodeNormalization,
     }
-    err = m.Run(ctx)
+    b.march.err = m.Run(ctx)

-    fs.Debugf(b, "march completed. err: %v", err)
-    if err == nil {
-        err = firstErr
+    fs.Debugf(b, "march completed. err: %v", b.march.err)
+    if b.march.err == nil {
+        b.march.err = b.march.firstErr
     }
-    if err != nil {
-        b.handleErr("march", "error during march", err, true, true)
+    if b.march.err != nil {
+        b.handleErr("march", "error during march", b.march.err, true, true)
         b.abort = true
-        return ls1, ls2, err
+        return b.march.ls1, b.march.ls2, b.march.err
     }

     // save files
-    if b.opt.Compare.DownloadHash && ls1.hash == hash.None {
-        ls1.hash = hash.MD5
+    if b.opt.Compare.DownloadHash && b.march.ls1.hash == hash.None {
+        b.march.ls1.hash = hash.MD5
     }
-    if b.opt.Compare.DownloadHash && ls2.hash == hash.None {
-        ls2.hash = hash.MD5
+    if b.opt.Compare.DownloadHash && b.march.ls2.hash == hash.None {
+        b.march.ls2.hash = hash.MD5
     }
-    err = ls1.save(ctx, b.newListing1)
-    b.handleErr(ls1, "error saving ls1 from march", err, true, true)
-    err = ls2.save(ctx, b.newListing2)
-    b.handleErr(ls2, "error saving ls2 from march", err, true, true)
+    b.march.err = b.march.ls1.save(b.newListing1)
+    b.handleErr(b.march.ls1, "error saving b.march.ls1 from march", b.march.err, true, true)
+    b.march.err = b.march.ls2.save(b.newListing2)
+    b.handleErr(b.march.ls2, "error saving b.march.ls2 from march", b.march.err, true, true)

-    return ls1, ls2, err
+    return b.march.ls1, b.march.ls2, b.march.err
 }

 // SrcOnly have an object which is on path1 only

@@ -83,9 +85,9 @@ func (b *bisyncRun) DstOnly(o fs.DirEntry) (recurse bool) {
 // Match is called when object exists on both path1 and path2 (whether equal or not)
 func (b *bisyncRun) Match(ctx context.Context, o2, o1 fs.DirEntry) (recurse bool) {
     fs.Debugf(o1, "both path1 and path2")
-    marchAliasLock.Lock()
+    b.march.marchAliasLock.Lock()
     b.aliases.Add(o1.Remote(), o2.Remote())
-    marchAliasLock.Unlock()
+    b.march.marchAliasLock.Unlock()
     b.parse(o1, true)
     b.parse(o2, false)
     return isDir(o1)

@@ -119,76 +121,76 @@ func (b *bisyncRun) parse(e fs.DirEntry, isPath1 bool) {
 }

 func (b *bisyncRun) setupListing() {
-    ls1 = newFileList()
-    ls2 = newFileList()
+    b.march.ls1 = newFileList()
+    b.march.ls2 = newFileList()

     // note that --ignore-listing-checksum is different from --ignore-checksum
     // and we already checked it when we set b.opt.Compare.HashType1 and 2
-    ls1.hash = b.opt.Compare.HashType1
-    ls2.hash = b.opt.Compare.HashType2
+    b.march.ls1.hash = b.opt.Compare.HashType1
+    b.march.ls2.hash = b.opt.Compare.HashType2
 }

 func (b *bisyncRun) ForObject(o fs.Object, isPath1 bool) {
-    tr := accounting.Stats(marchCtx).NewCheckingTransfer(o, "listing file - "+whichPath(isPath1))
+    tr := accounting.Stats(b.march.marchCtx).NewCheckingTransfer(o, "listing file - "+whichPath(isPath1))
     defer func() {
-        tr.Done(marchCtx, nil)
+        tr.Done(b.march.marchCtx, nil)
     }()
     var (
         hashVal string
         hashErr error
     )
-    ls := whichLs(isPath1)
+    ls := b.whichLs(isPath1)
     hashType := ls.hash
     if hashType != hash.None {
-        hashVal, hashErr = o.Hash(marchCtx, hashType)
-        marchErrLock.Lock()
-        if firstErr == nil {
-            firstErr = hashErr
+        hashVal, hashErr = o.Hash(b.march.marchCtx, hashType)
+        b.march.marchErrLock.Lock()
+        if b.march.firstErr == nil {
+            b.march.firstErr = hashErr
         }
-        marchErrLock.Unlock()
+        b.march.marchErrLock.Unlock()
     }
-    hashVal, hashErr = tryDownloadHash(marchCtx, o, hashVal)
-    marchErrLock.Lock()
-    if firstErr == nil {
-        firstErr = hashErr
+    hashVal, hashErr = b.tryDownloadHash(b.march.marchCtx, o, hashVal)
+    b.march.marchErrLock.Lock()
+    if b.march.firstErr == nil {
+        b.march.firstErr = hashErr
     }
-    if firstErr != nil {
-        b.handleErr(hashType, "error hashing during march", firstErr, false, true)
+    if b.march.firstErr != nil {
+        b.handleErr(hashType, "error hashing during march", b.march.firstErr, false, true)
     }
-    marchErrLock.Unlock()
+    b.march.marchErrLock.Unlock()

     var modtime time.Time
     if b.opt.Compare.Modtime {
-        modtime = o.ModTime(marchCtx).In(TZ)
+        modtime = o.ModTime(b.march.marchCtx).In(TZ)
     }
     id := ""     // TODO: ID(o)
     flags := "-" // "-" for a file and "d" for a directory
-    marchLsLock.Lock()
+    b.march.marchLsLock.Lock()
     ls.put(o.Remote(), o.Size(), modtime, hashVal, id, flags)
-    marchLsLock.Unlock()
+    b.march.marchLsLock.Unlock()
 }

 func (b *bisyncRun) ForDir(o fs.Directory, isPath1 bool) {
-    tr := accounting.Stats(marchCtx).NewCheckingTransfer(o, "listing dir - "+whichPath(isPath1))
+    tr := accounting.Stats(b.march.marchCtx).NewCheckingTransfer(o, "listing dir - "+whichPath(isPath1))
     defer func() {
-        tr.Done(marchCtx, nil)
+        tr.Done(b.march.marchCtx, nil)
     }()
-    ls := whichLs(isPath1)
+    ls := b.whichLs(isPath1)
     var modtime time.Time
     if b.opt.Compare.Modtime {
-        modtime = o.ModTime(marchCtx).In(TZ)
+        modtime = o.ModTime(b.march.marchCtx).In(TZ)
     }
     id := ""     // TODO
     flags := "d" // "-" for a file and "d" for a directory
-    marchLsLock.Lock()
+    b.march.marchLsLock.Lock()
     ls.put(o.Remote(), -1, modtime, "", id, flags)
-    marchLsLock.Unlock()
+    b.march.marchLsLock.Unlock()
 }

-func whichLs(isPath1 bool) *fileList {
-    ls := ls1
+func (b *bisyncRun) whichLs(isPath1 bool) *fileList {
+    ls := b.march.ls1
     if !isPath1 {
-        ls = ls2
+        ls = b.march.ls2
     }
     return ls
 }

@@ -206,7 +208,7 @@ func (b *bisyncRun) findCheckFiles(ctx context.Context) (*fileList, *fileList, e
     b.handleErr(b.opt.CheckFilename, "error adding CheckFilename to filter", filterCheckFile.Add(true, b.opt.CheckFilename), true, true)
     b.handleErr(b.opt.CheckFilename, "error adding ** exclusion to filter", filterCheckFile.Add(false, "**"), true, true)
     ci := fs.GetConfig(ctxCheckFile)
-    marchCtx = ctxCheckFile
+    b.march.marchCtx = ctxCheckFile

     b.setupListing()
     fs.Debugf(b, "starting to march!")

@@ -223,18 +225,18 @@ func (b *bisyncRun) findCheckFiles(ctx context.Context) (*fileList, *fileList, e
         NoCheckDest:            false,
         NoUnicodeNormalization: ci.NoUnicodeNormalization,
     }
-    err = m.Run(ctxCheckFile)
+    b.march.err = m.Run(ctxCheckFile)

-    fs.Debugf(b, "march completed. err: %v", err)
-    if err == nil {
-        err = firstErr
+    fs.Debugf(b, "march completed. err: %v", b.march.err)
+    if b.march.err == nil {
+        b.march.err = b.march.firstErr
     }
-    if err != nil {
-        b.handleErr("march", "error during findCheckFiles", err, true, true)
+    if b.march.err != nil {
+        b.handleErr("march", "error during findCheckFiles", b.march.err, true, true)
         b.abort = true
     }

-    return ls1, ls2, err
+    return b.march.ls1, b.march.ls2, b.march.err
 }

 // ID returns the ID of the Object if known, or "" if not
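The march hunks collect what used to be eight package-level variables (`ls1`, `ls2`, `err`, `firstErr`, the three mutexes, and `marchCtx`) into a `bisyncMarch` struct stored on each run. A toy sketch of the isolation this buys, using hypothetical names; note that because the struct embeds a `sync.Mutex`, the owning value must be used by pointer and never copied:

```go
package main

import (
	"fmt"
	"sync"
)

// Before: package-level state shared by every run (and every test), e.g.
//   var ls1, ls2 []string
//   var lsLock sync.Mutex

// marchState is a hypothetical miniature of bisyncMarch: the listings and
// the mutex that guards them travel with the run that owns them.
type marchState struct {
	ls1, ls2 []string
	lsLock   sync.Mutex
}

type run struct {
	march marchState // contains a Mutex, so *run must not be copied
}

func (r *run) forObject(name string, isPath1 bool) {
	r.march.lsLock.Lock()
	defer r.march.lsLock.Unlock()
	if isPath1 {
		r.march.ls1 = append(r.march.ls1, name)
	} else {
		r.march.ls2 = append(r.march.ls2, name)
	}
}

func main() {
	a, b := &run{}, &run{} // two runs, fully isolated from each other
	a.forObject("file1", true)
	b.forObject("file2", false)
	fmt.Println(a.march.ls1, b.march.ls2)
}
```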
@@ -51,6 +51,11 @@ type bisyncRun struct {
     lockFile     string
     renames      renames
     resyncIs1to2 bool
+    march           bisyncMarch
+    check           bisyncCheck
+    queueOpt        bisyncQueueOpt
+    downloadHashOpt downloadHashOpt
+    lockFileOpt     lockFileOpt
 }

 type queues struct {

@@ -64,7 +69,6 @@ type queues struct {

 // Bisync handles lock file, performs bisync run and checks exit status
 func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
-    defer resetGlobals()
     opt := *optArg // ensure that input is never changed
     b := &bisyncRun{
         fs1: fs1,

@@ -83,7 +87,9 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
     opt.OrigBackupDir = ci.BackupDir

     if ci.TerminalColorMode == fs.TerminalColorModeAlways || (ci.TerminalColorMode == fs.TerminalColorModeAuto && !log.Redirected()) {
+        ColorsLock.Lock()
         Colors = true
+        ColorsLock.Unlock()
     }

     err = b.setCompareDefaults(ctx)

@@ -93,7 +99,7 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {

     b.setResyncDefaults()

-    err = b.setResolveDefaults(ctx)
+    err = b.setResolveDefaults()
     if err != nil {
         return err
     }

@@ -124,6 +130,8 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
         return err
     }

+    b.queueOpt.logger = operations.NewLoggerOpt()
+
     // Handle SIGINT
     var finaliseOnce gosync.Once

@@ -161,7 +169,7 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
                 markFailed(b.listing1)
                 markFailed(b.listing2)
             }
-            b.removeLockFile()
+            err = b.removeLockFile()
         }
     })
 }

@@ -171,7 +179,10 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
     // run bisync
     err = b.runLocked(ctx)

-    b.removeLockFile()
+    removeLockErr := b.removeLockFile()
+    if err == nil {
+        err = removeLockErr
+    }

     b.CleanupCompleted = true
     if b.InGracefulShutdown {

@@ -262,7 +273,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {

     // Generate Path1 and Path2 listings and copy any unique Path2 files to Path1
     if opt.Resync {
-        return b.resync(octx, fctx)
+        return b.resync(fctx)
     }

     // Check for existence of prior Path1 and Path2 listings

@@ -297,7 +308,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
     }

     fs.Infof(nil, "Building Path1 and Path2 listings")
-    ls1, ls2, err = b.makeMarchListing(fctx)
+    b.march.ls1, b.march.ls2, err = b.makeMarchListing(fctx)
     if err != nil || accounting.Stats(fctx).Errored() {
         fs.Error(nil, Color(terminal.RedFg, "There were errors while building listings. Aborting as it is too dangerous to continue."))
         b.critical = true

@@ -307,7 +318,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {

     // Check for Path1 deltas relative to the prior sync
     fs.Infof(nil, "Path1 checking for diffs")
-    ds1, err := b.findDeltas(fctx, b.fs1, b.listing1, ls1, "Path1")
+    ds1, err := b.findDeltas(fctx, b.fs1, b.listing1, b.march.ls1, "Path1")
     if err != nil {
         return err
     }

@@ -315,7 +326,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {

     // Check for Path2 deltas relative to the prior sync
     fs.Infof(nil, "Path2 checking for diffs")
-    ds2, err := b.findDeltas(fctx, b.fs2, b.listing2, ls2, "Path2")
+    ds2, err := b.findDeltas(fctx, b.fs2, b.listing2, b.march.ls2, "Path2")
     if err != nil {
         return err
     }

@@ -389,7 +400,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
         newl1, _ := b.loadListing(b.newListing1)
         newl2, _ := b.loadListing(b.newListing2)
         b.debug(b.DebugName, fmt.Sprintf("pre-saveOldListings, ls1 has name?: %v, ls2 has name?: %v", l1.has(b.DebugName), l2.has(b.DebugName)))
-        b.debug(b.DebugName, fmt.Sprintf("pre-saveOldListings, newls1 has name?: %v, newls2 has name?: %v", newl1.has(b.DebugName), newl2.has(b.DebugName)))
+        b.debug(b.DebugName, fmt.Sprintf("pre-saveOldListings, newls1 has name?: %v, ls2 has name?: %v", newl1.has(b.DebugName), newl2.has(b.DebugName)))
     }
     b.saveOldListings()
     // save new listings

@@ -553,7 +564,7 @@ func (b *bisyncRun) setBackupDir(ctx context.Context, destPath int) context.Cont
     return ctx
 }

-func (b *bisyncRun) overlappingPathsCheck(fctx context.Context, fs1, fs2 fs.Fs) error {
+func (b *bisyncRun) overlappingPathsCheck(fctx context.Context, fs1, fs2 fs.Fs) (err error) {
     if operations.OverlappingFilterCheck(fctx, fs2, fs1) {
         err = errors.New(Color(terminal.RedFg, "Overlapping paths detected. Cannot bisync between paths that overlap, unless excluded by filters."))
         return err

@@ -586,7 +597,7 @@ func (b *bisyncRun) overlappingPathsCheck(fctx context.Context, fs1, fs2 fs.Fs)
     return nil
 }

-func (b *bisyncRun) checkSyntax() error {
+func (b *bisyncRun) checkSyntax() (err error) {
     // check for odd number of quotes in path, usually indicating an escaping issue
     path1 := bilib.FsPath(b.fs1)
     path2 := bilib.FsPath(b.fs2)

@@ -634,25 +645,3 @@ func waitFor(msg string, totalWait time.Duration, fn func() bool) (ok bool) {
     }
     return false
 }
-
-// mainly to make sure tests don't interfere with each other when running more than one
-func resetGlobals() {
-    downloadHash = false
-    logger = operations.NewLoggerOpt()
-    ignoreListingChecksum = false
-    ignoreListingModtime = false
-    hashTypes = nil
-    queueCI = nil
-    hashType = 0
-    fsrc, fdst = nil, nil
-    fcrypt = nil
-    Opt = Options{}
-    once = gosync.Once{}
-    downloadHashWarn = gosync.Once{}
-    firstDownloadHash = gosync.Once{}
-    ls1 = newFileList()
-    ls2 = newFileList()
-    err = nil
-    firstErr = nil
-    marchCtx = nil
-}
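Two cleanup-related patterns appear above: `defer resetGlobals()` disappears entirely (the state now lives on `b`, so there is nothing global to reset), and the lock file error is propagated with `removeLockErr := b.removeLockFile(); if err == nil { err = removeLockErr }`. The latter idiom, sketched in isolation:

```go
package main

import (
	"errors"
	"fmt"
)

// runAndCleanUp shows the pattern from the Bisync hunk: always attempt
// cleanup, but never let a cleanup error mask the primary run error.
func runAndCleanUp(run, cleanup func() error) (err error) {
	err = run()
	cleanupErr := cleanup()
	if err == nil {
		err = cleanupErr
	}
	return err
}

func main() {
	ok := func() error { return nil }
	bad := func() error { return errors.New("boom") }
	fmt.Println(runAndCleanUp(ok, bad))  // boom (cleanup error surfaces)
	fmt.Println(runAndCleanUp(bad, ok))  // boom (run error kept)
	fmt.Println(runAndCleanUp(bad, bad)) // boom (run error wins)
}
```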
@@ -51,19 +51,19 @@ func (rs *ResultsSlice) has(name string) bool {
     return false
 }

-var (
-    logger = operations.NewLoggerOpt()
+type bisyncQueueOpt struct {
+    logger                operations.LoggerOpt
     lock                  mutex.Mutex
     once                  mutex.Once
     ignoreListingChecksum bool
     ignoreListingModtime  bool
     hashTypes             map[string]hash.Type
     queueCI               *fs.ConfigInfo
-)
+}

 // allows us to get the right hashtype during the LoggerFn without knowing whether it's Path1/Path2
-func getHashType(fname string) hash.Type {
-    ht, ok := hashTypes[fname]
+func (b *bisyncRun) getHashType(fname string) hash.Type {
+    ht, ok := b.queueOpt.hashTypes[fname]
     if ok {
         return ht
     }

@@ -106,9 +106,9 @@ func altName(name string, src, dst fs.DirEntry) string {
 }

 // WriteResults is Bisync's LoggerFn
-func WriteResults(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEntry, err error) {
-    lock.Lock()
-    defer lock.Unlock()
+func (b *bisyncRun) WriteResults(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEntry, err error) {
+    b.queueOpt.lock.Lock()
+    defer b.queueOpt.lock.Unlock()

     opt := operations.GetLoggerOpt(ctx)
     result := Results{

@@ -131,14 +131,14 @@ func WriteResults(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEn
     result.Flags = "-"
     if side != nil {
         result.Size = side.Size()
-        if !ignoreListingModtime {
+        if !b.queueOpt.ignoreListingModtime {
             result.Modtime = side.ModTime(ctx).In(TZ)
         }
-        if !ignoreListingChecksum {
+        if !b.queueOpt.ignoreListingChecksum {
             sideObj, ok := side.(fs.ObjectInfo)
             if ok {
-                result.Hash, _ = sideObj.Hash(ctx, getHashType(sideObj.Fs().Name()))
-                result.Hash, _ = tryDownloadHash(ctx, sideObj, result.Hash)
+                result.Hash, _ = sideObj.Hash(ctx, b.getHashType(sideObj.Fs().Name()))
+                result.Hash, _ = b.tryDownloadHash(ctx, sideObj, result.Hash)
             }
         }
     }

@@ -159,8 +159,8 @@ func WriteResults(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEn
     }

     prettyprint(result, "writing result", fs.LogLevelDebug)
-    if result.Size < 0 && result.Flags != "d" && ((queueCI.CheckSum && !downloadHash) || queueCI.SizeOnly) {
-        once.Do(func() {
+    if result.Size < 0 && result.Flags != "d" && ((b.queueOpt.queueCI.CheckSum && !b.downloadHashOpt.downloadHash) || b.queueOpt.queueCI.SizeOnly) {
+        b.queueOpt.once.Do(func() {
             fs.Log(result.Name, Color(terminal.YellowFg, "Files of unknown size (such as Google Docs) do not sync reliably with --checksum or --size-only. Consider using modtime instead (the default) or --drive-skip-gdocs"))
         })
     }

@@ -189,14 +189,14 @@ func ReadResults(results io.Reader) []Results {

 // for setup code shared by both fastCopy and resyncDir
 func (b *bisyncRun) preCopy(ctx context.Context) context.Context {
-    queueCI = fs.GetConfig(ctx)
-    ignoreListingChecksum = b.opt.IgnoreListingChecksum
-    ignoreListingModtime = !b.opt.Compare.Modtime
-    hashTypes = map[string]hash.Type{
+    b.queueOpt.queueCI = fs.GetConfig(ctx)
+    b.queueOpt.ignoreListingChecksum = b.opt.IgnoreListingChecksum
+    b.queueOpt.ignoreListingModtime = !b.opt.Compare.Modtime
+    b.queueOpt.hashTypes = map[string]hash.Type{
         b.fs1.Name(): b.opt.Compare.HashType1,
         b.fs2.Name(): b.opt.Compare.HashType2,
     }
-    logger.LoggerFn = WriteResults
+    b.queueOpt.logger.LoggerFn = b.WriteResults
     overridingEqual := false
     if (b.opt.Compare.Modtime && b.opt.Compare.Checksum) || b.opt.Compare.DownloadHash {
         overridingEqual = true

@@ -209,15 +209,15 @@ func (b *bisyncRun) preCopy(ctx context.Context) context.Context {
         fs.Debugf(nil, "overriding equal")
         ctx = b.EqualFn(ctx)
     }
-    ctxCopyLogger := operations.WithSyncLogger(ctx, logger)
+    ctxCopyLogger := operations.WithSyncLogger(ctx, b.queueOpt.logger)
     if b.opt.Compare.Checksum && (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.opt.Compare.SlowHashDetected {
         // set here in case !b.opt.Compare.Modtime
-        queueCI = fs.GetConfig(ctxCopyLogger)
+        b.queueOpt.queueCI = fs.GetConfig(ctxCopyLogger)
         if b.opt.Compare.NoSlowHash {
-            queueCI.CheckSum = false
+            b.queueOpt.queueCI.CheckSum = false
         }
         if b.opt.Compare.SlowHashSyncOnly && !overridingEqual {
-            queueCI.CheckSum = true
+            b.queueOpt.queueCI.CheckSum = true
         }
     }
     return ctxCopyLogger

@@ -245,14 +245,16 @@ func (b *bisyncRun) fastCopy(ctx context.Context, fsrc, fdst fs.Fs, files bilib.
         }
     }

     b.SyncCI = fs.GetConfig(ctxCopy) // allows us to request graceful shutdown
-    accounting.MaxCompletedTransfers = -1 // we need a complete list in the event of graceful shutdown
+    if accounting.MaxCompletedTransfers != -1 {
+        accounting.MaxCompletedTransfers = -1 // we need a complete list in the event of graceful shutdown
+    }
     ctxCopy, b.CancelSync = context.WithCancel(ctxCopy)
     b.testFn()
     err := sync.Sync(ctxCopy, fdst, fsrc, b.opt.CreateEmptySrcDirs)
-    prettyprint(logger, "logger", fs.LogLevelDebug)
+    prettyprint(b.queueOpt.logger, "b.queueOpt.logger", fs.LogLevelDebug)

-    getResults := ReadResults(logger.JSON)
+    getResults := ReadResults(b.queueOpt.logger.JSON)
     fs.Debugf(nil, "Got %v results for %v", len(getResults), queueName)

     lineFormat := "%s %8d %s %s %s %q\n"

@@ -292,9 +294,9 @@ func (b *bisyncRun) resyncDir(ctx context.Context, fsrc, fdst fs.Fs) ([]Results,
     ctx = b.preCopy(ctx)

     err := sync.CopyDir(ctx, fdst, fsrc, b.opt.CreateEmptySrcDirs)
-    prettyprint(logger, "logger", fs.LogLevelDebug)
+    prettyprint(b.queueOpt.logger, "b.queueOpt.logger", fs.LogLevelDebug)

-    getResults := ReadResults(logger.JSON)
+    getResults := ReadResults(b.queueOpt.logger.JSON)
     fs.Debugf(nil, "Got %v results for %v", len(getResults), "resync")

     return getResults, err
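In `preCopy`, the free function `WriteResults` becomes a method and is installed as `b.queueOpt.logger.LoggerFn = b.WriteResults`. A Go method value like this carries its receiver, which is what lets the callback reach per-run state without globals. A hypothetical miniature of the idea:

```go
package main

import "fmt"

// loggerOpt is a hypothetical stand-in for a struct carrying a logging callback.
type loggerOpt struct {
	LoggerFn func(msg string)
}

type run struct {
	name string
}

// writeResults was previously a free function reading globals; as a method,
// the bound method value r.writeResults brings its receiver along with it.
func (r *run) writeResults(msg string) {
	fmt.Printf("[%s] %s\n", r.name, msg)
}

func main() {
	r := &run{name: "run1"}
	opt := loggerOpt{}
	// Assigning the method value binds r, just like
	// b.queueOpt.logger.LoggerFn = b.WriteResults in the diff.
	opt.LoggerFn = r.writeResults
	opt.LoggerFn("copied file.txt")
}
```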
@@ -77,7 +77,7 @@ func (conflictLoserChoices) Type() string {
 // ConflictLoserList is a list of --conflict-loser flag choices used in the help
 var ConflictLoserList = Opt.ConflictLoser.Help()

-func (b *bisyncRun) setResolveDefaults(ctx context.Context) error {
+func (b *bisyncRun) setResolveDefaults() error {
     if b.opt.ConflictLoser == ConflictLoserSkip {
         b.opt.ConflictLoser = ConflictLoserNumber
     }

@@ -135,7 +135,7 @@ type namePair struct {
     newName string
 }

-func (b *bisyncRun) resolve(ctxMove context.Context, path1, path2, file, alias string, renameSkipped, copy1to2, copy2to1 *bilib.Names, ds1, ds2 *deltaSet) error {
+func (b *bisyncRun) resolve(ctxMove context.Context, path1, path2, file, alias string, renameSkipped, copy1to2, copy2to1 *bilib.Names, ds1, ds2 *deltaSet) (err error) {
     winningPath := 0
     if b.opt.ConflictResolve != PreferNone {
         winningPath = b.conflictWinner(ds1, ds2, file, alias)

@@ -197,7 +197,7 @@ func (b *bisyncRun) resolve(ctxMove context.Context, path1, path2, file, alias s
     // note also that deletes and renames are mutually exclusive -- we never delete one path and rename the other.
     if b.opt.ConflictLoser == ConflictLoserDelete && winningPath == 1 {
         // delete 2, copy 1 to 2
-        err = b.delete(ctxMove, r.path2, path2, path1, b.fs2, 2, 1, renameSkipped)
+        err = b.delete(ctxMove, r.path2, path2, b.fs2, 2, renameSkipped)
         if err != nil {
             return err
         }

@@ -207,7 +207,7 @@ func (b *bisyncRun) resolve(ctxMove context.Context, path1, path2, file, alias s
         copy1to2.Add(r.path1.oldName)
     } else if b.opt.ConflictLoser == ConflictLoserDelete && winningPath == 2 {
         // delete 1, copy 2 to 1
-        err = b.delete(ctxMove, r.path1, path1, path2, b.fs1, 1, 2, renameSkipped)
+        err = b.delete(ctxMove, r.path1, path1, b.fs1, 1, renameSkipped)
         if err != nil {
             return err
         }

@@ -261,15 +261,15 @@ func (ri *renamesInfo) getNames(is1to2 bool) (srcOldName, srcNewName, dstOldName
 func (b *bisyncRun) numerate(ctx context.Context, startnum int, file, alias string) int {
     for i := startnum; i < math.MaxInt; i++ {
         iStr := fmt.Sprint(i)
-        if !ls1.has(SuffixName(ctx, file, b.opt.ConflictSuffix1+iStr)) &&
-            !ls1.has(SuffixName(ctx, alias, b.opt.ConflictSuffix1+iStr)) &&
-            !ls2.has(SuffixName(ctx, file, b.opt.ConflictSuffix2+iStr)) &&
-            !ls2.has(SuffixName(ctx, alias, b.opt.ConflictSuffix2+iStr)) {
+        if !b.march.ls1.has(SuffixName(ctx, file, b.opt.ConflictSuffix1+iStr)) &&
+            !b.march.ls1.has(SuffixName(ctx, alias, b.opt.ConflictSuffix1+iStr)) &&
+            !b.march.ls2.has(SuffixName(ctx, file, b.opt.ConflictSuffix2+iStr)) &&
+            !b.march.ls2.has(SuffixName(ctx, alias, b.opt.ConflictSuffix2+iStr)) {
             // make sure it still holds true with suffixes switched (it should)
-            if !ls1.has(SuffixName(ctx, file, b.opt.ConflictSuffix2+iStr)) &&
-                !ls1.has(SuffixName(ctx, alias, b.opt.ConflictSuffix2+iStr)) &&
-                !ls2.has(SuffixName(ctx, file, b.opt.ConflictSuffix1+iStr)) &&
-                !ls2.has(SuffixName(ctx, alias, b.opt.ConflictSuffix1+iStr)) {
+            if !b.march.ls1.has(SuffixName(ctx, file, b.opt.ConflictSuffix2+iStr)) &&
+                !b.march.ls1.has(SuffixName(ctx, alias, b.opt.ConflictSuffix2+iStr)) &&
+                !b.march.ls2.has(SuffixName(ctx, file, b.opt.ConflictSuffix1+iStr)) &&
+                !b.march.ls2.has(SuffixName(ctx, alias, b.opt.ConflictSuffix1+iStr)) {
                 fs.Debugf(file, "The first available suffix is: %s", iStr)
                 return i
             }

@@ -280,10 +280,10 @@ func (b *bisyncRun) numerate(ctx context.Context, startnum int, file, alias stri

 // like numerate, but consider only one side's suffix (for when suffixes are different)
 func (b *bisyncRun) numerateSingle(ctx context.Context, startnum int, file, alias string, path int) int {
-    lsA, lsB := ls1, ls2
+    lsA, lsB := b.march.ls1, b.march.ls2
     suffix := b.opt.ConflictSuffix1
     if path == 2 {
-        lsA, lsB = ls2, ls1
+        lsA, lsB = b.march.ls2, b.march.ls1
         suffix = b.opt.ConflictSuffix2
     }
     for i := startnum; i < math.MaxInt; i++ {

@@ -299,7 +299,7 @@ func (b *bisyncRun) numerateSingle(ctx context.Context, startnum int, file, alia
     return 0 // not really possible, as no one has 9223372036854775807 conflicts, and if they do, they have bigger problems
 }

-func (b *bisyncRun) rename(ctx context.Context, thisNamePair namePair, thisPath, thatPath string, thisFs fs.Fs, thisPathNum, thatPathNum, winningPath int, q, renameSkipped *bilib.Names) error {
+func (b *bisyncRun) rename(ctx context.Context, thisNamePair namePair, thisPath, thatPath string, thisFs fs.Fs, thisPathNum, thatPathNum, winningPath int, q, renameSkipped *bilib.Names) (err error) {
     if winningPath == thisPathNum {
         b.indent(fmt.Sprintf("!Path%d", thisPathNum), thisPath+thisNamePair.newName, fmt.Sprintf("Not renaming Path%d copy, as it was determined the winner", thisPathNum))
     } else {

@@ -321,7 +321,7 @@ func (b *bisyncRun) rename(ctx context.Context, thisNamePair namePair, thisPath,
     return nil
 }

-func (b *bisyncRun) delete(ctx context.Context, thisNamePair namePair, thisPath, thatPath string, thisFs fs.Fs, thisPathNum, thatPathNum int, renameSkipped *bilib.Names) error {
+func (b *bisyncRun) delete(ctx context.Context, thisNamePair namePair, thisPath string, thisFs fs.Fs, thisPathNum int, renameSkipped *bilib.Names) (err error) {
     skip := operations.SkipDestructive(ctx, thisNamePair.oldName, "delete")
     if !skip {
         b.indent(fmt.Sprintf("!Path%d", thisPathNum), thisPath+thisNamePair.oldName, fmt.Sprintf("Deleting Path%d copy", thisPathNum))

@@ -359,17 +359,17 @@ func (b *bisyncRun) conflictWinner(ds1, ds2 *deltaSet, remote1, remote2 string)
         return 2
     case PreferNewer, PreferOlder:
         t1, t2 := ds1.time[remote1], ds2.time[remote2]
-        return b.resolveNewerOlder(t1, t2, remote1, remote2, b.opt.ConflictResolve)
+        return b.resolveNewerOlder(t1, t2, remote1, b.opt.ConflictResolve)
     case PreferLarger, PreferSmaller:
         s1, s2 := ds1.size[remote1], ds2.size[remote2]
-        return b.resolveLargerSmaller(s1, s2, remote1, remote2, b.opt.ConflictResolve)
+        return b.resolveLargerSmaller(s1, s2, remote1, b.opt.ConflictResolve)
     default:
         return 0
     }
 }

 // returns the winning path number, or 0 if winner can't be determined
-func (b *bisyncRun) resolveNewerOlder(t1, t2 time.Time, remote1, remote2 string, prefer Prefer) int {
+func (b *bisyncRun) resolveNewerOlder(t1, t2 time.Time, remote1 string, prefer Prefer) int {
     if fs.GetModifyWindow(b.octx, b.fs1, b.fs2) == fs.ModTimeNotSupported {
         fs.Infof(remote1, "Winner cannot be determined as at least one path lacks modtime support.")
         return 0

@@ -380,31 +380,31 @@ func (b *bisyncRun) resolveNewerOlder(t1, t2 time.Time, remote1, remote2 string,
     }
     if t1.After(t2) {
         if prefer == PreferNewer {
-            fs.Infof(remote1, "Path1 is newer. Path1: %v, Path2: %v, Difference: %s", t1.Local(), t2.Local(), t1.Sub(t2))
+            fs.Infof(remote1, "Path1 is newer. Path1: %v, Path2: %v, Difference: %s", t1.In(LogTZ), t2.In(LogTZ), t1.Sub(t2))
             return 1
         } else if prefer == PreferOlder {
-            fs.Infof(remote1, "Path2 is older. Path1: %v, Path2: %v, Difference: %s", t1.Local(), t2.Local(), t1.Sub(t2))
+            fs.Infof(remote1, "Path2 is older. Path1: %v, Path2: %v, Difference: %s", t1.In(LogTZ), t2.In(LogTZ), t1.Sub(t2))
             return 2
         }
     } else if t1.Before(t2) {
         if prefer == PreferNewer {
-            fs.Infof(remote1, "Path2 is newer. Path1: %v, Path2: %v, Difference: %s", t1.Local(), t2.Local(), t2.Sub(t1))
+            fs.Infof(remote1, "Path2 is newer. Path1: %v, Path2: %v, Difference: %s", t1.In(LogTZ), t2.In(LogTZ), t2.Sub(t1))
             return 2
         } else if prefer == PreferOlder {
-            fs.Infof(remote1, "Path1 is older. Path1: %v, Path2: %v, Difference: %s", t1.Local(), t2.Local(), t2.Sub(t1))
+            fs.Infof(remote1, "Path1 is older. Path1: %v, Path2: %v, Difference: %s", t1.In(LogTZ), t2.In(LogTZ), t2.Sub(t1))
             return 1
         }
     }
     if t1.Equal(t2) {
-        fs.Infof(remote1, "Winner cannot be determined as times are equal. Path1: %v, Path2: %v, Difference: %s", t1.Local(), t2.Local(), t2.Sub(t1))
+        fs.Infof(remote1, "Winner cannot be determined as times are equal. Path1: %v, Path2: %v, Difference: %s", t1.In(LogTZ), t2.In(LogTZ), t2.Sub(t1))
         return 0
     }
-    fs.Errorf(remote1, "Winner cannot be determined. Path1: %v, Path2: %v", t1.Local(), t2.Local()) // shouldn't happen unless prefer is of wrong type
+    fs.Errorf(remote1, "Winner cannot be determined. Path1: %v, Path2: %v", t1.In(LogTZ), t2.In(LogTZ)) // shouldn't happen unless prefer is of wrong type
     return 0
 }

 // returns the winning path number, or 0 if winner can't be determined
-func (b *bisyncRun) resolveLargerSmaller(s1, s2 int64, remote1, remote2 string, prefer Prefer) int {
+func (b *bisyncRun) resolveLargerSmaller(s1, s2 int64, remote1 string, prefer Prefer) int {
     if s1 < 0 || s2 < 0 {
         fs.Infof(remote1, "Winner cannot be determined as at least one size is unknown. Path1: %v, Path2: %v", s1, s2)
         return 0
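The resolve hunks also swap `t.Local()` for `t.In(LogTZ)` in log output. Assuming `LogTZ` is a package-level `*time.Location` (its declaration is not shown in this diff), the point of the change is deterministic log timestamps; a sketch with an assumed UTC zone:

```go
package main

import (
	"fmt"
	"time"
)

// logTZ plays the role of the LogTZ location in the diff; pinning log
// output to one zone keeps conflict-resolution messages deterministic
// regardless of the machine's local time zone. (Assumed UTC here.)
var logTZ = time.UTC

func main() {
	t1 := time.Date(2024, 6, 1, 12, 0, 0, 0, time.UTC)
	t2 := t1.Add(90 * time.Second)
	// t.Local() would print differently on every machine; t.In(logTZ) won't.
	fmt.Printf("Path1 is older. Path1: %v, Path2: %v, Difference: %s\n",
		t1.In(logTZ), t2.In(logTZ), t2.Sub(t1))
}
```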
@@ -20,7 +20,6 @@ func (b *bisyncRun) setResyncDefaults() {
     }
     if b.opt.ResyncMode != PreferNone {
         b.opt.Resync = true
-        Opt.Resync = true // shouldn't be using this one, but set to be safe
     }

     // checks and warnings

@@ -41,18 +40,18 @@ func (b *bisyncRun) setResyncDefaults() {
 // It will generate path1 and path2 listings,
 // copy any unique files to the opposite path,
 // and resolve any differing files according to the --resync-mode.
-func (b *bisyncRun) resync(octx, fctx context.Context) error {
+func (b *bisyncRun) resync(fctx context.Context) (err error) {
     fs.Infof(nil, "Copying Path2 files to Path1")

     // Save blank filelists (will be filled from sync results)
-    var ls1 = newFileList()
-    var ls2 = newFileList()
-    err = ls1.save(fctx, b.newListing1)
+    ls1 := newFileList()
+    ls2 := newFileList()
+    err = ls1.save(b.newListing1)
     if err != nil {
         b.handleErr(ls1, "error saving ls1 from resync", err, true, true)
         b.abort = true
     }
-    err = ls2.save(fctx, b.newListing2)
+    err = ls2.save(b.newListing2)
     if err != nil {
         b.handleErr(ls2, "error saving ls2 from resync", err, true, true)
         b.abort = true
@@ -43,15 +43,21 @@ var commandDefinition = &cobra.Command{

 You can use it like this to output a single file

-    rclone cat remote:path/to/file
+|||sh
+rclone cat remote:path/to/file
+|||

 Or like this to output any file in dir or its subdirectories.

-    rclone cat remote:path/to/dir
+|||sh
+rclone cat remote:path/to/dir
+|||

 Or like this to output any .txt files in dir or its subdirectories.

-    rclone --include "*.txt" cat remote:path/to/dir
+|||sh
+rclone --include "*.txt" cat remote:path/to/dir
+|||

 Use the |--head| flag to print characters only at the start, |--tail| for
 the end and |--offset| and |--count| to print a section in the middle.

@@ -62,14 +68,17 @@ Use the |--separator| flag to print a separator value between files. Be sure to
 shell-escape special characters. For example, to print a newline between
 files, use:

-* bash:
+- bash:

-    rclone --include "*.txt" --separator $'\n' cat remote:path/to/dir
+|||sh
+rclone --include "*.txt" --separator $'\n' cat remote:path/to/dir
+|||

-* powershell:
+- powershell:

-    rclone --include "*.txt" --separator "|n" cat remote:path/to/dir
-`, "|", "`"),
+|||powershell
+rclone --include "*.txt" --separator "|n" cat remote:path/to/dir
+|||`, "|", "`"),
     Annotations: map[string]string{
         "versionIntroduced": "v1.33",
         "groups":            "Filter,Listing",
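The documentation hunks above convert indented command examples into fenced code blocks with language tags. Because these strings are Go raw string literals, which cannot contain a backtick, the fences are written with `|` and converted with `strings.ReplaceAll`, swapping each `|` for a backtick, as visible in the diff. A minimal demonstration of that escaping trick:

```go
package main

import (
	"fmt"
	"strings"
)

// A Go raw string literal cannot contain a backtick, so the help text is
// written with | as a placeholder and converted once, exactly as the
// cobra command definitions in the diff do.
var help = strings.ReplaceAll(`You can use it like this to output a single file

|||sh
rclone cat remote:path/to/file
|||
`, "|", "`")

func main() {
	fmt.Print(help) // prints a real fenced code block with backticks
}
```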
@@ -74,8 +74,7 @@ you what happened to it. These are reminiscent of diff files.
 - |! path| means there was an error reading or hashing the source or dest.

 The default number of parallel checks is 8. See the [--checkers](/docs/#checkers-int)
-option for more information.
-`, "|", "`")
+option for more information.`, "|", "`")

 // GetCheckOpt gets the options corresponding to the check flags
 func GetCheckOpt(fsrc, fdst fs.Fs) (opt *operations.CheckOpt, close func(), err error) {
@@ -17,8 +17,7 @@ var commandDefinition = &cobra.Command{
     Use:   "cleanup remote:path",
     Short: `Clean up the remote if possible.`,
     Long: `Clean up the remote if possible. Empty the trash or delete old file
-versions. Not supported by all remotes.
-`,
+versions. Not supported by all remotes.`,
     Annotations: map[string]string{
         "versionIntroduced": "v1.31",
         "groups":            "Important",
@@ -44,8 +44,7 @@ var configCommand = &cobra.Command{
|
|||||||
Short: `Enter an interactive configuration session.`,
|
Short: `Enter an interactive configuration session.`,
|
||||||
Long: `Enter an interactive configuration session where you can setup new
|
Long: `Enter an interactive configuration session where you can setup new
|
||||||
remotes and manage existing ones. You may also set or remove a
|
remotes and manage existing ones. You may also set or remove a
|
||||||
password to protect your configuration.
|
password to protect your configuration.`,
|
||||||
`,
|
|
||||||
Annotations: map[string]string{
|
Annotations: map[string]string{
|
||||||
"versionIntroduced": "v1.39",
|
"versionIntroduced": "v1.39",
|
||||||
},
|
},
|
||||||
@@ -134,9 +133,7 @@ sensitive info with XXX.
|
|||||||
|
|
||||||
This makes the config file suitable for posting online for support.
|
This makes the config file suitable for posting online for support.
|
||||||
|
|
||||||
It should be double checked before posting as the redaction may not be perfect.
|
It should be double checked before posting as the redaction may not be perfect.`,
|
||||||
|
|
||||||
`,
|
|
||||||
Annotations: map[string]string{
|
Annotations: map[string]string{
|
||||||
"versionIntroduced": "v1.64",
|
"versionIntroduced": "v1.64",
|
||||||
},
|
},
|
||||||
@@ -178,8 +175,8 @@ var configProvidersCommand = &cobra.Command{
|
|||||||
|
|
||||||
var updateRemoteOpt config.UpdateRemoteOpt
|
var updateRemoteOpt config.UpdateRemoteOpt
|
||||||
|
|
||||||
var configPasswordHelp = strings.ReplaceAll(`
|
var configPasswordHelp = strings.ReplaceAll(
|
||||||
Note that if the config process would normally ask a question the
|
`Note that if the config process would normally ask a question the
|
||||||
default is taken (unless |--non-interactive| is used). Each time
|
default is taken (unless |--non-interactive| is used). Each time
|
||||||
that happens rclone will print or DEBUG a message saying how to
|
that happens rclone will print or DEBUG a message saying how to
|
||||||
affect the value taken.
|
affect the value taken.
|
||||||
@@ -205,29 +202,29 @@ it.
|
|||||||
|
|
||||||
This will look something like (some irrelevant detail removed):
|
This will look something like (some irrelevant detail removed):
|
||||||
|
|
||||||
|||
|
|||json
|
||||||
{
|
{
|
||||||
"State": "*oauth-islocal,teamdrive,,",
|
"State": "*oauth-islocal,teamdrive,,",
|
||||||
"Option": {
|
"Option": {
|
||||||
"Name": "config_is_local",
|
"Name": "config_is_local",
|
||||||
"Help": "Use web browser to automatically authenticate rclone with remote?\n * Say Y if the machine running rclone has a web browser you can use\n * Say N if running rclone on a (remote) machine without web browser access\nIf not sure try Y. If Y failed, try N.\n",
|
"Help": "Use web browser to automatically authenticate rclone with remote?\n * Say Y if the machine running rclone has a web browser you can use\n * Say N if running rclone on a (remote) machine without web browser access\nIf not sure try Y. If Y failed, try N.\n",
|
||||||
"Default": true,
|
"Default": true,
|
||||||
"Examples": [
|
"Examples": [
|
||||||
{
|
{
|
||||||
"Value": "true",
|
"Value": "true",
|
||||||
"Help": "Yes"
|
"Help": "Yes"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"Value": "false",
|
"Value": "false",
|
||||||
"Help": "No"
|
"Help": "No"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"Required": false,
|
"Required": false,
|
||||||
"IsPassword": false,
|
"IsPassword": false,
|
||||||
"Type": "bool",
|
"Type": "bool",
|
||||||
"Exclusive": true,
|
"Exclusive": true,
|
||||||
},
|
},
|
||||||
"Error": "",
|
"Error": "",
|
||||||
}
|
}
|
||||||
|||
|
|||
|
||||||
|
|
||||||
@@ -250,7 +247,9 @@ The keys of |Option| are used as follows:
|
|||||||
If |Error| is set then it should be shown to the user at the same
|
If |Error| is set then it should be shown to the user at the same
|
||||||
time as the question.
|
time as the question.
|
||||||
|
|
||||||
rclone config update name --continue --state "*oauth-islocal,teamdrive,," --result "true"
|
|||sh
|
||||||
|
rclone config update name --continue --state "*oauth-islocal,teamdrive,," --result "true"
|
||||||
|
|||
|
||||||
|
|
||||||
Note that when using |--continue| all passwords should be passed in
|
Note that when using |--continue| all passwords should be passed in
|
||||||
the clear (not obscured). Any default config values should be passed
|
the clear (not obscured). Any default config values should be passed
|
||||||
@@ -264,8 +263,7 @@ not just the post config questions. Any parameters are used as
|
|||||||
defaults for questions as usual.
|
defaults for questions as usual.
|
||||||
|
|
||||||
Note that |bin/config.py| in the rclone source implements this protocol
|
Note that |bin/config.py| in the rclone source implements this protocol
|
||||||
as a readable demonstration.
|
as a readable demonstration.`, "|", "`")
|
||||||
`, "|", "`")
|
|
||||||
var configCreateCommand = &cobra.Command{
|
var configCreateCommand = &cobra.Command{
|
||||||
Use: "create name type [key value]*",
|
Use: "create name type [key value]*",
|
||||||
Short: `Create a new remote with name, type and options.`,
|
Short: `Create a new remote with name, type and options.`,
|
||||||
@@ -275,13 +273,18 @@ should be passed in pairs of |key| |value| or as |key=value|.
|
|||||||
For example, to make a swift remote of name myremote using auto config
|
For example, to make a swift remote of name myremote using auto config
|
||||||
you would do:
|
you would do:
|
||||||
|
|
||||||
rclone config create myremote swift env_auth true
|
|||sh
|
||||||
rclone config create myremote swift env_auth=true
|
rclone config create myremote swift env_auth true
|
||||||
|
rclone config create myremote swift env_auth=true
|
||||||
|
|||
|
||||||
|
|
||||||
So for example if you wanted to configure a Google Drive remote but
|
So for example if you wanted to configure a Google Drive remote but
|
||||||
using remote authorization you would do this:
|
using remote authorization you would do this:
|
||||||
|
|
||||||
rclone config create mydrive drive config_is_local=false
|
|||sh
|
||||||
|
rclone config create mydrive drive config_is_local=false
|
||||||
|
|||
|
||||||
|
|
||||||
`, "|", "`") + configPasswordHelp,
|
`, "|", "`") + configPasswordHelp,
|
||||||
Annotations: map[string]string{
|
Annotations: map[string]string{
|
||||||
"versionIntroduced": "v1.39",
|
"versionIntroduced": "v1.39",
|
||||||
@@ -344,13 +347,18 @@ pairs of |key| |value| or as |key=value|.
 For example, to update the env_auth field of a remote of name myremote
 you would do:
 
-    rclone config update myremote env_auth true
-    rclone config update myremote env_auth=true
+|||sh
+rclone config update myremote env_auth true
+rclone config update myremote env_auth=true
+|||
 
 If the remote uses OAuth the token will be updated, if you don't
 require this add an extra parameter thus:
 
-    rclone config update myremote env_auth=true config_refresh_token=false
+|||sh
+rclone config update myremote env_auth=true config_refresh_token=false
+|||
 
 `, "|", "`") + configPasswordHelp,
     Annotations: map[string]string{
         "versionIntroduced": "v1.39",
@@ -388,12 +396,13 @@ The |password| should be passed in in clear (unobscured).
 
 For example, to set password of a remote of name myremote you would do:
 
-    rclone config password myremote fieldname mypassword
-    rclone config password myremote fieldname=mypassword
+|||sh
+rclone config password myremote fieldname mypassword
+rclone config password myremote fieldname=mypassword
+|||
 
 This command is obsolete now that "config update" and "config create"
-both support obscuring passwords directly.
-`, "|", "`"),
+both support obscuring passwords directly.`, "|", "`"),
     Annotations: map[string]string{
         "versionIntroduced": "v1.39",
     },
@@ -441,8 +450,7 @@ var configReconnectCommand = &cobra.Command{
 
 To disconnect the remote use "rclone config disconnect".
 
-This normally means going through the interactive oauth flow again.
-`,
+This normally means going through the interactive oauth flow again.`,
     RunE: func(command *cobra.Command, args []string) error {
         ctx := context.Background()
         cmd.CheckArgs(1, 1, command, args)
@@ -461,8 +469,7 @@ var configDisconnectCommand = &cobra.Command{
 
 This normally means revoking the oauth token.
 
-To reconnect use "rclone config reconnect".
-`,
+To reconnect use "rclone config reconnect".`,
     RunE: func(command *cobra.Command, args []string) error {
         cmd.CheckArgs(1, 1, command, args)
         f := cmd.NewFsSrc(args)
@@ -490,8 +497,7 @@ var configUserInfoCommand = &cobra.Command{
     Use:   "userinfo remote:",
     Short: `Prints info about logged in user of remote.`,
     Long: `This prints the details of the person logged in to the cloud storage
-system.
-`,
+system.`,
     RunE: func(command *cobra.Command, args []string) error {
         cmd.CheckArgs(1, 1, command, args)
         f := cmd.NewFsSrc(args)
@@ -534,8 +540,7 @@ var configEncryptionCommand = &cobra.Command{
     Use:   "encryption",
     Short: `set, remove and check the encryption for the config file`,
     Long: `This command sets, clears and checks the encryption for the config file using
-the subcommands below.
-`,
+the subcommands below.`,
 }
 
 var configEncryptionSetCommand = &cobra.Command{
@@ -559,8 +564,7 @@ variable to distinguish which password you must supply.
 Alternatively you can remove the password first (with |rclone config
 encryption remove|), then set it again with this command which may be
 easier if you don't mind the unencrypted config file being on the disk
-briefly.
-`, "|", "`"),
+briefly.`, "|", "`"),
     RunE: func(command *cobra.Command, args []string) error {
         cmd.CheckArgs(0, 0, command, args)
         config.LoadedData()
@@ -580,8 +584,7 @@ If |--password-command| is in use, this will be called to supply the old config
 password.
 
 If the config was not encrypted then no error will be returned and
-this command will do nothing.
-`, "|", "`"),
+this command will do nothing.`, "|", "`"),
     RunE: func(command *cobra.Command, args []string) error {
         cmd.CheckArgs(0, 0, command, args)
         config.LoadedData()
@@ -600,8 +603,7 @@ It will attempt to decrypt the config using the password you supply.
 If decryption fails it will return a non-zero exit code if using
 |--password-command|, otherwise it will prompt again for the password.
 
-If the config file is not encrypted it will return a non zero exit code.
-`, "|", "`"),
+If the config file is not encrypted it will return a non zero exit code.`, "|", "`"),
     RunE: func(command *cobra.Command, args []string) error {
         cmd.CheckArgs(0, 0, command, args)
         config.LoadedData()
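For quick reference, the non-interactive config calls touched by the hunks above chain together naturally in a script. A minimal sketch, using only invocations that appear in the help text itself (the remote name and option values are the documentation's own placeholders):

```sh
# Create a remote, passing options as key=value pairs
rclone config create myremote swift env_auth=true

# Update a single field later without refreshing the OAuth token
rclone config update myremote env_auth=true config_refresh_token=false
```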
@@ -31,18 +31,27 @@ var commandDefinition = &cobra.Command{
     Use:   "convmv dest:path --name-transform XXX",
     Short: `Convert file and directory names in place.`,
     // Warning! "¡" will be replaced by backticks below
-    Long: strings.ReplaceAll(`
-convmv supports advanced path name transformations for converting and renaming files and directories by applying prefixes, suffixes, and other alterations.
+    Long: strings.ReplaceAll(`convmv supports advanced path name transformations for converting and renaming
+files and directories by applying prefixes, suffixes, and other alterations.
 
-`+transform.Help()+`Multiple transformations can be used in sequence, applied in the order they are specified on the command line.
+`+transform.Help()+`The regex command generally accepts Perl-style regular expressions, the exact
+syntax is defined in the [Go regular expression reference](https://golang.org/pkg/regexp/syntax/).
+The replacement string may contain capturing group variables, referencing
+capturing groups using the syntax ¡$name¡ or ¡${name}¡, where the name can
+refer to a named capturing group or it can simply be the index as a number.
+To insert a literal $, use $$.
+
+Multiple transformations can be used in sequence, applied
+in the order they are specified on the command line.
 
 The ¡--name-transform¡ flag is also available in ¡sync¡, ¡copy¡, and ¡move¡.
 
-## Files vs Directories
+### Files vs Directories
 
-By default ¡--name-transform¡ will only apply to file names. The means only the leaf file name will be transformed.
-However some of the transforms would be better applied to the whole path or just directories.
-To choose which which part of the file path is affected some tags can be added to the ¡--name-transform¡.
+By default ¡--name-transform¡ will only apply to file names. The means only the
+leaf file name will be transformed. However some of the transforms would be
+better applied to the whole path or just directories. To choose which which
+part of the file path is affected some tags can be added to the ¡--name-transform¡.
 
 | Tag | Effect |
 |------|------|
@@ -50,42 +59,58 @@ To choose which which part of the file path is affected some tags can be added t
 | ¡dir¡ | Only transform name of directories - these may appear anywhere in the path |
 | ¡all¡ | Transform the entire path for files and directories |
 
-This is used by adding the tag into the transform name like this: ¡--name-transform file,prefix=ABC¡ or ¡--name-transform dir,prefix=DEF¡.
+This is used by adding the tag into the transform name like this:
+¡--name-transform file,prefix=ABC¡ or ¡--name-transform dir,prefix=DEF¡.
 
-For some conversions using all is more likely to be useful, for example ¡--name-transform all,nfc¡.
+For some conversions using all is more likely to be useful, for example
+¡--name-transform all,nfc¡.
 
-Note that ¡--name-transform¡ may not add path separators ¡/¡ to the name. This will cause an error.
+Note that ¡--name-transform¡ may not add path separators ¡/¡ to the name.
+This will cause an error.
 
-## Ordering and Conflicts
+### Ordering and Conflicts
 
-* Transformations will be applied in the order specified by the user.
-* If the ¡file¡ tag is in use (the default) then only the leaf name of files will be transformed.
-* If the ¡dir¡ tag is in use then directories anywhere in the path will be transformed
-* If the ¡all¡ tag is in use then directories and files anywhere in the path will be transformed
-* Each transformation will be run one path segment at a time.
-* If a transformation adds a ¡/¡ or ends up with an empty path segment then that will be an error.
-* It is up to the user to put the transformations in a sensible order.
-* Conflicting transformations, such as ¡prefix¡ followed by ¡trimprefix¡ or ¡nfc¡ followed by ¡nfd¡, are possible.
-* Instead of enforcing mutual exclusivity, transformations are applied in sequence as specified by the
-user, allowing for intentional use cases (e.g., trimming one prefix before adding another).
-* Users should be aware that certain combinations may lead to unexpected results and should verify
-transformations using ¡--dry-run¡ before execution.
+- Transformations will be applied in the order specified by the user.
+- If the ¡file¡ tag is in use (the default) then only the leaf name of files
+  will be transformed.
+- If the ¡dir¡ tag is in use then directories anywhere in the path will be
+  transformed
+- If the ¡all¡ tag is in use then directories and files anywhere in the path
+  will be transformed
+- Each transformation will be run one path segment at a time.
+- If a transformation adds a ¡/¡ or ends up with an empty path segment then
+  that will be an error.
+- It is up to the user to put the transformations in a sensible order.
+- Conflicting transformations, such as ¡prefix¡ followed by ¡trimprefix¡ or
+  ¡nfc¡ followed by ¡nfd¡, are possible.
+- Instead of enforcing mutual exclusivity, transformations are applied in
+  sequence as specified by the user, allowing for intentional use cases
+  (e.g., trimming one prefix before adding another).
+- Users should be aware that certain combinations may lead to unexpected
+  results and should verify transformations using ¡--dry-run¡ before execution.
 
-## Race Conditions and Non-Deterministic Behavior
+### Race Conditions and Non-Deterministic Behavior
 
-Some transformations, such as ¡replace=old:new¡, may introduce conflicts where multiple source files map to the same destination name.
-This can lead to race conditions when performing concurrent transfers. It is up to the user to anticipate these.
-* If two files from the source are transformed into the same name at the destination, the final state may be non-deterministic.
-* Running rclone check after a sync using such transformations may erroneously report missing or differing files due to overwritten results.
+Some transformations, such as ¡replace=old:new¡, may introduce conflicts where
+multiple source files map to the same destination name. This can lead to race
+conditions when performing concurrent transfers. It is up to the user to
+anticipate these.
+
+- If two files from the source are transformed into the same name at the
+  destination, the final state may be non-deterministic.
+- Running rclone check after a sync using such transformations may erroneously
+  report missing or differing files due to overwritten results.
 
 To minimize risks, users should:
-* Carefully review transformations that may introduce conflicts.
-* Use ¡--dry-run¡ to inspect changes before executing a sync (but keep in mind that it won't show the effect of non-deterministic transformations).
-* Avoid transformations that cause multiple distinct source files to map to the same destination name.
-* Consider disabling concurrency with ¡--transfers=1¡ if necessary.
-* Certain transformations (e.g. ¡prefix¡) will have a multiplying effect every time they are used. Avoid these when using ¡bisync¡.
-
-`, "¡", "`"),
+
+- Carefully review transformations that may introduce conflicts.
+- Use ¡--dry-run¡ to inspect changes before executing a sync (but keep in mind
+  that it won't show the effect of non-deterministic transformations).
+- Avoid transformations that cause multiple distinct source files to map to the
+  same destination name.
+- Consider disabling concurrency with ¡--transfers=1¡ if necessary.
+- Certain transformations (e.g. ¡prefix¡) will have a multiplying effect every
+  time they are used. Avoid these when using ¡bisync¡.`, "¡", "`"),
     Annotations: map[string]string{
         "versionIntroduced": "v1.70",
         "groups":            "Filter,Listing,Important,Copy",
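The tags and ordering rules documented above compose on a single command line. A sketch under the same conventions (the destination path is a placeholder, and `--dry-run` is the verification step the text itself recommends):

```sh
# Normalise every path segment to NFC, then prefix leaf file names only;
# transforms apply in the order they are given
rclone convmv dest:path --name-transform "all,nfc" --name-transform "file,prefix=ABC" --dry-run
```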
@@ -50,22 +50,30 @@ go there.
 
 For example
 
-    rclone copy source:sourcepath dest:destpath
+|||sh
+rclone copy source:sourcepath dest:destpath
+|||
 
 Let's say there are two files in sourcepath
 
-    sourcepath/one.txt
-    sourcepath/two.txt
+|||text
+sourcepath/one.txt
+sourcepath/two.txt
+|||
 
 This copies them to
 
-    destpath/one.txt
-    destpath/two.txt
+|||text
+destpath/one.txt
+destpath/two.txt
+|||
 
 Not to
 
-    destpath/sourcepath/one.txt
-    destpath/sourcepath/two.txt
+|||text
+destpath/sourcepath/one.txt
+destpath/sourcepath/two.txt
+|||
 
 If you are familiar with |rsync|, rclone always works as if you had
 written a trailing |/| - meaning "copy the contents of this directory".
@@ -81,20 +89,22 @@ For example, if you have many files in /path/to/src but only a few of
 them change every day, you can copy all the files which have changed
 recently very efficiently like this:
 
-    rclone copy --max-age 24h --no-traverse /path/to/src remote:
+|||sh
+rclone copy --max-age 24h --no-traverse /path/to/src remote:
+|||
 
 Rclone will sync the modification times of files and directories if
 the backend supports it. If metadata syncing is required then use the
 |--metadata| flag.
 
 Note that the modification time and metadata for the root directory
-will **not** be synced. See https://github.com/rclone/rclone/issues/7652
+will **not** be synced. See [issue #7652](https://github.com/rclone/rclone/issues/7652)
 for more info.
 
 **Note**: Use the |-P|/|--progress| flag to view real-time transfer statistics.
 
-**Note**: Use the |--dry-run| or the |--interactive|/|-i| flag to test without copying anything.
+**Note**: Use the |--dry-run| or the |--interactive|/|-i| flag to test without
+copying anything.
 
 `, "|", "`") + operationsflags.Help(),
     Annotations: map[string]string{
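Putting the incremental-copy advice above together, a hedged sketch combining the documented flags (the source path and remote are placeholders):

```sh
# Preview which recently changed files would be copied, then run for real
rclone copy --max-age 24h --no-traverse --dry-run /path/to/src remote:
rclone copy --max-age 24h --no-traverse -P /path/to/src remote:
```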
@@ -35,26 +35,32 @@ name. If the source is a directory then it acts exactly like the
 
 So
 
-    rclone copyto src dst
+` + "```sh" + `
+rclone copyto src dst
+` + "```" + `
 
-where src and dst are rclone paths, either remote:path or
-/path/to/local or C:\windows\path\if\on\windows.
+where src and dst are rclone paths, either ` + "`remote:path`" + ` or
+` + "`/path/to/local`" + ` or ` + "`C:\\windows\\path\\if\\on\\windows`" + `.
 
 This will:
 
-    if src is file
-        copy it to dst, overwriting an existing file if it exists
-    if src is directory
-        copy it to dst, overwriting existing files if they exist
-        see copy command for full details
+` + "```text" + `
+if src is file
+    copy it to dst, overwriting an existing file if it exists
+if src is directory
+    copy it to dst, overwriting existing files if they exist
+    see copy command for full details
+` + "```" + `
 
 This doesn't transfer files that are identical on src and dst, testing
 by size and modification time or MD5SUM. It doesn't delete files from
 the destination.
 
-*If you are looking to copy just a byte range of a file, please see 'rclone cat --offset X --count Y'*
+*If you are looking to copy just a byte range of a file, please see
+` + "`rclone cat --offset X --count Y`" + `.*
 
-**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics
+**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view
+real-time transfer statistics.
 
 ` + operationsflags.Help(),
     Annotations: map[string]string{
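A short usage sketch of the copyto semantics described above; the file names are hypothetical:

```sh
# Copy a single file under a new name at the destination;
# files identical by size + modtime or MD5SUM are skipped
rclone copyto /path/to/local/report.txt remote:backup/report-copy.txt
```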
@@ -48,7 +48,7 @@ set in HTTP headers, it will be used instead of the name from the URL.
 With |--print-filename| in addition, the resulting file name will be
 printed.
 
 Setting |--no-clobber| will prevent overwriting file on the
 destination if there is one with the same name.
 
 Setting |--stdout| or making the output file name |-|
@@ -62,9 +62,7 @@ If you can't get |rclone copyurl| to work then here are some things you can try:
 - |--bind 0.0.0.0| rclone will use IPv6 if available - try disabling it
 - |--bind ::0| to disable IPv4
 - |--user agent curl| - some sites have whitelists for curl's user-agent - try that
-- Make sure the site works with |curl| directly
-
-`, "|", "`"),
+- Make sure the site works with |curl| directly`, "|", "`"),
     Annotations: map[string]string{
         "versionIntroduced": "v1.43",
         "groups":            "Important",
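The troubleshooting checklist above maps directly onto flags. A sketch with a placeholder URL and remote:

```sh
# Refuse to overwrite an existing destination file
rclone copyurl --no-clobber https://example.com/file.bin remote:dir/file.bin

# If the download stalls, try forcing IPv4 as suggested in the checklist
rclone copyurl --bind 0.0.0.0 https://example.com/file.bin remote:dir/file.bin
```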
@@ -37,14 +37,18 @@ checksum of the file it has just encrypted.
 
 Use it like this
 
-    rclone cryptcheck /path/to/files encryptedremote:path
+` + "```sh" + `
+rclone cryptcheck /path/to/files encryptedremote:path
+` + "```" + `
 
 You can use it like this also, but that will involve downloading all
-the files in remote:path.
+the files in ` + "`remote:path`" + `.
 
-    rclone cryptcheck remote:path encryptedremote:path
+` + "```sh" + `
+rclone cryptcheck remote:path encryptedremote:path
+` + "```" + `
 
-After it has run it will log the status of the encryptedremote:.
+After it has run it will log the status of the ` + "`encryptedremote:`" + `.
 ` + check.FlagsHelp,
     Annotations: map[string]string{
         "versionIntroduced": "v1.36",
@@ -33,13 +33,13 @@ If you supply the ` + "`--reverse`" + ` flag, it will return encrypted file name
 
 use it like this
 
-    rclone cryptdecode encryptedremote: encryptedfilename1 encryptedfilename2
-
-    rclone cryptdecode --reverse encryptedremote: filename1 filename2
+` + "```sh" + `
+rclone cryptdecode encryptedremote: encryptedfilename1 encryptedfilename2
+rclone cryptdecode --reverse encryptedremote: filename1 filename2
+` + "```" + `
 
-Another way to accomplish this is by using the ` + "`rclone backend encode` (or `decode`)" + ` command.
-See the documentation on the [crypt](/crypt/) overlay for more info.
-`,
+Another way to accomplish this is by using the ` + "`rclone backend encode` (or `decode`)" + `
+command. See the documentation on the [crypt](/crypt/) overlay for more info.`,
     Annotations: map[string]string{
         "versionIntroduced": "v1.38",
     },
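The two crypt helpers above pair naturally in practice. A sketch using only command forms taken from the help text (the remote names are the documentation's own placeholders):

```sh
# Verify encrypted data against its plaintext source without downloading it
rclone cryptcheck /path/to/files encryptedremote:path

# Translate encrypted names to plaintext, or the reverse
rclone cryptdecode encryptedremote: encryptedfilename1
rclone cryptdecode --reverse encryptedremote: filename1
```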
@@ -47,15 +47,15 @@ directories have been merged.
 
 Next, if deduping by name, for every group of duplicate file names /
 hashes, it will delete all but one identical file it finds without
-confirmation. This means that for most duplicated files the ` +
-"`dedupe`" + ` command will not be interactive.
+confirmation. This means that for most duplicated files the
+` + "`dedupe`" + ` command will not be interactive.
 
 ` + "`dedupe`" + ` considers files to be identical if they have the
-same file path and the same hash. If the backend does not support hashes (e.g. crypt wrapping
-Google Drive) then they will never be found to be identical. If you
-use the ` + "`--size-only`" + ` flag then files will be considered
-identical if they have the same size (any hash will be ignored). This
-can be useful on crypt backends which do not support hashes.
+same file path and the same hash. If the backend does not support
+hashes (e.g. crypt wrapping Google Drive) then they will never be found
+to be identical. If you use the ` + "`--size-only`" + ` flag then files
+will be considered identical if they have the same size (any hash will be
+ignored). This can be useful on crypt backends which do not support hashes.
 
 Next rclone will resolve the remaining duplicates. Exactly which
 action is taken depends on the dedupe mode. By default, rclone will
@@ -68,71 +68,82 @@ Here is an example run.
 
 Before - with duplicates
 
-    $ rclone lsl drive:dupes
-    6048320 2016-03-05 16:23:16.798000000 one.txt
-    6048320 2016-03-05 16:23:11.775000000 one.txt
-    564374 2016-03-05 16:23:06.731000000 one.txt
-    6048320 2016-03-05 16:18:26.092000000 one.txt
-    6048320 2016-03-05 16:22:46.185000000 two.txt
-    1744073 2016-03-05 16:22:38.104000000 two.txt
-    564374 2016-03-05 16:22:52.118000000 two.txt
+` + "```sh" + `
+$ rclone lsl drive:dupes
+6048320 2016-03-05 16:23:16.798000000 one.txt
+6048320 2016-03-05 16:23:11.775000000 one.txt
+564374 2016-03-05 16:23:06.731000000 one.txt
+6048320 2016-03-05 16:18:26.092000000 one.txt
+6048320 2016-03-05 16:22:46.185000000 two.txt
+1744073 2016-03-05 16:22:38.104000000 two.txt
+564374 2016-03-05 16:22:52.118000000 two.txt
+` + "```" + `
 
 Now the ` + "`dedupe`" + ` session
 
-    $ rclone dedupe drive:dupes
-    2016/03/05 16:24:37 Google drive root 'dupes': Looking for duplicates using interactive mode.
-    one.txt: Found 4 files with duplicate names
-    one.txt: Deleting 2/3 identical duplicates (MD5 "1eedaa9fe86fd4b8632e2ac549403b36")
-    one.txt: 2 duplicates remain
-    1: 6048320 bytes, 2016-03-05 16:23:16.798000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
-    2: 564374 bytes, 2016-03-05 16:23:06.731000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
-    s) Skip and do nothing
-    k) Keep just one (choose which in next step)
-    r) Rename all to be different (by changing file.jpg to file-1.jpg)
-    s/k/r> k
-    Enter the number of the file to keep> 1
-    one.txt: Deleted 1 extra copies
-    two.txt: Found 3 files with duplicate names
-    two.txt: 3 duplicates remain
-    1: 564374 bytes, 2016-03-05 16:22:52.118000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
-    2: 6048320 bytes, 2016-03-05 16:22:46.185000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
-    3: 1744073 bytes, 2016-03-05 16:22:38.104000000, MD5 851957f7fb6f0bc4ce76be966d336802
-    s) Skip and do nothing
-    k) Keep just one (choose which in next step)
-    r) Rename all to be different (by changing file.jpg to file-1.jpg)
-    s/k/r> r
-    two-1.txt: renamed from: two.txt
-    two-2.txt: renamed from: two.txt
-    two-3.txt: renamed from: two.txt
+` + "```sh" + `
+$ rclone dedupe drive:dupes
+2016/03/05 16:24:37 Google drive root 'dupes': Looking for duplicates using interactive mode.
+one.txt: Found 4 files with duplicate names
+one.txt: Deleting 2/3 identical duplicates (MD5 "1eedaa9fe86fd4b8632e2ac549403b36")
+one.txt: 2 duplicates remain
+1: 6048320 bytes, 2016-03-05 16:23:16.798000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
+2: 564374 bytes, 2016-03-05 16:23:06.731000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
+s) Skip and do nothing
+k) Keep just one (choose which in next step)
+r) Rename all to be different (by changing file.jpg to file-1.jpg)
+s/k/r> k
+Enter the number of the file to keep> 1
+one.txt: Deleted 1 extra copies
+two.txt: Found 3 files with duplicate names
+two.txt: 3 duplicates remain
+1: 564374 bytes, 2016-03-05 16:22:52.118000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
+2: 6048320 bytes, 2016-03-05 16:22:46.185000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
+3: 1744073 bytes, 2016-03-05 16:22:38.104000000, MD5 851957f7fb6f0bc4ce76be966d336802
+s) Skip and do nothing
+k) Keep just one (choose which in next step)
+r) Rename all to be different (by changing file.jpg to file-1.jpg)
+s/k/r> r
+two-1.txt: renamed from: two.txt
+two-2.txt: renamed from: two.txt
+two-3.txt: renamed from: two.txt
+` + "```" + `
 
 The result being
 
-    $ rclone lsl drive:dupes
-    6048320 2016-03-05 16:23:16.798000000 one.txt
-    564374 2016-03-05 16:22:52.118000000 two-1.txt
-    6048320 2016-03-05 16:22:46.185000000 two-2.txt
-    1744073 2016-03-05 16:22:38.104000000 two-3.txt
+` + "```sh" + `
+$ rclone lsl drive:dupes
+6048320 2016-03-05 16:23:16.798000000 one.txt
+564374 2016-03-05 16:22:52.118000000 two-1.txt
+6048320 2016-03-05 16:22:46.185000000 two-2.txt
+1744073 2016-03-05 16:22:38.104000000 two-3.txt
+` + "```" + `
 
-Dedupe can be run non interactively using the ` + "`" + `--dedupe-mode` + "`" + ` flag or by using an extra parameter with the same value
+Dedupe can be run non interactively using the ` + "`" + `--dedupe-mode` + "`" + ` flag
+or by using an extra parameter with the same value
 
-* ` + "`" + `--dedupe-mode interactive` + "`" + ` - interactive as above.
-* ` + "`" + `--dedupe-mode skip` + "`" + ` - removes identical files then skips anything left.
-* ` + "`" + `--dedupe-mode first` + "`" + ` - removes identical files then keeps the first one.
-* ` + "`" + `--dedupe-mode newest` + "`" + ` - removes identical files then keeps the newest one.
-* ` + "`" + `--dedupe-mode oldest` + "`" + ` - removes identical files then keeps the oldest one.
-* ` + "`" + `--dedupe-mode largest` + "`" + ` - removes identical files then keeps the largest one.
-* ` + "`" + `--dedupe-mode smallest` + "`" + ` - removes identical files then keeps the smallest one.
-* ` + "`" + `--dedupe-mode rename` + "`" + ` - removes identical files then renames the rest to be different.
-* ` + "`" + `--dedupe-mode list` + "`" + ` - lists duplicate dirs and files only and changes nothing.
+- ` + "`" + `--dedupe-mode interactive` + "`" + ` - interactive as above.
+- ` + "`" + `--dedupe-mode skip` + "`" + ` - removes identical files then skips anything left.
+- ` + "`" + `--dedupe-mode first` + "`" + ` - removes identical files then keeps the first one.
+- ` + "`" + `--dedupe-mode newest` + "`" + ` - removes identical files then keeps the newest one.
+- ` + "`" + `--dedupe-mode oldest` + "`" + ` - removes identical files then keeps the oldest one.
+- ` + "`" + `--dedupe-mode largest` + "`" + ` - removes identical files then keeps the largest one.
+- ` + "`" + `--dedupe-mode smallest` + "`" + ` - removes identical files then keeps the smallest one.
+- ` + "`" + `--dedupe-mode rename` + "`" + ` - removes identical files then renames the rest to be different.
+- ` + "`" + `--dedupe-mode list` + "`" + ` - lists duplicate dirs and files only and changes nothing.
 
-For example, to rename all the identically named photos in your Google Photos directory, do
+For example, to rename all the identically named photos in your Google Photos
+directory, do
 
-    rclone dedupe --dedupe-mode rename "drive:Google Photos"
+` + "```sh" + `
+rclone dedupe --dedupe-mode rename "drive:Google Photos"
+` + "```" + `
 
 Or
 
-    rclone dedupe rename "drive:Google Photos"
-`,
+` + "```sh" + `
+rclone dedupe rename "drive:Google Photos"
+` + "```",
     Annotations: map[string]string{
         "versionIntroduced": "v1.27",
         "groups":            "Important",
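The mode list above supports fully non-interactive runs. A sketch, reusing the same example remote as the documentation:

```sh
# List duplicates without changing anything, then keep the newest copy of each
rclone dedupe --dedupe-mode list drive:dupes
rclone dedupe --dedupe-mode newest drive:dupes
```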
@@ -32,26 +32,29 @@ obeys include/exclude filters so can be used to selectively delete files.
 alone. If you want to delete a directory and all of its contents use
 the [purge](/commands/rclone_purge/) command.
 
-If you supply the |--rmdirs| flag, it will remove all empty directories along with it.
-You can also use the separate command [rmdir](/commands/rclone_rmdir/) or
-[rmdirs](/commands/rclone_rmdirs/) to delete empty directories only.
+If you supply the |--rmdirs| flag, it will remove all empty directories along
+with it. You can also use the separate command [rmdir](/commands/rclone_rmdir/)
+or [rmdirs](/commands/rclone_rmdirs/) to delete empty directories only.
 
 For example, to delete all files bigger than 100 MiB, you may first want to
 check what would be deleted (use either):
 
-    rclone --min-size 100M lsl remote:path
-    rclone --dry-run --min-size 100M delete remote:path
+|||sh
+rclone --min-size 100M lsl remote:path
+rclone --dry-run --min-size 100M delete remote:path
+|||
 
 Then proceed with the actual delete:
 
-    rclone --min-size 100M delete remote:path
+|||sh
+rclone --min-size 100M delete remote:path
+|||
 
 That reads "delete everything with a minimum size of 100 MiB", hence
 delete all files bigger than 100 MiB.
 
 **Important**: Since this can cause data loss, test first with the
-|--dry-run| or the |--interactive|/|-i| flag.
-`, "|", "`"),
+|--dry-run| or the |--interactive|/|-i| flag.`, "|", "`"),
     Annotations: map[string]string{
         "versionIntroduced": "v1.27",
         "groups":            "Important,Filter,Listing",
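Combining the `--rmdirs` note with the size-filter example above, a cautious sketch (the remote path is a placeholder):

```sh
# Preview first, then delete large files and sweep up directories left empty
rclone --dry-run --min-size 100M delete remote:path
rclone --min-size 100M delete --rmdirs remote:path
```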
@@ -19,9 +19,8 @@ var commandDefinition = &cobra.Command{
     Use:   "deletefile remote:path",
     Short: `Remove a single file from remote.`,
     Long: `Remove a single file from remote. Unlike ` + "`" + `delete` + "`" + ` it cannot be used to
-remove a directory and it doesn't obey include/exclude filters - if the specified file exists,
-it will always be removed.
-`,
+remove a directory and it doesn't obey include/exclude filters - if the
+specified file exists, it will always be removed.`,
     Annotations: map[string]string{
         "versionIntroduced": "v1.42",
         "groups":            "Important",
@@ -14,8 +14,7 @@ var completionDefinition = &cobra.Command{
     Use:   "completion [shell]",
     Short: `Output completion script for a given shell.`,
     Long: `Generates a shell completion script for rclone.
-Run with ` + "`--help`" + ` to list the supported shells.
-`,
+Run with ` + "`--help`" + ` to list the supported shells.`,
     Annotations: map[string]string{
         "versionIntroduced": "v1.33",
     },
@@ -18,17 +18,21 @@ var bashCommandDefinition = &cobra.Command{
     Short: `Output bash completion script for rclone.`,
     Long: `Generates a bash shell autocompletion script for rclone.
 
 By default, when run without any arguments,
 
-    rclone completion bash
+` + "```sh" + `
+rclone completion bash
+` + "```" + `
 
 the generated script will be written to
 
-    /etc/bash_completion.d/rclone
+` + "```sh" + `
+/etc/bash_completion.d/rclone
+` + "```" + `
 
 and so rclone will probably need to be run as root, or with sudo.
 
 If you supply a path to a file as the command line argument, then
 the generated script will be written to that file, in which case
 you should not need root privileges.
 
@@ -39,11 +43,12 @@ can logout and login again to use the autocompletion script.
 
 Alternatively, you can source the script directly
 
-    . /path/to/my_bash_completion_scripts/rclone
+` + "```sh" + `
+. /path/to/my_bash_completion_scripts/rclone
+` + "```" + `
 
 and the autocompletion functionality will be added to your
-current shell.
-`,
+current shell.`,
     Run: func(command *cobra.Command, args []string) {
         cmd.CheckArgs(0, 1, command, args)
         out := "/etc/bash_completion.d/rclone"
@@ -21,18 +21,21 @@ var fishCommandDefinition = &cobra.Command{
 This writes to /etc/fish/completions/rclone.fish by default so will
 probably need to be run with sudo or as root, e.g.
 
-    sudo rclone completion fish
+` + "```sh" + `
+sudo rclone completion fish
+` + "```" + `
 
 Logout and login again to use the autocompletion scripts, or source
 them directly
 
-    . /etc/fish/completions/rclone.fish
+` + "```sh" + `
+. /etc/fish/completions/rclone.fish
+` + "```" + `
 
 If you supply a command line argument the script will be written
 there.
 
-If output_file is "-", then the output will be written to stdout.
-`,
+If output_file is "-", then the output will be written to stdout.`,
     Run: func(command *cobra.Command, args []string) {
         cmd.CheckArgs(0, 1, command, args)
         out := "/etc/fish/completions/rclone.fish"
@@ -20,13 +20,14 @@ var powershellCommandDefinition = &cobra.Command{
 
 To load completions in your current shell session:
 
-    rclone completion powershell | Out-String | Invoke-Expression
+` + "```sh" + `
+rclone completion powershell | Out-String | Invoke-Expression
+` + "```" + `
 
 To load completions for every new session, add the output of the above command
 to your powershell profile.
 
-If output_file is "-" or missing, then the output will be written to stdout.
-`,
+If output_file is "-" or missing, then the output will be written to stdout.`,
     Run: func(command *cobra.Command, args []string) {
         cmd.CheckArgs(0, 1, command, args)
         if len(args) == 0 || (len(args) > 0 && args[0] == "-") {
@@ -21,18 +21,21 @@ var zshCommandDefinition = &cobra.Command{
 This writes to /usr/share/zsh/vendor-completions/_rclone by default so will
 probably need to be run with sudo or as root, e.g.
 
-    sudo rclone completion zsh
+` + "```sh" + `
+sudo rclone completion zsh
+` + "```" + `
 
 Logout and login again to use the autocompletion scripts, or source
 them directly
 
-    autoload -U compinit && compinit
+` + "```sh" + `
+autoload -U compinit && compinit
+` + "```" + `
 
 If you supply a command line argument the script will be written
 there.
 
-If output_file is "-", then the output will be written to stdout.
-`,
+If output_file is "-", then the output will be written to stdout.`,
     Run: func(command *cobra.Command, args []string) {
         cmd.CheckArgs(0, 1, command, args)
         out := "/usr/share/zsh/vendor-completions/_rclone"
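All four completion subcommands accept an output path, which avoids needing root. A sketch for bash; the chosen file location is an assumption, not taken from the docs:

```sh
# Write the script somewhere user-writable, then source it directly
rclone completion bash ~/rclone-completion.bash
. ~/rclone-completion.bash
```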
@@ -184,7 +184,12 @@ rclone.org website.`,
             return fmt.Errorf("internal error: failed to find cut points: startCut = %d, endCut = %d", startCut, endCut)
         }
         if endCut >= 0 {
-            doc = doc[:endCut] + "### See Also" + doc[endCut+12:]
+            doc = doc[:endCut] + `### See Also
+
+<!-- markdownlint-capture -->
+<!-- markdownlint-disable ul-style line-length -->` + doc[endCut+12:] + `
+<!-- markdownlint-restore -->
+`
         }
     } else {
         var out strings.Builder
@@ -196,7 +201,7 @@ rclone.org website.`,
         if group.Flags.HasFlags() {
             _, _ = fmt.Fprintf(&out, "#### %s Options\n\n", group.Name)
             _, _ = fmt.Fprintf(&out, "%s\n\n", group.Help)
-            _, _ = out.WriteString("```\n")
+            _, _ = out.WriteString("```text\n")
             _, _ = out.WriteString(group.Flags.FlagUsages())
             _, _ = out.WriteString("```\n\n")
         }
@@ -204,7 +209,12 @@ rclone.org website.`,
     } else {
         _, _ = out.WriteString("See the [global flags page](/flags/) for global options not listed here.\n\n")
     }
-    doc = doc[:startCut] + out.String() + "### See Also" + doc[endCut+12:]
+    doc = doc[:startCut] + out.String() + `### See Also
+
+<!-- markdownlint-capture -->
+<!-- markdownlint-disable ul-style line-length -->` + doc[endCut+12:] + `
+<!-- markdownlint-restore -->
+`
 }
 
 // outdent all the titles by one
@@ -539,7 +539,7 @@ var command = &cobra.Command{
     Aliases: []string{uniqueCommandName},
     Use:     subcommandName,
     Short:   "Speaks with git-annex over stdin/stdout.",
-    Long:    gitannexHelp,
+    Long:    strings.TrimSpace(gitannexHelp),
     Annotations: map[string]string{
         "versionIntroduced": "v1.67.0",
     },
@@ -4,8 +4,7 @@ users.
 
 [git-annex]: https://git-annex.branchable.com/
 
-Installation on Linux
----------------------
+### Installation on Linux
 
 1. Skip this step if your version of git-annex is [10.20240430] or newer.
    Otherwise, you must create a symlink somewhere on your PATH with a particular
@@ -103,14 +103,17 @@ as a relative path).
 
 Run without a hash to see the list of all supported hashes, e.g.
 
-    $ rclone hashsum
-` + hash.HelpString(4) + `
+` + "```sh" + `
+$ rclone hashsum
+` + hash.HelpString(0) + "```" + `
 
 Then
 
-    $ rclone hashsum MD5 remote:path
+` + "```sh" + `
+rclone hashsum MD5 remote:path
+` + "```" + `
 
-Note that hash names are case insensitive and values are output in lower case.
-`,
+Note that hash names are case insensitive and values are output in lower case.`,
     Annotations: map[string]string{
         "versionIntroduced": "v1.41",
         "groups":            "Filter,Listing",
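As the help text notes, hash names are case insensitive. A two-line sketch with a placeholder remote:

```sh
rclone hashsum                  # list the supported hash types
rclone hashsum md5 remote:path  # same as MD5
```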
@@ -30,9 +30,7 @@ var Root = &cobra.Command{
 mounting them, listing them in lots of different ways.
 
 See the home page (https://rclone.org/) for installation, usage,
-documentation, changelog and configuration walkthroughs.
-
-`,
+documentation, changelog and configuration walkthroughs.`,
     PersistentPostRun: func(cmd *cobra.Command, args []string) {
         fs.Debugf("rclone", "Version %q finishing with parameters %q", fs.Version, os.Args)
         atexit.Run()
@@ -29,10 +29,12 @@ var commandDefinition = &cobra.Command{
     Short: `Generate public link to file/folder.`,
     Long: `Create, retrieve or remove a public link to the given file or folder.
 
-    rclone link remote:path/to/file
-    rclone link remote:path/to/folder/
-    rclone link --unlink remote:path/to/folder/
-    rclone link --expire 1d remote:path/to/file
+` + "```sh" + `
+rclone link remote:path/to/file
+rclone link remote:path/to/folder/
+rclone link --unlink remote:path/to/folder/
+rclone link --expire 1d remote:path/to/file
+` + "```" + `
 
 If you supply the --expire flag, it will set the expiration time
 otherwise it will use the default (100 years). **Note** not all
@@ -45,9 +47,8 @@ don't will just ignore it.
 
 If successful, the last line of the output will contain the
 link. Exact capabilities depend on the remote, but the link will
-always by default be created with the least constraints – e.g. no
-expiry, no password protection, accessible without account.
-`,
+always by default be created with the least constraints - e.g. no
+expiry, no password protection, accessible without account.`,
     Annotations: map[string]string{
         "versionIntroduced": "v1.41",
     },
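The flag examples above cover the full lifecycle of a shared link. A sketch using only the documented invocations:

```sh
# Create a time-limited link, then revoke a link when finished with it
rclone link --expire 1d remote:path/to/file
rclone link --unlink remote:path/to/folder/
```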
@@ -114,8 +114,7 @@ func newLess(orderBy string) (less lessFn, err error) {
 var commandDefinition = &cobra.Command{
     Use:   "listremotes [<filter>]",
     Short: `List all the remotes in the config file and defined in environment variables.`,
-    Long: `
-Lists all the available remotes from the config file, or the remotes matching
+    Long: `Lists all the available remotes from the config file, or the remotes matching
 an optional filter.
 
 Prints the result in human-readable format by default, and as a simple list of
@@ -126,8 +125,7 @@ the source (file or environment).
 
 Result can be filtered by a filter argument which applies to all attributes,
 and/or filter flags specific for each attribute. The values must be specified
-according to regular rclone filtering pattern syntax.
-`,
+according to regular rclone filtering pattern syntax.`,
     Annotations: map[string]string{
         "versionIntroduced": "v1.34",
     },

cmd/ls/ls.go (14 changed lines)
@@ -21,13 +21,15 @@ var commandDefinition = &cobra.Command{
     Long: `Lists the objects in the source path to standard output in a human
 readable format with size and path. Recurses by default.
 
-Eg
+E.g.
 
-    $ rclone ls swift:bucket
-    60295 bevajer5jef
-    90613 canole
-    94467 diwogej7
-    37600 fubuwic
+` + "```sh" + `
+$ rclone ls swift:bucket
+60295 bevajer5jef
+90613 canole
+94467 diwogej7
+37600 fubuwic
+` + "```" + `
 
 ` + lshelp.Help,
     Annotations: map[string]string{
@@ -7,16 +7,15 @@ import (
 
 // Help describes the common help for all the list commands
 // Warning! "|" will be replaced by backticks below
-var Help = strings.ReplaceAll(`
-Any of the filtering options can be applied to this command.
+var Help = strings.ReplaceAll(`Any of the filtering options can be applied to this command.
 
 There are several related list commands
 
-* |ls| to list size and path of objects only
-* |lsl| to list modification time, size and path of objects only
-* |lsd| to list directories only
-* |lsf| to list objects and directories in easy to parse format
-* |lsjson| to list objects and directories in JSON format
+- |ls| to list size and path of objects only
+- |lsl| to list modification time, size and path of objects only
+- |lsd| to list directories only
+- |lsf| to list objects and directories in easy to parse format
+- |lsjson| to list objects and directories in JSON format
 
 |ls|,|lsl|,|lsd| are designed to be human-readable.
 |lsf| is designed to be human and machine-readable.
@@ -24,9 +23,9 @@ There are several related list commands
 
 Note that |ls| and |lsl| recurse by default - use |--max-depth 1| to stop the recursion.
 
-The other list commands |lsd|,|lsf|,|lsjson| do not recurse by default - use |-R| to make them recurse.
+The other list commands |lsd|,|lsf|,|lsjson| do not recurse by default -
+use |-R| to make them recurse.
 
 Listing a nonexistent directory will produce an error except for
 remotes which can't have empty directories (e.g. s3, swift, or gcs -
-the bucket-based remotes).
-`, "|", "`")
+the bucket-based remotes).`, "|", "`")
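The recursion defaults described above translate directly into invocations (the remote path is a placeholder):

```sh
# ls and lsl recurse by default; cap the depth explicitly
rclone ls --max-depth 1 remote:path
# lsd, lsf and lsjson do not recurse; opt in with -R
rclone lsf -R remote:path
```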
@@ -32,18 +32,22 @@ recurse by default. Use the ` + "`-R`" + ` flag to recurse.
 This command lists the total size of the directory (if known, -1 if
 not), the modification time (if known, the current time if not), the
 number of objects in the directory (if known, -1 if not) and the name
-of the directory, Eg
+of the directory, E.g.

+` + "```sh" + `
 $ rclone lsd swift:
 494000 2018-04-26 08:43:20 10000 10000files
 65 2018-04-26 08:43:20 1 1File
+` + "```" + `

 Or

+` + "```sh" + `
 $ rclone lsd drive:test
 -1 2016-10-17 17:41:53 -1 1000files
 -1 2017-01-03 14:40:54 -1 2500files
 -1 2017-07-08 14:39:28 -1 4000files
+` + "```" + `

 If you just want the directory names use ` + "`rclone lsf --dirs-only`" + `.

131 cmd/lsf/lsf.go
@@ -52,41 +52,47 @@ standard output in a form which is easy to parse by scripts. By
 default this will just be the names of the objects and directories,
 one per line. The directories will have a / suffix.

-Eg
+E.g.

+` + "```sh" + `
 $ rclone lsf swift:bucket
 bevajer5jef
 canole
 diwogej7
 ferejej3gux/
 fubuwic
+` + "```" + `

 Use the ` + "`--format`" + ` option to control what gets listed. By default this
 is just the path, but you can use these parameters to control the
 output:

+` + "```text" + `
 p - path
 s - size
 t - modification time
 h - hash
 i - ID of object
 o - Original ID of underlying object
 m - MimeType of object if known
 e - encrypted name
 T - tier of storage if known, e.g. "Hot" or "Cool"
 M - Metadata of object in JSON blob format, eg {"key":"value"}
+` + "```" + `

 So if you wanted the path, size and modification time, you would use
 ` + "`--format \"pst\"`, or maybe `--format \"tsp\"`" + ` to put the path last.

-Eg
+E.g.

+` + "```sh" + `
 $ rclone lsf --format "tsp" swift:bucket
 2016-06-25 18:55:41;60295;bevajer5jef
 2016-06-25 18:55:43;90613;canole
 2016-06-25 18:55:43;94467;diwogej7
 2018-04-26 08:50:45;0;ferejej3gux/
 2016-06-25 18:55:40;37600;fubuwic
+` + "```" + `

 If you specify "h" in the format you will get the MD5 hash by default,
 use the ` + "`--hash`" + ` flag to change which hash you want. Note that this
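Since `lsf` emits one entry per line, its output pipes cleanly into ordinary shell tools. A rough sketch (assuming a configured `remote:bucket`):

```sh
# Count entries grouped by their first path segment
rclone lsf -R --files-only remote:bucket | awk -F/ '{print $1}' | sort | uniq -c
```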
@@ -97,16 +103,20 @@ type.

 For example, to emulate the md5sum command you can use

+` + "```sh" + `
 rclone lsf -R --hash MD5 --format hp --separator " " --files-only .
+` + "```" + `

-Eg
+E.g.

+` + "```sh" + `
 $ rclone lsf -R --hash MD5 --format hp --separator " " --files-only swift:bucket
 7908e352297f0f530b84a756f188baa3 bevajer5jef
 cd65ac234e6fea5925974a51cdd865cc canole
 03b5341b4f234b9d984d03ad076bae91 diwogej7
 8fd37c3810dd660778137ac3a66cc06d fubuwic
 99713e14a4c4ff553acaf1930fad985b gixacuh7ku
+` + "```" + `

 (Though "rclone md5sum ." is an easier way of typing this.)

@@ -114,24 +124,28 @@ By default the separator is ";" this can be changed with the
 ` + "`--separator`" + ` flag. Note that separators aren't escaped in the path so
 putting it last is a good strategy.

-Eg
+E.g.

+` + "```sh" + `
 $ rclone lsf --separator "," --format "tshp" swift:bucket
 2016-06-25 18:55:41,60295,7908e352297f0f530b84a756f188baa3,bevajer5jef
 2016-06-25 18:55:43,90613,cd65ac234e6fea5925974a51cdd865cc,canole
 2016-06-25 18:55:43,94467,03b5341b4f234b9d984d03ad076bae91,diwogej7
 2018-04-26 08:52:53,0,,ferejej3gux/
 2016-06-25 18:55:40,37600,8fd37c3810dd660778137ac3a66cc06d,fubuwic
+` + "```" + `

 You can output in CSV standard format. This will escape things in "
-if they contain ,
+if they contain,

-Eg
+E.g.

+` + "```sh" + `
 $ rclone lsf --csv --files-only --format ps remote:path
 test.log,22355
 test.sh,449
 "this file contains a comma, in the file name.txt",6
+` + "```" + `

 Note that the ` + "`--absolute`" + ` parameter is useful for making lists of files
 to pass to an rclone copy with the ` + "`--files-from-raw`" + ` flag.
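A rough sketch of consuming that CSV listing downstream (illustrative only; `remote:path` is a placeholder):

```sh
# Total the size column from the path,size CSV above
# (size is the last field, so a quoted comma in the path does not shift it)
rclone lsf --csv --files-only --format ps remote:path | awk -F, '{sum += $NF} END {print sum}'
```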
@@ -139,20 +153,25 @@ to pass to an rclone copy with the ` + "`--files-from-raw`" + ` flag.
 For example, to find all the files modified within one day and copy
 those only (without traversing the whole directory structure):

+` + "```sh" + `
 rclone lsf --absolute --files-only --max-age 1d /path/to/local > new_files
 rclone copy --files-from-raw new_files /path/to/local remote:path
+` + "```" + `

 The default time format is ` + "`'2006-01-02 15:04:05'`" + `.
-[Other formats](https://pkg.go.dev/time#pkg-constants) can be specified with the ` + "`--time-format`" + ` flag.
-Examples:
+[Other formats](https://pkg.go.dev/time#pkg-constants) can be specified with
+the ` + "`--time-format`" + ` flag. Examples:

+` + "```sh" + `
 rclone lsf remote:path --format pt --time-format 'Jan 2, 2006 at 3:04pm (MST)'
 rclone lsf remote:path --format pt --time-format '2006-01-02 15:04:05.000000000'
 rclone lsf remote:path --format pt --time-format '2006-01-02T15:04:05.999999999Z07:00'
 rclone lsf remote:path --format pt --time-format RFC3339
 rclone lsf remote:path --format pt --time-format DateOnly
 rclone lsf remote:path --format pt --time-format max
+` + "```" + `

-` + "`--time-format max`" + ` will automatically truncate ` + "'`2006-01-02 15:04:05.000000000`'" + `
+` + "`--time-format max`" + ` will automatically truncate ` + "`2006-01-02 15:04:05.000000000`" + `
 to the maximum precision supported by the remote.

 ` + lshelp.Help,
@@ -43,25 +43,27 @@ var commandDefinition = &cobra.Command{

 The output is an array of Items, where each Item looks like this:

+` + "```json" + `
 {
 "Hashes" : {
 "SHA-1" : "f572d396fae9206628714fb2ce00f72e94f2258f",
 "MD5" : "b1946ac92492d2347c6235b4d2611184",
 "DropboxHash" : "ecb65bb98f9d905b70458986c39fcbad7715e5f2fcc3b1f07767d7c83e2438cc"
 },
 "ID": "y2djkhiujf83u33",
 "OrigID": "UYOJVTUW00Q1RzTDA",
 "IsBucket" : false,
 "IsDir" : false,
 "MimeType" : "application/octet-stream",
 "ModTime" : "2017-05-31T16:15:57.034468261+01:00",
 "Name" : "file.txt",
 "Encrypted" : "v0qpsdq8anpci8n929v3uu9338",
 "EncryptedPath" : "kja9098349023498/v0qpsdq8anpci8n929v3uu9338",
 "Path" : "full/path/goes/here/file.txt",
 "Size" : 6,
 "Tier" : "hot",
 }
+` + "```" + `

 The exact set of properties included depends on the backend:

@@ -118,6 +120,7 @@ will be shown ("2017-05-31T16:15:57+01:00").
 The whole output can be processed as a JSON blob, or alternatively it
 can be processed line by line as each item is written on individual lines
 (except with ` + "`--stat`" + `).

 ` + lshelp.Help,
 Annotations: map[string]string{
 "versionIntroduced": "v1.37",
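Because each Item is plain JSON, the listing pairs naturally with `jq`. An illustrative one-liner (assuming `jq` is installed and `remote:path` is a configured remote):

```sh
# Print the paths of files only, streaming the Items array through jq
rclone lsjson -R remote:path | jq -r '.[] | select(.IsDir | not) | .Path'
```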
@@ -21,13 +21,15 @@ var commandDefinition = &cobra.Command{
 Long: `Lists the objects in the source path to standard output in a human
 readable format with modification time, size and path. Recurses by default.

-Eg
+E.g.

+` + "```sh" + `
 $ rclone lsl swift:bucket
 60295 2016-06-25 18:55:41.062626927 bevajer5jef
 90613 2016-06-25 18:55:43.302607074 canole
 94467 2016-06-25 18:55:43.046609333 diwogej7
 37600 2016-06-25 18:55:40.814629136 fubuwic
+` + "```" + `

 ` + lshelp.Help,
 Annotations: map[string]string{
@@ -35,8 +35,7 @@ to running ` + "`rclone hashsum MD5 remote:path`" + `.
 This command can also hash data received on standard input (stdin),
 by not passing a remote:path, or by passing a hyphen as remote:path
 when there is data to read (if not, the hyphen will be treated literally,
-as a relative path).
-`,
+as a relative path).`,
 Annotations: map[string]string{
 "versionIntroduced": "v1.02",
 "groups": "Filter,Listing",
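Both invocation styles described in that help text, side by side (a sketch; `remote:path` is a placeholder):

```sh
# Hash the objects under a remote path...
rclone md5sum remote:path
# ...or hash data arriving on stdin via the hyphen form
echo "hello world" | rclone md5sum -
```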
@@ -130,6 +130,12 @@ func (d *Dir) ReadDirAll(ctx context.Context) (dirents []fuse.Dirent, err error)
        if node.IsDir() {
            dirent.Type = fuse.DT_Dir
        }
+       switch node := node.(type) {
+       case *vfs.File:
+           if node.IsSymlink() {
+               dirent.Type = fuse.DT_Link
+           }
+       }
        dirents = append(dirents, dirent)
    }
    itemsRead = len(dirents)
@@ -273,7 +273,7 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm
    Use:    commandName + " remote:path /path/to/mountpoint",
    Hidden: hidden,
    Short:  `Mount the remote as file system on a mountpoint.`,
-   Long:   help(commandName) + vfs.Help(),
+   Long:   help(commandName) + strings.TrimSpace(vfs.Help()),
    Annotations: map[string]string{
        "versionIntroduced": "v1.33",
        "groups":            "Filter",
@@ -396,34 +396,14 @@ func (m *MountPoint) Wait() error {
            if err := m.Unmount(); err != nil {
                fs.Errorf(m.MountPoint, "Failed to unmount: %v", err)
            } else {
-               fs.Errorf(m.MountPoint, "Unmounted rclone mount")
+               fs.Logf(m.MountPoint, "Unmounted rclone mount")
            }
        })
    }
    fnHandle := atexit.Register(finalise)
    defer atexit.Unregister(fnHandle)

-   // Reload VFS cache on SIGHUP
-   sigHup := make(chan os.Signal, 1)
-   NotifyOnSigHup(sigHup)
-   var err error
-
-   waiting := true
-   for waiting {
-       select {
-       // umount triggered outside the app
-       case err = <-m.ErrChan:
-           waiting = false
-       // user sent SIGHUP to clear the cache
-       case <-sigHup:
-           root, err := m.VFS.Root()
-           if err != nil {
-               fs.Errorf(m.VFS.Fs(), "Error reading root: %v", err)
-           } else {
-               root.ForgetAll()
-           }
-       }
-   }
+   err := <-m.ErrChan

    finalise()
@@ -1,7 +1,7 @@
 Rclone @ allows Linux, FreeBSD, macOS and Windows to
 mount any of Rclone's cloud storage systems as a file system with FUSE.

 First set up your remote using `rclone config`. Check it works with `rclone ls` etc.

 On Linux and macOS, you can run mount in either foreground or background (aka
 daemon) mode. Mount runs in foreground mode by default. Use the `--daemon` flag
@@ -16,7 +16,9 @@ mount, waits until success or timeout and exits with appropriate code
 On Linux/macOS/FreeBSD start the mount like this, where `/path/to/local/mount`
 is an **empty** **existing** directory:

+```sh
 rclone @ remote:path/to/files /path/to/local/mount
+```

 On Windows you can start a mount in different ways. See [below](#mounting-modes-on-windows)
 for details. If foreground mount is used interactively from a console window,
@@ -26,26 +28,30 @@ used to work with the mount until rclone is interrupted e.g. by pressing Ctrl-C.
 The following examples will mount to an automatically assigned drive,
 to specific drive letter `X:`, to path `C:\path\parent\mount`
 (where parent directory or drive must exist, and mount must **not** exist,
-and is not supported when [mounting as a network drive](#mounting-modes-on-windows)), and
-the last example will mount as network share `\\cloud\remote` and map it to an
+and is not supported when [mounting as a network drive](#mounting-modes-on-windows)),
+and the last example will mount as network share `\\cloud\remote` and map it to an
 automatically assigned drive:

+```sh
 rclone @ remote:path/to/files *
 rclone @ remote:path/to/files X:
 rclone @ remote:path/to/files C:\path\parent\mount
 rclone @ remote:path/to/files \\cloud\remote
+```

 When the program ends while in foreground mode, either via Ctrl+C or receiving
 a SIGINT or SIGTERM signal, the mount should be automatically stopped.

 When running in background mode the user will have to stop the mount manually:

+```sh
 # Linux
 fusermount -u /path/to/local/mount
 #... or on some systems
 fusermount3 -u /path/to/local/mount
 # OS X or Linux when using nfsmount
 umount /path/to/local/mount
+```

 The umount operation can fail, for example when the mountpoint is busy.
 When that happens, it is the user's responsibility to stop the mount manually.
@@ -80,20 +86,22 @@ thumbnails for image and video files on network drives.

 In most cases, rclone will mount the remote as a normal, fixed disk drive by default.
 However, you can also choose to mount it as a remote network drive, often described
-as a network share. If you mount an rclone remote using the default, fixed drive mode
-and experience unexpected program errors, freezes or other issues, consider mounting
-as a network drive instead.
+as a network share. If you mount an rclone remote using the default, fixed drive
+mode and experience unexpected program errors, freezes or other issues, consider
+mounting as a network drive instead.

 When mounting as a fixed disk drive you can either mount to an unused drive letter,
 or to a path representing a **nonexistent** subdirectory of an **existing** parent
 directory or drive. Using the special value `*` will tell rclone to
-automatically assign the next available drive letter, starting with Z: and moving backward.
-Examples:
+automatically assign the next available drive letter, starting with Z: and moving
+backward. Examples:

+```sh
 rclone @ remote:path/to/files *
 rclone @ remote:path/to/files X:
 rclone @ remote:path/to/files C:\path\parent\mount
 rclone @ remote:path/to/files X:
+```

 Option `--volname` can be used to set a custom volume name for the mounted
 file system. The default is to use the remote name and path.
@@ -103,24 +111,28 @@ to your @ command. Mounting to a directory path is not supported in
 this mode, it is a limitation Windows imposes on junctions, so the remote must always
 be mounted to a drive letter.

+```sh
 rclone @ remote:path/to/files X: --network-mode
+```

-A volume name specified with `--volname` will be used to create the network share path.
-A complete UNC path, such as `\\cloud\remote`, optionally with path
+A volume name specified with `--volname` will be used to create the network share
+path. A complete UNC path, such as `\\cloud\remote`, optionally with path
 `\\cloud\remote\madeup\path`, will be used as is. Any other
 string will be used as the share part, after a default prefix `\\server\`.
 If no volume name is specified then `\\server\share` will be used.
-You must make sure the volume name is unique when you are mounting more than one drive,
-or else the mount command will fail. The share name will treated as the volume label for
-the mapped drive, shown in Windows Explorer etc, while the complete
+You must make sure the volume name is unique when you are mounting more than one
+drive, or else the mount command will fail. The share name will be treated as the
+volume label for the mapped drive, shown in Windows Explorer etc, while the complete
 `\\server\share` will be reported as the remote UNC path by
 `net use` etc, just like a normal network drive mapping.

 If you specify a full network share UNC path with `--volname`, this will implicitly
 set the `--network-mode` option, so the following two examples have same result:

+```sh
 rclone @ remote:path/to/files X: --network-mode
 rclone @ remote:path/to/files X: --volname \\server\share
+```

 You may also specify the network share UNC path as the mountpoint itself. Then rclone
 will automatically assign a drive letter, same as with `*` and use that as
@@ -128,15 +140,16 @@ mountpoint, and instead use the UNC path specified as the volume name, as if it
 specified with the `--volname` option. This will also implicitly set
 the `--network-mode` option. This means the following two examples have same result:

+```sh
 rclone @ remote:path/to/files \\cloud\remote
 rclone @ remote:path/to/files * --volname \\cloud\remote
+```

 There is yet another way to enable network mode, and to set the share path,
 and that is to pass the "native" libfuse/WinFsp option directly:
 `--fuse-flag --VolumePrefix=\server\share`. Note that the path
 must be with just a single backslash prefix in this case.


 *Note:* In previous versions of rclone this was the only supported method.

 [Read more about drive mapping](https://en.wikipedia.org/wiki/Drive_mapping)
@@ -149,11 +162,11 @@ The FUSE emulation layer on Windows must convert between the POSIX-based
 permission model used in FUSE, and the permission model used in Windows,
 based on access-control lists (ACL).

-The mounted filesystem will normally get three entries in its access-control list (ACL),
-representing permissions for the POSIX permission scopes: Owner, group and others.
-By default, the owner and group will be taken from the current user, and the built-in
-group "Everyone" will be used to represent others. The user/group can be customized
-with FUSE options "UserName" and "GroupName",
+The mounted filesystem will normally get three entries in its access-control list
+(ACL), representing permissions for the POSIX permission scopes: Owner, group and
+others. By default, the owner and group will be taken from the current user, and
+the built-in group "Everyone" will be used to represent others. The user/group can
+be customized with FUSE options "UserName" and "GroupName",
 e.g. `-o UserName=user123 -o GroupName="Authenticated Users"`.
 The permissions on each entry will be set according to [options](#options)
 `--dir-perms` and `--file-perms`, which takes a value in traditional Unix
@@ -253,58 +266,63 @@ does not suffer from the same limitations.

 ### Mounting on macOS

-Mounting on macOS can be done either via [built-in NFS server](/commands/rclone_serve_nfs/), [macFUSE](https://osxfuse.github.io/)
-(also known as osxfuse) or [FUSE-T](https://www.fuse-t.org/). macFUSE is a traditional
-FUSE driver utilizing a macOS kernel extension (kext). FUSE-T is an alternative FUSE system
-which "mounts" via an NFSv4 local server.
+Mounting on macOS can be done either via [built-in NFS server](/commands/rclone_serve_nfs/),
+[macFUSE](https://osxfuse.github.io/) (also known as osxfuse) or
+[FUSE-T](https://www.fuse-t.org/). macFUSE is a traditional FUSE driver utilizing
+a macOS kernel extension (kext). FUSE-T is an alternative FUSE system which
+"mounts" via an NFSv4 local server.

-##### Unicode Normalization
+#### Unicode Normalization

 It is highly recommended to keep the default of `--no-unicode-normalization=false`
 for all `mount` and `serve` commands on macOS. For details, see [vfs-case-sensitivity](https://rclone.org/commands/rclone_mount/#vfs-case-sensitivity).

 #### NFS mount

-This method spins up an NFS server using [serve nfs](/commands/rclone_serve_nfs/) command and mounts
-it to the specified mountpoint. If you run this in background mode using |--daemon|, you will need to
-send SIGTERM signal to the rclone process using |kill| command to stop the mount.
+This method spins up an NFS server using [serve nfs](/commands/rclone_serve_nfs/)
+command and mounts it to the specified mountpoint. If you run this in background
+mode using |--daemon|, you will need to send SIGTERM signal to the rclone process
+using |kill| command to stop the mount.

-Note that `--nfs-cache-handle-limit` controls the maximum number of cached file handles stored by the `nfsmount` caching handler.
-This should not be set too low or you may experience errors when trying to access files. The default is 1000000,
+Note that `--nfs-cache-handle-limit` controls the maximum number of cached file
+handles stored by the `nfsmount` caching handler. This should not be set too low
+or you may experience errors when trying to access files. The default is 1000000,
 but consider lowering this limit if the server's system resource usage causes problems.

 #### macFUSE Notes

-If installing macFUSE using [dmg packages](https://github.com/osxfuse/osxfuse/releases) from
-the website, rclone will locate the macFUSE libraries without any further intervention.
-If however, macFUSE is installed using the [macports](https://www.macports.org/) package manager,
-the following addition steps are required.
+If installing macFUSE using [dmg packages](https://github.com/osxfuse/osxfuse/releases)
+from the website, rclone will locate the macFUSE libraries without any further intervention.
+If however, macFUSE is installed using the [macports](https://www.macports.org/)
+package manager, the following additional steps are required.

+```sh
 sudo mkdir /usr/local/lib
 cd /usr/local/lib
 sudo ln -s /opt/local/lib/libfuse.2.dylib
+```

 #### FUSE-T Limitations, Caveats, and Notes

-There are some limitations, caveats, and notes about how it works. These are current as
-of FUSE-T version 1.0.14.
+There are some limitations, caveats, and notes about how it works. These are
+current as of FUSE-T version 1.0.14.

 ##### ModTime update on read

 As per the [FUSE-T wiki](https://github.com/macos-fuse-t/fuse-t/wiki#caveats):

 > File access and modification times cannot be set separately as it seems to be an
 > issue with the NFS client which always modifies both. Can be reproduced with
 > 'touch -m' and 'touch -a' commands

-This means that viewing files with various tools, notably macOS Finder, will cause rlcone
-to update the modification time of the file. This may make rclone upload a full new copy
-of the file.
+This means that viewing files with various tools, notably macOS Finder, will cause
+rclone to update the modification time of the file. This may make rclone upload a
+full new copy of the file.

 ##### Read Only mounts

-When mounting with `--read-only`, attempts to write to files will fail *silently* as
-opposed to with a clear warning as in macFUSE.
+When mounting with `--read-only`, attempts to write to files will fail *silently*
+as opposed to with a clear warning as in macFUSE.

 ### Limitations

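Putting those pieces together, a background NFS mount on macOS and its shutdown might look like this (illustrative; `remote:` and the mountpoint are placeholders, and the `pkill` pattern is deliberately loose):

```sh
# Background NFS mount with a lowered handle-cache limit
rclone nfsmount remote: /path/to/local/mount --daemon --nfs-cache-handle-limit 500000
# Stop it later by sending SIGTERM to the rclone process
pkill -TERM -f 'rclone nfsmount'
```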
@@ -316,10 +334,10 @@ See the [VFS File Caching](#vfs-file-caching) section for more info.
 When using NFS mount on macOS, if you don't specify |--vfs-cache-mode|
 the mount point will be read-only.

-The bucket-based remotes (e.g. Swift, S3, Google Compute Storage, B2)
-do not support the concept of empty directories, so empty
-directories will have a tendency to disappear once they fall out of
-the directory cache.
+Bucket-based remotes - Azure Blob, Swift, S3, Google Cloud Storage and B2 -
+can't store empty directories. Of these, only Azure Blob, Google Cloud Storage
+and S3 can preserve them when you add `--xxx-directory_markers`; otherwise,
+empty directories will vanish once they drop out of the directory cache.

 When `rclone mount` is invoked on Unix with `--daemon` flag, the main rclone
 program will wait for the background mount to become ready or until the timeout
@@ -405,12 +423,14 @@ helper you should symlink rclone binary to `/sbin/mount.rclone` and optionally
 rclone will detect it and translate command-line arguments appropriately.

 Now you can run classic mounts like this:
-```
+
+```sh
 mount sftp1:subdir /mnt/data -t rclone -o vfs_cache_mode=writes,sftp_key_file=/path/to/pem
 ```

 or create systemd mount units:
-```
+
+```ini
 # /etc/systemd/system/mnt-data.mount
 [Unit]
 Description=Mount for /mnt/data
@@ -422,7 +442,8 @@ Options=rw,_netdev,allow_other,args2env,vfs-cache-mode=writes,config=/etc/rclone
 ```

 optionally accompanied by systemd automount unit
-```
+
+```ini
 # /etc/systemd/system/mnt-data.automount
 [Unit]
 Description=AutoMount for /mnt/data
@@ -434,7 +455,8 @@ WantedBy=multi-user.target
 ```

 or add in `/etc/fstab` a line like
-```
+
+```sh
 sftp1:subdir /mnt/data rclone rw,noauto,nofail,_netdev,x-systemd.automount,args2env,vfs_cache_mode=writes,config=/etc/rclone.conf,cache_dir=/var/cache/rclone 0 0
 ```

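Once unit files like the ones above are installed, they are activated through the usual systemd workflow (a sketch, assuming the unit names shown above):

```sh
# Pick up the new unit files, then start the automount
sudo systemctl daemon-reload
sudo systemctl enable --now mnt-data.automount
```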
@@ -65,14 +65,18 @@ This takes the following parameters:

 Example:

+` + "```sh" + `
 rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint
 rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint mountType=mount
 rclone rc mount/mount fs=TestDrive: mountPoint=/mnt/tmp vfsOpt='{"CacheMode": 2}' mountOpt='{"AllowOther": true}'
+` + "```" + `

-The vfsOpt are as described in options/get and can be seen in the the
+The vfsOpt are as described in options/get and can be seen in the
 "vfs" section when running and the mountOpt can be seen in the "mount" section:

+` + "```sh" + `
 rclone rc options/get
+` + "```" + `
 `,
 })
 }
@@ -64,7 +64,7 @@ the backend supports it. If metadata syncing is required then use the
 |--metadata| flag.

 Note that the modification time and metadata for the root directory
-will **not** be synced. See https://github.com/rclone/rclone/issues/7652
+will **not** be synced. See <https://github.com/rclone/rclone/issues/7652>
 for more info.

 **Important**: Since this can cause data loss, test first with the
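That warning leads into rclone's standard advice to rehearse first, for example (with placeholder remotes):

```sh
# Preview what a sync would change without touching the destination
rclone sync source:path dest:path --dry-run
```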
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user