Mirror of https://github.com/rclone/rclone.git (synced 2026-01-28 07:13:39 +00:00)

Compare commits: press-new-... -> v1.53-stab (148 commits)
Commits in this comparison (SHA1):

c3dfa7d9a3 1936847548 89b4ccbbfa 3c985a436b 703f6002dd 7de13fc426
c2f6d48d45 9d9999d17b 15f31d3ca4 0ea51f74a1 6cd360233d 687d2d495b
50a107a5f3 2ed2861d09 e2cd449c62 98dbbc78ab 53c4191350 e4ece15e68
fbf46908bf a96539eeec 86cd5230d7 716019cf7d c59fe40795 ecd60f2430
d2a5640c3a 8d3acfb38c 200de46249 cee618bc03 db2aa771dc 55bd60019e
c8b11d27e1 4c215cc81e 4df333255a 843d684568 46ea3d93b5 89f2d43f17
cfc5d76fca 0af493f693 51b3ee9a97 6a4b49479d 4b03ee0f99 2f6231f7ac
c0e6f54f01 def7b77d0f 51b18a4a26 7cb76f9054 00ccc93482 f9fe494d93
4a0c266787 f48d0a518c 99ff594773 6c140705e3 e76963a971 43ad7b10a2
f6970c65dd 6012179c67 3ecdd4516f 3b18ba1358 5fbbab58ed 80b93beedf
eb5c47fcfa c7335e780b 878ebf3658 1c860ef252 a0494479f9 9a9a134188
41ccf01f29 06f3daa64b d5fe63c0a0 b7f0e776f6 b89f8c05cf b81dc16484
0e121eeddb 0430163180 09a0dc1600 dd11778ac6 f36cbe5194 82a383588b
8ae4d2cffe 0f895c0697 937dd7fa1f 33869387d1 3ec8e304b3 e62362094e
6a0398211d e5a53d4c65 59d5767a07 087b5788e2 d944bfd936 d780fcf317
0a9b8eac80 1272a8f9a5 0b40eaedaf 8340ff4fb9 f5abc168ed 510ac341e1
358e2b2665 3305079a03 6ed8471a37 dc7ce37c32 57c10babfe 23b2c58018
78abd21eec 841edc729c b03fcbcc12 b60ac7b66a 725ae91387 b7dd3ce608
70c8566cb8 0d066bdf46 3affc2e066 23c826db52 1ae36a4e32 bc969ad244
d7ac1f5b0e 5bf53fe3ac 9cc17cec9a e2816629d0 3f0d54daae 7dcbebf9bc
c31defbbd3 e54ce35019 75d54d720c cc0421cb9e 9c01ac9894 20300d1f61
6231beefc5 068cfdaa00 7d62d1fc97 e13ac28b8d b30ee57cd9 921e384c4d
bf685f600e b6d3cad70e c665201b85 d6996e3347 dffcc99373 09b79679cd
cf68e61f40 22674d1146 f9ee0dc3f2 65fa6a946a 4cf82118d9 5f56611a76
0f7a2f0f3c be2b310ace 45afe97e8e fee8f21ce1
.github/workflows/build.yml (vendored, 21 changed lines)
@@ -46,6 +46,7 @@ jobs:
           go: '1.15.x'
           gotags: cmount
           build_flags: '-include "^windows/amd64" -cgo'
+          build_args: '-buildmode exe'
           quicktest: true
           racequicktest: true
           deploy: true

@@ -57,6 +58,7 @@ jobs:
           goarch: '386'
           cgo: '1'
           build_flags: '-include "^windows/386" -cgo'
+          build_args: '-buildmode exe'
           quicktest: true
           deploy: true

@@ -107,10 +109,11 @@ jobs:
     - name: Set environment variables
       shell: bash
       run: |
-        echo '::set-env name=GOTAGS::${{ matrix.gotags }}'
-        echo '::set-env name=BUILD_FLAGS::${{ matrix.build_flags }}'
-        if [[ "${{ matrix.goarch }}" != "" ]]; then echo '::set-env name=GOARCH::${{ matrix.goarch }}' ; fi
-        if [[ "${{ matrix.cgo }}" != "" ]]; then echo '::set-env name=CGO_ENABLED::${{ matrix.cgo }}' ; fi
+        echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
+        echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
+        echo 'BUILD_ARGS=${{ matrix.build_args }}' >> $GITHUB_ENV
+        if [[ "${{ matrix.goarch }}" != "" ]]; then echo 'GOARCH=${{ matrix.goarch }}' >> $GITHUB_ENV ; fi
+        if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi

     - name: Install Libraries on Linux
       shell: bash

@@ -125,7 +128,7 @@ jobs:
       shell: bash
       run: |
         brew update
-        brew cask install osxfuse
+        brew install --cask osxfuse
       if: matrix.os == 'macOS-latest'

     - name: Install Libraries on Windows

@@ -133,10 +136,10 @@ jobs:
       run: |
        $ProgressPreference = 'SilentlyContinue'
        choco install -y winfsp zip
-       Write-Host "::set-env name=CPATH::C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
+       echo "CPATH=C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
        if ($env:GOARCH -eq "386") {
          choco install -y mingw --forcex86 --force
-         Write-Host "::add-path::C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
+         echo "C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
        }
        # Copy mingw32-make.exe to make.exe so the same command line
        # can be used on Windows as on macOS and Linux

@@ -223,8 +226,8 @@ jobs:
     - name: Set environment variables
       shell: bash
       run: |
-        echo '::set-env name=GOPATH::${{ runner.workspace }}'
-        echo '::add-path::${{ runner.workspace }}/bin'
+        echo 'GOPATH=${{ runner.workspace }}' >> $GITHUB_ENV
+        echo '${{ runner.workspace }}/bin' >> $GITHUB_PATH

     - name: Cross-compile rclone
       run: |

Docker image publish workflows:

@@ -15,7 +15,7 @@ jobs:
       with:
         fetch-depth: 0
     - name: Build and publish image
-      uses: ilteoood/docker_buildx@439099796bfc03dd9cedeb72a0c7cb92be5cc92c
+      uses: ilteoood/docker_buildx@1.1.0
       with:
         tag: beta
         imageName: rclone/rclone

@@ -23,7 +23,7 @@ jobs:
       id: actual_major_version
       run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
     - name: Build and publish image
-      uses: ilteoood/docker_buildx@439099796bfc03dd9cedeb72a0c7cb92be5cc92c
+      uses: ilteoood/docker_buildx@1.1.0
       with:
         tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
         imageName: rclone/rclone
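Most of the workflow edits above are one and the same migration: GitHub Actions deprecated the `::set-env` and `::add-path` workflow commands, and the replacement is to append `KEY=VALUE` lines (or bare paths) to the files named by the `$GITHUB_ENV` and `$GITHUB_PATH` environment variables, as the new bash and PowerShell lines do. A minimal sketch of the same mechanism driven from Go; the `exportEnv` helper is hypothetical and not part of rclone:

```go
package main

import (
	"fmt"
	"os"
)

// exportEnv appends KEY=VALUE to the file named by $GITHUB_ENV so the
// variable becomes visible to subsequent steps of the same workflow job.
// This mirrors what `echo 'KEY=VALUE' >> $GITHUB_ENV` does in bash.
func exportEnv(key, value string) error {
	path := os.Getenv("GITHUB_ENV")
	f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = fmt.Fprintf(f, "%s=%s\n", key, value)
	return err
}

func main() {
	if err := exportEnv("GOTAGS", "cmount"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```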
MANUAL.html (generated, 6383 changed lines): file diff suppressed because it is too large
MANUAL.txt (generated, 8070 changed lines): file diff suppressed because it is too large
Makefile (70 changed lines)
@@ -7,24 +7,27 @@ RELEASE_TAG := $(shell git tag -l --points-at HEAD)
 VERSION := $(shell cat VERSION)
 # Last tag on this branch
 LAST_TAG := $(shell git describe --tags --abbrev=0)
+# Next version
+NEXT_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2+1,0}')
+NEXT_PATCH_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2,$$3+1}')
 # If we are working on a release, override branch to master
 ifdef RELEASE_TAG
 	BRANCH := master
+	LAST_TAG := $(shell git describe --abbrev=0 --tags $(VERSION)^)
 endif
-TAG_BRANCH := -$(BRANCH)
-BRANCH_PATH := branch/
+TAG_BRANCH := .$(BRANCH)
+BRANCH_PATH := branch/$(BRANCH)/
 # If building HEAD or master then unset TAG_BRANCH and BRANCH_PATH
 ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
 	TAG_BRANCH :=
 	BRANCH_PATH :=
 endif
-# Make version suffix -DDD-gCCCCCCCC (D=commits since last relase, C=Commit) or blank
-VERSION_SUFFIX := $(shell git describe --abbrev=8 --tags | perl -lpe 's/^v\d+\.\d+\.\d+//; s/^-(\d+)/"-".sprintf("%03d",$$1)/e;')
-# TAG is current version + number of commits since last release + branch
+# Make version suffix -beta.NNNN.CCCCCCCC (N=Commit number, C=Commit)
+VERSION_SUFFIX := -beta.$(shell git rev-list --count HEAD).$(shell git show --no-patch --no-notes --pretty='%h' HEAD)
+# TAG is current version + commit number + commit + branch
 TAG := $(VERSION)$(VERSION_SUFFIX)$(TAG_BRANCH)
-NEXT_VERSION := $(shell echo $(VERSION) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
-ifndef RELEASE_TAG
-	TAG := $(TAG)-beta
+ifdef RELEASE_TAG
+	TAG := $(RELEASE_TAG)
 endif
 GO_VERSION := $(shell go version)
 ifdef BETA_SUBDIR

@@ -43,20 +46,19 @@ endif
 .PHONY: rclone test_all vars version

 rclone:
-	go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
+	go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS)
 	mkdir -p `go env GOPATH`/bin/
 	cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
 	mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`

 test_all:
-	go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
+	go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all

 vars:
 	@echo SHELL="'$(SHELL)'"
 	@echo BRANCH="'$(BRANCH)'"
 	@echo TAG="'$(TAG)'"
 	@echo VERSION="'$(VERSION)'"
-	@echo NEXT_VERSION="'$(NEXT_VERSION)'"
 	@echo GO_VERSION="'$(GO_VERSION)'"
 	@echo BETA_URL="'$(BETA_URL)'"

@@ -163,6 +165,11 @@ validate_website: website
 tarball:
 	git archive -9 --format=tar.gz --prefix=rclone-$(TAG)/ -o build/rclone-$(TAG).tar.gz $(TAG)

+vendorball:
+	go mod vendor
+	tar -zcf build/rclone-$(TAG)-vendor.tar.gz vendor
+	rm -rf vendor
+
 sign_upload:
 	cd build && md5sum rclone-v* | gpg --clearsign > MD5SUMS
 	cd build && sha1sum rclone-v* | gpg --clearsign > SHA1SUMS

@@ -181,10 +188,10 @@ upload_github:
 	./bin/upload-github $(TAG)

 cross:	doc
-	go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)
+	go run bin/cross-compile.go -release current $(BUILDTAGS) $(BUILD_ARGS) $(TAG)

 beta:
-	go run bin/cross-compile.go $(BUILDTAGS) $(TAG)
+	go run bin/cross-compile.go $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
 	rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
 	@echo Beta release ready at https://pub.rclone.org/$(TAG)/

@@ -192,23 +199,23 @@ log_since_last_release:
 	git log $(LAST_TAG)..

 compile_all:
-	go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG)
+	go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(BUILD_ARGS) $(TAG)

 ci_upload:
 	sudo chown -R $$USER build
 	find build -type l -delete
 	gzip -r9v build
 	./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
-ifndef BRANCH_PATH
+ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
 	./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
 endif
 	@echo Beta release ready at $(BETA_URL)/testbuilds

 ci_beta:
 	git log $(LAST_TAG).. > /tmp/git-log.txt
-	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)
+	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
 	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
-ifndef BRANCH_PATH
+ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
 	rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
 endif
 	@echo Beta release ready at $(BETA_URL)

@@ -220,26 +227,33 @@ fetch_binaries:
 serve:	website
 	cd docs && hugo server -v -w --disableFastRender

-tag:	doc
-	@echo "Old tag is $(VERSION)"
-	@echo "New tag is $(NEXT_VERSION)"
-	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)\"\n" | gofmt > fs/version.go
-	echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
-	echo "$(NEXT_VERSION)" > VERSION
-	git tag -s -m "Version $(NEXT_VERSION)" $(NEXT_VERSION)
-	bin/make_changelog.py $(LAST_TAG) $(NEXT_VERSION) > docs/content/changelog.md.new
+tag:	retag doc
+	bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new
 	mv docs/content/changelog.md.new docs/content/changelog.md
 	@echo "Edit the new changelog in docs/content/changelog.md"
 	@echo "Then commit all the changes"
-	@echo git commit -m \"Version $(NEXT_VERSION)\" -a -v
+	@echo git commit -m \"Version $(VERSION)\" -a -v
 	@echo "And finally run make retag before make cross etc"

 retag:
+	@echo "Version is $(VERSION)"
 	git tag -f -s -m "Version $(VERSION)" $(VERSION)

 startdev:
-	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(VERSION)-DEV\"\n" | gofmt > fs/version.go
-	git commit -m "Start $(VERSION)-DEV development" fs/version.go
+	@echo "Version is $(VERSION)"
+	@echo "Next version is $(NEXT_VERSION)"
+	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)-DEV\"\n" | gofmt > fs/version.go
+	echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
+	echo "$(NEXT_VERSION)" > VERSION
+	git commit -m "Start $(NEXT_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
+
+startstable:
+	@echo "Version is $(VERSION)"
+	@echo "Next stable version is $(NEXT_PATCH_VERSION)"
+	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_PATCH_VERSION)-DEV\"\n" | gofmt > fs/version.go
+	echo -n "$(NEXT_PATCH_VERSION)" > docs/layouts/partials/version.html
+	echo "$(NEXT_PATCH_VERSION)" > VERSION
+	git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html

 winzip:
 	zip -9 rclone-$(TAG).zip rclone.exe
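Net effect of the Makefile changes: beta builds are now tagged `$(VERSION)-beta.<commit count>.<short hash>[.<branch>]` (for example a hypothetical `v1.53.0-beta.4650.37c4099bb`) instead of the old `git describe`-derived `-DDD-gCCCCCCCC` suffix; release builds take their tag directly from `RELEASE_TAG`; `$(BUILD_ARGS)` is threaded through every build and cross-compile invocation; and the `tag`, `startdev` and new `startstable` targets keep `fs/version.go`, the `VERSION` file and the website version partial in sync.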
@@ -64,6 +64,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
   * StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
   * SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
   * Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
+  * Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
   * Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
   * WebDAV [:page_facing_up:](https://rclone.org/webdav/)
   * Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
RELEASE.md (55 changed lines)
@@ -9,7 +9,7 @@ This file describes how to make the various kinds of releases

 ## Making a release

-  * git checkout master
+  * git checkout master # see below for stable branch
   * git pull
   * git status - make sure everything is checked in
   * Check GitHub actions build for master is Green

@@ -25,12 +25,13 @@ This file describes how to make the various kinds of releases
   * # Wait for the GitHub builds to complete then...
   * make fetch_binaries
   * make tarball
+  * make vendorball
   * make sign_upload
   * make check_sign
   * make upload
   * make upload_website
   * make upload_github
-  * make startdev
+  * make startdev # make startstable for stable branch
   * # announce with forum post, twitter post, patreon post

 Early in the next release cycle update the dependencies

@@ -41,60 +42,34 @@ Early in the next release cycle update the dependencies
   * git add new files
   * git commit -a -v

-If `make update` fails with errors like this:
-
-```
-# github.com/cpuguy83/go-md2man/md2man
-../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:11:16: undefined: blackfriday.EXTENSION_NO_INTRA_EMPHASIS
-../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:12:16: undefined: blackfriday.EXTENSION_TABLES
-```
-
-Can be fixed with
-
-  * GO111MODULE=on go get -u github.com/russross/blackfriday@v1.5.2
-  * GO111MODULE=on go mod tidy
-
 ## Making a point release

 If rclone needs a point release due to some horrendous bug:

-First make the release branch. If this is a second point release then
-this will be done already.
+Set vars

   * BASE_TAG=v1.XX # eg v1.52
   * NEW_TAG=${BASE_TAG}.Y # eg v1.52.1
   * echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1

+First make the release branch. If this is a second point release then
+this will be done already.
+
+  * git branch ${BASE_TAG} ${BASE_TAG}-stable
+  * git co ${BASE_TAG}-stable
+  * make startstable
+
 Now

   * git co ${BASE_TAG}-stable
   * git cherry-pick any fixes
-  * Test (see above)
-  * make NEXT_VERSION=${NEW_TAG} tag
-  * edit docs/content/changelog.md
-  * make TAG=${NEW_TAG} doc
-  * git commit -a -v -m "Version ${NEW_TAG}"
-  * git tag -d ${NEW_TAG}
-  * git tag -s -m "Version ${NEW_TAG}" ${NEW_TAG}
-  * git push --tags -u origin ${BASE_TAG}-stable
-  * Wait for builds to complete
-  * make BRANCH_PATH= TAG=${NEW_TAG} fetch_binaries
-  * make TAG=${NEW_TAG} tarball
-  * make TAG=${NEW_TAG} sign_upload
-  * make TAG=${NEW_TAG} check_sign
-  * make TAG=${NEW_TAG} upload
-  * make TAG=${NEW_TAG} upload_website
-  * make TAG=${NEW_TAG} upload_github
-  * NB this overwrites the current beta so we need to do this
+  * Do the steps as above
   * make startstable
   * git co master
-  * make VERSION=${NEW_TAG} startdev
-  * # cherry pick the changes to the changelog and VERSION
-  * git checkout ${BASE_TAG}-stable VERSION docs/content/changelog.md
-  * git commit --amend
+  * `#` cherry pick the changes to the changelog - check the diff to make sure it is correct
+  * git checkout ${BASE_TAG}-stable docs/content/changelog.md
+  * git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
   * git push
   * Announce!

 ## Making a manual build of docker
@@ -5,6 +5,7 @@ import (
 	"strings"

 	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/cache"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/fspath"

@@ -46,9 +47,5 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	if strings.HasPrefix(opt.Remote, name+":") {
 		return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
 	}
-	fsInfo, configName, fsPath, config, err := fs.ConfigFs(opt.Remote)
-	if err != nil {
-		return nil, err
-	}
-	return fsInfo.NewFs(configName, fspath.JoinRootPath(fsPath, root), config)
+	return cache.Get(fspath.JoinRootPath(opt.Remote, root))
 }
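The alias change above is one instance of a pattern repeated in the cache, chunker and crypt diffs below: instead of parsing the wrapped remote with `fs.ConfigFs` and constructing it directly via `NewFs`, the backend asks the shared `fs/cache` for the remote, so repeated lookups reuse one instance, and then pins the wrapped Fs for the lifetime of the wrapper. A minimal sketch of that pattern, using a hypothetical wrapping backend rather than any of the real ones:

```go
// Sketch of the wrapping pattern these diffs adopt (hypothetical backend).
package wrapper

import (
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/cache"
	"github.com/rclone/rclone/fs/fspath"
)

// Fs wraps another remote.
type Fs struct {
	fs.Fs // the wrapped remote
}

// newWrapper resolves the remote via the shared Fs cache instead of
// calling fs.ConfigFs + fsInfo.NewFs directly.
func newWrapper(remote, root string) (fs.Fs, error) {
	wrapped, err := cache.Get(fspath.JoinRootPath(remote, root))
	if err != nil && err != fs.ErrorIsFile {
		return nil, err
	}
	f := &Fs{Fs: wrapped}
	// Keep the wrapped Fs alive in the cache for as long as f is alive.
	cache.PinUntilFinalized(f.Fs, f)
	return f, err
}
```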
backend/cache/cache.go (vendored, 12 changed lines)
@@ -361,15 +361,10 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
 		return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
 	}

-	wInfo, wName, wPath, wConfig, err := fs.ConfigFs(opt.Remote)
-	if err != nil {
-		return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", opt.Remote)
-	}
-
-	remotePath := fspath.JoinRootPath(wPath, rootPath)
-	wrappedFs, wrapErr := wInfo.NewFs(wName, remotePath, wConfig)
+	remotePath := fspath.JoinRootPath(opt.Remote, rootPath)
+	wrappedFs, wrapErr := cache.Get(remotePath)
 	if wrapErr != nil && wrapErr != fs.ErrorIsFile {
-		return nil, errors.Wrapf(wrapErr, "failed to make remote %s:%s to wrap", wName, remotePath)
+		return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
 	}
 	var fsErr error
 	fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)

@@ -390,6 +385,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
 		cleanupChan:     make(chan bool, 1),
 		notifiedRemotes: make(map[string]bool),
 	}
+	cache.PinUntilFinalized(f.Fs, f)
 	f.rateLimiter = rate.NewLimiter(rate.Limit(float64(opt.Rps)), opt.TotalWorkers)

 	f.plexConnector = &plexConnector{}
@@ -24,6 +24,7 @@ import (
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
+	"github.com/rclone/rclone/fs/cache"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/fspath"

@@ -120,6 +121,8 @@ const maxTransactionProbes = 100
 // standard chunker errors
 var (
 	ErrChunkOverflow = errors.New("chunk number overflow")
+	ErrMetaTooBig    = errors.New("metadata is too big")
+	ErrMetaUnknown   = errors.New("unknown metadata, please upgrade rclone")
 )

 // variants of baseMove's parameter delMode

@@ -238,15 +241,18 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 		return nil, errors.New("can't point remote at itself - check the value of the remote setting")
 	}

-	baseInfo, baseName, basePath, baseConfig, err := fs.ConfigFs(remote)
+	baseName, basePath, err := fspath.Parse(remote)
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
 	}
+	if baseName != "" {
+		baseName += ":"
+	}
 	// Look for a file first
 	remotePath := fspath.JoinRootPath(basePath, rpath)
-	baseFs, err := baseInfo.NewFs(baseName, remotePath, baseConfig)
+	baseFs, err := cache.Get(baseName + remotePath)
 	if err != fs.ErrorIsFile && err != nil {
-		return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", baseName, remotePath)
+		return nil, errors.Wrapf(err, "failed to make remote %q to wrap", baseName+remotePath)
 	}
 	if !operations.CanServerSideMove(baseFs) {
 		return nil, errors.New("can't use chunker on a backend which doesn't support server side move or copy")

@@ -258,6 +264,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 		root: rpath,
 		opt:  *opt,
 	}
+	cache.PinUntilFinalized(f.base, f)
 	f.dirSort = true // processEntries requires that meta Objects prerun data chunks atm.

 	if err := f.configure(opt.NameFormat, opt.MetaFormat, opt.HashType); err != nil {

@@ -271,7 +278,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 	// (yet can't satisfy fstest.CheckListing, will ignore)
 	if err == nil && !f.useMeta && strings.Contains(rpath, "/") {
 		firstChunkPath := f.makeChunkName(remotePath, 0, "", "")
-		_, testErr := baseInfo.NewFs(baseName, firstChunkPath, baseConfig)
+		_, testErr := cache.Get(baseName + firstChunkPath)
 		if testErr == fs.ErrorIsFile {
 			err = testErr
 		}

@@ -291,6 +298,8 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 		ServerSideAcrossConfigs: true,
 	}).Fill(f).Mask(baseFs).WrapsFs(f, baseFs)

+	f.features.Disable("ListR") // Recursive listing may cause chunker skip files
+
 	return f, err
 }

@@ -686,43 +695,47 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
 		switch entry := dirOrObject.(type) {
 		case fs.Object:
 			remote := entry.Remote()
-			if mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(remote); mainRemote != "" {
-				if xactID != "" {
-					if revealHidden {
-						fs.Infof(f, "ignore temporary chunk %q", remote)
-					}
-					break
-				}
-				if ctrlType != "" {
-					if revealHidden {
-						fs.Infof(f, "ignore control chunk %q", remote)
-					}
-					break
-				}
-				mainObject := byRemote[mainRemote]
-				if mainObject == nil && f.useMeta {
-					fs.Debugf(f, "skip chunk %q without meta object", remote)
-					break
-				}
-				if mainObject == nil {
-					// useMeta is false - create chunked object without metadata
-					mainObject = f.newObject(mainRemote, nil, nil)
-					byRemote[mainRemote] = mainObject
-					if !badEntry[mainRemote] {
-						tempEntries = append(tempEntries, mainObject)
-					}
-				}
-				if err := mainObject.addChunk(entry, chunkNo); err != nil {
-					if f.opt.FailHard {
-						return nil, err
-					}
-					badEntry[mainRemote] = true
-				}
-				break
-			}
-			object := f.newObject("", entry, nil)
-			byRemote[remote] = object
-			tempEntries = append(tempEntries, object)
+			mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(remote)
+			if mainRemote == "" {
+				// this is meta object or standalone file
+				object := f.newObject("", entry, nil)
+				byRemote[remote] = object
+				tempEntries = append(tempEntries, object)
+				break
+			}
+			// this is some kind of chunk
+			// metobject should have been created above if present
+			isSpecial := xactID != "" || ctrlType != ""
+			mainObject := byRemote[mainRemote]
+			if mainObject == nil && f.useMeta && !isSpecial {
+				fs.Debugf(f, "skip orphan data chunk %q", remote)
+				break
+			}
+			if mainObject == nil && !f.useMeta {
+				// this is the "nometa" case
+				// create dummy chunked object without metadata
+				mainObject = f.newObject(mainRemote, nil, nil)
+				byRemote[mainRemote] = mainObject
+				if !badEntry[mainRemote] {
+					tempEntries = append(tempEntries, mainObject)
+				}
+			}
+			if isSpecial {
+				if revealHidden {
+					fs.Infof(f, "ignore non-data chunk %q", remote)
+				}
+				// need to read metadata to ensure actual object type
+				if f.useMeta && mainObject != nil && mainObject.size <= maxMetadataSize {
+					mainObject.unsure = true
+				}
+				break
+			}
+			if err := mainObject.addChunk(entry, chunkNo); err != nil {
+				if f.opt.FailHard {
+					return nil, err
+				}
+				badEntry[mainRemote] = true
+			}
 		case fs.Directory:
 			isSubdir[entry.Remote()] = true
 			wrapDir := fs.NewDirCopy(ctx, entry)

@@ -777,6 +790,13 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
 // but opening even a small file can be slow on some backends.
 //
 func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
+	return f.scanObject(ctx, remote, false)
+}
+
+// scanObject is like NewObject with optional quick scan mode.
+// The quick mode avoids directory requests other than `List`,
+// ignores non-chunked objects and skips chunk size checks.
+func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.Object, error) {
 	if err := f.forbidChunk(false, remote); err != nil {
 		return nil, errors.Wrap(err, "can't access")
 	}

@@ -837,8 +857,15 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 			continue // bypass regexp to save cpu
 		}
 		mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(entryRemote)
-		if mainRemote == "" || mainRemote != remote || ctrlType != "" || xactID != "" {
-			continue // skip non-conforming, temporary and control chunks
+		if mainRemote == "" || mainRemote != remote {
+			continue // skip non-conforming chunks
+		}
+		if ctrlType != "" || xactID != "" {
+			if f.useMeta {
+				// temporary/control chunk calls for lazy metadata read
+				o.unsure = true
+			}
+			continue
 		}
 		//fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
 		if err := o.addChunk(entry, chunkNo); err != nil {

@@ -848,7 +875,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {

 	if o.main == nil && (o.chunks == nil || len(o.chunks) == 0) {
 		// Scanning hasn't found data chunks with conforming names.
-		if f.useMeta {
+		if f.useMeta || quickScan {
 			// Metadata is required but absent and there are no chunks.
 			return nil, fs.ErrorObjectNotFound
 		}

@@ -871,8 +898,10 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 	// file without metadata. Validate it and update the total data size.
 	// As an optimization, skip metadata reading here - we will call
 	// readMetadata lazily when needed (reading can be expensive).
-	if err := o.validate(); err != nil {
-		return nil, err
+	if !quickScan {
+		if err := o.validate(); err != nil {
+			return nil, err
+		}
 	}
 	return o, nil
 }

@@ -881,13 +910,24 @@ func (o *Object) readMetadata(ctx context.Context) error {
 	if o.isFull {
 		return nil
 	}
-	if !o.isComposite() || !o.f.useMeta {
+	if !o.f.useMeta || (!o.isComposite() && !o.unsure) {
 		o.isFull = true
 		return nil
 	}

 	// validate metadata
 	metaObject := o.main
+	if metaObject.Size() > maxMetadataSize {
+		if o.unsure {
+			// this is not metadata but a foreign object
+			o.unsure = false
+			o.chunks = nil  // make isComposite return false
+			o.isFull = true // cache results
+			return nil
+		}
+		return ErrMetaTooBig
+	}
+
 	reader, err := metaObject.Open(ctx)
 	if err != nil {
 		return err

@@ -900,8 +940,22 @@ func (o *Object) readMetadata(ctx context.Context) error {

 	switch o.f.opt.MetaFormat {
 	case "simplejson":
-		metaInfo, err := unmarshalSimpleJSON(ctx, metaObject, metadata, true)
-		if err != nil {
+		metaInfo, madeByChunker, err := unmarshalSimpleJSON(ctx, metaObject, metadata)
+		if o.unsure {
+			o.unsure = false
+			if !madeByChunker {
+				// this is not metadata but a foreign object
+				o.chunks = nil  // make isComposite return false
+				o.isFull = true // cache results
+				return nil
+			}
+		}
+		switch err {
+		case nil:
+			// fall thru
+		case ErrMetaTooBig, ErrMetaUnknown:
+			return err // return these errors unwrapped for unit tests
+		default:
 			return errors.Wrap(err, "invalid metadata")
 		}
 		if o.size != metaInfo.Size() || len(o.chunks) != metaInfo.nChunks {

@@ -916,7 +970,27 @@ func (o *Object) readMetadata(ctx context.Context) error {
 }

 // put implements Put, PutStream, PutUnchecked, Update
-func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options []fs.OpenOption, basePut putFn) (obj fs.Object, err error) {
+func (f *Fs) put(
+	ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options []fs.OpenOption,
+	basePut putFn, action string, target fs.Object) (obj fs.Object, err error) {
+
+	if err := f.forbidChunk(src, remote); err != nil {
+		return nil, errors.Wrap(err, action+" refused")
+	}
+	if target == nil {
+		// Get target object with a quick directory scan
+		if obj, err := f.scanObject(ctx, remote, true); err == nil {
+			target = obj
+		}
+	}
+	if target != nil {
+		obj := target.(*Object)
+		if err := obj.readMetadata(ctx); err == ErrMetaUnknown {
+			// refuse to update a file of unsupported format
+			return nil, errors.Wrap(err, "refusing to "+action)
+		}
+	}
+
 	c := f.newChunkingReader(src)
 	wrapIn := c.wrapStream(ctx, in, src)

@@ -953,6 +1027,8 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
 		}
 		info := f.wrapInfo(src, chunkRemote, size)

+		// Refill chunkLimit and let basePut repeatedly call chunkingReader.Read()
+		c.chunkLimit = c.chunkSize
 		// TODO: handle range/limit options
 		chunk, errChunk := basePut(ctx, wrapIn, info, options...)
 		if errChunk != nil {

@@ -1004,8 +1080,8 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
 	// Check for input that looks like valid metadata
 	needMeta := len(c.chunks) > 1
 	if c.readCount <= maxMetadataSize && len(c.chunks) == 1 {
-		_, err := unmarshalSimpleJSON(ctx, c.chunks[0], c.smallHead, false)
-		needMeta = err == nil
+		_, madeByChunker, _ := unmarshalSimpleJSON(ctx, c.chunks[0], c.smallHead)
+		needMeta = madeByChunker
 	}

 	// Finalize small object as non-chunked.

@@ -1161,10 +1237,14 @@ func (c *chunkingReader) updateHashes() {
 func (c *chunkingReader) Read(buf []byte) (bytesRead int, err error) {
 	if c.chunkLimit <= 0 {
 		// Chunk complete - switch to next one.
+		// Note #1:
 		// We might not get here because some remotes (eg. box multi-uploader)
 		// read the specified size exactly and skip the concluding EOF Read.
 		// Then a check in the put loop will kick in.
-		c.chunkLimit = c.chunkSize
+		// Note #2:
+		// The crypt backend after receiving EOF here will call Read again
+		// and we must insist on returning EOF, so we postpone refilling
+		// chunkLimit to the main loop.
 		return 0, io.EOF
 	}
 	if int64(len(buf)) > c.chunkLimit {

@@ -1248,29 +1328,16 @@ func (f *Fs) removeOldChunks(ctx context.Context, remote string) {
 // will return the object and the error, otherwise will return
 // nil and the error
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	if err := f.forbidChunk(src, src.Remote()); err != nil {
-		return nil, errors.Wrap(err, "refusing to put")
-	}
-	return f.put(ctx, in, src, src.Remote(), options, f.base.Put)
+	return f.put(ctx, in, src, src.Remote(), options, f.base.Put, "put", nil)
 }

 // PutStream uploads to the remote path with the modTime given of indeterminate size
 func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	if err := f.forbidChunk(src, src.Remote()); err != nil {
-		return nil, errors.Wrap(err, "refusing to upload")
-	}
-	return f.put(ctx, in, src, src.Remote(), options, f.base.Features().PutStream)
+	return f.put(ctx, in, src, src.Remote(), options, f.base.Features().PutStream, "upload", nil)
 }

 // Update in to the object with the modTime given of the given size
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-	if err := o.f.forbidChunk(o, o.Remote()); err != nil {
-		return errors.Wrap(err, "update refused")
-	}
-	if err := o.readMetadata(ctx); err != nil {
-		// refuse to update a file of unsupported format
-		return errors.Wrap(err, "refusing to update")
-	}
 	basePut := o.f.base.Put
 	if src.Size() < 0 {
 		basePut = o.f.base.Features().PutStream

@@ -1278,7 +1345,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			return errors.New("wrapped file system does not support streaming uploads")
 		}
 	}
-	oNew, err := o.f.put(ctx, in, src, o.Remote(), options, basePut)
+	oNew, err := o.f.put(ctx, in, src, o.Remote(), options, basePut, "update", o)
 	if err == nil {
 		*o = *oNew.(*Object)
 	}

@@ -1392,7 +1459,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
 		// to corrupt file in hard mode. Hence, refuse to Remove, too.
 		return errors.Wrap(err, "refuse to corrupt")
 	}
-	if err := o.readMetadata(ctx); err != nil {
+	if err := o.readMetadata(ctx); err == ErrMetaUnknown {
 		// Proceed but warn user that unexpected things can happen.
 		fs.Errorf(o, "Removing a file with unsupported metadata: %v", err)
 	}

@@ -1420,6 +1487,11 @@ func (f *Fs) copyOrMove(ctx context.Context, o *Object, remote string, do copyMo
 	if err := f.forbidChunk(o, remote); err != nil {
 		return nil, errors.Wrapf(err, "can't %s", opName)
 	}
+	if err := o.readMetadata(ctx); err != nil {
+		// Refuse to copy/move composite files with invalid or future
+		// metadata format which might involve unsupported chunk types.
+		return nil, errors.Wrapf(err, "can't %s this file", opName)
+	}
 	if !o.isComposite() {
 		fs.Debugf(o, "%s non-chunked object...", opName)
 		oResult, err := do(ctx, o.mainChunk(), remote) // chain operation to a single wrapped chunk

@@ -1428,11 +1500,6 @@ func (f *Fs) copyOrMove(ctx context.Context, o *Object, remote string, do copyMo
 		}
 		return f.newObject("", oResult, nil), nil
 	}
-	if err := o.readMetadata(ctx); err != nil {
-		// Refuse to copy/move composite files with invalid or future
-		// metadata format which might involve unsupported chunk types.
-		return nil, errors.Wrapf(err, "can't %s this file", opName)
-	}

 	fs.Debugf(o, "%s %d data chunks...", opName, len(o.chunks))
 	mainRemote := o.remote

@@ -1523,6 +1590,10 @@ func (f *Fs) okForServerSide(ctx context.Context, src fs.Object, opName string)
 		return
 	}

+	if obj.unsure {
+		// ensure object is composite if need to re-read metadata
+		_ = obj.readMetadata(ctx)
+	}
 	requireMetaHash := obj.isComposite() && f.opt.MetaFormat == "simplejson"
 	if !requireMetaHash && !f.hashAll {
 		ok = true // hash is not required for metadata

@@ -1706,6 +1777,7 @@ type Object struct {
 	chunks []fs.Object // active data chunks if file is composite, or wrapped file as a single chunk if meta format is 'none'
 	size   int64       // cached total size of chunks in a composite file or -1 for non-chunked files
 	isFull bool        // true if metadata has been read
+	unsure bool        // true if need to read metadata to detect object type
 	md5    string
 	sha1   string
 	f      *Fs

@@ -1856,15 +1928,16 @@ func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
 // on the level of wrapped remote but chunker is unaware of that.
 //
 func (o *Object) Hash(ctx context.Context, hashType hash.Type) (string, error) {
-	if err := o.readMetadata(ctx); err != nil {
-		return "", err // valid metadata is required to get hash, abort
-	}
 	if !o.isComposite() {
 		// First, chain to the wrapped non-chunked file if possible.
 		if value, err := o.mainChunk().Hash(ctx, hashType); err == nil && value != "" {
 			return value, nil
 		}
 	}
+	if err := o.readMetadata(ctx); err != nil {
+		return "", err // valid metadata is required to get hash, abort
+	}
+
 	// Try hash from metadata if the file is composite or if wrapped remote fails.
 	switch hashType {
 	case hash.MD5:

@@ -1889,13 +1962,13 @@ func (o *Object) UnWrap() fs.Object {

 // Open opens the file for read. Call Close() on the returned io.ReadCloser
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
-	if !o.isComposite() {
-		return o.mainChunk().Open(ctx, options...) // chain to wrapped non-chunked file
-	}
 	if err := o.readMetadata(ctx); err != nil {
 		// refuse to open unsupported format
 		return nil, errors.Wrap(err, "can't open")
 	}
+	if !o.isComposite() {
+		return o.mainChunk().Open(ctx, options...) // chain to wrapped non-chunked file
+	}

 	var openOptions []fs.OpenOption
 	var offset, limit int64 = 0, -1

@@ -2168,57 +2241,57 @@ func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1 s
 // handled by current implementation.
 // The version check below will then explicitly ask user to upgrade rclone.
 //
-func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte, strictChecks bool) (info *ObjectInfo, err error) {
+func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte) (info *ObjectInfo, madeByChunker bool, err error) {
 	// Be strict about JSON format
 	// to reduce possibility that a random small file resembles metadata.
 	if data != nil && len(data) > maxMetadataSize {
-		return nil, errors.New("too big")
+		return nil, false, ErrMetaTooBig
 	}
 	if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
-		return nil, errors.New("invalid json")
+		return nil, false, errors.New("invalid json")
 	}
 	var metadata metaSimpleJSON
 	err = json.Unmarshal(data, &metadata)
 	if err != nil {
-		return nil, err
+		return nil, false, err
 	}
 	// Basic fields are strictly required
 	// to reduce possibility that a random small file resembles metadata.
 	if metadata.Version == nil || metadata.Size == nil || metadata.ChunkNum == nil {
-		return nil, errors.New("missing required field")
+		return nil, false, errors.New("missing required field")
 	}
 	// Perform strict checks, avoid corruption of future metadata formats.
 	if *metadata.Version < 1 {
-		return nil, errors.New("wrong version")
+		return nil, false, errors.New("wrong version")
 	}
 	if *metadata.Size < 0 {
-		return nil, errors.New("negative file size")
+		return nil, false, errors.New("negative file size")
 	}
 	if *metadata.ChunkNum < 0 {
-		return nil, errors.New("negative number of chunks")
+		return nil, false, errors.New("negative number of chunks")
 	}
 	if *metadata.ChunkNum > maxSafeChunkNumber {
-		return nil, ErrChunkOverflow
+		return nil, true, ErrChunkOverflow // produced by incompatible version of rclone
 	}
 	if metadata.MD5 != "" {
 		_, err = hex.DecodeString(metadata.MD5)
 		if len(metadata.MD5) != 32 || err != nil {
-			return nil, errors.New("wrong md5 hash")
+			return nil, false, errors.New("wrong md5 hash")
 		}
 	}
 	if metadata.SHA1 != "" {
 		_, err = hex.DecodeString(metadata.SHA1)
 		if len(metadata.SHA1) != 40 || err != nil {
-			return nil, errors.New("wrong sha1 hash")
+			return nil, false, errors.New("wrong sha1 hash")
 		}
 	}
 	// ChunkNum is allowed to be 0 in future versions
 	if *metadata.ChunkNum < 1 && *metadata.Version <= metadataVersion {
-		return nil, errors.New("wrong number of chunks")
+		return nil, false, errors.New("wrong number of chunks")
 	}
-	// Non-strict mode also accepts future metadata versions
-	if *metadata.Version > metadataVersion && strictChecks {
-		return nil, fmt.Errorf("version %d is not supported, please upgrade rclone", metadata.Version)
+	if *metadata.Version > metadataVersion {
+		return nil, true, ErrMetaUnknown // produced by incompatible version of rclone
 	}

 	var nilFs *Fs // nil object triggers appropriate type method

@@ -2226,7 +2299,7 @@ func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte,
 	info.nChunks = *metadata.ChunkNum
 	info.md5 = metadata.MD5
 	info.sha1 = metadata.SHA1
-	return info, nil
+	return info, true, nil
 }

 func silentlyRemove(ctx context.Context, o fs.Object) {
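Taken together, the chunker hunks replace the old `strictChecks` flag with an explicit `madeByChunker` result: it is true only for content the chunker itself could have written, including the `ErrChunkOverflow` and `ErrMetaUnknown` cases that signal metadata from a newer rclone, while a foreign file that merely resembles metadata comes back false. Combined with the new `unsure` flag on `Object` and the `quickScan` mode of `scanObject`, this lets the backend postpone expensive metadata reads until an operation actually needs to know whether a file is composite.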
@@ -12,6 +12,7 @@ import (
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
+	"github.com/rclone/rclone/fs/cache"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/config/obscure"

@@ -158,24 +159,25 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 	if strings.HasPrefix(remote, name+":") {
 		return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
 	}
-	wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
-	if err != nil {
-		return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
-	}
 	// Make sure to remove trailing . reffering to the current dir
 	if path.Base(rpath) == "." {
 		rpath = strings.TrimSuffix(rpath, ".")
 	}
-	// Look for a file first
-	remotePath := fspath.JoinRootPath(wPath, cipher.EncryptFileName(rpath))
-	wrappedFs, err := wInfo.NewFs(wName, remotePath, wConfig)
-	// if that didn't produce a file, look for a directory
-	if err != fs.ErrorIsFile {
-		remotePath = fspath.JoinRootPath(wPath, cipher.EncryptDirName(rpath))
-		wrappedFs, err = wInfo.NewFs(wName, remotePath, wConfig)
+	var wrappedFs fs.Fs
+	if rpath == "" {
+		wrappedFs, err = cache.Get(remote)
+	} else {
+		// Look for a file first
+		remotePath := fspath.JoinRootPath(remote, cipher.EncryptFileName(rpath))
+		wrappedFs, err = cache.Get(remotePath)
+		// if that didn't produce a file, look for a directory
+		if err != fs.ErrorIsFile {
+			remotePath = fspath.JoinRootPath(remote, cipher.EncryptDirName(rpath))
+			wrappedFs, err = cache.Get(remotePath)
+		}
 	}
 	if err != fs.ErrorIsFile && err != nil {
-		return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", wName, remotePath)
+		return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remote)
 	}
 	f := &Fs{
 		Fs: wrappedFs,

@@ -184,6 +186,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 		opt:    *opt,
 		cipher: cipher,
 	}
+	cache.PinUntilFinalized(f.Fs, f)
 	// the features here are ones we could support, and they are
 	// ANDed with the ones from wrappedFs
 	f.features = (&fs.Features{

@@ -438,7 +441,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 	if do == nil {
 		return fs.ErrorCantPurge
 	}
-	return do(ctx, dir)
+	return do(ctx, f.cipher.EncryptDirName(dir))
 }

 // Copy src to this remote using server side copy operations.
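Two separate fixes ride along in the crypt diff: the wrapped remote is now resolved through the shared `fs/cache` (looking up the encrypted file path first, then the encrypted directory path, and the plain remote when the root is empty), and `Purge` now encrypts the directory name before delegating, since the wrapped backend only knows the encrypted names.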
@@ -17,7 +17,6 @@ import (
|
||||
"log"
|
||||
"mime"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"sort"
|
||||
"strconv"
|
||||
@@ -70,7 +69,7 @@ const (
|
||||
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
|
||||
minChunkSize = 256 * fs.KibiByte
|
||||
defaultChunkSize = 8 * fs.MebiByte
|
||||
partialFields = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails"
|
||||
partialFields = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks"
|
||||
listRGrouping = 50 // number of IDs to search at once when using ListR
|
||||
listRInputBuffer = 1000 // size of input buffer when using ListR
|
||||
)
|
||||
@@ -158,6 +157,17 @@ func driveScopesContainsAppFolder(scopes []string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func driveOAuthOptions() []fs.Option {
|
||||
opts := []fs.Option{}
|
||||
for _, opt := range oauthutil.SharedOptions {
|
||||
if opt.Name == config.ConfigClientID {
|
||||
opt.Help = "Google Application Client Id\nSetting your own is recommended.\nSee https://rclone.org/drive/#making-your-own-client-id for how to create your own.\nIf you leave this blank, it will use an internal key which is low performance."
|
||||
}
|
||||
opts = append(opts, opt)
|
||||
}
|
||||
return opts
|
||||
}
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
@@ -193,7 +203,7 @@ func init() {
|
||||
log.Fatalf("Failed to configure team drive: %v", err)
|
||||
}
|
||||
},
|
||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||
Options: append(driveOAuthOptions(), []fs.Option{{
|
||||
Name: "scope",
|
||||
Help: "Scope that rclone should use when requesting access from drive.",
|
||||
Examples: []fs.OptionExample{{
|
||||
@@ -281,6 +291,11 @@ commands (copy, sync, etc), and with all other commands too.`,
|
||||
Default: false,
|
||||
Help: "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "starred_only",
|
||||
Default: false,
|
||||
Help: "Only show files that are starred.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "formats",
|
||||
Default: "",
|
||||
@@ -349,17 +364,8 @@ date is used.`,
|
||||
}, {
|
||||
Name: "alternate_export",
|
||||
Default: false,
|
||||
Help: `Use alternate export URLs for google documents export.,
|
||||
|
||||
If this option is set this instructs rclone to use an alternate set of
|
||||
export URLs for drive documents. Users have reported that the
|
||||
official export URLs can't export large documents, whereas these
|
||||
unofficial ones can.
|
||||
|
||||
See rclone issue [#2243](https://github.com/rclone/rclone/issues/2243) for background,
|
||||
[this google drive issue](https://issuetracker.google.com/issues/36761333) and
|
||||
[this helpful post](https://www.labnol.org/internet/direct-links-for-google-drive/28356/).`,
|
||||
Advanced: true,
|
||||
Help: "Deprecated: no longer needed",
|
||||
Hide: fs.OptionHideBoth,
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Default: defaultChunkSize,
|
||||
@@ -513,6 +519,7 @@ type Options struct {
|
||||
SkipChecksumGphotos bool `config:"skip_checksum_gphotos"`
|
||||
SharedWithMe bool `config:"shared_with_me"`
|
||||
TrashedOnly bool `config:"trashed_only"`
|
||||
StarredOnly bool `config:"starred_only"`
|
||||
Extensions string `config:"formats"`
|
||||
ExportExtensions string `config:"export_formats"`
|
||||
ImportExtensions string `config:"import_formats"`
|
||||
@@ -521,7 +528,6 @@ type Options struct {
|
||||
UseSharedDate bool `config:"use_shared_date"`
|
||||
ListChunk int64 `config:"list_chunk"`
|
||||
Impersonate string `config:"impersonate"`
|
||||
AlternateExport bool `config:"alternate_export"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
AcknowledgeAbuse bool `config:"acknowledge_abuse"`
|
||||
@@ -696,6 +702,7 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
|
||||
}
|
||||
query = append(query, q)
|
||||
}
|
||||
|
||||
// Search with sharedWithMe will always return things listed in "Shared With Me" (without any parents)
|
||||
// We must not filter with parent when we try list "ROOT" with drive-shared-with-me
|
||||
// If we need to list file inside those shared folders, we must search it without sharedWithMe
|
||||
@@ -707,8 +714,16 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
|
||||
if parentsQuery.Len() > 1 {
|
||||
_, _ = parentsQuery.WriteString(" or ")
|
||||
}
|
||||
if f.opt.SharedWithMe && dirID == f.rootFolderID {
|
||||
_, _ = parentsQuery.WriteString("sharedWithMe=true")
|
||||
if (f.opt.SharedWithMe || f.opt.StarredOnly) && dirID == f.rootFolderID {
|
||||
if f.opt.SharedWithMe {
|
||||
_, _ = parentsQuery.WriteString("sharedWithMe=true")
|
||||
}
|
||||
if f.opt.StarredOnly {
|
||||
if f.opt.SharedWithMe {
|
||||
_, _ = parentsQuery.WriteString(" and ")
|
||||
}
|
||||
_, _ = parentsQuery.WriteString("starred=true")
|
||||
}
|
||||
} else {
|
||||
_, _ = fmt.Fprintf(parentsQuery, "'%s' in parents", dirID)
|
||||
}
|
||||
@@ -1238,20 +1253,7 @@ func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, expor
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
id := actualID(info.Id)
|
||||
url := fmt.Sprintf("%sfiles/%s/export?mimeType=%s", f.svc.BasePath, id, url.QueryEscape(mediaType))
|
||||
if f.opt.AlternateExport {
|
||||
switch info.MimeType {
|
||||
case "application/vnd.google-apps.drawing":
|
||||
url = fmt.Sprintf("https://docs.google.com/drawings/d/%s/export/%s", id, extension[1:])
|
||||
case "application/vnd.google-apps.document":
|
||||
url = fmt.Sprintf("https://docs.google.com/document/d/%s/export?format=%s", id, extension[1:])
|
||||
case "application/vnd.google-apps.spreadsheet":
|
||||
url = fmt.Sprintf("https://docs.google.com/spreadsheets/d/%s/export?format=%s", id, extension[1:])
|
||||
case "application/vnd.google-apps.presentation":
|
||||
url = fmt.Sprintf("https://docs.google.com/presentation/d/%s/export/%s", id, extension[1:])
|
||||
}
|
||||
}
|
||||
url := info.ExportLinks[mediaType]
|
||||
baseObject := f.newBaseObject(remote+extension, info)
|
||||
baseObject.bytes = -1
|
||||
baseObject.mimeType = exportMimeType
|
@@ -1628,7 +1630,7 @@ func (s listRSlices) Less(i, j int) bool {
 // In each cycle it will read up to grouping entries from the in channel without blocking.
 // If an error occurs it will be sent to the out channel and then return. Once the in channel is closed,
 // nil is sent to the out channel and the function returns.
-func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listREntry, out chan<- error, cb func(fs.DirEntry) error) {
+func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listREntry, out chan<- error, cb func(fs.DirEntry) error, sendJob func(listREntry)) {
 	var dirs []string
 	var paths []string
 	var grouping int32
@@ -1709,24 +1711,17 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listRE
 		if atomic.SwapInt32(&f.grouping, 1) != 1 {
 			fs.Debugf(f, "Disabling ListR to work around bug in drive as multi listing (%d) returned no entries", len(dirs))
 		}
-		var recycled = make([]listREntry, len(dirs))
 		f.listRmu.Lock()
 		for i := range dirs {
-			recycled[i] = listREntry{id: dirs[i], path: paths[i]}
+			// Requeue the jobs
+			job := listREntry{id: dirs[i], path: paths[i]}
+			sendJob(job)
 			// Make a note of these dirs - if they all turn
 			// out to be empty then we can re-enable grouping
 			f.listRempties[dirs[i]] = struct{}{}
 		}
 		f.listRmu.Unlock()
-		// recycle these in the background so we don't deadlock
-		// the listR runners if they all get here
-		wg.Add(len(recycled))
-		go func() {
-			for _, entry := range recycled {
-				in <- entry
-			}
-			fs.Debugf(f, "Recycled %d entries", len(recycled))
-		}()
+		fs.Debugf(f, "Recycled %d entries", len(dirs))
 	}
 	// If using a grouping of 1 and dir was empty then check to see if it
 	// is part of the group that caused grouping to be disabled.
@@ -1795,21 +1790,33 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 	overflow := []listREntry{}
 	listed := 0

-	cb := func(entry fs.DirEntry) error {
+	// Send a job to the input channel if not closed. If the job
+	// won't fit then queue it in the overflow slice.
+	//
+	// This will not block if the channel is full.
+	sendJob := func(job listREntry) {
 		mu.Lock()
 		defer mu.Unlock()
-		if d, isDir := entry.(*fs.Dir); isDir && in != nil {
-			job := listREntry{actualID(d.ID()), d.Remote()}
-			select {
-			case in <- job:
-				// Adding the wg after we've entered the item is
-				// safe here because we know when the callback
-				// is called we are holding a waitgroup.
-				wg.Add(1)
-			default:
-				overflow = append(overflow, job)
-			}
+		if in == nil {
+			return
+		}
+		wg.Add(1)
+		select {
+		case in <- job:
+		default:
+			overflow = append(overflow, job)
+			wg.Add(-1)
 		}
 	}

+	// Send the entry to the caller, queueing any directories as new jobs
+	cb := func(entry fs.DirEntry) error {
+		if d, isDir := entry.(*fs.Dir); isDir {
+			job := listREntry{actualID(d.ID()), d.Remote()}
+			sendJob(job)
+		}
+		mu.Lock()
+		defer mu.Unlock()
 		listed++
 		return list.Add(entry)
 	}
@@ -1818,7 +1825,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 	in <- listREntry{directoryID, dir}

 	for i := 0; i < fs.Config.Checkers; i++ {
-		go f.listRRunner(ctx, &wg, in, out, cb)
+		go f.listRRunner(ctx, &wg, in, out, cb, sendJob)
 	}
 	go func() {
 		// wait until all the directories are processed
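The core trick in the hunk above is the non-blocking send with an overflow queue: reserve a WaitGroup slot first, attempt the channel send, and undo the reservation if the job has to be parked. A minimal standalone sketch of the same pattern (the job type and names are illustrative, not rclone's actual API):

```go
package main

import (
	"fmt"
	"sync"
)

type job struct{ id, path string }

func main() {
	var (
		mu       sync.Mutex
		wg       sync.WaitGroup
		in       = make(chan job, 2) // small buffer to force overflow
		overflow []job
	)

	// sendJob mirrors the diff: count the job first, then try a
	// non-blocking send; on failure queue it and undo the count.
	sendJob := func(j job) {
		mu.Lock()
		defer mu.Unlock()
		wg.Add(1)
		select {
		case in <- j:
		default:
			overflow = append(overflow, j)
			wg.Add(-1)
		}
	}

	for i := 0; i < 5; i++ {
		sendJob(job{id: fmt.Sprint(i)})
	}
	fmt.Println("queued:", len(in), "overflowed:", len(overflow))
}
```

Counting before the send (and decrementing on overflow) keeps the WaitGroup from ever reaching zero while a job is in flight, which is why the runners can't deadlock when every one of them hits an empty-listing retry at once.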
@@ -6,6 +6,7 @@ import (
 	"net/http"
 	"regexp"
 	"strconv"
 	"strings"
 	"time"

 	"github.com/pkg/errors"
@@ -28,6 +29,20 @@ var retryErrorCodes = []int{
 // shouldRetry returns a boolean as to whether this resp and err
 // deserve to be retried. It returns the err as a convenience
 func shouldRetry(resp *http.Response, err error) (bool, error) {
+	// Detect this error which the integration tests provoke
+	// error HTTP error 403 (403 Forbidden) returned body: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"
+	//
+	// https://1fichier.com/api.html
+	//
+	// file/ls.cgi is limited :
+	//
+	// Warning (can be changed in case of abuses) :
+	// List all files of the account is limited to 1 request per hour.
+	// List folders is limited to 5 000 results and 1 request per folder per 30s.
+	if err != nil && strings.Contains(err.Error(), "Flood detected") {
+		fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
+		time.Sleep(30 * time.Second)
+	}
 	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 }
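The idea is simple: when the provider's rate limit can only be recognised from the error body text, match the string, pause, and let the generic retry machinery take another pass. A rough standalone illustration (the retry helper here is a stand-in for rclone's fserrors; the error text follows the diff):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
	"time"
)

// floodPause matches the 30s wait in the diff; shortened in main for the demo.
var floodPause = 30 * time.Second

// shouldRetryFlood sleeps when the provider reports rate limiting in
// the error body, then reports whether the call should be retried.
func shouldRetryFlood(err error) (bool, error) {
	if err != nil && strings.Contains(err.Error(), "Flood detected") {
		fmt.Printf("sleeping %v due to: %v\n", floodPause, err)
		time.Sleep(floodPause)
		return true, err // retry after the pause
	}
	return false, err
}

func main() {
	floodPause = time.Millisecond // keep the demo fast
	retry, err := shouldRetryFlood(errors.New(`HTTP error 403: {"message":"Flood detected: IP Locked"}`))
	fmt.Println(retry, err)
}
```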
@@ -323,7 +323,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 // This will create a duplicate if we upload a new file without
 // checking to see if there is one already - use Put() for that.
 func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
-	if size > int64(100e9) {
+	if size > int64(300e9) {
 		return nil, errors.New("File too big, cant upload")
 	} else if size == 0 {
 		return nil, fs.ErrorCantUploadEmptyFiles
@@ -841,20 +841,27 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		remote: remote,
 	}

-	var newObject *storage.Object
-	err = f.pacer.Call(func() (bool, error) {
-		copyObject := f.svc.Objects.Copy(srcBucket, srcPath, dstBucket, dstPath, nil)
-		if !f.opt.BucketPolicyOnly {
-			copyObject.DestinationPredefinedAcl(f.opt.ObjectACL)
-		}
-		newObject, err = copyObject.Context(ctx).Do()
-		return shouldRetry(err)
-	})
-	if err != nil {
-		return nil, err
-	}
+	rewriteRequest := f.svc.Objects.Rewrite(srcBucket, srcPath, dstBucket, dstPath, nil)
+	if !f.opt.BucketPolicyOnly {
+		rewriteRequest.DestinationPredefinedAcl(f.opt.ObjectACL)
+	}
+	var rewriteResponse *storage.RewriteResponse
+	for {
+		err = f.pacer.Call(func() (bool, error) {
+			rewriteResponse, err = rewriteRequest.Context(ctx).Do()
+			return shouldRetry(err)
+		})
+		if err != nil {
+			return nil, err
+		}
+		if rewriteResponse.Done {
+			break
+		}
+		rewriteRequest.RewriteToken(rewriteResponse.RewriteToken)
+		fs.Debugf(dstObj, "Continuing rewrite %d bytes done", rewriteResponse.TotalBytesRewritten)
+	}
 	// Set the metadata for the new object while we have it
-	dstObj.setMetaData(newObject)
+	dstObj.setMetaData(rewriteResponse.Resource)
 	return dstObj, nil
 }
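The switch from Objects.Copy to Objects.Rewrite matters because Rewrite may return before the copy is complete, handing back a token to resume with. The shape of that loop, with the GCS client abstracted behind a hypothetical interface purely for illustration:

```go
package main

import "fmt"

// rewriteResult is a stand-in for *storage.RewriteResponse.
type rewriteResult struct {
	Done                bool
	RewriteToken        string
	TotalBytesRewritten int64
}

// rewriter is a stand-in for the Objects.Rewrite call chain.
type rewriter interface {
	Do(token string) (rewriteResult, error)
}

// rewriteLoop keeps calling Do, feeding the token back in, until the
// server reports the copy is complete - the same loop as the diff.
func rewriteLoop(r rewriter) error {
	token := ""
	for {
		res, err := r.Do(token)
		if err != nil {
			return err
		}
		if res.Done {
			return nil
		}
		token = res.RewriteToken
		fmt.Printf("continuing rewrite, %d bytes done\n", res.TotalBytesRewritten)
	}
}

// fakeRewriter completes after two round trips.
type fakeRewriter struct{ calls int }

func (f *fakeRewriter) Do(token string) (rewriteResult, error) {
	f.calls++
	if f.calls < 3 {
		return rewriteResult{RewriteToken: fmt.Sprint(f.calls), TotalBytesRewritten: int64(f.calls) << 20}, nil
	}
	return rewriteResult{Done: true}, nil
}

func main() {
	fmt.Println(rewriteLoop(&fakeRewriter{}))
}
```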
@@ -353,7 +353,7 @@ func doAuthV1(ctx context.Context, srv *rest.Client, username, password string)
 		authCode = strings.Replace(authCode, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
 		opts.ExtraHeaders = make(map[string]string)
 		opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
-		resp, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
+		_, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
 		}
 	}
 }
@@ -373,6 +373,9 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {
 	fmt.Printf("Login Token> ")
 	loginToken := config.ReadLine()

+	m.Set(configClientID, "jottacli")
+	m.Set(configClientSecret, "")
+
 	token, err := doAuthV2(ctx, srv, loginToken, m)
 	if err != nil {
 		log.Fatalf("Failed to get oauth token: %s", err)
@@ -384,7 +387,6 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {

 	fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
 	if config.Confirm(false) {
-		oauthConfig.ClientID = "jottacli"
 		oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
 		if err != nil {
 			log.Fatalf("Failed to load oAuthClient: %s", err)
@@ -551,7 +553,7 @@ func (f *Fs) setEndpointURL() {
 	if f.opt.Mountpoint == "" {
 		f.opt.Mountpoint = defaultMountpoint
 	}
-	f.endpointURL = urlPathEscape(path.Join(f.user, f.opt.Device, f.opt.Mountpoint))
+	f.endpointURL = path.Join(f.user, f.opt.Device, f.opt.Mountpoint)
 }

 // readMetaDataForPath reads the metadata from the path
@@ -728,6 +730,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Renew the token in the background
 	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
 		_, err := f.readMetaDataForPath(ctx, "")
+		if err == fs.ErrorNotAFile {
+			err = nil
+		}
 		return err
 	})

@@ -1087,8 +1092,7 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
-		retry, _ := shouldRetry(resp, err)
-		return (retry && resp.StatusCode != 500), err
+		return shouldRetry(resp, err)
 	})
 	if err != nil {
 		return nil, err
@@ -1192,18 +1196,6 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string

 	_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)

-	// surprise! jottacloud fucked up dirmove - the api spits out an error but
-	// dir gets moved regardless
-	if apiErr, ok := err.(*api.Error); ok {
-		if apiErr.StatusCode == 500 {
-			_, err := f.NewObject(ctx, dstRemote)
-			if err == fs.ErrorNotAFile {
-				log.Printf("FIXME: ignoring DirMove error - move succeeded anyway\n")
-				return nil
-			}
-			return err
-		}
-	}
 	if err != nil {
 		return errors.Wrap(err, "couldn't move directory")
 	}
@@ -1477,6 +1469,8 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
 //
 // The new object may have been created if an error is returned
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+	o.fs.tokenRenewer.Start()
+	defer o.fs.tokenRenewer.Stop()
 	size := src.Size()
 	md5String, err := src.Hash(ctx, hash.MD5)
 	if err != nil || md5String == "" {
@@ -1213,7 +1213,7 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
 	// Set the file to be a sparse file (important on Windows)
 	err = file.SetSparse(out)
 	if err != nil {
-		fs.Debugf(o, "Failed to set sparse: %v", err)
+		fs.Errorf(o, "Failed to set sparse: %v", err)
 	}
 }

@@ -1231,6 +1231,15 @@ func (o *Object) setMetadata(info os.FileInfo) {
 	o.modTime = info.ModTime()
 	o.mode = info.Mode()
 	o.fs.objectMetaMu.Unlock()
+	// On Windows links read as 0 size so set the correct size here
+	if runtime.GOOS == "windows" && o.translatedLink {
+		linkdst, err := os.Readlink(o.path)
+		if err != nil {
+			fs.Errorf(o, "Failed to read link size: %v", err)
+		} else {
+			o.size = int64(len(linkdst))
+		}
+	}
 }

 // Stat an Object into info
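The Windows fix works because a symlink's logical size is the byte length of its target path, which os.Readlink returns on every platform even where Stat reports 0 for links. A small standalone check (the file names are illustrative):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	if err := os.Symlink("target-file.txt", "example-link"); err != nil {
		fmt.Println("symlink:", err) // may need privileges on Windows
		return
	}
	defer os.Remove("example-link")

	// The link's size is the byte length of the path it points at,
	// even on platforms where Stat reports 0 for symlinks.
	dst, err := os.Readlink("example-link")
	if err != nil {
		fmt.Println("readlink:", err)
		return
	}
	fmt.Printf("link size = %d bytes (%q)\n", len(dst), dst)
}
```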
@@ -6,7 +6,6 @@ import (
 	"os"
 	"path"
 	"path/filepath"
-	"runtime"
 	"testing"
 	"time"

@@ -89,9 +88,6 @@ func TestSymlink(t *testing.T) {

 	// Object viewed as symlink
 	file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
-	if runtime.GOOS == "windows" {
-		file2.Size = 0 // symlinks are 0 length under Windows
-	}

 	// Object viewed as destination
 	file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
@@ -121,9 +117,6 @@ func TestSymlink(t *testing.T) {
 	// Create a symlink
 	modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
 	file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
-	if runtime.GOOS == "windows" {
-		file3.Size = 0 // symlinks are 0 length under Windows
-	}
 	fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
 	if haveLChtimes {
 		fstest.CheckItems(t, r.Flocal, file1, file2, file3)
@@ -142,9 +135,7 @@ func TestSymlink(t *testing.T) {
 	o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
 	require.NoError(t, err)
 	assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
-	if runtime.GOOS != "windows" {
-		assert.Equal(t, int64(8), o.Size())
-	}
+	assert.Equal(t, int64(8), o.Size())

 	// Check that NewObject doesn't see the non suffixed version
 	_, err = r.Flocal.NewObject(ctx, "symlink2.txt")
@@ -117,7 +117,7 @@ type ListItem struct {
 	Name      string `json:"name"`
 	Home      string `json:"home"`
 	Size      int64  `json:"size"`
-	Mtime     int64  `json:"mtime,omitempty"`
+	Mtime     uint64 `json:"mtime,omitempty"`
 	Hash      string `json:"hash,omitempty"`
 	VirusScan string `json:"virus_scan,omitempty"`
 	Tree      string `json:"tree,omitempty"`
@@ -159,71 +159,6 @@ type FolderInfoResponse struct {
 	Email string `json:"email"`
 }

-// ShardInfoResponse ...
-type ShardInfoResponse struct {
-	Email string `json:"email"`
-	Body  struct {
-		Video []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"video"`
-		ViewDirect []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"view_direct"`
-		WeblinkView []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"weblink_view"`
-		WeblinkVideo []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"weblink_video"`
-		WeblinkGet []struct {
-			Count int    `json:"count"`
-			URL   string `json:"url"`
-		} `json:"weblink_get"`
-		Stock []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"stock"`
-		WeblinkThumbnails []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"weblink_thumbnails"`
-		PublicUpload []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"public_upload"`
-		Auth []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"auth"`
-		Web []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"web"`
-		View []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"view"`
-		Upload []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"upload"`
-		Get []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"get"`
-		Thumbnails []struct {
-			Count string `json:"count"`
-			URL   string `json:"url"`
-		} `json:"thumbnails"`
-	} `json:"body"`
-	Time   int64 `json:"time"`
-	Status int   `json:"status"`
-}

 // CleanupResponse ...
 type CleanupResponse struct {
 	Email string `json:"email"`
@@ -37,6 +37,7 @@ import (
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
+	"github.com/rclone/rclone/lib/readers"
 	"github.com/rclone/rclone/lib/rest"

 	"github.com/pkg/errors"
@@ -655,9 +656,14 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
 	if err != nil {
 		return nil, -1, err
 	}
+	mTime := int64(item.Mtime)
+	if mTime < 0 {
+		fs.Debugf(f, "Fixing invalid timestamp %d on mailru file %q", mTime, remote)
+		mTime = 0
+	}
 	switch item.Kind {
 	case "folder":
-		dir := fs.NewDir(remote, time.Unix(item.Mtime, 0)).SetSize(item.Size)
+		dir := fs.NewDir(remote, time.Unix(mTime, 0)).SetSize(item.Size)
 		dirSize := item.Count.Files + item.Count.Folders
 		return dir, dirSize, nil
 	case "file":
@@ -671,7 +677,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
 			hasMetaData: true,
 			size:        item.Size,
 			mrHash:      binHash,
-			modTime:     time.Unix(item.Mtime, 0),
+			modTime:     time.Unix(mTime, 0),
 		}
 		return file, -1, nil
 	default:
@@ -1861,30 +1867,30 @@ func (f *Fs) uploadShard(ctx context.Context) (string, error) {
 		return f.shardURL, nil
 	}

-	token, err := f.accessToken()
-	if err != nil {
-		return "", err
-	}
-
 	opts := rest.Opts{
-		Method: "GET",
-		Path:   "/api/m1/dispatcher",
-		Parameters: url.Values{
-			"client_id":    {api.OAuthClientID},
-			"access_token": {token},
-		},
+		RootURL: api.DispatchServerURL,
+		Method:  "GET",
+		Path:    "/u",
 	}

-	var info api.ShardInfoResponse
+	var (
+		res *http.Response
+		url string
+		err error
+	)
 	err = f.pacer.Call(func() (bool, error) {
-		res, err := f.srv.CallJSON(ctx, &opts, nil, &info)
-		return shouldRetry(res, err, f, &opts)
+		res, err = f.srv.Call(ctx, &opts)
+		if err == nil {
+			url, err = readBodyWord(res)
+		}
+		return fserrors.ShouldRetry(err), err
 	})
 	if err != nil {
+		closeBody(res)
 		return "", err
 	}

-	f.shardURL = info.Body.Upload[0].URL
+	f.shardURL = url
 	f.shardExpiry = time.Now().Add(shardExpirySec * time.Second)
 	fs.Debugf(f, "new upload shard: %s", f.shardURL)
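The timestamp guard above is worth noting: once the API field is decoded as uint64, a bogus server value can overflow int64 and go negative, yielding a nonsense date. A minimal version of the clamp in isolation:

```go
package main

import (
	"fmt"
	"time"
)

// safeUnix converts a server-supplied mtime to time.Time, treating
// anything that went negative after conversion as the epoch.
func safeUnix(raw uint64) time.Time {
	mTime := int64(raw)
	if mTime < 0 {
		fmt.Printf("fixing invalid timestamp %d\n", mTime)
		mTime = 0
	}
	return time.Unix(mTime, 0)
}

func main() {
	fmt.Println(safeUnix(1577836800))           // a normal value
	fmt.Println(safeUnix(18446744073709551615)) // overflows int64, clamped to epoch
}
```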
@@ -2116,7 +2122,18 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		return nil, err
 	}

-	start, end, partial := getTransferRange(o.size, options...)
+	start, end, partialRequest := getTransferRange(o.size, options...)
+
+	headers := map[string]string{
+		"Accept":       "*/*",
+		"Content-Type": "application/octet-stream",
+	}
+	if partialRequest {
+		rangeStr := fmt.Sprintf("bytes=%d-%d", start, end-1)
+		headers["Range"] = rangeStr
+		// headers["Content-Range"] = rangeStr
+		headers["Accept-Ranges"] = "bytes"
+	}

 	// TODO: set custom timeouts
 	opts := rest.Opts{
@@ -2127,10 +2144,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 			"client_id": {api.OAuthClientID},
 			"token":     {token},
 		},
-		ExtraHeaders: map[string]string{
-			"Accept": "*/*",
-			"Range":  fmt.Sprintf("bytes=%d-%d", start, end-1),
-		},
+		ExtraHeaders: headers,
 	}

 	var res *http.Response
@@ -2151,18 +2165,36 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		return nil, err
 	}

-	var hasher gohash.Hash
-	if !partial {
+	// Server should respond with Status 206 and Content-Range header to a range
+	// request. Status 200 (and no Content-Range) means a full-content response.
+	partialResponse := res.StatusCode == 206

+	var (
+		hasher     gohash.Hash
+		wrapStream io.ReadCloser
+	)
+	if !partialResponse {
 		// Cannot check hash of partial download
 		hasher = mrhash.New()
 	}
-	wrapStream := &endHandler{
+	wrapStream = &endHandler{
 		ctx:    ctx,
 		stream: res.Body,
 		hasher: hasher,
 		o:      o,
 		server: server,
 	}
+	if partialRequest && !partialResponse {
+		fs.Debugf(o, "Server returned full content instead of range")
+		if start > 0 {
+			// Discard the beginning of the data
+			_, err = io.CopyN(ioutil.Discard, wrapStream, start)
+			if err != nil {
+				return nil, err
+			}
+		}
+		wrapStream = readers.NewLimitedReadCloser(wrapStream, end-start)
+	}
 	return wrapStream, nil
 }
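When a server answers a Range request with 200 and the full body, the client can still honour the caller's window by discarding the prefix and capping the remainder, which is the recovery path this hunk adds. A sketch of the same idea with a plain io.Reader standing in for the response body (rclone's readers.NewLimitedReadCloser is replaced by the stdlib io.LimitReader here):

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// window returns a reader for body[start:end) even when the server
// ignored the Range header and sent the whole object.
func window(body io.Reader, start, end int64) (io.Reader, error) {
	if start > 0 {
		// Discard the beginning of the data
		if _, err := io.CopyN(ioutil.Discard, body, start); err != nil {
			return nil, err
		}
	}
	return io.LimitReader(body, end-start), nil
}

func main() {
	full := strings.NewReader("0123456789")
	r, err := window(full, 3, 7)
	if err != nil {
		panic(err)
	}
	b, _ := ioutil.ReadAll(r)
	fmt.Printf("%s\n", b) // "3456"
}
```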
@@ -1247,6 +1247,10 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 		return nil, errors.Wrap(err, "about failed")
 	}
 	q := drive.Quota
+	// On (some?) Onedrive sharepoints these are all 0 so return unknown in that case
+	if q.Total == 0 && q.Used == 0 && q.Deleted == 0 && q.Remaining == 0 {
+		return &fs.Usage{}, nil
+	}
 	usage = &fs.Usage{
 		Total: fs.NewUsageValue(q.Total), // quota of bytes that can be used
 		Used:  fs.NewUsageValue(q.Used),  // bytes in use
@@ -646,7 +646,6 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .

 // retryErrorCodes is a slice of error codes that we will retry
 var retryErrorCodes = []int{
-	400, // Bad request (seen in "Next token is expired")
 	401, // Unauthorized (seen in "Token has expired")
 	408, // Request Timeout
 	423, // Locked - get this on folders sometimes
@@ -104,8 +104,9 @@ type ItemResult struct {

 // Hashes contains the supported hashes
 type Hashes struct {
-	SHA1 string `json:"sha1"`
-	MD5  string `json:"md5"`
+	SHA1   string `json:"sha1"`
+	MD5    string `json:"md5"`
+	SHA256 string `json:"sha256"`
 }

 // UploadFileResponse is the response from /uploadfile
@@ -122,9 +122,19 @@ func init() {
 			Name: "hostname",
 			Help: `Hostname to connect to.

-This is normally set when rclone initially does the oauth connection.`,
+This is normally set when rclone initially does the oauth connection,
+however you will need to set it by hand if you are using remote config
+with rclone authorize.
+`,
 			Default:  defaultHostname,
 			Advanced: true,
+			Examples: []fs.OptionExample{{
+				Value: defaultHostname,
+				Help:  "Original/US region",
+			}, {
+				Value: "eapi.pcloud.com",
+				Help:  "EU region",
+			}},
 		}}...),
 	})
 }
@@ -875,6 +885,13 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {

 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
+	// EU region supports SHA1 and SHA256 (but rclone doesn't
+	// support SHA256 yet).
+	//
+	// https://forum.rclone.org/t/pcloud-to-local-no-hashes-in-common/19440
+	if f.opt.Hostname == "eapi.pcloud.com" {
+		return hash.Set(hash.SHA1)
+	}
 	return hash.Set(hash.MD5 | hash.SHA1)
 }
backend/s3/s3.go
@@ -58,7 +58,7 @@ import (
 func init() {
 	fs.Register(&fs.RegInfo{
 		Name:        "s3",
-		Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)",
+		Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)",
 		NewFs:       NewFs,
 		CommandHelp: commandHelp,
 		Options: []fs.Option{{
@@ -94,6 +94,9 @@ func init() {
 		}, {
 			Value: "StackPath",
 			Help:  "StackPath Object Storage",
+		}, {
+			Value: "TencentCOS",
+			Help:  "Tencent Cloud Object Storage (COS)",
 		}, {
 			Value: "Wasabi",
 			Help:  "Wasabi Object Storage",
@@ -119,6 +122,9 @@ func init() {
 			Name: "secret_access_key",
 			Help: "AWS Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
 		}, {
+			// References:
+			// 1. https://docs.aws.amazon.com/general/latest/gr/rande.html
+			// 2. https://docs.aws.amazon.com/general/latest/gr/s3.html
 			Name:     "region",
 			Help:     "Region to connect to.",
 			Provider: "AWS",
@@ -128,12 +134,12 @@ func init() {
 		}, {
 			Value: "us-east-2",
 			Help:  "US East (Ohio) Region\nNeeds location constraint us-east-2.",
-		}, {
-			Value: "us-west-2",
-			Help:  "US West (Oregon) Region\nNeeds location constraint us-west-2.",
 		}, {
 			Value: "us-west-1",
 			Help:  "US West (Northern California) Region\nNeeds location constraint us-west-1.",
+		}, {
+			Value: "us-west-2",
+			Help:  "US West (Oregon) Region\nNeeds location constraint us-west-2.",
 		}, {
 			Value: "ca-central-1",
 			Help:  "Canada (Central) Region\nNeeds location constraint ca-central-1.",
@@ -143,9 +149,15 @@ func init() {
 		}, {
 			Value: "eu-west-2",
 			Help:  "EU (London) Region\nNeeds location constraint eu-west-2.",
+		}, {
+			Value: "eu-west-3",
+			Help:  "EU (Paris) Region\nNeeds location constraint eu-west-3.",
 		}, {
 			Value: "eu-north-1",
 			Help:  "EU (Stockholm) Region\nNeeds location constraint eu-north-1.",
+		}, {
+			Value: "eu-south-1",
+			Help:  "EU (Milan) Region\nNeeds location constraint eu-south-1.",
 		}, {
 			Value: "eu-central-1",
 			Help:  "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.",
@@ -161,15 +173,36 @@ func init() {
 		}, {
 			Value: "ap-northeast-2",
 			Help:  "Asia Pacific (Seoul)\nNeeds location constraint ap-northeast-2.",
+		}, {
+			Value: "ap-northeast-3",
+			Help:  "Asia Pacific (Osaka-Local)\nNeeds location constraint ap-northeast-3.",
 		}, {
 			Value: "ap-south-1",
 			Help:  "Asia Pacific (Mumbai)\nNeeds location constraint ap-south-1.",
 		}, {
 			Value: "ap-east-1",
-			Help:  "Asia Patific (Hong Kong) Region\nNeeds location constraint ap-east-1.",
+			Help:  "Asia Pacific (Hong Kong) Region\nNeeds location constraint ap-east-1.",
 		}, {
 			Value: "sa-east-1",
 			Help:  "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
+		}, {
+			Value: "me-south-1",
+			Help:  "Middle East (Bahrain) Region\nNeeds location constraint me-south-1.",
+		}, {
+			Value: "af-south-1",
+			Help:  "Africa (Cape Town) Region\nNeeds location constraint af-south-1.",
+		}, {
+			Value: "cn-north-1",
+			Help:  "China (Beijing) Region\nNeeds location constraint cn-north-1.",
+		}, {
+			Value: "cn-northwest-1",
+			Help:  "China (Ningxia) Region\nNeeds location constraint cn-northwest-1.",
+		}, {
+			Value: "us-gov-east-1",
+			Help:  "AWS GovCloud (US-East) Region\nNeeds location constraint us-gov-east-1.",
+		}, {
+			Value: "us-gov-west-1",
+			Help:  "AWS GovCloud (US) Region\nNeeds location constraint us-gov-west-1.",
 		}},
 	}, {
 		Name: "region",
@@ -185,7 +218,7 @@ func init() {
 	}, {
 		Name:     "region",
 		Help:     "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.",
-		Provider: "!AWS,Alibaba,Scaleway",
+		Provider: "!AWS,Alibaba,Scaleway,TencentCOS",
 		Examples: []fs.OptionExample{{
 			Value: "",
 			Help:  "Use this if unsure. Will use v4 signatures and an empty region.",
@@ -202,107 +235,191 @@ func init() {
 		Help:     "Endpoint for IBM COS S3 API.\nSpecify if using an IBM COS On Premise.",
 		Provider: "IBMCOS",
 		Examples: []fs.OptionExample{{
-			Value: "s3-api.us-geo.objectstorage.softlayer.net",
+			Value: "s3.us.cloud-object-storage.appdomain.cloud",
 			Help:  "US Cross Region Endpoint",
 		}, {
-			Value: "s3-api.dal.us-geo.objectstorage.softlayer.net",
+			Value: "s3.dal.us.cloud-object-storage.appdomain.cloud",
 			Help:  "US Cross Region Dallas Endpoint",
 		}, {
-			Value: "s3-api.wdc-us-geo.objectstorage.softlayer.net",
+			Value: "s3.wdc.us.cloud-object-storage.appdomain.cloud",
 			Help:  "US Cross Region Washington DC Endpoint",
 		}, {
-			Value: "s3-api.sjc-us-geo.objectstorage.softlayer.net",
+			Value: "s3.sjc.us.cloud-object-storage.appdomain.cloud",
 			Help:  "US Cross Region San Jose Endpoint",
 		}, {
-			Value: "s3-api.us-geo.objectstorage.service.networklayer.com",
+			Value: "s3.private.us.cloud-object-storage.appdomain.cloud",
 			Help:  "US Cross Region Private Endpoint",
 		}, {
-			Value: "s3-api.dal-us-geo.objectstorage.service.networklayer.com",
+			Value: "s3.private.dal.us.cloud-object-storage.appdomain.cloud",
 			Help:  "US Cross Region Dallas Private Endpoint",
 		}, {
-			Value: "s3-api.wdc-us-geo.objectstorage.service.networklayer.com",
+			Value: "s3.private.wdc.us.cloud-object-storage.appdomain.cloud",
 			Help:  "US Cross Region Washington DC Private Endpoint",
 		}, {
-			Value: "s3-api.sjc-us-geo.objectstorage.service.networklayer.com",
+			Value: "s3.private.sjc.us.cloud-object-storage.appdomain.cloud",
 			Help:  "US Cross Region San Jose Private Endpoint",
 		}, {
-			Value: "s3.us-east.objectstorage.softlayer.net",
+			Value: "s3.us-east.cloud-object-storage.appdomain.cloud",
 			Help:  "US Region East Endpoint",
 		}, {
-			Value: "s3.us-east.objectstorage.service.networklayer.com",
+			Value: "s3.private.us-east.cloud-object-storage.appdomain.cloud",
 			Help:  "US Region East Private Endpoint",
 		}, {
-			Value: "s3.us-south.objectstorage.softlayer.net",
+			Value: "s3.us-south.cloud-object-storage.appdomain.cloud",
 			Help:  "US Region South Endpoint",
 		}, {
-			Value: "s3.us-south.objectstorage.service.networklayer.com",
+			Value: "s3.private.us-south.cloud-object-storage.appdomain.cloud",
 			Help:  "US Region South Private Endpoint",
 		}, {
-			Value: "s3.eu-geo.objectstorage.softlayer.net",
+			Value: "s3.eu.cloud-object-storage.appdomain.cloud",
 			Help:  "EU Cross Region Endpoint",
 		}, {
-			Value: "s3.fra-eu-geo.objectstorage.softlayer.net",
+			Value: "s3.fra.eu.cloud-object-storage.appdomain.cloud",
 			Help:  "EU Cross Region Frankfurt Endpoint",
 		}, {
-			Value: "s3.mil-eu-geo.objectstorage.softlayer.net",
+			Value: "s3.mil.eu.cloud-object-storage.appdomain.cloud",
 			Help:  "EU Cross Region Milan Endpoint",
 		}, {
-			Value: "s3.ams-eu-geo.objectstorage.softlayer.net",
+			Value: "s3.ams.eu.cloud-object-storage.appdomain.cloud",
 			Help:  "EU Cross Region Amsterdam Endpoint",
 		}, {
-			Value: "s3.eu-geo.objectstorage.service.networklayer.com",
+			Value: "s3.private.eu.cloud-object-storage.appdomain.cloud",
 			Help:  "EU Cross Region Private Endpoint",
 		}, {
-			Value: "s3.fra-eu-geo.objectstorage.service.networklayer.com",
+			Value: "s3.private.fra.eu.cloud-object-storage.appdomain.cloud",
 			Help:  "EU Cross Region Frankfurt Private Endpoint",
 		}, {
-			Value: "s3.mil-eu-geo.objectstorage.service.networklayer.com",
+			Value: "s3.private.mil.eu.cloud-object-storage.appdomain.cloud",
 			Help:  "EU Cross Region Milan Private Endpoint",
 		}, {
-			Value: "s3.ams-eu-geo.objectstorage.service.networklayer.com",
+			Value: "s3.private.ams.eu.cloud-object-storage.appdomain.cloud",
 			Help:  "EU Cross Region Amsterdam Private Endpoint",
 		}, {
-			Value: "s3.eu-gb.objectstorage.softlayer.net",
+			Value: "s3.eu-gb.cloud-object-storage.appdomain.cloud",
 			Help:  "Great Britain Endpoint",
 		}, {
-			Value: "s3.eu-gb.objectstorage.service.networklayer.com",
+			Value: "s3.private.eu-gb.cloud-object-storage.appdomain.cloud",
 			Help:  "Great Britain Private Endpoint",
 		}, {
-			Value: "s3.ap-geo.objectstorage.softlayer.net",
+			Value: "s3.eu-de.cloud-object-storage.appdomain.cloud",
+			Help:  "EU Region DE Endpoint",
+		}, {
+			Value: "s3.private.eu-de.cloud-object-storage.appdomain.cloud",
+			Help:  "EU Region DE Private Endpoint",
+		}, {
+			Value: "s3.ap.cloud-object-storage.appdomain.cloud",
 			Help:  "APAC Cross Regional Endpoint",
 		}, {
-			Value: "s3.tok-ap-geo.objectstorage.softlayer.net",
+			Value: "s3.tok.ap.cloud-object-storage.appdomain.cloud",
 			Help:  "APAC Cross Regional Tokyo Endpoint",
 		}, {
-			Value: "s3.hkg-ap-geo.objectstorage.softlayer.net",
+			Value: "s3.hkg.ap.cloud-object-storage.appdomain.cloud",
 			Help:  "APAC Cross Regional HongKong Endpoint",
 		}, {
-			Value: "s3.seo-ap-geo.objectstorage.softlayer.net",
+			Value: "s3.seo.ap.cloud-object-storage.appdomain.cloud",
 			Help:  "APAC Cross Regional Seoul Endpoint",
 		}, {
-			Value: "s3.ap-geo.objectstorage.service.networklayer.com",
+			Value: "s3.private.ap.cloud-object-storage.appdomain.cloud",
 			Help:  "APAC Cross Regional Private Endpoint",
 		}, {
-			Value: "s3.tok-ap-geo.objectstorage.service.networklayer.com",
+			Value: "s3.private.tok.ap.cloud-object-storage.appdomain.cloud",
 			Help:  "APAC Cross Regional Tokyo Private Endpoint",
 		}, {
-			Value: "s3.hkg-ap-geo.objectstorage.service.networklayer.com",
+			Value: "s3.private.hkg.ap.cloud-object-storage.appdomain.cloud",
 			Help:  "APAC Cross Regional HongKong Private Endpoint",
 		}, {
-			Value: "s3.seo-ap-geo.objectstorage.service.networklayer.com",
+			Value: "s3.private.seo.ap.cloud-object-storage.appdomain.cloud",
 			Help:  "APAC Cross Regional Seoul Private Endpoint",
 		}, {
-			Value: "s3.mel01.objectstorage.softlayer.net",
+			Value: "s3.jp-tok.cloud-object-storage.appdomain.cloud",
+			Help:  "APAC Region Japan Endpoint",
+		}, {
+			Value: "s3.private.jp-tok.cloud-object-storage.appdomain.cloud",
+			Help:  "APAC Region Japan Private Endpoint",
+		}, {
+			Value: "s3.au-syd.cloud-object-storage.appdomain.cloud",
+			Help:  "APAC Region Australia Endpoint",
+		}, {
+			Value: "s3.private.au-syd.cloud-object-storage.appdomain.cloud",
+			Help:  "APAC Region Australia Private Endpoint",
+		}, {
+			Value: "s3.ams03.cloud-object-storage.appdomain.cloud",
+			Help:  "Amsterdam Single Site Endpoint",
+		}, {
+			Value: "s3.private.ams03.cloud-object-storage.appdomain.cloud",
+			Help:  "Amsterdam Single Site Private Endpoint",
+		}, {
+			Value: "s3.che01.cloud-object-storage.appdomain.cloud",
+			Help:  "Chennai Single Site Endpoint",
+		}, {
+			Value: "s3.private.che01.cloud-object-storage.appdomain.cloud",
+			Help:  "Chennai Single Site Private Endpoint",
+		}, {
+			Value: "s3.mel01.cloud-object-storage.appdomain.cloud",
 			Help:  "Melbourne Single Site Endpoint",
 		}, {
-			Value: "s3.mel01.objectstorage.service.networklayer.com",
+			Value: "s3.private.mel01.cloud-object-storage.appdomain.cloud",
 			Help:  "Melbourne Single Site Private Endpoint",
 		}, {
-			Value: "s3.tor01.objectstorage.softlayer.net",
+			Value: "s3.osl01.cloud-object-storage.appdomain.cloud",
+			Help:  "Oslo Single Site Endpoint",
+		}, {
+			Value: "s3.private.osl01.cloud-object-storage.appdomain.cloud",
+			Help:  "Oslo Single Site Private Endpoint",
+		}, {
+			Value: "s3.tor01.cloud-object-storage.appdomain.cloud",
 			Help:  "Toronto Single Site Endpoint",
 		}, {
-			Value: "s3.tor01.objectstorage.service.networklayer.com",
+			Value: "s3.private.tor01.cloud-object-storage.appdomain.cloud",
 			Help:  "Toronto Single Site Private Endpoint",
 		}, {
+			Value: "s3.seo01.cloud-object-storage.appdomain.cloud",
+			Help:  "Seoul Single Site Endpoint",
+		}, {
+			Value: "s3.private.seo01.cloud-object-storage.appdomain.cloud",
+			Help:  "Seoul Single Site Private Endpoint",
+		}, {
+			Value: "s3.mon01.cloud-object-storage.appdomain.cloud",
+			Help:  "Montreal Single Site Endpoint",
+		}, {
+			Value: "s3.private.mon01.cloud-object-storage.appdomain.cloud",
+			Help:  "Montreal Single Site Private Endpoint",
+		}, {
+			Value: "s3.mex01.cloud-object-storage.appdomain.cloud",
+			Help:  "Mexico Single Site Endpoint",
+		}, {
+			Value: "s3.private.mex01.cloud-object-storage.appdomain.cloud",
+			Help:  "Mexico Single Site Private Endpoint",
+		}, {
+			Value: "s3.sjc04.cloud-object-storage.appdomain.cloud",
+			Help:  "San Jose Single Site Endpoint",
+		}, {
+			Value: "s3.private.sjc04.cloud-object-storage.appdomain.cloud",
+			Help:  "San Jose Single Site Private Endpoint",
+		}, {
+			Value: "s3.mil01.cloud-object-storage.appdomain.cloud",
+			Help:  "Milan Single Site Endpoint",
+		}, {
+			Value: "s3.private.mil01.cloud-object-storage.appdomain.cloud",
+			Help:  "Milan Single Site Private Endpoint",
+		}, {
+			Value: "s3.hkg02.cloud-object-storage.appdomain.cloud",
+			Help:  "Hong Kong Single Site Endpoint",
+		}, {
+			Value: "s3.private.hkg02.cloud-object-storage.appdomain.cloud",
+			Help:  "Hong Kong Single Site Private Endpoint",
+		}, {
+			Value: "s3.par01.cloud-object-storage.appdomain.cloud",
+			Help:  "Paris Single Site Endpoint",
+		}, {
+			Value: "s3.private.par01.cloud-object-storage.appdomain.cloud",
+			Help:  "Paris Single Site Private Endpoint",
+		}, {
+			Value: "s3.sng01.cloud-object-storage.appdomain.cloud",
+			Help:  "Singapore Single Site Endpoint",
+		}, {
+			Value: "s3.private.sng01.cloud-object-storage.appdomain.cloud",
+			Help:  "Singapore Single Site Private Endpoint",
 		}},
 	}, {
 		// oss endpoints: https://help.aliyun.com/document_detail/31837.html
@@ -392,10 +509,73 @@ func init() {
 			Value: "s3.eu-central-1.stackpathstorage.com",
 			Help:  "EU Endpoint",
 		}},
+	}, {
+		// cos endpoints: https://intl.cloud.tencent.com/document/product/436/6224
+		Name:     "endpoint",
+		Help:     "Endpoint for Tencent COS API.",
+		Provider: "TencentCOS",
+		Examples: []fs.OptionExample{{
+			Value: "cos.ap-beijing.myqcloud.com",
+			Help:  "Beijing Region.",
+		}, {
+			Value: "cos.ap-nanjing.myqcloud.com",
+			Help:  "Nanjing Region.",
+		}, {
+			Value: "cos.ap-shanghai.myqcloud.com",
+			Help:  "Shanghai Region.",
+		}, {
+			Value: "cos.ap-guangzhou.myqcloud.com",
+			Help:  "Guangzhou Region.",
+		}, {
+			Value: "cos.ap-nanjing.myqcloud.com",
+			Help:  "Nanjing Region.",
+		}, {
+			Value: "cos.ap-chengdu.myqcloud.com",
+			Help:  "Chengdu Region.",
+		}, {
+			Value: "cos.ap-chongqing.myqcloud.com",
+			Help:  "Chongqing Region.",
+		}, {
+			Value: "cos.ap-hongkong.myqcloud.com",
+			Help:  "Hong Kong (China) Region.",
+		}, {
+			Value: "cos.ap-singapore.myqcloud.com",
+			Help:  "Singapore Region.",
+		}, {
+			Value: "cos.ap-mumbai.myqcloud.com",
+			Help:  "Mumbai Region.",
+		}, {
+			Value: "cos.ap-seoul.myqcloud.com",
+			Help:  "Seoul Region.",
+		}, {
+			Value: "cos.ap-bangkok.myqcloud.com",
+			Help:  "Bangkok Region.",
+		}, {
+			Value: "cos.ap-tokyo.myqcloud.com",
+			Help:  "Tokyo Region.",
+		}, {
+			Value: "cos.na-siliconvalley.myqcloud.com",
+			Help:  "Silicon Valley Region.",
+		}, {
+			Value: "cos.na-ashburn.myqcloud.com",
+			Help:  "Virginia Region.",
+		}, {
+			Value: "cos.na-toronto.myqcloud.com",
+			Help:  "Toronto Region.",
+		}, {
+			Value: "cos.eu-frankfurt.myqcloud.com",
+			Help:  "Frankfurt Region.",
+		}, {
+			Value: "cos.eu-moscow.myqcloud.com",
+			Help:  "Moscow Region.",
+		}, {
+			Value: "cos.accelerate.myqcloud.com",
+			Help:  "Use Tencent COS Accelerate Endpoint.",
+		}},
 	}, {
 		Name:     "endpoint",
 		Help:     "Endpoint for S3 API.\nRequired when using an S3 clone.",
-		Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath",
+		Provider: "!AWS,IBMCOS,TencentCOS,Alibaba,Scaleway,StackPath",
 		Examples: []fs.OptionExample{{
 			Value: "objects-us-east-1.dream.io",
 			Help:  "Dream Objects endpoint",
@@ -435,12 +615,12 @@ func init() {
 		}, {
 			Value: "us-east-2",
 			Help:  "US East (Ohio) Region.",
-		}, {
-			Value: "us-west-2",
-			Help:  "US West (Oregon) Region.",
 		}, {
 			Value: "us-west-1",
 			Help:  "US West (Northern California) Region.",
+		}, {
+			Value: "us-west-2",
+			Help:  "US West (Oregon) Region.",
 		}, {
 			Value: "ca-central-1",
 			Help:  "Canada (Central) Region.",
@@ -450,9 +630,15 @@ func init() {
 		}, {
 			Value: "eu-west-2",
 			Help:  "EU (London) Region.",
+		}, {
+			Value: "eu-west-3",
+			Help:  "EU (Paris) Region.",
 		}, {
 			Value: "eu-north-1",
 			Help:  "EU (Stockholm) Region.",
+		}, {
+			Value: "eu-south-1",
+			Help:  "EU (Milan) Region.",
 		}, {
 			Value: "EU",
 			Help:  "EU Region.",
@@ -467,16 +653,37 @@ func init() {
 			Help: "Asia Pacific (Tokyo) Region.",
 		}, {
 			Value: "ap-northeast-2",
-			Help:  "Asia Pacific (Seoul)",
+			Help:  "Asia Pacific (Seoul) Region.",
+		}, {
+			Value: "ap-northeast-3",
+			Help:  "Asia Pacific (Osaka-Local) Region.",
 		}, {
 			Value: "ap-south-1",
-			Help:  "Asia Pacific (Mumbai)",
+			Help:  "Asia Pacific (Mumbai) Region.",
 		}, {
 			Value: "ap-east-1",
-			Help:  "Asia Pacific (Hong Kong)",
+			Help:  "Asia Pacific (Hong Kong) Region.",
 		}, {
 			Value: "sa-east-1",
 			Help:  "South America (Sao Paulo) Region.",
+		}, {
+			Value: "me-south-1",
+			Help:  "Middle East (Bahrain) Region.",
+		}, {
+			Value: "af-south-1",
+			Help:  "Africa (Cape Town) Region.",
+		}, {
+			Value: "cn-north-1",
+			Help:  "China (Beijing) Region",
+		}, {
+			Value: "cn-northwest-1",
+			Help:  "China (Ningxia) Region.",
+		}, {
+			Value: "us-gov-east-1",
+			Help:  "AWS GovCloud (US-East) Region.",
 		}, {
 			Value: "us-gov-west-1",
 			Help:  "AWS GovCloud (US) Region.",
 		}},
 	}, {
 		Name: "location_constraint",
@@ -582,7 +789,7 @@ func init() {
 	}, {
 		Name:     "location_constraint",
 		Help:     "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.",
-		Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath",
+		Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath,TencentCOS",
 	}, {
 		Name: "acl",
 		Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -594,9 +801,13 @@ For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview
 Note that this ACL is applied when server side copying objects as S3
 doesn't copy the ACL from the source but rather writes a fresh one.`,
 		Examples: []fs.OptionExample{{
+			Value:    "default",
+			Help:     "Owner gets Full_CONTROL. No one else has access rights (default).",
+			Provider: "TencentCOS",
+		}, {
 			Value:    "private",
 			Help:     "Owner gets FULL_CONTROL. No one else has access rights (default).",
-			Provider: "!IBMCOS",
+			Provider: "!IBMCOS,TencentCOS",
 		}, {
 			Value: "public-read",
 			Help:  "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
@@ -758,6 +969,24 @@ isn't set then "acl" is used instead.`,
 			Value: "STANDARD_IA",
 			Help:  "Infrequent access storage mode.",
 		}},
+	}, {
+		// Mapping from here: https://intl.cloud.tencent.com/document/product/436/30925
+		Name:     "storage_class",
+		Help:     "The storage class to use when storing new objects in Tencent COS.",
+		Provider: "TencentCOS",
+		Examples: []fs.OptionExample{{
+			Value: "",
+			Help:  "Default",
+		}, {
+			Value: "STANDARD",
+			Help:  "Standard storage class",
+		}, {
+			Value: "ARCHIVE",
+			Help:  "Archive storage mode.",
+		}, {
+			Value: "STANDARD_IA",
+			Help:  "Infrequent access storage mode.",
+		}},
 	}, {
 		// Mapping from here: https://www.scaleway.com/en/docs/object-storage-glacier/#-Scaleway-Storage-Classes
 		Name: "storage_class",
@@ -891,7 +1120,7 @@ if false then rclone will use virtual path style. See [the AWS S3
docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
for more info.

-Some providers (eg AWS, Aliyun OSS or Netease COS) require this set to
+Some providers (eg AWS, Aliyun OSS, Netease COS or Tencent COS) require this set to
false - rclone will do this automatically based on the provider
setting.`,
 		Default: true,
@@ -980,10 +1209,12 @@ This option controls how often unused buffers will be removed from the pool.`,

 // Constants
 const (
-	metaMtime      = "Mtime"                // the meta key to store mtime in - eg X-Amz-Meta-Mtime
-	metaMD5Hash    = "Md5chksum"            // the meta key to store md5hash in
-	maxSizeForCopy = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY
-	maxUploadParts = 10000                  // maximum allowed number of parts in a multi-part upload
+	metaMtime   = "Mtime"     // the meta key to store mtime in - eg X-Amz-Meta-Mtime
+	metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
+	// The maximum size of object we can COPY - this should be 5GiB but is < 5GB for b2 compatibility
+	// See https://forum.rclone.org/t/copying-files-within-a-b2-bucket/16680/76
+	maxSizeForCopy = 4768 * 1024 * 1024
+	maxUploadParts = 10000 // maximum allowed number of parts in a multi-part upload
 	minChunkSize        = fs.SizeSuffix(1024 * 1024 * 5)
 	defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
 	maxUploadCutoff     = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
@@ -1219,7 +1450,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
 	if opt.Region == "" {
 		opt.Region = "us-east-1"
 	}
-	if opt.Provider == "AWS" || opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.Provider == "Scaleway" || opt.UseAccelerateEndpoint {
+	if opt.Provider == "AWS" || opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.Provider == "Scaleway" || opt.Provider == "TencentCOS" || opt.UseAccelerateEndpoint {
 		opt.ForcePathStyle = false
 	}
 	if opt.Provider == "Scaleway" && opt.MaxUploadParts > 1000 {
@@ -1501,7 +1732,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 	//
 	// So we enable only on providers we know supports it properly, all others can retry when a
 	// XML Syntax error is detected.
-	var urlEncodeListings = (f.opt.Provider == "AWS" || f.opt.Provider == "Wasabi" || f.opt.Provider == "Alibaba" || f.opt.Provider == "Minio")
+	var urlEncodeListings = (f.opt.Provider == "AWS" || f.opt.Provider == "Wasabi" || f.opt.Provider == "Alibaba" || f.opt.Provider == "Minio" || f.opt.Provider == "TencentCOS")
 	for {
 		// FIXME need to implement ALL loop
 		req := s3.ListObjectsInput{
@@ -1883,7 +2114,7 @@ func pathEscape(s string) string {
 //
 // It adds the boiler plate to the req passed in and calls the s3
 // method
-func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPath, srcBucket, srcPath string, srcSize int64) error {
+func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPath, srcBucket, srcPath string, src *Object) error {
 	req.Bucket = &dstBucket
 	req.ACL = &f.opt.ACL
 	req.Key = &dstPath
@@ -1899,8 +2130,8 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa
 		req.StorageClass = &f.opt.StorageClass
 	}

-	if srcSize >= int64(f.opt.CopyCutoff) {
-		return f.copyMultipart(ctx, req, dstBucket, dstPath, srcBucket, srcPath, srcSize)
+	if src.bytes >= int64(f.opt.CopyCutoff) {
+		return f.copyMultipart(ctx, req, dstBucket, dstPath, srcBucket, srcPath, src)
 	}
 	return f.pacer.Call(func() (bool, error) {
 		_, err := f.c.CopyObjectWithContext(ctx, req)
@@ -1921,14 +2152,33 @@ func calculateRange(partSize, partIndex, numParts, totalSize int64) string {
 	return fmt.Sprintf("bytes=%v-%v", start, ends)
 }

-func (f *Fs) copyMultipart(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPath, srcBucket, srcPath string, srcSize int64) (err error) {
+func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dstBucket, dstPath, srcBucket, srcPath string, src *Object) (err error) {
+	info, err := src.headObject(ctx)
+	if err != nil {
+		return err
+	}
+
+	req := &s3.CreateMultipartUploadInput{}
+
+	// Fill in the request from the head info
+	structs.SetFrom(req, info)
+
+	// If copy metadata was set then set the Metadata to that read
+	// from the head request
+	if aws.StringValue(copyReq.MetadataDirective) == s3.MetadataDirectiveCopy {
+		copyReq.Metadata = info.Metadata
+	}
+
+	// Overwrite any from the copyReq
+	structs.SetFrom(req, copyReq)
+
+	req.Bucket = &dstBucket
+	req.Key = &dstPath
+
 	var cout *s3.CreateMultipartUploadOutput
 	if err := f.pacer.Call(func() (bool, error) {
 		var err error
-		cout, err = f.c.CreateMultipartUploadWithContext(ctx, &s3.CreateMultipartUploadInput{
-			Bucket: &dstBucket,
-			Key:    &dstPath,
-		})
+		cout, err = f.c.CreateMultipartUploadWithContext(ctx, req)
 		return f.shouldRetry(err)
 	}); err != nil {
 		return err
@@ -1937,7 +2187,7 @@ func (f *Fs) copyMultipart(ctx context.Context, req *s3.CopyObjectInput, dstBuck

 	defer atexit.OnError(&err, func() {
 		// Try to abort the upload, but ignore the error.
-		fs.Debugf(nil, "Cancelling multipart copy")
+		fs.Debugf(src, "Cancelling multipart copy")
 		_ = f.pacer.Call(func() (bool, error) {
 			_, err := f.c.AbortMultipartUploadWithContext(context.Background(), &s3.AbortMultipartUploadInput{
 				Bucket: &dstBucket,
@@ -1949,33 +2199,23 @@ func (f *Fs) copyMultipart(ctx context.Context, req *s3.CopyObjectInput, dstBuck
 		})
 	})()

+	srcSize := src.bytes
 	partSize := int64(f.opt.CopyCutoff)
 	numParts := (srcSize-1)/partSize + 1

+	fs.Debugf(src, "Starting multipart copy with %d parts", numParts)
+
 	var parts []*s3.CompletedPart
 	for partNum := int64(1); partNum <= numParts; partNum++ {
 		if err := f.pacer.Call(func() (bool, error) {
 			partNum := partNum
-			uploadPartReq := &s3.UploadPartCopyInput{
-				Bucket:          &dstBucket,
-				Key:             &dstPath,
-				PartNumber:      &partNum,
-				UploadId:        uid,
-				CopySourceRange: aws.String(calculateRange(partSize, partNum-1, numParts, srcSize)),
-				// Args copy from req
-				CopySource:                     req.CopySource,
-				CopySourceIfMatch:              req.CopySourceIfMatch,
-				CopySourceIfModifiedSince:      req.CopySourceIfModifiedSince,
-				CopySourceIfNoneMatch:          req.CopySourceIfNoneMatch,
-				CopySourceIfUnmodifiedSince:    req.CopySourceIfUnmodifiedSince,
-				CopySourceSSECustomerAlgorithm: req.CopySourceSSECustomerAlgorithm,
-				CopySourceSSECustomerKey:       req.CopySourceSSECustomerKey,
-				CopySourceSSECustomerKeyMD5:    req.CopySourceSSECustomerKeyMD5,
-				RequestPayer:                   req.RequestPayer,
-				SSECustomerAlgorithm:           req.SSECustomerAlgorithm,
-				SSECustomerKey:                 req.SSECustomerKey,
-				SSECustomerKeyMD5:              req.SSECustomerKeyMD5,
-			}
+			uploadPartReq := &s3.UploadPartCopyInput{}
+			structs.SetFrom(uploadPartReq, copyReq)
+			uploadPartReq.Bucket = &dstBucket
+			uploadPartReq.Key = &dstPath
+			uploadPartReq.PartNumber = &partNum
+			uploadPartReq.UploadId = uid
+			uploadPartReq.CopySourceRange = aws.String(calculateRange(partSize, partNum-1, numParts, srcSize))
 			uout, err := f.c.UploadPartCopyWithContext(ctx, uploadPartReq)
 			if err != nil {
 				return f.shouldRetry(err)
@@ -2028,7 +2268,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	req := s3.CopyObjectInput{
 		MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
 	}
-	err = f.copy(ctx, &req, dstBucket, dstPath, srcBucket, srcPath, srcObj.Size())
+	err = f.copy(ctx, &req, dstBucket, dstPath, srcBucket, srcPath, srcObj)
 	if err != nil {
 		return nil, err
 	}
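calculateRange hands each UploadPartCopy call a "bytes=start-end" window, with the last part simply shorter. Only the return line is visible in the hunk, so the body below is a plausible reconstruction of the arithmetic consistent with that line, shown standalone:

```go
package main

import "fmt"

// calculateRange builds an inclusive byte range for part partIndex
// (0-based) of numParts covering totalSize bytes.
func calculateRange(partSize, partIndex, numParts, totalSize int64) string {
	start := partIndex * partSize
	var ends string
	if partIndex == numParts-1 {
		if totalSize >= 1 {
			ends = fmt.Sprint(totalSize - 1)
		}
	} else {
		ends = fmt.Sprint(start + partSize - 1)
	}
	return fmt.Sprintf("bytes=%v-%v", start, ends)
}

func main() {
	const partSize, totalSize = 4, 10
	numParts := (totalSize-1)/partSize + 1 // same rounding as the diff
	for i := int64(0); i < numParts; i++ {
		fmt.Println(calculateRange(partSize, i, numParts, totalSize))
	}
	// bytes=0-3, bytes=4-7, bytes=8-9
}
```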
@@ -2425,19 +2665,12 @@ func (o *Object) Size() int64 {
 	return o.bytes
 }

-// readMetaData gets the metadata if it hasn't already been fetched
-//
-// it also sets the info
-func (o *Object) readMetaData(ctx context.Context) (err error) {
-	if o.meta != nil {
-		return nil
-	}
+func (o *Object) headObject(ctx context.Context) (resp *s3.HeadObjectOutput, err error) {
 	bucket, bucketPath := o.split()
 	req := s3.HeadObjectInput{
 		Bucket: &bucket,
 		Key:    &bucketPath,
 	}
-	var resp *s3.HeadObjectOutput
 	err = o.fs.pacer.Call(func() (bool, error) {
 		var err error
 		resp, err = o.fs.c.HeadObjectWithContext(ctx, &req)
@@ -2446,12 +2679,26 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 	if err != nil {
 		if awsErr, ok := err.(awserr.RequestFailure); ok {
 			if awsErr.StatusCode() == http.StatusNotFound {
-				return fs.ErrorObjectNotFound
+				return nil, fs.ErrorObjectNotFound
 			}
 		}
-		return err
+		return nil, err
 	}
 	o.fs.cache.MarkOK(bucket)
+	return resp, nil
+}
+
+// readMetaData gets the metadata if it hasn't already been fetched
+//
+// it also sets the info
+func (o *Object) readMetaData(ctx context.Context) (err error) {
+	if o.meta != nil {
+		return nil
+	}
+	resp, err := o.headObject(ctx)
+	if err != nil {
+		return err
+	}
 	var size int64
 	// Ignore missing Content-Length assuming it is 0
 	// Some versions of ceph do this due their apache proxies
@@ -2522,7 +2769,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 		Metadata:          o.meta,
 		MetadataDirective: aws.String(s3.MetadataDirectiveReplace), // replace metadata with that passed in
 	}
-	return o.fs.copy(ctx, &req, bucket, bucketPath, bucket, bucketPath, o.bytes)
+	return o.fs.copy(ctx, &req, bucket, bucketPath, bucket, bucketPath, o)
 }

 // Storable returns a boolean indicating if this object is storable
@@ -2962,7 +3209,7 @@ func (o *Object) SetTier(tier string) (err error) {
 		MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
 		StorageClass:      aws.String(tier),
 	}
-	err = o.fs.copy(ctx, &req, bucket, bucketPath, bucket, bucketPath, o.bytes)
+	err = o.fs.copy(ctx, &req, bucket, bucketPath, bucket, bucketPath, o)
 	if err != nil {
 		return err
 	}
@@ -46,7 +46,7 @@ type Library struct {
 	Encrypted bool   `json:"encrypted"`
 	Owner     string `json:"owner"`
 	ID        string `json:"id"`
-	Size      int    `json:"size"`
+	Size      int64  `json:"size"`
 	Name      string `json:"name"`
 	Modified  int64  `json:"mtime"`
 }

@@ -1004,7 +1004,7 @@ func (f *Fs) listLibraries(ctx context.Context) (entries fs.DirEntries, err erro

 	for _, library := range libraries {
 		d := fs.NewDir(library.Name, time.Unix(library.Modified, 0))
-		d.SetSize(int64(library.Size))
+		d.SetSize(library.Size)
 		entries = append(entries, d)
 	}
@@ -164,6 +164,18 @@ Home directory can be found in a shared folder called "home"
 			Default:  false,
 			Help:     "Set to skip any symlinks and any other non regular files.",
 			Advanced: true,
+		}, {
+			Name:     "subsystem",
+			Default:  "sftp",
+			Help:     "Specifies the SSH2 subsystem on the remote host.",
+			Advanced: true,
+		}, {
+			Name:    "server_command",
+			Default: "",
+			Help: `Specifies the path or command to run a sftp server on the remote host.
+
+The subsystem option is ignored when server_command is defined.`,
+			Advanced: true,
 		}},
 	}
 	fs.Register(fsi)
@@ -187,6 +199,8 @@ type Options struct {
 	Md5sumCommand  string `config:"md5sum_command"`
 	Sha1sumCommand string `config:"sha1sum_command"`
 	SkipLinks      bool   `config:"skip_links"`
+	Subsystem      string `config:"subsystem"`
+	ServerCommand  string `config:"server_command"`
 }

 // Fs stores the interface to the remote SFTP files
@@ -290,7 +304,7 @@ func (f *Fs) sftpConnection() (c *conn, err error) {
 	if err != nil {
 		return nil, errors.Wrap(err, "couldn't connect SSH")
 	}
-	c.sftpClient, err = sftp.NewClient(c.sshClient)
+	c.sftpClient, err = f.newSftpClient(c.sshClient)
 	if err != nil {
 		_ = c.sshClient.Close()
 		return nil, errors.Wrap(err, "couldn't initialise SFTP")
@@ -299,6 +313,35 @@ func (f *Fs) sftpConnection() (c *conn, err error) {
 	return c, nil
 }

+// Creates a new SFTP client on conn, using the specified subsystem
+// or sftp server, and zero or more option functions
+func (f *Fs) newSftpClient(conn *ssh.Client, opts ...sftp.ClientOption) (*sftp.Client, error) {
+	s, err := conn.NewSession()
+	if err != nil {
+		return nil, err
+	}
+	pw, err := s.StdinPipe()
+	if err != nil {
+		return nil, err
+	}
+	pr, err := s.StdoutPipe()
+	if err != nil {
+		return nil, err
+	}
+
+	if f.opt.ServerCommand != "" {
+		if err := s.Start(f.opt.ServerCommand); err != nil {
+			return nil, err
+		}
+	} else {
+		if err := s.RequestSubsystem(f.opt.Subsystem); err != nil {
+			return nil, err
+		}
+	}
+
+	return sftp.NewClientPipe(pr, pw, opts...)
+}
+
 // Get an SFTP connection from the pool, or open a new one
 func (f *Fs) getSftpConnection() (c *conn, err error) {
 	f.poolMu.Lock()
@@ -1044,7 +1087,7 @@ func shellEscape(str string) string {
 func parseHash(bytes []byte) string {
 	// For strings with backslash *sum writes a leading \
 	// https://unix.stackexchange.com/q/313733/94054
-	return strings.Split(strings.TrimLeft(string(bytes), "\\"), " ")[0] // Split at hash / filename separator
+	return strings.ToLower(strings.Split(strings.TrimLeft(string(bytes), "\\"), " ")[0]) // Split at hash / filename separator / all convert to lowercase
 }

 // Parses the byte array output from the SSH session
||||
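A minimal standalone sketch of what the lower-casing change means for typical `*sum` output (the hash value below is illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

// same logic as the patched parseHash, reproduced for illustration
func parseHash(b []byte) string {
	return strings.ToLower(strings.Split(strings.TrimLeft(string(b), "\\"), " ")[0])
}

func main() {
	out := []byte("0CC175B9C0F1B6A831C399E269772661  file.txt\n")
	fmt.Println(parseHash(out)) // prints 0cc175b9c0f1b6a831c399e269772661
}
```
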

@@ -1335,7 +1335,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
    // object has been safely uploaded
    o.lastModified = modTime
    o.size = size
-   o.md5 = rxHeaders["ETag"]
+   o.md5 = rxHeaders["Etag"]
    o.contentType = contentType
    o.headers = headers
    if inCount != nil {

@@ -1,7 +1,6 @@
package union

import (
-   "bufio"
    "context"
    "io"
    "sync"

@@ -67,31 +66,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
        obj := entries[0].(*upstream.Object)
        return obj.Update(ctx, in, src, options...)
    }
-   // Get multiple reader
-   readers := make([]io.Reader, len(entries))
-   writers := make([]io.Writer, len(entries))
-   errs := Errors(make([]error, len(entries)+1))
-   for i := range entries {
-       r, w := io.Pipe()
-       bw := bufio.NewWriter(w)
-       readers[i], writers[i] = r, bw
-       defer func() {
-           err := w.Close()
-           if err != nil {
-               panic(err)
-           }
-       }()
-   }
-   go func() {
-       mw := io.MultiWriter(writers...)
-       es := make([]error, len(writers)+1)
-       _, es[len(es)-1] = io.Copy(mw, in)
-       for i, bw := range writers {
-           es[i] = bw.(*bufio.Writer).Flush()
-       }
-       errs[len(entries)] = Errors(es).Err()
-   }()
+   // Multi-threading
+   readers, errChan := multiReader(len(entries), in)
+   errs := Errors(make([]error, len(entries)+1))
    multithread(len(entries), func(i int) {
        if o, ok := entries[i].(*upstream.Object); ok {
            err := o.Update(ctx, readers[i], src, options...)

@@ -100,6 +77,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
            errs[i] = fs.ErrorNotAFile
        }
    })
+   errs[len(entries)] = <-errChan
    return errs.Err()
}

@@ -3,7 +3,6 @@ package policy

import (
    "context"
    "math/rand"
-   "time"

    "github.com/rclone/rclone/backend/union/upstream"
    "github.com/rclone/rclone/fs"

@@ -20,12 +19,10 @@ type EpRand struct {
}

func (p *EpRand) rand(upstreams []*upstream.Fs) *upstream.Fs {
-   rand.Seed(time.Now().Unix())
    return upstreams[rand.Intn(len(upstreams))]
}

func (p *EpRand) randEntries(entries []upstream.Entry) upstream.Entry {
-   rand.Seed(time.Now().Unix())
    return entries[rand.Intn(len(entries))]
}

@@ -145,11 +145,16 @@ func (f *Fs) Hashes() hash.Set {
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
    upstreams, err := f.create(ctx, dir)
-   if err == fs.ErrorObjectNotFound && dir != parentDir(dir) {
-       if err := f.Mkdir(ctx, parentDir(dir)); err != nil {
-           return err
-       }
-       upstreams, err = f.create(ctx, dir)
-   }
+   if err == fs.ErrorObjectNotFound {
+       if dir != parentDir(dir) {
+           if err := f.Mkdir(ctx, parentDir(dir)); err != nil {
+               return err
+           }
+           upstreams, err = f.create(ctx, dir)
+       } else if dir == "" {
+           // If root dirs not created then create them
+           upstreams, err = f.upstreams, nil
+       }
+   }
    if err != nil {
        return err

@@ -385,6 +390,37 @@ func (f *Fs) DirCacheFlush() {
    })
}

// Tee in into n outputs
//
// When finished read the error from the channel
func multiReader(n int, in io.Reader) ([]io.Reader, <-chan error) {
    readers := make([]io.Reader, n)
    pipeWriters := make([]*io.PipeWriter, n)
    writers := make([]io.Writer, n)
    errChan := make(chan error, 1)
    for i := range writers {
        r, w := io.Pipe()
        bw := bufio.NewWriter(w)
        readers[i], pipeWriters[i], writers[i] = r, w, bw
    }
    go func() {
        mw := io.MultiWriter(writers...)
        es := make([]error, 2*n+1)
        _, copyErr := io.Copy(mw, in)
        es[2*n] = copyErr
        // Flush the buffers
        for i, bw := range writers {
            es[i] = bw.(*bufio.Writer).Flush()
        }
        // Close the underlying pipes
        for i, pw := range pipeWriters {
            es[2*i] = pw.CloseWithError(copyErr)
        }
        errChan <- Errors(es).Err()
    }()
    return readers, errChan
}

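The same tee-with-pipes pattern in miniature, as a self-contained sketch (two readers instead of n, error plumbing simplified):

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
	"sync"
)

func main() {
	in := strings.NewReader("hello union")
	r1, w1 := io.Pipe()
	r2, w2 := io.Pipe()
	// copy the input once, fanning it out to both pipe writers
	go func() {
		_, err := io.Copy(io.MultiWriter(w1, w2), in)
		// closing with the copy error unblocks both readers
		_ = w1.CloseWithError(err)
		_ = w2.CloseWithError(err)
	}()
	var wg sync.WaitGroup
	for i, r := range []io.Reader{r1, r2} {
		wg.Add(1)
		go func(i int, r io.Reader) {
			defer wg.Done()
			b, _ := ioutil.ReadAll(r) // each reader must be drained concurrently
			fmt.Printf("reader %d got %q\n", i, b)
		}(i, r)
	}
	wg.Wait()
}
```
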
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bool, options ...fs.OpenOption) (fs.Object, error) {
    srcPath := src.Remote()
    upstreams, err := f.create(ctx, srcPath)

@@ -412,31 +448,9 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
        e, err := f.wrapEntries(u.WrapObject(o))
        return e.(*Object), err
    }
-   errs := Errors(make([]error, len(upstreams)+1))
-   // Get multiple reader
-   readers := make([]io.Reader, len(upstreams))
-   writers := make([]io.Writer, len(upstreams))
-   for i := range writers {
-       r, w := io.Pipe()
-       bw := bufio.NewWriter(w)
-       readers[i], writers[i] = r, bw
-       defer func() {
-           err := w.Close()
-           if err != nil {
-               panic(err)
-           }
-       }()
-   }
-   go func() {
-       mw := io.MultiWriter(writers...)
-       es := make([]error, len(writers)+1)
-       _, es[len(es)-1] = io.Copy(mw, in)
-       for i, bw := range writers {
-           es[i] = bw.(*bufio.Writer).Flush()
-       }
-       errs[len(upstreams)] = Errors(es).Err()
-   }()
+   // Multi-threading
+   readers, errChan := multiReader(len(upstreams), in)
+   errs := Errors(make([]error, len(upstreams)+1))
    objs := make([]upstream.Entry, len(upstreams))
    multithread(len(upstreams), func(i int) {
        u := upstreams[i]

@@ -453,6 +467,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
        }
        objs[i] = u.WrapObject(o)
    })
+   errs[len(upstreams)] = <-errChan
    err = errs.Err()
    if err != nil {
        return nil, err

@@ -808,6 +823,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    if err != nil {
        return nil, err
    }
+   fs.Debugf(f, "actionPolicy = %T, createPolicy = %T, searchPolicy = %T", f.actionPolicy, f.createPolicy, f.searchPolicy)
    var features = (&fs.Features{
        CaseInsensitive: true,
        DuplicateFiles:  false,

@@ -153,3 +153,29 @@ func TestPolicy2(t *testing.T) {
        UnimplementableObjectMethods: []string{"MimeType"},
    })
}

func TestPolicy3(t *testing.T) {
    if *fstest.RemoteName != "" {
        t.Skip("Skipping as -remote set")
    }
    tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-policy31")
    tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-policy32")
    tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-policy33")
    require.NoError(t, os.MkdirAll(tempdir1, 0744))
    require.NoError(t, os.MkdirAll(tempdir2, 0744))
    require.NoError(t, os.MkdirAll(tempdir3, 0744))
    upstreams := tempdir1 + " " + tempdir2 + " " + tempdir3
    name := "TestUnionPolicy3"
    fstests.Run(t, &fstests.Opt{
        RemoteName: name + ":",
        ExtraConfig: []fstests.ExtraConfigItem{
            {Name: name, Key: "type", Value: "union"},
            {Name: name, Key: "upstreams", Value: upstreams},
            {Name: name, Key: "action_policy", Value: "all"},
            {Name: name, Key: "create_policy", Value: "all"},
            {Name: name, Key: "search_policy", Value: "all"},
        },
        UnimplementableFsMethods:     []string{"OpenWriterAt", "DuplicateFiles"},
        UnimplementableObjectMethods: []string{"MimeType"},
    })
}

@@ -97,6 +97,7 @@ func New(remote, root string, cacheTime time.Duration) (*Fs, error) {
        return nil, err
    }
    f.Fs = myFs
+   cache.PinUntilFinalized(f.Fs, f)
    return f, err
}

@@ -1129,10 +1129,14 @@ func (o *Object) Storable() bool {
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
    var resp *http.Response
+   fs.FixRangeOption(options, o.size)
    opts := rest.Opts{
        Method:  "GET",
        Path:    o.filePath(),
        Options: options,
+       ExtraHeaders: map[string]string{
+           "Depth": "0",
+       },
    }
    err = o.fs.pacer.Call(func() (bool, error) {
        resp, err = o.fs.srv.Call(ctx, &opts)

@@ -36,6 +36,7 @@ var (
    cgo         = flag.Bool("cgo", false, "Use cgo for the build")
    noClean     = flag.Bool("no-clean", false, "Don't clean the build directory before running.")
    tags        = flag.String("tags", "", "Space separated list of build tags")
+   buildmode   = flag.String("buildmode", "", "Passed to go build -buildmode flag")
    compileOnly = flag.Bool("compile-only", false, "Just build the binary, not the zip.")
)

@@ -280,7 +281,7 @@ func stripVersion(goarch string) string {

// build the binary in dir returning success or failure
func compileArch(version, goos, goarch, dir string) bool {
-   log.Printf("Compiling %s/%s", goos, goarch)
+   log.Printf("Compiling %s/%s into %s", goos, goarch, dir)
    output := filepath.Join(dir, "rclone")
    if goos == "windows" {
        output += ".exe"

@@ -298,11 +299,17 @@ func compileArch(version, goos, goarch, dir string) bool {
        "go", "build",
        "--ldflags", "-s -X github.com/rclone/rclone/fs.Version=" + version,
        "-trimpath",
        "-i",
        "-o", output,
        "-tags", *tags,
-       "..",
    }
+   if *buildmode != "" {
+       args = append(args,
+           "-buildmode", *buildmode,
+       )
+   }
+   args = append(args,
+       "..",
+   )
    env := []string{
        "GOOS=" + goos,
        "GOARCH=" + stripVersion(goarch),

@@ -325,7 +332,7 @@ func compileArch(version, goos, goarch, dir string) bool {
    artifacts := []string{buildZip(dir)}
    // build a .deb and .rpm if appropriate
    if goos == "linux" {
-       artifacts = append(artifacts, buildDebAndRpm(dir, version, goarch)...)
+       artifacts = append(artifacts, buildDebAndRpm(dir, version, stripVersion(goarch))...)
    }
    if *copyAs != "" {
        for _, artifact := range artifacts {

@@ -15,10 +15,12 @@ description: |
vendor: "rclone"
homepage: "https://rclone.org"
license: "MIT"
-# No longer supported? See https://github.com/goreleaser/nfpm/issues/144
-# bindir: "/usr/bin"
-files:
-  ./rclone: "/usr/bin/rclone"
-  ./README.html: "/usr/share/doc/rclone/README.html"
-  ./README.txt: "/usr/share/doc/rclone/README.txt"
-  ./rclone.1: "/usr/share/man/man1/rclone.1"
+contents:
+  - src: ./rclone
+    dst: /usr/bin/rclone
+  - src: ./README.html
+    dst: /usr/share/doc/rclone/README.html
+  - src: ./README.txt
+    dst: /usr/share/doc/rclone/README.txt
+  - src: ./rclone.1
+    dst: /usr/share/man/man1/rclone.1

@@ -29,6 +29,7 @@ var (
func init() {
    cmd.Root.AddCommand(commandDefinition)
    cmdFlags := commandDefinition.Flags()
+   flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by downloading rather than with hash.")
    AddFlags(cmdFlags)
}

@@ -50,7 +51,7 @@ the source match the files in the destination, not the other way
around. This means that extra files in the destination that are not in
the source will not be detected.

-The |--differ|, |--missing-on-dst|, |--missing-on-src|, |--src-only|
+The |--differ|, |--missing-on-dst|, |--missing-on-src|, |--match|
and |--error| flags write paths, one per line, to the file name (or
stdout if it is |-|) supplied. What they write is described in the
help below. For example |--differ| will write all paths which are

@@ -14,7 +14,7 @@ func init() {

var commandDefinition = &cobra.Command{
    Use: "cleanup remote:path",
-   Short: `Clean up the remote if possible`,
+   Short: `Clean up the remote if possible.`,
    Long: `
Clean up the remote if possible. Empty the trash or delete old file
versions. Not supported by all remotes.

10 cmd/cmd.go
@@ -9,7 +9,6 @@ package cmd

import (
    "fmt"
    "log"
-   "math/rand"
    "os"
    "os/exec"
    "path"

@@ -35,6 +34,7 @@ import (
    "github.com/rclone/rclone/fs/rc/rcflags"
    "github.com/rclone/rclone/fs/rc/rcserver"
    "github.com/rclone/rclone/lib/atexit"
+   "github.com/rclone/rclone/lib/random"
    "github.com/spf13/cobra"
    "github.com/spf13/pflag"
)

@@ -89,8 +89,10 @@ func NewFsFile(remote string) (fs.Fs, string) {
    f, err := cache.Get(remote)
    switch err {
    case fs.ErrorIsFile:
+       cache.Pin(f) // pin indefinitely since it was on the CLI
        return f, path.Base(fsPath)
    case nil:
+       cache.Pin(f) // pin indefinitely since it was on the CLI
        return f, ""
    default:
        err = fs.CountError(err)

@@ -139,6 +141,7 @@ func newFsDir(remote string) fs.Fs {
        err = fs.CountError(err)
        log.Fatalf("Failed to create file system for %q: %v", remote, err)
    }
+   cache.Pin(f) // pin indefinitely since it was on the CLI
    return f
}

@@ -197,6 +200,8 @@ func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs
        _ = fs.CountError(err)
        log.Fatalf("Failed to create file system for destination %q: %v", dstRemote, err)
    }
+   cache.Pin(fdst) // pin indefinitely since it was on the CLI
    return
}

@@ -508,7 +512,9 @@ func AddBackendFlags() {

// Main runs rclone interpreting flags and commands out of os.Args
func Main() {
-   rand.Seed(time.Now().Unix())
+   if err := random.Seed(); err != nil {
+       log.Fatalf("Fatal error: %v", err)
+   }
    setupRootCommand(Root)
    AddBackendFlags()
    if err := Root.Execute(); err != nil {

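For context, a sketch of the kind of crypto-strength seeding `random.Seed` performs (an illustration of the approach, not the helper's exact code):

```go
package main

import (
	crand "crypto/rand"
	"encoding/binary"
	"fmt"
	"math/rand"
)

func cryptoSeed() error {
	var b [8]byte
	// draw 8 bytes from the OS CSPRNG and use them as the math/rand seed
	if _, err := crand.Read(b[:]); err != nil {
		return err
	}
	rand.Seed(int64(binary.LittleEndian.Uint64(b[:])))
	return nil
}

func main() {
	if err := cryptoSeed(); err != nil {
		panic(err)
	}
	fmt.Println(rand.Int63())
}
```
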
32 cmd/cmount/mount_brew.go (new file)
@@ -0,0 +1,32 @@
// Build for macos with the brew tag to handle the absence
// of fuse and print an appropriate error message

// +build brew
// +build darwin

package cmount

import (
    "github.com/pkg/errors"
    "github.com/rclone/rclone/cmd/mountlib"
    "github.com/rclone/rclone/vfs"
)

func init() {
    name := "mount"
    cmd := mountlib.NewMountCommand(name, false, mount)
    cmd.Aliases = append(cmd.Aliases, "cmount")
    mountlib.AddRc("cmount", mount)
}

// mount the file system
//
// The mount point will be ready when this returns.
//
// returns an error, and an error channel for the serve process to
// report an error when fusermount is called.
func mount(_ *vfs.VFS, _ string, _ *mountlib.Options) (<-chan error, func() error, error) {
    return nil, nil, errors.New("mount is not supported on MacOS when installed via Homebrew. " +
        "Please install the binaries available at https://rclone." +
        "org/downloads/ instead if you want to use the mount command")
}

@@ -1,6 +1,6 @@
// Build for cmount for unsupported platforms to stop go complaining
// about "no buildable Go source files "

-// +build !linux,!darwin,!freebsd,!windows !cgo !cmount
+// +build !linux,!darwin,!freebsd,!windows !brew !cgo !cmount

package cmount

@@ -22,7 +22,7 @@ func init() {

var commandDefinition = &cobra.Command{
    Use: "copy source:path dest:path",
-   Short: `Copy files from source to dest, skipping already copied`,
+   Short: `Copy files from source to dest, skipping already copied.`,
    Long: `
Copy the source to the destination. Doesn't transfer
unchanged files, testing by size and modification time or

@@ -15,7 +15,7 @@ func init() {

var commandDefinition = &cobra.Command{
    Use: "copyto source:path dest:path",
-   Short: `Copy files from source to dest, skipping already copied`,
+   Short: `Copy files from source to dest, skipping already copied.`,
    Long: `
If source:path is a file or directory then it copies it to a file or
directory named dest:path.

@@ -44,7 +44,7 @@ func init() {

var commandDefinition = &cobra.Command{
    Use: "lsf remote:path",
-   Short: `List directories and objects in remote:path formatted for parsing`,
+   Short: `List directories and objects in remote:path formatted for parsing.`,
    Long: `
List the contents of the source path (directories and objects) to
standard output in a form which is easy to parse by scripts. By

@@ -107,6 +107,13 @@ func (d *Dir) ReadDirAll(ctx context.Context) (dirents []fuse.Dirent, err error)
    if err != nil {
        return nil, translateError(err)
    }
+   dirents = append(dirents, fuse.Dirent{
+       Type: fuse.DT_Dir,
+       Name: ".",
+   }, fuse.Dirent{
+       Type: fuse.DT_Dir,
+       Name: "..",
+   })
    for _, node := range items {
        name := node.Name()
        if len(name) > mountlib.MaxLeafSize {

@@ -1,7 +1,7 @@
// Build for mount for unsupported platforms to stop go complaining
// about "no buildable Go source files "

-// Invert the build constraint: linux,go1.13 darwin,go1.13 freebsd,go1.13
+// Invert the build constraint: linux,go1.13 freebsd,go1.13
//
// !((linux&&go1.13) || (darwin&&go1.13) || (freebsd&&go1.13))
// == !(linux&&go1.13) && !(darwin&&go1.13) && !(freebsd&&go1.13))

@@ -67,8 +67,8 @@ func setAttr(node vfs.Node, attr *fuse.Attr) {
    modTime := node.ModTime()
    // set attributes
    vfs := node.VFS()
-   attr.Owner.Gid = vfs.Opt.UID
-   attr.Owner.Uid = vfs.Opt.GID
+   attr.Owner.Gid = vfs.Opt.GID
+   attr.Owner.Uid = vfs.Opt.UID
    attr.Mode = getMode(node)
    attr.Size = Size
    attr.Nlink = 1

@@ -192,6 +192,9 @@ Stopping the mount manually:

    # OS X
    umount /path/to/local/mount

+**Note**: As of ` + "`rclone` 1.52.2, `rclone mount`" + ` now requires Go version 1.13
+or newer on some platforms depending on the underlying FUSE library in use.
+
### Installing on Windows

To run rclone ` + commandName + ` on Windows, you will need to

@@ -333,9 +336,6 @@ With --vfs-read-chunk-size 100M and --vfs-read-chunk-size-limit 0 the following
parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on.
When --vfs-read-chunk-size-limit 500M is specified, the result would be
0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.

-Chunked reading will only work with --vfs-cache-mode < full, as the file will always
-be copied to the vfs cache before opening with --vfs-cache-mode full.
` + vfs.Help,
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(2, 2, command, args)
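The chunk progression described in that doc text can be reproduced in a few lines; the sketch below assumes the doubling-until-limit behaviour documented above:

```go
package main

import "fmt"

func main() {
	const M = int64(1 << 20)
	chunk, limit := 100*M, 500*M // --vfs-read-chunk-size / --vfs-read-chunk-size-limit
	for off := int64(0); off < 1700*M; {
		fmt.Printf("%dM-%dM\n", off/M, (off+chunk)/M)
		off += chunk
		// each chunk doubles in size until the limit caps it
		if chunk *= 2; limit > 0 && chunk > limit {
			chunk = limit
		}
	}
	// prints 0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M
}
```
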
@@ -17,7 +17,7 @@ func init() {

var commandDefinition = &cobra.Command{
    Use: "obscure password",
-   Short: `Obscure password for use in the rclone config file`,
+   Short: `Obscure password for use in the rclone config file.`,
    Long: `In the rclone config file, human readable passwords are
obscured. Obscuring them is done by encrypting them and writing them
out in base64. This is **not** a secure way of encrypting these

@@ -1,6 +1,7 @@
package http

import (
+   "io"
    "net/http"
    "os"
    "path"

@@ -172,8 +173,11 @@ func (s *server) serveFile(w http.ResponseWriter, r *http.Request, remote string
    obj := entry.(fs.Object)
    file := node.(*vfs.File)

-   // Set content length since we know how long the object is
-   w.Header().Set("Content-Length", strconv.FormatInt(node.Size(), 10))
+   // Set content length if we know how long the object is
+   knownSize := obj.Size() >= 0
+   if knownSize {
+       w.Header().Set("Content-Length", strconv.FormatInt(node.Size(), 10))
+   }

    // Set content type
    mimeType := fs.MimeType(r.Context(), obj)

@@ -210,5 +214,19 @@ func (s *server) serveFile(w http.ResponseWriter, r *http.Request, remote string
    // FIXME in = fs.NewAccount(in, obj).WithBuffer() // account the transfer

    // Serve the file
-   http.ServeContent(w, r, remote, node.ModTime(), in)
+   if knownSize {
+       http.ServeContent(w, r, remote, node.ModTime(), in)
+   } else {
+       // http.ServeContent can't serve unknown length files
+       if rangeRequest := r.Header.Get("Range"); rangeRequest != "" {
+           http.Error(w, "Can't use Range: on files of unknown length", http.StatusRequestedRangeNotSatisfiable)
+           return
+       }
+       n, err := io.Copy(w, in)
+       if err != nil {
+           fs.Errorf(obj, "Didn't finish writing GET request (wrote %d/unknown bytes): %v", n, err)
+           return
+       }
+   }

}
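A quick way to observe the new behaviour, assuming a served file of unknown length (URL, port and path are hypothetical):

```
$ curl -sI -H "Range: bytes=0-99" http://localhost:8080/stream-of-unknown-length
HTTP/1.1 416 Requested Range Not Satisfiable
```
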
@@ -75,6 +75,39 @@ func (s *server) getVFS(what string, sshConn *ssh.ServerConn) (VFS *vfs.VFS) {
    return VFS
}

// Accept a single connection - run in a go routine as the ssh
// authentication can block
func (s *server) acceptConnection(nConn net.Conn) {
    what := describeConn(nConn)

    // Before use, a handshake must be performed on the incoming net.Conn.
    sshConn, chans, reqs, err := ssh.NewServerConn(nConn, s.config)
    if err != nil {
        fs.Errorf(what, "SSH login failed: %v", err)
        return
    }

    fs.Infof(what, "SSH login from %s using %s", sshConn.User(), sshConn.ClientVersion())

    // Discard all global out-of-band Requests
    go ssh.DiscardRequests(reqs)

    c := &conn{
        what: what,
        vfs:  s.getVFS(what, sshConn),
    }
    if c.vfs == nil {
        fs.Infof(what, "Closing unauthenticated connection (couldn't find VFS)")
        _ = nConn.Close()
        return
    }
    c.handlers = newVFSHandler(c.vfs)

    // Accept all channels
    go c.handleChannels(chans)
}

// Accept connections and call them in a go routine
func (s *server) acceptConnections() {
    for {
        nConn, err := s.listener.Accept()

@@ -85,33 +118,7 @@ func (s *server) acceptConnections() {
            fs.Errorf(nil, "Failed to accept incoming connection: %v", err)
            continue
        }
-       what := describeConn(nConn)
-
-       // Before use, a handshake must be performed on the incoming net.Conn.
-       sshConn, chans, reqs, err := ssh.NewServerConn(nConn, s.config)
-       if err != nil {
-           fs.Errorf(what, "SSH login failed: %v", err)
-           continue
-       }
-
-       fs.Infof(what, "SSH login from %s using %s", sshConn.User(), sshConn.ClientVersion())
-
-       // Discard all global out-of-band Requests
-       go ssh.DiscardRequests(reqs)
-
-       c := &conn{
-           what: what,
-           vfs:  s.getVFS(what, sshConn),
-       }
-       if c.vfs == nil {
-           fs.Infof(what, "Closing unauthenticated connection (couldn't find VFS)")
-           _ = nConn.Close()
-           continue
-       }
-       c.handlers = newVFSHandler(c.vfs)
-
-       // Accept all channels
-       go c.handleChannels(chans)
+       go s.acceptConnection(nConn)
    }
}

@@ -7,11 +7,11 @@ import (
    "strings"
    "time"

+   "github.com/coreos/go-semver/semver"
    "github.com/pkg/errors"
    "github.com/rclone/rclone/cmd"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config/flags"
-   "github.com/rclone/rclone/fs/version"
    "github.com/spf13/cobra"
)

@@ -66,8 +66,16 @@ Or
    },
}

+// strip a leading v off the string
+func stripV(s string) string {
+   if len(s) > 0 && s[0] == 'v' {
+       return s[1:]
+   }
+   return s
+}
+
// getVersion gets the version by checking the download repository passed in
-func getVersion(url string) (v version.Version, vs string, date time.Time, err error) {
+func getVersion(url string) (v *semver.Version, vs string, date time.Time, err error) {
    resp, err := http.Get(url)
    if err != nil {
        return v, vs, date, err

@@ -89,16 +97,16 @@ func getVersion(url string) (v version.Version, vs string, date time.Time, err e
    if err != nil {
        return v, vs, date, err
    }
-   v, err = version.New(vs)
+   v, err = semver.NewVersion(stripV(vs))
    return v, vs, date, err
}

// check the current version against available versions
func checkVersion() {
    // Get Current version
-   vCurrent, err := version.New(fs.Version)
+   vCurrent, err := semver.NewVersion(stripV(fs.Version))
    if err != nil {
-       fs.Errorf(nil, "Failed to get parse version: %v", err)
+       fs.Errorf(nil, "Failed to parse version: %v", err)
    }
    const timeFormat = "2006-01-02"

@@ -108,12 +116,12 @@ func checkVersion() {
        fs.Errorf(nil, "Failed to get rclone %s version: %v", what, err)
        return
    }
-   fmt.Printf("%-8s%-13v %20s\n",
+   fmt.Printf("%-8s%-40v %20s\n",
        what+":",
        v,
        "(released "+t.Format(timeFormat)+")",
    )
-   if v.Cmp(vCurrent) > 0 {
+   if v.Compare(*vCurrent) > 0 {
        fmt.Printf("  upgrade: %s\n", url+vs)
    }
}

@@ -126,7 +134,7 @@ func checkVersion() {
        "beta",
        "https://beta.rclone.org/",
    )
-   if vCurrent.IsGit() {
+   if strings.HasSuffix(fs.Version, "-DEV") {
        fmt.Println("Your version is compiled from git so comparisons may be wrong.")
    }
}

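A compact sketch of the comparison this switches to, using the same go-semver library (version strings below are examples):

```go
package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	// strip the leading "v" as stripV does before parsing
	current, _ := semver.NewVersion("1.53.1")
	latest, _ := semver.NewVersion("1.53.3")
	if latest.Compare(*current) > 0 {
		fmt.Println("upgrade available")
	}
}
```
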
@@ -148,6 +148,7 @@ WebDAV or S3, that work out of the box.)
{{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
{{< provider name="SugarSync" home="https://sugarsync.com/" config="/sugarsync/" >}}
{{< provider name="Tardigrade" home="https://tardigrade.io/" config="/tardigrade/" >}}
+{{< provider name="Tencent Cloud Object Storage (COS)" home="https://intl.cloud.tencent.com/product/cos" config="/s3/#tencent-cos" >}}
{{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" >}}
{{< provider name="WebDAV" home="https://en.wikipedia.org/wiki/WebDAV" config="/webdav/" >}}
{{< provider name="Yandex Disk" home="https://disk.yandex.com/" config="/yandex/" >}}

@@ -165,7 +165,8 @@ Here are the standard options specific to amazon cloud drive (Amazon Drive).

#### --acd-client-id

-Amazon Application Client ID.
+OAuth Client Id
+Leave blank normally.

- Config: client_id
- Env Var: RCLONE_ACD_CLIENT_ID

@@ -174,7 +175,8 @@ Amazon Application Client ID.

#### --acd-client-secret

-Amazon Application Client Secret.
+OAuth Client Secret
+Leave blank normally.

- Config: client_secret
- Env Var: RCLONE_ACD_CLIENT_SECRET

@@ -185,10 +187,19 @@ Amazon Application Client Secret.

Here are the advanced options specific to amazon cloud drive (Amazon Drive).

+#### --acd-token
+
+OAuth Access Token as a JSON blob.
+
+- Config: token
+- Env Var: RCLONE_ACD_TOKEN
+- Type: string
+- Default: ""
+
#### --acd-auth-url

Auth server URL.
-Leave blank to use Amazon's.
+Leave blank to use the provider defaults.

- Config: auth_url
- Env Var: RCLONE_ACD_AUTH_URL

@@ -198,7 +209,7 @@ Leave blank to use Amazon's.

#### --acd-token-url

Token server url.
-leave blank to use Amazon's.
+Leave blank to use the provider defaults.

- Config: token_url
- Env Var: RCLONE_ACD_TOKEN_URL

@@ -401,3 +401,11 @@ put them back in again.` >}}
* David Ibarra <david.ibarra@realty.com>
* Tim Gallant <tim@lilt.com>
* Kaloyan Raev <kaloyan@storj.io>
+* Jay McEntire <jay.mcentire@gmail.com>
+* Leo Luan <leoluan@us.ibm.com>
+* aus <549081+aus@users.noreply.github.com>
+* Aaron Gokaslan <agokaslan@fb.com>
+* Egor Margineanu <egmar@users.noreply.github.com>
+* Lucas Kanashiro <lucas.kanashiro@canonical.com>
+* WarpedPixel <WarpedPixel@users.noreply.github.com>
+* Sam Edwards <sam@samedwards.ca>

@@ -413,6 +413,20 @@ This value should be set no larger than 4.657GiB (== 5GB).
- Type: SizeSuffix
- Default: 200M

#### --b2-copy-cutoff

Cutoff for switching to multipart copy

Any files larger than this that need to be server side copied will be
copied in chunks of this size.

The minimum is 0 and the maximum is 4.6GB.

- Config: copy_cutoff
- Env Var: RCLONE_B2_COPY_CUTOFF
- Type: SizeSuffix
- Default: 4G

#### --b2-chunk-size

Upload chunk size. Must fit in memory.

@@ -467,6 +481,26 @@ The minimum value is 1 second. The maximum value is one week.
- Type: Duration
- Default: 1w

#### --b2-memory-pool-flush-time

How often internal memory buffer pools will be flushed.
Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.

- Config: memory_pool_flush_time
- Env Var: RCLONE_B2_MEMORY_POOL_FLUSH_TIME
- Type: Duration
- Default: 1m0s

#### --b2-memory-pool-use-mmap

Whether to use mmap buffers in internal memory pool.

- Config: memory_pool_use_mmap
- Env Var: RCLONE_B2_MEMORY_POOL_USE_MMAP
- Type: bool
- Default: false

#### --b2-encoding

This sets the encoding for the backend.
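For example, a hypothetical invocation that forces big B2 server side copies into 1G chunks using the new option (bucket names are illustrative):

```
rclone copy --b2-copy-cutoff 1G b2:bucket/src b2:bucket/dst
```
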

@@ -270,7 +270,7 @@ Here are the standard options specific to box (Box).

#### --box-client-id

-Box App Client Id.
+OAuth Client Id
Leave blank normally.

- Config: client_id

@@ -280,7 +280,7 @@ Leave blank normally.

#### --box-client-secret

-Box App Client Secret
+OAuth Client Secret
Leave blank normally.

- Config: client_secret

@@ -293,11 +293,24 @@ Leave blank normally.

Box App config.json location
Leave blank normally.

+Leading `~` will be expanded in the file name as will environment variables such as `${RCLONE_CONFIG_DIR}`.
+
+
- Config: box_config_file
- Env Var: RCLONE_BOX_BOX_CONFIG_FILE
- Type: string
- Default: ""

+#### --box-access-token
+
+Box App Primary Access Token
+Leave blank normally.
+
+- Config: access_token
+- Env Var: RCLONE_BOX_ACCESS_TOKEN
+- Type: string
+- Default: ""
+
#### --box-box-sub-type


@@ -316,6 +329,35 @@ Leave blank normally.

Here are the advanced options specific to box (Box).

+#### --box-token
+
+OAuth Access Token as a JSON blob.
+
+- Config: token
+- Env Var: RCLONE_BOX_TOKEN
+- Type: string
+- Default: ""
+
+#### --box-auth-url
+
+Auth server URL.
+Leave blank to use the provider defaults.
+
+- Config: auth_url
+- Env Var: RCLONE_BOX_AUTH_URL
+- Type: string
+- Default: ""
+
+#### --box-token-url
+
+Token server url.
+Leave blank to use the provider defaults.
+
+- Config: token_url
+- Env Var: RCLONE_BOX_TOKEN_URL
+- Type: string
+- Default: ""
+
#### --box-root-folder-id

Fill in for rclone to use a non root folder as its starting point.

@@ -362,6 +404,7 @@ Note that Box is case insensitive so you can't have a file called
"Hello.doc" and one called "hello.doc".

Box file names can't have the `\` character in. rclone maps this to
-and from an identical looking unicode equivalent `＼`.
+and from an identical looking unicode equivalent `＼` (U+FF3C Fullwidth
+Reverse Solidus).

Box only supports filenames up to 255 characters in length.

@@ -12,25 +12,27 @@ description: "Rclone Bugs and Limitations"
Rclone doesn't currently preserve the timestamps of directories. This
is because rclone only really considers objects when syncing.

-### Rclone struggles with millions of files in a directory
+### Rclone struggles with millions of files in a directory/bucket

-Currently rclone loads each directory entirely into memory before
-using it. Since each Rclone object takes 0.5k-1k of memory this can
-take a very long time and use an extremely large amount of memory.
+Currently rclone loads each directory/bucket entirely into memory before
+using it. Since each rclone object takes 0.5k-1k of memory this can take
+a very long time and use a large amount of memory.

-Millions of files in a directory tend caused by software writing cloud
-storage (eg S3 buckets).
+Millions of files in a directory tends to occur on bucket-based remotes
+(e.g. S3 buckets) since those remotes do not segregate subdirectories within
+the bucket.

### Bucket based remotes and folders

-Bucket based remotes (eg S3/GCS/Swift/B2) do not have a concept of
+Bucket based remotes (e.g. S3/GCS/Swift/B2) do not have a concept of
directories. Rclone therefore cannot create directories in them which
means that empty directories on a bucket based remote will tend to
disappear.

Some software creates empty keys ending in `/` as directory markers.
Rclone doesn't do this as it potentially creates more objects and
-costs more. It may do in future (probably with a flag).
+costs more. This ability may be added in the future (probably via a
+flag/option).

## Bugs
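A rough worked example of that 0.5k-1k per object figure: a bucket holding 10 million objects needs on the order of 5-10 GB of RAM just to hold the listing, before rclone can do anything with it.
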
@@ -343,6 +343,8 @@ The username of the Plex user

The password of the Plex user

+**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).
+
- Config: plex_password
- Env Var: RCLONE_CACHE_PLEX_PASSWORD
- Type: string

@@ -5,6 +5,277 @@ description: "Rclone Changelog"

# Changelog

## v1.53.3 - 2020-11-19

[See commits](https://github.com/rclone/rclone/compare/v1.53.2...v1.53.3)

* Bug Fixes
    * random: Fix incorrect use of math/rand instead of crypto/rand CVE-2020-28924 (Nick Craig-Wood)
        * Passwords you have generated with `rclone config` may be insecure
        * See [issue #4783](https://github.com/rclone/rclone/issues/4783) for more details and a checking tool
    * random: Seed math/rand in one place with crypto strong seed (Nick Craig-Wood)
* VFS
    * Fix vfs/refresh calls with fs= parameter (Nick Craig-Wood)
* Sharefile
    * Fix backend due to API swapping integers for strings (Nick Craig-Wood)

## v1.53.2 - 2020-10-26

[See commits](https://github.com/rclone/rclone/compare/v1.53.1...v1.53.2)

* Bug Fixes
    * accounting
        * Fix incorrect speed and transferTime in core/stats (Nick Craig-Wood)
        * Stabilize display order of transfers on Windows (Nick Craig-Wood)
    * operations
        * Fix use of --suffix without --backup-dir (Nick Craig-Wood)
        * Fix spurious "--checksum is in use but the source and destination have no hashes in common" (Nick Craig-Wood)
    * build
        * Work around GitHub actions brew problem (Nick Craig-Wood)
        * Stop using set-env and set-path in the GitHub actions (Nick Craig-Wood)
* Mount
    * mount2: Fix the swapped UID / GID values (Russell Cattelan)
* VFS
    * Detect and recover from a file being removed externally from the cache (Nick Craig-Wood)
    * Fix a deadlock vulnerability in downloaders.Close (Leo Luan)
    * Fix a race condition in retryFailedResets (Leo Luan)
    * Fix missed concurrency control between some item operations and reset (Leo Luan)
    * Add exponential backoff during ENOSPC retries (Leo Luan)
    * Add a missed update of used cache space (Leo Luan)
    * Fix --no-modtime to not attempt to set modtimes (as documented) (Nick Craig-Wood)
* Local
    * Fix sizes and syncing with --links option on Windows (Nick Craig-Wood)
* Chunker
    * Disable ListR to fix missing files on GDrive (workaround) (Ivan Andreev)
    * Fix upload over crypt (Ivan Andreev)
* Fichier
    * Increase maximum file size from 100GB to 300GB (gyutw)
* Jottacloud
    * Remove clientSecret from config when upgrading to token based authentication (buengese)
    * Avoid double url escaping of device/mountpoint (albertony)
    * Remove DirMove workaround as it's not required anymore - also (buengese)
* Mailru
    * Fix uploads after recent changes on server (Ivan Andreev)
    * Fix range requests after june changes on server (Ivan Andreev)
    * Fix invalid timestamp on corrupted files (fixes) (Ivan Andreev)
* Onedrive
    * Fix disk usage for sharepoint (Nick Craig-Wood)
* S3
    * Add missing regions for AWS (Anagh Kumar Baranwal)
* Seafile
    * Fix accessing libraries > 2GB on 32 bit systems (Muffin King)
* SFTP
    * Always convert the checksum to lower case (buengese)
* Union
    * Create root directories if none exist (Nick Craig-Wood)

## v1.53.1 - 2020-09-13

[See commits](https://github.com/rclone/rclone/compare/v1.53.0...v1.53.1)

* Bug Fixes
    * accounting: Remove new line from end of --stats-one-line display (Nick Craig-Wood)
    * check
        * Add back missing --download flag (Nick Craig-Wood)
        * Fix docs (Nick Craig-Wood)
    * docs
        * Note --log-file does append (Nick Craig-Wood)
        * Add full stops for consistency in rclone --help (edwardxml)
        * Add Tencent COS to s3 provider list (wjielai)
        * Updated mount command to reflect that it requires Go 1.13 or newer (Evan Harris)
        * jottacloud: Mention that uploads from local disk will not need to cache files to disk for md5 calculation (albertony)
        * Fix formatting of rc docs page (Nick Craig-Wood)
    * build
        * Include vendor tar ball in release and fix startdev (Nick Craig-Wood)
        * Fix "Illegal instruction" error for ARMv6 builds (Nick Craig-Wood)
        * Fix architecture name in ARMv7 build (Nick Craig-Wood)
* VFS
    * Fix spurious error "vfs cache: failed to _ensure cache EOF" (Nick Craig-Wood)
    * Log an ERROR if we fail to set the file to be sparse (Nick Craig-Wood)
* Local
    * Log an ERROR if we fail to set the file to be sparse (Nick Craig-Wood)
* Drive
    * Re-adds special oauth help text (Tim Gallant)
* Opendrive
    * Do not retry 400 errors (Evan Harris)

## v1.53.0 - 2020-09-02

[See commits](https://github.com/rclone/rclone/compare/v1.52.0...v1.53.0)

* New Features
    * The [VFS layer](/commands/rclone_mount/#vfs-virtual-file-system) was heavily reworked for this release - see below for more details
    * Interactive mode [-i/--interactive](/docs/#interactive) for destructive operations (fishbullet)
    * Add [--bwlimit-file](/docs/#bwlimit-file-bandwidth-spec) flag to limit speeds of individual file transfers (Nick Craig-Wood)
    * Transfers are sorted by start time in the stats and progress output (Max Sum)
    * Make sure backends expand `~` and environment vars in file names they use (Nick Craig-Wood)
    * Add [--refresh-times](/docs/#refresh-times) flag to set modtimes on hashless backends (Nick Craig-Wood)
    * build
        * Remove vendor directory in favour of Go modules (Nick Craig-Wood)
        * Build with go1.15.x by default (Nick Craig-Wood)
        * Drop macOS 386 build as it is no longer supported by go1.15 (Nick Craig-Wood)
        * Add ARMv7 to the supported builds (Nick Craig-Wood)
        * Enable `rclone cmount` on macOS (Nick Craig-Wood)
        * Make rclone build with gccgo (Nick Craig-Wood)
        * Make rclone build with wasm (Nick Craig-Wood)
        * Change beta numbering to be semver compatible (Nick Craig-Wood)
        * Add file properties and icon to Windows executable (albertony)
        * Add experimental interface for integrating rclone into browsers (Nick Craig-Wood)
        * lib: Add file name compression (Klaus Post)
    * rc
        * Allow installation and use of plugins and test plugins with rclone-webui (Chaitanya Bankanhal)
        * Add reverse proxy pluginsHandler for serving plugins (Chaitanya Bankanhal)
        * Add `mount/listmounts` option for listing current mounts (Chaitanya Bankanhal)
        * Add `operations/uploadfile` to upload a file through rc using encoding multipart/form-data (Chaitanya Bankanhal)
        * Add `core/command` to execute rclone terminal commands. (Chaitanya Bankanhal)
    * `rclone check`
        * Add reporting of filenames for same/missing/changed (Nick Craig-Wood)
        * Make check command obey `--dry-run`/`-i`/`--interactive` (Nick Craig-Wood)
        * Make check do `--checkers` files concurrently (Nick Craig-Wood)
        * Retry downloads if they fail when using the `--download` flag (Nick Craig-Wood)
        * Make it show stats by default (Nick Craig-Wood)
    * `rclone obscure`: Allow obscure command to accept password on STDIN (David Ibarra)
    * `rclone config`
        * Set RCLONE_CONFIG_DIR for use in config files and subprocesses (Nick Craig-Wood)
        * Reject remote names starting with a dash. (jtagcat)
    * `rclone cryptcheck`: Add reporting of filenames for same/missing/changed (Nick Craig-Wood)
    * `rclone dedupe`: Make it obey the `--size-only` flag for duplicate detection (Nick Craig-Wood)
    * `rclone link`: Add `--expire` and `--unlink` flags (Roman Kredentser)
    * `rclone mkdir`: Warn when using mkdir on remotes which can't have empty directories (Nick Craig-Wood)
    * `rclone rc`: Allow JSON parameters to simplify command line usage (Nick Craig-Wood)
    * `rclone serve ftp`
        * Don't compile on < go1.13 after dependency update (Nick Craig-Wood)
        * Add error message if auth proxy fails (Nick Craig-Wood)
        * Use refactored goftp.io/server library for binary shrink (Nick Craig-Wood)
    * `rclone serve restic`: Expose interfaces so that rclone can be used as a library from within restic (Jack)
    * `rclone sync`: Add `--track-renames-strategy leaf` (Nick Craig-Wood)
    * `rclone touch`: Add ability to set nanosecond resolution times (Nick Craig-Wood)
    * `rclone tree`: Remove `-i` shorthand for `--noindent` as it conflicts with `-i`/`--interactive` (Nick Craig-Wood)
* Bug Fixes
    * accounting
        * Fix documentation for `speed`/`speedAvg` (Nick Craig-Wood)
        * Fix elapsed time not show actual time since beginning (Chaitanya Bankanhal)
        * Fix deadlock in stats printing (Nick Craig-Wood)
    * build
        * Fix file handle leak in GitHub release tool (Garrett Squire)
    * `rclone check`: Fix successful retries with `--download` counting errors (Nick Craig-Wood)
    * `rclone dedupe`: Fix logging to be easier to understand (Nick Craig-Wood)
* Mount
    * Warn macOS users that mount implementation is changing (Nick Craig-Wood)
        * to test the new implementation use `rclone cmount` instead of `rclone mount`
        * this is because the library rclone uses has dropped macOS support
    * rc interface
        * Add call for unmount all (Chaitanya Bankanhal)
        * Make `mount/mount` remote control take vfsOpt option (Nick Craig-Wood)
        * Add mountOpt to `mount/mount` (Nick Craig-Wood)
        * Add VFS and Mount options to `mount/listmounts` (Nick Craig-Wood)
    * Catch panics in cgofuse initialization and turn into error messages (Nick Craig-Wood)
    * Always supply stat information in Readdir (Nick Craig-Wood)
    * Add support for reading unknown length files using direct IO (Windows) (Nick Craig-Wood)
    * Fix On Windows don't add `-o uid/gid=-1` if user supplies `-o uid/gid`. (Nick Craig-Wood)
    * Fix macOS losing directory contents in cmount (Nick Craig-Wood)
    * Fix volume name broken in recent refactor (Nick Craig-Wood)
* VFS
    * Implement partial reads for `--vfs-cache-mode full` (Nick Craig-Wood)
    * Add `--vfs-writeback` option to delay writes back to cloud storage (Nick Craig-Wood)
    * Add `--vfs-read-ahead` parameter for use with `--vfs-cache-mode full` (Nick Craig-Wood)
    * Restart pending uploads on restart of the cache (Nick Craig-Wood)
    * Support synchronous cache space recovery upon ENOSPC (Leo Luan)
    * Allow ReadAt and WriteAt to run concurrently with themselves (Nick Craig-Wood)
    * Change modtime of file before upload to current (Rob Calistri)
    * Recommend `--vfs-cache-modes writes` on backends which can't stream (Nick Craig-Wood)
    * Add an optional `fs` parameter to vfs rc methods (Nick Craig-Wood)
    * Fix errors when using > 260 char files in the cache in Windows (Nick Craig-Wood)
    * Fix renaming of items while they are being uploaded (Nick Craig-Wood)
    * Fix very high load caused by slow directory listings (Nick Craig-Wood)
    * Fix renamed files not being uploaded with `--vfs-cache-mode minimal` (Nick Craig-Wood)
    * Fix directory locking caused by slow directory listings (Nick Craig-Wood)
    * Fix saving from chrome without `--vfs-cache-mode writes` (Nick Craig-Wood)
* Local
    * Add `--local-no-updated` to provide a consistent view of changing objects (Nick Craig-Wood)
    * Add `--local-no-set-modtime` option to prevent modtime changes (tyhuber1)
    * Fix race conditions updating and reading Object metadata (Nick Craig-Wood)
* Cache
    * Make any created backends be cached to fix rc problems (Nick Craig-Wood)
    * Fix dedupe on caches wrapping drives (Nick Craig-Wood)
* Crypt
    * Add `--crypt-server-side-across-configs` flag (Nick Craig-Wood)
    * Make any created backends be cached to fix rc problems (Nick Craig-Wood)
* Alias
    * Make any created backends be cached to fix rc problems (Nick Craig-Wood)
* Azure Blob
    * Don't compile on < go1.13 after dependency update (Nick Craig-Wood)
* B2
    * Implement server side copy for files > 5GB (Nick Craig-Wood)
    * Cancel in progress multipart uploads and copies on rclone exit (Nick Craig-Wood)
    * Note that b2's encoding now allows \ but rclone's hasn't changed (Nick Craig-Wood)
    * Fix transfers when using download_url (Nick Craig-Wood)
* Box
    * Implement rclone cleanup (buengese)
    * Cancel in progress multipart uploads and copies on rclone exit (Nick Craig-Wood)
    * Allow authentication with access token (David)
* Chunker
    * Make any created backends be cached to fix rc problems (Nick Craig-Wood)
* Drive
    * Add `rclone backend drives` to list shared drives (teamdrives) (Nick Craig-Wood)
    * Implement `rclone backend untrash` (Nick Craig-Wood)
    * Work around drive bug which didn't set modtime of copied docs (Nick Craig-Wood)
    * Added `--drive-starred-only` to only show starred files (Jay McEntire)
    * Deprecate `--drive-alternate-export` as it is no longer needed (themylogin)
    * Fix duplication of Google docs on server side copy (Nick Craig-Wood)
    * Fix "panic: send on closed channel" when recycling dir entries (Nick Craig-Wood)
* Dropbox
    * Add copyright detector info in limitations section in the docs (Alex Guerrero)
    * Fix `rclone link` by removing expires parameter (Nick Craig-Wood)
* Fichier
    * Detect Flood detected: IP Locked error and sleep for 30s (Nick Craig-Wood)
* FTP
    * Add explicit TLS support (Heiko Bornholdt)
    * Add support for `--dump bodies` and `--dump auth` for debugging (Nick Craig-Wood)
    * Fix interoperation with pure-ftpd (Nick Craig-Wood)
* Google Cloud Storage
    * Add support for anonymous access (Kai Lüke)
* Jottacloud
    * Bring back legacy authentication for use with whitelabel versions (buengese)
    * Switch to new api root - also implement a very ugly workaround for the DirMove failures (buengese)
* Onedrive
    * Rework cancel of multipart uploads on rclone exit (Nick Craig-Wood)
    * Implement rclone cleanup (Nick Craig-Wood)
    * Add `--onedrive-no-versions` flag to remove old versions (Nick Craig-Wood)
* Pcloud
    * Implement `rclone link` for public link creation (buengese)
* Qingstor
    * Cancel in progress multipart uploads on rclone exit (Nick Craig-Wood)
* S3
    * Preserve metadata when doing multipart copy (Nick Craig-Wood)
    * Cancel in progress multipart uploads and copies on rclone exit (Nick Craig-Wood)
    * Add `rclone link` for public link sharing (Roman Kredentser)
    * Add `rclone backend restore` command to restore objects from GLACIER (Nick Craig-Wood)
    * Add `rclone cleanup` and `rclone backend cleanup` to clean unfinished multipart uploads (Nick Craig-Wood)
    * Add `rclone backend list-multipart-uploads` to list unfinished multipart uploads (Nick Craig-Wood)
    * Add `--s3-max-upload-parts` support (Kamil Trzciński)
    * Add `--s3-no-check-bucket` for minimising rclone transactions and perms (Nick Craig-Wood)
    * Add `--s3-profile` and `--s3-shared-credentials-file` options (Nick Craig-Wood)
    * Use regional s3 us-east-1 endpoint (David)
    * Add Scaleway provider (Vincent Feltz)
    * Update IBM COS endpoints (Egor Margineanu)
    * Reduce the default `--s3-copy-cutoff` to < 5GB for Backblaze S3 compatibility (Nick Craig-Wood)
    * Fix detection of bucket existing (Nick Craig-Wood)
* SFTP
    * Use the absolute path instead of the relative path for listing for improved compatibility (Nick Craig-Wood)
    * Add `--sftp-subsystem` and `--sftp-server-command` options (aus)
* Swift
    * Fix dangling large objects breaking the listing (Nick Craig-Wood)
    * Fix purge not deleting directory markers (Nick Craig-Wood)
    * Fix update multipart object removing all of its own parts (Nick Craig-Wood)
    * Fix missing hash from object returned from upload (Nick Craig-Wood)
* Tardigrade
    * Upgrade to uplink v1.2.0 (Kaloyan Raev)
* Union
    * Fix writing with the all policy (Nick Craig-Wood)
* WebDAV
    * Fix directory creation with 4shared (Nick Craig-Wood)

## v1.52.3 - 2020-08-07

[See commits](https://github.com/rclone/rclone/compare/v1.52.2...v1.52.3)

@@ -39,14 +39,14 @@ See the [global flags page](/flags/) for global options not listed here.
* [rclone backend](/commands/rclone_backend/) - Run a backend specific command.
* [rclone cat](/commands/rclone_cat/) - Concatenates any files and sends them to stdout.
* [rclone check](/commands/rclone_check/) - Checks the files in the source and destination match.
-* [rclone cleanup](/commands/rclone_cleanup/) - Clean up the remote if possible
+* [rclone cleanup](/commands/rclone_cleanup/) - Clean up the remote if possible.
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
-* [rclone copy](/commands/rclone_copy/) - Copy files from source to dest, skipping already copied
-* [rclone copyto](/commands/rclone_copyto/) - Copy files from source to dest, skipping already copied
+* [rclone copy](/commands/rclone_copy/) - Copy files from source to dest, skipping already copied.
+* [rclone copyto](/commands/rclone_copyto/) - Copy files from source to dest, skipping already copied.
* [rclone copyurl](/commands/rclone_copyurl/) - Copy url content to dest.
* [rclone cryptcheck](/commands/rclone_cryptcheck/) - Cryptcheck checks the integrity of a crypted remote.
* [rclone cryptdecode](/commands/rclone_cryptdecode/) - Cryptdecode returns unencrypted file names.
-* [rclone dedupe](/commands/rclone_dedupe/) - Interactively find duplicate files and delete/rename them.
+* [rclone dedupe](/commands/rclone_dedupe/) - Interactively find duplicate filenames and delete/rename them.
* [rclone delete](/commands/rclone_delete/) - Remove the contents of path.
* [rclone deletefile](/commands/rclone_deletefile/) - Remove a single file from remote.
* [rclone genautocomplete](/commands/rclone_genautocomplete/) - Output completion script for a given shell.

@@ -56,7 +56,7 @@ See the [global flags page](/flags/) for global options not listed here.
* [rclone listremotes](/commands/rclone_listremotes/) - List all the remotes in the config file.
* [rclone ls](/commands/rclone_ls/) - List the objects in the path with size and path.
* [rclone lsd](/commands/rclone_lsd/) - List all directories/containers/buckets in the path.
-* [rclone lsf](/commands/rclone_lsf/) - List directories and objects in remote:path formatted for parsing
+* [rclone lsf](/commands/rclone_lsf/) - List directories and objects in remote:path formatted for parsing.
* [rclone lsjson](/commands/rclone_lsjson/) - List directories and objects in the path in JSON format.
* [rclone lsl](/commands/rclone_lsl/) - List the objects in path with modification time, size and path.
* [rclone md5sum](/commands/rclone_md5sum/) - Produces an md5sum file for all the objects in the path.

@@ -65,7 +65,7 @@ See the [global flags page](/flags/) for global options not listed here.
* [rclone move](/commands/rclone_move/) - Move files from source to dest.
* [rclone moveto](/commands/rclone_moveto/) - Move file or directory from source to dest.
* [rclone ncdu](/commands/rclone_ncdu/) - Explore a remote with a text based user interface.
-* [rclone obscure](/commands/rclone_obscure/) - Obscure password for use in the rclone.conf
+* [rclone obscure](/commands/rclone_obscure/) - Obscure password for use in the rclone config file.
* [rclone purge](/commands/rclone_purge/) - Remove the path and all of its contents.
* [rclone rc](/commands/rclone_rc/) - Run a command against a running rclone.
* [rclone rcat](/commands/rclone_rcat/) - Copies standard input to file on remote.
@@ -24,9 +24,26 @@ both remotes and check them against each other on the fly. This can
be useful for remotes that don't support hashes or if you really want
to check all the data.

-If you supply the --one-way flag, it will only check that files in source
-match the files in destination, not the other way around. Meaning extra files in
-destination that are not in the source will not trigger an error.
+If you supply the `--one-way` flag, it will only check that files in
+the source match the files in the destination, not the other way
+around. This means that extra files in the destination that are not in
+the source will not be detected.
+
+The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--match`
+and `--error` flags write paths, one per line, to the file name (or
+stdout if it is `-`) supplied. What they write is described in the
+help below. For example `--differ` will write all paths which are
+present on both the source and destination but different.
+
+The `--combined` flag will write a file (or stdout) which contains all
+file paths with a symbol and then a space and then the path to tell
+you what happened to it. These are reminiscent of diff files.
+
+- `= path` means path was found in source and destination and was identical
+- `- path` means path was missing on the source, so only in the destination
+- `+ path` means path was missing on the destination, so only in the source
+- `* path` means path was present in source and destination but different.
+- `! path` means there was an error reading or hashing the source or dest.

```
@@ -36,9 +53,15 @@ rclone check source:path dest:path [flags]

## Options

```
-   --download   Check by downloading rather than with hash.
-   -h, --help   help for check
-   --one-way    Check one way only, source files must exist on remote
+   --combined string         Make a combined report of changes to this file
+   --differ string           Report all non-matching files to this file
+   --download                Check by downloading rather than with hash.
+   --error string            Report all files with errors (hashing or reading) to this file
+   -h, --help                help for check
+   --match string            Report all matching files to this file
+   --missing-on-dst string   Report all files missing from the destination to this file
+   --missing-on-src string   Report all files missing from the source to this file
+   --one-way                 Check one way only, source files must exist on remote
```

See the [global flags page](/flags/) for global options not listed here.
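For illustration, a hypothetical `--combined -` report written to stdout might look like this (the file names are invented; the symbols are as defined above):

    = unchanged.txt
    * modified.txt
    + only-in-source.txt
    - only-in-dest.txt
    ! unreadable.bin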
@@ -1,13 +1,13 @@
---
title: "rclone cleanup"
-description: "Clean up the remote if possible"
+description: "Clean up the remote if possible."
slug: rclone_cleanup
url: /commands/rclone_cleanup/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/cleanup/ and as part of making a release run "make commanddocs"
---
# rclone cleanup

-Clean up the remote if possible
+Clean up the remote if possible.

## Synopsis

@@ -1,13 +1,13 @@
---
title: "rclone copy"
-description: "Copy files from source to dest, skipping already copied"
+description: "Copy files from source to dest, skipping already copied."
slug: rclone_copy
url: /commands/rclone_copy/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/copy/ and as part of making a release run "make commanddocs"
---
# rclone copy

-Copy files from source to dest, skipping already copied
+Copy files from source to dest, skipping already copied.

## Synopsis

@@ -59,7 +59,9 @@ recently very efficiently like this:

    rclone copy --max-age 24h --no-traverse /path/to/src remote:

-**Note**: Use the `-P`/`--progress` flag to view real-time transfer statistics
+**Note**: Use the `-P`/`--progress` flag to view real-time transfer statistics.
+
+**Note**: Use the `--dry-run` or the `--interactive`/`-i` flag to test without copying anything.

```
@@ -1,13 +1,13 @@
---
title: "rclone copyto"
-description: "Copy files from source to dest, skipping already copied"
+description: "Copy files from source to dest, skipping already copied."
slug: rclone_copyto
url: /commands/rclone_copyto/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/copyto/ and as part of making a release run "make commanddocs"
---
# rclone copyto

-Copy files from source to dest, skipping already copied
+Copy files from source to dest, skipping already copied.

## Synopsis

@@ -35,9 +35,26 @@ the files in remote:path.

After it has run it will log the status of the encryptedremote:.

-If you supply the --one-way flag, it will only check that files in source
-match the files in destination, not the other way around. Meaning extra files in
-destination that are not in the source will not trigger an error.
+If you supply the `--one-way` flag, it will only check that files in
+the source match the files in the destination, not the other way
+around. This means that extra files in the destination that are not in
+the source will not be detected.
+
+The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--match`
+and `--error` flags write paths, one per line, to the file name (or
+stdout if it is `-`) supplied. What they write is described in the
+help below. For example `--differ` will write all paths which are
+present on both the source and destination but different.
+
+The `--combined` flag will write a file (or stdout) which contains all
+file paths with a symbol and then a space and then the path to tell
+you what happened to it. These are reminiscent of diff files.
+
+- `= path` means path was found in source and destination and was identical
+- `- path` means path was missing on the source, so only in the destination
+- `+ path` means path was missing on the destination, so only in the source
+- `* path` means path was present in source and destination but different.
+- `! path` means there was an error reading or hashing the source or dest.

```
@@ -47,8 +64,14 @@ rclone cryptcheck remote:path cryptedremote:path [flags]

## Options

```
-   -h, --help    help for cryptcheck
-   --one-way     Check one way only, source files must exist on destination
+   --combined string         Make a combined report of changes to this file
+   --differ string           Report all non-matching files to this file
+   --error string            Report all files with errors (hashing or reading) to this file
+   -h, --help                help for cryptcheck
+   --match string            Report all matching files to this file
+   --missing-on-dst string   Report all files missing from the destination to this file
+   --missing-on-src string   Report all files missing from the source to this file
+   --one-way                 Check one way only, source files must exist on remote
```

See the [global flags page](/flags/) for global options not listed here.
@@ -1,29 +1,44 @@
---
title: "rclone dedupe"
-description: "Interactively find duplicate files and delete/rename them."
+description: "Interactively find duplicate filenames and delete/rename them."
slug: rclone_dedupe
url: /commands/rclone_dedupe/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/dedupe/ and as part of making a release run "make commanddocs"
---
# rclone dedupe

-Interactively find duplicate files and delete/rename them.
+Interactively find duplicate filenames and delete/rename them.

## Synopsis

-By default `dedupe` interactively finds duplicate files and offers to
-delete all but one or rename them to be different. Only useful with
-Google Drive which can have duplicate file names.
+By default `dedupe` interactively finds files with duplicate
+names and offers to delete all but one or rename them to be
+different.
+
+This is only useful with backends like Google Drive which can have
+duplicate file names. It can be run on wrapping backends (eg crypt) if
+they wrap a backend which supports duplicate file names.

In the first pass it will merge directories with the same name. It
-will do this iteratively until all the identical directories have been
-merged.
+will do this iteratively until all the identically named directories
+have been merged.

-The `dedupe` command will delete all but one of any identical (same
-md5sum) files it finds without confirmation. This means that for most
-duplicated files the `dedupe` command will not be interactive. You
-can use `--dry-run` to see what would happen without doing anything.
+In the second pass, for every group of duplicate file names, it will
+delete all but one of the identical files it finds without confirmation.
+This means that for most duplicated files the `dedupe`
+command will not be interactive.
+
+`dedupe` considers files to be identical if they have the
+same hash. If the backend does not support hashes (eg crypt wrapping
+Google Drive) then they will never be found to be identical. If you
+use the `--size-only` flag then files will be considered
+identical if they have the same size (any hash will be ignored). This
+can be useful on crypt backends which do not support hashes.
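For instance, a size-only dedupe on a crypt remote could be invoked like this (the remote and directory names are made up for the example):

    rclone dedupe --size-only mycrypt:photos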
+**Important**: Since this can cause data loss, test first with the
+`--dry-run` or the `--interactive`/`-i` flag.

Here is an example run.

@@ -42,22 +57,22 @@ Now the `dedupe` session

$ rclone dedupe drive:dupes
2016/03/05 16:24:37 Google drive root 'dupes': Looking for duplicates using interactive mode.
-one.txt: Found 4 duplicates - deleting identical copies
-one.txt: Deleting 2/3 identical duplicates (md5sum "1eedaa9fe86fd4b8632e2ac549403b36")
+one.txt: Found 4 files with duplicate names
+one.txt: Deleting 2/3 identical duplicates (MD5 "1eedaa9fe86fd4b8632e2ac549403b36")
one.txt: 2 duplicates remain
-  1: 6048320 bytes, 2016-03-05 16:23:16.798000000, md5sum 1eedaa9fe86fd4b8632e2ac549403b36
-  2: 564374 bytes, 2016-03-05 16:23:06.731000000, md5sum 7594e7dc9fc28f727c42ee3e0749de81
+  1: 6048320 bytes, 2016-03-05 16:23:16.798000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
+  2: 564374 bytes, 2016-03-05 16:23:06.731000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
s) Skip and do nothing
k) Keep just one (choose which in next step)
r) Rename all to be different (by changing file.jpg to file-1.jpg)
s/k/r> k
Enter the number of the file to keep> 1
one.txt: Deleted 1 extra copies
-two.txt: Found 3 duplicates - deleting identical copies
+two.txt: Found 3 files with duplicate names
two.txt: 3 duplicates remain
-  1: 564374 bytes, 2016-03-05 16:22:52.118000000, md5sum 7594e7dc9fc28f727c42ee3e0749de81
-  2: 6048320 bytes, 2016-03-05 16:22:46.185000000, md5sum 1eedaa9fe86fd4b8632e2ac549403b36
-  3: 1744073 bytes, 2016-03-05 16:22:38.104000000, md5sum 851957f7fb6f0bc4ce76be966d336802
+  1: 564374 bytes, 2016-03-05 16:22:52.118000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
+  2: 6048320 bytes, 2016-03-05 16:22:46.185000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
+  3: 1744073 bytes, 2016-03-05 16:22:38.104000000, MD5 851957f7fb6f0bc4ce76be966d336802
s) Skip and do nothing
k) Keep just one (choose which in next step)
r) Rename all to be different (by changing file.jpg to file-1.jpg)

@@ -35,6 +35,9 @@ Then delete

That reads "delete everything with a minimum size of 100 MB", hence
delete all files bigger than 100MBytes.

+**Important**: Since this can cause data loss, test first with the
+`--dry-run` or the `--interactive`/`-i` flag.

```
rclone delete remote:path [flags]
@@ -11,16 +11,27 @@ Generate public link to file/folder.

## Synopsis

-rclone link will create or retrieve a public link to the given file or folder.
+rclone link will create, retrieve or remove a public link to the given
+file or folder.

    rclone link remote:path/to/file
    rclone link remote:path/to/folder/
+   rclone link --unlink remote:path/to/folder/
+   rclone link --expire 1d remote:path/to/file

-If successful, the last line of the output will contain the link. Exact
-capabilities depend on the remote, but the link will always be created with
-the least constraints – e.g. no expiry, no password protection, accessible
-without account.
+If you supply the --expire flag, it will set the expiration time
+otherwise it will use the default (100 years). **Note** not all
+backends support the --expire flag - if the backend doesn't support it
+then the link returned won't expire.
+
+Use the --unlink flag to remove existing public links to the file or
+folder. **Note** not all backends support "--unlink" flag - those that
+don't will just ignore it.
+
+If successful, the last line of the output will contain the
+link. Exact capabilities depend on the remote, but the link will
+always by default be created with the least constraints – e.g. no
+expiry, no password protection, accessible without account.

```
@@ -30,7 +41,9 @@ rclone link remote:path [flags]

## Options

```
-  -h, --help   help for link
+      --expire Duration   The amount of time that the link will be valid (default 100y)
+  -h, --help              help for link
+      --unlink            Remove existing public link to file/folder
```

See the [global flags page](/flags/) for global options not listed here.
@@ -1,13 +1,13 @@
---
title: "rclone lsf"
-description: "List directories and objects in remote:path formatted for parsing"
+description: "List directories and objects in remote:path formatted for parsing."
slug: rclone_lsf
url: /commands/rclone_lsf/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/lsf/ and as part of making a release run "make commanddocs"
---
# rclone lsf

-List directories and objects in remote:path formatted for parsing
+List directories and objects in remote:path formatted for parsing.

## Synopsis

@@ -49,6 +49,9 @@ Stopping the mount manually:

    # OS X
    umount /path/to/local/mount

+**Note**: As of `rclone` 1.52.2, `rclone mount` now requires Go version 1.13
+or newer on some platforms depending on the underlying FUSE library in use.

## Installing on Windows

To run rclone mount on Windows, you will need to
@@ -191,23 +194,39 @@ parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on.
When --vfs-read-chunk-size-limit 500M is specified, the result would be
0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.

-Chunked reading will only work with --vfs-cache-mode < full, as the file will always
-be copied to the vfs cache before opening with --vfs-cache-mode full.
-
-## Directory Cache
+## VFS - Virtual File System

-Using the `--dir-cache-time` flag, you can set how long a
+This command uses the VFS layer. This adapts the cloud storage objects
+that rclone uses into something which looks much more like a disk
+filing system.
+
+Cloud storage objects have lots of properties which aren't like disk
+files - you can't extend them or write to the middle of them, so the
+VFS layer has to deal with that. Because there is no one right way of
+doing this there are various options explained below.
+
+The VFS layer also implements a directory cache - this caches info
+about files and directories (but not the data) in memory.
+
+## VFS Directory Cache
+
+Using the `--dir-cache-time` flag, you can control how long a
directory should be considered up to date and not refreshed from the
-backend. Changes made locally in the mount may appear immediately or
-invalidate the cache. However, changes done on the remote will only
-be picked up once the cache expires if the backend configured does not
-support polling for changes. If the backend supports polling, changes
-will be picked up on within the polling interval.
+backend. Changes made through the mount will appear immediately or
+invalidate the cache.

-Alternatively, you can send a `SIGHUP` signal to rclone for
-it to flush all directory caches, regardless of how old they are.
-Assuming only one rclone instance is running, you can reset the cache
-like this:
+    --dir-cache-time duration   Time to cache directory entries for. (default 5m0s)
+    --poll-interval duration    Time to wait between polling for changes.
+
+However, changes made directly on the cloud storage by the web
+interface or a different copy of rclone will only be picked up once
+the directory cache expires if the backend configured does not support
+polling for changes. If the backend supports polling, changes will be
+picked up within the polling interval.
+
+You can send a `SIGHUP` signal to rclone for it to flush all
+directory caches, regardless of how old they are. Assuming only one
+rclone instance is running, you can reset the cache like this:

    kill -SIGHUP $(pidof rclone)

@@ -220,40 +239,41 @@ Or individual files or directories:

    rclone rc vfs/forget file=path/to/file dir=path/to/dir

-## File Buffering
+## VFS File Buffering

The `--buffer-size` flag determines the amount of memory
that will be used to buffer data in advance.

-Each open file descriptor will try to keep the specified amount of
-data in memory at all times. The buffered data is bound to one file
-descriptor and won't be shared between multiple open file descriptors
-of the same file.
+Each open file will try to keep the specified amount of data in memory
+at all times. The buffered data is bound to one open file and won't be
+shared.

-This flag is a upper limit for the used memory per file descriptor.
-The buffer will only use memory for data that is downloaded but not
-not yet read. If the buffer is empty, only a small amount of memory
-will be used.
+This flag is an upper limit for the used memory per open file. The
+buffer will only use memory for data that is downloaded but not yet
+read. If the buffer is empty, only a small amount of memory will
+be used.
+
+The maximum memory used by rclone for buffering can be up to
+`--buffer-size * open files`.
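As a worked example of that bound (illustrative numbers): with `--buffer-size 16M` and 10 files open, read buffering alone may use up to 160 MiB, on top of any VFS cache usage.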
-## File Caching
+## VFS File Caching

-These flags control the VFS file caching options. The VFS layer is
-used by rclone mount to make a cloud storage system work more like a
-normal file system.
+These flags control the VFS file caching options. File caching is
+necessary to make the VFS layer appear compatible with a normal file
+system. It can be disabled at the cost of some compatibility.

-You'll need to enable VFS caching if you want, for example, to read
-and write simultaneously to a file. See below for more details.
+For example you'll need to enable VFS caching if you want to read and
+write simultaneously to a file. See below for more details.

-Note that the VFS cache works in addition to the cache backend and you
-may find that you need one or the other or both.
+Note that the VFS cache is separate from the cache backend and you may
+find that you need one or the other or both.

     --cache-dir string                   Directory rclone will use for caching.
-    --vfs-cache-mode string              Cache mode off|minimal|writes|full (default "off")
+    --vfs-cache-mode CacheMode           Cache mode off|minimal|writes|full (default off)
     --vfs-cache-max-age duration         Max age of objects in the cache. (default 1h0m0s)
-    --vfs-cache-max-size int             Max total size of objects in the cache. (default off)
+    --vfs-cache-max-size SizeSuffix      Max total size of objects in the cache. (default off)
     --vfs-cache-poll-interval duration   Interval to poll the cache for stale objects. (default 1m0s)
+    --vfs-write-back duration            Time to writeback files after last use when using cache. (default 5s)

If run with `-vv` rclone will print the location of the file cache. The
files are stored in the user cache file area which is OS dependent but
@@ -265,9 +285,10 @@ The higher the cache mode the more compatible rclone becomes at the
cost of using disk space.

Note that files are written back to the remote only when they are
-closed so if rclone is quit or dies with open files then these won't
-get written back to the remote. However they will still be in the on
-disk cache.
+closed and if they haven't been accessed for --vfs-write-back
+seconds. If rclone is quit or dies with files that haven't been
+uploaded, these will be uploaded next time rclone is run with the same
+flags.

If using --vfs-cache-max-size note that the cache may exceed this size
for two reasons. Firstly because it is only checked every
@@ -276,7 +297,7 @@ evicted from the cache.

### --vfs-cache-mode off

-In this mode the cache will read directly from the remote and write
+In this mode (the default) the cache will read directly from the remote and write
directly to the remote without caching anything on disk.

This will mean some operations are not possible

@@ -292,7 +313,7 @@ This will mean some operations are not possible

### --vfs-cache-mode minimal

This is very similar to "off" except that files opened for read AND
-write will be buffered to disks. This means that files opened for
+write will be buffered to disk. This means that files opened for
write will be a lot more compatible, but uses the minimal disk space.

These operations are not possible
@@ -310,32 +331,77 @@ first.

This mode should support all normal file system operations.

-If an upload fails it will be retried up to --low-level-retries times.
+If an upload fails it will be retried at exponentially increasing
+intervals up to 1 minute.

### --vfs-cache-mode full

-In this mode all reads and writes are buffered to and from disk. When
-a file is opened for read it will be downloaded in its entirety first.
+In this mode all reads and writes are buffered to and from disk. When
+data is read from the remote this is buffered to disk as well.

-This may be appropriate for your needs, or you may prefer to look at
-the cache backend which does a much more sophisticated job of caching,
-including caching directory hierarchies and chunks of files.
+In this mode the files in the cache will be sparse files and rclone
+will keep track of which bits of the files it has downloaded.
+
+In this mode, unlike the others, when a file is written to the disk,
+it will be kept on the disk after it is written to the remote. It
+will be purged on a schedule according to `--vfs-cache-max-age`.
+
+So if an application only reads the starts of each file, then rclone
+will only buffer the start of the file. These files will appear to be
+their full size in the cache, but they will be sparse files with only
+the data that has been downloaded present in them.

-This mode should support all normal file system operations.
+This mode should support all normal file system operations and is
+otherwise identical to --vfs-cache-mode writes.

-If an upload or download fails it will be retried up to
---low-level-retries times.
+When reading a file rclone will read --buffer-size plus
+--vfs-read-ahead bytes ahead. The --buffer-size is buffered in memory
+whereas the --vfs-read-ahead is buffered on disk.
+
+When using this mode it is recommended that --buffer-size is not set
+too big and --vfs-read-ahead is set large if required.
+
+**IMPORTANT** not all file systems support sparse files. In particular
+FAT/exFAT do not. Rclone will perform very badly if the cache
+directory is on a filesystem which doesn't support sparse files and it
+will log an ERROR message if one is detected.

-## Case Sensitivity
+## VFS Performance
+
+These flags may be used to enable/disable features of the VFS for
+performance or other reasons.
+
+In particular S3 and Swift benefit hugely from the --no-modtime flag
+(or use --use-server-modtime for a slightly different effect) as each
+read of the modification time takes a transaction.
+
+    --no-checksum    Don't compare checksums on up/download.
+    --no-modtime     Don't read/write the modification time (can speed things up).
+    --no-seek        Don't allow seeking in files.
+    --read-only      Mount read-only.
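For instance, an illustrative invocation (remote and mountpoint names made up):

    rclone mount --no-modtime --read-only remote: /mnt/remote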
+When rclone reads files from a remote it reads them in chunks. This
+means that rather than requesting the whole file rclone reads the
+chunk specified. This is advantageous because some cloud providers
+account for reads being all the data requested, not all the data
+delivered.
+
+Rclone will keep doubling the chunk size requested starting at
+--vfs-read-chunk-size with a maximum of --vfs-read-chunk-size-limit
+unless it is set to "off" in which case there will be no limit.
+
+    --vfs-read-chunk-size SizeSuffix        Read the source objects in chunks. (default 128M)
+    --vfs-read-chunk-size-limit SizeSuffix  Max chunk doubling size (default "off")
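The doubling rule above can be sketched in a few lines of Go. This is a minimal illustration, not rclone's actual reader; the file size and flag values are made-up examples. It reproduces the 0-100M, 100M-300M, 300M-700M, ... sequence quoted earlier for a 500M limit:

```
package main

import "fmt"

const M = int64(1 << 20)

func main() {
	chunkSize := int64(100) * M // --vfs-read-chunk-size 100M
	limit := int64(500) * M     // --vfs-read-chunk-size-limit 500M ("off" would mean no cap)
	fileSize := int64(2000) * M // made-up file size for the example

	for offset := int64(0); offset < fileSize; {
		end := offset + chunkSize
		if end > fileSize {
			end = fileSize
		}
		fmt.Printf("request %dM-%dM\n", offset/M, end/M)
		offset = end
		// Double the chunk size after each read, capped at the limit.
		if chunkSize < limit {
			chunkSize *= 2
			if chunkSize > limit {
				chunkSize = limit
			}
		}
	}
}
```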
+Sometimes rclone is delivered reads or writes out of order. Rather
+than seeking rclone will wait a short time for the in sequence read or
+write to come in. These flags only come into effect when not using an
+on disk cache file.
+
+    --vfs-read-wait duration   Time to wait for in-sequence read before seeking. (default 20ms)
+    --vfs-write-wait duration  Time to wait for in-sequence write before giving error. (default 1s)
+
+## VFS Case Sensitivity

Linux file systems are case-sensitive: two files can differ only
by case, and the exact case must be used when opening a file.

Windows is not like most other operating systems supported by rclone.
File systems in modern Windows are case-insensitive but case-preserving:
although existing files can be opened using any case, the exact case used
to create the file is preserved and available for programs to query.
@@ -346,7 +412,7 @@ file systems case-sensitive but that is not the default

The "--vfs-case-insensitive" mount flag controls how rclone handles these
two cases. If its value is "false", rclone passes file names to the mounted
-file system as is. If the flag is "true" (or appears without a value on
+file system as-is. If the flag is "true" (or appears without a value on
command line), rclone may perform a "fixup" as explained below.

The user may specify a file name to open/delete/rename/etc with a case

@@ -362,7 +428,7 @@ Note that case sensitivity of the operating system running rclone (the target)
may differ from case sensitivity of a file system mounted by rclone (the source).
The flag controls whether "fixup" is performed to satisfy the target.

-If the flag is not provided on command line, then its default value depends
+If the flag is not provided on the command line, then its default value depends
on the operating system where rclone runs: "true" on Windows and macOS, "false"
otherwise. If the flag is provided without a value, then it is "true".
@@ -403,9 +469,11 @@ rclone mount remote:path /path/to/mountpoint [flags]
     --vfs-cache-mode CacheMode               Cache mode off|minimal|writes|full (default off)
     --vfs-cache-poll-interval duration       Interval to poll the cache for stale objects. (default 1m0s)
     --vfs-case-insensitive                   If a file name not found, find a case insensitive match.
+    --vfs-read-ahead SizeSuffix              Extra read ahead over --buffer-size when using cache-mode full.
     --vfs-read-chunk-size SizeSuffix         Read the source objects in chunks. (default 128M)
     --vfs-read-chunk-size-limit SizeSuffix   If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
+    --vfs-read-wait duration                 Time to wait for in-sequence read before seeking. (default 20ms)
+    --vfs-write-back duration                Time to writeback files after last use when using cache. (default 5s)
+    --vfs-write-wait duration                Time to wait for in-sequence write before giving error. (default 1s)
     --volname string                         Set the volume name (not supported by all OSes).
     --write-back-cache                       Makes kernel buffer writes before sending them to rclone. Without this, writethrough caching is used.
@@ -34,7 +34,7 @@ option when moving a small number of files into a large destination
can speed transfers up greatly.

**Important**: Since this can cause data loss, test first with the
---dry-run flag.
+`--dry-run` or the `--interactive`/`-i` flag.

**Note**: Use the `-P`/`--progress` flag to view real-time transfer statistics.

@@ -39,7 +39,7 @@ modification time or MD5SUM. src will be deleted on successful
transfer.

**Important**: Since this can cause data loss, test first with the
---dry-run flag.
+`--dry-run` or the `--interactive`/`-i` flag.

**Note**: Use the `-P`/`--progress` flag to view real-time transfer statistics.

@@ -1,17 +1,38 @@
---
title: "rclone obscure"
-description: "Obscure password for use in the rclone.conf"
+description: "Obscure password for use in the rclone config file."
slug: rclone_obscure
url: /commands/rclone_obscure/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/obscure/ and as part of making a release run "make commanddocs"
---
# rclone obscure

-Obscure password for use in the rclone.conf
+Obscure password for use in the rclone config file.

## Synopsis

-Obscure password for use in the rclone.conf
+In the rclone config file, human readable passwords are
+obscured. Obscuring them is done by encrypting them and writing them
+out in base64. This is **not** a secure way of encrypting these
+passwords as rclone can decrypt them - it is to prevent "eyedropping"
+- namely someone seeing a password in the rclone config file by
+accident.
+
+Many equally important things (like access tokens) are not obscured in
+the config file. However it is very hard to shoulder surf a 64
+character hex token.
+
+This command can also accept a password through STDIN instead of an
+argument by passing a hyphen as an argument. Example:
+
+    echo "secretpassword" | rclone obscure -
+
+If there is no data on STDIN to read, rclone obscure will default to
+obfuscating the hyphen itself.
+
+If you want to encrypt the config file then please use config file
+encryption - see [rclone config](/commands/rclone_config/) for more
+info.

```
rclone obscure password [flags]
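To make the encrypt-and-base64 idea above concrete, here is a minimal Go sketch of reversible obscuring. It is an illustration only, not rclone's actual implementation: the fixed key is invented for the example, and real obscured values from rclone will not round-trip through it.

```
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"io"
)

// A fixed, publicly known key: the goal is obfuscation, not secrecy.
// This key is made up for the example.
var key = []byte("0123456789abcdef0123456789abcdef")

// obscure encrypts plain with AES-CTR under the fixed key and
// base64-encodes the random IV plus ciphertext.
func obscure(plain string) (string, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		return "", err
	}
	buf := make([]byte, aes.BlockSize+len(plain))
	iv := buf[:aes.BlockSize]
	if _, err := io.ReadFull(rand.Reader, iv); err != nil {
		return "", err
	}
	cipher.NewCTR(block, iv).XORKeyStream(buf[aes.BlockSize:], []byte(plain))
	return base64.RawURLEncoding.EncodeToString(buf), nil
}

// reveal reverses obscure - anyone holding the (public) key can do
// this, which is why obscuring is not real encryption.
func reveal(obscured string) (string, error) {
	raw, err := base64.RawURLEncoding.DecodeString(obscured)
	if err != nil {
		return "", err
	}
	if len(raw) < aes.BlockSize {
		return "", fmt.Errorf("obscured value too short")
	}
	block, err := aes.NewCipher(key)
	if err != nil {
		return "", err
	}
	plain := make([]byte, len(raw)-aes.BlockSize)
	cipher.NewCTR(block, raw[:aes.BlockSize]).XORKeyStream(plain, raw[aes.BlockSize:])
	return string(plain), nil
}

func main() {
	o, _ := obscure("secretpassword")
	p, _ := reveal(o)
	fmt.Println(o, "->", p)
}
```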
@@ -16,6 +16,9 @@ Remove the path and all of its contents. Note that this does not obey
include/exclude filters - everything will be removed. Use `delete` if
you want to selectively delete files.

+**Important**: Since this can cause data loss, test first with the
+`--dry-run` or the `--interactive`/`-i` flag.

```
rclone purge remote:path [flags]
@@ -23,30 +23,49 @@ players might show files that they are not able to play back correctly.

## Server options

-Use --addr to specify which IP address and port the server should
-listen on, eg --addr 1.2.3.4:8000 or --addr :8080 to listen to all
+Use `--addr` to specify which IP address and port the server should
+listen on, eg `--addr 1.2.3.4:8000` or `--addr :8080` to listen to all
IPs.

-Use --name to choose the friendly server name, which is by
+Use `--name` to choose the friendly server name, which is by
default "rclone (hostname)".

-Use --log-trace in conjunction with -vv to enable additional debug
+Use `--log-trace` in conjunction with `-vv` to enable additional debug
logging of all UPNP traffic.
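An illustrative invocation combining these options (the remote name is made up):

    rclone serve dlna --addr :8080 --name "living room" -vv --log-trace media:videos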
-## Directory Cache
+## VFS - Virtual File System

-Using the `--dir-cache-time` flag, you can set how long a
+This command uses the VFS layer. This adapts the cloud storage objects
+that rclone uses into something which looks much more like a disk
+filing system.
+
+Cloud storage objects have lots of properties which aren't like disk
+files - you can't extend them or write to the middle of them, so the
+VFS layer has to deal with that. Because there is no one right way of
+doing this there are various options explained below.
+
+The VFS layer also implements a directory cache - this caches info
+about files and directories (but not the data) in memory.
+
+## VFS Directory Cache
+
+Using the `--dir-cache-time` flag, you can control how long a
directory should be considered up to date and not refreshed from the
-backend. Changes made locally in the mount may appear immediately or
-invalidate the cache. However, changes done on the remote will only
-be picked up once the cache expires if the backend configured does not
-support polling for changes. If the backend supports polling, changes
-will be picked up on within the polling interval.
+backend. Changes made through the mount will appear immediately or
+invalidate the cache.

-Alternatively, you can send a `SIGHUP` signal to rclone for
-it to flush all directory caches, regardless of how old they are.
-Assuming only one rclone instance is running, you can reset the cache
-like this:
+    --dir-cache-time duration   Time to cache directory entries for. (default 5m0s)
+    --poll-interval duration    Time to wait between polling for changes.
+
+However, changes made directly on the cloud storage by the web
+interface or a different copy of rclone will only be picked up once
+the directory cache expires if the backend configured does not support
+polling for changes. If the backend supports polling, changes will be
+picked up within the polling interval.
+
+You can send a `SIGHUP` signal to rclone for it to flush all
+directory caches, regardless of how old they are. Assuming only one
+rclone instance is running, you can reset the cache like this:

    kill -SIGHUP $(pidof rclone)

@@ -59,40 +78,41 @@ Or individual files or directories:

    rclone rc vfs/forget file=path/to/file dir=path/to/dir

-## File Buffering
+## VFS File Buffering

The `--buffer-size` flag determines the amount of memory
that will be used to buffer data in advance.

-Each open file descriptor will try to keep the specified amount of
-data in memory at all times. The buffered data is bound to one file
-descriptor and won't be shared between multiple open file descriptors
-of the same file.
+Each open file will try to keep the specified amount of data in memory
+at all times. The buffered data is bound to one open file and won't be
+shared.

-This flag is a upper limit for the used memory per file descriptor.
-The buffer will only use memory for data that is downloaded but not
-not yet read. If the buffer is empty, only a small amount of memory
-will be used.
+This flag is an upper limit for the used memory per open file. The
+buffer will only use memory for data that is downloaded but not yet
+read. If the buffer is empty, only a small amount of memory will
+be used.
+
+The maximum memory used by rclone for buffering can be up to
+`--buffer-size * open files`.

-## File Caching
+## VFS File Caching

-These flags control the VFS file caching options. The VFS layer is
-used by rclone mount to make a cloud storage system work more like a
-normal file system.
+These flags control the VFS file caching options. File caching is
+necessary to make the VFS layer appear compatible with a normal file
+system. It can be disabled at the cost of some compatibility.

-You'll need to enable VFS caching if you want, for example, to read
-and write simultaneously to a file. See below for more details.
+For example you'll need to enable VFS caching if you want to read and
+write simultaneously to a file. See below for more details.

-Note that the VFS cache works in addition to the cache backend and you
-may find that you need one or the other or both.
+Note that the VFS cache is separate from the cache backend and you may
+find that you need one or the other or both.

     --cache-dir string                   Directory rclone will use for caching.
-    --vfs-cache-mode string              Cache mode off|minimal|writes|full (default "off")
+    --vfs-cache-mode CacheMode           Cache mode off|minimal|writes|full (default off)
     --vfs-cache-max-age duration         Max age of objects in the cache. (default 1h0m0s)
-    --vfs-cache-max-size int             Max total size of objects in the cache. (default off)
+    --vfs-cache-max-size SizeSuffix      Max total size of objects in the cache. (default off)
     --vfs-cache-poll-interval duration   Interval to poll the cache for stale objects. (default 1m0s)
+    --vfs-write-back duration            Time to writeback files after last use when using cache. (default 5s)

If run with `-vv` rclone will print the location of the file cache. The
files are stored in the user cache file area which is OS dependent but
@@ -104,9 +124,10 @@ The higher the cache mode the more compatible rclone becomes at the
cost of using disk space.

Note that files are written back to the remote only when they are
-closed so if rclone is quit or dies with open files then these won't
-get written back to the remote. However they will still be in the on
-disk cache.
+closed and if they haven't been accessed for --vfs-write-back
+seconds. If rclone is quit or dies with files that haven't been
+uploaded, these will be uploaded next time rclone is run with the same
+flags.

If using --vfs-cache-max-size note that the cache may exceed this size
for two reasons. Firstly because it is only checked every

@@ -115,7 +136,7 @@ evicted from the cache.

### --vfs-cache-mode off

-In this mode the cache will read directly from the remote and write
+In this mode (the default) the cache will read directly from the remote and write
directly to the remote without caching anything on disk.

This will mean some operations are not possible

@@ -131,7 +152,7 @@ This will mean some operations are not possible

### --vfs-cache-mode minimal

This is very similar to "off" except that files opened for read AND
-write will be buffered to disks. This means that files opened for
+write will be buffered to disk. This means that files opened for
write will be a lot more compatible, but uses the minimal disk space.

These operations are not possible

@@ -149,32 +170,77 @@ first.

This mode should support all normal file system operations.

-If an upload fails it will be retried up to --low-level-retries times.
+If an upload fails it will be retried at exponentially increasing
+intervals up to 1 minute.

### --vfs-cache-mode full

-In this mode all reads and writes are buffered to and from disk. When
-a file is opened for read it will be downloaded in its entirety first.
+In this mode all reads and writes are buffered to and from disk. When
+data is read from the remote this is buffered to disk as well.

-This may be appropriate for your needs, or you may prefer to look at
-the cache backend which does a much more sophisticated job of caching,
-including caching directory hierarchies and chunks of files.
+In this mode the files in the cache will be sparse files and rclone
+will keep track of which bits of the files it has downloaded.
+
+In this mode, unlike the others, when a file is written to the disk,
+it will be kept on the disk after it is written to the remote. It
+will be purged on a schedule according to `--vfs-cache-max-age`.
+
+So if an application only reads the starts of each file, then rclone
+will only buffer the start of the file. These files will appear to be
+their full size in the cache, but they will be sparse files with only
+the data that has been downloaded present in them.

-This mode should support all normal file system operations.
+This mode should support all normal file system operations and is
+otherwise identical to --vfs-cache-mode writes.

-If an upload or download fails it will be retried up to
---low-level-retries times.
+When reading a file rclone will read --buffer-size plus
+--vfs-read-ahead bytes ahead. The --buffer-size is buffered in memory
+whereas the --vfs-read-ahead is buffered on disk.
+
+When using this mode it is recommended that --buffer-size is not set
+too big and --vfs-read-ahead is set large if required.
+
+**IMPORTANT** not all file systems support sparse files. In particular
+FAT/exFAT do not. Rclone will perform very badly if the cache
+directory is on a filesystem which doesn't support sparse files and it
+will log an ERROR message if one is detected.

-## Case Sensitivity
+## VFS Performance
+
+These flags may be used to enable/disable features of the VFS for
+performance or other reasons.
+
+In particular S3 and Swift benefit hugely from the --no-modtime flag
+(or use --use-server-modtime for a slightly different effect) as each
+read of the modification time takes a transaction.
+
+    --no-checksum    Don't compare checksums on up/download.
+    --no-modtime     Don't read/write the modification time (can speed things up).
+    --no-seek        Don't allow seeking in files.
+    --read-only      Mount read-only.
+
+When rclone reads files from a remote it reads them in chunks. This
+means that rather than requesting the whole file rclone reads the
+chunk specified. This is advantageous because some cloud providers
+account for reads being all the data requested, not all the data
+delivered.
+
+Rclone will keep doubling the chunk size requested starting at
+--vfs-read-chunk-size with a maximum of --vfs-read-chunk-size-limit
+unless it is set to "off" in which case there will be no limit.
+
+    --vfs-read-chunk-size SizeSuffix        Read the source objects in chunks. (default 128M)
+    --vfs-read-chunk-size-limit SizeSuffix  Max chunk doubling size (default "off")
+
+Sometimes rclone is delivered reads or writes out of order. Rather
+than seeking rclone will wait a short time for the in sequence read or
+write to come in. These flags only come into effect when not using an
+on disk cache file.
+
+    --vfs-read-wait duration   Time to wait for in-sequence read before seeking. (default 20ms)
+    --vfs-write-wait duration  Time to wait for in-sequence write before giving error. (default 1s)
+
+## VFS Case Sensitivity

Linux file systems are case-sensitive: two files can differ only
by case, and the exact case must be used when opening a file.

Windows is not like most other operating systems supported by rclone.
File systems in modern Windows are case-insensitive but case-preserving:
although existing files can be opened using any case, the exact case used
to create the file is preserved and available for programs to query.

@@ -185,7 +251,7 @@ file systems case-sensitive but that is not the default

The "--vfs-case-insensitive" mount flag controls how rclone handles these
two cases. If its value is "false", rclone passes file names to the mounted
-file system as is. If the flag is "true" (or appears without a value on
+file system as-is. If the flag is "true" (or appears without a value on
command line), rclone may perform a "fixup" as explained below.

The user may specify a file name to open/delete/rename/etc with a case

@@ -201,7 +267,7 @@ Note that case sensitivity of the operating system running rclone (the target)
may differ from case sensitivity of a file system mounted by rclone (the source).
The flag controls whether "fixup" is performed to satisfy the target.

-If the flag is not provided on command line, then its default value depends
+If the flag is not provided on the command line, then its default value depends
on the operating system where rclone runs: "true" on Windows and macOS, "false"
otherwise. If the flag is provided without a value, then it is "true".

@@ -233,9 +299,11 @@ rclone serve dlna remote:path [flags]
     --vfs-cache-mode CacheMode               Cache mode off|minimal|writes|full (default off)
     --vfs-cache-poll-interval duration       Interval to poll the cache for stale objects. (default 1m0s)
     --vfs-case-insensitive                   If a file name not found, find a case insensitive match.
+    --vfs-read-ahead SizeSuffix              Extra read ahead over --buffer-size when using cache-mode full.
     --vfs-read-chunk-size SizeSuffix         Read the source objects in chunks. (default 128M)
     --vfs-read-chunk-size-limit SizeSuffix   If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
+    --vfs-read-wait duration                 Time to wait for in-sequence read before seeking. (default 20ms)
+    --vfs-write-back duration                Time to writeback files after last use when using cache. (default 5s)
+    --vfs-write-wait duration                Time to wait for in-sequence write before giving error. (default 1s)
```

@@ -32,20 +32,39 @@ By default this will serve files without needing a login.

You can set a single username and password with the --user and --pass flags.

-## Directory Cache
+## VFS - Virtual File System

-Using the `--dir-cache-time` flag, you can set how long a
+This command uses the VFS layer. This adapts the cloud storage objects
+that rclone uses into something which looks much more like a disk
+filing system.
+
+Cloud storage objects have lots of properties which aren't like disk
+files - you can't extend them or write to the middle of them, so the
+VFS layer has to deal with that. Because there is no one right way of
+doing this there are various options explained below.
+
+The VFS layer also implements a directory cache - this caches info
+about files and directories (but not the data) in memory.
+
+## VFS Directory Cache
+
+Using the `--dir-cache-time` flag, you can control how long a
directory should be considered up to date and not refreshed from the
-backend. Changes made locally in the mount may appear immediately or
-invalidate the cache. However, changes done on the remote will only
-be picked up once the cache expires if the backend configured does not
-support polling for changes. If the backend supports polling, changes
-will be picked up on within the polling interval.
+backend. Changes made through the mount will appear immediately or
+invalidate the cache.

-Alternatively, you can send a `SIGHUP` signal to rclone for
-it to flush all directory caches, regardless of how old they are.
-Assuming only one rclone instance is running, you can reset the cache
-like this:
+    --dir-cache-time duration   Time to cache directory entries for. (default 5m0s)
+    --poll-interval duration    Time to wait between polling for changes.
+
+However, changes made directly on the cloud storage by the web
+interface or a different copy of rclone will only be picked up once
+the directory cache expires if the backend configured does not support
+polling for changes. If the backend supports polling, changes will be
+picked up within the polling interval.
+
+You can send a `SIGHUP` signal to rclone for it to flush all
+directory caches, regardless of how old they are. Assuming only one
+rclone instance is running, you can reset the cache like this:

    kill -SIGHUP $(pidof rclone)

@@ -58,40 +77,41 @@ Or individual files or directories:

    rclone rc vfs/forget file=path/to/file dir=path/to/dir

-## File Buffering
+## VFS File Buffering

The `--buffer-size` flag determines the amount of memory
that will be used to buffer data in advance.

-Each open file descriptor will try to keep the specified amount of
-data in memory at all times. The buffered data is bound to one file
-descriptor and won't be shared between multiple open file descriptors
-of the same file.
+Each open file will try to keep the specified amount of data in memory
+at all times. The buffered data is bound to one open file and won't be
+shared.

-This flag is a upper limit for the used memory per file descriptor.
-The buffer will only use memory for data that is downloaded but not
-not yet read. If the buffer is empty, only a small amount of memory
-will be used.
+This flag is an upper limit for the used memory per open file. The
+buffer will only use memory for data that is downloaded but not yet
+read. If the buffer is empty, only a small amount of memory will
+be used.
+
+The maximum memory used by rclone for buffering can be up to
+`--buffer-size * open files`.

-## File Caching
+## VFS File Caching

-These flags control the VFS file caching options. The VFS layer is
-used by rclone mount to make a cloud storage system work more like a
-normal file system.
+These flags control the VFS file caching options. File caching is
+necessary to make the VFS layer appear compatible with a normal file
+system. It can be disabled at the cost of some compatibility.

-You'll need to enable VFS caching if you want, for example, to read
-and write simultaneously to a file. See below for more details.
+For example you'll need to enable VFS caching if you want to read and
+write simultaneously to a file. See below for more details.

-Note that the VFS cache works in addition to the cache backend and you
-may find that you need one or the other or both.
+Note that the VFS cache is separate from the cache backend and you may
+find that you need one or the other or both.

     --cache-dir string                   Directory rclone will use for caching.
-    --vfs-cache-mode string              Cache mode off|minimal|writes|full (default "off")
+    --vfs-cache-mode CacheMode           Cache mode off|minimal|writes|full (default off)
     --vfs-cache-max-age duration         Max age of objects in the cache. (default 1h0m0s)
-    --vfs-cache-max-size int             Max total size of objects in the cache. (default off)
+    --vfs-cache-max-size SizeSuffix      Max total size of objects in the cache. (default off)
     --vfs-cache-poll-interval duration   Interval to poll the cache for stale objects. (default 1m0s)
+    --vfs-write-back duration            Time to writeback files after last use when using cache. (default 5s)

If run with `-vv` rclone will print the location of the file cache. The
files are stored in the user cache file area which is OS dependent but
@@ -103,9 +123,10 @@ The higher the cache mode the more compatible rclone becomes at the
|
||||
cost of using disk space.
|
||||
|
||||
Note that files are written back to the remote only when they are
|
||||
closed so if rclone is quit or dies with open files then these won't
|
||||
get written back to the remote. However they will still be in the on
|
||||
disk cache.
|
||||
closed and if they haven't been accessed for --vfs-write-back
|
||||
second. If rclone is quit or dies with files that haven't been
|
||||
uploaded, these will be uploaded next time rclone is run with the same
|
||||
flags.
|
||||
|
||||
If using --vfs-cache-max-size note that the cache may exceed this size
|
||||
for two reasons. Firstly because it is only checked every
|
||||
@@ -114,7 +135,7 @@ evicted from the cache.
|
||||
|
||||
### --vfs-cache-mode off
|
||||
|
||||
In this mode the cache will read directly from the remote and write
|
||||
In this mode (the default) the cache will read directly from the remote and write
|
||||
directly to the remote without caching anything on disk.
|
||||
|
||||
This will mean some operations are not possible
|
||||
@@ -130,7 +151,7 @@ This will mean some operations are not possible
|
||||
### --vfs-cache-mode minimal
|
||||
|
||||
This is very similar to "off" except that files opened for read AND
|
||||
write will be buffered to disks. This means that files opened for
|
||||
write will be buffered to disk. This means that files opened for
|
||||
write will be a lot more compatible, but uses the minimal disk space.
|
||||
|
||||
These operations are not possible
|
||||
@@ -148,32 +169,77 @@ first.
|
||||
|
||||
This mode should support all normal file system operations.
|
||||
|
||||
If an upload fails it will be retried up to --low-level-retries times.
|
||||
If an upload fails it will be retried at exponentially increasing
|
||||
intervals up to 1 minute.
|
||||
|
||||
### --vfs-cache-mode full
|
||||
|
||||
In this mode all reads and writes are buffered to and from disk. When
|
||||
a file is opened for read it will be downloaded in its entirety first.
|
||||
In this mode all reads and writes are buffered to and from disk. When
|
||||
data is read from the remote this is buffered to disk as well.
|
||||
|
||||
This may be appropriate for your needs, or you may prefer to look at
|
||||
the cache backend which does a much more sophisticated job of caching,
|
||||
including caching directory hierarchies and chunks of files.
|
||||
In this mode the files in the cache will be sparse files and rclone
|
||||
will keep track of which bits of the files it has dowloaded.
|
||||
|
||||
In this mode, unlike the others, when a file is written to the disk,
|
||||
it will be kept on the disk after it is written to the remote. It
|
||||
will be purged on a schedule according to `--vfs-cache-max-age`.
|
||||
So if an application only reads the starts of each file, then rclone
|
||||
will only buffer the start of the file. These files will appear to be
|
||||
their full size in the cache, but they will be sparse files with only
|
||||
the data that has been downloaded present in them.
|
||||
|
||||
This mode should support all normal file system operations.
|
||||
This mode should support all normal file system operations and is
|
||||
otherwise identical to --vfs-cache-mode writes.
|
||||
|
||||
If an upload or download fails it will be retried up to
|
||||
--low-level-retries times.
|
||||
When reading a file rclone will read --buffer-size plus
|
||||
--vfs-read-ahead bytes ahead. The --buffer-size is buffered in memory
|
||||
whereas the --vfs-read-ahead is buffered on disk.
|
||||
|
||||
## Case Sensitivity
|
||||
When using this mode it is recommended that --buffer-size is not set
|
||||
too big and --vfs-read-ahead is set large if required.
|
||||
|
||||
**IMPORTANT** not all file systems support sparse files. In particular
|
||||
FAT/exFAT do not. Rclone will perform very badly if the cache
|
||||
directory is on a filesystem which doesn't support sparse files and it
|
||||
will log an ERROR message if one is detected.
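
As a concrete illustration, a full-mode cache for this serve command
might be set up as follows; the remote name, read-ahead size and cache
directory are placeholders, not recommendations:

```
rclone serve ftp remote:path \
    --vfs-cache-mode full \
    --vfs-read-ahead 256M \
    --cache-dir /var/cache/rclone
```

As noted above, the directory given to --cache-dir should be on a
filesystem which supports sparse files.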

+## VFS Performance

+These flags may be used to enable/disable features of the VFS for
+performance or other reasons.

+In particular S3 and Swift benefit hugely from the --no-modtime flag
+(or use --use-server-modtime for a slightly different effect) as each
+read of the modification time takes a transaction.

+--no-checksum Don't compare checksums on up/download.
+--no-modtime Don't read/write the modification time (can speed things up).
+--no-seek Don't allow seeking in files.
+--read-only Mount read-only.

+When rclone reads files from a remote it reads them in chunks. This
+means that rather than requesting the whole file rclone reads the
+chunk specified. This is advantageous because some cloud providers
+account for reads being all the data requested, not all the data
+delivered.

+Rclone will keep doubling the chunk size requested starting at
+--vfs-read-chunk-size with a maximum of --vfs-read-chunk-size-limit
+unless it is set to "off" in which case there will be no limit.

+--vfs-read-chunk-size SizeSuffix Read the source objects in chunks. (default 128M)
+--vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default "off")
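
To make the doubling concrete, here is a hedged example; the values
are illustrative rather than defaults:

```
# Start with 64M chunks and stop doubling at 1G; successive ranged
# reads then request 64M, 128M, 256M, 512M, 1G, 1G, ...
rclone serve ftp remote:path --vfs-read-chunk-size 64M --vfs-read-chunk-size-limit 1G
```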

+Sometimes rclone is delivered reads or writes out of order. Rather
+than seeking rclone will wait a short time for the in sequence read or
+write to come in. These flags only come into effect when not using an
+on disk cache file.

+--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms)
+--vfs-write-wait duration Time to wait for in-sequence write before giving error. (default 1s)

+## VFS Case Sensitivity

Linux file systems are case-sensitive: two files can differ only
by case, and the exact case must be used when opening a file.

Windows is not like most other operating systems supported by rclone.
File systems in modern Windows are case-insensitive but case-preserving:
although existing files can be opened using any case, the exact case used
to create the file is preserved and available for programs to query.

@@ -184,7 +250,7 @@ file systems case-sensitive but that is not the default

The "--vfs-case-insensitive" mount flag controls how rclone handles these
two cases. If its value is "false", rclone passes file names to the mounted
-file system as is. If the flag is "true" (or appears without a value on
+file system as-is. If the flag is "true" (or appears without a value on
command line), rclone may perform a "fixup" as explained below.

The user may specify a file name to open/delete/rename/etc with a case

@@ -200,7 +266,7 @@ Note that case sensitivity of the operating system running rclone (the target)
may differ from case sensitivity of a file system mounted by rclone (the source).
The flag controls whether "fixup" is performed to satisfy the target.

-If the flag is not provided on command line, then its default value depends
+If the flag is not provided on the command line, then its default value depends
on the operating system where rclone runs: "true" on Windows and macOS, "false"
otherwise. If the flag is provided without a value, then it is "true".
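
For example, to request the case insensitive behaviour explicitly (the
remote name is a placeholder):

```
rclone serve ftp remote:path --vfs-case-insensitive
```

With the flag in effect, a request for `file.txt` can be satisfied by
an existing `FILE.TXT` via the "fixup" described above.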

@@ -316,9 +382,11 @@ rclone serve ftp remote:path [flags]
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects. (default 1m0s)
--vfs-case-insensitive If a file name not found, find a case insensitive match.
+--vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full.
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks. (default 128M)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms)
+--vfs-write-back duration Time to writeback files after last use when using cache. (default 5s)
--vfs-write-wait duration Time to wait for in-sequence write before giving error. (default 1s)
```

@@ -104,20 +104,39 @@ of that with the CA certificate. --key should be the PEM encoded
private key and --client-ca should be the PEM encoded client
certificate authority certificate.

-## Directory Cache
+## VFS - Virtual File System

-Using the `--dir-cache-time` flag, you can set how long a
+This command uses the VFS layer. This adapts the cloud storage objects
+that rclone uses into something which looks much more like a disk
+filing system.

+Cloud storage objects have lots of properties which aren't like disk
+files - you can't extend them or write to the middle of them, so the
+VFS layer has to deal with that. Because there is no one right way of
+doing this there are various options explained below.

+The VFS layer also implements a directory cache - this caches info
+about files and directories (but not the data) in memory.

+## VFS Directory Cache

+Using the `--dir-cache-time` flag, you can control how long a
directory should be considered up to date and not refreshed from the
-backend. Changes made locally in the mount may appear immediately or
-invalidate the cache. However, changes done on the remote will only
-be picked up once the cache expires if the backend configured does not
-support polling for changes. If the backend supports polling, changes
-will be picked up on within the polling interval.
+backend. Changes made through the mount will appear immediately or
+invalidate the cache.

-Alternatively, you can send a `SIGHUP` signal to rclone for
-it to flush all directory caches, regardless of how old they are.
-Assuming only one rclone instance is running, you can reset the cache
-like this:
+--dir-cache-time duration Time to cache directory entries for. (default 5m0s)
+--poll-interval duration Time to wait between polling for changes.

+However, changes made directly on the cloud storage by the web
+interface or a different copy of rclone will only be picked up once
+the directory cache expires if the backend configured does not support
+polling for changes. If the backend supports polling, changes will be
+picked up within the polling interval.

+You can send a `SIGHUP` signal to rclone for it to flush all
+directory caches, regardless of how old they are. Assuming only one
+rclone instance is running, you can reset the cache like this:

kill -SIGHUP $(pidof rclone)

@@ -130,40 +149,41 @@ Or individual files or directories:

rclone rc vfs/forget file=path/to/file dir=path/to/dir

-## File Buffering
+## VFS File Buffering

The `--buffer-size` flag determines the amount of memory
that will be used to buffer data in advance.

-Each open file descriptor will try to keep the specified amount of
-data in memory at all times. The buffered data is bound to one file
-descriptor and won't be shared between multiple open file descriptors
-of the same file.
+Each open file will try to keep the specified amount of data in memory
+at all times. The buffered data is bound to one open file and won't be
+shared.

-This flag is a upper limit for the used memory per file descriptor.
-The buffer will only use memory for data that is downloaded but not
-not yet read. If the buffer is empty, only a small amount of memory
-will be used.
+This flag is an upper limit for the used memory per open file. The
+buffer will only use memory for data that is downloaded but not yet
+read. If the buffer is empty, only a small amount of memory will be
+used.

+The maximum memory used by rclone for buffering can be up to
+`--buffer-size * open files`.
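
To make the arithmetic concrete: with the default `--buffer-size`
(16M at the time of writing) and ten files open at once, buffering
alone may use up to roughly 160M of memory. A hedged example of
raising the limit, with an illustrative value:

```
# Up to ~640M of buffer memory if ten files are open at once.
rclone serve http remote:path --buffer-size 64M
```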

-## File Caching
+## VFS File Caching

-These flags control the VFS file caching options. The VFS layer is
-used by rclone mount to make a cloud storage system work more like a
-normal file system.
+These flags control the VFS file caching options. File caching is
+necessary to make the VFS layer appear compatible with a normal file
+system. It can be disabled at the cost of some compatibility.

-You'll need to enable VFS caching if you want, for example, to read
-and write simultaneously to a file. See below for more details.
+For example you'll need to enable VFS caching if you want to read and
+write simultaneously to a file. See below for more details.

-Note that the VFS cache works in addition to the cache backend and you
-may find that you need one or the other or both.
+Note that the VFS cache is separate from the cache backend and you may
+find that you need one or the other or both.

--cache-dir string Directory rclone will use for caching.
---vfs-cache-mode string Cache mode off|minimal|writes|full (default "off")
+--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-max-age duration Max age of objects in the cache. (default 1h0m0s)
---vfs-cache-max-size int Max total size of objects in the cache. (default off)
+--vfs-cache-max-size SizeSuffix Max total size of objects in the cache. (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects. (default 1m0s)
+--vfs-write-back duration Time to writeback files after last use when using cache. (default 5s)
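
A hedged example of a cached serve; the cache mode, size cap and
directory are placeholders to adapt:

```
rclone serve http remote:path \
    --vfs-cache-mode writes \
    --vfs-cache-max-size 10G \
    --cache-dir /var/cache/rclone
```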

If run with `-vv` rclone will print the location of the file cache. The
files are stored in the user cache file area which is OS dependent but

@@ -175,9 +195,10 @@ The higher the cache mode the more compatible rclone becomes at the
cost of using disk space.

Note that files are written back to the remote only when they are
-closed so if rclone is quit or dies with open files then these won't
-get written back to the remote. However they will still be in the on
-disk cache.
+closed and if they haven't been accessed for --vfs-write-back
+seconds. If rclone is quit or dies with files that haven't been
+uploaded, these will be uploaded next time rclone is run with the same
+flags.

If using --vfs-cache-max-size note that the cache may exceed this size
for two reasons. Firstly because it is only checked every

@@ -186,7 +207,7 @@ evicted from the cache.

### --vfs-cache-mode off

-In this mode the cache will read directly from the remote and write
+In this mode (the default) the cache will read directly from the remote and write
directly to the remote without caching anything on disk.

This will mean some operations are not possible

@@ -202,7 +223,7 @@ This will mean some operations are not possible

### --vfs-cache-mode minimal

This is very similar to "off" except that files opened for read AND
-write will be buffered to disks. This means that files opened for
+write will be buffered to disk. This means that files opened for
write will be a lot more compatible, but uses the minimal disk space.

These operations are not possible

@@ -220,32 +241,77 @@ first.

This mode should support all normal file system operations.

-If an upload fails it will be retried up to --low-level-retries times.
+If an upload fails it will be retried at exponentially increasing
+intervals up to 1 minute.

### --vfs-cache-mode full

-In this mode all reads and writes are buffered to and from disk. When
-a file is opened for read it will be downloaded in its entirety first.
+In this mode all reads and writes are buffered to and from disk. When
+data is read from the remote this is buffered to disk as well.

-This may be appropriate for your needs, or you may prefer to look at
-the cache backend which does a much more sophisticated job of caching,
-including caching directory hierarchies and chunks of files.
+In this mode the files in the cache will be sparse files and rclone
+will keep track of which bits of the files it has downloaded.

-In this mode, unlike the others, when a file is written to the disk,
-it will be kept on the disk after it is written to the remote. It
-will be purged on a schedule according to `--vfs-cache-max-age`.
+So if an application only reads the starts of each file, then rclone
+will only buffer the start of the file. These files will appear to be
+their full size in the cache, but they will be sparse files with only
+the data that has been downloaded present in them.

-This mode should support all normal file system operations.
+This mode should support all normal file system operations and is
+otherwise identical to --vfs-cache-mode writes.

-If an upload or download fails it will be retried up to
---low-level-retries times.
+When reading a file rclone will read --buffer-size plus
+--vfs-read-ahead bytes ahead. The --buffer-size is buffered in memory
+whereas the --vfs-read-ahead is buffered on disk.

-## Case Sensitivity
+When using this mode it is recommended that --buffer-size is not set
+too big and --vfs-read-ahead is set large if required.

+**IMPORTANT** not all file systems support sparse files. In particular
+FAT/exFAT do not. Rclone will perform very badly if the cache
+directory is on a filesystem which doesn't support sparse files and it
+will log an ERROR message if one is detected.

+## VFS Performance

+These flags may be used to enable/disable features of the VFS for
+performance or other reasons.

+In particular S3 and Swift benefit hugely from the --no-modtime flag
+(or use --use-server-modtime for a slightly different effect) as each
+read of the modification time takes a transaction.

+--no-checksum Don't compare checksums on up/download.
+--no-modtime Don't read/write the modification time (can speed things up).
+--no-seek Don't allow seeking in files.
+--read-only Mount read-only.

+When rclone reads files from a remote it reads them in chunks. This
+means that rather than requesting the whole file rclone reads the
+chunk specified. This is advantageous because some cloud providers
+account for reads being all the data requested, not all the data
+delivered.

+Rclone will keep doubling the chunk size requested starting at
+--vfs-read-chunk-size with a maximum of --vfs-read-chunk-size-limit
+unless it is set to "off" in which case there will be no limit.

+--vfs-read-chunk-size SizeSuffix Read the source objects in chunks. (default 128M)
+--vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default "off")

+Sometimes rclone is delivered reads or writes out of order. Rather
+than seeking rclone will wait a short time for the in sequence read or
+write to come in. These flags only come into effect when not using an
+on disk cache file.

+--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms)
+--vfs-write-wait duration Time to wait for in-sequence write before giving error. (default 1s)

+## VFS Case Sensitivity

Linux file systems are case-sensitive: two files can differ only
by case, and the exact case must be used when opening a file.

Windows is not like most other operating systems supported by rclone.
File systems in modern Windows are case-insensitive but case-preserving:
although existing files can be opened using any case, the exact case used
to create the file is preserved and available for programs to query.

@@ -256,7 +322,7 @@ file systems case-sensitive but that is not the default

The "--vfs-case-insensitive" mount flag controls how rclone handles these
two cases. If its value is "false", rclone passes file names to the mounted
-file system as is. If the flag is "true" (or appears without a value on
+file system as-is. If the flag is "true" (or appears without a value on
command line), rclone may perform a "fixup" as explained below.

The user may specify a file name to open/delete/rename/etc with a case

@@ -272,7 +338,7 @@ Note that case sensitivity of the operating system running rclone (the target)
may differ from case sensitivity of a file system mounted by rclone (the source).
The flag controls whether "fixup" is performed to satisfy the target.

-If the flag is not provided on command line, then its default value depends
+If the flag is not provided on the command line, then its default value depends
on the operating system where rclone runs: "true" on Windows and macOS, "false"
otherwise. If the flag is provided without a value, then it is "true".

@@ -314,9 +380,11 @@ rclone serve http remote:path [flags]
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects. (default 1m0s)
--vfs-case-insensitive If a file name not found, find a case insensitive match.
+--vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full.
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks. (default 128M)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms)
+--vfs-write-back duration Time to writeback files after last use when using cache. (default 5s)
--vfs-write-wait duration Time to wait for in-sequence write before giving error. (default 1s)
```

@@ -43,20 +43,39 @@ Note that the default of "--vfs-cache-mode off" is fine for the rclone
sftp backend, but it may not be with other SFTP clients.

-## Directory Cache
+## VFS - Virtual File System

-Using the `--dir-cache-time` flag, you can set how long a
+This command uses the VFS layer. This adapts the cloud storage objects
+that rclone uses into something which looks much more like a disk
+filing system.

+Cloud storage objects have lots of properties which aren't like disk
+files - you can't extend them or write to the middle of them, so the
+VFS layer has to deal with that. Because there is no one right way of
+doing this there are various options explained below.

+The VFS layer also implements a directory cache - this caches info
+about files and directories (but not the data) in memory.

+## VFS Directory Cache

+Using the `--dir-cache-time` flag, you can control how long a
directory should be considered up to date and not refreshed from the
-backend. Changes made locally in the mount may appear immediately or
-invalidate the cache. However, changes done on the remote will only
-be picked up once the cache expires if the backend configured does not
-support polling for changes. If the backend supports polling, changes
-will be picked up on within the polling interval.
+backend. Changes made through the mount will appear immediately or
+invalidate the cache.

-Alternatively, you can send a `SIGHUP` signal to rclone for
-it to flush all directory caches, regardless of how old they are.
-Assuming only one rclone instance is running, you can reset the cache
-like this:
+--dir-cache-time duration Time to cache directory entries for. (default 5m0s)
+--poll-interval duration Time to wait between polling for changes.

+However, changes made directly on the cloud storage by the web
+interface or a different copy of rclone will only be picked up once
+the directory cache expires if the backend configured does not support
+polling for changes. If the backend supports polling, changes will be
+picked up within the polling interval.

+You can send a `SIGHUP` signal to rclone for it to flush all
+directory caches, regardless of how old they are. Assuming only one
+rclone instance is running, you can reset the cache like this:

kill -SIGHUP $(pidof rclone)

@@ -69,40 +88,41 @@ Or individual files or directories:

rclone rc vfs/forget file=path/to/file dir=path/to/dir

-## File Buffering
+## VFS File Buffering

The `--buffer-size` flag determines the amount of memory
that will be used to buffer data in advance.

-Each open file descriptor will try to keep the specified amount of
-data in memory at all times. The buffered data is bound to one file
-descriptor and won't be shared between multiple open file descriptors
-of the same file.
+Each open file will try to keep the specified amount of data in memory
+at all times. The buffered data is bound to one open file and won't be
+shared.

-This flag is a upper limit for the used memory per file descriptor.
-The buffer will only use memory for data that is downloaded but not
-not yet read. If the buffer is empty, only a small amount of memory
-will be used.
+This flag is an upper limit for the used memory per open file. The
+buffer will only use memory for data that is downloaded but not yet
+read. If the buffer is empty, only a small amount of memory will be
+used.

+The maximum memory used by rclone for buffering can be up to
+`--buffer-size * open files`.

-## File Caching
+## VFS File Caching

-These flags control the VFS file caching options. The VFS layer is
-used by rclone mount to make a cloud storage system work more like a
-normal file system.
+These flags control the VFS file caching options. File caching is
+necessary to make the VFS layer appear compatible with a normal file
+system. It can be disabled at the cost of some compatibility.

-You'll need to enable VFS caching if you want, for example, to read
-and write simultaneously to a file. See below for more details.
+For example you'll need to enable VFS caching if you want to read and
+write simultaneously to a file. See below for more details.

-Note that the VFS cache works in addition to the cache backend and you
-may find that you need one or the other or both.
+Note that the VFS cache is separate from the cache backend and you may
+find that you need one or the other or both.

--cache-dir string Directory rclone will use for caching.
---vfs-cache-mode string Cache mode off|minimal|writes|full (default "off")
+--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-max-age duration Max age of objects in the cache. (default 1h0m0s)
---vfs-cache-max-size int Max total size of objects in the cache. (default off)
+--vfs-cache-max-size SizeSuffix Max total size of objects in the cache. (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects. (default 1m0s)
+--vfs-write-back duration Time to writeback files after last use when using cache. (default 5s)

If run with `-vv` rclone will print the location of the file cache. The
files are stored in the user cache file area which is OS dependent but

@@ -114,9 +134,10 @@ The higher the cache mode the more compatible rclone becomes at the
cost of using disk space.

Note that files are written back to the remote only when they are
-closed so if rclone is quit or dies with open files then these won't
-get written back to the remote. However they will still be in the on
-disk cache.
+closed and if they haven't been accessed for --vfs-write-back
+seconds. If rclone is quit or dies with files that haven't been
+uploaded, these will be uploaded next time rclone is run with the same
+flags.

If using --vfs-cache-max-size note that the cache may exceed this size
for two reasons. Firstly because it is only checked every

@@ -125,7 +146,7 @@ evicted from the cache.

### --vfs-cache-mode off

-In this mode the cache will read directly from the remote and write
+In this mode (the default) the cache will read directly from the remote and write
directly to the remote without caching anything on disk.

This will mean some operations are not possible

@@ -141,7 +162,7 @@ This will mean some operations are not possible

### --vfs-cache-mode minimal

This is very similar to "off" except that files opened for read AND
-write will be buffered to disks. This means that files opened for
+write will be buffered to disk. This means that files opened for
write will be a lot more compatible, but uses the minimal disk space.

These operations are not possible

@@ -159,32 +180,77 @@ first.

This mode should support all normal file system operations.

-If an upload fails it will be retried up to --low-level-retries times.
+If an upload fails it will be retried at exponentially increasing
+intervals up to 1 minute.

### --vfs-cache-mode full

-In this mode all reads and writes are buffered to and from disk. When
-a file is opened for read it will be downloaded in its entirety first.
+In this mode all reads and writes are buffered to and from disk. When
+data is read from the remote this is buffered to disk as well.

-This may be appropriate for your needs, or you may prefer to look at
-the cache backend which does a much more sophisticated job of caching,
-including caching directory hierarchies and chunks of files.
+In this mode the files in the cache will be sparse files and rclone
+will keep track of which bits of the files it has downloaded.

-In this mode, unlike the others, when a file is written to the disk,
-it will be kept on the disk after it is written to the remote. It
-will be purged on a schedule according to `--vfs-cache-max-age`.
+So if an application only reads the starts of each file, then rclone
+will only buffer the start of the file. These files will appear to be
+their full size in the cache, but they will be sparse files with only
+the data that has been downloaded present in them.

-This mode should support all normal file system operations.
+This mode should support all normal file system operations and is
+otherwise identical to --vfs-cache-mode writes.

-If an upload or download fails it will be retried up to
---low-level-retries times.
+When reading a file rclone will read --buffer-size plus
+--vfs-read-ahead bytes ahead. The --buffer-size is buffered in memory
+whereas the --vfs-read-ahead is buffered on disk.

-## Case Sensitivity
+When using this mode it is recommended that --buffer-size is not set
+too big and --vfs-read-ahead is set large if required.

+**IMPORTANT** not all file systems support sparse files. In particular
+FAT/exFAT do not. Rclone will perform very badly if the cache
+directory is on a filesystem which doesn't support sparse files and it
+will log an ERROR message if one is detected.

+## VFS Performance

+These flags may be used to enable/disable features of the VFS for
+performance or other reasons.

+In particular S3 and Swift benefit hugely from the --no-modtime flag
+(or use --use-server-modtime for a slightly different effect) as each
+read of the modification time takes a transaction.

+--no-checksum Don't compare checksums on up/download.
+--no-modtime Don't read/write the modification time (can speed things up).
+--no-seek Don't allow seeking in files.
+--read-only Mount read-only.

+When rclone reads files from a remote it reads them in chunks. This
+means that rather than requesting the whole file rclone reads the
+chunk specified. This is advantageous because some cloud providers
+account for reads being all the data requested, not all the data
+delivered.

+Rclone will keep doubling the chunk size requested starting at
+--vfs-read-chunk-size with a maximum of --vfs-read-chunk-size-limit
+unless it is set to "off" in which case there will be no limit.

+--vfs-read-chunk-size SizeSuffix Read the source objects in chunks. (default 128M)
+--vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default "off")

+Sometimes rclone is delivered reads or writes out of order. Rather
+than seeking rclone will wait a short time for the in sequence read or
+write to come in. These flags only come into effect when not using an
+on disk cache file.

+--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms)
+--vfs-write-wait duration Time to wait for in-sequence write before giving error. (default 1s)
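
As a hedged illustration, for an SFTP client which issues many out of
order reads the read wait could be raised a little (the value is
illustrative, not a recommendation):

```
rclone serve sftp remote:path --vfs-read-wait 100ms
```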

+## VFS Case Sensitivity

Linux file systems are case-sensitive: two files can differ only
by case, and the exact case must be used when opening a file.

Windows is not like most other operating systems supported by rclone.
File systems in modern Windows are case-insensitive but case-preserving:
although existing files can be opened using any case, the exact case used
to create the file is preserved and available for programs to query.

@@ -195,7 +261,7 @@ file systems case-sensitive but that is not the default

The "--vfs-case-insensitive" mount flag controls how rclone handles these
two cases. If its value is "false", rclone passes file names to the mounted
-file system as is. If the flag is "true" (or appears without a value on
+file system as-is. If the flag is "true" (or appears without a value on
command line), rclone may perform a "fixup" as explained below.

The user may specify a file name to open/delete/rename/etc with a case

@@ -211,7 +277,7 @@ Note that case sensitivity of the operating system running rclone (the target)
may differ from case sensitivity of a file system mounted by rclone (the source).
The flag controls whether "fixup" is performed to satisfy the target.

-If the flag is not provided on command line, then its default value depends
+If the flag is not provided on the command line, then its default value depends
on the operating system where rclone runs: "true" on Windows and macOS, "false"
otherwise. If the flag is provided without a value, then it is "true".

@@ -328,9 +394,11 @@ rclone serve sftp remote:path [flags]
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects. (default 1m0s)
--vfs-case-insensitive If a file name not found, find a case insensitive match.
+--vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full.
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks. (default 128M)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms)
+--vfs-write-back duration Time to writeback files after last use when using cache. (default 5s)
--vfs-write-wait duration Time to wait for in-sequence write before giving error. (default 1s)
```

@@ -112,20 +112,39 @@ of that with the CA certificate. --key should be the PEM encoded
private key and --client-ca should be the PEM encoded client
certificate authority certificate.

-## Directory Cache
+## VFS - Virtual File System

-Using the `--dir-cache-time` flag, you can set how long a
+This command uses the VFS layer. This adapts the cloud storage objects
+that rclone uses into something which looks much more like a disk
+filing system.

+Cloud storage objects have lots of properties which aren't like disk
+files - you can't extend them or write to the middle of them, so the
+VFS layer has to deal with that. Because there is no one right way of
+doing this there are various options explained below.

+The VFS layer also implements a directory cache - this caches info
+about files and directories (but not the data) in memory.

+## VFS Directory Cache

+Using the `--dir-cache-time` flag, you can control how long a
directory should be considered up to date and not refreshed from the
-backend. Changes made locally in the mount may appear immediately or
-invalidate the cache. However, changes done on the remote will only
-be picked up once the cache expires if the backend configured does not
-support polling for changes. If the backend supports polling, changes
-will be picked up on within the polling interval.
+backend. Changes made through the mount will appear immediately or
+invalidate the cache.

-Alternatively, you can send a `SIGHUP` signal to rclone for
-it to flush all directory caches, regardless of how old they are.
-Assuming only one rclone instance is running, you can reset the cache
-like this:
+--dir-cache-time duration Time to cache directory entries for. (default 5m0s)
+--poll-interval duration Time to wait between polling for changes.

+However, changes made directly on the cloud storage by the web
+interface or a different copy of rclone will only be picked up once
+the directory cache expires if the backend configured does not support
+polling for changes. If the backend supports polling, changes will be
+picked up within the polling interval.

+You can send a `SIGHUP` signal to rclone for it to flush all
+directory caches, regardless of how old they are. Assuming only one
+rclone instance is running, you can reset the cache like this:

kill -SIGHUP $(pidof rclone)

@@ -138,40 +157,41 @@ Or individual files or directories:

rclone rc vfs/forget file=path/to/file dir=path/to/dir

-## File Buffering
+## VFS File Buffering

The `--buffer-size` flag determines the amount of memory
that will be used to buffer data in advance.

-Each open file descriptor will try to keep the specified amount of
-data in memory at all times. The buffered data is bound to one file
-descriptor and won't be shared between multiple open file descriptors
-of the same file.
+Each open file will try to keep the specified amount of data in memory
+at all times. The buffered data is bound to one open file and won't be
+shared.

-This flag is a upper limit for the used memory per file descriptor.
-The buffer will only use memory for data that is downloaded but not
-not yet read. If the buffer is empty, only a small amount of memory
-will be used.
+This flag is an upper limit for the used memory per open file. The
+buffer will only use memory for data that is downloaded but not yet
+read. If the buffer is empty, only a small amount of memory will be
+used.

+The maximum memory used by rclone for buffering can be up to
+`--buffer-size * open files`.

-## File Caching
+## VFS File Caching

-These flags control the VFS file caching options. The VFS layer is
-used by rclone mount to make a cloud storage system work more like a
-normal file system.
+These flags control the VFS file caching options. File caching is
+necessary to make the VFS layer appear compatible with a normal file
+system. It can be disabled at the cost of some compatibility.

-You'll need to enable VFS caching if you want, for example, to read
-and write simultaneously to a file. See below for more details.
+For example you'll need to enable VFS caching if you want to read and
+write simultaneously to a file. See below for more details.

-Note that the VFS cache works in addition to the cache backend and you
-may find that you need one or the other or both.
+Note that the VFS cache is separate from the cache backend and you may
+find that you need one or the other or both.

--cache-dir string Directory rclone will use for caching.
---vfs-cache-mode string Cache mode off|minimal|writes|full (default "off")
+--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-max-age duration Max age of objects in the cache. (default 1h0m0s)
---vfs-cache-max-size int Max total size of objects in the cache. (default off)
+--vfs-cache-max-size SizeSuffix Max total size of objects in the cache. (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects. (default 1m0s)
+--vfs-write-back duration Time to writeback files after last use when using cache. (default 5s)

If run with `-vv` rclone will print the location of the file cache. The
files are stored in the user cache file area which is OS dependent but

@@ -183,9 +203,10 @@ The higher the cache mode the more compatible rclone becomes at the
cost of using disk space.

Note that files are written back to the remote only when they are
-closed so if rclone is quit or dies with open files then these won't
-get written back to the remote. However they will still be in the on
-disk cache.
+closed and if they haven't been accessed for --vfs-write-back
+seconds. If rclone is quit or dies with files that haven't been
+uploaded, these will be uploaded next time rclone is run with the same
+flags.

If using --vfs-cache-max-size note that the cache may exceed this size
for two reasons. Firstly because it is only checked every

@@ -194,7 +215,7 @@ evicted from the cache.

### --vfs-cache-mode off

-In this mode the cache will read directly from the remote and write
+In this mode (the default) the cache will read directly from the remote and write
directly to the remote without caching anything on disk.

This will mean some operations are not possible

@@ -210,7 +231,7 @@ This will mean some operations are not possible

### --vfs-cache-mode minimal

This is very similar to "off" except that files opened for read AND
-write will be buffered to disks. This means that files opened for
+write will be buffered to disk. This means that files opened for
write will be a lot more compatible, but uses the minimal disk space.

These operations are not possible

@@ -228,32 +249,77 @@ first.

This mode should support all normal file system operations.

-If an upload fails it will be retried up to --low-level-retries times.
+If an upload fails it will be retried at exponentially increasing
+intervals up to 1 minute.

### --vfs-cache-mode full

-In this mode all reads and writes are buffered to and from disk. When
-a file is opened for read it will be downloaded in its entirety first.
+In this mode all reads and writes are buffered to and from disk. When
+data is read from the remote this is buffered to disk as well.

-This may be appropriate for your needs, or you may prefer to look at
-the cache backend which does a much more sophisticated job of caching,
-including caching directory hierarchies and chunks of files.
+In this mode the files in the cache will be sparse files and rclone
+will keep track of which bits of the files it has downloaded.

-In this mode, unlike the others, when a file is written to the disk,
-it will be kept on the disk after it is written to the remote. It
-will be purged on a schedule according to `--vfs-cache-max-age`.
+So if an application only reads the starts of each file, then rclone
+will only buffer the start of the file. These files will appear to be
+their full size in the cache, but they will be sparse files with only
+the data that has been downloaded present in them.

-This mode should support all normal file system operations.
+This mode should support all normal file system operations and is
+otherwise identical to --vfs-cache-mode writes.

-If an upload or download fails it will be retried up to
---low-level-retries times.
+When reading a file rclone will read --buffer-size plus
+--vfs-read-ahead bytes ahead. The --buffer-size is buffered in memory
+whereas the --vfs-read-ahead is buffered on disk.

-## Case Sensitivity
+When using this mode it is recommended that --buffer-size is not set
+too big and --vfs-read-ahead is set large if required.

+**IMPORTANT** not all file systems support sparse files. In particular
+FAT/exFAT do not. Rclone will perform very badly if the cache
+directory is on a filesystem which doesn't support sparse files and it
+will log an ERROR message if one is detected.

+## VFS Performance

+These flags may be used to enable/disable features of the VFS for
+performance or other reasons.

+In particular S3 and Swift benefit hugely from the --no-modtime flag
+(or use --use-server-modtime for a slightly different effect) as each
+read of the modification time takes a transaction.

+--no-checksum Don't compare checksums on up/download.
+--no-modtime Don't read/write the modification time (can speed things up).
+--no-seek Don't allow seeking in files.
+--read-only Mount read-only.
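
For instance, a read-only WebDAV serve of a modtime-expensive backend
such as S3 might combine these flags (the remote name is a
placeholder):

```
rclone serve webdav remote:path --read-only --no-modtime
```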

+When rclone reads files from a remote it reads them in chunks. This
+means that rather than requesting the whole file rclone reads the
+chunk specified. This is advantageous because some cloud providers
+account for reads being all the data requested, not all the data
+delivered.

+Rclone will keep doubling the chunk size requested starting at
+--vfs-read-chunk-size with a maximum of --vfs-read-chunk-size-limit
+unless it is set to "off" in which case there will be no limit.

+--vfs-read-chunk-size SizeSuffix Read the source objects in chunks. (default 128M)
+--vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default "off")

+Sometimes rclone is delivered reads or writes out of order. Rather
+than seeking rclone will wait a short time for the in sequence read or
+write to come in. These flags only come into effect when not using an
+on disk cache file.

+--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms)
+--vfs-write-wait duration Time to wait for in-sequence write before giving error. (default 1s)

+## VFS Case Sensitivity

Linux file systems are case-sensitive: two files can differ only
by case, and the exact case must be used when opening a file.

Windows is not like most other operating systems supported by rclone.
File systems in modern Windows are case-insensitive but case-preserving:
although existing files can be opened using any case, the exact case used
to create the file is preserved and available for programs to query.

@@ -264,7 +330,7 @@ file systems case-sensitive but that is not the default

The "--vfs-case-insensitive" mount flag controls how rclone handles these
two cases. If its value is "false", rclone passes file names to the mounted
-file system as is. If the flag is "true" (or appears without a value on
+file system as-is. If the flag is "true" (or appears without a value on
command line), rclone may perform a "fixup" as explained below.

The user may specify a file name to open/delete/rename/etc with a case

@@ -280,7 +346,7 @@ Note that case sensitivity of the operating system running rclone (the target)
may differ from case sensitivity of a file system mounted by rclone (the source).
The flag controls whether "fixup" is performed to satisfy the target.

-If the flag is not provided on command line, then its default value depends
+If the flag is not provided on the command line, then its default value depends
on the operating system where rclone runs: "true" on Windows and macOS, "false"
otherwise. If the flag is provided without a value, then it is "true".

@@ -406,9 +472,11 @@ rclone serve webdav remote:path [flags]
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects. (default 1m0s)
--vfs-case-insensitive If a file name not found, find a case insensitive match.
+--vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full.
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks. (default 128M)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms)
+--vfs-write-back duration Time to writeback files after last use when using cache. (default 5s)
--vfs-write-wait duration Time to wait for in-sequence write before giving error. (default 1s)
```

@@ -18,7 +18,9 @@ modification time or MD5SUM. Destination is updated to match
source, including deleting files if necessary.

**Important**: Since this can cause data loss, test first with the
-`--dry-run` flag to see exactly what would be copied and deleted.
+`--dry-run` or the `--interactive`/`-i` flag.

+rclone sync -i SOURCE remote:DESTINATION

Note that files in the destination won't be deleted if there were any
errors at any point.
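
A hedged example of checking a sync before running it for real; source
and destination are placeholders:

```
rclone sync --dry-run SOURCE remote:DESTINATION
```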
@@ -23,6 +23,7 @@ time instead of the current time. Times may be specified as one of:

- 'YYMMDD' - eg. 17.10.30
- 'YYYY-MM-DDTHH:MM:SS' - eg. 2006-01-02T15:04:05
+- 'YYYY-MM-DDTHH:MM:SS.SSS' - eg. 2006-01-02T15:04:05.123456789

Note that --timestamp is in UTC if you want local time then add the
--localtime flag.
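
For example, to set a nanosecond precision timestamp interpreted as
local time (the path is a placeholder):

```
rclone touch remote:path/file.txt --timestamp 2006-01-02T15:04:05.123456789 --localtime
```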
@@ -52,7 +52,7 @@ rclone tree remote:path [flags]
--human Print the size in a more human readable way.
--level int Descend only level directories deep.
-D, --modtime Print the date of last modification.
--i, --noindent Don't print indentation lines.
+--noindent Don't print indentation lines.
--noreport Turn off file/directory count at end of tree listing.
-o, --output string Output to file instead of stdout.
-p, --protections Print the protections for each file.

@@ -6,23 +6,26 @@ description: "Encryption overlay remote"
{{< icon "fa fa-lock" >}}Crypt
----------------------------------------

-The `crypt` remote encrypts and decrypts another remote.
+Rclone `crypt` remotes encrypt and decrypt other remotes.

-To use it first set up the underlying remote following the config
-instructions for that remote. You can also use a local pathname
-instead of a remote which will encrypt and decrypt from that directory
-which might be useful for encrypting onto a USB stick for example.
+To use `crypt`, first set up the underlying remote. Follow the `rclone
+config` instructions for that remote.

-First check your chosen remote is working - we'll call it
-`remote:path` in these docs. Note that anything inside `remote:path`
-will be encrypted and anything outside won't. This means that if you
-are using a bucket based remote (eg S3, B2, swift) then you should
-probably put the bucket in the remote `s3:bucket`. If you just use
-`s3:` then rclone will make encrypted bucket names too (if using file
-name encryption) which may or may not be what you want.
+`crypt` applied to a local pathname instead of a remote will
+encrypt and decrypt that directory, and can be used to encrypt USB
+removable drives.

-Now configure `crypt` using `rclone config`. We will call this one
-`secret` to differentiate it from the `remote`.
+Before configuring the crypt remote, check the underlying remote is
+working. In this example the underlying remote is called `remote:path`.
+Anything inside `remote:path` will be encrypted and anything outside
+will not. In the case of an S3 based underlying remote (eg Amazon S3,
+B2, Swift) it is generally advisable to define a crypt remote in the
+underlying remote `s3:bucket`. If `s3:` alone is specified alongside
+file name encryption, rclone will encrypt the bucket name.

+Configure `crypt` using `rclone config`. In this example the `crypt`
+remote is called `secret`, to differentiate it from the underlying
+`remote`.

```
No remotes found - make a new one

@@ -96,49 +99,42 @@ d) Delete this remote
y/e/d> y
```

-**Important** The password is stored in the config file is lightly
-obscured so it isn't immediately obvious what it is. It is in no way
-secure unless you use config file encryption.
+**Important** The crypt password stored in `rclone.conf` is lightly
+obscured. That only protects it from cursory inspection. It is not
+secure unless encryption of `rclone.conf` is specified.

-A long passphrase is recommended, or you can use a random one.
+A long passphrase is recommended, or `rclone config` can generate a
+random one.

-The obscured password is created by using AES-CTR with a static key, with
-the salt stored verbatim at the beginning of the obscured password. This
-static key is shared by between all versions of rclone.
+The obscured password is created using AES-CTR with a static key. The
+salt is stored verbatim at the beginning of the obscured password. This
+static key is shared between all versions of rclone.
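
This appears to be the same light obscuring that the `rclone obscure`
command performs, so an obscured value can be produced by hand if
required (the password here is a placeholder):

```
rclone obscure verysecretpassword
```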

If you reconfigure rclone with the same passwords/passphrases
elsewhere it will be compatible, but the obscured version will be different
due to the different salt.

-Note that rclone does not encrypt
+Rclone does not encrypt

* file length - this can be calculated within 16 bytes
* modification time - used for syncing

## Specifying the remote ##

-In normal use, make sure the remote has a `:` in. If you specify the
-remote without a `:` then rclone will use a local directory of that
-name. So if you use a remote of `/path/to/secret/files` then rclone
-will encrypt stuff to that directory. If you use a remote of `name`
-then rclone will put files in a directory called `name` in the current
-directory.
+In normal use, ensure the remote has a `:` in. If specified without,
+rclone uses a local directory of that name. For example if a remote
+`/path/to/secret/files` is specified, rclone encrypts content to that
+directory. If a remote `name` is specified, rclone targets a directory
+`name` in the current directory.
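
To illustrate the distinction, the `remote` value in the crypt
remote's configuration might be any of the following (all
placeholders):

```
remote = remote:path/to/dir     # encrypt into path/to/dir on "remote"
remote = /path/to/secret/files  # encrypt into that local directory
remote = name                   # encrypt into ./name - rarely what is wanted
```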
|
||||
|
||||
If you specify the remote as `remote:path/to/dir` then rclone will
|
||||
store encrypted files in `path/to/dir` on the remote. If you are using
|
||||
file name encryption, then when you save files to
|
||||
`secret:subdir/subfile` this will store them in the unencrypted path
|
||||
`path/to/dir` but the `subdir/subpath` bit will be encrypted.
|
||||
|
||||
Note that unless you want encrypted bucket names (which are difficult
|
||||
to manage because you won't know what directory they represent in web
|
||||
interfaces etc), you should probably specify a bucket, eg
|
||||
`remote:secretbucket` when using bucket based remotes such as S3,
|
||||
Swift, Hubic, B2, GCS.
|
||||
If remote `remote:path/to/dir` is specified, rclone stores encrypted
|
||||
files in `path/to/dir` on the remote. With file name encryption, files
|
||||
saved to `secret:subdir/subfile` are stored in the unencrypted path
|
||||
`path/to/dir` but the `subdir/subpath` element is encrypted.
|
||||
|
||||
## Example ##
|
||||
|
||||
To test I made a little directory of files using "standard" file name
|
||||
Create the following file structure using "standard" file name
|
||||
encryption.
|
||||
|
||||
```
|
||||
@@ -152,7 +148,7 @@ plaintext/
|
||||
└── file4.txt
|
||||
```
|
||||
|
||||
Copy these to the remote and list them back
|
||||
Copy these to the remote, and list them
|
||||
|
||||
```
|
||||
$ rclone -q copy plaintext secret:
|
||||
@@ -164,7 +160,7 @@ $ rclone -q ls secret:
|
||||
9 subdir/file3.txt
|
||||
```
|
||||
|
||||
Now see what that looked like when encrypted
|
||||
The crypt remote looks like
|
||||
|
||||
```
|
||||
$ rclone -q ls remote:path
|
||||
@@ -175,7 +171,7 @@ $ rclone -q ls remote:path
|
||||
56 86vhrsv86mpbtd3a0akjuqslj8/8njh1sk437gttmep3p70g81aps
|
||||
```

The directory structure is preserved

```
$ rclone -q ls secret:subdir
@@ -184,9 +180,9 @@ $ rclone -q ls secret:subdir
10 subsubdir/file4.txt
```

Without file name encryption `.bin` extensions are added to underlying
names. This prevents the cloud provider from attempting to interpret
file content.

```
$ rclone -q ls remote:path
@@ -199,8 +195,6 @@ $ rclone -q ls remote:path
```

### File name encryption modes ###

Off

* doesn't hide file names or directory structure
@@ -219,17 +213,19 @@ Standard
Obfuscation

This is a simple "rotate" of the filename, with each file having a rot
distance based on the filename. Rclone stores the distance at the
beginning of the filename. A file called "hello" may become "53.jgnnq".

Obfuscation is not a strong encryption of filenames, but hinders
automated scanning tools picking up on filename patterns. It is an
intermediate between "off" and "standard" which allows for longer path
segment names.

There is a possibility with some unicode based filenames that the
obfuscation is weak and may map lower case characters to upper case
equivalents.

Obfuscation cannot be relied upon for strong protection.

* file names very lightly obfuscated
* file names can be longer than standard encryption
@@ -237,13 +233,14 @@ equivalents. You can not rely on this for strong protection.
* directory structure visible
* identical file names will have identical uploaded names

Cloud storage systems have limits on file name length and
total path length which rclone is more likely to breach using
"Standard" file name encryption. Where file names are less than 156
characters in length issues should not be encountered, irrespective of
cloud storage provider.

An alternative, future rclone file name encryption mode may tolerate
backend provider path length limits.

### Directory name encryption ###
Crypt offers the option of encrypting dir names or leaving them intact.
@@ -269,10 +266,10 @@ Example:
Crypt stores modification times using the underlying remote so support
depends on that.

Hashes are not stored for crypt. However the data integrity is
protected by an extremely strong crypto authenticator.

Use the `rclone cryptcheck` command to check the
integrity of a crypted remote instead of `rclone check` which can't
check the checksums properly.
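
For example, to verify a local plaintext tree against its crypted
copy (remote name assumed):

```
$ rclone cryptcheck /path/to/plaintext secret:
```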

@@ -328,6 +325,8 @@ NB If filename_encryption is "off" then this option will do nothing.

Password or pass phrase for encryption.

**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).

- Config: password
- Env Var: RCLONE_CRYPT_PASSWORD
- Type: string
@@ -338,6 +337,8 @@ Password or pass phrase for encryption.
Password or pass phrase for salt. Optional but recommended.
Should be different to the previous password.

**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).

- Config: password2
- Env Var: RCLONE_CRYPT_PASSWORD2
- Type: string
@@ -347,6 +348,24 @@ Should be different to the previous password.

Here are the advanced options specific to crypt (Encrypt/Decrypt a remote).

#### --crypt-server-side-across-configs

Allow server side operations (eg copy) to work across different crypt configs.

Normally this option is not what you want, but if you have two crypts
pointing to the same backend you can use it.

This can be used, for example, to change file name encryption type
without re-uploading all the data. Just make two crypt backends
pointing to two different directories with the single changed
parameter and use rclone move to move the files between the crypt
remotes.
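
A sketch of that workflow, assuming `secret-std:` and `secret-obf:`
are two crypt remotes pointing at different directories on the same
backend and differing only in `filename_encryption`:

```
$ rclone move secret-std: secret-obf: --crypt-server-side-across-configs
```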

- Config: server_side_across_configs
- Env Var: RCLONE_CRYPT_SERVER_SIDE_ACROSS_CONFIGS
- Type: bool
- Default: false

#### --crypt-show-mapping

For all files listed show how the names encrypt.

@@ -757,6 +757,8 @@ This can be useful for tracking down problems with syncs in
combination with the `-v` flag. See the [Logging section](#logging)
for more info.

If FILE exists then rclone will append to it.

Note that if you are using the `logrotate` program to manage rclone's
logs, then you should use the `copytruncate` option as rclone doesn't
have a signal to rotate logs.
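
A minimal logrotate stanza along those lines, assuming rclone logs to
`/var/log/rclone.log` via `--log-file`:

```
/var/log/rclone.log {
    weekly
    rotate 4
    compress
    copytruncate
}
```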

@@ -1251,11 +1253,17 @@ or with `--backup-dir`. See `--backup-dir` for more info.

For example

    rclone copy -i /path/to/local/file remote:current --suffix .bak

will copy `/path/to/local` to `remote:current`, but any files which
would have been updated or deleted will have `.bak` added.

If using `rclone sync` with `--suffix` and without `--backup-dir` then
it is recommended to put a filter rule in excluding the suffix
otherwise the `sync` will delete the backup files.

    rclone sync -i /path/to/local/file remote:current --suffix .bak --exclude "*.bak"

### --suffix-keep-extension ###

When using `--suffix`, setting this causes rclone to put the SUFFIX
@@ -1328,8 +1336,10 @@ will be considered.

If the destination does not support server-side copy or move, rclone
will fall back to the default behaviour and log an error level message
to the console.

Encrypted destinations are not currently supported by `--track-renames`
if `--track-renames-strategy` includes `hash`.

Note that `--track-renames` is incompatible with `--no-traverse` and
that it uses extra memory to keep track of all the rename candidates.
@@ -1358,6 +1368,8 @@ Using `--track-renames-strategy modtime` or `leaf` can enable

If nothing is specified, the default option is matching by `hash`es.

Note that the `hash` strategy is not supported with encrypted destinations.
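
For instance, to enable rename tracking to a crypted destination
without relying on hashes (remote name assumed):

```
$ rclone sync /path/to/local secret: --track-renames --track-renames-strategy modtime,leaf
```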

### --delete-(before,during,after) ###

This option allows you to specify when files on your destination are
deleted.

@@ -38,16 +38,25 @@ Beta releases
[Beta releases](https://beta.rclone.org) are generated from each commit
to master. Note these are named like

    {Version Tag}.beta.{Commit Number}.{Git Commit Hash}

eg

    v1.53.0-beta.4677.b657a2204

The `Version Tag` is the version that the beta release will become
when it is released. You can match the `Git Commit Hash` up with the
[git log](https://github.com/rclone/rclone/commits/master). The most
recent release will have the largest `Version Tag` and `Commit Number`
and will normally be at the end of the list.

Some beta releases may have a branch name also:

    {Version Tag}-beta.{Commit Number}.{Git Commit Hash}.{Branch Name}

eg

    v1.53.0-beta.4677.b657a2204.semver

The presence of `Branch Name` indicates that this is a feature under
development which will at some point be merged into the normal betas.

@@ -559,8 +559,8 @@ If you leave this blank, it will use an internal key which is low performance.

#### --drive-client-secret

OAuth Client Secret
Leave blank normally.

- Config: client_secret
- Env Var: RCLONE_DRIVE_CLIENT_SECRET
@@ -599,9 +599,6 @@ Leave blank normally.
Fill in to access "Computers" folders (see docs), or for rclone to use
a non root folder as its starting point.

- Config: root_folder_id
- Env Var: RCLONE_DRIVE_ROOT_FOLDER_ID
@@ -614,15 +611,56 @@ Service Account Credentials JSON file path
Leave blank normally.
Needed only if you want to use SA instead of interactive login.

Leading `~` will be expanded in the file name as will environment variables such as `${RCLONE_CONFIG_DIR}`.

- Config: service_account_file
- Env Var: RCLONE_DRIVE_SERVICE_ACCOUNT_FILE
- Type: string
- Default: ""
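
For example, the option can be given on the command line, with the
leading `~` expanded as described (remote name and path assumed):

```
$ rclone lsd gdrive: --drive-service-account-file ~/keys/sa-credentials.json
```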

#### --drive-alternate-export

Deprecated: no longer needed

- Config: alternate_export
- Env Var: RCLONE_DRIVE_ALTERNATE_EXPORT
- Type: bool
- Default: false

### Advanced Options

Here are the advanced options specific to drive (Google Drive).

#### --drive-token

OAuth Access Token as a JSON blob.

- Config: token
- Env Var: RCLONE_DRIVE_TOKEN
- Type: string
- Default: ""

#### --drive-auth-url

Auth server URL.
Leave blank to use the provider defaults.

- Config: auth_url
- Env Var: RCLONE_DRIVE_AUTH_URL
- Type: string
- Default: ""

#### --drive-token-url

Token server url.
Leave blank to use the provider defaults.

- Config: token_url
- Env Var: RCLONE_DRIVE_TOKEN_URL
- Type: string
- Default: ""

#### --drive-service-account-credentials

Service Account Credentials JSON blob
@@ -719,6 +757,15 @@ This will show trashed files in their original directory structure.
- Type: bool
- Default: false

#### --drive-starred-only

Only show files that are starred.

- Config: starred_only
- Env Var: RCLONE_DRIVE_STARRED_ONLY
- Type: bool
- Default: false

#### --drive-formats

Deprecated: see export_formats
@@ -813,24 +860,6 @@ Impersonate this user when using a service account.
- Type: string
- Default: ""

#### --drive-upload-cutoff

Cutoff for switching to chunked upload
@@ -1087,6 +1116,63 @@ Options:

- "target": optional target remote for the shortcut destination

#### drives

List the shared drives available to this account

    rclone backend drives remote: [options] [<arguments>+]

This command lists the shared drives (teamdrives) available to this
account.

Usage:

    rclone backend drives drive:

This will return a JSON list of objects like this

    [
        {
            "id": "0ABCDEF-01234567890",
            "kind": "drive#teamDrive",
            "name": "My Drive"
        },
        {
            "id": "0ABCDEFabcdefghijkl",
            "kind": "drive#teamDrive",
            "name": "Test Drive"
        }
    ]
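
For scripting, the JSON output can be piped through a tool such as
`jq` (assuming it is installed) to pull out individual fields:

```
$ rclone backend drives drive: | jq -r '.[].id'
```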

#### untrash

Untrash files and directories

    rclone backend untrash remote: [options] [<arguments>+]

This command untrashes all the files and directories in the directory
passed in recursively.

Usage:

This takes an optional directory to untrash, which makes this easier
to use via the API.

    rclone backend untrash drive:directory
    rclone backend -i untrash drive:directory subdir

Use the -i flag to see what would be restored before restoring it.

Result:

    {
        "Untrashed": 17,
        "Errors": 0
    }

{{< rem autogenerated options stop >}}

### Limitations ###

@@ -126,7 +126,7 @@ Here are the standard options specific to dropbox (Dropbox).

#### --dropbox-client-id

OAuth Client Id
Leave blank normally.

- Config: client_id
@@ -136,7 +136,7 @@ Leave blank normally.

#### --dropbox-client-secret

OAuth Client Secret
Leave blank normally.

- Config: client_secret
@@ -148,6 +148,35 @@ Leave blank normally.

Here are the advanced options specific to dropbox (Dropbox).

#### --dropbox-token

OAuth Access Token as a JSON blob.

- Config: token
- Env Var: RCLONE_DROPBOX_TOKEN
- Type: string
- Default: ""

#### --dropbox-auth-url

Auth server URL.
Leave blank to use the provider defaults.

- Config: auth_url
- Env Var: RCLONE_DROPBOX_AUTH_URL
- Type: string
- Default: ""

#### --dropbox-token-url

Token server url.
Leave blank to use the provider defaults.

- Config: token_url
- Env Var: RCLONE_DROPBOX_TOKEN_URL
- Type: string
- Default: ""

#### --dropbox-chunk-size

Upload chunk size. (< 150M).

@@ -19,6 +19,7 @@ These flags are available for every command.
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
--bwlimit-file BwTimetable Bandwidth limit per file in kBytes/s, or use suffix b|k|M|G or a full timetable.
--ca-cert string CA certificate used to verify servers
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
--check-first Do all the checks before starting transfers.
@@ -64,6 +65,7 @@ These flags are available for every command.
--immutable Do not modify files. Fail if existing files have been modified.
--include stringArray Include files matching pattern
--include-from stringArray Read include patterns from file (use - to read from stdin)
-i, --interactive Enable interactive mode
--log-file string Log everything to this file
--log-format string Comma separated list of log format options (default "date,time")
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
@@ -118,6 +120,7 @@ These flags are available for every command.
--rc-web-gui-force-update Force update to latest version of web gui
--rc-web-gui-no-open-browser Don't open the browser automatically
--rc-web-gui-update Check and update to latest version of web gui
--refresh-times Refresh the modtime of remote files.
--retries int Retry operations this many times if they fail (default 3)
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
--size-only Skip based on size only, not mod-time or checksum
@@ -137,14 +140,14 @@ These flags are available for every command.
--tpslimit float Limit HTTP transactions per second to this.
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
--track-renames When synchronizing, track file renames and do a server side move if possible
--track-renames-strategy string Strategies to use when synchronizing using track-renames hash|modtime|leaf (default "hash")
--transfers int Number of file transfers to run in parallel. (default 4)
-u, --update Skip files that are newer on the destination.
--use-cookies Enable session cookiejar.
--use-json-log Use json log format.
--use-mmap Use mmap allocator (see docs).
--use-server-modtime Use server modified time instead of object metadata
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.53.3")
-v, --verbose count Print lots more stuff (repeat for more)
```

@@ -155,10 +158,11 @@ and may be set in the config file.

```
--acd-auth-url string Auth server URL.
--acd-client-id string OAuth Client Id
--acd-client-secret string OAuth Client Secret
--acd-encoding MultiEncoder This sets the encoding for the backend. (default Slash,InvalidUtf8,Dot)
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
--acd-token string OAuth Access Token as a JSON blob.
--acd-token-url string Token server url.
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
--alias-remote string Remote or path to alias.
@@ -177,6 +181,7 @@ and may be set in the config file.
--azureblob-use-emulator Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint)
--b2-account string Account ID or Application Key ID
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
--b2-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4G)
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
--b2-download-auth-duration Duration Time before the authorization token will expire in s or suffix ms|s|m|h|d. (default 1w)
--b2-download-url string Custom endpoint for downloads.
@@ -184,16 +189,22 @@ and may be set in the config file.
--b2-endpoint string Endpoint for the service.
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
--b2-key string Application Key
--b2-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed. (default 1m0s)
--b2-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool.
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
--b2-versions Include old versions in directory listings.
--box-access-token string Box App Primary Access Token
--box-auth-url string Auth server URL.
--box-box-config-file string Box App config.json location
--box-box-sub-type string (default "user")
--box-client-id string OAuth Client Id
--box-client-secret string OAuth Client Secret
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
--box-encoding MultiEncoder This sets the encoding for the backend. (default Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot)
--box-root-folder-id string Fill in for rclone to use a non root folder as its starting point.
--box-token string OAuth Access Token as a JSON blob.
--box-token-url string Token server url.
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
@@ -205,7 +216,7 @@ and may be set in the config file.
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
--cache-plex-insecure string Skip all certificate verification when connecting to the Plex server
--cache-plex-password string The password of the Plex user (obscured)
--cache-plex-url string The URL of the Plex server
--cache-plex-username string The username of the Plex user
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
@@ -225,17 +236,18 @@ and may be set in the config file.
-L, --copy-links Follow symlinks and copy the pointed to item.
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
--crypt-password string Password or pass phrase for encryption. (obscured)
--crypt-password2 string Password or pass phrase for salt. Optional but recommended. (obscured)
--crypt-remote string Remote to encrypt/decrypt.
--crypt-server-side-across-configs Allow server side operations (eg copy) to work across different crypt configs.
--crypt-show-mapping For all files listed show how the names encrypt.
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
--drive-alternate-export Use alternate export URLs for google documents export.
--drive-auth-owner-only Only consider files owned by the authenticated user.
--drive-auth-url string Auth server URL.
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
--drive-client-id string Google Application Client Id
--drive-client-secret string OAuth Client Secret
--drive-disable-http2 Disable drive using http2 (default true)
--drive-encoding MultiEncoder This sets the encoding for the backend. (default InvalidUtf8)
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
@@ -256,65 +268,81 @@ and may be set in the config file.
--drive-skip-checksum-gphotos Skip MD5 checksum on Google photos and videos only.
--drive-skip-gdocs Skip google documents in all listings.
--drive-skip-shortcuts If set skip shortcut files
--drive-starred-only Only show files that are starred.
--drive-stop-on-upload-limit Make upload limit errors be fatal
--drive-team-drive string ID of the Team Drive
--drive-token string OAuth Access Token as a JSON blob.
--drive-token-url string Token server url.
--drive-trashed-only Only show files that are in the trash.
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
--drive-use-created-date Use file created date instead of modified date.
--drive-use-shared-date Use date file was shared instead of modified date.
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off)
--dropbox-auth-url string Auth server URL.
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
--dropbox-client-id string OAuth Client Id
--dropbox-client-secret string OAuth Client Secret
--dropbox-encoding MultiEncoder This sets the encoding for the backend. (default Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot)
--dropbox-impersonate string Impersonate this user when using a business account.
--dropbox-token string OAuth Access Token as a JSON blob.
--dropbox-token-url string Token server url.
--fichier-api-key string Your API Key, get it from https://1fichier.com/console/params.pl
--fichier-encoding MultiEncoder This sets the encoding for the backend. (default Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot)
--fichier-shared-folder string If you want to download a shared folder, add this parameter
--ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited
--ftp-disable-epsv Disable using EPSV even if server advertises support
--ftp-encoding MultiEncoder This sets the encoding for the backend. (default Slash,Del,Ctl,RightSpace,Dot)
--ftp-explicit-tls Use FTP over TLS (Explicit)
--ftp-host string FTP host to connect to
--ftp-no-check-certificate Do not verify the TLS certificate of the server
--ftp-pass string FTP password (obscured)
--ftp-port string FTP port, leave blank to use default (21)
--ftp-tls Use FTPS over TLS (Implicit)
--ftp-user string FTP username, leave blank for current username, $USER
--gcs-anonymous Access public buckets and objects without credentials
--gcs-auth-url string Auth server URL.
--gcs-bucket-acl string Access Control List for new buckets.
--gcs-bucket-policy-only Access checks should use bucket-level IAM policies.
--gcs-client-id string OAuth Client Id
--gcs-client-secret string OAuth Client Secret
--gcs-encoding MultiEncoder This sets the encoding for the backend. (default Slash,CrLf,InvalidUtf8,Dot)
--gcs-location string Location for the newly created buckets.
--gcs-object-acl string Access Control List for new objects.
--gcs-project-number string Project number.
--gcs-service-account-file string Service Account Credentials JSON file path
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
--gcs-token string OAuth Access Token as a JSON blob.
--gcs-token-url string Token server url.
--gphotos-auth-url string Auth server URL.
--gphotos-client-id string OAuth Client Id
--gphotos-client-secret string OAuth Client Secret
--gphotos-read-only Set to make the Google Photos backend read only.
--gphotos-read-size Set to read the size of media items.
--gphotos-start-year int Year limits the photos to be downloaded to those which are uploaded after the given year (default 2000)
--gphotos-token string OAuth Access Token as a JSON blob.
--gphotos-token-url string Token server url.
--http-headers CommaSepList Set HTTP headers for all transactions
--http-no-head Don't use HEAD requests to find file sizes in dir listing
--http-no-slash Set this if the site doesn't end directories with /
--http-url string URL of http host to connect to
--hubic-auth-url string Auth server URL.
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--hubic-client-id string OAuth Client Id
--hubic-client-secret string OAuth Client Secret
--hubic-encoding MultiEncoder This sets the encoding for the backend. (default Slash,InvalidUtf8)
--hubic-no-chunk Don't chunk files during streaming upload.
--hubic-token string OAuth Access Token as a JSON blob.
--hubic-token-url string Token server url.
--jottacloud-encoding MultiEncoder This sets the encoding for the backend. (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot)
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
--jottacloud-trashed-only Only show files that are in the trash.
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails. (default 10M)
--koofr-encoding MultiEncoder This sets the encoding for the backend. (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--koofr-endpoint string The Koofr API endpoint to use (default "https://app.koofr.net")
--koofr-mountid string Mount ID of the mount to use. If omitted, the primary mount is used.
--koofr-password string Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password) (obscured)
--koofr-setmtime Does the backend support setting modification time. Set this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend. (default true)
--koofr-user string Your Koofr user name
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
@@ -322,12 +350,13 @@ and may be set in the config file.
--local-case-sensitive Force the filesystem to report itself as case sensitive.
--local-encoding MultiEncoder This sets the encoding for the backend. (default Slash,Dot)
--local-no-check-updated Don't check to see if the files change during upload
--local-no-set-modtime Disable setting modtime
--local-no-sparse Disable sparse files for multi-thread downloads
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
--local-nounc string Disable UNC (long path names) conversion on Windows
--mailru-check-hash What should copy do if file checksum is mismatched or invalid (default true)
--mailru-encoding MultiEncoder This sets the encoding for the backend. (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--mailru-pass string Password (obscured)
--mailru-speedup-enable Skip full upload if there is another file with same data hash. (default true)
--mailru-speedup-file-patterns string Comma separated list of file name patterns eligible for speedup (put by hash). (default "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf")
--mailru-speedup-max-disk SizeSuffix This option allows you to disable speedup (put by hash) for large files (default 3G)
@@ -336,25 +365,33 @@ and may be set in the config file.
--mega-debug Output more debug from Mega.
--mega-encoding MultiEncoder This sets the encoding for the backend. (default Slash,InvalidUtf8,Dot)
--mega-hard-delete Delete files permanently rather than putting them into the trash.
--mega-pass string Password. (obscured)
--mega-user string User name
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
--onedrive-auth-url string Auth server URL.
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k (327,680 bytes). (default 10M)
--onedrive-client-id string OAuth Client Id
--onedrive-client-secret string OAuth Client Secret
--onedrive-drive-id string The ID of the drive to use
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
--onedrive-encoding MultiEncoder This sets the encoding for the backend. (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot)
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
--onedrive-no-versions Remove all versions on modifying operations
--onedrive-server-side-across-configs Allow server side operations (eg copy) to work across different onedrive configs.
--onedrive-token string OAuth Access Token as a JSON blob.
--onedrive-token-url string Token server url.
--opendrive-chunk-size SizeSuffix Files will be uploaded in chunks this size. (default 10M)
--opendrive-encoding MultiEncoder This sets the encoding for the backend. (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot)
--opendrive-password string Password. (obscured)
--opendrive-username string Username
--pcloud-auth-url string Auth server URL.
--pcloud-client-id string OAuth Client Id
--pcloud-client-secret string OAuth Client Secret
--pcloud-encoding MultiEncoder This sets the encoding for the backend. (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--pcloud-hostname string Hostname to connect to. (default "api.pcloud.com")
--pcloud-root-folder-id string Fill in for rclone to use a non root folder as its starting point. (default "d0")
--pcloud-token string OAuth Access Token as a JSON blob.
--pcloud-token-url string Token server url.
--premiumizeme-encoding MultiEncoder This sets the encoding for the backend. (default Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--putio-encoding MultiEncoder This sets the encoding for the backend. (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--qingstor-access-key-id string QingStor Access Key ID
@@ -371,7 +408,7 @@ and may be set in the config file.
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
--s3-bucket-acl string Canned ACL used when creating buckets.
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
--s3-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656G)
--s3-disable-checksum Don't store MD5 checksum with object metadata
--s3-encoding MultiEncoder This sets the encoding for the backend. (default Slash,InvalidUtf8,Dot)
--s3-endpoint string Endpoint for S3 API.
@@ -380,13 +417,17 @@ and may be set in the config file.
--s3-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.
--s3-list-chunk int Size of listing chunk (response list for each ListObject S3 request). (default 1000)
--s3-location-constraint string Location constraint - must be set to match the Region.
--s3-max-upload-parts int Maximum number of parts in a multipart upload. (default 10000)
--s3-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed. (default 1m0s)
--s3-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool.
--s3-no-check-bucket If set don't attempt to check the bucket exists or create it
--s3-profile string Profile to use in the shared credentials file
--s3-provider string Choose your S3 provider.
--s3-region string Region to connect to.
--s3-secret-access-key string AWS Secret Access Key (password)
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
--s3-session-token string An AWS session token
--s3-shared-credentials-file string Path to the shared credentials file
--s3-sse-customer-algorithm string If using SSE-C, the server-side encryption algorithm used when storing this object in S3.
--s3-sse-customer-key string If using SSE-C you must provide the secret encryption key used to encrypt/decrypt your data.
--s3-sse-customer-key-md5 string If using SSE-C you must provide the secret encryption key MD5 checksum.
@@ -400,24 +441,26 @@ and may be set in the config file.
--seafile-create-library Should rclone create a library if it doesn't exist
--seafile-encoding MultiEncoder This sets the encoding for the backend. (default Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8)
--seafile-library string Name of the library. Leave blank to access all non-encrypted libraries.
--seafile-library-key string Library password (for encrypted libraries only). Leave blank if you pass it through the command line. (obscured)
--seafile-pass string Password (obscured)
--seafile-url string URL of seafile host to connect to
--seafile-user string User name (usually email address)
--sftp-ask-password Allow asking for SFTP password when needed.
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
--sftp-host string SSH host to connect to
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file. (obscured)
--sftp-key-pem string Raw PEM-encoded private key, If specified, will override key_file parameter.
--sftp-key-use-agent When set forces the usage of the ssh-agent.
--sftp-md5sum-command string The command used to read md5 hashes. Leave blank for autodetect.
--sftp-pass string SSH password, leave blank to use ssh-agent. (obscured)
--sftp-path-override string Override path used by SSH connection.
--sftp-port string SSH port, leave blank to use default (22)
--sftp-server-command string Specifies the path or command to run a sftp server on the remote host.
--sftp-set-modtime Set the modified time on the remote if set. (default true)
--sftp-sha1sum-command string The command used to read sha1 hashes. Leave blank for autodetect.
--sftp-skip-links Set to skip any symlinks and any other non regular files.
--sftp-subsystem string Specifies the SSH2 subsystem on the remote host. (default "sftp")
--sftp-use-insecure-cipher Enable the use of insecure ciphers and key exchange methods.
--sftp-user string SSH username, leave blank for current username, ncw
--sharefile-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 64M)
@@ -470,12 +513,14 @@ and may be set in the config file.
--union-upstreams string List of space separated upstreams.
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
--webdav-bearer-token-command string Command to run to get a bearer token
--webdav-pass string Password. (obscured)
--webdav-url string URL of http host to connect to
--webdav-user string User name
--webdav-vendor string Name of the Webdav site/service/software you are using
--yandex-auth-url string Auth server URL.
--yandex-client-id string OAuth Client Id
--yandex-client-secret string OAuth Client Secret
--yandex-encoding MultiEncoder This sets the encoding for the backend. (default Slash,Del,Ctl,InvalidUtf8,Dot)
--yandex-token string OAuth Access Token as a JSON blob.
--yandex-token-url string Token server url.
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
```

@@ -179,6 +179,8 @@ FTP port, leave blank to use default (21)

FTP password

**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).

- Config: pass
- Env Var: RCLONE_FTP_PASS
- Type: string
@@ -186,8 +188,7 @@ FTP password

#### --ftp-tls

Use FTPS over TLS (Implicit)
When using implicit FTP over TLS the client will connect using TLS
right from the start, which in turn breaks the compatibility with
non-TLS-aware servers. This is usually served over port 990 rather
@@ -201,13 +202,12 @@ than port 21. Cannot be used in combination with explicit FTP.

#### --ftp-explicit-tls

Use FTP over TLS (Explicit)

When using explicit FTP over TLS the client explicitly requests
security from the server in order to upgrade a plain text connection
to an encrypted one. Cannot be used in combination with implicit FTP.

- Config: explicit_tls
- Env Var: RCLONE_FTP_EXPLICIT_TLS
- Type: bool
- Default: false
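
For example, to list directories over an explicit TLS connection
(remote name assumed):

```
$ rclone lsd myftp: --ftp-explicit-tls
```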

@@ -264,7 +264,7 @@ Here are the standard options specific to google cloud storage (Google Cloud Sto

#### --gcs-client-id

OAuth Client Id
Leave blank normally.

- Config: client_id
@@ -274,7 +274,7 @@ Leave blank normally.

#### --gcs-client-secret

OAuth Client Secret
Leave blank normally.

- Config: client_secret
@@ -298,6 +298,9 @@ Service Account Credentials JSON file path
Leave blank normally.
Needed only if you want to use SA instead of interactive login.

Leading `~` will be expanded in the file name as will environment variables such as `${RCLONE_CONFIG_DIR}`.

- Config: service_account_file
- Env Var: RCLONE_GCS_SERVICE_ACCOUNT_FILE
- Type: string
@@ -314,6 +317,16 @@ Needed only if you want use SA instead of interactive login.
- Type: string
- Default: ""

#### --gcs-anonymous

Access public buckets and objects without credentials
Set to 'true' if you just want to download files and don't configure credentials.

- Config: anonymous
- Env Var: RCLONE_GCS_ANONYMOUS
- Type: bool
- Default: false
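
A minimal sketch of anonymous access to a public bucket (remote and
bucket names assumed, default config location assumed):

```
$ cat >> ~/.config/rclone/rclone.conf <<EOF
[gcs-anon]
type = google cloud storage
anonymous = true
EOF
$ rclone ls gcs-anon:some-public-bucket
```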

#### --gcs-object-acl

Access Control List for new objects.
@@ -455,6 +468,35 @@ The storage class to use when storing objects in Google Cloud Storage.

Here are the advanced options specific to google cloud storage (Google Cloud Storage (this is not Google Drive)).

#### --gcs-token

OAuth Access Token as a JSON blob.

- Config: token
- Env Var: RCLONE_GCS_TOKEN
- Type: string
- Default: ""

#### --gcs-auth-url

Auth server URL.
Leave blank to use the provider defaults.

- Config: auth_url
- Env Var: RCLONE_GCS_AUTH_URL
- Type: string
- Default: ""

#### --gcs-token-url

Token server url.
Leave blank to use the provider defaults.

- Config: token_url
- Env Var: RCLONE_GCS_TOKEN_URL
- Type: string
- Default: ""

#### --gcs-encoding

This sets the encoding for the backend.

@@ -315,7 +315,7 @@ Here are the standard options specific to google photos (Google Photos).

#### --gphotos-client-id

OAuth Client Id
Leave blank normally.

- Config: client_id
@@ -325,7 +325,7 @@ Leave blank normally.

#### --gphotos-client-secret

OAuth Client Secret
Leave blank normally.

- Config: client_secret
@@ -349,6 +349,35 @@ to your photos, otherwise rclone will request full access.

Here are the advanced options specific to google photos (Google Photos).

#### --gphotos-token

OAuth Access Token as a JSON blob.

- Config: token
- Env Var: RCLONE_GPHOTOS_TOKEN
- Type: string
- Default: ""

#### --gphotos-auth-url

Auth server URL.
Leave blank to use the provider defaults.

- Config: auth_url
- Env Var: RCLONE_GPHOTOS_AUTH_URL
- Type: string
- Default: ""

#### --gphotos-token-url

Token server url.
Leave blank to use the provider defaults.

- Config: token_url
- Env Var: RCLONE_GPHOTOS_TOKEN_URL
- Type: string
- Default: ""

#### --gphotos-read-size

Set to read the size of media items.

@@ -112,7 +112,7 @@ Here are the standard options specific to hubic (Hubic).

#### --hubic-client-id

OAuth Client Id
Leave blank normally.

- Config: client_id
@@ -122,7 +122,7 @@ Leave blank normally.

#### --hubic-client-secret

OAuth Client Secret
Leave blank normally.

- Config: client_secret
@@ -134,6 +134,35 @@ Leave blank normally.

Here are the advanced options specific to hubic (Hubic).

#### --hubic-token

OAuth Access Token as a JSON blob.

- Config: token
- Env Var: RCLONE_HUBIC_TOKEN
- Type: string
- Default: ""

#### --hubic-auth-url

Auth server URL.
Leave blank to use the provider defaults.

- Config: auth_url
- Env Var: RCLONE_HUBIC_AUTH_URL
- Type: string
- Default: ""

#### --hubic-token-url

Token server url.
Leave blank to use the provider defaults.

- Config: token_url
- Env Var: RCLONE_HUBIC_TOKEN_URL
- Type: string
- Default: ""

#### --hubic-chunk-size

Above this size files will be chunked into a _segments container.

@@ -148,8 +148,13 @@ flag.
Note that Jottacloud requires the MD5 hash before upload so if the
source does not have an MD5 checksum then the file will be cached
temporarily on disk (wherever the `TMPDIR` environment variable points
to) before it is uploaded. Small files will be cached in memory - see
the [--jottacloud-md5-memory-limit](#jottacloud-md5-memory-limit) flag.
When uploading from local disk the source checksum is always available,
so this does not apply. Starting with rclone version 1.52 the same is
true for crypted remotes (in older versions the crypt backend would not
calculate hashes for uploads from local disk, so the Jottacloud
backend had to do it as described above).
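
As an illustration, `TMPDIR` can be pointed at a larger scratch disk
for a big upload whose MD5 is not known in advance (paths and remote
name assumed):

```
$ TMPDIR=/mnt/scratch rclone copy big.iso jotta:backup
```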

#### Restricted filename characters

@@ -220,16 +225,6 @@ Delete files permanently rather than putting them into the trash.
- Type: bool
- Default: false

#### --jottacloud-upload-resume-limit

Files bigger than this can be resumed if the upload fails.

@@ -115,6 +115,8 @@ Your Koofr user name

Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)

**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).

- Config: password
- Env Var: RCLONE_KOOFR_PASSWORD
- Type: string