Mirror of https://github.com/rclone/rclone.git (synced 2025-12-06 00:03:32 +00:00)

Compare commits: fix-vfs-wr...v1.53.4 (241 commits)
| SHA1 |
|---|
| a96c23026b |
| c3dfa7d9a3 |
| 1936847548 |
| 89b4ccbbfa |
| 3c985a436b |
| 703f6002dd |
| 7de13fc426 |
| c2f6d48d45 |
| 9d9999d17b |
| 15f31d3ca4 |
| 0ea51f74a1 |
| 6cd360233d |
| 687d2d495b |
| 50a107a5f3 |
| 2ed2861d09 |
| e2cd449c62 |
| 98dbbc78ab |
| 53c4191350 |
| e4ece15e68 |
| fbf46908bf |
| a96539eeec |
| 86cd5230d7 |
| 716019cf7d |
| c59fe40795 |
| ecd60f2430 |
| d2a5640c3a |
| 8d3acfb38c |
| 200de46249 |
| cee618bc03 |
| db2aa771dc |
| 55bd60019e |
| c8b11d27e1 |
| 4c215cc81e |
| 4df333255a |
| 843d684568 |
| 46ea3d93b5 |
| 89f2d43f17 |
| cfc5d76fca |
| 0af493f693 |
| 51b3ee9a97 |
| 6a4b49479d |
| 4b03ee0f99 |
| 2f6231f7ac |
| c0e6f54f01 |
| def7b77d0f |
| 51b18a4a26 |
| 7cb76f9054 |
| 00ccc93482 |
| f9fe494d93 |
| 4a0c266787 |
| f48d0a518c |
| 99ff594773 |
| 6c140705e3 |
| e76963a971 |
| 43ad7b10a2 |
| f6970c65dd |
| 6012179c67 |
| 3ecdd4516f |
| 3b18ba1358 |
| 5fbbab58ed |
| 80b93beedf |
| eb5c47fcfa |
| c7335e780b |
| 878ebf3658 |
| 1c860ef252 |
| a0494479f9 |
| 9a9a134188 |
| 41ccf01f29 |
| 06f3daa64b |
| d5fe63c0a0 |
| b7f0e776f6 |
| b89f8c05cf |
| b81dc16484 |
| 0e121eeddb |
| 0430163180 |
| 09a0dc1600 |
| dd11778ac6 |
| f36cbe5194 |
| 82a383588b |
| 8ae4d2cffe |
| 0f895c0697 |
| 937dd7fa1f |
| 33869387d1 |
| 3ec8e304b3 |
| e62362094e |
| 6a0398211d |
| e5a53d4c65 |
| 59d5767a07 |
| 087b5788e2 |
| d944bfd936 |
| d780fcf317 |
| 0a9b8eac80 |
| 1272a8f9a5 |
| 0b40eaedaf |
| 8340ff4fb9 |
| f5abc168ed |
| 510ac341e1 |
| 358e2b2665 |
| 3305079a03 |
| 6ed8471a37 |
| dc7ce37c32 |
| 57c10babfe |
| 23b2c58018 |
| 78abd21eec |
| 841edc729c |
| b03fcbcc12 |
| b60ac7b66a |
| 725ae91387 |
| b7dd3ce608 |
| 70c8566cb8 |
| 0d066bdf46 |
| 3affc2e066 |
| 23c826db52 |
| 1ae36a4e32 |
| bc969ad244 |
| d7ac1f5b0e |
| 5bf53fe3ac |
| 9cc17cec9a |
| e2816629d0 |
| 3f0d54daae |
| 7dcbebf9bc |
| c31defbbd3 |
| e54ce35019 |
| 75d54d720c |
| cc0421cb9e |
| 9c01ac9894 |
| 20300d1f61 |
| 6231beefc5 |
| 068cfdaa00 |
| 7d62d1fc97 |
| e13ac28b8d |
| b30ee57cd9 |
| 921e384c4d |
| bf685f600e |
| b6d3cad70e |
| c665201b85 |
| d6996e3347 |
| dffcc99373 |
| 09b79679cd |
| cf68e61f40 |
| 22674d1146 |
| f9ee0dc3f2 |
| 65fa6a946a |
| 4cf82118d9 |
| 5f56611a76 |
| 0f7a2f0f3c |
| be2b310ace |
| 45afe97e8e |
| fee8f21ce1 |
| 1abc252ed3 |
| 801a820c54 |
| 2bcc66c805 |
| b5ba077a2f |
| 0931b84940 |
| 94a0991584 |
| 9d3d397f50 |
| 38e8415e77 |
| fb9edbe34e |
| 85f9bd1abf |
| 63e4d2952b |
| 52247e9a9f |
| d2ad293fae |
| 6082096f7e |
| 9a6fcd035b |
| 47d08ac1f1 |
| c4c6a1ee7d |
| 29d6358f34 |
| 6308153ae7 |
| a9713cd0ed |
| 1cae4152f9 |
| 4884bee8ba |
| 54fc2821cd |
| 5549fd25fc |
| 3d5a63607e |
| cb7534dcdf |
| 770a6f2cad |
| aab9aa8a2e |
| 3a14b1d5a9 |
| ac044b1c54 |
| 61c7ea4085 |
| 01280798e9 |
| db56d30078 |
| a00274d2ab |
| 82975109af |
| 30eb094f28 |
| b401a727f7 |
| 8eb16ce89c |
| 8e7eb37456 |
| 4d7f91309b |
| 109b695621 |
| 177d2f2f79 |
| f5439ddc54 |
| 324077fb48 |
| f50ab981f7 |
| 0c620ad076 |
| 49cf2eb7e4 |
| a2afa9aadd |
| c2f3949ded |
| bf355c4527 |
| 3daa63cae8 |
| 4441e012cf |
| 122a47fba6 |
| 421585dd72 |
| 0bab9903ee |
| 700deb0a81 |
| 1222b78ec4 |
| 0ee16b51c4 |
| 26001d520a |
| 8bf265c775 |
| 62f0bbb598 |
| d5f4c74697 |
| 8f42532b6d |
| 2288a5c617 |
| 957311f479 |
| 2cc381b91d |
| f406dbbb4d |
| 3b2322285a |
| 47d093e863 |
| b2ae94de5b |
| 4afea1ebaf |
| 711736054f |
| d64212d902 |
| 8913679d88 |
| 4f9a80e2d3 |
| aa93b39d9b |
| 101f82c6b3 |
| d35673efc6 |
| 3286d1992b |
| 4ac662d144 |
| d73a418a55 |
| 306a3e0cd7 |
| 975a53c9e3 |
| 78fdc5805b |
| 8f9d5af26d |
| 6ff5787b40 |
| 3c1c6d2f01 |
| 0272a7f405 |
| e1d34ef427 |
| 26b4698212 |
| 2871268505 |
| 744828a4de |
.github/workflows/build.yml (vendored): 88 lines changed
```diff
@@ -19,24 +19,23 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'race', 'go1.11', 'go1.12', 'go1.13']
+        job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'go1.11', 'go1.12', 'go1.13', 'go1.14']
 
         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '1.14.x'
-            modules: 'on'
+            go: '1.15.x'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
             quicktest: true
             racequicktest: true
             deploy: true
 
           - job_name: mac
             os: macOS-latest
-            go: '1.14.x'
-            modules: 'on'
-            gotags: '' # cmount doesn't work on osx travis for some reason
+            go: '1.15.x'
+            gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
+            racequicktest: true
@@ -44,84 +43,77 @@ jobs:
 
           - job_name: windows_amd64
             os: windows-latest
-            go: '1.14.x'
-            modules: 'on'
+            go: '1.15.x'
             gotags: cmount
             build_flags: '-include "^windows/amd64" -cgo'
+            build_args: '-buildmode exe'
             quicktest: true
             racequicktest: true
             deploy: true
 
           - job_name: windows_386
             os: windows-latest
-            go: '1.14.x'
-            modules: 'on'
+            go: '1.15.x'
             gotags: cmount
             goarch: '386'
             cgo: '1'
             build_flags: '-include "^windows/386" -cgo'
+            build_args: '-buildmode exe'
             quicktest: true
             deploy: true
 
           - job_name: other_os
             os: ubuntu-latest
-            go: '1.14.x'
-            modules: 'on'
+            go: '1.15.x'
             build_flags: '-exclude "^(windows/|darwin/amd64|linux/)"'
             compile_all: true
             deploy: true
 
-          - job_name: race
-            os: ubuntu-latest
-            go: '1.14.x'
-            modules: 'on'
-            quicktest: true
-            racequicktest: true
-
           - job_name: go1.11
             os: ubuntu-latest
             go: '1.11.x'
-            modules: 'on'
             quicktest: true
 
           - job_name: go1.12
             os: ubuntu-latest
             go: '1.12.x'
-            modules: 'on'
             quicktest: true
 
           - job_name: go1.13
             os: ubuntu-latest
             go: '1.13.x'
-            modules: 'on'
             quicktest: true
 
+          - job_name: go1.14
+            os: ubuntu-latest
+            go: '1.14.x'
+            quicktest: true
+            racequicktest: true
+
     name: ${{ matrix.job_name }}
 
     runs-on: ${{ matrix.os }}
 
     steps:
       - name: Checkout
-        uses: actions/checkout@v1
+        uses: actions/checkout@v2
         with:
           # Checkout into a fixed path to avoid import path problems on go < 1.11
           path: ./src/github.com/rclone/rclone
           fetch-depth: 0
 
       - name: Install Go
-        uses: actions/setup-go@v1
+        uses: actions/setup-go@v2
         with:
+          stable: 'false'
          go-version: ${{ matrix.go }}
 
       - name: Set environment variables
         shell: bash
         run: |
-          echo '::set-env name=GOPATH::${{ runner.workspace }}'
-          echo '::add-path::${{ runner.workspace }}/bin'
-          echo '::set-env name=GO111MODULE::${{ matrix.modules }}'
-          echo '::set-env name=GOTAGS::${{ matrix.gotags }}'
-          echo '::set-env name=BUILD_FLAGS::${{ matrix.build_flags }}'
-          if [[ "${{ matrix.goarch }}" != "" ]]; then echo '::set-env name=GOARCH::${{ matrix.goarch }}' ; fi
-          if [[ "${{ matrix.cgo }}" != "" ]]; then echo '::set-env name=CGO_ENABLED::${{ matrix.cgo }}' ; fi
+          echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
+          echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
+          echo 'BUILD_ARGS=${{ matrix.build_args }}' >> $GITHUB_ENV
+          if [[ "${{ matrix.goarch }}" != "" ]]; then echo 'GOARCH=${{ matrix.goarch }}' >> $GITHUB_ENV ; fi
+          if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi
 
       - name: Install Libraries on Linux
         shell: bash
@@ -136,7 +128,7 @@ jobs:
         shell: bash
         run: |
           brew update
-          brew cask install osxfuse
+          brew install --cask osxfuse
         if: matrix.os == 'macOS-latest'
 
       - name: Install Libraries on Windows
@@ -144,10 +136,10 @@ jobs:
         run: |
           $ProgressPreference = 'SilentlyContinue'
           choco install -y winfsp zip
-          Write-Host "::set-env name=CPATH::C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
+          echo "CPATH=C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
           if ($env:GOARCH -eq "386") {
             choco install -y mingw --forcex86 --force
-            Write-Host "::add-path::C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
+            echo "C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
           }
           # Copy mingw32-make.exe to make.exe so the same command line
           # can be used on Windows as on macOS and Linux
@@ -167,10 +159,22 @@ jobs:
           printf "\n\nSystem environment:\n\n"
           env
 
-      - name: Run tests
+      - name: Go module cache
+        uses: actions/cache@v2
+        with:
+          path: ~/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
+
+      - name: Build rclone
+        shell: bash
+        run: |
+          make
+
+      - name: Run tests
         shell: bash
         run: |
           make quicktest
         if: matrix.quicktest
@@ -222,8 +226,8 @@ jobs:
       - name: Set environment variables
         shell: bash
         run: |
-          echo '::set-env name=GOPATH::${{ runner.workspace }}'
-          echo '::add-path::${{ runner.workspace }}/bin'
+          echo 'GOPATH=${{ runner.workspace }}' >> $GITHUB_ENV
+          echo '${{ runner.workspace }}/bin' >> $GITHUB_PATH
 
       - name: Cross-compile rclone
         run: |
@@ -231,7 +235,7 @@ jobs:
           GO111MODULE=off go get -v github.com/karalabe/xgo # don't add to go.mod
           # xgo \
           #    -image=billziss/xgo-cgofuse \
-          #    -targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
+          #    -targets=darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
           #    -tags cmount \
           #    -dest build \
           #    .
@@ -242,9 +246,9 @@ jobs:
           .
 
       - name: Build rclone
         shell: bash
         run: |
-          docker pull golang
-          docker run --rm -v "$PWD":/usr/src/rclone -w /usr/src/rclone golang go build -mod=mod -v
+          make
 
       - name: Upload artifacts
         run: |
```

```diff
@@ -15,7 +15,7 @@ jobs:
         with:
           fetch-depth: 0
       - name: Build and publish image
-        uses: ilteoood/docker_buildx@439099796bfc03dd9cedeb72a0c7cb92be5cc92c
+        uses: ilteoood/docker_buildx@1.1.0
         with:
           tag: beta
           imageName: rclone/rclone
@@ -23,7 +23,7 @@ jobs:
         id: actual_major_version
         run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
       - name: Build and publish image
-        uses: ilteoood/docker_buildx@439099796bfc03dd9cedeb72a0c7cb92be5cc92c
+        uses: ilteoood/docker_buildx@1.1.0
         with:
           tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
           imageName: rclone/rclone
```
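For reference: the build.yml changes above migrate off the deprecated `::set-env`/`::add-path` workflow commands onto the `$GITHUB_ENV`/`$GITHUB_PATH` files, which any process in a step can append to. A minimal Go sketch of that mechanism (a hypothetical helper, not code from this repository):

```go
package main

import (
	"fmt"
	"os"
)

// setActionsEnv appends KEY=value to the file named by $GITHUB_ENV;
// GitHub Actions reads it after the current step finishes, which is
// the replacement for echoing "::set-env name=KEY::value".
func setActionsEnv(key, value string) error {
	path := os.Getenv("GITHUB_ENV")
	if path == "" {
		return fmt.Errorf("GITHUB_ENV not set; not running under GitHub Actions")
	}
	f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = fmt.Fprintf(f, "%s=%s\n", key, value)
	return err
}

func main() {
	if err := setActionsEnv("GOTAGS", "cmount"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```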
.gitignore (vendored): 1 line changed
```diff
@@ -9,3 +9,4 @@ rclone.iml
 *.test
 *.log
 *.iml
+fuzz-build.zip
```
MANUAL.html (generated): 6484 lines changed. File diff suppressed because it is too large.

MANUAL.txt (generated): 8241 lines changed. File diff suppressed because it is too large.
Makefile: 75 lines changed
```diff
@@ -7,27 +7,29 @@ RELEASE_TAG := $(shell git tag -l --points-at HEAD)
 VERSION := $(shell cat VERSION)
 # Last tag on this branch
 LAST_TAG := $(shell git describe --tags --abbrev=0)
+# Next version
+NEXT_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2+1,0}')
+NEXT_PATCH_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2,$$3+1}')
 # If we are working on a release, override branch to master
 ifdef RELEASE_TAG
 	BRANCH := master
+	LAST_TAG := $(shell git describe --abbrev=0 --tags $(VERSION)^)
 endif
-TAG_BRANCH := -$(BRANCH)
-BRANCH_PATH := branch/
+TAG_BRANCH := .$(BRANCH)
+BRANCH_PATH := branch/$(BRANCH)/
 # If building HEAD or master then unset TAG_BRANCH and BRANCH_PATH
 ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
 	TAG_BRANCH :=
 	BRANCH_PATH :=
 endif
-# Make version suffix -DDD-gCCCCCCCC (D=commits since last relase, C=Commit) or blank
-VERSION_SUFFIX := $(shell git describe --abbrev=8 --tags | perl -lpe 's/^v\d+\.\d+\.\d+//; s/^-(\d+)/"-".sprintf("%03d",$$1)/e;')
-# TAG is current version + number of commits since last release + branch
+# Make version suffix -beta.NNNN.CCCCCCCC (N=Commit number, C=Commit)
+VERSION_SUFFIX := -beta.$(shell git rev-list --count HEAD).$(shell git show --no-patch --no-notes --pretty='%h' HEAD)
+# TAG is current version + commit number + commit + branch
 TAG := $(VERSION)$(VERSION_SUFFIX)$(TAG_BRANCH)
-NEXT_VERSION := $(shell echo $(VERSION) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
-ifndef RELEASE_TAG
-	TAG := $(TAG)-beta
+ifdef RELEASE_TAG
+	TAG := $(RELEASE_TAG)
 endif
 GO_VERSION := $(shell go version)
-GO_FILES := $(shell go list ./... )
 ifdef BETA_SUBDIR
 	BETA_SUBDIR := /$(BETA_SUBDIR)
 endif
@@ -44,20 +46,19 @@ endif
 .PHONY: rclone test_all vars version
 
 rclone:
-	go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
+	go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS)
 	mkdir -p `go env GOPATH`/bin/
 	cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
 	mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`
 
 test_all:
-	go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
+	go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all
 
 vars:
 	@echo SHELL="'$(SHELL)'"
 	@echo BRANCH="'$(BRANCH)'"
 	@echo TAG="'$(TAG)'"
 	@echo VERSION="'$(VERSION)'"
 	@echo NEXT_VERSION="'$(NEXT_VERSION)'"
 	@echo GO_VERSION="'$(GO_VERSION)'"
 	@echo BETA_URL="'$(BETA_URL)'"
@@ -75,10 +76,10 @@ test: rclone test_all
 
 # Quick test
 quicktest:
-	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) $(GO_FILES)
+	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) ./...
 
 racequicktest:
-	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race $(GO_FILES)
+	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...
 
 # Do source code quality checks
 check: rclone
@@ -164,6 +165,11 @@ validate_website: website
 tarball:
 	git archive -9 --format=tar.gz --prefix=rclone-$(TAG)/ -o build/rclone-$(TAG).tar.gz $(TAG)
 
+vendorball:
+	go mod vendor
+	tar -zcf build/rclone-$(TAG)-vendor.tar.gz vendor
+	rm -rf vendor
+
 sign_upload:
 	cd build && md5sum rclone-v* | gpg --clearsign > MD5SUMS
 	cd build && sha1sum rclone-v* | gpg --clearsign > SHA1SUMS
@@ -182,10 +188,10 @@ upload_github:
 	./bin/upload-github $(TAG)
 
 cross: doc
-	go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)
+	go run bin/cross-compile.go -release current $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
 
 beta:
-	go run bin/cross-compile.go $(BUILDTAGS) $(TAG)
+	go run bin/cross-compile.go $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
 	rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
 	@echo Beta release ready at https://pub.rclone.org/$(TAG)/
@@ -193,23 +199,23 @@ log_since_last_release:
 	git log $(LAST_TAG)..
 
 compile_all:
-	go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG)
+	go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
 
 ci_upload:
 	sudo chown -R $$USER build
 	find build -type l -delete
 	gzip -r9v build
 	./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
-ifndef BRANCH_PATH
+ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
 	./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
 endif
 	@echo Beta release ready at $(BETA_URL)/testbuilds
 
 ci_beta:
 	git log $(LAST_TAG).. > /tmp/git-log.txt
-	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)
+	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
 	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
-ifndef BRANCH_PATH
+ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
 	rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
 endif
 	@echo Beta release ready at $(BETA_URL)
@@ -221,26 +227,33 @@ fetch_binaries:
 serve: website
 	cd docs && hugo server -v -w --disableFastRender
 
-tag: doc
-	@echo "Old tag is $(VERSION)"
-	@echo "New tag is $(NEXT_VERSION)"
-	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)\"\n" | gofmt > fs/version.go
-	echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
-	echo "$(NEXT_VERSION)" > VERSION
-	git tag -s -m "Version $(NEXT_VERSION)" $(NEXT_VERSION)
-	bin/make_changelog.py $(LAST_TAG) $(NEXT_VERSION) > docs/content/changelog.md.new
+tag: retag doc
+	bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new
 	mv docs/content/changelog.md.new docs/content/changelog.md
 	@echo "Edit the new changelog in docs/content/changelog.md"
 	@echo "Then commit all the changes"
-	@echo git commit -m \"Version $(NEXT_VERSION)\" -a -v
+	@echo git commit -m \"Version $(VERSION)\" -a -v
+	@echo "And finally run make retag before make cross etc"
+
+retag:
+	@echo "Version is $(VERSION)"
+	git tag -f -s -m "Version $(VERSION)" $(VERSION)
 
 startdev:
-	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(VERSION)-DEV\"\n" | gofmt > fs/version.go
-	git commit -m "Start $(VERSION)-DEV development" fs/version.go
+	@echo "Version is $(VERSION)"
+	@echo "Next version is $(NEXT_VERSION)"
+	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)-DEV\"\n" | gofmt > fs/version.go
+	echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
+	echo "$(NEXT_VERSION)" > VERSION
+	git commit -m "Start $(NEXT_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
+
+startstable:
+	@echo "Version is $(VERSION)"
+	@echo "Next stable version is $(NEXT_PATCH_VERSION)"
+	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_PATCH_VERSION)-DEV\"\n" | gofmt > fs/version.go
+	echo -n "$(NEXT_PATCH_VERSION)" > docs/layouts/partials/version.html
+	echo "$(NEXT_PATCH_VERSION)" > VERSION
+	git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
 
 winzip:
 	zip -9 rclone-$(TAG).zip rclone.exe
```
```diff
@@ -64,6 +64,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
   * StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
   * SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
   * Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
+  * Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
   * Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
   * WebDAV [:page_facing_up:](https://rclone.org/webdav/)
   * Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
```
RELEASE.md: 55 lines changed
````diff
@@ -9,7 +9,7 @@ This file describes how to make the various kinds of releases
 
 ## Making a release
 
-  * git checkout master
+  * git checkout master # see below for stable branch
   * git pull
   * git status - make sure everything is checked in
   * Check GitHub actions build for master is Green
@@ -25,12 +25,13 @@ This file describes how to make the various kinds of releases
   * # Wait for the GitHub builds to complete then...
   * make fetch_binaries
   * make tarball
+  * make vendorball
   * make sign_upload
   * make check_sign
   * make upload
   * make upload_website
   * make upload_github
-  * make startdev
+  * make startdev # make startstable for stable branch
   * # announce with forum post, twitter post, patreon post
 
 Early in the next release cycle update the dependencies
@@ -41,60 +42,34 @@ Early in the next release cycle update the dependencies
   * git add new files
   * git commit -a -v
 
-If `make update` fails with errors like this:
-
-```
-# github.com/cpuguy83/go-md2man/md2man
-../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:11:16: undefined: blackfriday.EXTENSION_NO_INTRA_EMPHASIS
-../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:12:16: undefined: blackfriday.EXTENSION_TABLES
-```
-
-Can be fixed with
-
-  * GO111MODULE=on go get -u github.com/russross/blackfriday@v1.5.2
-  * GO111MODULE=on go mod tidy
-
 ## Making a point release
 
 If rclone needs a point release due to some horrendous bug:
 
-First make the release branch. If this is a second point release then
-this will be done already.
+Set vars
 
   * BASE_TAG=v1.XX # eg v1.52
   * NEW_TAG=${BASE_TAG}.Y # eg v1.52.1
   * echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1
 
+First make the release branch. If this is a second point release then
+this will be done already.
+
+  * git branch ${BASE_TAG} ${BASE_TAG}-stable
+  * git co ${BASE_TAG}-stable
+  * make startstable
+
 Now
 
   * git co ${BASE_TAG}-stable
   * git cherry-pick any fixes
-  * Test (see above)
-  * make NEXT_VERSION=${NEW_TAG} tag
-  * edit docs/content/changelog.md
-  * make TAG=${NEW_TAG} doc
-  * git commit -a -v -m "Version ${NEW_TAG}"
-  * git tag -d ${NEW_TAG}
-  * git tag -s -m "Version ${NEW_TAG}" ${NEW_TAG}
-  * git push --tags -u origin ${BASE_TAG}-stable
-  * Wait for builds to complete
-  * make BRANCH_PATH= TAG=${NEW_TAG} fetch_binaries
-  * make TAG=${NEW_TAG} tarball
-  * make TAG=${NEW_TAG} sign_upload
-  * make TAG=${NEW_TAG} check_sign
-  * make TAG=${NEW_TAG} upload
-  * make TAG=${NEW_TAG} upload_website
-  * make TAG=${NEW_TAG} upload_github
-  * NB this overwrites the current beta so we need to do this
+  * Do the steps as above
+  * make startstable
   * git co master
-  * make VERSION=${NEW_TAG} startdev
-  * # cherry pick the changes to the changelog and VERSION
-  * git checkout ${BASE_TAG}-stable VERSION docs/content/changelog.md
-  * git commit --amend
+  * `#` cherry pick the changes to the changelog - check the diff to make sure it is correct
+  * git checkout ${BASE_TAG}-stable docs/content/changelog.md
+  * git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
   * git push
   * Announce!
 
 ## Making a manual build of docker
````
```diff
@@ -5,6 +5,7 @@ import (
 	"strings"
 
 	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/cache"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/fspath"
@@ -46,9 +47,5 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	if strings.HasPrefix(opt.Remote, name+":") {
 		return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
 	}
-	fsInfo, configName, fsPath, config, err := fs.ConfigFs(opt.Remote)
-	if err != nil {
-		return nil, err
-	}
-	return fsInfo.NewFs(configName, fspath.JoinRootPath(fsPath, root), config)
+	return cache.Get(fspath.JoinRootPath(opt.Remote, root))
 }
```
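The alias change above replaces hand-rolled remote construction (`fs.ConfigFs` plus `fsInfo.NewFs`) with the `fs/cache` layer, which parses the remote string and memoizes the resulting `Fs` so that wrappers pointing at the same remote share one instance. A minimal sketch of the calling pattern, assuming the `cache.Get` signature of this era (it later gained a context argument):

```go
package sketch

import (
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/cache"
	"github.com/rclone/rclone/fs/fspath"
)

// resolveWrapped shows the calling pattern from the alias hunk above:
// join the configured remote with the requested root, then let the
// fs/cache layer parse it and return a shared, memoized Fs instance.
func resolveWrapped(remote, root string) (fs.Fs, error) {
	// e.g. "s3:bucket" + "dir" -> "s3:bucket/dir"
	fullPath := fspath.JoinRootPath(remote, root)
	// cache.Get may return fs.ErrorIsFile together with a valid Fs
	// when the path points at a file; callers treat that as success.
	return cache.Get(fullPath)
}
```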
```diff
@@ -76,23 +76,7 @@ func init() {
 			log.Fatalf("Failed to configure token: %v", err)
 		}
 	},
-	Options: []fs.Option{{
-		Name: config.ConfigClientID,
-		Help: "Amazon Application Client ID.",
-		Required: true,
-	}, {
-		Name: config.ConfigClientSecret,
-		Help: "Amazon Application Client Secret.",
-		Required: true,
-	}, {
-		Name: config.ConfigAuthURL,
-		Help: "Auth server URL.\nLeave blank to use Amazon's.",
-		Advanced: true,
-	}, {
-		Name: config.ConfigTokenURL,
-		Help: "Token server url.\nleave blank to use Amazon's.",
-		Advanced: true,
-	}, {
+	Options: append(oauthutil.SharedOptions, []fs.Option{{
 		Name: "checkpoint",
 		Help: "Checkpoint for internal polling (debug).",
 		Hide: fs.OptionHideBoth,
@@ -143,7 +127,7 @@ underlying S3 storage.`,
 			// Encode invalid UTF-8 bytes as json doesn't handle them properly.
 			Default: (encoder.Base |
 				encoder.EncodeInvalidUtf8),
-		}},
+		}}...),
 	})
 }
@@ -937,8 +921,8 @@ func (f *Fs) Hashes() hash.Set {
 // Optional interface: Only implement this if you have a way of
 // deleting all the files quicker than just running Remove() on the
 // result of List()
-func (f *Fs) Purge(ctx context.Context) error {
-	return f.purgeCheck(ctx, "", false)
+func (f *Fs) Purge(ctx context.Context, dir string) error {
+	return f.purgeCheck(ctx, dir, false)
 }
 
 // ------------------------------------------------------------
```
```diff
@@ -1,6 +1,6 @@
 // Package azureblob provides an interface to the Microsoft Azure blob object storage system
 
-// +build !plan9,!solaris,go1.13
+// +build !plan9,!solaris,!js,go1.13
 
 package azureblob
 
@@ -967,8 +967,7 @@ func (f *Fs) Hashes() hash.Set {
 }
 
 // Purge deletes all the files and directories including the old versions.
-func (f *Fs) Purge(ctx context.Context) error {
-	dir := "" // forward compat!
+func (f *Fs) Purge(ctx context.Context, dir string) error {
 	container, directory := f.split(dir)
 	if container == "" || directory != "" {
 		// Delegate to caller if not root of a container
@@ -1,4 +1,4 @@
-// +build !plan9,!solaris,go1.13
+// +build !plan9,!solaris,!js,go1.13
 
 package azureblob
 
@@ -1,6 +1,6 @@
 // Test AzureBlob filesystem interface
 
-// +build !plan9,!solaris,go1.13
+// +build !plan9,!solaris,!js,go1.13
 
 package azureblob
 
@@ -1,6 +1,6 @@
 // Build for azureblob for unsupported platforms to stop go complaining
 // about "no buildable Go source files "
 
-// +build plan9 solaris !go1.13
+// +build plan9 solaris js !go1.13
 
 package azureblob
```
```diff
@@ -1143,7 +1143,8 @@ func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
 // if oldOnly is true then it deletes only non current files.
 //
 // Implemented here so we can make sure we delete old versions.
-func (f *Fs) purge(ctx context.Context, bucket, directory string, oldOnly bool) error {
+func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
+	bucket, directory := f.split(dir)
 	if bucket == "" {
 		return errors.New("can't purge from root")
 	}
@@ -1218,19 +1219,19 @@ func (f *Fs) purge(ctx context.Context, bucket, directory string, oldOnly bool)
 	wg.Wait()
 
 	if !oldOnly {
-		checkErr(f.Rmdir(ctx, ""))
+		checkErr(f.Rmdir(ctx, dir))
 	}
 	return errReturn
 }
 
 // Purge deletes all the files and directories including the old versions.
-func (f *Fs) Purge(ctx context.Context) error {
-	return f.purge(ctx, f.rootBucket, f.rootDirectory, false)
+func (f *Fs) Purge(ctx context.Context, dir string) error {
+	return f.purge(ctx, dir, false)
 }
 
 // CleanUp deletes all the hidden files.
 func (f *Fs) CleanUp(ctx context.Context) error {
-	return f.purge(ctx, f.rootBucket, f.rootDirectory, true)
+	return f.purge(ctx, "", true)
 }
 
 // copy does a server side copy from dstObj <- srcObj
@@ -1672,8 +1673,8 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		opts.RootURL = o.fs.opt.DownloadURL
 	}
 
-	// Download by id if set otherwise by name
-	if o.id != "" {
+	// Download by id if set and not using DownloadURL otherwise by name
+	if o.id != "" && o.fs.opt.DownloadURL == "" {
 		opts.Path += "/b2api/v1/b2_download_file_by_id?fileId=" + urlEncode(o.id)
 	} else {
 		bucket, bucketPath := o.split()
```
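These b2 hunks belong to a tree-wide interface change: the optional `Purge` method gains a `dir` argument so a sub-directory can be purged rather than only the Fs root, with `CleanUp` passing `""` to keep its old whole-remote behaviour. A minimal backend-side sketch of the new shape (helper bodies elided; this is an illustration, not the b2 implementation):

```go
package sketch

import "context"

// Fs stands in for a backend of this era; purgeCheck is the shared
// helper the real backends call (body elided here).
type Fs struct{}

func (f *Fs) purgeCheck(ctx context.Context, dir string, oldOnly bool) error {
	// Delete everything under dir (only old versions when oldOnly),
	// then remove the directory itself.
	return nil
}

// Purge deletes all files and directories under dir. Before this
// change the signature was Purge(ctx) and it always operated on the
// root of the remote.
func (f *Fs) Purge(ctx context.Context, dir string) error {
	return f.purgeCheck(ctx, dir, false)
}

// CleanUp keeps its old meaning by purging old versions from the root.
func (f *Fs) CleanUp(ctx context.Context) error {
	return f.purgeCheck(ctx, "", true)
}
```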
```diff
@@ -87,26 +87,23 @@ func init() {
 	Config: func(name string, m configmap.Mapper) {
 		jsonFile, ok := m.Get("box_config_file")
 		boxSubType, boxSubTypeOk := m.Get("box_sub_type")
+		boxAccessToken, boxAccessTokenOk := m.Get("access_token")
 		var err error
 		// If using box config.json, use JWT auth
 		if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
 			err = refreshJWTToken(jsonFile, boxSubType, name, m)
 			if err != nil {
 				log.Fatalf("Failed to configure token with jwt authentication: %v", err)
 			}
-		} else {
+			// Else, if not using an access token, use oauth2
+		} else if boxAccessToken == "" || !boxAccessTokenOk {
 			err = oauthutil.Config("box", name, m, oauthConfig, nil)
 			if err != nil {
 				log.Fatalf("Failed to configure token with oauth authentication: %v", err)
 			}
 		}
 	},
-	Options: []fs.Option{{
-		Name: config.ConfigClientID,
-		Help: "Box App Client Id.\nLeave blank normally.",
-	}, {
-		Name: config.ConfigClientSecret,
-		Help: "Box App Client Secret\nLeave blank normally.",
-	}, {
+	Options: append(oauthutil.SharedOptions, []fs.Option{{
 		Name: "root_folder_id",
 		Help: "Fill in for rclone to use a non root folder as its starting point.",
 		Default: "0",
@@ -114,6 +111,9 @@ func init() {
 	}, {
 		Name: "box_config_file",
 		Help: "Box App config.json location\nLeave blank normally." + env.ShellExpandHelp,
+	}, {
+		Name: "access_token",
+		Help: "Box App Primary Access Token\nLeave blank normally.",
 	}, {
 		Name: "box_sub_type",
 		Default: "user",
@@ -149,7 +149,7 @@ func init() {
 			encoder.EncodeBackSlash |
 			encoder.EncodeRightSpace |
 			encoder.EncodeInvalidUtf8),
-	}},
+	}}...),
 })
 }
@@ -247,6 +247,7 @@ type Options struct {
 	CommitRetries int                  `config:"commit_retries"`
 	Enc           encoder.MultiEncoder `config:"encoding"`
 	RootFolderID  string               `config:"root_folder_id"`
+	AccessToken   string               `config:"access_token"`
 }
 
 // Fs represents a remote box
@@ -385,16 +386,22 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	}
 
 	root = parsePath(root)
-	oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
-	if err != nil {
-		return nil, errors.Wrap(err, "failed to configure Box")
-	}
+
+	client := fshttp.NewClient(fs.Config)
+	var ts *oauthutil.TokenSource
+	// If not using an accessToken, create an oauth client and tokensource
+	if opt.AccessToken == "" {
+		client, ts, err = oauthutil.NewClient(name, m, oauthConfig)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to configure Box")
+		}
+	}
 
 	f := &Fs{
 		name:        name,
 		root:        root,
 		opt:         *opt,
-		srv:         rest.NewClient(oAuthClient).SetRoot(rootURL),
+		srv:         rest.NewClient(client).SetRoot(rootURL),
 		pacer:       fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 		uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
 	}
@@ -404,23 +411,30 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	}).Fill(f)
 	f.srv.SetErrorHandler(errorHandler)
 
+	// If using an accessToken, set the Authorization header
+	if f.opt.AccessToken != "" {
+		f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken)
+	}
+
 	jsonFile, ok := m.Get("box_config_file")
 	boxSubType, boxSubTypeOk := m.Get("box_sub_type")
 
-	// If using box config.json and JWT, renewing should just refresh the token and
-	// should do so whether there are uploads pending or not.
-	if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
-		f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
-			err := refreshJWTToken(jsonFile, boxSubType, name, m)
-			return err
-		})
-		f.tokenRenewer.Start()
-	} else {
-		// Renew the token in the background
-		f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
-			_, err := f.readMetaDataForPath(ctx, "")
-			return err
-		})
+	if ts != nil {
+		// If using box config.json and JWT, renewing should just refresh the token and
+		// should do so whether there are uploads pending or not.
+		if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
+			f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
+				err := refreshJWTToken(jsonFile, boxSubType, name, m)
+				return err
+			})
+			f.tokenRenewer.Start()
+		} else {
+			// Renew the token in the background
+			f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
+				_, err := f.readMetaDataForPath(ctx, "")
+				return err
+			})
+		}
 	}
 
 	// Get rootFolderID
@@ -842,8 +856,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 // Optional interface: Only implement this if you have a way of
 // deleting all the files quicker than just running Remove() on the
 // result of List()
-func (f *Fs) Purge(ctx context.Context) error {
-	return f.purgeCheck(ctx, "", false)
+func (f *Fs) Purge(ctx context.Context, dir string) error {
+	return f.purgeCheck(ctx, dir, false)
 }
 
 // move a file or folder
@@ -1258,8 +1272,10 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
 //
 // The new object may have been created if an error is returned
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
-	o.fs.tokenRenewer.Start()
-	defer o.fs.tokenRenewer.Stop()
+	if o.fs.tokenRenewer != nil {
+		o.fs.tokenRenewer.Start()
+		defer o.fs.tokenRenewer.Stop()
+	}
 
 	size := src.Size()
 	modTime := src.ModTime(ctx)
```
backend/cache/cache.go (vendored): 29 lines changed
```diff
@@ -1,4 +1,4 @@
-// +build !plan9
+// +build !plan9,!js
 
 package cache
 
@@ -361,15 +361,10 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
 		return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
 	}
 
-	wInfo, wName, wPath, wConfig, err := fs.ConfigFs(opt.Remote)
-	if err != nil {
-		return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", opt.Remote)
-	}
-
-	remotePath := fspath.JoinRootPath(wPath, rootPath)
-	wrappedFs, wrapErr := wInfo.NewFs(wName, remotePath, wConfig)
+	remotePath := fspath.JoinRootPath(opt.Remote, rootPath)
+	wrappedFs, wrapErr := cache.Get(remotePath)
 	if wrapErr != nil && wrapErr != fs.ErrorIsFile {
-		return nil, errors.Wrapf(wrapErr, "failed to make remote %s:%s to wrap", wName, remotePath)
+		return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
 	}
 	var fsErr error
 	fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
@@ -390,6 +385,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
 		cleanupChan:      make(chan bool, 1),
 		notifiedRemotes:  make(map[string]bool),
 	}
+	cache.PinUntilFinalized(f.Fs, f)
 	f.rateLimiter = rate.NewLimiter(rate.Limit(float64(opt.Rps)), opt.TotalWorkers)
 
 	f.plexConnector = &plexConnector{}
@@ -1702,17 +1698,20 @@ func (f *Fs) Hashes() hash.Set {
 	return f.Fs.Hashes()
 }
 
-// Purge all files in the root and the root directory
-func (f *Fs) Purge(ctx context.Context) error {
-	fs.Infof(f, "purging cache")
-	f.cache.Purge()
+// Purge all files in the directory
+func (f *Fs) Purge(ctx context.Context, dir string) error {
+	if dir == "" {
+		// FIXME this isn't quite right as it should purge the dir prefix
+		fs.Infof(f, "purging cache")
+		f.cache.Purge()
+	}
 
 	do := f.Fs.Features().Purge
 	if do == nil {
-		return nil
+		return fs.ErrorCantPurge
 	}
 
-	err := do(ctx)
+	err := do(ctx, dir)
 	if err != nil {
 		return err
 	}
```
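On the calling side, wrapping backends like cache look the feature up at runtime and now return `fs.ErrorCantPurge` when the wrapped remote has no `Purge`, instead of silently succeeding, so generic fallback code can take over. A minimal sketch of that delegation, assuming the `Features()` accessor used in the hunk above:

```go
package sketch

import (
	"context"

	"github.com/rclone/rclone/fs"
)

// purgeWrapped delegates a directory purge to the wrapped Fs when it
// implements the optional Purge feature, and reports fs.ErrorCantPurge
// otherwise so the caller can fall back to deleting files individually
// and removing directories.
func purgeWrapped(ctx context.Context, wrapped fs.Fs, dir string) error {
	do := wrapped.Features().Purge
	if do == nil {
		return fs.ErrorCantPurge
	}
	return do(ctx, dir)
}
```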
backend/cache/cache_internal_test.go (vendored): 356 lines changed
```diff
@@ -1,4 +1,4 @@
-// +build !plan9
+// +build !plan9,!js
 // +build !race
 
 package cache_test
@@ -16,7 +16,6 @@ import (
 	"os"
 	"path"
 	"path/filepath"
-	"runtime"
 	"runtime/debug"
 	"strings"
 	"testing"
@@ -31,13 +30,10 @@ import (
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/object"
-	"github.com/rclone/rclone/fs/rc"
 	"github.com/rclone/rclone/fstest"
 	"github.com/rclone/rclone/fstest/testy"
 	"github.com/rclone/rclone/lib/random"
-	"github.com/rclone/rclone/vfs"
-	"github.com/rclone/rclone/vfs/vfsflags"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -53,9 +49,7 @@ const (
 
 var (
 	remoteName                  string
-	mountDir                    string
 	uploadDir                   string
-	useMount                    bool
 	runInstance                 *run
 	errNotSupported             = errors.New("not supported")
 	decryptedToEncryptedRemotes = map[string]string{
@@ -91,9 +85,7 @@ var (
 
 func init() {
 	goflag.StringVar(&remoteName, "remote-internal", "TestInternalCache", "Remote to test with, defaults to local filesystem")
-	goflag.StringVar(&mountDir, "mount-dir-internal", "", "")
 	goflag.StringVar(&uploadDir, "upload-dir-internal", "", "")
-	goflag.BoolVar(&useMount, "cache-use-mount", false, "Test only with mount")
 }
 
 // TestMain drives the tests
@@ -101,7 +93,7 @@ func TestMain(m *testing.M) {
 	goflag.Parse()
 	var rc int
 
-	log.Printf("Running with the following params: \n remote: %v, \n mount: %v", remoteName, useMount)
+	log.Printf("Running with the following params: \n remote: %v", remoteName)
 	runInstance = newRun()
 	rc = m.Run()
 	os.Exit(rc)
@@ -274,31 +266,6 @@ func TestInternalObjNotFound(t *testing.T) {
 	require.Nil(t, obj)
 }
 
-func TestInternalRemoteWrittenFileFoundInMount(t *testing.T) {
-	if !runInstance.useMount {
-		t.Skip("test needs mount mode")
-	}
-	id := fmt.Sprintf("tirwffim%v", time.Now().Unix())
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
-	defer runInstance.cleanupFs(t, rootFs, boltDb)
-
-	cfs, err := runInstance.getCacheFs(rootFs)
-	require.NoError(t, err)
-
-	var testData []byte
-	if runInstance.rootIsCrypt {
-		testData, err = base64.StdEncoding.DecodeString(cryptedTextBase64)
-		require.NoError(t, err)
-	} else {
-		testData = []byte("test content")
-	}
-
-	runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test"), testData)
-	data, err := runInstance.readDataFromRemote(t, rootFs, "test", 0, int64(len([]byte("test content"))), false)
-	require.NoError(t, err)
-	require.Equal(t, "test content", string(data))
-}
-
 func TestInternalCachedWrittenContentMatches(t *testing.T) {
 	testy.SkipUnreliable(t)
 	id := fmt.Sprintf("ticwcm%v", time.Now().Unix())
@@ -694,79 +661,6 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
 	require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
 }
 
-func TestInternalChangeSeenAfterRc(t *testing.T) {
-	cacheExpire := rc.Calls.Get("cache/expire")
-	assert.NotNil(t, cacheExpire)
-
-	id := fmt.Sprintf("ticsarc%v", time.Now().Unix())
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
-	defer runInstance.cleanupFs(t, rootFs, boltDb)
-
-	if !runInstance.useMount {
-		t.Skipf("needs mount")
-	}
-	if !runInstance.wrappedIsExternal {
-		t.Skipf("needs drive")
-	}
-
-	cfs, err := runInstance.getCacheFs(rootFs)
-	require.NoError(t, err)
-	chunkSize := cfs.ChunkSize()
-
-	// create some rand test data
-	testData := randStringBytes(int(chunkSize*4 + chunkSize/2))
-	runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
-
-	// update in the wrapped fs
-	o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
-	require.NoError(t, err)
-	wrappedTime := time.Now().Add(-1 * time.Hour)
-	err = o.SetModTime(context.Background(), wrappedTime)
-	require.NoError(t, err)
-
-	// get a new instance from the cache
-	co, err := rootFs.NewObject(context.Background(), "data.bin")
-	require.NoError(t, err)
-	require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String())
-
-	// Call the rc function
-	m, err := cacheExpire.Fn(context.Background(), rc.Params{"remote": "data.bin"})
-	require.NoError(t, err)
-	require.Contains(t, m, "status")
-	require.Contains(t, m, "message")
-	require.Equal(t, "ok", m["status"])
-	require.Contains(t, m["message"], "cached file cleared")
-
-	// get a new instance from the cache
-	co, err = rootFs.NewObject(context.Background(), "data.bin")
-	require.NoError(t, err)
-	require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
-	_, err = runInstance.list(t, rootFs, "")
-	require.NoError(t, err)
-
-	// create some rand test data
-	testData2 := randStringBytes(int(chunkSize))
-	runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test2"), testData2)
-
-	// list should have 1 item only
-	li1, err := runInstance.list(t, rootFs, "")
-	require.NoError(t, err)
-	require.Len(t, li1, 1)
-
-	// Call the rc function
-	m, err = cacheExpire.Fn(context.Background(), rc.Params{"remote": "/"})
-	require.NoError(t, err)
-	require.Contains(t, m, "status")
-	require.Contains(t, m, "message")
-	require.Equal(t, "ok", m["status"])
-	require.Contains(t, m["message"], "cached directory cleared")
-
-	// list should have 2 items now
-	li2, err := runInstance.list(t, rootFs, "")
-	require.NoError(t, err)
-	require.Len(t, li2, 2)
-}
-
 func TestInternalCacheWrites(t *testing.T) {
 	id := "ticw"
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
@@ -914,15 +808,9 @@ func TestInternalBug2117(t *testing.T) {
 type run struct {
 	okDiff            time.Duration
 	runDefaultCfgMap  configmap.Simple
-	mntDir            string
 	tmpUploadDir      string
-	useMount          bool
-	isMounted         bool
 	rootIsCrypt       bool
 	wrappedIsExternal bool
-	unmountFn         func() error
-	unmountRes        chan error
-	vfs               *vfs.VFS
 	tempFiles         []*os.File
 	dbPath            string
 	chunkPath         string
@@ -932,9 +820,7 @@ type run struct {
 func newRun() *run {
 	var err error
 	r := &run{
-		okDiff:    time.Second * 9, // really big diff here but the build machines seem to be slow. need a different way for this
-		useMount:  useMount,
-		isMounted: false,
+		okDiff: time.Second * 9, // really big diff here but the build machines seem to be slow. need a different way for this
 	}
 
 	// Read in all the defaults for all the options
@@ -947,32 +833,6 @@ func newRun() *run {
 		r.runDefaultCfgMap.Set(option.Name, fmt.Sprint(option.Default))
 	}
 
-	if mountDir == "" {
-		if runtime.GOOS != "windows" {
-			r.mntDir, err = ioutil.TempDir("", "rclonecache-mount")
-			if err != nil {
-				log.Fatalf("Failed to create mount dir: %v", err)
-				return nil
-			}
-		} else {
-			// Find a free drive letter
-			drive := ""
-			for letter := 'E'; letter <= 'Z'; letter++ {
-				drive = string(letter) + ":"
-				_, err := os.Stat(drive + "\\")
-				if os.IsNotExist(err) {
-					goto found
-				}
-			}
-			log.Print("Couldn't find free drive letter for test")
-		found:
-			r.mntDir = drive
-		}
-	} else {
-		r.mntDir = mountDir
-	}
-	log.Printf("Mount Dir: %v", r.mntDir)
-
 	if uploadDir == "" {
 		r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
 		if err != nil {
@@ -1086,33 +946,21 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 	}
 
 	if purge {
-		_ = f.Features().Purge(context.Background())
+		_ = f.Features().Purge(context.Background(), "")
 		require.NoError(t, err)
 	}
 	err = f.Mkdir(context.Background(), "")
 	require.NoError(t, err)
-	if r.useMount && !r.isMounted {
-		r.mountFs(t, f)
-	}
 
 	return f, boltDb
 }
 
 func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
-	if r.useMount && r.isMounted {
-		r.unmountFs(t, f)
-	}
-
-	err := f.Features().Purge(context.Background())
+	err := f.Features().Purge(context.Background(), "")
 	require.NoError(t, err)
 	cfs, err := r.getCacheFs(f)
 	require.NoError(t, err)
 	cfs.StopBackgroundRunners()
 
-	if r.useMount && runtime.GOOS != "windows" {
-		err = os.RemoveAll(r.mntDir)
-		require.NoError(t, err)
-	}
 	err = os.RemoveAll(r.tmpUploadDir)
 	require.NoError(t, err)
@@ -1152,37 +1000,11 @@ func (r *run) writeObjectString(t *testing.T, f fs.Fs, remote, content string) f
 }
 
 func (r *run) writeRemoteBytes(t *testing.T, f fs.Fs, remote string, data []byte) {
-	var err error
-
-	if r.useMount {
-		err = r.retryBlock(func() error {
-			return ioutil.WriteFile(path.Join(r.mntDir, remote), data, 0600)
-		}, 3, time.Second*3)
-		require.NoError(t, err)
-		r.vfs.WaitForWriters(10 * time.Second)
-	} else {
-		r.writeObjectBytes(t, f, remote, data)
-	}
+	r.writeObjectBytes(t, f, remote, data)
 }
 
 func (r *run) writeRemoteReader(t *testing.T, f fs.Fs, remote string, in io.ReadCloser) {
 	defer func() {
 		_ = in.Close()
 	}()
-
-	if r.useMount {
-		out, err := os.Create(path.Join(r.mntDir, remote))
-		require.NoError(t, err)
-		defer func() {
-			_ = out.Close()
-		}()
-
-		_, err = io.Copy(out, in)
-		require.NoError(t, err)
-		r.vfs.WaitForWriters(10 * time.Second)
-	} else {
-		r.writeObjectReader(t, f, remote, in)
-	}
+	r.writeObjectReader(t, f, remote, in)
 }
 
 func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object {
@@ -1199,10 +1021,6 @@ func (r *run) writeObjectReader(t *testing.T, f fs.Fs, remote string, in io.Read
 	objInfo := object.NewStaticObjectInfo(remote, modTime, -1, true, nil, f)
 	obj, err := f.Put(context.Background(), in, objInfo)
 	require.NoError(t, err)
-	if r.useMount {
-		r.vfs.WaitForWriters(10 * time.Second)
-	}
 
 	return obj
 }
@@ -1210,26 +1028,16 @@ func (r *run) updateObjectRemote(t *testing.T, f fs.Fs, remote string, data1 []b
 	var err error
 	var obj fs.Object
 
-	if r.useMount {
-		err = ioutil.WriteFile(path.Join(r.mntDir, remote), data1, 0600)
-		require.NoError(t, err)
-		r.vfs.WaitForWriters(10 * time.Second)
-		err = ioutil.WriteFile(path.Join(r.mntDir, remote), data2, 0600)
-		require.NoError(t, err)
-		r.vfs.WaitForWriters(10 * time.Second)
-		obj, err = f.NewObject(context.Background(), remote)
-	} else {
-		in1 := bytes.NewReader(data1)
-		in2 := bytes.NewReader(data2)
-		objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
-		objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
+	in1 := bytes.NewReader(data1)
+	in2 := bytes.NewReader(data2)
+	objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
+	objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
 
-		obj, err = f.Put(context.Background(), in1, objInfo1)
-		require.NoError(t, err)
-		obj, err = f.NewObject(context.Background(), remote)
-		require.NoError(t, err)
-		err = obj.Update(context.Background(), in2, objInfo2)
-	}
+	obj, err = f.Put(context.Background(), in1, objInfo1)
+	require.NoError(t, err)
+	obj, err = f.NewObject(context.Background(), remote)
+	require.NoError(t, err)
+	err = obj.Update(context.Background(), in2, objInfo2)
 	require.NoError(t, err)
 
 	return obj
@@ -1239,30 +1047,12 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
 	size := end - offset
 	checkSample := make([]byte, size)
 
-	if r.useMount {
-		f, err := os.Open(path.Join(r.mntDir, remote))
-		defer func() {
-			_ = f.Close()
-		}()
-		if err != nil {
-			return checkSample, err
-		}
-		_, _ = f.Seek(offset, io.SeekStart)
-		totalRead, err := io.ReadFull(f, checkSample)
-		checkSample = checkSample[:totalRead]
-		if err == io.EOF || err == io.ErrUnexpectedEOF {
-			err = nil
-		}
-		if err != nil {
-			return checkSample, err
-		}
-	} else {
-		co, err := f.NewObject(context.Background(), remote)
-		if err != nil {
-			return checkSample, err
-		}
-		checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
-	}
+	co, err := f.NewObject(context.Background(), remote)
+	if err != nil {
+		return checkSample, err
+	}
+	checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
 
 	if !noLengthCheck && size != int64(len(checkSample)) {
 		return checkSample, errors.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
 	}
@@ -1285,28 +1075,19 @@ func (r *run) readDataFromObj(t *testing.T, o fs.Object, offset, end int64, noLe
 }
 
 func (r *run) mkdir(t *testing.T, f fs.Fs, remote string) {
-	var err error
-	if r.useMount {
-		err = os.Mkdir(path.Join(r.mntDir, remote), 0700)
-	} else {
-		err = f.Mkdir(context.Background(), remote)
-	}
+	err := f.Mkdir(context.Background(), remote)
 	require.NoError(t, err)
 }
 
 func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
 	var err error
 
-	if r.useMount {
-		err = os.Remove(path.Join(r.mntDir, remote))
+	var obj fs.Object
+	obj, err = f.NewObject(context.Background(), remote)
+	if err != nil {
+		err = f.Rmdir(context.Background(), remote)
 	} else {
-		var obj fs.Object
-		obj, err = f.NewObject(context.Background(), remote)
-		if err != nil {
-			err = f.Rmdir(context.Background(), remote)
-		} else {
-			err = obj.Remove(context.Background())
-		}
+		err = obj.Remove(context.Background())
 	}
 
 	return err
@@ -1315,18 +1096,10 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
 func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error) {
 	var err error
 	var l []interface{}
-	if r.useMount {
-		var list []os.FileInfo
-		list, err = ioutil.ReadDir(path.Join(r.mntDir, remote))
-		for _, ll := range list {
-			l = append(l, ll)
-		}
-	} else {
-		var list fs.DirEntries
-		list, err = f.List(context.Background(), remote)
-		for _, ll := range list {
-			l = append(l, ll)
-		}
+	var list fs.DirEntries
+	list, err = f.List(context.Background(), remote)
+	for _, ll := range list {
+		l = append(l, ll)
 	}
 	return l, err
 }
@@ -1355,13 +1128,7 @@ func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
 func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
 	var err error
 
-	if runInstance.useMount {
-		err = os.Rename(path.Join(runInstance.mntDir, src), path.Join(runInstance.mntDir, dst))
-		if err != nil {
-			return err
-		}
-		r.vfs.WaitForWriters(10 * time.Second)
-	} else if rootFs.Features().DirMove != nil {
+	if rootFs.Features().DirMove != nil {
 		err = rootFs.Features().DirMove(context.Background(), rootFs, src, dst)
 		if err != nil {
 			return err
@@ -1377,13 +1144,7 @@ func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
 func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
 	var err error
 
-	if runInstance.useMount {
-		err = os.Rename(path.Join(runInstance.mntDir, src), path.Join(runInstance.mntDir, dst))
-		if err != nil {
-			return err
-		}
-		r.vfs.WaitForWriters(10 * time.Second)
-	} else if rootFs.Features().Move != nil {
+	if rootFs.Features().Move != nil {
 		obj1, err := rootFs.NewObject(context.Background(), src)
 		if err != nil {
 			return err
@@ -1403,13 +1164,7 @@ func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
 func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
 	var err error
 
-	if r.useMount {
-		err = r.copyFile(t, rootFs, path.Join(r.mntDir, src), path.Join(r.mntDir, dst))
-		if err != nil {
-			return err
-		}
-		r.vfs.WaitForWriters(10 * time.Second)
-	} else if rootFs.Features().Copy != nil {
+	if rootFs.Features().Copy != nil {
 		obj, err := rootFs.NewObject(context.Background(), src)
 		if err != nil {
 			return err
@@ -1429,13 +1184,6 @@ func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
 func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error) {
 	var err error
 
-	if r.useMount {
-		fi, err := os.Stat(path.Join(runInstance.mntDir, src))
-		if err != nil {
-			return time.Time{}, err
-		}
-		return fi.ModTime(), nil
-	}
 	obj1, err := rootFs.NewObject(context.Background(), src)
 	if err != nil {
 		return time.Time{}, err
@@ -1446,13 +1194,6 @@ func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error)
 func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
 	var err error
 
-	if r.useMount {
-		fi, err := os.Stat(path.Join(runInstance.mntDir, src))
-		if err != nil {
-			return int64(0), err
-		}
-		return fi.Size(), nil
-	}
 	obj1, err := rootFs.NewObject(context.Background(), src)
 	if err != nil {
 		return int64(0), err
@@ -1463,28 +1204,15 @@ func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
 func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) error {
 	var err error
 
-	if r.useMount {
-		var f *os.File
-		f, err = os.OpenFile(path.Join(runInstance.mntDir, src), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
-		if err != nil {
-			return err
-		}
-		defer func() {
-			_ = f.Close()
-			r.vfs.WaitForWriters(10 * time.Second)
-		}()
-		_, err = f.WriteString(data + append)
-	} else {
-		var obj1 fs.Object
-		obj1, err = rootFs.NewObject(context.Background(), src)
-		if err != nil {
-			return err
-		}
-		data1 := []byte(data + append)
-		r := bytes.NewReader(data1)
-		objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
-		err = obj1.Update(context.Background(), r, objInfo1)
+	var obj1 fs.Object
+	obj1, err = rootFs.NewObject(context.Background(), src)
+	if err != nil {
+		return err
 	}
+	data1 := []byte(data + append)
+	reader := bytes.NewReader(data1)
+	objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
```
objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
|
||||
err = obj1.Update(context.Background(), reader, objInfo1)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
backend/cache/cache_mount_other_test.go (vendored, 21 lines changed)
@@ -1,21 +0,0 @@
// +build !linux !go1.13
// +build !darwin !go1.13
// +build !freebsd !go1.13
// +build !windows
// +build !race

package cache_test

import (
"testing"

"github.com/rclone/rclone/fs"
)

func (r *run) mountFs(t *testing.T, f fs.Fs) {
panic("mountFs not defined for this platform")
}

func (r *run) unmountFs(t *testing.T, f fs.Fs) {
panic("unmountFs not defined for this platform")
}
backend/cache/cache_mount_unix_test.go (vendored, 79 lines changed)
@@ -1,79 +0,0 @@
// +build linux,go1.13 darwin,go1.13 freebsd,go1.13
// +build !race

package cache_test

import (
"os"
"testing"
"time"

"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
"github.com/rclone/rclone/cmd/mount"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/require"
)

func (r *run) mountFs(t *testing.T, f fs.Fs) {
device := f.Name() + ":" + f.Root()
var options = []fuse.MountOption{
fuse.MaxReadahead(uint32(mountlib.MaxReadAhead)),
fuse.Subtype("rclone"),
fuse.FSName(device), fuse.VolumeName(device),
fuse.NoAppleDouble(),
fuse.NoAppleXattr(),
//fuse.AllowOther(),
}
err := os.MkdirAll(r.mntDir, os.ModePerm)
require.NoError(t, err)
c, err := fuse.Mount(r.mntDir, options...)
require.NoError(t, err)
filesys := mount.NewFS(f)
server := fusefs.New(c, nil)

// Serve the mount point in the background returning error to errChan
r.unmountRes = make(chan error, 1)
go func() {
err := server.Serve(filesys)
closeErr := c.Close()
if err == nil {
err = closeErr
}
r.unmountRes <- err
}()

// check if the mount process has an error to report
<-c.Ready
require.NoError(t, c.MountError)

r.unmountFn = func() error {
// Shutdown the VFS
filesys.VFS.Shutdown()
return fuse.Unmount(r.mntDir)
}

r.vfs = filesys.VFS
r.isMounted = true
}

func (r *run) unmountFs(t *testing.T, f fs.Fs) {
var err error

for i := 0; i < 4; i++ {
err = r.unmountFn()
if err != nil {
//log.Printf("signal to umount failed - retrying: %v", err)
time.Sleep(3 * time.Second)
continue
}
break
}
require.NoError(t, err)
err = <-r.unmountRes
require.NoError(t, err)
err = r.vfs.CleanUp()
require.NoError(t, err)
r.isMounted = false
}
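
The deleted unix mount helper above follows a standard bazil.org/fuse serve loop: mount, serve in a goroutine that reports its result on a channel, wait for readiness, then unmount and drain the channel. A minimal sketch of the same pattern, assuming the bazil.org/fuse vintage used above (the function name and mountpoint handling are illustrative, not rclone code):

    package mountsketch

    import (
        "bazil.org/fuse"
        fusefs "bazil.org/fuse/fs"
    )

    // serveAndUnmount mounts filesys at dir, serves it until unmounted,
    // and returns the first error from mount, serve or unmount.
    func serveAndUnmount(dir string, filesys fusefs.FS) error {
        conn, err := fuse.Mount(dir, fuse.FSName("sketch"))
        if err != nil {
            return err
        }
        // Serve in the background and report the result on a channel
        // so the unmount path can wait for a clean shutdown.
        res := make(chan error, 1)
        go func() {
            serveErr := fusefs.New(conn, nil).Serve(filesys)
            closeErr := conn.Close()
            if serveErr == nil {
                serveErr = closeErr
            }
            res <- serveErr
        }()
        <-conn.Ready // wait until the kernel mount completes
        if conn.MountError != nil {
            return conn.MountError
        }
        // ... exercise the mount here ...
        if err := fuse.Unmount(dir); err != nil {
            return err
        }
        return <-res // Serve returns once the kernel connection closes
    }

Buffering the result channel with capacity 1 matters: the serving goroutine can always deliver its result even if nobody is waiting yet.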
backend/cache/cache_mount_windows_test.go (vendored, 125 lines changed)
@@ -1,125 +0,0 @@
// +build windows
// +build !race

package cache_test

import (
"fmt"
"os"
"testing"
"time"

"github.com/billziss-gh/cgofuse/fuse"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd/cmount"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/require"
)

// waitFor runs fn() until it returns true or the timeout expires
func waitFor(fn func() bool) (ok bool) {
const totalWait = 10 * time.Second
const individualWait = 10 * time.Millisecond
for i := 0; i < int(totalWait/individualWait); i++ {
ok = fn()
if ok {
return ok
}
time.Sleep(individualWait)
}
return false
}

func (r *run) mountFs(t *testing.T, f fs.Fs) {
// FIXME implement cmount
t.Skip("windows not supported yet")

device := f.Name() + ":" + f.Root()
options := []string{
"-o", "fsname=" + device,
"-o", "subtype=rclone",
"-o", fmt.Sprintf("max_readahead=%d", mountlib.MaxReadAhead),
"-o", "uid=-1",
"-o", "gid=-1",
"-o", "allow_other",
// This causes FUSE to supply O_TRUNC with the Open
// call which is more efficient for cmount. However
// it does not work with cgofuse on Windows with
// WinFSP so cmount must work with or without it.
"-o", "atomic_o_trunc",
"--FileSystemName=rclone",
}

fsys := cmount.NewFS(f)
host := fuse.NewFileSystemHost(fsys)

// Serve the mount point in the background returning error to errChan
r.unmountRes = make(chan error, 1)
go func() {
var err error
ok := host.Mount(r.mntDir, options)
if !ok {
err = errors.New("mount failed")
}
r.unmountRes <- err
}()

// unmount
r.unmountFn = func() error {
// Shutdown the VFS
fsys.VFS.Shutdown()
if host.Unmount() {
if !waitFor(func() bool {
_, err := os.Stat(r.mntDir)
return err != nil
}) {
t.Fatalf("mountpoint %q didn't disappear after unmount - continuing anyway", r.mntDir)
}
return nil
}
return errors.New("host unmount failed")
}

// Wait for the filesystem to become ready, checking the file
// system didn't blow up before starting
select {
case err := <-r.unmountRes:
require.NoError(t, err)
case <-time.After(time.Second * 3):
}

// Wait for the mount point to be available on Windows
// On Windows the Init signal comes slightly before the mount is ready
if !waitFor(func() bool {
_, err := os.Stat(r.mntDir)
return err == nil
}) {
t.Errorf("mountpoint %q didn't became available on mount", r.mntDir)
}

r.vfs = fsys.VFS
r.isMounted = true
}

func (r *run) unmountFs(t *testing.T, f fs.Fs) {
// FIXME implement cmount
t.Skip("windows not supported yet")
var err error

for i := 0; i < 4; i++ {
err = r.unmountFn()
if err != nil {
//log.Printf("signal to umount failed - retrying: %v", err)
time.Sleep(3 * time.Second)
continue
}
break
}
require.NoError(t, err)
err = <-r.unmountRes
require.NoError(t, err)
err = r.vfs.CleanUp()
require.NoError(t, err)
r.isMounted = false
}
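
waitFor above is a poll-until-timeout helper with a fixed 10s/10ms budget. The same idea with the budget made explicit, as a generic sketch (names and durations are illustrative):

    package mountsketch

    import "time"

    // pollUntil calls cond every interval until it returns true or the
    // timeout elapses, reporting whether cond ever succeeded.
    func pollUntil(cond func() bool, timeout, interval time.Duration) bool {
        deadline := time.Now().Add(timeout)
        for time.Now().Before(deadline) {
            if cond() {
                return true
            }
            time.Sleep(interval)
        }
        return false
    }

A caller would wait for a mountpoint with something like pollUntil(func() bool { _, err := os.Stat(mntDir); return err == nil }, 10*time.Second, 10*time.Millisecond).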
backend/cache/cache_test.go (vendored, 2 lines changed)
@@ -1,6 +1,6 @@
// Test Cache filesystem interface

// +build !plan9
// +build !plan9,!js
// +build !race

package cache_test

backend/cache/cache_unsupported.go (vendored, 2 lines changed)
@@ -1,6 +1,6 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build plan9
// +build plan9 js

package cache

backend/cache/cache_upload_test.go (vendored, 2 lines changed)
@@ -1,4 +1,4 @@
// +build !plan9
// +build !plan9,!js
// +build !race

package cache_test

backend/cache/directory.go (vendored, 2 lines changed)
@@ -1,4 +1,4 @@
// +build !plan9
// +build !plan9,!js

package cache

backend/cache/handle.go (vendored, 2 lines changed)
@@ -1,4 +1,4 @@
// +build !plan9
// +build !plan9,!js

package cache

backend/cache/object.go (vendored, 2 lines changed)
@@ -1,4 +1,4 @@
// +build !plan9
// +build !plan9,!js

package cache

backend/cache/plex.go (vendored, 2 lines changed)
@@ -1,4 +1,4 @@
// +build !plan9
// +build !plan9,!js

package cache

backend/cache/storage_memory.go (vendored, 2 lines changed)
@@ -1,4 +1,4 @@
// +build !plan9
// +build !plan9,!js

package cache

backend/cache/storage_persistent.go (vendored, 2 lines changed)
@@ -1,4 +1,4 @@
// +build !plan9
// +build !plan9,!js

package cache

backend/chunker/chunker.go

@@ -24,6 +24,7 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
@@ -120,6 +121,8 @@ const maxTransactionProbes = 100
// standard chunker errors
var (
ErrChunkOverflow = errors.New("chunk number overflow")
ErrMetaTooBig = errors.New("metadata is too big")
ErrMetaUnknown = errors.New("unknown metadata, please upgrade rclone")
)

// variants of baseMove's parameter delMode
@@ -238,15 +241,18 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.New("can't point remote at itself - check the value of the remote setting")
}

baseInfo, baseName, basePath, baseConfig, err := fs.ConfigFs(remote)
baseName, basePath, err := fspath.Parse(remote)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
}
if baseName != "" {
baseName += ":"
}
// Look for a file first
remotePath := fspath.JoinRootPath(basePath, rpath)
baseFs, err := baseInfo.NewFs(baseName, remotePath, baseConfig)
baseFs, err := cache.Get(baseName + remotePath)
if err != fs.ErrorIsFile && err != nil {
return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", baseName, remotePath)
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", baseName+remotePath)
}
if !operations.CanServerSideMove(baseFs) {
return nil, errors.New("can't use chunker on a backend which doesn't support server side move or copy")
@@ -258,6 +264,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
root: rpath,
opt: *opt,
}
cache.PinUntilFinalized(f.base, f)
f.dirSort = true // processEntries requires that meta Objects prerun data chunks atm.

if err := f.configure(opt.NameFormat, opt.MetaFormat, opt.HashType); err != nil {
@@ -271,7 +278,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
// (yet can't satisfy fstest.CheckListing, will ignore)
if err == nil && !f.useMeta && strings.Contains(rpath, "/") {
firstChunkPath := f.makeChunkName(remotePath, 0, "", "")
_, testErr := baseInfo.NewFs(baseName, firstChunkPath, baseConfig)
_, testErr := cache.Get(baseName + firstChunkPath)
if testErr == fs.ErrorIsFile {
err = testErr
}
@@ -291,6 +298,8 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
ServerSideAcrossConfigs: true,
}).Fill(f).Mask(baseFs).WrapsFs(f, baseFs)

f.features.Disable("ListR") // Recursive listing may cause chunker skip files

return f, err
}

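The switch from baseInfo.NewFs to cache.Get above shares wrapped backends through rclone's Fs cache rather than constructing a fresh one per wrapper, and cache.PinUntilFinalized keeps the wrapped Fs alive for the wrapper's lifetime. The underlying idea, sketched as a generic memoizing cache with pinning (this is not the fs/cache API, just an illustration):

    package cachesketch

    import "sync"

    type entry struct {
        value interface{}
        pins  int
    }

    // fsCache memoizes expensive constructors by remote string and lets
    // callers pin entries so they are not evicted while still in use.
    type fsCache struct {
        mu      sync.Mutex
        entries map[string]*entry
    }

    func newFsCache() *fsCache {
        return &fsCache{entries: make(map[string]*entry)}
    }

    func (c *fsCache) get(remote string, create func() (interface{}, error)) (interface{}, error) {
        c.mu.Lock()
        defer c.mu.Unlock()
        if e, ok := c.entries[remote]; ok {
            return e.value, nil // reuse the backend already built for this remote
        }
        v, err := create()
        if err != nil {
            return nil, err
        }
        c.entries[remote] = &entry{value: v}
        return v, nil
    }

    // pin marks an entry as in use; unpin releases it for eviction.
    func (c *fsCache) pin(remote string) {
        c.mu.Lock()
        defer c.mu.Unlock()
        if e, ok := c.entries[remote]; ok {
            e.pins++
        }
    }

    func (c *fsCache) unpin(remote string) {
        c.mu.Lock()
        defer c.mu.Unlock()
        if e, ok := c.entries[remote]; ok && e.pins > 0 {
            e.pins--
        }
    }
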
@@ -686,43 +695,47 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
switch entry := dirOrObject.(type) {
case fs.Object:
remote := entry.Remote()
if mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(remote); mainRemote != "" {
if xactID != "" {
if revealHidden {
fs.Infof(f, "ignore temporary chunk %q", remote)
}
break
mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(remote)
if mainRemote == "" {
// this is meta object or standalone file
object := f.newObject("", entry, nil)
byRemote[remote] = object
tempEntries = append(tempEntries, object)
break
}
// this is some kind of chunk
// metobject should have been created above if present
isSpecial := xactID != "" || ctrlType != ""
mainObject := byRemote[mainRemote]
if mainObject == nil && f.useMeta && !isSpecial {
fs.Debugf(f, "skip orphan data chunk %q", remote)
break
}
if mainObject == nil && !f.useMeta {
// this is the "nometa" case
// create dummy chunked object without metadata
mainObject = f.newObject(mainRemote, nil, nil)
byRemote[mainRemote] = mainObject
if !badEntry[mainRemote] {
tempEntries = append(tempEntries, mainObject)
}
if ctrlType != "" {
if revealHidden {
fs.Infof(f, "ignore control chunk %q", remote)
}
break
}
if isSpecial {
if revealHidden {
fs.Infof(f, "ignore non-data chunk %q", remote)
}
mainObject := byRemote[mainRemote]
if mainObject == nil && f.useMeta {
fs.Debugf(f, "skip chunk %q without meta object", remote)
break
}
if mainObject == nil {
// useMeta is false - create chunked object without metadata
mainObject = f.newObject(mainRemote, nil, nil)
byRemote[mainRemote] = mainObject
if !badEntry[mainRemote] {
tempEntries = append(tempEntries, mainObject)
}
}
if err := mainObject.addChunk(entry, chunkNo); err != nil {
if f.opt.FailHard {
return nil, err
}
badEntry[mainRemote] = true
// need to read metadata to ensure actual object type
if f.useMeta && mainObject != nil && mainObject.size <= maxMetadataSize {
mainObject.unsure = true
}
break
}
object := f.newObject("", entry, nil)
byRemote[remote] = object
tempEntries = append(tempEntries, object)
if err := mainObject.addChunk(entry, chunkNo); err != nil {
if f.opt.FailHard {
return nil, err
}
badEntry[mainRemote] = true
}
case fs.Directory:
isSubdir[entry.Remote()] = true
wrapDir := fs.NewDirCopy(ctx, entry)
@@ -777,6 +790,13 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
// but opening even a small file can be slow on some backends.
//
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.scanObject(ctx, remote, false)
}

// scanObject is like NewObject with optional quick scan mode.
// The quick mode avoids directory requests other than `List`,
// ignores non-chunked objects and skips chunk size checks.
func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.Object, error) {
if err := f.forbidChunk(false, remote); err != nil {
return nil, errors.Wrap(err, "can't access")
}
@@ -837,8 +857,15 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
continue // bypass regexp to save cpu
}
mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(entryRemote)
if mainRemote == "" || mainRemote != remote || ctrlType != "" || xactID != "" {
continue // skip non-conforming, temporary and control chunks
if mainRemote == "" || mainRemote != remote {
continue // skip non-conforming chunks
}
if ctrlType != "" || xactID != "" {
if f.useMeta {
// temporary/control chunk calls for lazy metadata read
o.unsure = true
}
continue
}
//fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
if err := o.addChunk(entry, chunkNo); err != nil {
@@ -848,7 +875,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {

if o.main == nil && (o.chunks == nil || len(o.chunks) == 0) {
// Scanning hasn't found data chunks with conforming names.
if f.useMeta {
if f.useMeta || quickScan {
// Metadata is required but absent and there are no chunks.
return nil, fs.ErrorObjectNotFound
}
@@ -871,8 +898,10 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// file without metadata. Validate it and update the total data size.
// As an optimization, skip metadata reading here - we will call
// readMetadata lazily when needed (reading can be expensive).
if err := o.validate(); err != nil {
return nil, err
if !quickScan {
if err := o.validate(); err != nil {
return nil, err
}
}
return o, nil
}
@@ -881,13 +910,24 @@ func (o *Object) readMetadata(ctx context.Context) error {
if o.isFull {
return nil
}
if !o.isComposite() || !o.f.useMeta {
if !o.f.useMeta || (!o.isComposite() && !o.unsure) {
o.isFull = true
return nil
}

// validate metadata
metaObject := o.main
if metaObject.Size() > maxMetadataSize {
if o.unsure {
// this is not metadata but a foreign object
o.unsure = false
o.chunks = nil // make isComposite return false
o.isFull = true // cache results
return nil
}
return ErrMetaTooBig
}

reader, err := metaObject.Open(ctx)
if err != nil {
return err
@@ -900,8 +940,22 @@ func (o *Object) readMetadata(ctx context.Context) error {

switch o.f.opt.MetaFormat {
case "simplejson":
metaInfo, err := unmarshalSimpleJSON(ctx, metaObject, metadata, true)
if err != nil {
metaInfo, madeByChunker, err := unmarshalSimpleJSON(ctx, metaObject, metadata)
if o.unsure {
o.unsure = false
if !madeByChunker {
// this is not metadata but a foreign object
o.chunks = nil // make isComposite return false
o.isFull = true // cache results
return nil
}
}
switch err {
case nil:
// fall thru
case ErrMetaTooBig, ErrMetaUnknown:
return err // return these errors unwrapped for unit tests
default:
return errors.Wrap(err, "invalid metadata")
}
if o.size != metaInfo.Size() || len(o.chunks) != metaInfo.nChunks {
@@ -916,7 +970,27 @@ func (o *Object) readMetadata(ctx context.Context) error {
}

// put implements Put, PutStream, PutUnchecked, Update
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options []fs.OpenOption, basePut putFn) (obj fs.Object, err error) {
func (f *Fs) put(
ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options []fs.OpenOption,
basePut putFn, action string, target fs.Object) (obj fs.Object, err error) {

if err := f.forbidChunk(src, remote); err != nil {
return nil, errors.Wrap(err, action+" refused")
}
if target == nil {
// Get target object with a quick directory scan
if obj, err := f.scanObject(ctx, remote, true); err == nil {
target = obj
}
}
if target != nil {
obj := target.(*Object)
if err := obj.readMetadata(ctx); err == ErrMetaUnknown {
// refuse to update a file of unsupported format
return nil, errors.Wrap(err, "refusing to "+action)
}
}

c := f.newChunkingReader(src)
wrapIn := c.wrapStream(ctx, in, src)

@@ -953,6 +1027,8 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
}
info := f.wrapInfo(src, chunkRemote, size)

// Refill chunkLimit and let basePut repeatedly call chunkingReader.Read()
c.chunkLimit = c.chunkSize
// TODO: handle range/limit options
chunk, errChunk := basePut(ctx, wrapIn, info, options...)
if errChunk != nil {
@@ -1004,8 +1080,8 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
// Check for input that looks like valid metadata
needMeta := len(c.chunks) > 1
if c.readCount <= maxMetadataSize && len(c.chunks) == 1 {
_, err := unmarshalSimpleJSON(ctx, c.chunks[0], c.smallHead, false)
needMeta = err == nil
_, madeByChunker, _ := unmarshalSimpleJSON(ctx, c.chunks[0], c.smallHead)
needMeta = madeByChunker
}

// Finalize small object as non-chunked.
@@ -1161,10 +1237,14 @@ func (c *chunkingReader) updateHashes() {
func (c *chunkingReader) Read(buf []byte) (bytesRead int, err error) {
if c.chunkLimit <= 0 {
// Chunk complete - switch to next one.
// Note #1:
// We might not get here because some remotes (eg. box multi-uploader)
// read the specified size exactly and skip the concluding EOF Read.
// Then a check in the put loop will kick in.
c.chunkLimit = c.chunkSize
// Note #2:
// The crypt backend after receiving EOF here will call Read again
// and we must insist on returning EOF, so we postpone refilling
// chunkLimit to the main loop.
return 0, io.EOF
}
if int64(len(buf)) > c.chunkLimit {
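
The chunkingReader above caps each chunk with a countdown (chunkLimit) and returns io.EOF at the boundary so the wrapped uploader sees one chunk as one complete stream; the put loop then re-arms the limit for the next chunk. A minimal standalone sketch of that technique (illustrative types, not the rclone implementation):

    package chunksketch

    import "io"

    // chunkReader exposes an underlying reader as a sequence of
    // fixed-size streams: Read returns io.EOF at each chunk boundary
    // and NextChunk re-arms the limit for the following chunk.
    type chunkReader struct {
        src   io.Reader
        size  int64 // chunk size
        limit int64 // bytes left in the current chunk
    }

    func newChunkReader(src io.Reader, size int64) *chunkReader {
        return &chunkReader{src: src, size: size, limit: size}
    }

    func (c *chunkReader) Read(p []byte) (int, error) {
        if c.limit <= 0 {
            // Boundary reached: keep returning EOF until NextChunk is
            // called, so a consumer that Reads again still sees
            // end-of-stream (the concern Note #2 above describes).
            return 0, io.EOF
        }
        if int64(len(p)) > c.limit {
            p = p[:c.limit] // never hand out more than the chunk allows
        }
        n, err := c.src.Read(p)
        c.limit -= int64(n)
        return n, err
    }

    // NextChunk refills the countdown before uploading the next chunk.
    func (c *chunkReader) NextChunk() { c.limit = c.size }
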
@@ -1248,29 +1328,16 @@ func (f *Fs) removeOldChunks(ctx context.Context, remote string) {
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if err := f.forbidChunk(src, src.Remote()); err != nil {
return nil, errors.Wrap(err, "refusing to put")
}
return f.put(ctx, in, src, src.Remote(), options, f.base.Put)
return f.put(ctx, in, src, src.Remote(), options, f.base.Put, "put", nil)
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if err := f.forbidChunk(src, src.Remote()); err != nil {
return nil, errors.Wrap(err, "refusing to upload")
}
return f.put(ctx, in, src, src.Remote(), options, f.base.Features().PutStream)
return f.put(ctx, in, src, src.Remote(), options, f.base.Features().PutStream, "upload", nil)
}

// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
if err := o.f.forbidChunk(o, o.Remote()); err != nil {
return errors.Wrap(err, "update refused")
}
if err := o.readMetadata(ctx); err != nil {
// refuse to update a file of unsupported format
return errors.Wrap(err, "refusing to update")
}
basePut := o.f.base.Put
if src.Size() < 0 {
basePut = o.f.base.Features().PutStream
@@ -1278,7 +1345,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return errors.New("wrapped file system does not support streaming uploads")
}
}
oNew, err := o.f.put(ctx, in, src, o.Remote(), options, basePut)
oNew, err := o.f.put(ctx, in, src, o.Remote(), options, basePut, "update", o)
if err == nil {
*o = *oNew.(*Object)
}
@@ -1333,7 +1400,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.base.Rmdir(ctx, dir)
}

// Purge all files in the root and the root directory
// Purge all files in the directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
@@ -1344,12 +1411,12 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// As a result it removes not only composite chunker files with their
// active chunks but also all hidden temporary chunks in the directory.
//
func (f *Fs) Purge(ctx context.Context) error {
func (f *Fs) Purge(ctx context.Context, dir string) error {
do := f.base.Features().Purge
if do == nil {
return fs.ErrorCantPurge
}
return do(ctx)
return do(ctx, dir)
}

// Remove an object (chunks and metadata, if any)
@@ -1392,7 +1459,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
// to corrupt file in hard mode. Hence, refuse to Remove, too.
return errors.Wrap(err, "refuse to corrupt")
}
if err := o.readMetadata(ctx); err != nil {
if err := o.readMetadata(ctx); err == ErrMetaUnknown {
// Proceed but warn user that unexpected things can happen.
fs.Errorf(o, "Removing a file with unsupported metadata: %v", err)
}
@@ -1420,6 +1487,11 @@ func (f *Fs) copyOrMove(ctx context.Context, o *Object, remote string, do copyMo
if err := f.forbidChunk(o, remote); err != nil {
return nil, errors.Wrapf(err, "can't %s", opName)
}
if err := o.readMetadata(ctx); err != nil {
// Refuse to copy/move composite files with invalid or future
// metadata format which might involve unsupported chunk types.
return nil, errors.Wrapf(err, "can't %s this file", opName)
}
if !o.isComposite() {
fs.Debugf(o, "%s non-chunked object...", opName)
oResult, err := do(ctx, o.mainChunk(), remote) // chain operation to a single wrapped chunk
@@ -1428,11 +1500,6 @@ func (f *Fs) copyOrMove(ctx context.Context, o *Object, remote string, do copyMo
}
return f.newObject("", oResult, nil), nil
}
if err := o.readMetadata(ctx); err != nil {
// Refuse to copy/move composite files with invalid or future
// metadata format which might involve unsupported chunk types.
return nil, errors.Wrapf(err, "can't %s this file", opName)
}

fs.Debugf(o, "%s %d data chunks...", opName, len(o.chunks))
mainRemote := o.remote
@@ -1523,6 +1590,10 @@ func (f *Fs) okForServerSide(ctx context.Context, src fs.Object, opName string)
return
}

if obj.unsure {
// ensure object is composite if need to re-read metadata
_ = obj.readMetadata(ctx)
}
requireMetaHash := obj.isComposite() && f.opt.MetaFormat == "simplejson"
if !requireMetaHash && !f.hashAll {
ok = true // hash is not required for metadata
@@ -1706,6 +1777,7 @@ type Object struct {
chunks []fs.Object // active data chunks if file is composite, or wrapped file as a single chunk if meta format is 'none'
size int64 // cached total size of chunks in a composite file or -1 for non-chunked files
isFull bool // true if metadata has been read
unsure bool // true if need to read metadata to detect object type
md5 string
sha1 string
f *Fs
@@ -1856,15 +1928,16 @@ func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
// on the level of wrapped remote but chunker is unaware of that.
//
func (o *Object) Hash(ctx context.Context, hashType hash.Type) (string, error) {
if err := o.readMetadata(ctx); err != nil {
return "", err // valid metadata is required to get hash, abort
}
if !o.isComposite() {
// First, chain to the wrapped non-chunked file if possible.
if value, err := o.mainChunk().Hash(ctx, hashType); err == nil && value != "" {
return value, nil
}
}
if err := o.readMetadata(ctx); err != nil {
return "", err // valid metadata is required to get hash, abort
}

// Try hash from metadata if the file is composite or if wrapped remote fails.
switch hashType {
case hash.MD5:
@@ -1889,13 +1962,13 @@ func (o *Object) UnWrap() fs.Object {

// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
if !o.isComposite() {
return o.mainChunk().Open(ctx, options...) // chain to wrapped non-chunked file
}
if err := o.readMetadata(ctx); err != nil {
// refuse to open unsupported format
return nil, errors.Wrap(err, "can't open")
}
if !o.isComposite() {
return o.mainChunk().Open(ctx, options...) // chain to wrapped non-chunked file
}

var openOptions []fs.OpenOption
var offset, limit int64 = 0, -1
@@ -2168,57 +2241,57 @@ func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1 s
// handled by current implementation.
// The version check below will then explicitly ask user to upgrade rclone.
//
func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte, strictChecks bool) (info *ObjectInfo, err error) {
func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte) (info *ObjectInfo, madeByChunker bool, err error) {
// Be strict about JSON format
// to reduce possibility that a random small file resembles metadata.
if data != nil && len(data) > maxMetadataSize {
return nil, errors.New("too big")
return nil, false, ErrMetaTooBig
}
if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
return nil, errors.New("invalid json")
return nil, false, errors.New("invalid json")
}
var metadata metaSimpleJSON
err = json.Unmarshal(data, &metadata)
if err != nil {
return nil, err
return nil, false, err
}
// Basic fields are strictly required
// to reduce possibility that a random small file resembles metadata.
if metadata.Version == nil || metadata.Size == nil || metadata.ChunkNum == nil {
return nil, errors.New("missing required field")
return nil, false, errors.New("missing required field")
}
// Perform strict checks, avoid corruption of future metadata formats.
if *metadata.Version < 1 {
return nil, errors.New("wrong version")
return nil, false, errors.New("wrong version")
}
if *metadata.Size < 0 {
return nil, errors.New("negative file size")
return nil, false, errors.New("negative file size")
}
if *metadata.ChunkNum < 0 {
return nil, errors.New("negative number of chunks")
return nil, false, errors.New("negative number of chunks")
}
if *metadata.ChunkNum > maxSafeChunkNumber {
return nil, ErrChunkOverflow
return nil, true, ErrChunkOverflow // produced by incompatible version of rclone
}
if metadata.MD5 != "" {
_, err = hex.DecodeString(metadata.MD5)
if len(metadata.MD5) != 32 || err != nil {
return nil, errors.New("wrong md5 hash")
return nil, false, errors.New("wrong md5 hash")
}
}
if metadata.SHA1 != "" {
_, err = hex.DecodeString(metadata.SHA1)
if len(metadata.SHA1) != 40 || err != nil {
return nil, errors.New("wrong sha1 hash")
return nil, false, errors.New("wrong sha1 hash")
}
}
// ChunkNum is allowed to be 0 in future versions
if *metadata.ChunkNum < 1 && *metadata.Version <= metadataVersion {
return nil, errors.New("wrong number of chunks")
return nil, false, errors.New("wrong number of chunks")
}
// Non-strict mode also accepts future metadata versions
if *metadata.Version > metadataVersion && strictChecks {
return nil, fmt.Errorf("version %d is not supported, please upgrade rclone", metadata.Version)
if *metadata.Version > metadataVersion {
return nil, true, ErrMetaUnknown // produced by incompatible version of rclone
}

var nilFs *Fs // nil object triggers appropriate type method
@@ -2226,7 +2299,7 @@ func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte,
info.nChunks = *metadata.ChunkNum
info.md5 = metadata.MD5
info.sha1 = metadata.SHA1
return info, nil
return info, true, nil
}

func silentlyRemove(ctx context.Context, o fs.Object) {

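unmarshalSimpleJSON deliberately over-validates so that a random small user file is almost never mistaken for chunker metadata, and after this change it also reports separately whether the blob was chunker-made at all. A condensed sketch of that validation style (field names and JSON tags are illustrative, not the chunker wire format):

    package metasketch

    import (
        "encoding/json"
        "errors"
    )

    type meta struct {
        Version  *int   `json:"ver"`
        Size     *int64 `json:"size"`
        ChunkNum *int   `json:"nchunks"`
    }

    // probeMeta is strict on purpose: pointer fields distinguish a
    // missing key from a zero value, and shape checks reject blobs
    // that merely happen to be small JSON.
    func probeMeta(data []byte) (*meta, error) {
        if len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
            return nil, errors.New("invalid json")
        }
        var m meta
        if err := json.Unmarshal(data, &m); err != nil {
            return nil, err
        }
        if m.Version == nil || m.Size == nil || m.ChunkNum == nil {
            return nil, errors.New("missing required field")
        }
        if *m.Version < 1 || *m.Size < 0 || *m.ChunkNum < 1 {
            return nil, errors.New("implausible metadata")
        }
        return &m, nil
    }
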
backend/crypt/crypt.go

@@ -12,6 +12,7 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
@@ -158,24 +159,25 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
}
wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
}
// Make sure to remove trailing . reffering to the current dir
if path.Base(rpath) == "." {
rpath = strings.TrimSuffix(rpath, ".")
}
// Look for a file first
remotePath := fspath.JoinRootPath(wPath, cipher.EncryptFileName(rpath))
wrappedFs, err := wInfo.NewFs(wName, remotePath, wConfig)
// if that didn't produce a file, look for a directory
if err != fs.ErrorIsFile {
remotePath = fspath.JoinRootPath(wPath, cipher.EncryptDirName(rpath))
wrappedFs, err = wInfo.NewFs(wName, remotePath, wConfig)
var wrappedFs fs.Fs
if rpath == "" {
wrappedFs, err = cache.Get(remote)
} else {
remotePath := fspath.JoinRootPath(remote, cipher.EncryptFileName(rpath))
wrappedFs, err = cache.Get(remotePath)
// if that didn't produce a file, look for a directory
if err != fs.ErrorIsFile {
remotePath = fspath.JoinRootPath(remote, cipher.EncryptDirName(rpath))
wrappedFs, err = cache.Get(remotePath)
}
}
if err != fs.ErrorIsFile && err != nil {
return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", wName, remotePath)
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remote)
}
f := &Fs{
Fs: wrappedFs,
@@ -184,6 +186,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
opt: *opt,
cipher: cipher,
}
cache.PinUntilFinalized(f.Fs, f)
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
f.features = (&fs.Features{
@@ -427,18 +430,18 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.Fs.Rmdir(ctx, f.cipher.EncryptDirName(dir))
}

// Purge all files in the root and the root directory
// Purge all files in the directory specified
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context) error {
func (f *Fs) Purge(ctx context.Context, dir string) error {
do := f.Fs.Features().Purge
if do == nil {
return fs.ErrorCantPurge
}
return do(ctx)
return do(ctx, f.cipher.EncryptDirName(dir))
}

// Copy src to this remote using server side copy operations.

backend/drive/drive.go

@@ -17,7 +17,6 @@ import (
"log"
"mime"
"net/http"
"net/url"
"path"
"sort"
"strconv"
@@ -37,6 +36,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
@@ -69,7 +69,7 @@ const (
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
minChunkSize = 256 * fs.KibiByte
defaultChunkSize = 8 * fs.MebiByte
partialFields = "id,name,size,md5Checksum,trashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails"
partialFields = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks"
listRGrouping = 50 // number of IDs to search at once when using ListR
listRInputBuffer = 1000 // size of input buffer when using ListR
)
@@ -157,6 +157,17 @@ func driveScopesContainsAppFolder(scopes []string) bool {
return false
}

func driveOAuthOptions() []fs.Option {
opts := []fs.Option{}
for _, opt := range oauthutil.SharedOptions {
if opt.Name == config.ConfigClientID {
opt.Help = "Google Application Client Id\nSetting your own is recommended.\nSee https://rclone.org/drive/#making-your-own-client-id for how to create your own.\nIf you leave this blank, it will use an internal key which is low performance."
}
opts = append(opts, opt)
}
return opts
}

// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -192,13 +203,7 @@ func init() {
log.Fatalf("Failed to configure team drive: %v", err)
}
},
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Google Application Client Id\nSetting your own is recommended.\nSee https://rclone.org/drive/#making-your-own-client-id for how to create your own.\nIf you leave this blank, it will use an internal key which is low performance.",
}, {
Name: config.ConfigClientSecret,
Help: "Google Application Client Secret\nSetting your own is recommended.",
}, {
Options: append(driveOAuthOptions(), []fs.Option{{
Name: "scope",
Help: "Scope that rclone should use when requesting access from drive.",
Examples: []fs.OptionExample{{
@@ -224,9 +229,6 @@ Leave blank normally.

Fill in to access "Computers" folders (see docs), or for rclone to use
a non root folder as its starting point.

Note that if this is blank, the first time rclone runs it will fill it
in with the ID of the root folder.
`,
}, {
Name: "service_account_file",
@@ -289,6 +291,11 @@ commands (copy, sync, etc), and with all other commands too.`,
Default: false,
Help: "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.",
Advanced: true,
}, {
Name: "starred_only",
Default: false,
Help: "Only show files that are starred.",
Advanced: true,
}, {
Name: "formats",
Default: "",
@@ -350,27 +357,15 @@ date is used.`,
Help: "Size of listing chunk 100-1000. 0 to disable.",
Advanced: true,
}, {
Name: "impersonate",
Default: "",
Help: `Impersonate this user when using a service account.

Note that if this is used then "root_folder_id" will be ignored.
`,
Name: "impersonate",
Default: "",
Help: `Impersonate this user when using a service account.`,
Advanced: true,
}, {
Name: "alternate_export",
Default: false,
Help: `Use alternate export URLs for google documents export.,

If this option is set this instructs rclone to use an alternate set of
export URLs for drive documents. Users have reported that the
official export URLs can't export large documents, whereas these
unofficial ones can.

See rclone issue [#2243](https://github.com/rclone/rclone/issues/2243) for background,
[this google drive issue](https://issuetracker.google.com/issues/36761333) and
[this helpful post](https://www.labnol.org/internet/direct-links-for-google-drive/28356/).`,
Advanced: true,
Help: "Deprecated: no longer needed",
Hide: fs.OptionHideBoth,
}, {
Name: "upload_cutoff",
Default: defaultChunkSize,
@@ -494,7 +489,7 @@ If this flag is set then rclone will ignore shortcut files completely.
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
// Don't encode / as it's a valid name character in drive.
Default: encoder.EncodeInvalidUtf8,
}},
}}...),
})

// register duplicate MIME types first
@@ -524,6 +519,7 @@ type Options struct {
SkipChecksumGphotos bool `config:"skip_checksum_gphotos"`
SharedWithMe bool `config:"shared_with_me"`
TrashedOnly bool `config:"trashed_only"`
StarredOnly bool `config:"starred_only"`
Extensions string `config:"formats"`
ExportExtensions string `config:"export_formats"`
ImportExtensions string `config:"import_formats"`
@@ -532,7 +528,6 @@ type Options struct {
UseSharedDate bool `config:"use_shared_date"`
ListChunk int64 `config:"list_chunk"`
Impersonate string `config:"impersonate"`
AlternateExport bool `config:"alternate_export"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
AcknowledgeAbuse bool `config:"acknowledge_abuse"`
@@ -707,6 +702,7 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
}
query = append(query, q)
}

// Search with sharedWithMe will always return things listed in "Shared With Me" (without any parents)
// We must not filter with parent when we try list "ROOT" with drive-shared-with-me
// If we need to list file inside those shared folders, we must search it without sharedWithMe
@@ -718,8 +714,16 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
if parentsQuery.Len() > 1 {
_, _ = parentsQuery.WriteString(" or ")
}
if f.opt.SharedWithMe && dirID == f.rootFolderID {
_, _ = parentsQuery.WriteString("sharedWithMe=true")
if (f.opt.SharedWithMe || f.opt.StarredOnly) && dirID == f.rootFolderID {
if f.opt.SharedWithMe {
_, _ = parentsQuery.WriteString("sharedWithMe=true")
}
if f.opt.StarredOnly {
if f.opt.SharedWithMe {
_, _ = parentsQuery.WriteString(" and ")
}
_, _ = parentsQuery.WriteString("starred=true")
}
} else {
_, _ = fmt.Fprintf(parentsQuery, "'%s' in parents", dirID)
}
@@ -929,55 +933,32 @@ func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name
|
||||
if !config.Confirm(false) {
|
||||
return nil
|
||||
}
|
||||
client, err := createOAuthClient(opt, name, m)
|
||||
f, err := newFs(name, "", m)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "config team drive failed to create oauth client")
|
||||
}
|
||||
svc, err := drive.New(client)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "config team drive failed to make drive client")
|
||||
return errors.Wrap(err, "failed to make Fs to list teamdrives")
|
||||
}
|
||||
fmt.Printf("Fetching team drive list...\n")
|
||||
var driveIDs, driveNames []string
|
||||
listTeamDrives := svc.Teamdrives.List().PageSize(100)
|
||||
listFailed := false
|
||||
var defaultFs Fs // default Fs with default Options
|
||||
for {
|
||||
var teamDrives *drive.TeamDriveList
|
||||
err = newPacer(opt).Call(func() (bool, error) {
|
||||
teamDrives, err = listTeamDrives.Context(ctx).Do()
|
||||
return defaultFs.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
fmt.Printf("Listing team drives failed: %v\n", err)
|
||||
listFailed = true
|
||||
break
|
||||
}
|
||||
for _, drive := range teamDrives.TeamDrives {
|
||||
driveIDs = append(driveIDs, drive.Id)
|
||||
driveNames = append(driveNames, drive.Name)
|
||||
}
|
||||
if teamDrives.NextPageToken == "" {
|
||||
break
|
||||
}
|
||||
listTeamDrives.PageToken(teamDrives.NextPageToken)
|
||||
teamDrives, err := f.listTeamDrives(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var driveID string
|
||||
if !listFailed && len(driveIDs) == 0 {
|
||||
if len(teamDrives) == 0 {
|
||||
fmt.Printf("No team drives found in your account")
|
||||
} else {
|
||||
driveID = config.Choose("Enter a Team Drive ID", driveIDs, driveNames, true)
|
||||
return nil
|
||||
}
|
||||
var driveIDs, driveNames []string
|
||||
for _, teamDrive := range teamDrives {
|
||||
driveIDs = append(driveIDs, teamDrive.Id)
|
||||
driveNames = append(driveNames, teamDrive.Name)
|
||||
}
|
||||
driveID := config.Choose("Enter a Team Drive ID", driveIDs, driveNames, true)
|
||||
m.Set("team_drive", driveID)
|
||||
m.Set("root_folder_id", "")
|
||||
opt.TeamDriveID = driveID
|
||||
opt.RootFolderID = ""
|
||||
return nil
|
||||
}
|
||||
|
||||
// newPacer makes a pacer configured for drive
|
||||
func newPacer(opt *Options) *fs.Pacer {
|
||||
return fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst)))
|
||||
}
|
||||
|
||||
// getClient makes an http client according to the options
|
||||
func getClient(opt *Options) *http.Client {
|
||||
t := fshttp.NewTransportCustom(fs.Config, func(t *http.Transport) {
|
||||
@@ -1060,9 +1041,11 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
|
||||
ctx := context.Background()
|
||||
// newFs partially constructs Fs from the path
|
||||
//
|
||||
// It constructs a valid Fs but doesn't attempt to figure out whether
|
||||
// it is a file or a directory.
|
||||
func newFs(name, path string, m configmap.Mapper) (*Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
@@ -1092,7 +1075,7 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
pacer: newPacer(opt),
|
||||
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst))),
|
||||
m: m,
|
||||
grouping: listRGrouping,
|
||||
listRmu: new(sync.Mutex),
|
||||
@@ -1122,25 +1105,26 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
|
||||
}
|
||||
}
|
||||
|
||||
// If impersonating warn about root_folder_id if set and unset it
|
||||
//
|
||||
// This is because rclone v1.51 and v1.52 cached root_folder_id when
|
||||
// using impersonate which they shouldn't have done. It is possible
|
||||
// someone is using impersonate and root_folder_id in which case this
|
||||
// breaks their workflow. There isn't an easy way around that.
|
||||
if opt.RootFolderID != "" && opt.RootFolderID != "appDataFolder" && opt.Impersonate != "" {
|
||||
fs.Logf(f, "Ignoring cached root_folder_id when using --drive-impersonate")
|
||||
opt.RootFolderID = ""
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
|
||||
ctx := context.Background()
|
||||
f, err := newFs(name, path, m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// set root folder for a team drive or query the user root folder
|
||||
if opt.RootFolderID != "" {
|
||||
// override root folder if set or cached in the config and not impersonating
|
||||
f.rootFolderID = opt.RootFolderID
|
||||
// Set the root folder ID
|
||||
if f.opt.RootFolderID != "" {
|
||||
// use root_folder ID if set
|
||||
f.rootFolderID = f.opt.RootFolderID
|
||||
} else if f.isTeamDrive {
|
||||
// otherwise use team_drive if set
|
||||
f.rootFolderID = f.opt.TeamDriveID
|
||||
} else {
|
||||
// Look up the root ID and cache it in the config
|
||||
// otherwise look up the actual root ID
|
||||
rootID, err := f.getRootID()
|
||||
if err != nil {
|
||||
if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
|
||||
@@ -1152,27 +1136,24 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
|
||||
}
|
||||
}
|
||||
f.rootFolderID = rootID
|
||||
// Don't cache the root folder ID if impersonating
|
||||
if opt.Impersonate == "" {
|
||||
m.Set("root_folder_id", rootID)
|
||||
}
|
||||
fs.Debugf(f, "root_folder_id = %q - save this in the config to speed up startup", rootID)
|
||||
}
|
||||
|
||||
f.dirCache = dircache.New(root, f.rootFolderID, f)
|
||||
    f.dirCache = dircache.New(f.root, f.rootFolderID, f)

    // Parse extensions
-   if opt.Extensions != "" {
-       if opt.ExportExtensions != defaultExportExtensions {
+   if f.opt.Extensions != "" {
+       if f.opt.ExportExtensions != defaultExportExtensions {
            return nil, errors.New("only one of 'formats' and 'export_formats' can be specified")
        }
-       opt.Extensions, opt.ExportExtensions = "", opt.Extensions
+       f.opt.Extensions, f.opt.ExportExtensions = "", f.opt.Extensions
    }
-   f.exportExtensions, _, err = parseExtensions(opt.ExportExtensions, defaultExportExtensions)
+   f.exportExtensions, _, err = parseExtensions(f.opt.ExportExtensions, defaultExportExtensions)
    if err != nil {
        return nil, err
    }

-   _, f.importMimeTypes, err = parseExtensions(opt.ImportExtensions)
+   _, f.importMimeTypes, err = parseExtensions(f.opt.ImportExtensions)
    if err != nil {
        return nil, err
    }

@@ -1181,7 +1162,7 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
    err = f.dirCache.FindRoot(ctx, false)
    if err != nil {
        // Assume it is a file
-       newRoot, remote := dircache.SplitPath(root)
+       newRoot, remote := dircache.SplitPath(f.root)
        tempF := *f
        tempF.dirCache = dircache.New(newRoot, f.rootFolderID, &tempF)
        tempF.root = newRoot

@@ -1272,20 +1253,7 @@ func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, expor
    if err != nil {
        return nil, err
    }
-   id := actualID(info.Id)
-   url := fmt.Sprintf("%sfiles/%s/export?mimeType=%s", f.svc.BasePath, id, url.QueryEscape(mediaType))
-   if f.opt.AlternateExport {
-       switch info.MimeType {
-       case "application/vnd.google-apps.drawing":
-           url = fmt.Sprintf("https://docs.google.com/drawings/d/%s/export/%s", id, extension[1:])
-       case "application/vnd.google-apps.document":
-           url = fmt.Sprintf("https://docs.google.com/document/d/%s/export?format=%s", id, extension[1:])
-       case "application/vnd.google-apps.spreadsheet":
-           url = fmt.Sprintf("https://docs.google.com/spreadsheets/d/%s/export?format=%s", id, extension[1:])
-       case "application/vnd.google-apps.presentation":
-           url = fmt.Sprintf("https://docs.google.com/presentation/d/%s/export/%s", id, extension[1:])
-       }
-   }
+   url := info.ExportLinks[mediaType]
    baseObject := f.newBaseObject(remote+extension, info)
    baseObject.bytes = -1
    baseObject.mimeType = exportMimeType

@@ -1662,7 +1630,7 @@ func (s listRSlices) Less(i, j int) bool {
// In each cycle it will read up to grouping entries from the in channel without blocking.
// If an error occurs it will be send to the out channel and then return. Once the in channel is closed,
// nil is send to the out channel and the function returns.
-func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listREntry, out chan<- error, cb func(fs.DirEntry) error) {
+func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listREntry, out chan<- error, cb func(fs.DirEntry) error, sendJob func(listREntry)) {
    var dirs []string
    var paths []string
    var grouping int32

@@ -1741,26 +1709,19 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listRE
        // https://issuetracker.google.com/issues/149522397
        if len(dirs) > 1 && !foundItems {
            if atomic.SwapInt32(&f.grouping, 1) != 1 {
-               fs.Logf(f, "Disabling ListR to work around bug in drive as multi listing (%d) returned no entries", len(dirs))
+               fs.Debugf(f, "Disabling ListR to work around bug in drive as multi listing (%d) returned no entries", len(dirs))
            }
-           var recycled = make([]listREntry, len(dirs))
            f.listRmu.Lock()
            for i := range dirs {
-               recycled[i] = listREntry{id: dirs[i], path: paths[i]}
+               // Requeue the jobs
+               job := listREntry{id: dirs[i], path: paths[i]}
+               sendJob(job)
                // Make a note of these dirs - if they all turn
                // out to be empty then we can re-enable grouping
                f.listRempties[dirs[i]] = struct{}{}
            }
            f.listRmu.Unlock()
-           // recycle these in the background so we don't deadlock
-           // the listR runners if they all get here
-           wg.Add(len(recycled))
-           go func() {
-               for _, entry := range recycled {
-                   in <- entry
-               }
-               fs.Debugf(f, "Recycled %d entries", len(recycled))
-           }()
+           fs.Debugf(f, "Recycled %d entries", len(dirs))
        }
        // If using a grouping of 1 and dir was empty then check to see if it
        // is part of the group that caused grouping to be disabled.

@@ -1774,7 +1735,7 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listRE
            // empty so must have made a mistake
            if len(f.listRempties) == 0 {
                if atomic.SwapInt32(&f.grouping, listRGrouping) != listRGrouping {
-                   fs.Logf(f, "Re-enabling ListR as previous detection was in error")
+                   fs.Debugf(f, "Re-enabling ListR as previous detection was in error")
                }
            }
        }
@@ -1829,21 +1790,33 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
    overflow := []listREntry{}
    listed := 0

-   cb := func(entry fs.DirEntry) error {
+   // Send a job to the input channel if not closed. If the job
+   // won't fit then queue it in the overflow slice.
+   //
+   // This will not block if the channel is full.
+   sendJob := func(job listREntry) {
        mu.Lock()
        defer mu.Unlock()
-       if d, isDir := entry.(*fs.Dir); isDir && in != nil {
-           job := listREntry{actualID(d.ID()), d.Remote()}
-           select {
-           case in <- job:
-               // Adding the wg after we've entered the item is
-               // safe here because we know when the callback
-               // is called we are holding a waitgroup.
-               wg.Add(1)
-           default:
-               overflow = append(overflow, job)
-           }
+       if in == nil {
+           return
+       }
+       wg.Add(1)
+       select {
+       case in <- job:
+       default:
+           overflow = append(overflow, job)
+           wg.Add(-1)
        }
    }

+   // Send the entry to the caller, queueing any directories as new jobs
+   cb := func(entry fs.DirEntry) error {
+       if d, isDir := entry.(*fs.Dir); isDir {
+           job := listREntry{actualID(d.ID()), d.Remote()}
+           sendJob(job)
+       }
+       mu.Lock()
+       defer mu.Unlock()
        listed++
        return list.Add(entry)
    }

@@ -1852,7 +1825,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
    in <- listREntry{directoryID, dir}

    for i := 0; i < fs.Config.Checkers; i++ {
-       go f.listRRunner(ctx, &wg, in, out, cb)
+       go f.listRRunner(ctx, &wg, in, out, cb, sendJob)
    }
    go func() {
        // wait until the all directories are processed
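Note on the two hunks above: all directory-job submission is funnelled through sendJob, which never blocks — a job that does not fit in the bounded channel is parked in the overflow slice and re-sent later. A minimal standalone sketch of the pattern (the string job type and the parameter names are illustrative, and the mutex that guards overflow in the real change is omitted):

    package main

    import "sync"

    // Non-blocking send with an overflow queue.
    func sendJob(job string, in chan string, overflow *[]string, wg *sync.WaitGroup) {
        if in == nil { // input channel already shut down
            return
        }
        wg.Add(1) // count the job as in flight before queueing it
        select {
        case in <- job: // fits - a runner goroutine will pick it up
        default:
            *overflow = append(*overflow, job) // park it for a later retry
            wg.Add(-1)                         // not actually in flight yet
        }
    }

Adding to the WaitGroup before the send and undoing it on overflow keeps the accounting consistent whichever branch the select takes.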
@@ -2218,10 +2191,9 @@ func (f *Fs) delete(ctx context.Context, id string, useTrash bool) error {
    })
}

-// Rmdir deletes a directory
-//
-// Returns an error if it isn't empty
-func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+// purgeCheck removes the dir directory, if check is set then it
+// refuses to do so if it has anything in
+func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
    root := path.Join(f.root, dir)
    dc := f.dirCache
    directoryID, err := dc.FindDir(ctx, dir, false)

@@ -2234,20 +2206,22 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
        return f.delete(ctx, shortcutID, f.opt.UseTrash)
    }
    var trashedFiles = false
-   found, err := f.list(ctx, []string{directoryID}, "", false, false, true, func(item *drive.File) bool {
-       if !item.Trashed {
-           fs.Debugf(dir, "Rmdir: contains file: %q", item.Name)
-           return true
-       }
-       fs.Debugf(dir, "Rmdir: contains trashed file: %q", item.Name)
-       trashedFiles = true
-       return false
-   })
-   if err != nil {
-       return err
-   }
-   if found {
-       return errors.Errorf("directory not empty")
+   if check {
+       found, err := f.list(ctx, []string{directoryID}, "", false, false, true, func(item *drive.File) bool {
+           if !item.Trashed {
+               fs.Debugf(dir, "Rmdir: contains file: %q", item.Name)
+               return true
+           }
+           fs.Debugf(dir, "Rmdir: contains trashed file: %q", item.Name)
+           trashedFiles = true
+           return false
+       })
+       if err != nil {
+           return err
+       }
+       if found {
+           return errors.Errorf("directory not empty")
+       }
    }
    if root != "" {
        // trash the directory if it had trashed files

@@ -2257,6 +2231,8 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
        if err != nil {
            return err
        }
+   } else if check {
+       return errors.New("can't purge root directory")
    }
    f.dirCache.FlushDir(dir)
    if err != nil {

@@ -2265,6 +2241,13 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    return nil
}

+// Rmdir deletes a directory
+//
+// Returns an error if it isn't empty
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+   return f.purgeCheck(ctx, dir, true)
+}
+
// Precision of the object storage system
func (f *Fs) Precision() time.Duration {
    return time.Millisecond

@@ -2282,13 +2265,13 @@ func (f *Fs) Precision() time.Duration {
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
    var srcObj *baseObject
    ext := ""
-   readDescription := false
+   isDoc := false
    switch src := src.(type) {
    case *Object:
        srcObj = &src.baseObject
    case *documentObject:
        srcObj, ext = &src.baseObject, src.ext()
-       readDescription = true
+       isDoc = true
    case *linkObject:
        srcObj, ext = &src.baseObject, src.ext()
    default:

@@ -2296,6 +2279,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
        return nil, fs.ErrorCantCopy
    }

+   // Look to see if there is an existing object before we remove
+   // the extension from the remote
+   existingObject, _ := f.NewObject(ctx, remote)
+
+   // Adjust the remote name to be without the extension if we
+   // are about to create a doc.
    if ext != "" {
        if !strings.HasSuffix(remote, ext) {
            fs.Debugf(src, "Can't copy - not same document type")

@@ -2304,15 +2293,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
        remote = remote[:len(remote)-len(ext)]
    }

-   // Look to see if there is an existing object
-   existingObject, _ := f.NewObject(ctx, remote)
-
    createInfo, err := f.createFileInfo(ctx, remote, src.ModTime(ctx))
    if err != nil {
        return nil, err
    }

-   if readDescription {
+   if isDoc {
        // preserve the description on copy for docs
        info, err := f.getFile(actualID(srcObj.id), "description")
        if err != nil {

@@ -2344,6 +2330,22 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
    if err != nil {
        return nil, err
    }
+   // Google docs aren't preserving their mod time after copy, so set them explicitly
+   // See: https://github.com/rclone/rclone/issues/4517
+   //
+   // FIXME remove this when google fixes the problem!
+   if isDoc {
+       // A short sleep is needed here in order to make the
+       // change effective, without it is is ignored. This is
+       // probably some eventual consistency nastiness.
+       sleepTime := 2 * time.Second
+       fs.Debugf(f, "Sleeping for %v before setting the modtime to work around drive bug - see #4517", sleepTime)
+       time.Sleep(sleepTime)
+       err = newObject.SetModTime(ctx, src.ModTime(ctx))
+       if err != nil {
+           return nil, err
+       }
+   }
    if existingObject != nil {
        err = existingObject.Remove(ctx)
        if err != nil {

@@ -2358,23 +2360,11 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
-func (f *Fs) Purge(ctx context.Context) error {
-   if f.root == "" {
-       return errors.New("can't purge root directory")
-   }
+func (f *Fs) Purge(ctx context.Context, dir string) error {
    if f.opt.TrashedOnly {
        return errors.New("Can't purge with --drive-trashed-only. Use delete if you want to selectively delete files")
    }
-   rootID, err := f.dirCache.RootID(ctx, false)
-   if err != nil {
-       return err
-   }
-   err = f.delete(ctx, shortcutID(rootID), f.opt.UseTrash)
-   f.dirCache.ResetRoot()
-   if err != nil {
-       return err
-   }
-   return nil
+   return f.purgeCheck(ctx, dir, false)
}

// CleanUp empties the trash
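The net effect of the hunks above is one shared helper: purgeCheck does the work, Rmdir is the checked variant and Purge the unchecked one. A minimal sketch of the shape, with placeholder empty/delete methods standing in for the real listing and deletion calls:

    package main

    import (
        "context"
        "errors"
    )

    type fsys struct{}

    // purgeCheck removes dir; with check set it refuses if dir is not empty.
    func (f *fsys) purgeCheck(ctx context.Context, dir string, check bool) error {
        if check && !f.empty(ctx, dir) {
            return errors.New("directory not empty")
        }
        return f.delete(ctx, dir)
    }

    func (f *fsys) Rmdir(ctx context.Context, dir string) error { return f.purgeCheck(ctx, dir, true) }
    func (f *fsys) Purge(ctx context.Context, dir string) error { return f.purgeCheck(ctx, dir, false) }

    func (f *fsys) empty(ctx context.Context, dir string) bool    { return true } // placeholder
    func (f *fsys) delete(ctx context.Context, dir string) error { return nil }  // placeholder

The same Rmdir/Purge-over-purgeCheck split recurs in the dropbox, jottacloud, local, mailru, onedrive, opendrive and pcloud hunks later in this diff, driven by the Purge interface gaining a dir argument.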
@@ -2877,6 +2867,98 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
    return dstFs.newObjectWithInfo(dstPath, info)
}

+// List all team drives
+func (f *Fs) listTeamDrives(ctx context.Context) (drives []*drive.TeamDrive, err error) {
+   drives = []*drive.TeamDrive{}
+   listTeamDrives := f.svc.Teamdrives.List().PageSize(100)
+   var defaultFs Fs // default Fs with default Options
+   for {
+       var teamDrives *drive.TeamDriveList
+       err = f.pacer.Call(func() (bool, error) {
+           teamDrives, err = listTeamDrives.Context(ctx).Do()
+           return defaultFs.shouldRetry(err)
+       })
+       if err != nil {
+           return drives, errors.Wrap(err, "listing team drives failed")
+       }
+       drives = append(drives, teamDrives.TeamDrives...)
+       if teamDrives.NextPageToken == "" {
+           break
+       }
+       listTeamDrives.PageToken(teamDrives.NextPageToken)
+   }
+   return drives, nil
+}
+
+type unTrashResult struct {
+   Untrashed int
+   Errors    int
+}
+
+func (r unTrashResult) Error() string {
+   return fmt.Sprintf("%d errors while untrashing - see log", r.Errors)
+}
+
+// Restore the trashed files from dir, directoryID recursing if needed
+func (f *Fs) unTrash(ctx context.Context, dir string, directoryID string, recurse bool) (r unTrashResult, err error) {
+   directoryID = actualID(directoryID)
+   fs.Debugf(dir, "finding trash to restore in directory %q", directoryID)
+   _, err = f.list(ctx, []string{directoryID}, "", false, false, true, func(item *drive.File) bool {
+       remote := path.Join(dir, item.Name)
+       if item.ExplicitlyTrashed {
+           fs.Infof(remote, "restoring %q", item.Id)
+           if operations.SkipDestructive(ctx, remote, "restore") {
+               return false
+           }
+           update := drive.File{
+               ForceSendFields: []string{"Trashed"}, // necessary to set false value
+               Trashed:         false,
+           }
+           err := f.pacer.Call(func() (bool, error) {
+               _, err := f.svc.Files.Update(item.Id, &update).
+                   SupportsAllDrives(true).
+                   Fields("trashed").
+                   Do()
+               return f.shouldRetry(err)
+           })
+           if err != nil {
+               err = errors.Wrap(err, "failed to restore")
+               r.Errors++
+               fs.Errorf(remote, "%v", err)
+           } else {
+               r.Untrashed++
+           }
+       }
+       if recurse && item.MimeType == "application/vnd.google-apps.folder" {
+           if !isShortcutID(item.Id) {
+               rNew, _ := f.unTrash(ctx, remote, item.Id, recurse)
+               r.Untrashed += rNew.Untrashed
+               r.Errors += rNew.Errors
+           }
+       }
+       return false
+   })
+   if err != nil {
+       err = errors.Wrap(err, "failed to list directory")
+       r.Errors++
+       fs.Errorf(dir, "%v", err)
+   }
+   if r.Errors != 0 {
+       return r, r
+   }
+   return r, nil
+}
+
+// Untrash dir
+func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTrashResult, err error) {
+   directoryID, err := f.dirCache.FindDir(ctx, dir, false)
+   if err != nil {
+       r.Errors++
+       return r, err
+   }
+   return f.unTrash(ctx, dir, directoryID, true)
+}
+
var commandHelp = []fs.CommandHelp{{
    Name:  "get",
    Short: "Get command for fetching the drive config parameters",

@@ -2928,6 +3010,55 @@ authenticated with "drive2:" can't read files from "drive:".
    Opts: map[string]string{
        "target": "optional target remote for the shortcut destination",
    },
+}, {
+   Name:  "drives",
+   Short: "List the shared drives available to this account",
+   Long: `This command lists the shared drives (teamdrives) available to this
+account.
+
+Usage:
+
+    rclone backend drives drive:
+
+This will return a JSON list of objects like this
+
+    [
+        {
+            "id": "0ABCDEF-01234567890",
+            "kind": "drive#teamDrive",
+            "name": "My Drive"
+        },
+        {
+            "id": "0ABCDEFabcdefghijkl",
+            "kind": "drive#teamDrive",
+            "name": "Test Drive"
+        }
+    ]
+
+`,
+}, {
+   Name:  "untrash",
+   Short: "Untrash files and directories",
+   Long: `This command untrashes all the files and directories in the directory
+passed in recursively.
+
+Usage:
+
+This takes an optional directory to trash which make this easier to
+use via the API.
+
+    rclone backend untrash drive:directory
+    rclone backend -i untrash drive:directory subdir
+
+Use the -i flag to see what would be restored before restoring it.
+
+Result:
+
+    {
+        "Untrashed": 17,
+        "Errors": 0
+    }
+`,
+}}

// Command the backend to run a named command

@@ -2991,6 +3122,14 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
        }
    }
    return f.makeShortcut(ctx, arg[0], dstFs, arg[1])
+case "drives":
+   return f.listTeamDrives(ctx)
+case "untrash":
+   dir := ""
+   if len(arg) > 0 {
+       dir = arg[0]
+   }
+   return f.unTrashDir(ctx, dir, true)
default:
    return nil, fs.ErrorCommandNotFound
}
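listTeamDrives above uses the standard Google API paging idiom: issue the call inside the pacer, append the page, and follow NextPageToken until it comes back empty. A generic sketch of that loop in isolation (listPage is a stand-in for the paced API call, not an rclone function):

    package main

    import "context"

    // listAll drains a paged listing by following the page token.
    func listAll(ctx context.Context, listPage func(ctx context.Context, token string) (items []string, next string, err error)) ([]string, error) {
        var out []string
        token := ""
        for {
            items, next, err := listPage(ctx, token)
            if err != nil {
                return out, err
            }
            out = append(out, items...)
            if next == "" { // an empty token means the last page was reached
                return out, nil
            }
            token = next
        }
    }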
@@ -10,13 +10,16 @@ import (
    "path/filepath"
    "strings"
    "testing"
+   "time"

    "github.com/pkg/errors"
    _ "github.com/rclone/rclone/backend/local"
    "github.com/rclone/rclone/fs"
+   "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/fstest"
    "github.com/rclone/rclone/fstest/fstests"
+   "github.com/rclone/rclone/lib/random"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "google.golang.org/api/drive/v3"

@@ -361,6 +364,50 @@ func (f *Fs) InternalTestShortcuts(t *testing.T) {
    })
}

+// TestIntegration/FsMkdir/FsPutFiles/Internal/UnTrash
+func (f *Fs) InternalTestUnTrash(t *testing.T) {
+   ctx := context.Background()
+
+   // Make some objects, one in a subdir
+   contents := random.String(100)
+   file1 := fstest.NewItem("trashDir/toBeTrashed", contents, time.Now())
+   _, obj1 := fstests.PutTestContents(ctx, t, f, &file1, contents, false)
+   file2 := fstest.NewItem("trashDir/subdir/toBeTrashed", contents, time.Now())
+   _, _ = fstests.PutTestContents(ctx, t, f, &file2, contents, false)
+
+   // Check objects
+   checkObjects := func() {
+       fstest.CheckListingWithRoot(t, f, "trashDir", []fstest.Item{
+           file1,
+           file2,
+       }, []string{
+           "trashDir/subdir",
+       }, f.Precision())
+   }
+   checkObjects()
+
+   // Make sure we are using the trash
+   require.Equal(t, true, f.opt.UseTrash)
+
+   // Remove the object and the dir
+   require.NoError(t, obj1.Remove(ctx))
+   require.NoError(t, f.Purge(ctx, "trashDir/subdir"))
+
+   // Check objects gone
+   fstest.CheckListingWithRoot(t, f, "trashDir", []fstest.Item{}, []string{}, f.Precision())
+
+   // Restore the object and directory
+   r, err := f.unTrashDir(ctx, "trashDir", true)
+   require.NoError(t, err)
+   assert.Equal(t, unTrashResult{Errors: 0, Untrashed: 2}, r)
+
+   // Check objects restored
+   checkObjects()
+
+   // Remove the test dir
+   require.NoError(t, f.Purge(ctx, "trashDir"))
+}
+
func (f *Fs) InternalTest(t *testing.T) {
    // These tests all depend on each other so run them as nested tests
    t.Run("DocumentImport", func(t *testing.T) {

@@ -376,6 +423,7 @@ func (f *Fs) InternalTest(t *testing.T) {
        })
    })
    t.Run("Shortcuts", f.InternalTestShortcuts)
+   t.Run("UnTrash", f.InternalTestUnTrash)
}

var _ fstests.InternalTester = (*Fs)(nil)
@@ -125,13 +125,7 @@ func init() {
            log.Fatalf("Failed to configure token: %v", err)
        }
    },
-   Options: []fs.Option{{
-       Name: config.ConfigClientID,
-       Help: "Dropbox App Client Id\nLeave blank normally.",
-   }, {
-       Name: config.ConfigClientSecret,
-       Help: "Dropbox App Client Secret\nLeave blank normally.",
-   }, {
+   Options: append(oauthutil.SharedOptions, []fs.Option{{
        Name: "chunk_size",
        Help: fmt.Sprintf(`Upload chunk size. (< %v).

@@ -161,7 +155,7 @@ memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
            encoder.EncodeDel |
            encoder.EncodeRightSpace |
            encoder.EncodeInvalidUtf8),
-   }},
+   }}...),
    })
}

@@ -611,10 +605,9 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
    return err
}

-// Rmdir deletes the container
-//
-// Returns an error if it isn't empty
-func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+// purgeCheck removes the root directory, if check is set then it
+// refuses to do so if it has anything in
+func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error) {
    root := path.Join(f.slashRoot, dir)

    // can't remove root

@@ -622,31 +615,33 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
        return errors.New("can't remove root directory")
    }

-   // check directory exists
-   _, err := f.getDirMetadata(root)
-   if err != nil {
-       return errors.Wrap(err, "Rmdir")
-   }
-
-   root = f.opt.Enc.FromStandardPath(root)
-   // check directory empty
-   arg := files.ListFolderArg{
-       Path:      root,
-       Recursive: false,
-   }
-   if root == "/" {
-       arg.Path = "" // Specify root folder as empty string
-   }
-   var res *files.ListFolderResult
-   err = f.pacer.Call(func() (bool, error) {
-       res, err = f.srv.ListFolder(&arg)
-       return shouldRetry(err)
-   })
-   if err != nil {
-       return errors.Wrap(err, "Rmdir")
-   }
-   if len(res.Entries) != 0 {
-       return errors.New("directory not empty")
+   if check {
+       // check directory exists
+       _, err = f.getDirMetadata(root)
+       if err != nil {
+           return errors.Wrap(err, "Rmdir")
+       }
+
+       root = f.opt.Enc.FromStandardPath(root)
+       // check directory empty
+       arg := files.ListFolderArg{
+           Path:      root,
+           Recursive: false,
+       }
+       if root == "/" {
+           arg.Path = "" // Specify root folder as empty string
+       }
+       var res *files.ListFolderResult
+       err = f.pacer.Call(func() (bool, error) {
+           res, err = f.srv.ListFolder(&arg)
+           return shouldRetry(err)
+       })
+       if err != nil {
+           return errors.Wrap(err, "Rmdir")
+       }
+       if len(res.Entries) != 0 {
+           return errors.New("directory not empty")
+       }
    }

    // remove it

@@ -657,6 +652,13 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    return err
}

+// Rmdir deletes the container
+//
+// Returns an error if it isn't empty
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+   return f.purgeCheck(ctx, dir, true)
+}
+
// Precision returns the precision
func (f *Fs) Precision() time.Duration {
    return time.Second

@@ -719,15 +721,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
-func (f *Fs) Purge(ctx context.Context) (err error) {
-   // Let dropbox delete the filesystem tree
-   err = f.pacer.Call(func() (bool, error) {
-       _, err = f.srv.DeleteV2(&files.DeleteArg{
-           Path: f.opt.Enc.FromStandardPath(f.slashRoot),
-       })
-       return shouldRetry(err)
-   })
-   return err
+func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
+   return f.purgeCheck(ctx, dir, false)
}

// Move src to this remote using server side move operations.
@@ -6,6 +6,7 @@ import (
    "net/http"
    "regexp"
    "strconv"
    "strings"
+   "time"

    "github.com/pkg/errors"

@@ -28,6 +29,20 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
+   // Detect this error which the integration tests provoke
+   // error HTTP error 403 (403 Forbidden) returned body: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"
+   //
+   // https://1fichier.com/api.html
+   //
+   // file/ls.cgi is limited :
+   //
+   // Warning (can be changed in case of abuses) :
+   // List all files of the account is limited to 1 request per hour.
+   // List folders is limited to 5 000 results and 1 request per folder per 30s.
+   if err != nil && strings.Contains(err.Error(), "Flood detected") {
+       fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
+       time.Sleep(30 * time.Second)
+   }
    return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

@@ -323,7 +323,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
-   if size > int64(100e9) {
+   if size > int64(300e9) {
        return nil, errors.New("File too big, cant upload")
    } else if size == 0 {
        return nil, fs.ErrorCantUploadEmptyFiles
@@ -88,13 +88,7 @@ func init() {
            log.Fatalf("Failed to configure token: %v", err)
        }
    },
-   Options: []fs.Option{{
-       Name: config.ConfigClientID,
-       Help: "Google Application Client Id\nLeave blank normally.",
-   }, {
-       Name: config.ConfigClientSecret,
-       Help: "Google Application Client Secret\nLeave blank normally.",
-   }, {
+   Options: append(oauthutil.SharedOptions, []fs.Option{{
        Name: "project_number",
        Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
    }, {

@@ -261,7 +255,7 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
        Default: (encoder.Base |
            encoder.EncodeCrLf |
            encoder.EncodeInvalidUtf8),
-   }},
+   }}...),
    })
}

@@ -847,20 +841,27 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
        remote: remote,
    }

-   var newObject *storage.Object
-   err = f.pacer.Call(func() (bool, error) {
-       copyObject := f.svc.Objects.Copy(srcBucket, srcPath, dstBucket, dstPath, nil)
-       if !f.opt.BucketPolicyOnly {
-           copyObject.DestinationPredefinedAcl(f.opt.ObjectACL)
-       }
-       newObject, err = copyObject.Context(ctx).Do()
-       return shouldRetry(err)
-   })
-   if err != nil {
-       return nil, err
+   rewriteRequest := f.svc.Objects.Rewrite(srcBucket, srcPath, dstBucket, dstPath, nil)
+   if !f.opt.BucketPolicyOnly {
+       rewriteRequest.DestinationPredefinedAcl(f.opt.ObjectACL)
+   }
+   var rewriteResponse *storage.RewriteResponse
+   for {
+       err = f.pacer.Call(func() (bool, error) {
+           rewriteResponse, err = rewriteRequest.Context(ctx).Do()
+           return shouldRetry(err)
+       })
+       if err != nil {
+           return nil, err
+       }
+       if rewriteResponse.Done {
+           break
+       }
+       rewriteRequest.RewriteToken(rewriteResponse.RewriteToken)
+       fs.Debugf(dstObj, "Continuing rewrite %d bytes done", rewriteResponse.TotalBytesRewritten)
    }
    // Set the metadata for the new object while we have it
-   dstObj.setMetaData(newObject)
+   dstObj.setMetaData(rewriteResponse.Resource)
    return dstObj, nil
}
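The copy hunk above swaps Objects.Copy for Objects.Rewrite, which may return before the copy has finished; the caller must loop, feeding the returned RewriteToken back in until Done is set, and Rewrite copes with copies too large to complete in a single request. A sketch of just the control flow (rewriteResp and call loosely mirror the storage API and are illustrative only):

    package main

    type rewriteResp struct {
        Done         bool
        RewriteToken string
    }

    // rewriteUntilDone keeps re-issuing the rewrite until the service
    // reports completion, resuming from the token it handed back.
    func rewriteUntilDone(call func(token string) (rewriteResp, error)) error {
        token := ""
        for {
            resp, err := call(token)
            if err != nil {
                return err
            }
            if resp.Done {
                return nil
            }
            token = resp.RewriteToken // resume where the service stopped
        }
    }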
@@ -21,7 +21,6 @@ import (
    "github.com/pkg/errors"
    "github.com/rclone/rclone/backend/googlephotos/api"
    "github.com/rclone/rclone/fs"
-   "github.com/rclone/rclone/fs/config"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/config/obscure"

@@ -110,13 +109,7 @@ func init() {
`)

    },
-   Options: []fs.Option{{
-       Name: config.ConfigClientID,
-       Help: "Google Application Client Id\nLeave blank normally.",
-   }, {
-       Name: config.ConfigClientSecret,
-       Help: "Google Application Client Secret\nLeave blank normally.",
-   }, {
+   Options: append(oauthutil.SharedOptions, []fs.Option{{
        Name:    "read_only",
        Default: false,
        Help: `Set to make the Google Photos backend read only.

@@ -139,7 +132,7 @@ you want to read the media.`,
        Default:  2000,
        Help:     `Year limits the photos to be downloaded to those which are uploaded after the given year`,
        Advanced: true,
-   }},
+   }}...),
    })
}

@@ -20,7 +20,6 @@ import (
    "github.com/pkg/errors"
    "github.com/rclone/rclone/backend/swift"
    "github.com/rclone/rclone/fs"
-   "github.com/rclone/rclone/fs/config"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/config/obscure"

@@ -63,13 +62,7 @@ func init() {
            log.Fatalf("Failed to configure token: %v", err)
        }
    },
-   Options: append([]fs.Option{{
-       Name: config.ConfigClientID,
-       Help: "Hubic Client Id\nLeave blank normally.",
-   }, {
-       Name: config.ConfigClientSecret,
-       Help: "Hubic Client Secret\nLeave blank normally.",
-   }}, swift.SharedOptions...),
+   Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
    })
}
@@ -353,7 +353,7 @@ func doAuthV1(ctx context.Context, srv *rest.Client, username, password string)
            authCode = strings.Replace(authCode, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
            opts.ExtraHeaders = make(map[string]string)
            opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
-           resp, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
+           _, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
        }
    }
}

@@ -373,6 +373,9 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {
    fmt.Printf("Login Token> ")
    loginToken := config.ReadLine()

+   m.Set(configClientID, "jottacli")
+   m.Set(configClientSecret, "")
+
    token, err := doAuthV2(ctx, srv, loginToken, m)
    if err != nil {
        log.Fatalf("Failed to get oauth token: %s", err)

@@ -384,7 +387,6 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {

    fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
    if config.Confirm(false) {
-       oauthConfig.ClientID = "jottacli"
        oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
        if err != nil {
            log.Fatalf("Failed to load oAuthClient: %s", err)

@@ -551,7 +553,7 @@ func (f *Fs) setEndpointURL() {
    if f.opt.Mountpoint == "" {
        f.opt.Mountpoint = defaultMountpoint
    }
-   f.endpointURL = urlPathEscape(path.Join(f.user, f.opt.Device, f.opt.Mountpoint))
+   f.endpointURL = path.Join(f.user, f.opt.Device, f.opt.Mountpoint)
}

// readMetaDataForPath reads the metadata from the path

@@ -728,6 +730,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    // Renew the token in the background
    f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
        _, err := f.readMetaDataForPath(ctx, "")
+       if err == fs.ErrorNotAFile {
+           err = nil
+       }
        return err
    })

@@ -1070,8 +1075,8 @@ func (f *Fs) Precision() time.Duration {
}

// Purge deletes all the files and the container
-func (f *Fs) Purge(ctx context.Context) error {
-   return f.purgeCheck(ctx, "", false)
+func (f *Fs) Purge(ctx context.Context, dir string) error {
+   return f.purgeCheck(ctx, dir, false)
}

// copyOrMoves copies or moves directories or files depending on the method parameter

@@ -1087,8 +1092,7 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
    var resp *http.Response
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
-       retry, _ := shouldRetry(resp, err)
-       return (retry && resp.StatusCode != 500), err
+       return shouldRetry(resp, err)
    })
    if err != nil {
        return nil, err

@@ -1192,18 +1196,6 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string

    _, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)

-   // surprise! jottacloud fucked up dirmove - the api spits out an error but
-   // dir gets moved regardless
-   if apiErr, ok := err.(*api.Error); ok {
-       if apiErr.StatusCode == 500 {
-           _, err := f.NewObject(ctx, dstRemote)
-           if err == fs.ErrorNotAFile {
-               log.Printf("FIXME: ignoring DirMove error - move succeeded anyway\n")
-               return nil
-           }
-           return err
-       }
-   }
    if err != nil {
        return errors.Wrap(err, "couldn't move directory")
    }

@@ -1477,6 +1469,8 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+   o.fs.tokenRenewer.Start()
+   defer o.fs.tokenRenewer.Stop()
    size := src.Size()
    md5String, err := src.Hash(ctx, hash.MD5)
    if err != nil || md5String == "" {
@@ -4,6 +4,7 @@ package local

import (
    "context"
+   "os"
    "syscall"

    "github.com/pkg/errors"

@@ -15,6 +16,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
    var s syscall.Statfs_t
    err := syscall.Statfs(f.root, &s)
    if err != nil {
+       if os.IsNotExist(err) {
+           return nil, fs.ErrorDirNotFound
+       }
        return nil, errors.Wrap(err, "failed to read disk usage")
    }
    bs := int64(s.Bsize) // nolint: unconvert

@@ -1,4 +1,4 @@
-// +build windows plan9
+// +build windows plan9 js

package local

@@ -1,4 +1,4 @@
-// +build !windows,!plan9
+// +build !windows,!plan9,!js

package local

@@ -144,6 +144,17 @@ the OS zeros the file. However sparse files may be undesirable as they
cause disk fragmentation and can be slow to work with.`,
    Default:  false,
    Advanced: true,
+}, {
+   Name: "no_set_modtime",
+   Help: `Disable setting modtime
+
+Normally rclone updates modification time of files after they are done
+uploading. This can cause permissions issues on Linux platforms when
+the user rclone is running as does not own the file uploaded, such as
+when copying to a CIFS mount owned by another user. If this option is
+enabled, rclone will no longer update the modtime after copying a file.`,
+   Default:  false,
+   Advanced: true,
}, {
    Name: config.ConfigEncoding,
    Help: config.ConfigEncodingHelp,

@@ -166,6 +177,7 @@ type Options struct {
    CaseSensitive   bool                 `config:"case_sensitive"`
    CaseInsensitive bool                 `config:"case_insensitive"`
    NoSparse        bool                 `config:"no_sparse"`
+   NoSetModTime    bool                 `config:"no_set_modtime"`
    Enc             encoder.MultiEncoder `config:"encoding"`
}

@@ -542,6 +554,10 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {

// Precision of the file system
func (f *Fs) Precision() (precision time.Duration) {
+   if f.opt.NoSetModTime {
+       return fs.ModTimeNotSupported
+   }
+
    f.precisionOk.Do(func() {
        f.precision = f.readPrecision()
    })

@@ -600,20 +616,25 @@ func (f *Fs) readPrecision() (precision time.Duration) {
    return
}

-// Purge deletes all the files and directories
+// Purge deletes all the files in the directory
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
-func (f *Fs) Purge(ctx context.Context) error {
-   fi, err := f.lstat(f.root)
+func (f *Fs) Purge(ctx context.Context, dir string) error {
+   dir = f.localPath(dir)
+   fi, err := f.lstat(dir)
    if err != nil {
+       // already purged
+       if os.IsNotExist(err) {
+           return fs.ErrorDirNotFound
+       }
        return err
    }
    if !fi.Mode().IsDir() {
-       return errors.Errorf("can't purge non directory: %q", f.root)
+       return errors.Errorf("can't purge non directory: %q", dir)
    }
-   return os.RemoveAll(f.root)
+   return os.RemoveAll(dir)
}

// Move src to this remote using server side move operations.

@@ -878,6 +899,9 @@ func (o *Object) ModTime(ctx context.Context) time.Time {

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
+   if o.fs.opt.NoSetModTime {
+       return nil
+   }
    var err error
    if o.translatedLink {
        err = lChtimes(o.path, modTime, modTime)

@@ -1189,7 +1213,7 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
    // Set the file to be a sparse file (important on Windows)
    err = file.SetSparse(out)
    if err != nil {
-       fs.Debugf(o, "Failed to set sparse: %v", err)
+       fs.Errorf(o, "Failed to set sparse: %v", err)
    }
}

@@ -1207,6 +1231,15 @@ func (o *Object) setMetadata(info os.FileInfo) {
    o.modTime = info.ModTime()
    o.mode = info.Mode()
    o.fs.objectMetaMu.Unlock()
+   // On Windows links read as 0 size so set the correct size here
+   if runtime.GOOS == "windows" && o.translatedLink {
+       linkdst, err := os.Readlink(o.path)
+       if err != nil {
+           fs.Errorf(o, "Failed to read link size: %v", err)
+       } else {
+           o.size = int64(len(linkdst))
+       }
+   }
}

// Stat an Object into info
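How the no_set_modtime hunks above fit together: with the option on, Precision advertises that modtimes are unsupported, so rclone stops comparing and rewriting them, and SetModTime degrades to a deliberate no-op. A sketch of that interaction (fsys/object and the sentinel value are stand-ins, not the real local backend types or the real fs.ModTimeNotSupported constant):

    package main

    import (
        "os"
        "time"
    )

    var modTimeNotSupported = time.Duration(1 << 62) // stand-in sentinel

    type fsys struct{ noSetModTime bool }
    type object struct {
        fs   *fsys
        path string
    }

    func (f *fsys) Precision() time.Duration {
        if f.noSetModTime {
            return modTimeNotSupported // tells callers not to bother with modtimes
        }
        return time.Second
    }

    func (o *object) SetModTime(t time.Time) error {
        if o.fs.noSetModTime {
            return nil // deliberately a no-op
        }
        return os.Chtimes(o.path, t, t)
    }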
@@ -6,7 +6,6 @@ import (
    "os"
    "path"
    "path/filepath"
-   "runtime"
    "testing"
    "time"

@@ -89,9 +88,6 @@ func TestSymlink(t *testing.T) {

    // Object viewed as symlink
    file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
-   if runtime.GOOS == "windows" {
-       file2.Size = 0 // symlinks are 0 length under Windows
-   }

    // Object viewed as destination
    file2d := fstest.NewItem("symlink.txt", "hello", modTime1)

@@ -121,9 +117,6 @@ func TestSymlink(t *testing.T) {
    // Create a symlink
    modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
    file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
-   if runtime.GOOS == "windows" {
-       file3.Size = 0 // symlinks are 0 length under Windows
-   }
    fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
    if haveLChtimes {
        fstest.CheckItems(t, r.Flocal, file1, file2, file3)

@@ -142,9 +135,7 @@ func TestSymlink(t *testing.T) {
    o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
    require.NoError(t, err)
    assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
-   if runtime.GOOS != "windows" {
-       assert.Equal(t, int64(8), o.Size())
-   }
+   assert.Equal(t, int64(8), o.Size())

    // Check that NewObject doesn't see the non suffixed version
    _, err = r.Flocal.NewObject(ctx, "symlink2.txt")
@@ -117,7 +117,7 @@ type ListItem struct {
    Name      string `json:"name"`
    Home      string `json:"home"`
    Size      int64  `json:"size"`
-   Mtime     int64  `json:"mtime,omitempty"`
+   Mtime     uint64 `json:"mtime,omitempty"`
    Hash      string `json:"hash,omitempty"`
    VirusScan string `json:"virus_scan,omitempty"`
    Tree      string `json:"tree,omitempty"`

@@ -159,71 +159,6 @@ type FolderInfoResponse struct {
    Email string `json:"email"`
}

-// ShardInfoResponse ...
-type ShardInfoResponse struct {
-   Email string `json:"email"`
-   Body  struct {
-       Video []struct {
-           Count string `json:"count"`
-           URL   string `json:"url"`
-       } `json:"video"`
-       ViewDirect []struct {
-           Count string `json:"count"`
-           URL   string `json:"url"`
-       } `json:"view_direct"`
-       WeblinkView []struct {
-           Count string `json:"count"`
-           URL   string `json:"url"`
-       } `json:"weblink_view"`
-       WeblinkVideo []struct {
-           Count string `json:"count"`
-           URL   string `json:"url"`
-       } `json:"weblink_video"`
-       WeblinkGet []struct {
-           Count int    `json:"count"`
-           URL   string `json:"url"`
-       } `json:"weblink_get"`
-       Stock []struct {
-           Count string `json:"count"`
-           URL   string `json:"url"`
-       } `json:"stock"`
-       WeblinkThumbnails []struct {
-           Count string `json:"count"`
-           URL   string `json:"url"`
-       } `json:"weblink_thumbnails"`
-       PublicUpload []struct {
-           Count string `json:"count"`
-           URL   string `json:"url"`
-       } `json:"public_upload"`
-       Auth []struct {
-           Count string `json:"count"`
-           URL   string `json:"url"`
-       } `json:"auth"`
-       Web []struct {
-           Count string `json:"count"`
-           URL   string `json:"url"`
-       } `json:"web"`
-       View []struct {
-           Count string `json:"count"`
-           URL   string `json:"url"`
-       } `json:"view"`
-       Upload []struct {
-           Count string `json:"count"`
-           URL   string `json:"url"`
-       } `json:"upload"`
-       Get []struct {
-           Count string `json:"count"`
-           URL   string `json:"url"`
-       } `json:"get"`
-       Thumbnails []struct {
-           Count string `json:"count"`
-           URL   string `json:"url"`
-       } `json:"thumbnails"`
-   } `json:"body"`
-   Time   int64 `json:"time"`
-   Status int   `json:"status"`
-}
-
// CleanupResponse ...
type CleanupResponse struct {
    Email string `json:"email"`
@@ -37,6 +37,7 @@ import (
    "github.com/rclone/rclone/lib/encoder"
    "github.com/rclone/rclone/lib/oauthutil"
    "github.com/rclone/rclone/lib/pacer"
+   "github.com/rclone/rclone/lib/readers"
    "github.com/rclone/rclone/lib/rest"

    "github.com/pkg/errors"

@@ -655,9 +656,14 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
    if err != nil {
        return nil, -1, err
    }
+   mTime := int64(item.Mtime)
+   if mTime < 0 {
+       fs.Debugf(f, "Fixing invalid timestamp %d on mailru file %q", mTime, remote)
+       mTime = 0
+   }
    switch item.Kind {
    case "folder":
-       dir := fs.NewDir(remote, time.Unix(item.Mtime, 0)).SetSize(item.Size)
+       dir := fs.NewDir(remote, time.Unix(mTime, 0)).SetSize(item.Size)
        dirSize := item.Count.Files + item.Count.Folders
        return dir, dirSize, nil
    case "file":

@@ -671,7 +677,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
        hasMetaData: true,
        size:        item.Size,
        mrHash:      binHash,
-       modTime:     time.Unix(item.Mtime, 0),
+       modTime:     time.Unix(mTime, 0),
    }
    return file, -1, nil
default:

@@ -1162,12 +1168,12 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    return f.purgeWithCheck(ctx, dir, true, "rmdir")
}

-// Purge deletes all the files and the root directory
+// Purge deletes all the files in the directory
// Optional interface: Only implement this if you have a way of deleting
// all the files quicker than just running Remove() on the result of List()
-func (f *Fs) Purge(ctx context.Context) error {
+func (f *Fs) Purge(ctx context.Context, dir string) error {
    // fs.Debugf(f, ">>> Purge")
-   return f.purgeWithCheck(ctx, "", false, "purge")
+   return f.purgeWithCheck(ctx, dir, false, "purge")
}

// purgeWithCheck() removes the root directory.

@@ -1861,30 +1867,30 @@ func (f *Fs) uploadShard(ctx context.Context) (string, error) {
        return f.shardURL, nil
    }

-   token, err := f.accessToken()
-   if err != nil {
-       return "", err
-   }
-
    opts := rest.Opts{
-       Method: "GET",
-       Path:   "/api/m1/dispatcher",
-       Parameters: url.Values{
-           "client_id":    {api.OAuthClientID},
-           "access_token": {token},
-       },
+       RootURL: api.DispatchServerURL,
+       Method:  "GET",
+       Path:    "/u",
    }

-   var info api.ShardInfoResponse
+   var (
+       res *http.Response
+       url string
+       err error
+   )
    err = f.pacer.Call(func() (bool, error) {
-       res, err := f.srv.CallJSON(ctx, &opts, nil, &info)
-       return shouldRetry(res, err, f, &opts)
+       res, err = f.srv.Call(ctx, &opts)
+       if err == nil {
+           url, err = readBodyWord(res)
+       }
+       return fserrors.ShouldRetry(err), err
    })
    if err != nil {
+       closeBody(res)
        return "", err
    }

-   f.shardURL = info.Body.Upload[0].URL
+   f.shardURL = url
    f.shardExpiry = time.Now().Add(shardExpirySec * time.Second)
    fs.Debugf(f, "new upload shard: %s", f.shardURL)

@@ -2116,7 +2122,18 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
        return nil, err
    }

-   start, end, partial := getTransferRange(o.size, options...)
+   start, end, partialRequest := getTransferRange(o.size, options...)
+
+   headers := map[string]string{
+       "Accept":       "*/*",
+       "Content-Type": "application/octet-stream",
+   }
+   if partialRequest {
+       rangeStr := fmt.Sprintf("bytes=%d-%d", start, end-1)
+       headers["Range"] = rangeStr
+       // headers["Content-Range"] = rangeStr
+       headers["Accept-Ranges"] = "bytes"
+   }

    // TODO: set custom timeouts
    opts := rest.Opts{

@@ -2127,10 +2144,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
            "client_id": {api.OAuthClientID},
            "token":     {token},
        },
-       ExtraHeaders: map[string]string{
-           "Accept": "*/*",
-           "Range":  fmt.Sprintf("bytes=%d-%d", start, end-1),
-       },
+       ExtraHeaders: headers,
    }

    var res *http.Response

@@ -2151,18 +2165,36 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
        return nil, err
    }

+   // Server should respond with Status 206 and Content-Range header to a range
+   // request. Status 200 (and no Content-Range) means a full-content response.
+   partialResponse := res.StatusCode == 206
+
-   var hasher gohash.Hash
-   if !partial {
+   var (
+       hasher     gohash.Hash
+       wrapStream io.ReadCloser
+   )
+   if !partialResponse {
        // Cannot check hash of partial download
        hasher = mrhash.New()
    }
-   wrapStream := &endHandler{
+   wrapStream = &endHandler{
        ctx:    ctx,
        stream: res.Body,
        hasher: hasher,
        o:      o,
        server: server,
    }
+   if partialRequest && !partialResponse {
+       fs.Debugf(o, "Server returned full content instead of range")
+       if start > 0 {
+           // Discard the beginning of the data
+           _, err = io.CopyN(ioutil.Discard, wrapStream, start)
+           if err != nil {
+               return nil, err
+           }
+       }
+       wrapStream = readers.NewLimitedReadCloser(wrapStream, end-start)
+   }
    return wrapStream, nil
}
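The Open changes above make the backend tolerate a server that ignores a Range request: if a 200 full-content response comes back where a 206 partial was expected, the beginning of the body is discarded and the stream is capped so the caller still sees exactly the requested slice. A standalone sketch of that client-side fallback (sliceBody is an illustrative helper, not rclone's API):

    package main

    import (
        "io"
        "io/ioutil"
    )

    // sliceBody emulates bytes=start-(end-1) on a full-content body.
    func sliceBody(body io.Reader, start, end int64) (io.Reader, error) {
        if start > 0 {
            // throw away everything before the requested offset
            if _, err := io.CopyN(ioutil.Discard, body, start); err != nil {
                return nil, err
            }
        }
        return io.LimitReader(body, end-start), nil
    }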
@@ -669,13 +669,13 @@ func (f *Fs) Precision() time.Duration {
    return fs.ModTimeNotSupported
}

-// Purge deletes all the files and the container
+// Purge deletes all the files in the directory
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
-func (f *Fs) Purge(ctx context.Context) error {
-   return f.purgeCheck("", false)
+func (f *Fs) Purge(ctx context.Context, dir string) error {
+   return f.purgeCheck(dir, false)
}

// move a file or folder (srcFs, srcRemote, info) to (f, dstRemote)

@@ -410,3 +410,28 @@ func (i *Item) GetParentReference() *ItemReference {
func (i *Item) IsRemote() bool {
    return i.RemoteItem != nil
}
+
+// User details for each version
+type User struct {
+   Email       string `json:"email"`
+   ID          string `json:"id"`
+   DisplayName string `json:"displayName"`
+}
+
+// LastModifiedBy for each version
+type LastModifiedBy struct {
+   User User `json:"user"`
+}
+
+// Version info
+type Version struct {
+   ID                   string         `json:"id"`
+   LastModifiedDateTime time.Time      `json:"lastModifiedDateTime"`
+   Size                 int            `json:"size"`
+   LastModifiedBy       LastModifiedBy `json:"lastModifiedBy"`
+}
+
+// VersionsResponse is returned from /versions
+type VersionsResponse struct {
+   Versions []Version `json:"value"`
+}

@@ -14,6 +14,7 @@ import (
    "path"
    "strconv"
    "strings"
+   "sync"
    "time"

    "github.com/pkg/errors"

@@ -26,6 +27,8 @@ import (
    "github.com/rclone/rclone/fs/config/obscure"
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/fs/hash"
+   "github.com/rclone/rclone/fs/operations"
+   "github.com/rclone/rclone/fs/walk"
    "github.com/rclone/rclone/lib/atexit"
    "github.com/rclone/rclone/lib/dircache"
    "github.com/rclone/rclone/lib/encoder"

@@ -238,13 +241,7 @@ func init() {
            m.Set(configDriveType, rootItem.ParentReference.DriveType)
            config.SaveConfig()
        },
-   Options: []fs.Option{{
-       Name: config.ConfigClientID,
-       Help: "Microsoft App Client Id\nLeave blank normally.",
-   }, {
-       Name: config.ConfigClientSecret,
-       Help: "Microsoft App Client Secret\nLeave blank normally.",
-   }, {
+   Options: append(oauthutil.SharedOptions, []fs.Option{{
        Name: "chunk_size",
        Help: `Chunk size to upload files with - must be multiple of 320k (327,680 bytes).

@@ -284,6 +281,23 @@ different Onedrives. Note that this isn't enabled by default
because it isn't easy to tell if it will work between any two
configurations.`,
        Advanced: true,
+   }, {
+       Name:    "no_versions",
+       Default: false,
+       Help: `Remove all versions on modifying operations
+
+Onedrive for business creates versions when rclone uploads new files
+overwriting an existing one and when it sets the modification time.
+
+These versions take up space out of the quota.
+
+This flag checks for versions after file upload and setting
+modification time and removes all but the last version.
+
+**NB** Onedrive personal can't currently delete versions so don't use
+this flag there.
+`,
+       Advanced: true,
    }, {
        Name: config.ConfigEncoding,
        Help: config.ConfigEncodingHelp,

@@ -330,7 +344,7 @@ configurations.`,
            encoder.EncodeRightSpace |
            encoder.EncodeWin |
            encoder.EncodeInvalidUtf8),
-   }},
+   }}...),
    })
}

@@ -341,6 +355,7 @@ type Options struct {
    DriveType               string               `config:"drive_type"`
    ExposeOneNoteFiles      bool                 `config:"expose_onenote_files"`
    ServerSideAcrossConfigs bool                 `config:"server_side_across_configs"`
+   NoVersions              bool                 `config:"no_versions"`
    Enc                     encoder.MultiEncoder `config:"encoding"`
}

@@ -1073,13 +1088,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
    return dstObj, nil
}

-// Purge deletes all the files and the container
+// Purge deletes all the files in the directory
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
-func (f *Fs) Purge(ctx context.Context) error {
-   return f.purgeCheck(ctx, "", false)
+func (f *Fs) Purge(ctx context.Context, dir string) error {
+   return f.purgeCheck(ctx, dir, false)
}

// Move src to this remote using server side move operations.

@@ -1232,6 +1247,10 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
        return nil, errors.Wrap(err, "about failed")
    }
    q := drive.Quota
+   // On (some?) Onedrive sharepoints these are all 0 so return unknown in that case
+   if q.Total == 0 && q.Used == 0 && q.Deleted == 0 && q.Remaining == 0 {
+       return &fs.Usage{}, nil
+   }
    usage = &fs.Usage{
        Total: fs.NewUsageValue(q.Total), // quota of bytes that can be used
        Used:  fs.NewUsageValue(q.Used),  // bytes in use

@@ -1275,6 +1294,73 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
    return result.Link.WebURL, nil
}

+// CleanUp deletes all the hidden files.
+func (f *Fs) CleanUp(ctx context.Context) error {
+   token := make(chan struct{}, fs.Config.Checkers)
+   var wg sync.WaitGroup
+   err := walk.Walk(ctx, f, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
+       err = entries.ForObjectError(func(obj fs.Object) error {
+           o, ok := obj.(*Object)
+           if !ok {
+               return errors.New("internal error: not a onedrive object")
+           }
+           wg.Add(1)
+           token <- struct{}{}
+           go func() {
+               defer func() {
+                   <-token
+                   wg.Done()
+               }()
+               err := o.deleteVersions(ctx)
+               if err != nil {
+                   fs.Errorf(o, "Failed to remove versions: %v", err)
+               }
+           }()
+           return nil
+       })
+       wg.Wait()
+       return err
+   })
+   return err
+}
+
+// Finds and removes any old versions for o
+func (o *Object) deleteVersions(ctx context.Context) error {
+   opts := newOptsCall(o.id, "GET", "/versions")
+   var versions api.VersionsResponse
+   err := o.fs.pacer.Call(func() (bool, error) {
+       resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &versions)
+       return shouldRetry(resp, err)
+   })
+   if err != nil {
+       return err
+   }
+   if len(versions.Versions) < 2 {
+       return nil
+   }
+   for _, version := range versions.Versions[1:] {
+       err = o.deleteVersion(ctx, version.ID)
+       if err != nil {
+           return err
+       }
+   }
+   return nil
+}
+
+// Finds and removes any old versions for o
+func (o *Object) deleteVersion(ctx context.Context, ID string) error {
+   if operations.SkipDestructive(ctx, fmt.Sprintf("%s of %s", ID, o.remote), "delete version") {
+       return nil
+   }
+   fs.Infof(o, "removing version %q", ID)
+   opts := newOptsCall(o.id, "DELETE", "/versions/"+ID)
+   opts.NoResponse = true
+   return o.fs.pacer.Call(func() (bool, error) {
+       resp, err := o.fs.srv.Call(ctx, &opts)
+       return shouldRetry(resp, err)
+   })
+}
+
// ------------------------------------------------------------

// Fs returns the parent Fs
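CleanUp above bounds its concurrency with a buffered channel used as a semaphore: each worker takes a token before starting and releases it when done, so at most fs.Config.Checkers version-deletions run at once. The same idiom in isolation, as a self-contained sketch:

    package main

    import "sync"

    // forEachLimited runs do on every item with at most limit goroutines.
    func forEachLimited(items []string, limit int, do func(string)) {
        token := make(chan struct{}, limit)
        var wg sync.WaitGroup
        for _, it := range items {
            wg.Add(1)
            token <- struct{}{} // blocks once limit workers are running
            go func(it string) {
                defer func() {
                    <-token // free a slot
                    wg.Done()
                }()
                do(it)
            }(it)
        }
        wg.Wait()
    }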
@@ -1438,6 +1524,13 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
|
||||
resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
// Remove versions if required
|
||||
if o.fs.opt.NoVersions {
|
||||
err := o.deleteVersions(ctx)
|
||||
if err != nil {
|
||||
fs.Errorf(o, "Failed to remove versions: %v", err)
|
||||
}
|
||||
}
|
||||
return info, err
|
||||
}
|
||||
|
||||
@@ -1744,6 +1837,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return err
|
||||
}
|
||||
|
||||
// If updating the file then remove versions
|
||||
if o.fs.opt.NoVersions && o.hasMetaData {
|
||||
err = o.deleteVersions(ctx)
|
||||
if err != nil {
|
||||
fs.Errorf(o, "Failed to remove versions: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return o.setMetaData(info)
|
||||
}
|
||||
|
||||
@@ -1840,6 +1941,7 @@ var (
|
||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.CleanUpper = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.MimeTyper = &Object{}
|
||||
_ fs.IDer = &Object{}
|
||||
|
||||
@@ -506,13 +506,13 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
return nil
|
||||
}
|
||||
|
||||
// Purge deletes all the files and the container
|
||||
// Purge deletes all the files in the directory
|
||||
//
|
||||
// Optional interface: Only implement this if you have a way of
|
||||
// deleting all the files quicker than just running Remove() on the
|
||||
// result of List()
|
||||
func (f *Fs) Purge(ctx context.Context) error {
|
||||
return f.purgeCheck(ctx, "", false)
|
||||
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||
return f.purgeCheck(ctx, dir, false)
|
||||
}
// Return an Object from a path

@@ -646,7 +646,6 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
    400, // Bad request (seen in "Next token is expired")
    401, // Unauthorized (seen in "Token has expired")
    408, // Request Timeout
    423, // Locked - get this on folders sometimes

@@ -104,8 +104,9 @@ type ItemResult struct {

// Hashes contains the supported hashes
type Hashes struct {
    SHA1 string `json:"sha1"`
    MD5  string `json:"md5"`
    SHA1   string `json:"sha1"`
    MD5    string `json:"md5"`
    SHA256 string `json:"sha256"`
}

// UploadFileResponse is the response from /uploadfile

@@ -103,13 +103,7 @@ func init() {
            log.Fatalf("Failed to configure token: %v", err)
        }
    },
    Options: []fs.Option{{
        Name: config.ConfigClientID,
        Help: "Pcloud App Client Id\nLeave blank normally.",
    }, {
        Name: config.ConfigClientSecret,
        Help: "Pcloud App Client Secret\nLeave blank normally.",
    }, {
    Options: append(oauthutil.SharedOptions, []fs.Option{{
        Name:     config.ConfigEncoding,
        Help:     config.ConfigEncodingHelp,
        Advanced: true,

@@ -128,10 +122,20 @@ func init() {
        Name: "hostname",
        Help: `Hostname to connect to.

This is normally set when rclone initially does the oauth connection.`,
This is normally set when rclone initially does the oauth connection,
however you will need to set it by hand if you are using remote config
with rclone authorize.
`,
        Default:  defaultHostname,
        Advanced: true,
    }},
        Examples: []fs.OptionExample{{
            Value: defaultHostname,
            Help:  "Original/US region",
        }, {
            Value: "eapi.pcloud.com",
            Help:  "EU region",
        }},
    }}...),
})
}

@@ -671,13 +675,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
    return dstObj, nil
}

// Purge deletes all the files and the container
// Purge deletes all the files in the directory
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) error {
    return f.purgeCheck(ctx, "", false)
func (f *Fs) Purge(ctx context.Context, dir string) error {
    return f.purgeCheck(ctx, dir, false)
}

// CleanUp empties the trash

@@ -820,14 +824,19 @@ func (f *Fs) linkDir(ctx context.Context, dirID string, expire fs.Duration) (str
}

func (f *Fs) linkFile(ctx context.Context, path string, expire fs.Duration) (string, error) {
    obj, err := f.NewObject(ctx, path)
    if err != nil {
        return "", err
    }
    o := obj.(*Object)
    opts := rest.Opts{
        Method:     "POST",
        Path:       "/getfilepublink",
        Parameters: url.Values{},
    }
    var result api.PubLinkResult
    opts.Parameters.Set("path", path)
    err := f.pacer.Call(func() (bool, error) {
    opts.Parameters.Set("fileid", fileIDtoNumber(o.id))
    err = f.pacer.Call(func() (bool, error) {
        resp, err := f.srv.CallJSON(ctx, &opts, nil, &result)
        err = result.Error.Update(err)
        return shouldRetry(resp, err)

@@ -840,11 +849,6 @@ func (f *Fs) linkFile(ctx context.Context, path string, expire fs.Duration) (str

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
    err := f.dirCache.FindRoot(ctx, false)
    if err != nil {
        return "", err
    }

    dirID, err := f.dirCache.FindDir(ctx, remote, false)
    if err == fs.ErrorDirNotFound {
        return f.linkFile(ctx, remote, expire)

@@ -881,6 +885,13 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
    // EU region supports SHA1 and SHA256 (but rclone doesn't
    // support SHA256 yet).
    //
    // https://forum.rclone.org/t/pcloud-to-local-no-hashes-in-common/19440
    if f.opt.Hostname == "eapi.pcloud.com" {
        return hash.Set(hash.SHA1)
    }
    return hash.Set(hash.MD5 | hash.SHA1)
}

@@ -609,13 +609,13 @@ func (f *Fs) Precision() time.Duration {
    return fs.ModTimeNotSupported
}

// Purge deletes all the files and the container
// Purge deletes all the files in the directory
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) error {
    return f.purgeCheck(ctx, "", false)
func (f *Fs) Purge(ctx context.Context, dir string) error {
    return f.purgeCheck(ctx, dir, false)
}

// move a file or folder

@@ -458,10 +458,9 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
    return err
}

// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error) {
    // defer log.Trace(f, "dir=%v", dir)("err=%v", &err)

    root := strings.Trim(path.Join(f.root, dir), "/")

@@ -478,18 +477,20 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
    }
    dirID := atoi(directoryID)

    // check directory empty
    var children []putio.File
    err = f.pacer.Call(func() (bool, error) {
        // fs.Debugf(f, "listing files: %d", dirID)
        children, _, err = f.client.Files.List(ctx, dirID)
        return shouldRetry(err)
    })
    if err != nil {
        return errors.Wrap(err, "Rmdir")
    }
    if len(children) != 0 {
        return errors.New("directory not empty")
    if check {
        // check directory empty
        var children []putio.File
        err = f.pacer.Call(func() (bool, error) {
            // fs.Debugf(f, "listing files: %d", dirID)
            children, _, err = f.client.Files.List(ctx, dirID)
            return shouldRetry(err)
        })
        if err != nil {
            return errors.Wrap(err, "Rmdir")
        }
        if len(children) != 0 {
            return errors.New("directory not empty")
        }
    }

    // remove it

@@ -502,35 +503,26 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
    return err
}

// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
    return f.purgeCheck(ctx, dir, true)
}

// Precision returns the precision
func (f *Fs) Precision() time.Duration {
    return time.Second
}

// Purge deletes all the files and the container
// Purge deletes all the files in the directory
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) (err error) {
func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
    // defer log.Trace(f, "")("err=%v", &err)

    if f.root == "" {
        return errors.New("can't purge root directory")
    }
    rootIDs, err := f.dirCache.RootID(ctx, false)
    if err != nil {
        return err
    }
    rootID := atoi(rootIDs)
    // Let putio delete the filesystem tree
    err = f.pacer.Call(func() (bool, error) {
        // fs.Debugf(f, "deleting file: %d", rootID)
        err = f.client.Files.Delete(ctx, rootID)
        return shouldRetry(err)
    })
    f.dirCache.ResetRoot()
    return err
    return f.purgeCheck(ctx, dir, false)
}
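The putio change above follows a pattern repeated in several backends in this release (seafile below does the same): both Rmdir and Purge funnel into a single purgeCheck helper, and a check flag decides whether an empty-directory test runs first. A hedged sketch of the shape, with hypothetical helpers standing in for each backend's own listing and delete calls:

// Sketch of the shared-helper pattern; names mirror the diffs above,
// dirIsEmpty and deleteDirTree are hypothetical stand-ins.
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
    if check {
        empty, err := f.dirIsEmpty(ctx, dir) // hypothetical helper
        if err != nil {
            return err
        }
        if !empty {
            return fs.ErrorDirectoryNotEmpty
        }
    }
    return f.deleteDirTree(ctx, dir) // hypothetical helper: recursive delete
}

func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    return f.purgeCheck(ctx, dir, true) // refuse if not empty
}

func (f *Fs) Purge(ctx context.Context, dir string) error {
    return f.purgeCheck(ctx, dir, false) // delete regardless
}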
// Copy src to this remote using server side copy operations.

@@ -1,7 +1,7 @@
// Package qingstor provides an interface to QingStor object storage
// Home: https://www.qingcloud.com/

// +build !plan9
// +build !plan9,!js

package qingstor

@@ -1,6 +1,6 @@
// Test QingStor filesystem interface

// +build !plan9
// +build !plan9,!js

package qingstor

@@ -1,6 +1,6 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build plan9
// +build plan9 js

package qingstor

@@ -1,6 +1,6 @@
// Upload object to QingStor

// +build !plan9
// +build !plan9,!js

package qingstor

684 backend/s3/s3.go (file diff suppressed because it is too large)

@@ -46,7 +46,7 @@ type Library struct {
    Encrypted bool   `json:"encrypted"`
    Owner     string `json:"owner"`
    ID        string `json:"id"`
    Size      int    `json:"size"`
    Size      int64  `json:"size"`
    Name      string `json:"name"`
    Modified  int64  `json:"mtime"`
}
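The seafile Library.Size change from int to int64 matters on 32-bit builds, where int is 32 bits and a library over 2 GiB cannot be decoded into it. A small illustration (struct names are hypothetical):

type lib32 struct {
    Size int `json:"size"` // old field type
}
type lib64 struct {
    Size int64 `json:"size"` // new field type
}

data := []byte(`{"size": 3221225472}`) // a 3 GiB library
// On a 32-bit platform (e.g. GOARCH=386) json.Unmarshal into lib32
// fails with an UnmarshalTypeError because 3221225472 overflows int;
// lib64 decodes it on every platform.
var l lib64
err := json.Unmarshal(data, &l)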
@@ -584,29 +584,38 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
    return nil
}

// Rmdir removes the directory or library if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
    libraryName, dirPath := f.splitPath(dir)
    libraryID, err := f.getLibraryID(ctx, libraryName)
    if err != nil {
        return err
    }

    directoryEntries, err := f.getDirectoryEntries(ctx, libraryID, dirPath, false)
    if err != nil {
        return err
    }
    if len(directoryEntries) > 0 {
        return fs.ErrorDirectoryNotEmpty
    if check {
        directoryEntries, err := f.getDirectoryEntries(ctx, libraryID, dirPath, false)
        if err != nil {
            return err
        }
        if len(directoryEntries) > 0 {
            return fs.ErrorDirectoryNotEmpty
        }
    }

    if dirPath == "" || dirPath == "/" {
        return f.deleteLibrary(ctx, libraryID)
    }
    return f.deleteDir(ctx, libraryID, dirPath)
}

// Rmdir removes the directory or library if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    return f.purgeCheck(ctx, dir, true)
}

// ==================== Optional Interface fs.ListRer ====================

// ListR lists the objects and directories of the Fs starting

@@ -893,33 +902,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string

// ==================== Optional Interface fs.Purger ====================

// Purge all files in the root and the root directory
// Purge all files in the directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context) error {
    if f.libraryName == "" {
        return errors.New("Cannot delete from the root of the server. Please select a library")
    }
    libraryID, err := f.getLibraryID(ctx, f.libraryName)
    if err != nil {
        return err
    }
    if f.rootDirectory == "" {
        // Delete library
        err = f.deleteLibrary(ctx, libraryID)
        if err != nil {
            return err
        }
        return nil
    }
    err = f.deleteDir(ctx, libraryID, f.rootDirectory)
    if err != nil {
        return err
    }
    return nil
func (f *Fs) Purge(ctx context.Context, dir string) error {
    return f.purgeCheck(ctx, dir, false)
}

// ==================== Optional Interface fs.CleanUpper ====================

@@ -1014,7 +1004,7 @@ func (f *Fs) listLibraries(ctx context.Context) (entries fs.DirEntries, err erro

    for _, library := range libraries {
        d := fs.NewDir(library.Name, time.Unix(library.Modified, 0))
        d.SetSize(int64(library.Size))
        d.SetSize(library.Size)
        entries = append(entries, d)
    }

@@ -164,6 +164,18 @@ Home directory can be found in a shared folder called "home"
    Default:  false,
    Help:     "Set to skip any symlinks and any other non regular files.",
    Advanced: true,
}, {
    Name:     "subsystem",
    Default:  "sftp",
    Help:     "Specifies the SSH2 subsystem on the remote host.",
    Advanced: true,
}, {
    Name:    "server_command",
    Default: "",
    Help: `Specifies the path or command to run a sftp server on the remote host.

The subsystem option is ignored when server_command is defined.`,
    Advanced: true,
}},
}
fs.Register(fsi)

@@ -187,6 +199,8 @@ type Options struct {
    Md5sumCommand  string `config:"md5sum_command"`
    Sha1sumCommand string `config:"sha1sum_command"`
    SkipLinks      bool   `config:"skip_links"`
    Subsystem      string `config:"subsystem"`
    ServerCommand  string `config:"server_command"`
}

// Fs stores the interface to the remote SFTP files

@@ -290,7 +304,7 @@ func (f *Fs) sftpConnection() (c *conn, err error) {
    if err != nil {
        return nil, errors.Wrap(err, "couldn't connect SSH")
    }
    c.sftpClient, err = sftp.NewClient(c.sshClient)
    c.sftpClient, err = f.newSftpClient(c.sshClient)
    if err != nil {
        _ = c.sshClient.Close()
        return nil, errors.Wrap(err, "couldn't initialise SFTP")

@@ -299,6 +313,35 @@ func (f *Fs) sftpConnection() (c *conn, err error) {
    return c, nil
}

// Creates a new SFTP client on conn, using the specified subsystem
// or sftp server, and zero or more option functions
func (f *Fs) newSftpClient(conn *ssh.Client, opts ...sftp.ClientOption) (*sftp.Client, error) {
    s, err := conn.NewSession()
    if err != nil {
        return nil, err
    }
    pw, err := s.StdinPipe()
    if err != nil {
        return nil, err
    }
    pr, err := s.StdoutPipe()
    if err != nil {
        return nil, err
    }

    if f.opt.ServerCommand != "" {
        if err := s.Start(f.opt.ServerCommand); err != nil {
            return nil, err
        }
    } else {
        if err := s.RequestSubsystem(f.opt.Subsystem); err != nil {
            return nil, err
        }
    }

    return sftp.NewClientPipe(pr, pw, opts...)
}
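newSftpClient works because the SFTP protocol only needs a byte pipe; the server_command option starts an arbitrary remote command instead of the ssh subsystem and wires its stdin/stdout to sftp.NewClientPipe. The same technique works against a locally exec'd server, which can be handy for testing. A sketch under the assumption that an OpenSSH sftp-server binary lives at the path shown:

package main

import (
    "log"
    "os/exec"

    "github.com/pkg/sftp"
)

func main() {
    // Hypothetical local sftp-server path; adjust for your system.
    cmd := exec.Command("/usr/lib/openssh/sftp-server", "-e")
    wr, err := cmd.StdinPipe()
    if err != nil {
        log.Fatal(err)
    }
    rd, err := cmd.StdoutPipe()
    if err != nil {
        log.Fatal(err)
    }
    if err := cmd.Start(); err != nil {
        log.Fatal(err)
    }
    // Speak SFTP over the pipes, exactly as newSftpClient does over SSH.
    client, err := sftp.NewClientPipe(rd, wr)
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()
    entries, err := client.ReadDir(".")
    if err != nil {
        log.Fatal(err)
    }
    for _, e := range entries {
        log.Println(e.Name())
    }
}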
// Get an SFTP connection from the pool, or open a new one
func (f *Fs) getSftpConnection() (c *conn, err error) {
    f.poolMu.Lock()

@@ -1044,7 +1087,7 @@ func shellEscape(str string) string {
func parseHash(bytes []byte) string {
    // For strings with backslash *sum writes a leading \
    // https://unix.stackexchange.com/q/313733/94054
    return strings.Split(strings.TrimLeft(string(bytes), "\\"), " ")[0] // Split at hash / filename separator
    return strings.ToLower(strings.Split(strings.TrimLeft(string(bytes), "\\"), " ")[0]) // Split at hash / filename separator / all convert to lowercase
}
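parseHash now lowercases the result, so output from *sum tools that print uppercase hex still matches rclone's lowercase hashes. Illustrative call (the hash value is the well-known MD5 of the empty string):

sum := parseHash([]byte(`D41D8CD98F00B204E9800998ECF8427E  file.txt`))
// sum == "d41d8cd98f00b204e9800998ecf8427e"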
// Parses the byte array output from the SSH session

@@ -853,8 +853,8 @@ func (f *Fs) Precision() time.Duration {
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) error {
    return f.purgeCheck(ctx, "", false)
func (f *Fs) Purge(ctx context.Context, dir string) error {
    return f.purgeCheck(ctx, dir, false)
}

// updateItem patches a file or folder

@@ -895,12 +895,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
    return dstObj, nil
}

// Purge deletes all the files and the container
// Purge deletes all the files in the directory
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) error {
func (f *Fs) Purge(ctx context.Context, dir string) error {
    // Caution: Deleting a folder may orphan objects. It's important
    // to remove the contents of the folder before you delete the
    // folder. That's because removing a folder using DELETE does not

@@ -920,7 +920,7 @@ func (f *Fs) Purge(ctx context.Context) error {
    if f.opt.HardDelete {
        return fs.ErrorCantPurge
    }
    return f.purgeCheck(ctx, "", false)
    return f.purgeCheck(ctx, dir, false)
}

// moveFile moves a file server side

@@ -840,17 +840,21 @@ func (f *Fs) Precision() time.Duration {
    return time.Nanosecond
}

// Purge deletes all the files and directories
// Purge deletes all the files in the directory
//
// Implemented here so we can make sure we delete directory markers
func (f *Fs) Purge(ctx context.Context) error {
func (f *Fs) Purge(ctx context.Context, dir string) error {
    container, directory := f.split(dir)
    if container == "" {
        return fs.ErrorListBucketRequired
    }
    // Delete all the files including the directory markers
    toBeDeleted := make(chan fs.Object, fs.Config.Transfers)
    delErr := make(chan error, 1)
    go func() {
        delErr <- operations.DeleteFiles(ctx, toBeDeleted)
    }()
    err := f.list(f.rootContainer, f.rootDirectory, f.rootDirectory, f.rootContainer == "", true, true, func(entry fs.DirEntry) error {
    err := f.list(container, directory, f.rootDirectory, false, true, true, func(entry fs.DirEntry) error {
        if o, ok := entry.(*Object); ok {
            toBeDeleted <- o
        }

@@ -864,7 +868,7 @@ func (f *Fs) Purge(ctx context.Context) error {
    if err != nil {
        return err
    }
    return f.Rmdir(ctx, "")
    return f.Rmdir(ctx, dir)
}

// Copy src to this remote using server side copy operations.

@@ -1111,13 +1115,18 @@ func min(x, y int64) int64 {
//
// if except is passed in then segments with that prefix won't be deleted
func (o *Object) removeSegments(except string) error {
    segmentsContainer, prefix, err := o.getSegmentsDlo()
    err = o.fs.listContainerRoot(segmentsContainer, prefix, "", false, true, true, func(remote string, object *swift.Object, isDirectory bool) error {
    segmentsContainer, _, err := o.getSegmentsDlo()
    if err != nil {
        return err
    }
    except = path.Join(o.remote, except)
    // fs.Debugf(o, "segmentsContainer %q prefix %q", segmentsContainer, prefix)
    err = o.fs.listContainerRoot(segmentsContainer, o.remote, "", false, true, true, func(remote string, object *swift.Object, isDirectory bool) error {
        if isDirectory {
            return nil
        }
        if except != "" && strings.HasPrefix(remote, except) {
            // fs.Debugf(o, "Ignoring current segment file %q in container %q", segmentsRoot+remote, segmentsContainer)
            // fs.Debugf(o, "Ignoring current segment file %q in container %q", remote, segmentsContainer)
            return nil
        }
        fs.Debugf(o, "Removing segment file %q in container %q", remote, segmentsContainer)

@@ -1326,7 +1335,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
    // object has been safely uploaded
    o.lastModified = modTime
    o.size = size
    o.md5 = rxHeaders["ETag"]
    o.md5 = rxHeaders["Etag"]
    o.contentType = contentType
    o.headers = headers
    if inCount != nil {

@@ -1,7 +1,6 @@
package union

import (
    "bufio"
    "context"
    "io"
    "sync"

@@ -67,31 +66,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
        obj := entries[0].(*upstream.Object)
        return obj.Update(ctx, in, src, options...)
    }
    // Get multiple reader
    readers := make([]io.Reader, len(entries))
    writers := make([]io.Writer, len(entries))
    errs := Errors(make([]error, len(entries)+1))
    for i := range entries {
        r, w := io.Pipe()
        bw := bufio.NewWriter(w)
        readers[i], writers[i] = r, bw
        defer func() {
            err := w.Close()
            if err != nil {
                panic(err)
            }
        }()
    }
    go func() {
        mw := io.MultiWriter(writers...)
        es := make([]error, len(writers)+1)
        _, es[len(es)-1] = io.Copy(mw, in)
        for i, bw := range writers {
            es[i] = bw.(*bufio.Writer).Flush()
        }
        errs[len(entries)] = Errors(es).Err()
    }()
    // Multi-threading
    readers, errChan := multiReader(len(entries), in)
    errs := Errors(make([]error, len(entries)+1))
    multithread(len(entries), func(i int) {
        if o, ok := entries[i].(*upstream.Object); ok {
            err := o.Update(ctx, readers[i], src, options...)

@@ -100,6 +77,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
            errs[i] = fs.ErrorNotAFile
        }
    })
    errs[len(entries)] = <-errChan
    return errs.Err()
}

@@ -3,7 +3,6 @@ package policy
import (
    "context"
    "math/rand"
    "time"

    "github.com/rclone/rclone/backend/union/upstream"
    "github.com/rclone/rclone/fs"

@@ -20,12 +19,10 @@ type EpRand struct {
}

func (p *EpRand) rand(upstreams []*upstream.Fs) *upstream.Fs {
    rand.Seed(time.Now().Unix())
    return upstreams[rand.Intn(len(upstreams))]
}

func (p *EpRand) randEntries(entries []upstream.Entry) upstream.Entry {
    rand.Seed(time.Now().Unix())
    return entries[rand.Intn(len(entries))]
}

@@ -145,11 +145,16 @@ func (f *Fs) Hashes() hash.Set {
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
    upstreams, err := f.create(ctx, dir)
    if err == fs.ErrorObjectNotFound && dir != parentDir(dir) {
        if err := f.Mkdir(ctx, parentDir(dir)); err != nil {
            return err
    if err == fs.ErrorObjectNotFound {
        if dir != parentDir(dir) {
            if err := f.Mkdir(ctx, parentDir(dir)); err != nil {
                return err
            }
            upstreams, err = f.create(ctx, dir)
        } else if dir == "" {
            // If root dirs not created then create them
            upstreams, err = f.upstreams, nil
        }
        upstreams, err = f.create(ctx, dir)
    }
    if err != nil {
        return err

@@ -162,13 +167,13 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
    return errs.Err()
}

// Purge all files in the root and the root directory
// Purge all files in the directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context) error {
func (f *Fs) Purge(ctx context.Context, dir string) error {
    for _, r := range f.upstreams {
        if r.Features().Purge == nil {
            return fs.ErrorCantPurge

@@ -180,7 +185,10 @@ func (f *Fs) Purge(ctx context.Context) error {
    }
    errs := Errors(make([]error, len(upstreams)))
    multithread(len(upstreams), func(i int) {
        err := upstreams[i].Features().Purge(ctx)
        err := upstreams[i].Features().Purge(ctx, dir)
        if errors.Cause(err) == fs.ErrorDirNotFound {
            err = nil
        }
        errs[i] = errors.Wrap(err, upstreams[i].Name())
    })
    return errs.Err()

@@ -382,6 +390,37 @@ func (f *Fs) DirCacheFlush() {
    })
}

// Tee in into n outputs
//
// When finished read the error from the channel
func multiReader(n int, in io.Reader) ([]io.Reader, <-chan error) {
    readers := make([]io.Reader, n)
    pipeWriters := make([]*io.PipeWriter, n)
    writers := make([]io.Writer, n)
    errChan := make(chan error, 1)
    for i := range writers {
        r, w := io.Pipe()
        bw := bufio.NewWriter(w)
        readers[i], pipeWriters[i], writers[i] = r, w, bw
    }
    go func() {
        mw := io.MultiWriter(writers...)
        es := make([]error, 2*n+1)
        _, copyErr := io.Copy(mw, in)
        es[2*n] = copyErr
        // Flush the buffers
        for i, bw := range writers {
            es[i] = bw.(*bufio.Writer).Flush()
        }
        // Close the underlying pipes
        for i, pw := range pipeWriters {
            es[2*i] = pw.CloseWithError(copyErr)
        }
        errChan <- Errors(es).Err()
    }()
    return readers, errChan
}
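multiReader tees one reader into n; every branch must be drained concurrently, otherwise the pipe writers block, and the flush/close errors arrive on the returned channel once the copy finishes. A minimal consumption sketch, assuming the multiReader function above is in scope:

readers, errChan := multiReader(2, strings.NewReader("payload"))
var wg sync.WaitGroup
for i := range readers {
    wg.Add(1)
    go func(i int) {
        defer wg.Done()
        // Each branch must consume its reader to completion.
        n, err := io.Copy(ioutil.Discard, readers[i])
        fmt.Printf("branch %d copied %d bytes, err: %v\n", i, n, err)
    }(i)
}
wg.Wait()
// Collect any copy/flush/close errors from the tee goroutine.
if err := <-errChan; err != nil {
    fmt.Println("tee error:", err)
}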
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bool, options ...fs.OpenOption) (fs.Object, error) {
    srcPath := src.Remote()
    upstreams, err := f.create(ctx, srcPath)

@@ -409,31 +448,9 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
        e, err := f.wrapEntries(u.WrapObject(o))
        return e.(*Object), err
    }
    errs := Errors(make([]error, len(upstreams)+1))
    // Get multiple reader
    readers := make([]io.Reader, len(upstreams))
    writers := make([]io.Writer, len(upstreams))
    for i := range writers {
        r, w := io.Pipe()
        bw := bufio.NewWriter(w)
        readers[i], writers[i] = r, bw
        defer func() {
            err := w.Close()
            if err != nil {
                panic(err)
            }
        }()
    }
    go func() {
        mw := io.MultiWriter(writers...)
        es := make([]error, len(writers)+1)
        _, es[len(es)-1] = io.Copy(mw, in)
        for i, bw := range writers {
            es[i] = bw.(*bufio.Writer).Flush()
        }
        errs[len(upstreams)] = Errors(es).Err()
    }()
    // Multi-threading
    readers, errChan := multiReader(len(upstreams), in)
    errs := Errors(make([]error, len(upstreams)+1))
    objs := make([]upstream.Entry, len(upstreams))
    multithread(len(upstreams), func(i int) {
        u := upstreams[i]

@@ -450,6 +467,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
        }
        objs[i] = u.WrapObject(o)
    })
    errs[len(upstreams)] = <-errChan
    err = errs.Err()
    if err != nil {
        return nil, err

@@ -504,6 +522,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
    }
    for _, u := range f.upstreams {
        usg, err := u.About(ctx)
        if errors.Cause(err) == fs.ErrorDirNotFound {
            continue
        }
        if err != nil {
            return nil, err
        }

@@ -802,6 +823,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    if err != nil {
        return nil, err
    }
    fs.Debugf(f, "actionPolicy = %T, createPolicy = %T, searchPolicy = %T", f.actionPolicy, f.createPolicy, f.searchPolicy)
    var features = (&fs.Features{
        CaseInsensitive: true,
        DuplicateFiles:  false,

@@ -153,3 +153,29 @@ func TestPolicy2(t *testing.T) {
        UnimplementableObjectMethods: []string{"MimeType"},
    })
}

func TestPolicy3(t *testing.T) {
    if *fstest.RemoteName != "" {
        t.Skip("Skipping as -remote set")
    }
    tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-policy31")
    tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-policy32")
    tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-policy33")
    require.NoError(t, os.MkdirAll(tempdir1, 0744))
    require.NoError(t, os.MkdirAll(tempdir2, 0744))
    require.NoError(t, os.MkdirAll(tempdir3, 0744))
    upstreams := tempdir1 + " " + tempdir2 + " " + tempdir3
    name := "TestUnionPolicy3"
    fstests.Run(t, &fstests.Opt{
        RemoteName: name + ":",
        ExtraConfig: []fstests.ExtraConfigItem{
            {Name: name, Key: "type", Value: "union"},
            {Name: name, Key: "upstreams", Value: upstreams},
            {Name: name, Key: "action_policy", Value: "all"},
            {Name: name, Key: "create_policy", Value: "all"},
            {Name: name, Key: "search_policy", Value: "all"},
        },
        UnimplementableFsMethods:     []string{"OpenWriterAt", "DuplicateFiles"},
        UnimplementableObjectMethods: []string{"MimeType"},
    })
}

@@ -97,6 +97,7 @@ func New(remote, root string, cacheTime time.Duration) (*Fs, error) {
        return nil, err
    }
    f.Fs = myFs
    cache.PinUntilFinalized(f.Fs, f)
    return f, err
}
@@ -335,6 +336,9 @@ func (f *Fs) updateUsageCore(lock bool) error {
    usage, err := f.RootFs.Features().About(ctx)
    if err != nil {
        f.cacheUpdate = false
        if errors.Cause(err) == fs.ErrorDirNotFound {
            err = nil
        }
        return err
    }
    if lock {

@@ -686,8 +686,8 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt

// mkParentDir makes the parent of the native path dirPath if
// necessary and any directories above that
func (f *Fs) mkParentDir(ctx context.Context, dirPath string) error {
    // defer log.Trace(dirPath, "")("")
func (f *Fs) mkParentDir(ctx context.Context, dirPath string) (err error) {
    // defer log.Trace(dirPath, "")("err=%v", &err)
    // chop off trailing / if it exists
    if strings.HasSuffix(dirPath, "/") {
        dirPath = dirPath[:len(dirPath)-1]

@@ -699,6 +699,27 @@ func (f *Fs) mkParentDir(ctx context.Context, dirPath string) error {
    return f.mkdir(ctx, parent)
}

// _dirExists - list dirPath to see if it exists
//
// dirPath should be a native path ending in a /
func (f *Fs) _dirExists(ctx context.Context, dirPath string) (exists bool) {
    opts := rest.Opts{
        Method: "PROPFIND",
        Path:   dirPath,
        ExtraHeaders: map[string]string{
            "Depth": "0",
        },
    }
    var result api.Multistatus
    var resp *http.Response
    var err error
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
        return f.shouldRetry(resp, err)
    })
    return err == nil
}

// low level mkdir, only makes the directory, doesn't attempt to create parents
func (f *Fs) _mkdir(ctx context.Context, dirPath string) error {
    // We assume the root is already created

@@ -719,19 +740,29 @@ func (f *Fs) _mkdir(ctx context.Context, dirPath string) error {
        return f.shouldRetry(resp, err)
    })
    if apiErr, ok := err.(*api.Error); ok {
        // already exists
        // Check if it already exists. The response code for this isn't
        // defined in the RFC so the implementations vary wildly.
        //
        // owncloud returns 423/StatusLocked if the create is already in progress
        if apiErr.StatusCode == http.StatusMethodNotAllowed || apiErr.StatusCode == http.StatusNotAcceptable || apiErr.StatusCode == http.StatusLocked {
            return nil
        }
        // 4shared returns a 409/StatusConflict here which clashes
        // horribly with the intermediate paths don't exist meaning. So
        // check to see if actually exists. This will correct other
        // error codes too.
        if f._dirExists(ctx, dirPath) {
            return nil
        }
    }
    return err
}

// mkdir makes the directory and parents using native paths
func (f *Fs) mkdir(ctx context.Context, dirPath string) error {
    // defer log.Trace(dirPath, "")("")
    err := f._mkdir(ctx, dirPath)
func (f *Fs) mkdir(ctx context.Context, dirPath string) (err error) {
    // defer log.Trace(dirPath, "")("err=%v", &err)
    err = f._mkdir(ctx, dirPath)
    if apiErr, ok := err.(*api.Error); ok {
        // parent does not exist so create it first then try again
        if apiErr.StatusCode == http.StatusConflict {

@@ -868,13 +899,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
    return f.copyOrMove(ctx, src, remote, "COPY")
}

// Purge deletes all the files and the container
// Purge deletes all the files in the directory
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) error {
    return f.purgeCheck(ctx, "", false)
func (f *Fs) Purge(ctx context.Context, dir string) error {
    return f.purgeCheck(ctx, dir, false)
}

// Move src to this remote using server side move operations.

@@ -1098,10 +1129,14 @@ func (o *Object) Storable() bool {
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
    var resp *http.Response
    fs.FixRangeOption(options, o.size)
    opts := rest.Opts{
        Method:  "GET",
        Path:    o.filePath(),
        Options: options,
        ExtraHeaders: map[string]string{
            "Depth": "0",
        },
    }
    err = o.fs.pacer.Call(func() (bool, error) {
        resp, err = o.fs.srv.Call(ctx, &opts)

@@ -67,13 +67,7 @@ func init() {
            return
        }
    },
    Options: []fs.Option{{
        Name: config.ConfigClientID,
        Help: "Yandex Client Id\nLeave blank normally.",
    }, {
        Name: config.ConfigClientSecret,
        Help: "Yandex Client Secret\nLeave blank normally.",
    }, {
    Options: append(oauthutil.SharedOptions, []fs.Option{{
        Name:     config.ConfigEncoding,
        Help:     config.ConfigEncodingHelp,
        Advanced: true,

@@ -81,7 +75,7 @@ func init() {
        // it doesn't seem worth making an exception for this
        Default: (encoder.Display |
            encoder.EncodeInvalidUtf8),
    }},
    }}...),
})
}

@@ -637,13 +631,13 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    return f.purgeCheck(ctx, dir, true)
}

// Purge deletes all the files and the container
// Purge deletes all the files in the directory
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) error {
    return f.purgeCheck(ctx, "", false)
func (f *Fs) Purge(ctx context.Context, dir string) error {
    return f.purgeCheck(ctx, dir, false)
}

// copyOrMoves copies or moves directories or files depending on the method parameter
@@ -36,6 +36,7 @@ var (
    cgo         = flag.Bool("cgo", false, "Use cgo for the build")
    noClean     = flag.Bool("no-clean", false, "Don't clean the build directory before running.")
    tags        = flag.String("tags", "", "Space separated list of build tags")
    buildmode   = flag.String("buildmode", "", "Passed to go build -buildmode flag")
    compileOnly = flag.Bool("compile-only", false, "Just build the binary, not the zip.")
)

@@ -45,7 +46,6 @@ var (
var osarches = []string{
    "windows/386",
    "windows/amd64",
    "darwin/386",
    "darwin/amd64",
    "linux/386",
    "linux/amd64",

@@ -67,6 +67,7 @@ var osarches = []string{
    "plan9/386",
    "plan9/amd64",
    "solaris/amd64",
    "js/wasm",
}

// Special environment flags for a given arch

@@ -280,7 +281,7 @@ func stripVersion(goarch string) string {

// build the binary in dir returning success or failure
func compileArch(version, goos, goarch, dir string) bool {
    log.Printf("Compiling %s/%s", goos, goarch)
    log.Printf("Compiling %s/%s into %s", goos, goarch, dir)
    output := filepath.Join(dir, "rclone")
    if goos == "windows" {
        output += ".exe"

@@ -298,11 +299,17 @@ func compileArch(version, goos, goarch, dir string) bool {
        "go", "build",
        "--ldflags", "-s -X github.com/rclone/rclone/fs.Version=" + version,
        "-trimpath",
        "-i",
        "-o", output,
        "-tags", *tags,
        "..",
    }
    if *buildmode != "" {
        args = append(args,
            "-buildmode", *buildmode,
        )
    }
    args = append(args,
        "..",
    )
    env := []string{
        "GOOS=" + goos,
        "GOARCH=" + stripVersion(goarch),

@@ -321,14 +328,16 @@ func compileArch(version, goos, goarch, dir string) bool {
        return false
    }
    if !*compileOnly {
        artifacts := []string{buildZip(dir)}
        // build a .deb and .rpm if appropriate
        if goos == "linux" {
            artifacts = append(artifacts, buildDebAndRpm(dir, version, goarch)...)
        }
        if *copyAs != "" {
            for _, artifact := range artifacts {
                run("ln", artifact, strings.Replace(artifact, "-"+version, "-"+*copyAs, 1))
        if goos != "js" {
            artifacts := []string{buildZip(dir)}
            // build a .deb and .rpm if appropriate
            if goos == "linux" {
                artifacts = append(artifacts, buildDebAndRpm(dir, version, stripVersion(goarch))...)
            }
            if *copyAs != "" {
                for _, artifact := range artifacts {
                    run("ln", artifact, strings.Replace(artifact, "-"+version, "-"+*copyAs, 1))
                }
            }
        }
    }
    // tidy up

@@ -15,10 +15,12 @@ description: |
vendor: "rclone"
homepage: "https://rclone.org"
license: "MIT"
# No longer supported? See https://github.com/goreleaser/nfpm/issues/144
# bindir: "/usr/bin"
files:
  ./rclone: "/usr/bin/rclone"
  ./README.html: "/usr/share/doc/rclone/README.html"
  ./README.txt: "/usr/share/doc/rclone/README.txt"
  ./rclone.1: "/usr/share/man/man1/rclone.1"
contents:
  - src: ./rclone
    dst: /usr/bin/rclone
  - src: ./README.html
    dst: /usr/share/doc/rclone/README.html
  - src: ./README.txt
    dst: /usr/share/doc/rclone/README.txt
  - src: ./rclone.1
    dst: /usr/share/man/man1/rclone.1
26 bin/test-repeat-vfs.sh (new executable file)
@@ -0,0 +1,26 @@
#!/bin/bash
# Thrash the VFS tests

set -e

# Optionally set the iterations with the first parameter
iterations=${1:-100}

base=$(dirname $(dirname $(realpath "$0")))
echo ${base}
run=${base}/bin/test-repeat.sh
echo ${run}

testdirs="
vfs
vfs/vfscache
vfs/vfscache/writeback
vfs/vfscache/downloaders
cmd/cmount
"

for testdir in ${testdirs}; do
    echo "Testing ${testdir} with ${iterations} iterations"
    cd ${base}/${testdir}
    ${run} -i=${iterations} -race -tags=cmount
done

@@ -1,4 +1,4 @@
// +build !plan9
// +build !plan9,!js

package cachestats

@@ -1,6 +1,6 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build plan9
// +build plan9 js

package cachestats

@@ -29,6 +29,7 @@ var (
func init() {
    cmd.Root.AddCommand(commandDefinition)
    cmdFlags := commandDefinition.Flags()
    flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by downloading rather than with hash.")
    AddFlags(cmdFlags)
}

@@ -50,7 +51,7 @@ the source match the files in the destination, not the other way
around. This means that extra files in the destination that are not in
the source will not be detected.

The |--differ|, |--missing-on-dst|, |--missing-on-src|, |--src-only|
The |--differ|, |--missing-on-dst|, |--missing-on-src|, |--match|
and |--error| flags write paths, one per line, to the file name (or
stdout if it is |-|) supplied. What they write is described in the
help below. For example |--differ| will write all paths which are

@@ -14,7 +14,7 @@ func init() {

var commandDefinition = &cobra.Command{
    Use:   "cleanup remote:path",
    Short: `Clean up the remote if possible`,
    Short: `Clean up the remote if possible.`,
    Long: `
Clean up the remote if possible. Empty the trash or delete old file
versions. Not supported by all remotes.
10 cmd/cmd.go
@@ -9,7 +9,6 @@ package cmd
import (
    "fmt"
    "log"
    "math/rand"
    "os"
    "os/exec"
    "path"

@@ -35,6 +34,7 @@ import (
    "github.com/rclone/rclone/fs/rc/rcflags"
    "github.com/rclone/rclone/fs/rc/rcserver"
    "github.com/rclone/rclone/lib/atexit"
    "github.com/rclone/rclone/lib/random"
    "github.com/spf13/cobra"
    "github.com/spf13/pflag"
)

@@ -89,8 +89,10 @@ func NewFsFile(remote string) (fs.Fs, string) {
    f, err := cache.Get(remote)
    switch err {
    case fs.ErrorIsFile:
        cache.Pin(f) // pin indefinitely since it was on the CLI
        return f, path.Base(fsPath)
    case nil:
        cache.Pin(f) // pin indefinitely since it was on the CLI
        return f, ""
    default:
        err = fs.CountError(err)

@@ -139,6 +141,7 @@ func newFsDir(remote string) fs.Fs {
        err = fs.CountError(err)
        log.Fatalf("Failed to create file system for %q: %v", remote, err)
    }
    cache.Pin(f) // pin indefinitely since it was on the CLI
    return f
}

@@ -197,6 +200,7 @@ func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs
        _ = fs.CountError(err)
        log.Fatalf("Failed to create file system for destination %q: %v", dstRemote, err)
    }
    cache.Pin(fdst) // pin indefinitely since it was on the CLI
    return
}

@@ -508,7 +512,9 @@ func AddBackendFlags() {

// Main runs rclone interpreting flags and commands out of os.Args
func Main() {
    rand.Seed(time.Now().Unix())
    if err := random.Seed(); err != nil {
        log.Fatalf("Fatal error: %v", err)
    }
    setupRootCommand(Root)
    AddBackendFlags()
    if err := Root.Execute(); err != nil {
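Main now seeds math/rand once from lib/random instead of the wall clock, which is also why the per-call rand.Seed in the union EpRand policy above was dropped: reseeding from a one-second clock makes consecutive picks repeat. rclone's lib/random implementation is not shown in this diff, but the usual shape of a crypto-backed seeder looks like this (a sketch, not rclone's code):

import (
    crand "crypto/rand"
    "encoding/binary"
    "math/rand"
)

// seedMathRand seeds math/rand with 8 bytes from crypto/rand so the
// global source is unpredictable and differs between runs started in
// the same second.
func seedMathRand() error {
    var b [8]byte
    if _, err := crand.Read(b[:]); err != nil {
        return err
    }
    rand.Seed(int64(binary.LittleEndian.Uint64(b[:])))
    return nil
}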
126 cmd/cmount/fs.go
@@ -17,7 +17,6 @@ import (
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/log"
    "github.com/rclone/rclone/vfs"
    "github.com/rclone/rclone/vfs/vfsflags"
)

const fhUnset = ^uint64(0)

@@ -32,10 +31,10 @@ type FS struct {
}

// NewFS makes a new FS
func NewFS(f fs.Fs) *FS {
func NewFS(VFS *vfs.VFS) *FS {
    fsys := &FS{
        VFS:   vfs.New(f, &vfsflags.Opt),
        f:     f,
        VFS:   VFS,
        f:     VFS.Fs(),
        ready: make(chan (struct{})),
    }
    return fsys

@@ -218,12 +217,18 @@ func (fsys *FS) Readdir(dirPath string,
    itemsRead := -1
    defer log.Trace(dirPath, "ofst=%d, fh=0x%X", ofst, fh)("items=%d, errc=%d", &itemsRead, &errc)

    node, errc := fsys.getHandle(fh)
    dir, errc := fsys.lookupDir(dirPath)
    if errc != 0 {
        return errc
    }

    items, err := node.Readdir(-1)
    // We can't seek in directories and FUSE should know that so
    // return an error if ofst is ever set.
    if ofst > 0 {
        return -fuse.ESPIPE
    }

    nodes, err := dir.ReadDirAll()
    if err != nil {
        return translateError(err)
    }

@@ -232,7 +237,7 @@ func (fsys *FS) Readdir(dirPath string,
    // for getattr (but FUSE only looks at st_ino and the
    // file-type bits of st_mode).
    //
    // FIXME If you call host.SetCapReaddirPlus() then WinFsp will
    // We have called host.SetCapReaddirPlus() so WinFsp will
    // use the full stat information - a Useful optimization on
    // Windows.
    //

@@ -243,25 +248,19 @@ func (fsys *FS) Readdir(dirPath string,
    // directory is read in a single readdir operation.
    fill(".", nil, 0)
    fill("..", nil, 0)
    for _, item := range items {
        node, ok := item.(vfs.Node)
        if ok {
            name := node.Name()
            if len(name) > mountlib.MaxLeafSize {
                fs.Errorf(dirPath, "Name too long (%d bytes) for FUSE, skipping: %s", len(name), name)
                continue
            }
            if usingReaddirPlus {
                // We have called host.SetCapReaddirPlus() so supply the stat information
                var stat fuse.Stat_t
                _ = fsys.stat(node, &stat) // not capable of returning an error
                fill(name, &stat, 0)
            } else {
                fill(name, nil, 0)
            }
    for _, node := range nodes {
        name := node.Name()
        if len(name) > mountlib.MaxLeafSize {
            fs.Errorf(dirPath, "Name too long (%d bytes) for FUSE, skipping: %s", len(name), name)
            continue
        }
        // We have called host.SetCapReaddirPlus() so supply the stat information
        // It is very cheap at this point so supply it regardless of OS capabilities
        var stat fuse.Stat_t
        _ = fsys.stat(node, &stat) // not capable of returning an error
        fill(name, &stat, 0)
    }
    itemsRead = len(items)
    itemsRead = len(nodes)
    return 0
}

@@ -290,41 +289,65 @@ func (fsys *FS) Statfs(path string, stat *fuse.Statfs_t) (errc int) {
    return 0
}

// Open opens a file
func (fsys *FS) Open(path string, flags int) (errc int, fh uint64) {
    defer log.Trace(path, "flags=0x%X", flags)("errc=%d, fh=0x%X", &errc, &fh)
// OpenEx opens a file
func (fsys *FS) OpenEx(path string, fi *fuse.FileInfo_t) (errc int) {
    defer log.Trace(path, "flags=0x%X", fi.Flags)("errc=%d, fh=0x%X", &errc, &fi.Fh)
    fi.Fh = fhUnset

    // translate the fuse flags to os flags
    flags = translateOpenFlags(flags)
    flags := translateOpenFlags(fi.Flags)
    handle, err := fsys.VFS.OpenFile(path, flags, 0777)
    if err != nil {
        return translateError(err), fhUnset
        return translateError(err)
    }

    // FIXME add support for unknown length files setting direct_io
    // See: https://github.com/billziss-gh/cgofuse/issues/38
    // If size unknown then use direct io to read
    if entry := handle.Node().DirEntry(); entry != nil && entry.Size() < 0 {
        fi.DirectIo = true
    }

    return 0, fsys.openHandle(handle)
    fi.Fh = fsys.openHandle(handle)
    return 0
}

// Open opens a file
func (fsys *FS) Open(path string, flags int) (errc int, fh uint64) {
    var fi = fuse.FileInfo_t{
        Flags: flags,
    }
    errc = fsys.OpenEx(path, &fi)
    return errc, fi.Fh
}

// CreateEx creates and opens a file.
func (fsys *FS) CreateEx(filePath string, mode uint32, fi *fuse.FileInfo_t) (errc int) {
    defer log.Trace(filePath, "flags=0x%X, mode=0%o", fi.Flags, mode)("errc=%d, fh=0x%X", &errc, &fi.Fh)
    fi.Fh = fhUnset
    leaf, parentDir, errc := fsys.lookupParentDir(filePath)
    if errc != 0 {
        return errc
    }
    file, err := parentDir.Create(leaf, fi.Flags)
    if err != nil {
        return translateError(err)
    }
    // translate the fuse flags to os flags
    flags := translateOpenFlags(fi.Flags) | os.O_CREATE
    handle, err := file.Open(flags)
    if err != nil {
        return translateError(err)
    }
    fi.Fh = fsys.openHandle(handle)
    return 0
}

// Create creates and opens a file.
func (fsys *FS) Create(filePath string, flags int, mode uint32) (errc int, fh uint64) {
    defer log.Trace(filePath, "flags=0x%X, mode=0%o", flags, mode)("errc=%d, fh=0x%X", &errc, &fh)
    leaf, parentDir, errc := fsys.lookupParentDir(filePath)
    if errc != 0 {
        return errc, fhUnset
    var fi = fuse.FileInfo_t{
        Flags: flags,
    }
    file, err := parentDir.Create(leaf, flags)
    if err != nil {
        return translateError(err), fhUnset
    }
    // translate the fuse flags to os flags
    flags = translateOpenFlags(flags) | os.O_CREATE
    handle, err := file.Open(flags)
    if err != nil {
        return translateError(err), fhUnset
    }
    return 0, fsys.openHandle(handle)
    errc = fsys.CreateEx(filePath, mode, &fi)
    return errc, fi.Fh
}

// Truncate truncates a file to size

@@ -595,3 +618,12 @@ func translateOpenFlags(inFlags int) (outFlags int) {
    // NB O_SYNC isn't defined by fuse
    return outFlags
}

// Make sure interfaces are satisfied
var (
    _ fuse.FileSystemInterface = (*FS)(nil)
    _ fuse.FileSystemOpenEx    = (*FS)(nil)
    //_ fuse.FileSystemChflags = (*FS)(nil)
    //_ fuse.FileSystemSetcrtime = (*FS)(nil)
    //_ fuse.FileSystemSetchgtime = (*FS)(nil)
)

@@ -11,26 +11,15 @@ package cmount
import (
    "fmt"
    "os"
    "os/signal"
    "runtime"
    "syscall"
    "strings"
    "time"

    "github.com/billziss-gh/cgofuse/fuse"
    "github.com/okzk/sdnotify"
    "github.com/pkg/errors"
    "github.com/rclone/rclone/cmd/mountlib"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/lib/atexit"
    "github.com/rclone/rclone/vfs"
    "github.com/rclone/rclone/vfs/vfsflags"
)

const (
    // SetCapReaddirPlus informs the host that the hosted file system has the readdir-plus
    // capability [Windows only]. A file system that has the readdir-plus capability can send
    // full stat information during Readdir, thus avoiding extraneous Getattr calls.
    usingReaddirPlus = runtime.GOOS == "windows"
)

func init() {

@@ -38,78 +27,91 @@ func init() {
    if runtime.GOOS == "windows" {
        name = "mount"
    }
    mountlib.NewMountCommand(name, false, Mount)
    // Add mount to rc
    mountlib.NewMountCommand(name, false, mount)
    mountlib.AddRc("cmount", mount)
}

// mountOptions configures the options from the command line flags
func mountOptions(device string, mountpoint string) (options []string) {
func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.Options) (options []string) {
    // Options
    options = []string{
        "-o", "fsname=" + device,
        "-o", "subtype=rclone",
        "-o", fmt.Sprintf("max_readahead=%d", mountlib.MaxReadAhead),
        "-o", fmt.Sprintf("attr_timeout=%g", mountlib.AttrTimeout.Seconds()),
        "-o", fmt.Sprintf("max_readahead=%d", opt.MaxReadAhead),
        "-o", fmt.Sprintf("attr_timeout=%g", opt.AttrTimeout.Seconds()),
        // This causes FUSE to supply O_TRUNC with the Open
        // call which is more efficient for cmount. However
        // it does not work with cgofuse on Windows with
        // WinFSP so cmount must work with or without it.
        "-o", "atomic_o_trunc",
    }
    if mountlib.DebugFUSE {
    if opt.DebugFUSE {
        options = append(options, "-o", "debug")
    }

    // OSX options
    if runtime.GOOS == "darwin" {
        if mountlib.NoAppleDouble {
        if opt.NoAppleDouble {
            options = append(options, "-o", "noappledouble")
        }
        if mountlib.NoAppleXattr {
        if opt.NoAppleXattr {
            options = append(options, "-o", "noapplexattr")
        }
    }

    // determine if ExtraOptions already has an opt in
    hasOption := func(optionName string) bool {
        optionName += "="
        for _, option := range opt.ExtraOptions {
            if strings.HasPrefix(option, optionName) {
                return true
            }
        }
        return false
    }

    // Windows options
    if runtime.GOOS == "windows" {
        // These cause WinFsp to mean the current user
        options = append(options, "-o", "uid=-1")
        options = append(options, "-o", "gid=-1")
        if !hasOption("uid") {
            options = append(options, "-o", "uid=-1")
        }
        if !hasOption("gid") {
            options = append(options, "-o", "gid=-1")
        }
        options = append(options, "--FileSystemName=rclone")
    }

    if runtime.GOOS == "darwin" || runtime.GOOS == "windows" {
        if mountlib.VolumeName != "" {
            options = append(options, "-o", "volname="+mountlib.VolumeName)
        if opt.VolumeName != "" {
            options = append(options, "-o", "volname="+opt.VolumeName)
        }
    }
    if mountlib.AllowNonEmpty {
    if opt.AllowNonEmpty {
        options = append(options, "-o", "nonempty")
    }
    if mountlib.AllowOther {
    if opt.AllowOther {
        options = append(options, "-o", "allow_other")
    }
    if mountlib.AllowRoot {
    if opt.AllowRoot {
        options = append(options, "-o", "allow_root")
    }
    if mountlib.DefaultPermissions {
    if opt.DefaultPermissions {
        options = append(options, "-o", "default_permissions")
    }
    if vfsflags.Opt.ReadOnly {
    if VFS.Opt.ReadOnly {
        options = append(options, "-o", "ro")
    }
    if mountlib.WritebackCache {
    if opt.WritebackCache {
        // FIXME? options = append(options, "-o", WritebackCache())
    }
    if mountlib.DaemonTimeout != 0 {
        options = append(options, "-o", fmt.Sprintf("daemon_timeout=%d", int(mountlib.DaemonTimeout.Seconds())))
    if opt.DaemonTimeout != 0 {
        options = append(options, "-o", fmt.Sprintf("daemon_timeout=%d", int(opt.DaemonTimeout.Seconds())))
    }
    for _, option := range mountlib.ExtraOptions {
    for _, option := range opt.ExtraOptions {
        options = append(options, "-o", option)
    }
    for _, option := range mountlib.ExtraFlags {
    for _, option := range opt.ExtraFlags {
        options = append(options, option)
    }
    return options
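With the hasOption guard above, uid/gid values supplied by the user through extra FUSE options are no longer clobbered by the Windows defaults. Illustrative effect only, with a hypothetical value:

opt.ExtraOptions = []string{"uid=1000"}
// before this change: options ended up with both "uid=-1" and "uid=1000"
// after: hasOption("uid") is true, so the default "uid=-1" is skipped and
// only the user's "uid=1000" is appended by the ExtraOptions loop below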
|
||||
@@ -135,35 +137,39 @@ func waitFor(fn func() bool) (ok bool) {
 //
 // returns an error, and an error channel for the serve process to
 // report an error when fusermount is called.
-func mount(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, error) {
+func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error, func() error, error) {
+    f := VFS.Fs()
     fs.Debugf(f, "Mounting on %q", mountpoint)

     // Check the mountpoint - in Windows the mountpoint mustn't exist before the mount
     if runtime.GOOS != "windows" {
         fi, err := os.Stat(mountpoint)
         if err != nil {
-            return nil, nil, nil, errors.Wrap(err, "mountpoint")
+            return nil, nil, errors.Wrap(err, "mountpoint")
         }
         if !fi.IsDir() {
-            return nil, nil, nil, errors.New("mountpoint is not a directory")
+            return nil, nil, errors.New("mountpoint is not a directory")
         }
     }

     // Create underlying FS
-    fsys := NewFS(f)
+    fsys := NewFS(VFS)
     host := fuse.NewFileSystemHost(fsys)
-    host.SetCapReaddirPlus(true) // only works on Windows
+    if usingReaddirPlus {
+        host.SetCapReaddirPlus(true)
+    }
     host.SetCapCaseInsensitive(f.Features().CaseInsensitive)

     // Create options
-    options := mountOptions(f.Name()+":"+f.Root(), mountpoint)
+    options := mountOptions(VFS, f.Name()+":"+f.Root(), mountpoint, opt)
     fs.Debugf(f, "Mounting with options: %q", options)

     // Serve the mount point in the background returning error to errChan
     errChan := make(chan error, 1)
     go func() {
         defer func() {
             if r := recover(); r != nil {
                 errChan <- errors.Errorf("mount failed: %v", r)
             }
         }()
         var err error
         ok := host.Mount(mountpoint, options)
         if !ok {
@@ -199,7 +205,7 @@ func mount(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, er
     select {
     case err := <-errChan:
         err = errors.Wrap(err, "mount stopped before calling Init")
-        return nil, nil, nil, err
+        return nil, nil, err
     case <-fsys.ready:
     }

@@ -214,53 +220,5 @@ func mount(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, er
         }
     }

-    return fsys.VFS, errChan, unmount, nil
-}
-
-// Mount mounts the remote at mountpoint.
-//
-// If noModTime is set then it
-func Mount(f fs.Fs, mountpoint string) error {
-    // Mount it
-    FS, errChan, unmount, err := mount(f, mountpoint)
-    if err != nil {
-        return errors.Wrap(err, "failed to mount FUSE fs")
-    }
-
-    // Note cgofuse unmounts the fs on SIGINT etc
-
-    sigHup := make(chan os.Signal, 1)
-    signal.Notify(sigHup, syscall.SIGHUP)
-
-    atexit.Register(func() {
-        _ = unmount()
-    })
-
-    if err := sdnotify.Ready(); err != nil && err != sdnotify.ErrSdNotifyNoSocket {
-        return errors.Wrap(err, "failed to notify systemd")
-    }
-
-waitloop:
-    for {
-        select {
-        // umount triggered outside the app
-        case err = <-errChan:
-            break waitloop
-        // user sent SIGHUP to clear the cache
-        case <-sigHup:
-            root, err := FS.Root()
-            if err != nil {
-                fs.Errorf(f, "Error reading root: %v", err)
-            } else {
-                root.ForgetAll()
-            }
-        }
-    }
-
-    _ = sdnotify.Stopping()
-    if err != nil {
-        return errors.Wrap(err, "failed to umount FUSE fs")
-    }
-
-    return nil
+    return errChan, unmount, nil
 }
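The refactor shrinks mount()'s contract: the VFS is now passed in rather than returned, leaving a channel for asynchronous serve errors, an unmount function, and an immediate setup error. A hedged sketch of how a caller drives that triple (fakeMount and everything else here is an illustrative stand-in, not rclone's mountlib):

```go
package main

import (
    "fmt"
    "log"
)

// fakeMount mimics the refactored signature: a channel for errors from
// the background serve loop, an unmount function, and an immediate
// error for setup failures.
func fakeMount(mountpoint string) (<-chan error, func() error, error) {
    errChan := make(chan error, 1)
    go func() {
        // Simulate the background serve loop finishing cleanly.
        errChan <- nil
    }()
    unmount := func() error { return nil }
    return errChan, unmount, nil
}

func main() {
    errChan, unmount, err := fakeMount("/mnt/demo")
    if err != nil {
        log.Fatalf("failed to mount: %v", err)
    }
    // Wait for the serve loop to end, then unmount - the same shape of
    // control flow that mountlib now owns on rclone's behalf.
    if err := <-errChan; err != nil {
        log.Fatalf("mount failed at runtime: %v", err)
    }
    if err := unmount(); err != nil {
        log.Fatalf("failed to unmount: %v", err)
    }
    fmt.Println("unmounted cleanly")
}
```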
cmd/cmount/mount_brew.go (new file, 32 lines)
@@ -0,0 +1,32 @@
+// Build for macos with the brew tag to handle the absence
+// of fuse and print an appropriate error message
+
+// +build brew
+// +build darwin
+
+package cmount
+
+import (
+    "github.com/pkg/errors"
+    "github.com/rclone/rclone/cmd/mountlib"
+    "github.com/rclone/rclone/vfs"
+)
+
+func init() {
+    name := "mount"
+    cmd := mountlib.NewMountCommand(name, false, mount)
+    cmd.Aliases = append(cmd.Aliases, "cmount")
+    mountlib.AddRc("cmount", mount)
+}
+
+// mount the file system
+//
+// The mount point will be ready when this returns.
+//
+// returns an error, and an error channel for the serve process to
+// report an error when fusermount is called.
+func mount(_ *vfs.VFS, _ string, _ *mountlib.Options) (<-chan error, func() error, error) {
+    return nil, nil, errors.New("mount is not supported on MacOS when installed via Homebrew. " +
+        "Please install the binaries available at https://rclone." +
+        "org/downloads/ instead if you want to use the mount command")
+}
@@ -1,6 +1,6 @@
 // Build for cmount for unsupported platforms to stop go complaining
 // about "no buildable Go source files "

-// +build !linux,!darwin,!freebsd,!windows !cgo !cmount
+// +build !linux,!darwin,!freebsd,!windows !brew !cgo !cmount

 package cmount
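For readers unused to old-style Go build constraints: within one `+build` line, space-separated terms are ORed and comma-separated terms are ANDed, while separate `+build` lines are ANDed together. That is how the stub above is selected whenever any of `!brew`, `!cgo`, or `!cmount` holds, or the OS is unsupported. A minimal sketch under those rules (the package is illustrative):

```go
// +build linux,amd64 darwin

// Package example compiles only when (linux AND amd64) OR darwin
// holds - the same comma-is-AND, space-is-OR reading that makes the
// constraint above mean "all four OSes excluded, or no brew tag, or
// no cgo, or no cmount tag".
package example
```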
@@ -22,7 +22,7 @@ func init() {

 var commandDefinition = &cobra.Command{
     Use:   "copy source:path dest:path",
-    Short: `Copy files from source to dest, skipping already copied`,
+    Short: `Copy files from source to dest, skipping already copied.`,
     Long: `
 Copy the source to the destination. Doesn't transfer
 unchanged files, testing by size and modification time or

@@ -15,7 +15,7 @@ func init() {

 var commandDefinition = &cobra.Command{
     Use:   "copyto source:path dest:path",
-    Short: `Copy files from source to dest, skipping already copied`,
+    Short: `Copy files from source to dest, skipping already copied.`,
     Long: `
 If source:path is a file or directory then it copies it to a file or
 directory named dest:path.
@@ -22,19 +22,32 @@ func init() {

 var commandDefinition = &cobra.Command{
     Use:   "dedupe [mode] remote:path",
-    Short: `Interactively find duplicate files and delete/rename them.`,
+    Short: `Interactively find duplicate filenames and delete/rename them.`,
     Long: `
-By default ` + "`" + `dedupe` + "`" + ` interactively finds duplicate files and offers to
-delete all but one or rename them to be different. Only useful with
-Google Drive which can have duplicate file names.
+By default ` + "`dedupe`" + ` interactively finds files with duplicate
+names and offers to delete all but one or rename them to be
+different.
+
+This is only useful with backends like Google Drive which can have
+duplicate file names. It can be run on wrapping backends (eg crypt) if
+they wrap a backend which supports duplicate file names.

 In the first pass it will merge directories with the same name. It
-will do this iteratively until all the identical directories have been
-merged.
+will do this iteratively until all the identically named directories
+have been merged.

-The ` + "`" + `dedupe` + "`" + ` command will delete all but one of any identical (same
-md5sum) files it finds without confirmation. This means that for most
-duplicated files the ` + "`" + `dedupe` + "`" + ` command will not be interactive.
+In the second pass, for every group of duplicate file names, it will
+delete all but one identical files it finds without confirmation.
+This means that for most duplicated files the ` + "`dedupe`" + `
+command will not be interactive.
+
+` + "`dedupe`" + ` considers files to be identical if they have the
+same hash. If the backend does not support hashes (eg crypt wrapping
+Google Drive) then they will never be found to be identical. If you
+use the ` + "`--size-only`" + ` flag then files will be considered
+identical if they have the same size (any hash will be ignored). This
+can be useful on crypt backends which do not support hashes.

 **Important**: Since this can cause data loss, test first with the
 ` + "`--dry-run` or the `--interactive`/`-i`" + ` flag.
@@ -52,26 +65,26 @@ Before - with duplicates
       1744073 2016-03-05 16:22:38.104000000 two.txt
        564374 2016-03-05 16:22:52.118000000 two.txt

-Now the ` + "`" + `dedupe` + "`" + ` session
+Now the ` + "`dedupe`" + ` session

     $ rclone dedupe drive:dupes
     2016/03/05 16:24:37 Google drive root 'dupes': Looking for duplicates using interactive mode.
-    one.txt: Found 4 duplicates - deleting identical copies
-    one.txt: Deleting 2/3 identical duplicates (md5sum "1eedaa9fe86fd4b8632e2ac549403b36")
+    one.txt: Found 4 files with duplicate names
+    one.txt: Deleting 2/3 identical duplicates (MD5 "1eedaa9fe86fd4b8632e2ac549403b36")
     one.txt: 2 duplicates remain
-      1: 6048320 bytes, 2016-03-05 16:23:16.798000000, md5sum 1eedaa9fe86fd4b8632e2ac549403b36
-      2: 564374 bytes, 2016-03-05 16:23:06.731000000, md5sum 7594e7dc9fc28f727c42ee3e0749de81
+      1: 6048320 bytes, 2016-03-05 16:23:16.798000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
+      2: 564374 bytes, 2016-03-05 16:23:06.731000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
     s) Skip and do nothing
     k) Keep just one (choose which in next step)
     r) Rename all to be different (by changing file.jpg to file-1.jpg)
     s/k/r> k
     Enter the number of the file to keep> 1
     one.txt: Deleted 1 extra copies
-    two.txt: Found 3 duplicates - deleting identical copies
+    two.txt: Found 3 files with duplicate names
     two.txt: 3 duplicates remain
-      1: 564374 bytes, 2016-03-05 16:22:52.118000000, md5sum 7594e7dc9fc28f727c42ee3e0749de81
-      2: 6048320 bytes, 2016-03-05 16:22:46.185000000, md5sum 1eedaa9fe86fd4b8632e2ac549403b36
-      3: 1744073 bytes, 2016-03-05 16:22:38.104000000, md5sum 851957f7fb6f0bc4ce76be966d336802
+      1: 564374 bytes, 2016-03-05 16:22:52.118000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
+      2: 6048320 bytes, 2016-03-05 16:22:46.185000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
+      3: 1744073 bytes, 2016-03-05 16:22:38.104000000, MD5 851957f7fb6f0bc4ce76be966d336802
     s) Skip and do nothing
     k) Keep just one (choose which in next step)
     r) Rename all to be different (by changing file.jpg to file-1.jpg)
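The reworded help text describes a two-pass algorithm: group by name first, then treat same-hash files within a group as identical. A toy Go sketch of that decision, using MD5 as the docs do (this is an illustration, not rclone's implementation):

```go
package main

import (
    "crypto/md5"
    "encoding/hex"
    "fmt"
)

// object is a stand-in for a remote file: a name plus its content.
type object struct {
    name string
    data []byte
}

func main() {
    objs := []object{
        {"one.txt", []byte("hello")},
        {"one.txt", []byte("hello")},     // identical duplicate
        {"one.txt", []byte("different")}, // same name, different content
    }

    // Pass 1: group entries by name.
    byName := map[string][]object{}
    for _, o := range objs {
        byName[o.name] = append(byName[o.name], o)
    }

    // Pass 2: within each name group, files with equal MD5 are
    // "identical" and all but the first would be deleted.
    for name, group := range byName {
        seen := map[string]bool{}
        for _, o := range group {
            sum := md5.Sum(o.data)
            h := hex.EncodeToString(sum[:])
            if seen[h] {
                fmt.Printf("%s: would delete identical duplicate (MD5 %s)\n", name, h)
            } else {
                seen[h] = true
            }
        }
    }
}
```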
@@ -44,7 +44,7 @@ func init() {

 var commandDefinition = &cobra.Command{
     Use:   "lsf remote:path",
-    Short: `List directories and objects in remote:path formatted for parsing`,
+    Short: `List directories and objects in remote:path formatted for parsing.`,
     Long: `
 List the contents of the source path (directories and objects) to
 standard output in a form which is easy to parse by scripts. By
@@ -19,6 +19,7 @@ import (
 // Dir represents a directory entry
 type Dir struct {
     *vfs.Dir
+    fsys *FS
 }

 // Check interface satisfied
@@ -27,7 +28,7 @@ var _ fusefs.Node = (*Dir)(nil)
 // Attr updates the attributes of a directory
 func (d *Dir) Attr(ctx context.Context, a *fuse.Attr) (err error) {
     defer log.Trace(d, "")("attr=%+v, err=%v", a, &err)
-    a.Valid = mountlib.AttrTimeout
+    a.Valid = d.fsys.opt.AttrTimeout
     a.Gid = d.VFS().Opt.GID
     a.Uid = d.VFS().Opt.UID
     a.Mode = os.ModeDir | d.VFS().Opt.DirPerms
@@ -75,7 +76,7 @@ func (d *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.Lo
     if err != nil {
         return nil, translateError(err)
     }
-    resp.EntryValid = mountlib.AttrTimeout
+    resp.EntryValid = d.fsys.opt.AttrTimeout
     // Check the mnode to see if it has a fuse Node cached
     // We must return the same fuse nodes for vfs Nodes
     node, ok := mnode.Sys().(fusefs.Node)
@@ -84,9 +85,9 @@ func (d *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.Lo
     }
     switch x := mnode.(type) {
     case *vfs.File:
-        node = &File{x}
+        node = &File{x, d.fsys}
     case *vfs.Dir:
-        node = &Dir{x}
+        node = &Dir{x, d.fsys}
     default:
         panic("bad type")
     }
@@ -106,6 +107,13 @@ func (d *Dir) ReadDirAll(ctx context.Context) (dirents []fuse.Dirent, err error)
     if err != nil {
         return nil, translateError(err)
     }
+    dirents = append(dirents, fuse.Dirent{
+        Type: fuse.DT_Dir,
+        Name: ".",
+    }, fuse.Dirent{
+        Type: fuse.DT_Dir,
+        Name: "..",
+    })
     for _, node := range items {
         name := node.Name()
         if len(name) > mountlib.MaxLeafSize {
@@ -139,7 +147,7 @@ func (d *Dir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.Cr
     if err != nil {
         return nil, nil, translateError(err)
     }
-    node = &File{file}
+    node = &File{file, d.fsys}
     file.SetSys(node) // cache the FUSE node for later
     return node, &FileHandle{fh}, err
 }
@@ -153,7 +161,7 @@ func (d *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (node fusefs.No
     if err != nil {
         return nil, translateError(err)
     }
-    node = &Dir{dir}
+    node = &Dir{dir, d.fsys}
     dir.SetSys(node) // cache the FUSE node for later
     return node, nil
 }
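The recurring `SetSys`/`Sys` calls above implement identity caching: the kernel must be handed the same FUSE node for the same VFS node on every lookup, so the wrapper is stashed on the node the first time and reused afterwards. A standalone sketch of the idea (vnode and fuseNode are illustrative types, not rclone's):

```go
package main

import "fmt"

// vnode mimics a VFS node that can cache an opaque value, like
// vfs.Node's SetSys/Sys used in the hunks above.
type vnode struct {
    name string
    sys  interface{}
}

func (n *vnode) SetSys(v interface{}) { n.sys = v }
func (n *vnode) Sys() interface{}     { return n.sys }

// fuseNode is the wrapper the kernel-facing layer hands out.
type fuseNode struct{ n *vnode }

// lookup returns the cached wrapper if one exists, so repeated
// lookups of the same vnode yield the same fuseNode pointer.
func lookup(n *vnode) *fuseNode {
    if cached, ok := n.Sys().(*fuseNode); ok {
        return cached
    }
    fn := &fuseNode{n}
    n.SetSys(fn) // cache the FUSE node for later
    return fn
}

func main() {
    n := &vnode{name: "file.txt"}
    a, b := lookup(n), lookup(n)
    fmt.Println(a == b) // true - the same node both times
}
```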
@@ -8,7 +8,6 @@ import (

     "bazil.org/fuse"
     fusefs "bazil.org/fuse/fs"
-    "github.com/rclone/rclone/cmd/mountlib"
     "github.com/rclone/rclone/fs/log"
     "github.com/rclone/rclone/vfs"
 )
@@ -16,6 +15,7 @@ import (
 // File represents a file
 type File struct {
     *vfs.File
+    fsys *FS
 }

 // Check interface satisfied
@@ -24,7 +24,7 @@ var _ fusefs.Node = (*File)(nil)
 // Attr fills out the attributes for the file
 func (f *File) Attr(ctx context.Context, a *fuse.Attr) (err error) {
     defer log.Trace(f, "")("a=%+v, err=%v", a, &err)
-    a.Valid = mountlib.AttrTimeout
+    a.Valid = f.fsys.opt.AttrTimeout
     modTime := f.File.ModTime()
     Size := uint64(f.File.Size())
     Blocks := (Size + 511) / 512
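`Blocks := (Size + 511) / 512` is ceiling division onto the 512-byte units that `st_blocks` is measured in. A quick sketch of the arithmetic:

```go
package main

import "fmt"

// blocks512 rounds a byte size up to a count of 512-byte blocks, the
// same (size + 511) / 512 ceiling division used in Attr above.
func blocks512(size uint64) uint64 {
    return (size + 511) / 512
}

func main() {
    for _, size := range []uint64{0, 1, 512, 513, 6048320} {
        fmt.Printf("%8d bytes -> %5d blocks\n", size, blocks512(size))
    }
    // 6048320 bytes -> 11814 blocks (rounded up from 11813.125)
}
```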
@@ -15,23 +15,24 @@ import (
     "github.com/rclone/rclone/fs"
     "github.com/rclone/rclone/fs/log"
     "github.com/rclone/rclone/vfs"
-    "github.com/rclone/rclone/vfs/vfsflags"
 )

 // FS represents the top level filing system
 type FS struct {
     *vfs.VFS
-    f fs.Fs
+    f   fs.Fs
+    opt *mountlib.Options
 }

 // Check interface satisfied
 var _ fusefs.FS = (*FS)(nil)

 // NewFS makes a new FS
-func NewFS(f fs.Fs) *FS {
+func NewFS(VFS *vfs.VFS, opt *mountlib.Options) *FS {
     fsys := &FS{
-        VFS: vfs.New(f, &vfsflags.Opt),
-        f:   f,
+        VFS: VFS,
+        f:   VFS.Fs(),
+        opt: opt,
     }
     return fsys
 }
@@ -43,7 +44,7 @@ func (f *FS) Root() (node fusefs.Node, err error) {
     if err != nil {
         return nil, translateError(err)
     }
-    return &Dir{root}, nil
+    return &Dir{root, f}, nil
 }

 // Check interface satisfied
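`NewFS` now receives a ready-made `*vfs.VFS` instead of building one from the global `vfsflags.Opt`, a plain dependency-injection move that lets a single VFS be shared between the mount layer and the remote-control API. A hedged sketch of the shape of that change (all types here are simplified stand-ins):

```go
package main

import "fmt"

// Options and VFS are simplified stand-ins for mountlib.Options and
// vfs.VFS - just enough to show the constructor change.
type Options struct{ ReadOnly bool }
type VFS struct{ name string }

type FS struct {
    vfs *VFS
    opt *Options
}

// NewFS takes its dependencies instead of reading package-level flags,
// so callers (and tests) control exactly which VFS and options it sees.
func NewFS(vfs *VFS, opt *Options) *FS {
    return &FS{vfs: vfs, opt: opt}
}

func main() {
    shared := &VFS{name: "remote:path"}
    a := NewFS(shared, &Options{ReadOnly: true})
    b := NewFS(shared, &Options{})
    fmt.Println(a.vfs == b.vfs) // true - both views share one VFS
}
```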
@@ -6,74 +6,67 @@ package mount

 import (
     "fmt"
-    "os"
-    "os/signal"
-    "syscall"
+    "runtime"

     "bazil.org/fuse"
     fusefs "bazil.org/fuse/fs"
-    "github.com/okzk/sdnotify"
     "github.com/pkg/errors"
     "github.com/rclone/rclone/cmd/mountlib"
     "github.com/rclone/rclone/fs"
-    "github.com/rclone/rclone/lib/atexit"
     "github.com/rclone/rclone/vfs"
-    "github.com/rclone/rclone/vfs/vfsflags"
 )

 func init() {
-    mountlib.NewMountCommand("mount", false, Mount)
+    mountlib.NewMountCommand("mount", false, mount)
+    // Add mount to rc
+    mountlib.AddRc("mount", mount)
 }

 // mountOptions configures the options from the command line flags
-func mountOptions(device string) (options []fuse.MountOption) {
+func mountOptions(VFS *vfs.VFS, device string, opt *mountlib.Options) (options []fuse.MountOption) {
     options = []fuse.MountOption{
-        fuse.MaxReadahead(uint32(mountlib.MaxReadAhead)),
+        fuse.MaxReadahead(uint32(opt.MaxReadAhead)),
         fuse.Subtype("rclone"),
         fuse.FSName(device),
-        fuse.VolumeName(mountlib.VolumeName),
+        fuse.VolumeName(opt.VolumeName),

         // Options from benchmarking in the fuse module
         //fuse.MaxReadahead(64 * 1024 * 1024),
         //fuse.WritebackCache(),
     }
-    if mountlib.AsyncRead {
+    if opt.AsyncRead {
         options = append(options, fuse.AsyncRead())
     }
-    if mountlib.NoAppleDouble {
+    if opt.NoAppleDouble {
         options = append(options, fuse.NoAppleDouble())
     }
-    if mountlib.NoAppleXattr {
+    if opt.NoAppleXattr {
         options = append(options, fuse.NoAppleXattr())
     }
-    if mountlib.AllowNonEmpty {
+    if opt.AllowNonEmpty {
         options = append(options, fuse.AllowNonEmptyMount())
     }
-    if mountlib.AllowOther {
+    if opt.AllowOther {
         options = append(options, fuse.AllowOther())
     }
-    if mountlib.AllowRoot {
+    if opt.AllowRoot {
         // options = append(options, fuse.AllowRoot())
         fs.Errorf(nil, "Ignoring --allow-root. Support has been removed upstream - see https://github.com/bazil/fuse/issues/144 for more info")
     }
-    if mountlib.DefaultPermissions {
+    if opt.DefaultPermissions {
         options = append(options, fuse.DefaultPermissions())
     }
-    if vfsflags.Opt.ReadOnly {
+    if VFS.Opt.ReadOnly {
         options = append(options, fuse.ReadOnly())
     }
-    if mountlib.WritebackCache {
+    if opt.WritebackCache {
         options = append(options, fuse.WritebackCache())
     }
-    if mountlib.DaemonTimeout != 0 {
-        options = append(options, fuse.DaemonTimeout(fmt.Sprint(int(mountlib.DaemonTimeout.Seconds()))))
+    if opt.DaemonTimeout != 0 {
+        options = append(options, fuse.DaemonTimeout(fmt.Sprint(int(opt.DaemonTimeout.Seconds()))))
     }
-    if len(mountlib.ExtraOptions) > 0 {
+    if len(opt.ExtraOptions) > 0 {
         fs.Errorf(nil, "-o/--option not supported with this FUSE backend")
     }
-    if len(mountlib.ExtraFlags) > 0 {
+    if len(opt.ExtraFlags) > 0 {
         fs.Errorf(nil, "--fuse-flag not supported with this FUSE backend")
     }
     return options
@@ -85,14 +78,25 @@ func mountOptions(device string) (options []fuse.MountOption) {
 //
 // returns an error, and an error channel for the serve process to
 // report an error when fusermount is called.
-func mount(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, error) {
-    fs.Debugf(f, "Mounting on %q", mountpoint)
-    c, err := fuse.Mount(mountpoint, mountOptions(f.Name()+":"+f.Root())...)
-    if err != nil {
-        return nil, nil, nil, err
-    }
-
-    filesys := NewFS(f)
+func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error, func() error, error) {
+    if runtime.GOOS == "darwin" {
+        fs.Logf(nil, "macOS users: please try \"rclone cmount\" as it will be the default in v1.54")
+    }
+
+    if opt.DebugFUSE {
+        fuse.Debug = func(msg interface{}) {
+            fs.Debugf("fuse", "%v", msg)
+        }
+    }
+
+    f := VFS.Fs()
+    fs.Debugf(f, "Mounting on %q", mountpoint)
+    c, err := fuse.Mount(mountpoint, mountOptions(VFS, f.Name()+":"+f.Root(), opt)...)
+    if err != nil {
+        return nil, nil, err
+    }
+
+    filesys := NewFS(VFS, opt)
     server := fusefs.New(c, nil)

     // Serve the mount point in the background returning error to errChan
@@ -109,7 +113,7 @@ func mount(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, er
     // check if the mount process has an error to report
     <-c.Ready
     if err := c.MountError; err != nil {
-        return nil, nil, nil, err
+        return nil, nil, err
     }

     unmount := func() error {
@@ -118,63 +122,5 @@ func mount(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, er
         return fuse.Unmount(mountpoint)
     }

-    return filesys.VFS, errChan, unmount, nil
-}
-
-// Mount mounts the remote at mountpoint.
-//
-// If noModTime is set then it
-func Mount(f fs.Fs, mountpoint string) error {
-    if mountlib.DebugFUSE {
-        fuse.Debug = func(msg interface{}) {
-            fs.Debugf("fuse", "%v", msg)
-        }
-    }
-
-    // Mount it
-    FS, errChan, unmount, err := mount(f, mountpoint)
-    if err != nil {
-        return errors.Wrap(err, "failed to mount FUSE fs")
-    }
-
-    sigInt := make(chan os.Signal, 1)
-    signal.Notify(sigInt, syscall.SIGINT, syscall.SIGTERM)
-    sigHup := make(chan os.Signal, 1)
-    signal.Notify(sigHup, syscall.SIGHUP)
-    atexit.IgnoreSignals()
-    atexit.Register(func() {
-        _ = unmount()
-    })
-
-    if err := sdnotify.Ready(); err != nil && err != sdnotify.ErrSdNotifyNoSocket {
-        return errors.Wrap(err, "failed to notify systemd")
-    }
-
-waitloop:
-    for {
-        select {
-        // umount triggered outside the app
-        case err = <-errChan:
-            break waitloop
-        // Program abort: umount
-        case <-sigInt:
-            err = unmount()
-            break waitloop
-        // user sent SIGHUP to clear the cache
-        case <-sigHup:
-            root, err := FS.Root()
-            if err != nil {
-                fs.Errorf(f, "Error reading root: %v", err)
-            } else {
-                root.ForgetAll()
-            }
-        }
-    }
-
-    _ = sdnotify.Stopping()
-    if err != nil {
-        return errors.Wrap(err, "failed to umount FUSE fs")
-    }
-
-    return nil
+    return errChan, unmount, nil
 }
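`--debug-fuse` works by installing a process-wide callback: `bazil.org/fuse` exposes a package-level `fuse.Debug` variable that receives every kernel message, and the hunk above swaps in a logger when `opt.DebugFUSE` is set. A sketch of the same hook pattern without the fuse dependency (names are illustrative):

```go
package main

import "fmt"

// Debug is a package-level hook, defaulting to a no-op - the same
// shape as bazil.org/fuse's fuse.Debug variable used above.
var Debug = func(msg interface{}) {}

// doWork emits protocol-level messages through the hook.
func doWork() {
    Debug("Lookup: file.txt")
    Debug("Read: 4096 bytes at offset 0")
}

func main() {
    doWork() // silent: the hook is a no-op by default

    // With --debug-fuse rclone swaps in a real logger; we do the same.
    Debug = func(msg interface{}) { fmt.Printf("fuse: %v\n", msg) }
    doWork() // now every message is printed
}
```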
@@ -1,7 +1,7 @@
 // Build for mount for unsupported platforms to stop go complaining
 // about "no buildable Go source files "

-// Invert the build constraint: linux,go1.13 darwin,go1.13 freebsd,go1.13
+// Invert the build constraint: linux,go1.13 freebsd,go1.13
 //
 // !((linux&&go1.13) || (darwin&&go1.13) || (freebsd&&go1.13))
 // == !(linux&&go1.13) && !(darwin&&go1.13) && !(freebsd&&go1.13))
@@ -33,13 +33,15 @@ import (
 // FOPEN_DIRECT_IO flag from their `Open` method. See directio_test.go
 // for an example.
 type FileHandle struct {
-    h vfs.Handle
+    h    vfs.Handle
+    fsys *FS
 }

 // Create a new FileHandle
-func newFileHandle(h vfs.Handle) *FileHandle {
+func newFileHandle(h vfs.Handle, fsys *FS) *FileHandle {
     return &FileHandle{
-        h: h,
+        h:    h,
+        fsys: fsys,
     }
 }
@@ -115,7 +117,7 @@ var _ fusefs.FileFsyncer = (*FileHandle)(nil)
 // is assumed, and the 'blocks' field is set accordingly.
 func (f *FileHandle) Getattr(ctx context.Context, out *fuse.AttrOut) (errno syscall.Errno) {
     defer log.Trace(f, "")("attr=%v, errno=%v", &out, &errno)
-    setAttrOut(f.h.Node(), out)
+    f.fsys.setAttrOut(f.h.Node(), out)
     return 0
 }
@@ -125,7 +127,7 @@ var _ fusefs.FileGetattrer = (*FileHandle)(nil)
 func (f *FileHandle) Setattr(ctx context.Context, in *fuse.SetAttrIn, out *fuse.AttrOut) (errno syscall.Errno) {
     defer log.Trace(f, "in=%v", in)("attr=%v, errno=%v", &out, &errno)
     var err error
-    setAttrOut(f.h.Node(), out)
+    f.fsys.setAttrOut(f.h.Node(), out)
     size, ok := in.GetSize()
     if ok {
         err = f.h.Truncate(int64(size))
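`setAttrOut` turning from a package function into a method on `*FS` is the same de-globalisation as the rest of this diff: attribute conversion now reads per-mount settings (such as the attribute timeout) from the filesystem it belongs to, so two mounts can differ. A simplified sketch of that refactor (all types here are illustrative):

```go
package main

import (
    "fmt"
    "time"
)

// attrOut mimics the fields we care about in fuse.AttrOut.
type attrOut struct {
    size    uint64
    timeout time.Duration
}

type node struct{ size uint64 }

type options struct{ attrTimeout time.Duration }

// FS carries per-mount options, so helpers hang off it as methods
// instead of reading package-level state.
type FS struct{ opt *options }

// setAttrOut is a method: two mounts with different --attr-timeout
// values no longer share one global setting.
func (fsys *FS) setAttrOut(n *node, out *attrOut) {
    out.size = n.size
    out.timeout = fsys.opt.attrTimeout
}

func main() {
    fast := &FS{opt: &options{attrTimeout: time.Second}}
    slow := &FS{opt: &options{attrTimeout: time.Minute}}
    var a, b attrOut
    n := &node{size: 42}
    fast.setAttrOut(n, &a)
    slow.setAttrOut(n, &b)
    fmt.Println(a.timeout, b.timeout) // 1s 1m0s
}
```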