Mirror of https://github.com/rclone/rclone.git (synced 2026-01-27 14:53:20 +00:00)

Compare commits: v1.53.2...fix-4293-v (2 commits)
Commits:

  8f36afac89
  6277b8bf03
.github/workflows/build.yml (vendored): 85 lines changed
@@ -19,23 +19,24 @@ jobs:
    strategy:
      fail-fast: false
      matrix:
        job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'go1.11', 'go1.12', 'go1.13', 'go1.14']
        job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'race', 'go1.11', 'go1.12', 'go1.13']

        include:
          - job_name: linux
            os: ubuntu-latest
            go: '1.15.x'
            go: '1.14.x'
            modules: 'on'
            gotags: cmount
            build_flags: '-include "^linux/"'
            check: true
            quicktest: true
            racequicktest: true
            deploy: true

          - job_name: mac
            os: macOS-latest
            go: '1.15.x'
            gotags: 'cmount'
            go: '1.14.x'
            modules: 'on'
            gotags: '' # cmount doesn't work on osx travis for some reason
            build_flags: '-include "^darwin/amd64" -cgo'
            quicktest: true
            racequicktest: true

@@ -43,7 +44,8 @@ jobs:

          - job_name: windows_amd64
            os: windows-latest
            go: '1.15.x'
            go: '1.14.x'
            modules: 'on'
            gotags: cmount
            build_flags: '-include "^windows/amd64" -cgo'
            quicktest: true

@@ -52,7 +54,8 @@ jobs:

          - job_name: windows_386
            os: windows-latest
            go: '1.15.x'
            go: '1.14.x'
            modules: 'on'
            gotags: cmount
            goarch: '386'
            cgo: '1'

@@ -62,55 +65,63 @@ jobs:

          - job_name: other_os
            os: ubuntu-latest
            go: '1.15.x'
            go: '1.14.x'
            modules: 'on'
            build_flags: '-exclude "^(windows/|darwin/amd64|linux/)"'
            compile_all: true
            deploy: true

          - job_name: race
            os: ubuntu-latest
            go: '1.14.x'
            modules: 'on'
            quicktest: true
            racequicktest: true

          - job_name: go1.11
            os: ubuntu-latest
            go: '1.11.x'
            modules: 'on'
            quicktest: true

          - job_name: go1.12
            os: ubuntu-latest
            go: '1.12.x'
            modules: 'on'
            quicktest: true

          - job_name: go1.13
            os: ubuntu-latest
            go: '1.13.x'
            modules: 'on'
            quicktest: true

          - job_name: go1.14
            os: ubuntu-latest
            go: '1.14.x'
            quicktest: true
            racequicktest: true

    name: ${{ matrix.job_name }}

    runs-on: ${{ matrix.os }}

    steps:
      - name: Checkout
        uses: actions/checkout@v2
        uses: actions/checkout@v1
        with:
          fetch-depth: 0
          # Checkout into a fixed path to avoid import path problems on go < 1.11
          path: ./src/github.com/rclone/rclone

      - name: Install Go
        uses: actions/setup-go@v2
        uses: actions/setup-go@v1
        with:
          stable: 'false'
          go-version: ${{ matrix.go }}

      - name: Set environment variables
        shell: bash
        run: |
          echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
          echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
          if [[ "${{ matrix.goarch }}" != "" ]]; then echo 'GOARCH=${{ matrix.goarch }}' >> $GITHUB_ENV ; fi
          if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi
          echo '::set-env name=GOPATH::${{ runner.workspace }}'
          echo '::add-path::${{ runner.workspace }}/bin'
          echo '::set-env name=GO111MODULE::${{ matrix.modules }}'
          echo '::set-env name=GOTAGS::${{ matrix.gotags }}'
          echo '::set-env name=BUILD_FLAGS::${{ matrix.build_flags }}'
          if [[ "${{ matrix.goarch }}" != "" ]]; then echo '::set-env name=GOARCH::${{ matrix.goarch }}' ; fi
          if [[ "${{ matrix.cgo }}" != "" ]]; then echo '::set-env name=CGO_ENABLED::${{ matrix.cgo }}' ; fi
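The hunk above captures the CI migration away from GitHub's deprecated `::set-env` and `::add-path` workflow commands toward appending to the files named by `GITHUB_ENV` and `GITHUB_PATH`. As a hedged illustration (not code from this repository), a Go helper running inside an Actions step could export a variable to later steps the same way:

```go
// Minimal sketch, assuming we run inside a GitHub Actions step: export
// a variable to later steps via the GITHUB_ENV file, the mechanism that
// replaced the deprecated ::set-env workflow command seen in this hunk.
package main

import (
	"fmt"
	"log"
	"os"
)

func exportEnv(key, value string) error {
	path := os.Getenv("GITHUB_ENV") // set by the Actions runner
	if path == "" {
		return fmt.Errorf("GITHUB_ENV not set; not running under Actions?")
	}
	f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
	if err != nil {
		return err
	}
	defer f.Close()
	// One KEY=value pair per line, the same format the workflow's
	// `echo 'GOTAGS=...' >> $GITHUB_ENV` lines produce.
	_, err = fmt.Fprintf(f, "%s=%s\n", key, value)
	return err
}

func main() {
	if err := exportEnv("GOTAGS", "cmount"); err != nil {
		log.Fatal(err)
	}
}
```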
      - name: Install Libraries on Linux
        shell: bash

@@ -124,8 +135,6 @@ jobs:
      - name: Install Libraries on macOS
        shell: bash
        run: |
          brew untap local/homebrew-openssl # workaround for https://github.com/actions/virtual-environments/issues/1811
          brew untap local/homebrew-python2 # workaround for https://github.com/actions/virtual-environments/issues/1811
          brew update
          brew cask install osxfuse
        if: matrix.os == 'macOS-latest'

@@ -135,10 +144,10 @@ jobs:
        run: |
          $ProgressPreference = 'SilentlyContinue'
          choco install -y winfsp zip
          echo "CPATH=C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
          Write-Host "::set-env name=CPATH::C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
          if ($env:GOARCH -eq "386") {
            choco install -y mingw --forcex86 --force
            echo "C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
            Write-Host "::add-path::C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
          }
          # Copy mingw32-make.exe to make.exe so the same command line
          # can be used on Windows as on macOS and Linux

@@ -158,22 +167,10 @@ jobs:
          printf "\n\nSystem environment:\n\n"
          env

      - name: Go module cache
        uses: actions/cache@v2
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-

      - name: Build rclone
        shell: bash
        run: |
          make

      - name: Run tests
        shell: bash
        run: |
          make
          make quicktest
        if: matrix.quicktest

@@ -225,8 +222,8 @@ jobs:
      - name: Set environment variables
        shell: bash
        run: |
          echo 'GOPATH=${{ runner.workspace }}' >> $GITHUB_ENV
          echo '${{ runner.workspace }}/bin' >> $GITHUB_PATH
          echo '::set-env name=GOPATH::${{ runner.workspace }}'
          echo '::add-path::${{ runner.workspace }}/bin'

      - name: Cross-compile rclone
        run: |

@@ -234,7 +231,7 @@ jobs:
          GO111MODULE=off go get -v github.com/karalabe/xgo # don't add to go.mod
          # xgo \
          #     -image=billziss/xgo-cgofuse \
          #     -targets=darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
          #     -targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
          #     -tags cmount \
          #     -dest build \
          #     .

@@ -245,9 +242,9 @@ jobs:
              .

      - name: Build rclone
        shell: bash
        run: |
          make
          docker pull golang
          docker run --rm -v "$PWD":/usr/src/rclone -w /usr/src/rclone golang go build -mod=mod -v

      - name: Upload artifacts
        run: |
.gitignore (vendored): 1 line changed
@@ -9,4 +9,3 @@ rclone.iml
*.test
*.log
*.iml
fuzz-build.zip
MANUAL.html (generated): 6362 lines changed (diff suppressed because it is too large)
MANUAL.txt (generated): 8051 lines changed (diff suppressed because it is too large)
Makefile: 59 lines changed
@@ -7,29 +7,27 @@ RELEASE_TAG := $(shell git tag -l --points-at HEAD)
VERSION := $(shell cat VERSION)
# Last tag on this branch
LAST_TAG := $(shell git describe --tags --abbrev=0)
# Next version
NEXT_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2+1,0}')
NEXT_PATCH_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2,$$3+1}')
# If we are working on a release, override branch to master
ifdef RELEASE_TAG
BRANCH := master
LAST_TAG := $(shell git describe --abbrev=0 --tags $(VERSION)^)
endif
TAG_BRANCH := .$(BRANCH)
BRANCH_PATH := branch/$(BRANCH)/
TAG_BRANCH := -$(BRANCH)
BRANCH_PATH := branch/
# If building HEAD or master then unset TAG_BRANCH and BRANCH_PATH
ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
TAG_BRANCH :=
BRANCH_PATH :=
endif
# Make version suffix -beta.NNNN.CCCCCCCC (N=Commit number, C=Commit)
VERSION_SUFFIX := -beta.$(shell git rev-list --count HEAD).$(shell git show --no-patch --no-notes --pretty='%h' HEAD)
# TAG is current version + commit number + commit + branch
# Make version suffix -DDD-gCCCCCCCC (D=commits since last relase, C=Commit) or blank
VERSION_SUFFIX := $(shell git describe --abbrev=8 --tags | perl -lpe 's/^v\d+\.\d+\.\d+//; s/^-(\d+)/"-".sprintf("%03d",$$1)/e;')
# TAG is current version + number of commits since last release + branch
TAG := $(VERSION)$(VERSION_SUFFIX)$(TAG_BRANCH)
ifdef RELEASE_TAG
TAG := $(RELEASE_TAG)
NEXT_VERSION := $(shell echo $(VERSION) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
ifndef RELEASE_TAG
TAG := $(TAG)-beta
endif
GO_VERSION := $(shell go version)
GO_FILES := $(shell go list ./... )
ifdef BETA_SUBDIR
BETA_SUBDIR := /$(BETA_SUBDIR)
endif
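For illustration (not part of the Makefile), the perl one-liner above turns `git describe --abbrev=8 --tags` output such as `v1.52.0-31-g8f36afac` into the documented `-DDD-gCCCCCCCC` suffix, and an exact tag into an empty suffix. A minimal Go sketch of the same transformation:

```go
// Sketch of the VERSION_SUFFIX rule: strip the leading vX.Y.Z, then
// zero-pad the commits-since-tag count to three digits, keeping the
// -g<hash> part. On an exact tag the suffix is blank.
package main

import (
	"fmt"
	"regexp"
)

var describeRE = regexp.MustCompile(`^v\d+\.\d+\.\d+(?:-(\d+)-g([0-9a-f]+))?$`)

func versionSuffix(describe string) string {
	m := describeRE.FindStringSubmatch(describe)
	if m == nil || m[1] == "" {
		return "" // exact tag (or unrecognised input): blank suffix
	}
	var n int
	fmt.Sscanf(m[1], "%d", &n)
	return fmt.Sprintf("-%03d-g%s", n, m[2])
}

func main() {
	fmt.Printf("%q\n", versionSuffix("v1.52.0-31-g8f36afac")) // "-031-g8f36afac"
	fmt.Printf("%q\n", versionSuffix("v1.52.0"))              // ""
}
```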
@@ -59,6 +57,7 @@ vars:
    @echo BRANCH="'$(BRANCH)'"
    @echo TAG="'$(TAG)'"
    @echo VERSION="'$(VERSION)'"
    @echo NEXT_VERSION="'$(NEXT_VERSION)'"
    @echo GO_VERSION="'$(GO_VERSION)'"
    @echo BETA_URL="'$(BETA_URL)'"

@@ -76,10 +75,10 @@ test: rclone test_all

# Quick test
quicktest:
    RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) ./...
    RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) $(GO_FILES)

racequicktest:
    RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...
    RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race $(GO_FILES)

# Do source code quality checks
check: rclone

@@ -165,11 +164,6 @@ validate_website: website
tarball:
    git archive -9 --format=tar.gz --prefix=rclone-$(TAG)/ -o build/rclone-$(TAG).tar.gz $(TAG)

vendorball:
    go mod vendor
    tar -zcf build/rclone-$(TAG)-vendor.tar.gz vendor
    rm -rf vendor

sign_upload:
    cd build && md5sum rclone-v* | gpg --clearsign > MD5SUMS
    cd build && sha1sum rclone-v* | gpg --clearsign > SHA1SUMS

@@ -227,33 +221,26 @@ fetch_binaries:
serve: website
    cd docs && hugo server -v -w --disableFastRender

tag: retag doc
    bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new
tag: doc
    @echo "Old tag is $(VERSION)"
    @echo "New tag is $(NEXT_VERSION)"
    echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)\"\n" | gofmt > fs/version.go
    echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
    echo "$(NEXT_VERSION)" > VERSION
    git tag -s -m "Version $(NEXT_VERSION)" $(NEXT_VERSION)
    bin/make_changelog.py $(LAST_TAG) $(NEXT_VERSION) > docs/content/changelog.md.new
    mv docs/content/changelog.md.new docs/content/changelog.md
    @echo "Edit the new changelog in docs/content/changelog.md"
    @echo "Then commit all the changes"
    @echo git commit -m \"Version $(VERSION)\" -a -v
    @echo git commit -m \"Version $(NEXT_VERSION)\" -a -v
    @echo "And finally run make retag before make cross etc"

retag:
    @echo "Version is $(VERSION)"
    git tag -f -s -m "Version $(VERSION)" $(VERSION)

startdev:
    @echo "Version is $(VERSION)"
    @echo "Next version is $(NEXT_VERSION)"
    echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)-DEV\"\n" | gofmt > fs/version.go
    echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
    echo "$(NEXT_VERSION)" > VERSION
    git commit -m "Start $(NEXT_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html

startstable:
    @echo "Version is $(VERSION)"
    @echo "Next stable version is $(NEXT_PATCH_VERSION)"
    echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_PATCH_VERSION)-DEV\"\n" | gofmt > fs/version.go
    echo -n "$(NEXT_PATCH_VERSION)" > docs/layouts/partials/version.html
    echo "$(NEXT_PATCH_VERSION)" > VERSION
    git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
    echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(VERSION)-DEV\"\n" | gofmt > fs/version.go
    git commit -m "Start $(VERSION)-DEV development" fs/version.go

winzip:
    zip -9 rclone-$(TAG).zip rclone.exe
README.md

@@ -64,7 +64,6 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
  * StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
  * SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
  * Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
  * Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
  * Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
  * WebDAV [:page_facing_up:](https://rclone.org/webdav/)
  * Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
RELEASE.md: 56 lines changed

@@ -9,7 +9,7 @@ This file describes how to make the various kinds of releases

## Making a release

  * git checkout master # see below for stable branch
  * git checkout master
  * git pull
  * git status - make sure everything is checked in
  * Check GitHub actions build for master is Green

@@ -25,13 +25,12 @@ This file describes how to make the various kinds of releases
  * # Wait for the GitHub builds to complete then...
  * make fetch_binaries
  * make tarball
  * make vendorball
  * make sign_upload
  * make check_sign
  * make upload
  * make upload_website
  * make upload_github
  * make startdev # make startstable for stable branch
  * make startdev
  * # announce with forum post, twitter post, patreon post

Early in the next release cycle update the dependencies

@@ -42,35 +41,60 @@ Early in the next release cycle update the dependencies
  * git add new files
  * git commit -a -v

If `make update` fails with errors like this:

```
# github.com/cpuguy83/go-md2man/md2man
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:11:16: undefined: blackfriday.EXTENSION_NO_INTRA_EMPHASIS
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:12:16: undefined: blackfriday.EXTENSION_TABLES
```

Can be fixed with

  * GO111MODULE=on go get -u github.com/russross/blackfriday@v1.5.2
  * GO111MODULE=on go mod tidy

## Making a point release

If rclone needs a point release due to some horrendous bug:

Set vars
First make the release branch. If this is a second point release then
this will be done already.

  * BASE_TAG=v1.XX # eg v1.52
  * NEW_TAG=${BASE_TAG}.Y # eg v1.52.1
  * echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1

First make the release branch. If this is a second point release then
this will be done already.

  * git branch ${BASE_TAG} ${BASE_TAG}-stable
  * git co ${BASE_TAG}-stable
  * make startstable

Now

  * git co ${BASE_TAG}-stable
  * git cherry-pick any fixes
  * Do the steps as above
  * make startstable
  * NB this overwrites the current beta so we need to do this - FIXME is this true any more?
  * Test (see above)
  * make NEXT_VERSION=${NEW_TAG} tag
  * edit docs/content/changelog.md
  * make TAG=${NEW_TAG} doc
  * git commit -a -v -m "Version ${NEW_TAG}"
  * git tag -d ${NEW_TAG}
  * git tag -s -m "Version ${NEW_TAG}" ${NEW_TAG}
  * git push --tags -u origin ${BASE_TAG}-stable
  * Wait for builds to complete
  * make BRANCH_PATH= TAG=${NEW_TAG} fetch_binaries
  * make TAG=${NEW_TAG} tarball
  * make TAG=${NEW_TAG} sign_upload
  * make TAG=${NEW_TAG} check_sign
  * make TAG=${NEW_TAG} upload
  * make TAG=${NEW_TAG} upload_website
  * make TAG=${NEW_TAG} upload_github
  * NB this overwrites the current beta so we need to do this
  * git co master
  * # cherry pick the changes to the changelog
  * git checkout ${BASE_TAG}-stable docs/content/changelog.md
  * git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
  * make VERSION=${NEW_TAG} startdev
  * # cherry pick the changes to the changelog and VERSION
  * git checkout ${BASE_TAG}-stable VERSION docs/content/changelog.md
  * git commit --amend
  * git push
  * Announce!

## Making a manual build of docker
backend/alias/alias.go

@@ -5,7 +5,6 @@ import (
	"strings"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/cache"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fspath"

@@ -47,5 +46,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	if strings.HasPrefix(opt.Remote, name+":") {
		return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
	}
	return cache.Get(fspath.JoinRootPath(opt.Remote, root))
	fsInfo, configName, fsPath, config, err := fs.ConfigFs(opt.Remote)
	if err != nil {
		return nil, err
	}
	return fsInfo.NewFs(configName, fspath.JoinRootPath(fsPath, root), config)
}
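The alias hunk above replaces a one-line `cache.Get` with an explicit `fs.ConfigFs` lookup, so the wrapped remote is constructed directly rather than fetched from the Fs cache. A rough sketch of the name/path split such a lookup performs (helper name invented; edge cases like Windows drive letters ignored):

```go
// Hypothetical sketch: "myremote:dir/file" splits into the config
// section "myremote" plus the path "dir/file"; a spec without a colon
// is a plain local path with no config section.
package main

import (
	"fmt"
	"strings"
)

func splitRemote(spec string) (configName, fsPath string) {
	if i := strings.Index(spec, ":"); i >= 0 {
		return spec[:i], spec[i+1:]
	}
	return "", spec // no remote name: local filesystem path
}

func main() {
	name, path := splitRemote("myremote:dir/file")
	fmt.Println(name, path) // myremote dir/file
}
```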
backend/amazonclouddrive/amazonclouddrive.go

@@ -76,7 +76,23 @@ func init() {
			log.Fatalf("Failed to configure token: %v", err)
		}
	},
	Options: append(oauthutil.SharedOptions, []fs.Option{{
	Options: []fs.Option{{
		Name:     config.ConfigClientID,
		Help:     "Amazon Application Client ID.",
		Required: true,
	}, {
		Name:     config.ConfigClientSecret,
		Help:     "Amazon Application Client Secret.",
		Required: true,
	}, {
		Name:     config.ConfigAuthURL,
		Help:     "Auth server URL.\nLeave blank to use Amazon's.",
		Advanced: true,
	}, {
		Name:     config.ConfigTokenURL,
		Help:     "Token server url.\nleave blank to use Amazon's.",
		Advanced: true,
	}, {
		Name: "checkpoint",
		Help: "Checkpoint for internal polling (debug).",
		Hide: fs.OptionHideBoth,

@@ -127,7 +143,7 @@ underlying S3 storage.`,
		// Encode invalid UTF-8 bytes as json doesn't handle them properly.
		Default: (encoder.Base |
			encoder.EncodeInvalidUtf8),
	}}...),
	}},
	})
}

@@ -921,8 +937,8 @@ func (f *Fs) Hashes() hash.Set {
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
	return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
	return f.purgeCheck(ctx, "", false)
}

// ------------------------------------------------------------
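Several backends in this compare flip between the old `Purge(ctx)` and the v1.53-era `Purge(ctx, dir)` signatures. rclone discovers Purge as an optional interface, so a minimal sketch of the feature-detection pattern (types invented here, not rclone's actual `fs` package) looks like this:

```go
// Minimal sketch: feature-detect an optional Purge method and fall back
// when a backend does not implement it. The Purger signature matches
// the v1.53 side of these hunks; fix-4293-v still uses Purge(ctx).
package main

import (
	"context"
	"errors"
	"fmt"
)

type Purger interface {
	Purge(ctx context.Context, dir string) error
}

var errCantPurge = errors.New("can't purge: fall back to recursive delete")

func purge(ctx context.Context, backend interface{}, dir string) error {
	p, ok := backend.(Purger)
	if !ok {
		return errCantPurge
	}
	return p.Purge(ctx, dir)
}

type fakeFs struct{}

func (fakeFs) Purge(ctx context.Context, dir string) error {
	fmt.Println("purging", dir)
	return nil
}

func main() {
	_ = purge(context.Background(), fakeFs{}, "subdir")          // purging subdir
	fmt.Println(purge(context.Background(), struct{}{}, "subdir")) // fallback error
}
```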
backend/azureblob/azureblob.go

@@ -1,6 +1,6 @@
// Package azureblob provides an interface to the Microsoft Azure blob object storage system

// +build !plan9,!solaris,!js,go1.13
// +build !plan9,!solaris,go1.13

package azureblob

@@ -967,7 +967,8 @@ func (f *Fs) Hashes() hash.Set {
}

// Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge(ctx context.Context, dir string) error {
func (f *Fs) Purge(ctx context.Context) error {
	dir := "" // forward compat!
	container, directory := f.split(dir)
	if container == "" || directory != "" {
		// Delegate to caller if not root of a container

backend/azureblob/azureblob_internal_test.go

@@ -1,4 +1,4 @@
// +build !plan9,!solaris,!js,go1.13
// +build !plan9,!solaris,go1.13

package azureblob

backend/azureblob/azureblob_test.go

@@ -1,6 +1,6 @@
// Test AzureBlob filesystem interface

// +build !plan9,!solaris,!js,go1.13
// +build !plan9,!solaris,go1.13

package azureblob

backend/azureblob/azureblob_unsupported.go

@@ -1,6 +1,6 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build plan9 solaris js !go1.13
// +build plan9 solaris !go1.13

package azureblob
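These azureblob hunks only touch old-style `// +build` constraint lines (dropping `!js` on this branch). In that syntax a comma means AND, a space means OR, and stacked `+build` lines are ANDed together. A small illustrative program (not from the diff) spelling out the same conditions at run time:

```go
// Illustrative only: evaluating at run time what the +build lines
// decide at compile time. Comma = AND, space = OR, stacked lines = AND.
package main

import (
	"fmt"
	"runtime"
)

func main() {
	// "// +build !plan9,!solaris,go1.13" reads roughly
	// "not plan9 AND not solaris AND go1.13+" (the go1.13 part is
	// only checkable at compile time, so it is omitted here).
	supported := runtime.GOOS != "plan9" && runtime.GOOS != "solaris"
	fmt.Println("azureblob buildable here:", supported)

	// "// +build plan9 solaris !go1.13" is the complement,
	// "plan9 OR solaris OR old Go", carried by the _unsupported.go
	// stub so the package always has at least one buildable file.
	fmt.Println("stub buildable here:", !supported)
}
```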
backend/b2/b2.go

@@ -1143,8 +1143,7 @@ func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
// if oldOnly is true then it deletes only non current files.
//
// Implemented here so we can make sure we delete old versions.
func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
	bucket, directory := f.split(dir)
func (f *Fs) purge(ctx context.Context, bucket, directory string, oldOnly bool) error {
	if bucket == "" {
		return errors.New("can't purge from root")
	}

@@ -1219,19 +1218,19 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
	wg.Wait()

	if !oldOnly {
		checkErr(f.Rmdir(ctx, dir))
		checkErr(f.Rmdir(ctx, ""))
	}
	return errReturn
}

// Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge(ctx context.Context, dir string) error {
	return f.purge(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
	return f.purge(ctx, f.rootBucket, f.rootDirectory, false)
}

// CleanUp deletes all the hidden files.
func (f *Fs) CleanUp(ctx context.Context) error {
	return f.purge(ctx, "", true)
	return f.purge(ctx, f.rootBucket, f.rootDirectory, true)
}

// copy does a server side copy from dstObj <- srcObj

@@ -1673,8 +1672,8 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
	opts.RootURL = o.fs.opt.DownloadURL
}

// Download by id if set and not using DownloadURL otherwise by name
if o.id != "" && o.fs.opt.DownloadURL == "" {
// Download by id if set otherwise by name
if o.id != "" {
	opts.Path += "/b2api/v1/b2_download_file_by_id?fileId=" + urlEncode(o.id)
} else {
	bucket, bucketPath := o.split()
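The `Open` hunk above shows v1.53 skipping B2's download-by-id endpoint whenever a custom `download_url` is configured, since a CDN front end is addressed by file name. A hypothetical sketch of that decision (helper name and the by-name path are illustrative assumptions, not rclone code):

```go
// Hypothetical helper showing the selection logic: with a custom
// download URL configured, the by-id B2 API endpoint is skipped and a
// name-based path is used instead.
package main

import (
	"fmt"
	"net/url"
)

func downloadPath(fileID, bucket, name, customDownloadURL string) string {
	if fileID != "" && customDownloadURL == "" {
		return "/b2api/v1/b2_download_file_by_id?fileId=" + url.QueryEscape(fileID)
	}
	return "/file/" + bucket + "/" + name // assumed by-name form
}

func main() {
	fmt.Println(downloadPath("fileid123", "bkt", "a.txt", ""))
	fmt.Println(downloadPath("fileid123", "bkt", "a.txt", "https://cdn.example.com"))
}
```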
backend/box/box.go

@@ -87,23 +87,26 @@ func init() {
	Config: func(name string, m configmap.Mapper) {
		jsonFile, ok := m.Get("box_config_file")
		boxSubType, boxSubTypeOk := m.Get("box_sub_type")
		boxAccessToken, boxAccessTokenOk := m.Get("access_token")
		var err error
		// If using box config.json, use JWT auth
		if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
			err = refreshJWTToken(jsonFile, boxSubType, name, m)
			if err != nil {
				log.Fatalf("Failed to configure token with jwt authentication: %v", err)
			}
		// Else, if not using an access token, use oauth2
		} else if boxAccessToken == "" || !boxAccessTokenOk {
		} else {
			err = oauthutil.Config("box", name, m, oauthConfig, nil)
			if err != nil {
				log.Fatalf("Failed to configure token with oauth authentication: %v", err)
			}
		}
	},
	Options: append(oauthutil.SharedOptions, []fs.Option{{
	Options: []fs.Option{{
		Name: config.ConfigClientID,
		Help: "Box App Client Id.\nLeave blank normally.",
	}, {
		Name: config.ConfigClientSecret,
		Help: "Box App Client Secret\nLeave blank normally.",
	}, {
		Name:    "root_folder_id",
		Help:    "Fill in for rclone to use a non root folder as its starting point.",
		Default: "0",

@@ -111,9 +114,6 @@ func init() {
	}, {
		Name: "box_config_file",
		Help: "Box App config.json location\nLeave blank normally." + env.ShellExpandHelp,
	}, {
		Name: "access_token",
		Help: "Box App Primary Access Token\nLeave blank normally.",
	}, {
		Name:    "box_sub_type",
		Default: "user",

@@ -149,7 +149,7 @@ func init() {
			encoder.EncodeBackSlash |
			encoder.EncodeRightSpace |
			encoder.EncodeInvalidUtf8),
	}}...),
	}},
	})
}

@@ -247,7 +247,6 @@ type Options struct {
	CommitRetries int                  `config:"commit_retries"`
	Enc           encoder.MultiEncoder `config:"encoding"`
	RootFolderID  string               `config:"root_folder_id"`
	AccessToken   string               `config:"access_token"`
}

// Fs represents a remote box

@@ -386,22 +385,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	}

	root = parsePath(root)

	client := fshttp.NewClient(fs.Config)
	var ts *oauthutil.TokenSource
	// If not using an accessToken, create an oauth client and tokensource
	if opt.AccessToken == "" {
		client, ts, err = oauthutil.NewClient(name, m, oauthConfig)
		if err != nil {
			return nil, errors.Wrap(err, "failed to configure Box")
		}
	oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
	if err != nil {
		return nil, errors.Wrap(err, "failed to configure Box")
	}

	f := &Fs{
		name:        name,
		root:        root,
		opt:         *opt,
		srv:         rest.NewClient(client).SetRoot(rootURL),
		srv:         rest.NewClient(oAuthClient).SetRoot(rootURL),
		pacer:       fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
		uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
	}

@@ -411,30 +404,23 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	}).Fill(f)
	f.srv.SetErrorHandler(errorHandler)

	// If using an accessToken, set the Authorization header
	if f.opt.AccessToken != "" {
		f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken)
	}

	jsonFile, ok := m.Get("box_config_file")
	boxSubType, boxSubTypeOk := m.Get("box_sub_type")

	if ts != nil {
		// If using box config.json and JWT, renewing should just refresh the token and
		// should do so whether there are uploads pending or not.
		if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
			f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
				err := refreshJWTToken(jsonFile, boxSubType, name, m)
				return err
			})
			f.tokenRenewer.Start()
		} else {
			// Renew the token in the background
			f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
				_, err := f.readMetaDataForPath(ctx, "")
				return err
			})
		}
	// If using box config.json and JWT, renewing should just refresh the token and
	// should do so whether there are uploads pending or not.
	if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
		f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
			err := refreshJWTToken(jsonFile, boxSubType, name, m)
			return err
		})
		f.tokenRenewer.Start()
	} else {
		// Renew the token in the background
		f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
			_, err := f.readMetaDataForPath(ctx, "")
			return err
		})
	}

	// Get rootFolderID

@@ -856,8 +842,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
	return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
	return f.purgeCheck(ctx, "", false)
}

// move a file or folder

@@ -1272,10 +1258,8 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	if o.fs.tokenRenewer != nil {
		o.fs.tokenRenewer.Start()
		defer o.fs.tokenRenewer.Stop()
	}
	o.fs.tokenRenewer.Start()
	defer o.fs.tokenRenewer.Stop()

	size := src.Size()
	modTime := src.ModTime(ctx)
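The `Update` hunk shows v1.53 guarding `tokenRenewer` with a nil check: once the static access-token path stopped creating a renewer, calling `Start` unconditionally would dereference a nil pointer. A minimal sketch (types invented) of why the guard matters:

```go
// Minimal sketch: when the backend is configured with a static access
// token no renewer is created, so methods must check before starting it.
package main

import "fmt"

type renewer struct{ name string }

func (r *renewer) Start() { fmt.Println("renewing", r.name) } // panics if r is nil
func (r *renewer) Stop()  { fmt.Println("stopped", r.name) }

type object struct{ tokenRenewer *renewer }

func (o *object) update() {
	if o.tokenRenewer != nil { // nil when a static access token is used
		o.tokenRenewer.Start()
		defer o.tokenRenewer.Stop()
	}
	fmt.Println("uploading")
}

func main() {
	(&object{}).update()                              // no renewer: no panic
	(&object{tokenRenewer: &renewer{"box"}}).update() // renewal wraps the upload
}
```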
backend/cache/cache.go (vendored): 29 lines changed

@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9

package cache

@@ -361,10 +361,15 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
		return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
	}

	remotePath := fspath.JoinRootPath(opt.Remote, rootPath)
	wrappedFs, wrapErr := cache.Get(remotePath)
	wInfo, wName, wPath, wConfig, err := fs.ConfigFs(opt.Remote)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", opt.Remote)
	}

	remotePath := fspath.JoinRootPath(wPath, rootPath)
	wrappedFs, wrapErr := wInfo.NewFs(wName, remotePath, wConfig)
	if wrapErr != nil && wrapErr != fs.ErrorIsFile {
		return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
		return nil, errors.Wrapf(wrapErr, "failed to make remote %s:%s to wrap", wName, remotePath)
	}
	var fsErr error
	fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)

@@ -385,7 +390,6 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
		cleanupChan:     make(chan bool, 1),
		notifiedRemotes: make(map[string]bool),
	}
	cache.PinUntilFinalized(f.Fs, f)
	f.rateLimiter = rate.NewLimiter(rate.Limit(float64(opt.Rps)), opt.TotalWorkers)

	f.plexConnector = &plexConnector{}

@@ -1698,20 +1702,17 @@ func (f *Fs) Hashes() hash.Set {
	return f.Fs.Hashes()
}

// Purge all files in the directory
func (f *Fs) Purge(ctx context.Context, dir string) error {
	if dir == "" {
		// FIXME this isn't quite right as it should purge the dir prefix
		fs.Infof(f, "purging cache")
		f.cache.Purge()
	}
// Purge all files in the root and the root directory
func (f *Fs) Purge(ctx context.Context) error {
	fs.Infof(f, "purging cache")
	f.cache.Purge()

	do := f.Fs.Features().Purge
	if do == nil {
		return fs.ErrorCantPurge
		return nil
	}

	err := do(ctx, dir)
	err := do(ctx)
	if err != nil {
		return err
	}
backend/cache/cache_internal_test.go (vendored): 356 lines changed

@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9
// +build !race

package cache_test

@@ -16,6 +16,7 @@ import (
	"os"
	"path"
	"path/filepath"
	"runtime"
	"runtime/debug"
	"strings"
	"testing"

@@ -30,10 +31,13 @@ import (
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/object"
	"github.com/rclone/rclone/fs/rc"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/testy"
	"github.com/rclone/rclone/lib/random"
	"github.com/rclone/rclone/vfs"
	"github.com/rclone/rclone/vfs/vfsflags"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

@@ -49,7 +53,9 @@ const (

var (
	remoteName string
	mountDir   string
	uploadDir  string
	useMount   bool
	runInstance *run
	errNotSupported = errors.New("not supported")
	decryptedToEncryptedRemotes = map[string]string{

@@ -85,7 +91,9 @@ var (

func init() {
	goflag.StringVar(&remoteName, "remote-internal", "TestInternalCache", "Remote to test with, defaults to local filesystem")
	goflag.StringVar(&mountDir, "mount-dir-internal", "", "")
	goflag.StringVar(&uploadDir, "upload-dir-internal", "", "")
	goflag.BoolVar(&useMount, "cache-use-mount", false, "Test only with mount")
}

// TestMain drives the tests

@@ -93,7 +101,7 @@ func TestMain(m *testing.M) {
	goflag.Parse()
	var rc int

	log.Printf("Running with the following params: \n remote: %v", remoteName)
	log.Printf("Running with the following params: \n remote: %v, \n mount: %v", remoteName, useMount)
	runInstance = newRun()
	rc = m.Run()
	os.Exit(rc)

@@ -266,6 +274,31 @@ func TestInternalObjNotFound(t *testing.T) {
	require.Nil(t, obj)
}

func TestInternalRemoteWrittenFileFoundInMount(t *testing.T) {
	if !runInstance.useMount {
		t.Skip("test needs mount mode")
	}
	id := fmt.Sprintf("tirwffim%v", time.Now().Unix())
	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
	defer runInstance.cleanupFs(t, rootFs, boltDb)

	cfs, err := runInstance.getCacheFs(rootFs)
	require.NoError(t, err)

	var testData []byte
	if runInstance.rootIsCrypt {
		testData, err = base64.StdEncoding.DecodeString(cryptedTextBase64)
		require.NoError(t, err)
	} else {
		testData = []byte("test content")
	}

	runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test"), testData)
	data, err := runInstance.readDataFromRemote(t, rootFs, "test", 0, int64(len([]byte("test content"))), false)
	require.NoError(t, err)
	require.Equal(t, "test content", string(data))
}

func TestInternalCachedWrittenContentMatches(t *testing.T) {
	testy.SkipUnreliable(t)
	id := fmt.Sprintf("ticwcm%v", time.Now().Unix())

@@ -661,6 +694,79 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
	require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
}

func TestInternalChangeSeenAfterRc(t *testing.T) {
	cacheExpire := rc.Calls.Get("cache/expire")
	assert.NotNil(t, cacheExpire)

	id := fmt.Sprintf("ticsarc%v", time.Now().Unix())
	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
	defer runInstance.cleanupFs(t, rootFs, boltDb)

	if !runInstance.useMount {
		t.Skipf("needs mount")
	}
	if !runInstance.wrappedIsExternal {
		t.Skipf("needs drive")
	}

	cfs, err := runInstance.getCacheFs(rootFs)
	require.NoError(t, err)
	chunkSize := cfs.ChunkSize()

	// create some rand test data
	testData := randStringBytes(int(chunkSize*4 + chunkSize/2))
	runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)

	// update in the wrapped fs
	o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
	require.NoError(t, err)
	wrappedTime := time.Now().Add(-1 * time.Hour)
	err = o.SetModTime(context.Background(), wrappedTime)
	require.NoError(t, err)

	// get a new instance from the cache
	co, err := rootFs.NewObject(context.Background(), "data.bin")
	require.NoError(t, err)
	require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String())

	// Call the rc function
	m, err := cacheExpire.Fn(context.Background(), rc.Params{"remote": "data.bin"})
	require.NoError(t, err)
	require.Contains(t, m, "status")
	require.Contains(t, m, "message")
	require.Equal(t, "ok", m["status"])
	require.Contains(t, m["message"], "cached file cleared")

	// get a new instance from the cache
	co, err = rootFs.NewObject(context.Background(), "data.bin")
	require.NoError(t, err)
	require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
	_, err = runInstance.list(t, rootFs, "")
	require.NoError(t, err)

	// create some rand test data
	testData2 := randStringBytes(int(chunkSize))
	runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test2"), testData2)

	// list should have 1 item only
	li1, err := runInstance.list(t, rootFs, "")
	require.NoError(t, err)
	require.Len(t, li1, 1)

	// Call the rc function
	m, err = cacheExpire.Fn(context.Background(), rc.Params{"remote": "/"})
	require.NoError(t, err)
	require.Contains(t, m, "status")
	require.Contains(t, m, "message")
	require.Equal(t, "ok", m["status"])
	require.Contains(t, m["message"], "cached directory cleared")

	// list should have 2 items now
	li2, err := runInstance.list(t, rootFs, "")
	require.NoError(t, err)
	require.Len(t, li2, 2)
}

func TestInternalCacheWrites(t *testing.T) {
	id := "ticw"
	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})

@@ -808,9 +914,15 @@ func TestInternalBug2117(t *testing.T) {
type run struct {
	okDiff            time.Duration
	runDefaultCfgMap  configmap.Simple
	mntDir            string
	tmpUploadDir      string
	useMount          bool
	isMounted         bool
	rootIsCrypt       bool
	wrappedIsExternal bool
	unmountFn         func() error
	unmountRes        chan error
	vfs               *vfs.VFS
	tempFiles         []*os.File
	dbPath            string
	chunkPath         string

@@ -820,7 +932,9 @@ type run struct {
func newRun() *run {
	var err error
	r := &run{
		okDiff: time.Second * 9, // really big diff here but the build machines seem to be slow. need a different way for this
		okDiff:    time.Second * 9, // really big diff here but the build machines seem to be slow. need a different way for this
		useMount:  useMount,
		isMounted: false,
	}

	// Read in all the defaults for all the options

@@ -833,6 +947,32 @@ func newRun() *run {
		r.runDefaultCfgMap.Set(option.Name, fmt.Sprint(option.Default))
	}

	if mountDir == "" {
		if runtime.GOOS != "windows" {
			r.mntDir, err = ioutil.TempDir("", "rclonecache-mount")
			if err != nil {
				log.Fatalf("Failed to create mount dir: %v", err)
				return nil
			}
		} else {
			// Find a free drive letter
			drive := ""
			for letter := 'E'; letter <= 'Z'; letter++ {
				drive = string(letter) + ":"
				_, err := os.Stat(drive + "\\")
				if os.IsNotExist(err) {
					goto found
				}
			}
			log.Print("Couldn't find free drive letter for test")
		found:
			r.mntDir = drive
		}
	} else {
		r.mntDir = mountDir
	}
	log.Printf("Mount Dir: %v", r.mntDir)

	if uploadDir == "" {
		r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
		if err != nil {

@@ -946,21 +1086,33 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
	}

	if purge {
		_ = f.Features().Purge(context.Background(), "")
		_ = f.Features().Purge(context.Background())
		require.NoError(t, err)
	}
	err = f.Mkdir(context.Background(), "")
	require.NoError(t, err)
	if r.useMount && !r.isMounted {
		r.mountFs(t, f)
	}

	return f, boltDb
}

func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
	err := f.Features().Purge(context.Background(), "")
	if r.useMount && r.isMounted {
		r.unmountFs(t, f)
	}

	err := f.Features().Purge(context.Background())
	require.NoError(t, err)
	cfs, err := r.getCacheFs(f)
	require.NoError(t, err)
	cfs.StopBackgroundRunners()

	if r.useMount && runtime.GOOS != "windows" {
		err = os.RemoveAll(r.mntDir)
		require.NoError(t, err)
	}
	err = os.RemoveAll(r.tmpUploadDir)
	require.NoError(t, err)

@@ -1000,11 +1152,37 @@ func (r *run) writeObjectString(t *testing.T, f fs.Fs, remote, content string) f
}

func (r *run) writeRemoteBytes(t *testing.T, f fs.Fs, remote string, data []byte) {
	r.writeObjectBytes(t, f, remote, data)
	var err error

	if r.useMount {
		err = r.retryBlock(func() error {
			return ioutil.WriteFile(path.Join(r.mntDir, remote), data, 0600)
		}, 3, time.Second*3)
		require.NoError(t, err)
		r.vfs.WaitForWriters(10 * time.Second)
	} else {
		r.writeObjectBytes(t, f, remote, data)
	}
}

func (r *run) writeRemoteReader(t *testing.T, f fs.Fs, remote string, in io.ReadCloser) {
	r.writeObjectReader(t, f, remote, in)
	defer func() {
		_ = in.Close()
	}()

	if r.useMount {
		out, err := os.Create(path.Join(r.mntDir, remote))
		require.NoError(t, err)
		defer func() {
			_ = out.Close()
		}()

		_, err = io.Copy(out, in)
		require.NoError(t, err)
		r.vfs.WaitForWriters(10 * time.Second)
	} else {
		r.writeObjectReader(t, f, remote, in)
	}
}

func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object {

@@ -1021,6 +1199,10 @@ func (r *run) writeObjectReader(t *testing.T, f fs.Fs, remote string, in io.Read
	objInfo := object.NewStaticObjectInfo(remote, modTime, -1, true, nil, f)
	obj, err := f.Put(context.Background(), in, objInfo)
	require.NoError(t, err)
	if r.useMount {
		r.vfs.WaitForWriters(10 * time.Second)
	}

	return obj
}

@@ -1028,16 +1210,26 @@ func (r *run) updateObjectRemote(t *testing.T, f fs.Fs, remote string, data1 []b
	var err error
	var obj fs.Object

	in1 := bytes.NewReader(data1)
	in2 := bytes.NewReader(data2)
	objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
	objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
	if r.useMount {
		err = ioutil.WriteFile(path.Join(r.mntDir, remote), data1, 0600)
		require.NoError(t, err)
		r.vfs.WaitForWriters(10 * time.Second)
		err = ioutil.WriteFile(path.Join(r.mntDir, remote), data2, 0600)
		require.NoError(t, err)
		r.vfs.WaitForWriters(10 * time.Second)
		obj, err = f.NewObject(context.Background(), remote)
	} else {
		in1 := bytes.NewReader(data1)
		in2 := bytes.NewReader(data2)
		objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
		objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)

		obj, err = f.Put(context.Background(), in1, objInfo1)
		require.NoError(t, err)
		obj, err = f.NewObject(context.Background(), remote)
		require.NoError(t, err)
		err = obj.Update(context.Background(), in2, objInfo2)
	obj, err = f.Put(context.Background(), in1, objInfo1)
	require.NoError(t, err)
	obj, err = f.NewObject(context.Background(), remote)
	require.NoError(t, err)
	err = obj.Update(context.Background(), in2, objInfo2)
	}
	require.NoError(t, err)

	return obj

@@ -1047,12 +1239,30 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
	size := end - offset
	checkSample := make([]byte, size)

	co, err := f.NewObject(context.Background(), remote)
	if err != nil {
		return checkSample, err
	if r.useMount {
		f, err := os.Open(path.Join(r.mntDir, remote))
		defer func() {
			_ = f.Close()
		}()
		if err != nil {
			return checkSample, err
		}
		_, _ = f.Seek(offset, io.SeekStart)
		totalRead, err := io.ReadFull(f, checkSample)
		checkSample = checkSample[:totalRead]
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			err = nil
		}
		if err != nil {
			return checkSample, err
		}
	} else {
		co, err := f.NewObject(context.Background(), remote)
		if err != nil {
			return checkSample, err
		}
		checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
	}
	checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)

	if !noLengthCheck && size != int64(len(checkSample)) {
		return checkSample, errors.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
	}

@@ -1075,19 +1285,28 @@ func (r *run) readDataFromObj(t *testing.T, o fs.Object, offset, end int64, noLe
}

func (r *run) mkdir(t *testing.T, f fs.Fs, remote string) {
	err := f.Mkdir(context.Background(), remote)
	var err error
	if r.useMount {
		err = os.Mkdir(path.Join(r.mntDir, remote), 0700)
	} else {
		err = f.Mkdir(context.Background(), remote)
	}
	require.NoError(t, err)
}

func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
	var err error

	var obj fs.Object
	obj, err = f.NewObject(context.Background(), remote)
	if err != nil {
		err = f.Rmdir(context.Background(), remote)
	if r.useMount {
		err = os.Remove(path.Join(r.mntDir, remote))
	} else {
		err = obj.Remove(context.Background())
		var obj fs.Object
		obj, err = f.NewObject(context.Background(), remote)
		if err != nil {
			err = f.Rmdir(context.Background(), remote)
		} else {
			err = obj.Remove(context.Background())
		}
	}

	return err

@@ -1096,10 +1315,18 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error) {
	var err error
	var l []interface{}
	var list fs.DirEntries
	list, err = f.List(context.Background(), remote)
	for _, ll := range list {
		l = append(l, ll)
	if r.useMount {
		var list []os.FileInfo
		list, err = ioutil.ReadDir(path.Join(r.mntDir, remote))
		for _, ll := range list {
			l = append(l, ll)
		}
	} else {
		var list fs.DirEntries
		list, err = f.List(context.Background(), remote)
		for _, ll := range list {
			l = append(l, ll)
		}
	}
	return l, err
}

@@ -1128,7 +1355,13 @@ func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
	var err error

	if rootFs.Features().DirMove != nil {
	if runInstance.useMount {
		err = os.Rename(path.Join(runInstance.mntDir, src), path.Join(runInstance.mntDir, dst))
		if err != nil {
			return err
		}
		r.vfs.WaitForWriters(10 * time.Second)
	} else if rootFs.Features().DirMove != nil {
		err = rootFs.Features().DirMove(context.Background(), rootFs, src, dst)
		if err != nil {
			return err

@@ -1144,7 +1377,13 @@ func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
	var err error

	if rootFs.Features().Move != nil {
	if runInstance.useMount {
		err = os.Rename(path.Join(runInstance.mntDir, src), path.Join(runInstance.mntDir, dst))
		if err != nil {
			return err
		}
		r.vfs.WaitForWriters(10 * time.Second)
	} else if rootFs.Features().Move != nil {
		obj1, err := rootFs.NewObject(context.Background(), src)
		if err != nil {
			return err

@@ -1164,7 +1403,13 @@ func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
	var err error

	if rootFs.Features().Copy != nil {
	if r.useMount {
		err = r.copyFile(t, rootFs, path.Join(r.mntDir, src), path.Join(r.mntDir, dst))
		if err != nil {
			return err
		}
		r.vfs.WaitForWriters(10 * time.Second)
	} else if rootFs.Features().Copy != nil {
		obj, err := rootFs.NewObject(context.Background(), src)
		if err != nil {
			return err

@@ -1184,6 +1429,13 @@ func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error) {
	var err error

	if r.useMount {
		fi, err := os.Stat(path.Join(runInstance.mntDir, src))
		if err != nil {
			return time.Time{}, err
		}
		return fi.ModTime(), nil
	}
	obj1, err := rootFs.NewObject(context.Background(), src)
	if err != nil {
		return time.Time{}, err

@@ -1194,6 +1446,13 @@ func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error)
func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
	var err error

	if r.useMount {
		fi, err := os.Stat(path.Join(runInstance.mntDir, src))
		if err != nil {
			return int64(0), err
		}
		return fi.Size(), nil
	}
	obj1, err := rootFs.NewObject(context.Background(), src)
	if err != nil {
		return int64(0), err

@@ -1204,15 +1463,28 @@ func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) error {
	var err error

	var obj1 fs.Object
	obj1, err = rootFs.NewObject(context.Background(), src)
	if err != nil {
		return err
	if r.useMount {
		var f *os.File
		f, err = os.OpenFile(path.Join(runInstance.mntDir, src), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
		if err != nil {
			return err
		}
		defer func() {
			_ = f.Close()
			r.vfs.WaitForWriters(10 * time.Second)
		}()
		_, err = f.WriteString(data + append)
	} else {
		var obj1 fs.Object
		obj1, err = rootFs.NewObject(context.Background(), src)
		if err != nil {
			return err
		}
		data1 := []byte(data + append)
		r := bytes.NewReader(data1)
		objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
		err = obj1.Update(context.Background(), r, objInfo1)
	}
	data1 := []byte(data + append)
	reader := bytes.NewReader(data1)
	objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
	err = obj1.Update(context.Background(), reader, objInfo1)

	return err
}
backend/cache/cache_mount_other_test.go (vendored, new file): 21 lines

@@ -0,0 +1,21 @@
// +build !linux !go1.13
// +build !darwin !go1.13
// +build !freebsd !go1.13
// +build !windows
// +build !race

package cache_test

import (
	"testing"

	"github.com/rclone/rclone/fs"
)

func (r *run) mountFs(t *testing.T, f fs.Fs) {
	panic("mountFs not defined for this platform")
}

func (r *run) unmountFs(t *testing.T, f fs.Fs) {
	panic("unmountFs not defined for this platform")
}

backend/cache/cache_mount_unix_test.go (vendored, new file): 79 lines

@@ -0,0 +1,79 @@
// +build linux,go1.13 darwin,go1.13 freebsd,go1.13
// +build !race

package cache_test

import (
	"os"
	"testing"
	"time"

	"bazil.org/fuse"
	fusefs "bazil.org/fuse/fs"
	"github.com/rclone/rclone/cmd/mount"
	"github.com/rclone/rclone/cmd/mountlib"
	"github.com/rclone/rclone/fs"
	"github.com/stretchr/testify/require"
)

func (r *run) mountFs(t *testing.T, f fs.Fs) {
	device := f.Name() + ":" + f.Root()
	var options = []fuse.MountOption{
		fuse.MaxReadahead(uint32(mountlib.MaxReadAhead)),
		fuse.Subtype("rclone"),
		fuse.FSName(device), fuse.VolumeName(device),
		fuse.NoAppleDouble(),
		fuse.NoAppleXattr(),
		//fuse.AllowOther(),
	}
	err := os.MkdirAll(r.mntDir, os.ModePerm)
	require.NoError(t, err)
	c, err := fuse.Mount(r.mntDir, options...)
	require.NoError(t, err)
	filesys := mount.NewFS(f)
	server := fusefs.New(c, nil)

	// Serve the mount point in the background returning error to errChan
	r.unmountRes = make(chan error, 1)
	go func() {
		err := server.Serve(filesys)
		closeErr := c.Close()
		if err == nil {
			err = closeErr
		}
		r.unmountRes <- err
	}()

	// check if the mount process has an error to report
	<-c.Ready
	require.NoError(t, c.MountError)

	r.unmountFn = func() error {
		// Shutdown the VFS
		filesys.VFS.Shutdown()
		return fuse.Unmount(r.mntDir)
	}

	r.vfs = filesys.VFS
	r.isMounted = true
}

func (r *run) unmountFs(t *testing.T, f fs.Fs) {
	var err error

	for i := 0; i < 4; i++ {
		err = r.unmountFn()
		if err != nil {
			//log.Printf("signal to umount failed - retrying: %v", err)
			time.Sleep(3 * time.Second)
			continue
		}
		break
	}
	require.NoError(t, err)
	err = <-r.unmountRes
	require.NoError(t, err)
	err = r.vfs.CleanUp()
	require.NoError(t, err)
	r.isMounted = false
}
backend/cache/cache_mount_windows_test.go (vendored, new file): 125 lines

@@ -0,0 +1,125 @@
// +build windows
// +build !race

package cache_test

import (
	"fmt"
	"os"
	"testing"
	"time"

	"github.com/billziss-gh/cgofuse/fuse"
	"github.com/pkg/errors"
	"github.com/rclone/rclone/cmd/cmount"
	"github.com/rclone/rclone/cmd/mountlib"
	"github.com/rclone/rclone/fs"
	"github.com/stretchr/testify/require"
)

// waitFor runs fn() until it returns true or the timeout expires
func waitFor(fn func() bool) (ok bool) {
	const totalWait = 10 * time.Second
	const individualWait = 10 * time.Millisecond
	for i := 0; i < int(totalWait/individualWait); i++ {
		ok = fn()
		if ok {
			return ok
		}
		time.Sleep(individualWait)
	}
	return false
}

func (r *run) mountFs(t *testing.T, f fs.Fs) {
	// FIXME implement cmount
	t.Skip("windows not supported yet")

	device := f.Name() + ":" + f.Root()
	options := []string{
		"-o", "fsname=" + device,
		"-o", "subtype=rclone",
		"-o", fmt.Sprintf("max_readahead=%d", mountlib.MaxReadAhead),
		"-o", "uid=-1",
		"-o", "gid=-1",
		"-o", "allow_other",
		// This causes FUSE to supply O_TRUNC with the Open
		// call which is more efficient for cmount.  However
		// it does not work with cgofuse on Windows with
		// WinFSP so cmount must work with or without it.
		"-o", "atomic_o_trunc",
		"--FileSystemName=rclone",
	}

	fsys := cmount.NewFS(f)
	host := fuse.NewFileSystemHost(fsys)

	// Serve the mount point in the background returning error to errChan
	r.unmountRes = make(chan error, 1)
	go func() {
		var err error
		ok := host.Mount(r.mntDir, options)
		if !ok {
			err = errors.New("mount failed")
		}
		r.unmountRes <- err
	}()

	// unmount
	r.unmountFn = func() error {
		// Shutdown the VFS
		fsys.VFS.Shutdown()
		if host.Unmount() {
			if !waitFor(func() bool {
				_, err := os.Stat(r.mntDir)
				return err != nil
			}) {
				t.Fatalf("mountpoint %q didn't disappear after unmount - continuing anyway", r.mntDir)
			}
			return nil
		}
		return errors.New("host unmount failed")
	}

	// Wait for the filesystem to become ready, checking the file
	// system didn't blow up before starting
	select {
	case err := <-r.unmountRes:
		require.NoError(t, err)
	case <-time.After(time.Second * 3):
	}

	// Wait for the mount point to be available on Windows
	// On Windows the Init signal comes slightly before the mount is ready
	if !waitFor(func() bool {
		_, err := os.Stat(r.mntDir)
		return err == nil
	}) {
		t.Errorf("mountpoint %q didn't became available on mount", r.mntDir)
	}

	r.vfs = fsys.VFS
	r.isMounted = true
}

func (r *run) unmountFs(t *testing.T, f fs.Fs) {
	// FIXME implement cmount
	t.Skip("windows not supported yet")
	var err error

	for i := 0; i < 4; i++ {
		err = r.unmountFn()
		if err != nil {
			//log.Printf("signal to umount failed - retrying: %v", err)
			time.Sleep(3 * time.Second)
			continue
		}
		break
	}
	require.NoError(t, err)
	err = <-r.unmountRes
	require.NoError(t, err)
	err = r.vfs.CleanUp()
	require.NoError(t, err)
	r.isMounted = false
}
2 backend/cache/cache_test.go vendored
@@ -1,6 +1,6 @@
// Test Cache filesystem interface

// +build !plan9,!js
// +build !plan9
// +build !race

package cache_test

2 backend/cache/cache_unsupported.go vendored
@@ -1,6 +1,6 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build plan9 js
// +build plan9

package cache

2 backend/cache/cache_upload_test.go vendored
@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9
// +build !race

package cache_test

2 backend/cache/directory.go vendored
@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9

package cache

2 backend/cache/handle.go vendored
@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9

package cache

2 backend/cache/object.go vendored
@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9

package cache

2 backend/cache/plex.go vendored
@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9

package cache

2 backend/cache/storage_memory.go vendored
@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9

package cache

2 backend/cache/storage_persistent.go vendored
@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9

package cache
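All of the hunks above swap between two build-constraint spellings, so it helps to recall the old `// +build` semantics: on a single line a comma means AND and a space means OR, while separate `// +build` lines are ANDed together. An illustrative file (not part of rclone) that only builds off plan9, off js, and with the race detector disabled:

// +build !plan9,!js
// +build !race

// The two lines above AND together: (NOT plan9 AND NOT js) AND (NOT race).
// On a single line, a comma is AND and a space is OR, so
//   // +build plan9 js
// would instead mean (plan9 OR js). The constraint comment must be
// followed by a blank line before the package clause to take effect.

package example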
@@ -24,7 +24,6 @@ import (
    "github.com/pkg/errors"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/accounting"
    "github.com/rclone/rclone/fs/cache"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/fspath"
@@ -239,18 +238,15 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
        return nil, errors.New("can't point remote at itself - check the value of the remote setting")
    }

    baseName, basePath, err := fspath.Parse(remote)
    baseInfo, baseName, basePath, baseConfig, err := fs.ConfigFs(remote)
    if err != nil {
        return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
    }
    if baseName != "" {
        baseName += ":"
    }
    // Look for a file first
    remotePath := fspath.JoinRootPath(basePath, rpath)
    baseFs, err := cache.Get(baseName + remotePath)
    baseFs, err := baseInfo.NewFs(baseName, remotePath, baseConfig)
    if err != fs.ErrorIsFile && err != nil {
        return nil, errors.Wrapf(err, "failed to make remote %q to wrap", baseName+remotePath)
        return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", baseName, remotePath)
    }
    if !operations.CanServerSideMove(baseFs) {
        return nil, errors.New("can't use chunker on a backend which doesn't support server side move or copy")
@@ -262,7 +258,6 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
        root: rpath,
        opt:  *opt,
    }
    cache.PinUntilFinalized(f.base, f)
    f.dirSort = true // processEntries requires that meta Objects prerun data chunks atm.

    if err := f.configure(opt.NameFormat, opt.MetaFormat, opt.HashType); err != nil {
@@ -276,7 +271,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
    // (yet can't satisfy fstest.CheckListing, will ignore)
    if err == nil && !f.useMeta && strings.Contains(rpath, "/") {
        firstChunkPath := f.makeChunkName(remotePath, 0, "", "")
        _, testErr := cache.Get(baseName + firstChunkPath)
        _, testErr := baseInfo.NewFs(baseName, firstChunkPath, baseConfig)
        if testErr == fs.ErrorIsFile {
            err = testErr
        }
@@ -296,8 +291,6 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
        ServerSideAcrossConfigs: true,
    }).Fill(f).Mask(baseFs).WrapsFs(f, baseFs)

    f.features.Disable("ListR") // Recursive listing may cause chunker to skip files

    return f, err
}

@@ -960,8 +953,6 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
    }
    info := f.wrapInfo(src, chunkRemote, size)

    // Refill chunkLimit and let basePut repeatedly call chunkingReader.Read()
    c.chunkLimit = c.chunkSize
    // TODO: handle range/limit options
    chunk, errChunk := basePut(ctx, wrapIn, info, options...)
    if errChunk != nil {
@@ -1170,14 +1161,10 @@ func (c *chunkingReader) updateHashes() {
func (c *chunkingReader) Read(buf []byte) (bytesRead int, err error) {
    if c.chunkLimit <= 0 {
        // Chunk complete - switch to next one.
        // Note #1:
        // We might not get here because some remotes (eg. box multi-uploader)
        // read the specified size exactly and skip the concluding EOF Read.
        // Then a check in the put loop will kick in.
        // Note #2:
        // The crypt backend after receiving EOF here will call Read again
        // and we must insist on returning EOF, so we postpone refilling
        // chunkLimit to the main loop.
        c.chunkLimit = c.chunkSize
        return 0, io.EOF
    }
    if int64(len(buf)) > c.chunkLimit {
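The Note #1/Note #2 comments above describe a subtle contract: the reader reports io.EOF at each chunk boundary, and the caller, not the reader, decides when the limit is refilled for the next chunk. A minimal standalone sketch of such a chunk-limited reader (illustrative only, not the chunker implementation):

package main

import (
    "fmt"
    "io"
    "strings"
)

// chunkReader serves at most limit bytes, then reports io.EOF until the
// owner calls next() to start the following chunk.
type chunkReader struct {
    src   io.Reader
    limit int64 // bytes remaining in the current chunk
    size  int64 // chunk size
}

func (c *chunkReader) Read(buf []byte) (int, error) {
    if c.limit <= 0 {
        // Chunk boundary: insist on EOF until next() is called,
        // mirroring the "postpone refilling" note above.
        return 0, io.EOF
    }
    if int64(len(buf)) > c.limit {
        buf = buf[:c.limit]
    }
    n, err := c.src.Read(buf)
    c.limit -= int64(n)
    return n, err
}

func (c *chunkReader) next() { c.limit = c.size }

func main() {
    c := &chunkReader{src: strings.NewReader("hello world"), limit: 5, size: 5}
    for chunk := 1; chunk <= 3; chunk++ {
        data, _ := io.ReadAll(c) // stops at the per-chunk EOF
        fmt.Printf("chunk %d: %q\n", chunk, data)
        c.next() // the main loop, not Read, refills the limit
    }
}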
@@ -1346,7 +1333,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    return f.base.Rmdir(ctx, dir)
}

// Purge all files in the directory
// Purge all files in the root and the root directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
@@ -1357,12 +1344,12 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// As a result it removes not only composite chunker files with their
// active chunks but also all hidden temporary chunks in the directory.
//
func (f *Fs) Purge(ctx context.Context, dir string) error {
func (f *Fs) Purge(ctx context.Context) error {
    do := f.base.Features().Purge
    if do == nil {
        return fs.ErrorCantPurge
    }
    return do(ctx, dir)
    return do(ctx)
}

// Remove an object (chunks and metadata, if any)
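Purge is an optional interface, so wrapping backends probe the wrapped Fs's feature table and report that the caller must fall back when it is absent, exactly as the hunk above shows. A hedged, generic sketch of that optional-capability pattern (the types here are illustrative stand-ins, not rclone's fs package):

package main

import (
    "context"
    "errors"
    "fmt"
)

var errCantPurge = errors.New("can't purge: not supported")

// features mimics an optional-capability table: a nil field means the
// backend does not implement that operation.
type features struct {
    purge func(ctx context.Context) error
}

type backend struct{ f features }

// purgeOrFallback forwards to the optional fast path when present and
// otherwise reports that the caller must fall back (e.g. to Remove()).
func purgeOrFallback(ctx context.Context, b *backend) error {
    if do := b.f.purge; do != nil {
        return do(ctx)
    }
    return errCantPurge
}

func main() {
    fast := &backend{f: features{purge: func(context.Context) error {
        fmt.Println("fast purge")
        return nil
    }}}
    slow := &backend{}
    fmt.Println(purgeOrFallback(context.Background(), fast)) // fast purge, <nil>
    fmt.Println(purgeOrFallback(context.Background(), slow)) // can't purge: not supported
}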
@@ -12,7 +12,6 @@ import (
    "github.com/pkg/errors"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/accounting"
    "github.com/rclone/rclone/fs/cache"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/config/obscure"
@@ -159,25 +158,24 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
    if strings.HasPrefix(remote, name+":") {
        return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
    }
    wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
    if err != nil {
        return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
    }
    // Make sure to remove trailing . referring to the current dir
    if path.Base(rpath) == "." {
        rpath = strings.TrimSuffix(rpath, ".")
    }
    // Look for a file first
    var wrappedFs fs.Fs
    if rpath == "" {
        wrappedFs, err = cache.Get(remote)
    } else {
        remotePath := fspath.JoinRootPath(remote, cipher.EncryptFileName(rpath))
        wrappedFs, err = cache.Get(remotePath)
        // if that didn't produce a file, look for a directory
        if err != fs.ErrorIsFile {
            remotePath = fspath.JoinRootPath(remote, cipher.EncryptDirName(rpath))
            wrappedFs, err = cache.Get(remotePath)
        }
    remotePath := fspath.JoinRootPath(wPath, cipher.EncryptFileName(rpath))
    wrappedFs, err := wInfo.NewFs(wName, remotePath, wConfig)
    // if that didn't produce a file, look for a directory
    if err != fs.ErrorIsFile {
        remotePath = fspath.JoinRootPath(wPath, cipher.EncryptDirName(rpath))
        wrappedFs, err = wInfo.NewFs(wName, remotePath, wConfig)
    }
    if err != fs.ErrorIsFile && err != nil {
        return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remote)
        return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", wName, remotePath)
    }
    f := &Fs{
        Fs: wrappedFs,
@@ -186,7 +184,6 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
        opt:    *opt,
        cipher: cipher,
    }
    cache.PinUntilFinalized(f.Fs, f)
    // the features here are ones we could support, and they are
    // ANDed with the ones from wrappedFs
    f.features = (&fs.Features{
@@ -430,18 +427,18 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    return f.Fs.Rmdir(ctx, f.cipher.EncryptDirName(dir))
}

// Purge all files in the directory specified
// Purge all files in the root and the root directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error {
func (f *Fs) Purge(ctx context.Context) error {
    do := f.Fs.Features().Purge
    if do == nil {
        return fs.ErrorCantPurge
    }
    return do(ctx, f.cipher.EncryptDirName(dir))
    return do(ctx)
}

// Copy src to this remote using server side copy operations.
@@ -17,6 +17,7 @@ import (
    "log"
    "mime"
    "net/http"
    "net/url"
    "path"
    "sort"
    "strconv"
@@ -36,7 +37,6 @@ import (
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/fs/fshttp"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/fs/walk"
    "github.com/rclone/rclone/lib/dircache"
    "github.com/rclone/rclone/lib/encoder"
@@ -69,7 +69,7 @@ const (
    // 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
    minChunkSize     = 256 * fs.KibiByte
    defaultChunkSize = 8 * fs.MebiByte
    partialFields    = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks"
    partialFields    = "id,name,size,md5Checksum,trashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails"
    listRGrouping    = 50   // number of IDs to search at once when using ListR
    listRInputBuffer = 1000 // size of input buffer when using ListR
)
@@ -157,17 +157,6 @@ func driveScopesContainsAppFolder(scopes []string) bool {
    return false
}

func driveOAuthOptions() []fs.Option {
    opts := []fs.Option{}
    for _, opt := range oauthutil.SharedOptions {
        if opt.Name == config.ConfigClientID {
            opt.Help = "Google Application Client Id\nSetting your own is recommended.\nSee https://rclone.org/drive/#making-your-own-client-id for how to create your own.\nIf you leave this blank, it will use an internal key which is low performance."
        }
        opts = append(opts, opt)
    }
    return opts
}

// Register with Fs
func init() {
    fs.Register(&fs.RegInfo{
@@ -203,7 +192,13 @@ func init() {
            log.Fatalf("Failed to configure team drive: %v", err)
        }
    },
    Options: append(driveOAuthOptions(), []fs.Option{{
    Options: []fs.Option{{
        Name: config.ConfigClientID,
        Help: "Google Application Client Id\nSetting your own is recommended.\nSee https://rclone.org/drive/#making-your-own-client-id for how to create your own.\nIf you leave this blank, it will use an internal key which is low performance.",
    }, {
        Name: config.ConfigClientSecret,
        Help: "Google Application Client Secret\nSetting your own is recommended.",
    }, {
        Name: "scope",
        Help: "Scope that rclone should use when requesting access from drive.",
        Examples: []fs.OptionExample{{
@@ -229,6 +224,9 @@ Leave blank normally.

Fill in to access "Computers" folders (see docs), or for rclone to use
a non root folder as its starting point.

Note that if this is blank, the first time rclone runs it will fill it
in with the ID of the root folder.
`,
    }, {
        Name: "service_account_file",
@@ -291,11 +289,6 @@ commands (copy, sync, etc), and with all other commands too.`,
        Default:  false,
        Help:     "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.",
        Advanced: true,
    }, {
        Name:     "starred_only",
        Default:  false,
        Help:     "Only show files that are starred.",
        Advanced: true,
    }, {
        Name:    "formats",
        Default: "",
@@ -357,15 +350,27 @@ date is used.`,
        Help:     "Size of listing chunk 100-1000. 0 to disable.",
        Advanced: true,
    }, {
        Name:    "impersonate",
        Default: "",
        Help:    `Impersonate this user when using a service account.`,
        Name:    "impersonate",
        Default: "",
        Help: `Impersonate this user when using a service account.

Note that if this is used then "root_folder_id" will be ignored.
`,
        Advanced: true,
    }, {
        Name:    "alternate_export",
        Default: false,
        Help:    "Deprecated: no longer needed",
        Hide:    fs.OptionHideBoth,
        Help: `Use alternate export URLs for google documents export.

If this option is set this instructs rclone to use an alternate set of
export URLs for drive documents. Users have reported that the
official export URLs can't export large documents, whereas these
unofficial ones can.

See rclone issue [#2243](https://github.com/rclone/rclone/issues/2243) for background,
[this google drive issue](https://issuetracker.google.com/issues/36761333) and
[this helpful post](https://www.labnol.org/internet/direct-links-for-google-drive/28356/).`,
        Advanced: true,
    }, {
        Name:    "upload_cutoff",
        Default: defaultChunkSize,
@@ -489,7 +494,7 @@ If this flag is set then rclone will ignore shortcut files completely.
        // Encode invalid UTF-8 bytes as json doesn't handle them properly.
        // Don't encode / as it's a valid name character in drive.
        Default: encoder.EncodeInvalidUtf8,
    }}...),
    }},
    })

// register duplicate MIME types first
@@ -519,7 +524,6 @@ type Options struct {
    SkipChecksumGphotos bool   `config:"skip_checksum_gphotos"`
    SharedWithMe        bool   `config:"shared_with_me"`
    TrashedOnly         bool   `config:"trashed_only"`
    StarredOnly         bool   `config:"starred_only"`
    Extensions          string `config:"formats"`
    ExportExtensions    string `config:"export_formats"`
    ImportExtensions    string `config:"import_formats"`
@@ -528,6 +532,7 @@ type Options struct {
    UseSharedDate    bool          `config:"use_shared_date"`
    ListChunk        int64         `config:"list_chunk"`
    Impersonate      string        `config:"impersonate"`
    AlternateExport  bool          `config:"alternate_export"`
    UploadCutoff     fs.SizeSuffix `config:"upload_cutoff"`
    ChunkSize        fs.SizeSuffix `config:"chunk_size"`
    AcknowledgeAbuse bool          `config:"acknowledge_abuse"`
@@ -702,7 +707,6 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
        }
        query = append(query, q)
    }

    // Search with sharedWithMe will always return things listed in "Shared With Me" (without any parents)
    // We must not filter with parent when we try list "ROOT" with drive-shared-with-me
    // If we need to list file inside those shared folders, we must search it without sharedWithMe
@@ -714,16 +718,8 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
    if parentsQuery.Len() > 1 {
        _, _ = parentsQuery.WriteString(" or ")
    }
    if (f.opt.SharedWithMe || f.opt.StarredOnly) && dirID == f.rootFolderID {
        if f.opt.SharedWithMe {
            _, _ = parentsQuery.WriteString("sharedWithMe=true")
        }
        if f.opt.StarredOnly {
            if f.opt.SharedWithMe {
                _, _ = parentsQuery.WriteString(" and ")
            }
            _, _ = parentsQuery.WriteString("starred=true")
        }
    if f.opt.SharedWithMe && dirID == f.rootFolderID {
        _, _ = parentsQuery.WriteString("sharedWithMe=true")
    } else {
        _, _ = fmt.Fprintf(parentsQuery, "'%s' in parents", dirID)
    }
@@ -933,32 +929,55 @@ func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name
    if !config.Confirm(false) {
        return nil
    }
    f, err := newFs(name, "", m)
    client, err := createOAuthClient(opt, name, m)
    if err != nil {
        return errors.Wrap(err, "failed to make Fs to list teamdrives")
        return errors.Wrap(err, "config team drive failed to create oauth client")
    }
    svc, err := drive.New(client)
    if err != nil {
        return errors.Wrap(err, "config team drive failed to make drive client")
    }
    fmt.Printf("Fetching team drive list...\n")
    teamDrives, err := f.listTeamDrives(ctx)
    if err != nil {
        return err
    }
    if len(teamDrives) == 0 {
        fmt.Printf("No team drives found in your account")
        return nil
    }
    var driveIDs, driveNames []string
    for _, teamDrive := range teamDrives {
        driveIDs = append(driveIDs, teamDrive.Id)
        driveNames = append(driveNames, teamDrive.Name)
    listTeamDrives := svc.Teamdrives.List().PageSize(100)
    listFailed := false
    var defaultFs Fs // default Fs with default Options
    for {
        var teamDrives *drive.TeamDriveList
        err = newPacer(opt).Call(func() (bool, error) {
            teamDrives, err = listTeamDrives.Context(ctx).Do()
            return defaultFs.shouldRetry(err)
        })
        if err != nil {
            fmt.Printf("Listing team drives failed: %v\n", err)
            listFailed = true
            break
        }
        for _, drive := range teamDrives.TeamDrives {
            driveIDs = append(driveIDs, drive.Id)
            driveNames = append(driveNames, drive.Name)
        }
        if teamDrives.NextPageToken == "" {
            break
        }
        listTeamDrives.PageToken(teamDrives.NextPageToken)
    }
    var driveID string
    if !listFailed && len(driveIDs) == 0 {
        fmt.Printf("No team drives found in your account")
    } else {
        driveID = config.Choose("Enter a Team Drive ID", driveIDs, driveNames, true)
    }
    driveID := config.Choose("Enter a Team Drive ID", driveIDs, driveNames, true)
    m.Set("team_drive", driveID)
    m.Set("root_folder_id", "")
    opt.TeamDriveID = driveID
    opt.RootFolderID = ""
    return nil
}

// newPacer makes a pacer configured for drive
func newPacer(opt *Options) *fs.Pacer {
    return fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst)))
}
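The pacer above is used the same way throughout this diff: every API call is wrapped in a closure handed to Call, and the closure reports whether the failure is retryable. A hedged, self-contained sketch of that call-with-retry convention (a simplified toy pacer, not rclone's fs.Pacer):

package main

import (
    "errors"
    "fmt"
    "time"
)

// pacer retries an operation while it reports (retry=true, err).
// It is a toy stand-in for fs.Pacer with exponential backoff.
type pacer struct {
    sleep    time.Duration
    maxTries int
}

func (p *pacer) Call(fn func() (bool, error)) error {
    sleep := p.sleep
    for try := 1; ; try++ {
        retry, err := fn()
        if !retry || err == nil || try >= p.maxTries {
            return err
        }
        time.Sleep(sleep)
        sleep *= 2 // back off between attempts
    }
}

func main() {
    attempts := 0
    p := &pacer{sleep: 10 * time.Millisecond, maxTries: 5}
    err := p.Call(func() (bool, error) {
        attempts++
        if attempts < 3 {
            // e.g. a 403 rate-limit response would be retryable
            return true, errors.New("rate limited")
        }
        return false, nil
    })
    fmt.Println(attempts, err) // 3 <nil>
}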
// getClient makes an http client according to the options
func getClient(opt *Options) *http.Client {
    t := fshttp.NewTransportCustom(fs.Config, func(t *http.Transport) {
@@ -1041,11 +1060,9 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
    return
}

// newFs partially constructs Fs from the path
//
// It constructs a valid Fs but doesn't attempt to figure out whether
// it is a file or a directory.
func newFs(name, path string, m configmap.Mapper) (*Fs, error) {
// NewFs constructs an Fs from the path, container:path
func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
    ctx := context.Background()
    // Parse config into Options struct
    opt := new(Options)
    err := configstruct.Set(m, opt)
@@ -1075,7 +1092,7 @@ func newFs(name, path string, m configmap.Mapper) (*Fs, error) {
        name:  name,
        root:  root,
        opt:   *opt,
        pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst))),
        pacer: newPacer(opt),
        m:        m,
        grouping: listRGrouping,
        listRmu:  new(sync.Mutex),
@@ -1105,26 +1122,25 @@ func newFs(name, path string, m configmap.Mapper) (*Fs, error) {
        }
    }

    return f, nil
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
    ctx := context.Background()
    f, err := newFs(name, path, m)
    if err != nil {
        return nil, err
    // If impersonating warn about root_folder_id if set and unset it
    //
    // This is because rclone v1.51 and v1.52 cached root_folder_id when
    // using impersonate which they shouldn't have done. It is possible
    // someone is using impersonate and root_folder_id in which case this
    // breaks their workflow. There isn't an easy way around that.
    if opt.RootFolderID != "" && opt.RootFolderID != "appDataFolder" && opt.Impersonate != "" {
        fs.Logf(f, "Ignoring cached root_folder_id when using --drive-impersonate")
        opt.RootFolderID = ""
    }

    // Set the root folder ID
    if f.opt.RootFolderID != "" {
        // use root_folder ID if set
        f.rootFolderID = f.opt.RootFolderID
    // set root folder for a team drive or query the user root folder
    if opt.RootFolderID != "" {
        // override root folder if set or cached in the config and not impersonating
        f.rootFolderID = opt.RootFolderID
    } else if f.isTeamDrive {
        // otherwise use team_drive if set
        f.rootFolderID = f.opt.TeamDriveID
    } else {
        // otherwise look up the actual root ID
        // Look up the root ID and cache it in the config
        rootID, err := f.getRootID()
        if err != nil {
            if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
@@ -1136,24 +1152,27 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
            }
        }
        f.rootFolderID = rootID
        fs.Debugf(f, "root_folder_id = %q - save this in the config to speed up startup", rootID)
        // Don't cache the root folder ID if impersonating
        if opt.Impersonate == "" {
            m.Set("root_folder_id", rootID)
        }
    }

    f.dirCache = dircache.New(f.root, f.rootFolderID, f)
    f.dirCache = dircache.New(root, f.rootFolderID, f)

    // Parse extensions
    if f.opt.Extensions != "" {
        if f.opt.ExportExtensions != defaultExportExtensions {
    if opt.Extensions != "" {
        if opt.ExportExtensions != defaultExportExtensions {
            return nil, errors.New("only one of 'formats' and 'export_formats' can be specified")
        }
        f.opt.Extensions, f.opt.ExportExtensions = "", f.opt.Extensions
        opt.Extensions, opt.ExportExtensions = "", opt.Extensions
    }
    f.exportExtensions, _, err = parseExtensions(f.opt.ExportExtensions, defaultExportExtensions)
    f.exportExtensions, _, err = parseExtensions(opt.ExportExtensions, defaultExportExtensions)
    if err != nil {
        return nil, err
    }

    _, f.importMimeTypes, err = parseExtensions(f.opt.ImportExtensions)
    _, f.importMimeTypes, err = parseExtensions(opt.ImportExtensions)
    if err != nil {
        return nil, err
    }
@@ -1162,7 +1181,7 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
    err = f.dirCache.FindRoot(ctx, false)
    if err != nil {
        // Assume it is a file
        newRoot, remote := dircache.SplitPath(f.root)
        newRoot, remote := dircache.SplitPath(root)
        tempF := *f
        tempF.dirCache = dircache.New(newRoot, f.rootFolderID, &tempF)
        tempF.root = newRoot
@@ -1253,7 +1272,20 @@ func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, expor
    if err != nil {
        return nil, err
    }
    url := info.ExportLinks[mediaType]
    id := actualID(info.Id)
    url := fmt.Sprintf("%sfiles/%s/export?mimeType=%s", f.svc.BasePath, id, url.QueryEscape(mediaType))
    if f.opt.AlternateExport {
        switch info.MimeType {
        case "application/vnd.google-apps.drawing":
            url = fmt.Sprintf("https://docs.google.com/drawings/d/%s/export/%s", id, extension[1:])
        case "application/vnd.google-apps.document":
            url = fmt.Sprintf("https://docs.google.com/document/d/%s/export?format=%s", id, extension[1:])
        case "application/vnd.google-apps.spreadsheet":
            url = fmt.Sprintf("https://docs.google.com/spreadsheets/d/%s/export?format=%s", id, extension[1:])
        case "application/vnd.google-apps.presentation":
            url = fmt.Sprintf("https://docs.google.com/presentation/d/%s/export/%s", id, extension[1:])
        }
    }
    baseObject := f.newBaseObject(remote+extension, info)
    baseObject.bytes = -1
    baseObject.mimeType = exportMimeType
@@ -1630,7 +1662,7 @@ func (s listRSlices) Less(i, j int) bool {
// In each cycle it will read up to grouping entries from the in channel without blocking.
// If an error occurs it will be sent to the out channel and then return. Once the in channel is closed,
// nil is sent to the out channel and the function returns.
func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listREntry, out chan<- error, cb func(fs.DirEntry) error, sendJob func(listREntry)) {
func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listREntry, out chan<- error, cb func(fs.DirEntry) error) {
    var dirs []string
    var paths []string
    var grouping int32
@@ -1709,19 +1741,26 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listRE
    // https://issuetracker.google.com/issues/149522397
    if len(dirs) > 1 && !foundItems {
        if atomic.SwapInt32(&f.grouping, 1) != 1 {
            fs.Debugf(f, "Disabling ListR to work around bug in drive as multi listing (%d) returned no entries", len(dirs))
            fs.Logf(f, "Disabling ListR to work around bug in drive as multi listing (%d) returned no entries", len(dirs))
        }
        var recycled = make([]listREntry, len(dirs))
        f.listRmu.Lock()
        for i := range dirs {
            // Requeue the jobs
            job := listREntry{id: dirs[i], path: paths[i]}
            sendJob(job)
            recycled[i] = listREntry{id: dirs[i], path: paths[i]}
            // Make a note of these dirs - if they all turn
            // out to be empty then we can re-enable grouping
            f.listRempties[dirs[i]] = struct{}{}
        }
        f.listRmu.Unlock()
        fs.Debugf(f, "Recycled %d entries", len(dirs))
        // recycle these in the background so we don't deadlock
        // the listR runners if they all get here
        wg.Add(len(recycled))
        go func() {
            for _, entry := range recycled {
                in <- entry
            }
            fs.Debugf(f, "Recycled %d entries", len(recycled))
        }()
    }
    // If using a grouping of 1 and dir was empty then check to see if it
    // is part of the group that caused grouping to be disabled.
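The "recycle these in the background" comment marks a classic bounded-channel hazard: if every consumer blocks trying to push work back into the same channel it consumes from, and the buffer is full, they all deadlock. Pushing from a fresh goroutine keeps the consumers draining. A hedged toy demonstration of that fix (not rclone code):

package main

import (
    "fmt"
    "sync"
)

func main() {
    jobs := make(chan int, 1) // small buffer, like listRInputBuffer
    var wg sync.WaitGroup

    wg.Add(1)
    jobs <- 1

    // A single worker that sometimes needs to requeue work.
    go func() {
        for job := range jobs {
            if job < 3 {
                // Requeueing synchronously here could block forever
                // once the buffer is full, so hand it to a goroutine
                // and keep draining the channel.
                wg.Add(1)
                requeued := job + 1
                go func() { jobs <- requeued }()
            }
            fmt.Println("processed", job)
            wg.Done()
        }
    }()

    wg.Wait()   // all jobs (including requeued ones) accounted for
    close(jobs) // safe: no more sends after wg.Wait returns
}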
@@ -1735,7 +1774,7 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listRE
    // empty so must have made a mistake
    if len(f.listRempties) == 0 {
        if atomic.SwapInt32(&f.grouping, listRGrouping) != listRGrouping {
            fs.Debugf(f, "Re-enabling ListR as previous detection was in error")
            fs.Logf(f, "Re-enabling ListR as previous detection was in error")
        }
    }
}
@@ -1790,33 +1829,21 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
    overflow := []listREntry{}
    listed := 0

    // Send a job to the input channel if not closed. If the job
    // won't fit then queue it in the overflow slice.
    //
    // This will not block if the channel is full.
    sendJob := func(job listREntry) {
        mu.Lock()
        defer mu.Unlock()
        if in == nil {
            return
        }
        wg.Add(1)
        select {
        case in <- job:
        default:
            overflow = append(overflow, job)
            wg.Add(-1)
        }
    }

    // Send the entry to the caller, queueing any directories as new jobs
    cb := func(entry fs.DirEntry) error {
        if d, isDir := entry.(*fs.Dir); isDir {
            job := listREntry{actualID(d.ID()), d.Remote()}
            sendJob(job)
        }
        mu.Lock()
        defer mu.Unlock()
        if d, isDir := entry.(*fs.Dir); isDir && in != nil {
            job := listREntry{actualID(d.ID()), d.Remote()}
            select {
            case in <- job:
                // Adding the wg after we've entered the item is
                // safe here because we know when the callback
                // is called we are holding a waitgroup.
                wg.Add(1)
            default:
                overflow = append(overflow, job)
            }
        }
        listed++
        return list.Add(entry)
    }
@@ -1825,7 +1852,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
    in <- listREntry{directoryID, dir}

    for i := 0; i < fs.Config.Checkers; i++ {
        go f.listRRunner(ctx, &wg, in, out, cb, sendJob)
        go f.listRRunner(ctx, &wg, in, out, cb)
    }
    go func() {
        // wait until all the directories are processed
@@ -2191,9 +2218,10 @@ func (f *Fs) delete(ctx context.Context, id string, useTrash bool) error {
    })
}

// purgeCheck removes the dir directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
// Rmdir deletes a directory
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    root := path.Join(f.root, dir)
    dc := f.dirCache
    directoryID, err := dc.FindDir(ctx, dir, false)
@@ -2206,22 +2234,20 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
        return f.delete(ctx, shortcutID, f.opt.UseTrash)
    }
    var trashedFiles = false
    if check {
        found, err := f.list(ctx, []string{directoryID}, "", false, false, true, func(item *drive.File) bool {
            if !item.Trashed {
                fs.Debugf(dir, "Rmdir: contains file: %q", item.Name)
                return true
            }
            fs.Debugf(dir, "Rmdir: contains trashed file: %q", item.Name)
            trashedFiles = true
            return false
        })
        if err != nil {
            return err
        }
        if found {
            return errors.Errorf("directory not empty")
    found, err := f.list(ctx, []string{directoryID}, "", false, false, true, func(item *drive.File) bool {
        if !item.Trashed {
            fs.Debugf(dir, "Rmdir: contains file: %q", item.Name)
            return true
        }
        fs.Debugf(dir, "Rmdir: contains trashed file: %q", item.Name)
        trashedFiles = true
        return false
    })
    if err != nil {
        return err
    }
    if found {
        return errors.Errorf("directory not empty")
    }
    if root != "" {
        // trash the directory if it had trashed files
@@ -2231,8 +2257,6 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
    if err != nil {
        return err
    }
    } else if check {
        return errors.New("can't purge root directory")
    }
    f.dirCache.FlushDir(dir)
    if err != nil {
@@ -2241,13 +2265,6 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
    return nil
}

// Rmdir deletes a directory
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    return f.purgeCheck(ctx, dir, true)
}

// Precision of the object storage system
func (f *Fs) Precision() time.Duration {
    return time.Millisecond
@@ -2265,13 +2282,13 @@ func (f *Fs) Precision() time.Duration {
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
    var srcObj *baseObject
    ext := ""
    isDoc := false
    readDescription := false
    switch src := src.(type) {
    case *Object:
        srcObj = &src.baseObject
    case *documentObject:
        srcObj, ext = &src.baseObject, src.ext()
        isDoc = true
        readDescription = true
    case *linkObject:
        srcObj, ext = &src.baseObject, src.ext()
    default:
@@ -2279,12 +2296,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
        return nil, fs.ErrorCantCopy
    }

    // Look to see if there is an existing object before we remove
    // the extension from the remote
    existingObject, _ := f.NewObject(ctx, remote)

    // Adjust the remote name to be without the extension if we
    // are about to create a doc.
    if ext != "" {
        if !strings.HasSuffix(remote, ext) {
            fs.Debugf(src, "Can't copy - not same document type")
@@ -2293,12 +2304,15 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
        remote = remote[:len(remote)-len(ext)]
    }

    // Look to see if there is an existing object
    existingObject, _ := f.NewObject(ctx, remote)

    createInfo, err := f.createFileInfo(ctx, remote, src.ModTime(ctx))
    if err != nil {
        return nil, err
    }

    if isDoc {
    if readDescription {
        // preserve the description on copy for docs
        info, err := f.getFile(actualID(srcObj.id), "description")
        if err != nil {
@@ -2330,22 +2344,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
    if err != nil {
        return nil, err
    }
    // Google docs aren't preserving their mod time after copy, so set them explicitly
    // See: https://github.com/rclone/rclone/issues/4517
    //
    // FIXME remove this when google fixes the problem!
    if isDoc {
        // A short sleep is needed here in order to make the
        // change effective, without it is ignored. This is
        // probably some eventual consistency nastiness.
        sleepTime := 2 * time.Second
        fs.Debugf(f, "Sleeping for %v before setting the modtime to work around drive bug - see #4517", sleepTime)
        time.Sleep(sleepTime)
        err = newObject.SetModTime(ctx, src.ModTime(ctx))
        if err != nil {
            return nil, err
        }
    }
    if existingObject != nil {
        err = existingObject.Remove(ctx)
        if err != nil {
@@ -2360,11 +2358,23 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
func (f *Fs) Purge(ctx context.Context) error {
    if f.root == "" {
        return errors.New("can't purge root directory")
    }
    if f.opt.TrashedOnly {
        return errors.New("Can't purge with --drive-trashed-only. Use delete if you want to selectively delete files")
    }
    return f.purgeCheck(ctx, dir, false)
    rootID, err := f.dirCache.RootID(ctx, false)
    if err != nil {
        return err
    }
    err = f.delete(ctx, shortcutID(rootID), f.opt.UseTrash)
    f.dirCache.ResetRoot()
    if err != nil {
        return err
    }
    return nil
}

// CleanUp empties the trash
@@ -2867,98 +2877,6 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
    return dstFs.newObjectWithInfo(dstPath, info)
}

// List all team drives
func (f *Fs) listTeamDrives(ctx context.Context) (drives []*drive.TeamDrive, err error) {
    drives = []*drive.TeamDrive{}
    listTeamDrives := f.svc.Teamdrives.List().PageSize(100)
    var defaultFs Fs // default Fs with default Options
    for {
        var teamDrives *drive.TeamDriveList
        err = f.pacer.Call(func() (bool, error) {
            teamDrives, err = listTeamDrives.Context(ctx).Do()
            return defaultFs.shouldRetry(err)
        })
        if err != nil {
            return drives, errors.Wrap(err, "listing team drives failed")
        }
        drives = append(drives, teamDrives.TeamDrives...)
        if teamDrives.NextPageToken == "" {
            break
        }
        listTeamDrives.PageToken(teamDrives.NextPageToken)
    }
    return drives, nil
}

type unTrashResult struct {
    Untrashed int
    Errors    int
}

func (r unTrashResult) Error() string {
    return fmt.Sprintf("%d errors while untrashing - see log", r.Errors)
}

// Restore the trashed files from dir, directoryID recursing if needed
func (f *Fs) unTrash(ctx context.Context, dir string, directoryID string, recurse bool) (r unTrashResult, err error) {
    directoryID = actualID(directoryID)
    fs.Debugf(dir, "finding trash to restore in directory %q", directoryID)
    _, err = f.list(ctx, []string{directoryID}, "", false, false, true, func(item *drive.File) bool {
        remote := path.Join(dir, item.Name)
        if item.ExplicitlyTrashed {
            fs.Infof(remote, "restoring %q", item.Id)
            if operations.SkipDestructive(ctx, remote, "restore") {
                return false
            }
            update := drive.File{
                ForceSendFields: []string{"Trashed"}, // necessary to set false value
                Trashed:         false,
            }
            err := f.pacer.Call(func() (bool, error) {
                _, err := f.svc.Files.Update(item.Id, &update).
                    SupportsAllDrives(true).
                    Fields("trashed").
                    Do()
                return f.shouldRetry(err)
            })
            if err != nil {
                err = errors.Wrap(err, "failed to restore")
                r.Errors++
                fs.Errorf(remote, "%v", err)
            } else {
                r.Untrashed++
            }
        }
        if recurse && item.MimeType == "application/vnd.google-apps.folder" {
            if !isShortcutID(item.Id) {
                rNew, _ := f.unTrash(ctx, remote, item.Id, recurse)
                r.Untrashed += rNew.Untrashed
                r.Errors += rNew.Errors
            }
        }
        return false
    })
    if err != nil {
        err = errors.Wrap(err, "failed to list directory")
        r.Errors++
        fs.Errorf(dir, "%v", err)
    }
    if r.Errors != 0 {
        return r, r
    }
    return r, nil
}

// Untrash dir
func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTrashResult, err error) {
    directoryID, err := f.dirCache.FindDir(ctx, dir, false)
    if err != nil {
        r.Errors++
        return r, err
    }
    return f.unTrash(ctx, dir, directoryID, true)
}

var commandHelp = []fs.CommandHelp{{
    Name:  "get",
    Short: "Get command for fetching the drive config parameters",
@@ -3010,55 +2928,6 @@ authenticated with "drive2:" can't read files from "drive:".
    Opts: map[string]string{
        "target": "optional target remote for the shortcut destination",
    },
}, {
    Name:  "drives",
    Short: "List the shared drives available to this account",
    Long: `This command lists the shared drives (teamdrives) available to this
account.

Usage:

    rclone backend drives drive:

This will return a JSON list of objects like this

    [
        {
            "id": "0ABCDEF-01234567890",
            "kind": "drive#teamDrive",
            "name": "My Drive"
        },
        {
            "id": "0ABCDEFabcdefghijkl",
            "kind": "drive#teamDrive",
            "name": "Test Drive"
        }
    ]

`,
}, {
    Name:  "untrash",
    Short: "Untrash files and directories",
    Long: `This command untrashes all the files and directories in the directory
passed in recursively.

Usage:

This takes an optional directory to trash which makes this easier to
use via the API.

    rclone backend untrash drive:directory
    rclone backend -i untrash drive:directory subdir

Use the -i flag to see what would be restored before restoring it.

Result:

    {
        "Untrashed": 17,
        "Errors": 0
    }
`,
}}
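Since `rclone backend drives` returns plain JSON in the shape shown in the help text above, a caller can consume it directly. A hedged sketch of decoding that output in Go (the struct is inferred from the example output above, not an rclone type):

package main

import (
    "encoding/json"
    "fmt"
    "log"
)

// teamDrive matches the objects printed by `rclone backend drives drive:`.
type teamDrive struct {
    ID   string `json:"id"`
    Kind string `json:"kind"`
    Name string `json:"name"`
}

func main() {
    // In practice this would come from running
    // exec.Command("rclone", "backend", "drives", "drive:").
    raw := []byte(`[
        {"id": "0ABCDEF-01234567890", "kind": "drive#teamDrive", "name": "My Drive"},
        {"id": "0ABCDEFabcdefghijkl", "kind": "drive#teamDrive", "name": "Test Drive"}
    ]`)

    var drives []teamDrive
    if err := json.Unmarshal(raw, &drives); err != nil {
        log.Fatal(err)
    }
    for _, d := range drives {
        fmt.Printf("%s\t%s\n", d.ID, d.Name)
    }
}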
// Command the backend to run a named command
@@ -3122,14 +2991,6 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
        }
    }
    return f.makeShortcut(ctx, arg[0], dstFs, arg[1])
case "drives":
    return f.listTeamDrives(ctx)
case "untrash":
    dir := ""
    if len(arg) > 0 {
        dir = arg[0]
    }
    return f.unTrashDir(ctx, dir, true)
default:
    return nil, fs.ErrorCommandNotFound
}

@@ -10,16 +10,13 @@ import (
    "path/filepath"
    "strings"
    "testing"
    "time"

    "github.com/pkg/errors"
    _ "github.com/rclone/rclone/backend/local"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/fstest"
    "github.com/rclone/rclone/fstest/fstests"
    "github.com/rclone/rclone/lib/random"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "google.golang.org/api/drive/v3"
@@ -364,50 +361,6 @@ func (f *Fs) InternalTestShortcuts(t *testing.T) {
    })
}

// TestIntegration/FsMkdir/FsPutFiles/Internal/UnTrash
func (f *Fs) InternalTestUnTrash(t *testing.T) {
    ctx := context.Background()

    // Make some objects, one in a subdir
    contents := random.String(100)
    file1 := fstest.NewItem("trashDir/toBeTrashed", contents, time.Now())
    _, obj1 := fstests.PutTestContents(ctx, t, f, &file1, contents, false)
    file2 := fstest.NewItem("trashDir/subdir/toBeTrashed", contents, time.Now())
    _, _ = fstests.PutTestContents(ctx, t, f, &file2, contents, false)

    // Check objects
    checkObjects := func() {
        fstest.CheckListingWithRoot(t, f, "trashDir", []fstest.Item{
            file1,
            file2,
        }, []string{
            "trashDir/subdir",
        }, f.Precision())
    }
    checkObjects()

    // Make sure we are using the trash
    require.Equal(t, true, f.opt.UseTrash)

    // Remove the object and the dir
    require.NoError(t, obj1.Remove(ctx))
    require.NoError(t, f.Purge(ctx, "trashDir/subdir"))

    // Check objects gone
    fstest.CheckListingWithRoot(t, f, "trashDir", []fstest.Item{}, []string{}, f.Precision())

    // Restore the object and directory
    r, err := f.unTrashDir(ctx, "trashDir", true)
    require.NoError(t, err)
    assert.Equal(t, unTrashResult{Errors: 0, Untrashed: 2}, r)

    // Check objects restored
    checkObjects()

    // Remove the test dir
    require.NoError(t, f.Purge(ctx, "trashDir"))
}

func (f *Fs) InternalTest(t *testing.T) {
    // These tests all depend on each other so run them as nested tests
    t.Run("DocumentImport", func(t *testing.T) {
@@ -423,7 +376,6 @@ func (f *Fs) InternalTest(t *testing.T) {
        })
    })
    t.Run("Shortcuts", f.InternalTestShortcuts)
    t.Run("UnTrash", f.InternalTestUnTrash)
}

var _ fstests.InternalTester = (*Fs)(nil)
@@ -125,7 +125,13 @@ func init() {
            log.Fatalf("Failed to configure token: %v", err)
        }
    },
    Options: append(oauthutil.SharedOptions, []fs.Option{{
    Options: []fs.Option{{
        Name: config.ConfigClientID,
        Help: "Dropbox App Client Id\nLeave blank normally.",
    }, {
        Name: config.ConfigClientSecret,
        Help: "Dropbox App Client Secret\nLeave blank normally.",
    }, {
        Name: "chunk_size",
        Help: fmt.Sprintf(`Upload chunk size. (< %v).

@@ -155,7 +161,7 @@ memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
            encoder.EncodeDel |
            encoder.EncodeRightSpace |
            encoder.EncodeInvalidUtf8),
    }}...),
    }},
    })
}

@@ -605,9 +611,10 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
    return err
}

// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error) {
// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    root := path.Join(f.slashRoot, dir)

    // can't remove root
@@ -615,33 +622,31 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
        return errors.New("can't remove root directory")
    }

    if check {
        // check directory exists
        _, err = f.getDirMetadata(root)
        if err != nil {
            return errors.Wrap(err, "Rmdir")
        }
    // check directory exists
    _, err := f.getDirMetadata(root)
    if err != nil {
        return errors.Wrap(err, "Rmdir")
    }

    root = f.opt.Enc.FromStandardPath(root)
    // check directory empty
    arg := files.ListFolderArg{
        Path:      root,
        Recursive: false,
    }
    if root == "/" {
        arg.Path = "" // Specify root folder as empty string
    }
    var res *files.ListFolderResult
    err = f.pacer.Call(func() (bool, error) {
        res, err = f.srv.ListFolder(&arg)
        return shouldRetry(err)
    })
    if err != nil {
        return errors.Wrap(err, "Rmdir")
    }
    if len(res.Entries) != 0 {
        return errors.New("directory not empty")
    }
    root = f.opt.Enc.FromStandardPath(root)
    // check directory empty
    arg := files.ListFolderArg{
        Path:      root,
        Recursive: false,
    }
    if root == "/" {
        arg.Path = "" // Specify root folder as empty string
    }
    var res *files.ListFolderResult
    err = f.pacer.Call(func() (bool, error) {
        res, err = f.srv.ListFolder(&arg)
        return shouldRetry(err)
    })
    if err != nil {
        return errors.Wrap(err, "Rmdir")
    }
    if len(res.Entries) != 0 {
        return errors.New("directory not empty")
    }

    // remove it
@@ -652,13 +657,6 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
    return err
}

// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    return f.purgeCheck(ctx, dir, true)
}

// Precision returns the precision
func (f *Fs) Precision() time.Duration {
    return time.Second
@@ -721,8 +719,15 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
    return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) (err error) {
    // Let dropbox delete the filesystem tree
    err = f.pacer.Call(func() (bool, error) {
        _, err = f.srv.DeleteV2(&files.DeleteArg{
            Path: f.opt.Enc.FromStandardPath(f.slashRoot),
        })
        return shouldRetry(err)
    })
    return err
}

// Move src to this remote using server side move operations.
@@ -6,7 +6,6 @@ import (
    "net/http"
    "regexp"
    "strconv"
    "strings"
    "time"

    "github.com/pkg/errors"
@@ -29,20 +28,6 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
    // Detect this error which the integration tests provoke
    // error HTTP error 403 (403 Forbidden) returned body: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"
    //
    // https://1fichier.com/api.html
    //
    // file/ls.cgi is limited :
    //
    // Warning (can be changed in case of abuses) :
    // List all files of the account is limited to 1 request per hour.
    // List folders is limited to 5 000 results and 1 request per folder per 30s.
    if err != nil && strings.Contains(err.Error(), "Flood detected") {
        fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
        time.Sleep(30 * time.Second)
    }
    return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

@@ -323,7 +323,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
    if size > int64(300e9) {
    if size > int64(100e9) {
        return nil, errors.New("File too big, cant upload")
    } else if size == 0 {
        return nil, fs.ErrorCantUploadEmptyFiles

@@ -88,7 +88,13 @@ func init() {
            log.Fatalf("Failed to configure token: %v", err)
        }
    },
    Options: append(oauthutil.SharedOptions, []fs.Option{{
    Options: []fs.Option{{
        Name: config.ConfigClientID,
        Help: "Google Application Client Id\nLeave blank normally.",
    }, {
        Name: config.ConfigClientSecret,
        Help: "Google Application Client Secret\nLeave blank normally.",
    }, {
        Name: "project_number",
        Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
    }, {
@@ -255,7 +261,7 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
        Default: (encoder.Base |
            encoder.EncodeCrLf |
            encoder.EncodeInvalidUtf8),
    }}...),
    }},
    })
}

@@ -21,6 +21,7 @@ import (
    "github.com/pkg/errors"
    "github.com/rclone/rclone/backend/googlephotos/api"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/config/obscure"
@@ -109,7 +110,13 @@ func init() {
`)

    },
    Options: append(oauthutil.SharedOptions, []fs.Option{{
    Options: []fs.Option{{
        Name: config.ConfigClientID,
        Help: "Google Application Client Id\nLeave blank normally.",
    }, {
        Name: config.ConfigClientSecret,
        Help: "Google Application Client Secret\nLeave blank normally.",
    }, {
        Name:    "read_only",
        Default: false,
        Help: `Set to make the Google Photos backend read only.
@@ -132,7 +139,7 @@ you want to read the media.`,
        Default:  2000,
        Help:     `Year limits the photos to be downloaded to those which are uploaded after the given year`,
        Advanced: true,
    }}...),
    }},
    })
}

@@ -20,6 +20,7 @@ import (
    "github.com/pkg/errors"
    "github.com/rclone/rclone/backend/swift"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/config/obscure"
@@ -62,7 +63,13 @@ func init() {
            log.Fatalf("Failed to configure token: %v", err)
        }
    },
    Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
    Options: append([]fs.Option{{
        Name: config.ConfigClientID,
        Help: "Hubic Client Id\nLeave blank normally.",
    }, {
        Name: config.ConfigClientSecret,
        Help: "Hubic Client Secret\nLeave blank normally.",
    }}, swift.SharedOptions...),
    })
}
|
||||
@@ -353,7 +353,7 @@ func doAuthV1(ctx context.Context, srv *rest.Client, username, password string)
authCode = strings.Replace(authCode, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
_, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
resp, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
}
}
}
@@ -373,9 +373,6 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {
fmt.Printf("Login Token> ")
loginToken := config.ReadLine()

m.Set(configClientID, "jottacli")
m.Set(configClientSecret, "")

token, err := doAuthV2(ctx, srv, loginToken, m)
if err != nil {
log.Fatalf("Failed to get oauth token: %s", err)
@@ -387,6 +384,7 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {

fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
oauthConfig.ClientID = "jottacli"
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to load oAuthClient: %s", err)
@@ -553,7 +551,7 @@ func (f *Fs) setEndpointURL() {
if f.opt.Mountpoint == "" {
f.opt.Mountpoint = defaultMountpoint
}
f.endpointURL = path.Join(f.user, f.opt.Device, f.opt.Mountpoint)
f.endpointURL = urlPathEscape(path.Join(f.user, f.opt.Device, f.opt.Mountpoint))
}

// readMetaDataForPath reads the metadata from the path
@@ -1072,8 +1070,8 @@ func (f *Fs) Precision() time.Duration {
}

// Purge deletes all the files and the container
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
}

// copyOrMoves copies or moves directories or files depending on the method parameter
@@ -1089,7 +1087,8 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
return shouldRetry(resp, err)
retry, _ := shouldRetry(resp, err)
return (retry && resp.StatusCode != 500), err
})
if err != nil {
return nil, err
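
The copyOrMove hunk above narrows the pacer's retry decision so that HTTP 500 responses are returned to the caller instead of retried. A minimal sketch of that predicate as a standalone helper (shouldRetryExcept is a hypothetical name; shouldRetry is the backend's existing function):

// shouldRetryExcept wraps the backend retry predicate so that one status
// code (here the API's spurious 500) is surfaced rather than retried.
func shouldRetryExcept(resp *http.Response, err error, skipStatus int) (bool, error) {
	retry, err := shouldRetry(resp, err)
	if resp != nil && resp.StatusCode == skipStatus {
		// let the caller inspect the error instead of hammering the API
		return false, err
	}
	return retry, err
}
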
@@ -1193,6 +1192,18 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string

_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)

// surprise! jottacloud fucked up dirmove - the api spits out an error but
// dir gets moved regardless
if apiErr, ok := err.(*api.Error); ok {
if apiErr.StatusCode == 500 {
_, err := f.NewObject(ctx, dstRemote)
if err == fs.ErrorNotAFile {
log.Printf("FIXME: ignoring DirMove error - move succeeded anyway\n")
return nil
}
return err
}
}
if err != nil {
return errors.Wrap(err, "couldn't move directory")
}
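
The DirMove hunk works around an API that reports failure even though the move has landed: after a 500 it probes the destination and treats "a directory is already there" as success. The verification step in isolation (verifyMoved is a hypothetical helper):

// verifyMoved reports whether dstRemote now resolves to a directory on f,
// which is how the workaround above decides a "failed" move actually landed.
func verifyMoved(ctx context.Context, f fs.Fs, dstRemote string) bool {
	_, err := f.NewObject(ctx, dstRemote)
	// NewObject returns fs.ErrorNotAFile when the remote is a directory
	return err == fs.ErrorNotAFile
}
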
0 backend/local/aaaa Normal file
@@ -4,7 +4,6 @@ package local

import (
"context"
"os"
"syscall"

"github.com/pkg/errors"
@@ -16,9 +15,6 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var s syscall.Statfs_t
err := syscall.Statfs(f.root, &s)
if err != nil {
if os.IsNotExist(err) {
return nil, fs.ErrorDirNotFound
}
return nil, errors.Wrap(err, "failed to read disk usage")
}
bs := int64(s.Bsize) // nolint: unconvert

@@ -1,4 +1,4 @@
// +build windows plan9 js
// +build windows plan9

package local

@@ -1,4 +1,4 @@
// +build !windows,!plan9,!js
// +build !windows,!plan9

package local

@@ -144,17 +144,6 @@ the OS zeros the file. However sparse files may be undesirable as they
cause disk fragmentation and can be slow to work with.`,
Default: false,
Advanced: true,
}, {
Name: "no_set_modtime",
Help: `Disable setting modtime

Normally rclone updates modification time of files after they are done
uploading. This can cause permissions issues on Linux platforms when
the user rclone is running as does not own the file uploaded, such as
when copying to a CIFS mount owned by another user. If this option is
enabled, rclone will no longer update the modtime after copying a file.`,
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -177,7 +166,6 @@ type Options struct {
CaseSensitive bool `config:"case_sensitive"`
CaseInsensitive bool `config:"case_insensitive"`
NoSparse bool `config:"no_sparse"`
NoSetModTime bool `config:"no_set_modtime"`
Enc encoder.MultiEncoder `config:"encoding"`
}

@@ -554,10 +542,6 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {

// Precision of the file system
func (f *Fs) Precision() (precision time.Duration) {
if f.opt.NoSetModTime {
return fs.ModTimeNotSupported
}

f.precisionOk.Do(func() {
f.precision = f.readPrecision()
})
@@ -616,25 +600,20 @@ func (f *Fs) readPrecision() (precision time.Duration) {
return
}

// Purge deletes all the files in the directory
// Purge deletes all the files and directories
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
dir = f.localPath(dir)
fi, err := f.lstat(dir)
func (f *Fs) Purge(ctx context.Context) error {
fi, err := f.lstat(f.root)
if err != nil {
// already purged
if os.IsNotExist(err) {
return fs.ErrorDirNotFound
}
return err
}
if !fi.Mode().IsDir() {
return errors.Errorf("can't purge non directory: %q", dir)
return errors.Errorf("can't purge non directory: %q", f.root)
}
return os.RemoveAll(dir)
return os.RemoveAll(f.root)
}

// Move src to this remote using server side move operations.
@@ -899,9 +878,6 @@ func (o *Object) ModTime(ctx context.Context) time.Time {

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if o.fs.opt.NoSetModTime {
return nil
}
var err error
if o.translatedLink {
err = lChtimes(o.path, modTime, modTime)
@@ -1213,7 +1189,7 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
// Set the file to be a sparse file (important on Windows)
err = file.SetSparse(out)
if err != nil {
fs.Errorf(o, "Failed to set sparse: %v", err)
fs.Debugf(o, "Failed to set sparse: %v", err)
}
}

@@ -1231,15 +1207,6 @@ func (o *Object) setMetadata(info os.FileInfo) {
o.modTime = info.ModTime()
o.mode = info.Mode()
o.fs.objectMetaMu.Unlock()
// On Windows links read as 0 size so set the correct size here
if runtime.GOOS == "windows" && o.translatedLink {
linkdst, err := os.Readlink(o.path)
if err != nil {
fs.Errorf(o, "Failed to read link size: %v", err)
} else {
o.size = int64(len(linkdst))
}
}
}

// Stat an Object into info

@@ -6,6 +6,7 @@ import (
"os"
"path"
"path/filepath"
"runtime"
"testing"
"time"

@@ -88,6 +89,9 @@ func TestSymlink(t *testing.T) {

// Object viewed as symlink
file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
if runtime.GOOS == "windows" {
file2.Size = 0 // symlinks are 0 length under Windows
}

// Object viewed as destination
file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
@@ -117,6 +121,9 @@ func TestSymlink(t *testing.T) {
// Create a symlink
modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
if runtime.GOOS == "windows" {
file3.Size = 0 // symlinks are 0 length under Windows
}
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
if haveLChtimes {
fstest.CheckItems(t, r.Flocal, file1, file2, file3)
@@ -135,7 +142,9 @@ func TestSymlink(t *testing.T) {
o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
require.NoError(t, err)
assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
assert.Equal(t, int64(8), o.Size())
if runtime.GOOS != "windows" {
assert.Equal(t, int64(8), o.Size())
}

// Check that NewObject doesn't see the non suffixed version
_, err = r.Flocal.NewObject(ctx, "symlink2.txt")

@@ -117,7 +117,7 @@ type ListItem struct {
Name string `json:"name"`
Home string `json:"home"`
Size int64 `json:"size"`
Mtime uint64 `json:"mtime,omitempty"`
Mtime int64 `json:"mtime,omitempty"`
Hash string `json:"hash,omitempty"`
VirusScan string `json:"virus_scan,omitempty"`
Tree string `json:"tree,omitempty"`
@@ -159,6 +159,71 @@ type FolderInfoResponse struct {
Email string `json:"email"`
}

// ShardInfoResponse ...
type ShardInfoResponse struct {
Email string `json:"email"`
Body struct {
Video []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"video"`
ViewDirect []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"view_direct"`
WeblinkView []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_view"`
WeblinkVideo []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_video"`
WeblinkGet []struct {
Count int `json:"count"`
URL string `json:"url"`
} `json:"weblink_get"`
Stock []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"stock"`
WeblinkThumbnails []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_thumbnails"`
PublicUpload []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"public_upload"`
Auth []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"auth"`
Web []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"web"`
View []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"view"`
Upload []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"upload"`
Get []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"get"`
Thumbnails []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"thumbnails"`
} `json:"body"`
Time int64 `json:"time"`
Status int `json:"status"`
}

// CleanupResponse ...
type CleanupResponse struct {
Email string `json:"email"`

@@ -37,7 +37,6 @@ import (
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"

"github.com/pkg/errors"
@@ -656,14 +655,9 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
if err != nil {
return nil, -1, err
}
mTime := int64(item.Mtime)
if mTime < 0 {
fs.Debugf(f, "Fixing invalid timestamp %d on mailru file %q", mTime, remote)
mTime = 0
}
switch item.Kind {
case "folder":
dir := fs.NewDir(remote, time.Unix(mTime, 0)).SetSize(item.Size)
dir := fs.NewDir(remote, time.Unix(item.Mtime, 0)).SetSize(item.Size)
dirSize := item.Count.Files + item.Count.Folders
return dir, dirSize, nil
case "file":
@@ -677,7 +671,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
hasMetaData: true,
size: item.Size,
mrHash: binHash,
modTime: time.Unix(mTime, 0),
modTime: time.Unix(item.Mtime, 0),
}
return file, -1, nil
default:
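
One side of the itemToDirEntry hunk clamps invalid (negative) timestamps before calling time.Unix. The same sanitisation as a standalone sketch:

// sanitizeMtime clamps negative Unix timestamps (which some mailru entries
// carry) to the epoch so time.Unix never yields a pre-1970 date.
func sanitizeMtime(mtime int64) int64 {
	if mtime < 0 {
		return 0
	}
	return mtime
}
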
@@ -1168,12 +1162,12 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.purgeWithCheck(ctx, dir, true, "rmdir")
}

// Purge deletes all the files in the directory
// Purge deletes all the files and the root directory
// Optional interface: Only implement this if you have a way of deleting
// all the files quicker than just running Remove() on the result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
func (f *Fs) Purge(ctx context.Context) error {
// fs.Debugf(f, ">>> Purge")
return f.purgeWithCheck(ctx, dir, false, "purge")
return f.purgeWithCheck(ctx, "", false, "purge")
}

// purgeWithCheck() removes the root directory.
@@ -1867,30 +1861,30 @@ func (f *Fs) uploadShard(ctx context.Context) (string, error) {
return f.shardURL, nil
}

opts := rest.Opts{
RootURL: api.DispatchServerURL,
Method: "GET",
Path: "/u",
}

var (
res *http.Response
url string
err error
)
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.Call(ctx, &opts)
if err == nil {
url, err = readBodyWord(res)
}
return fserrors.ShouldRetry(err), err
})
token, err := f.accessToken()
if err != nil {
closeBody(res)
return "", err
}

f.shardURL = url
opts := rest.Opts{
Method: "GET",
Path: "/api/m1/dispatcher",
Parameters: url.Values{
"client_id": {api.OAuthClientID},
"access_token": {token},
},
}

var info api.ShardInfoResponse
err = f.pacer.Call(func() (bool, error) {
res, err := f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(res, err, f, &opts)
})
if err != nil {
return "", err
}

f.shardURL = info.Body.Upload[0].URL
f.shardExpiry = time.Now().Add(shardExpirySec * time.Second)
fs.Debugf(f, "new upload shard: %s", f.shardURL)
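
uploadShard above memoises the dispatcher's answer: it fetches /api/m1/dispatcher once, stores Body.Upload[0].URL, and reuses it until shardExpiry passes. A compact sketch of that cache-with-TTL shape (cachedShard is an illustrative name):

// cachedShard returns the stored upload URL while it is still fresh,
// otherwise it re-fetches one via the supplied dispatcher call.
type cachedShard struct {
	url    string
	expiry time.Time
}

func (c *cachedShard) get(fetch func() (string, error), ttl time.Duration) (string, error) {
	if c.url != "" && time.Now().Before(c.expiry) {
		return c.url, nil // cache hit
	}
	u, err := fetch()
	if err != nil {
		return "", err
	}
	c.url, c.expiry = u, time.Now().Add(ttl)
	return u, nil
}
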
@@ -2122,18 +2116,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, err
}

start, end, partialRequest := getTransferRange(o.size, options...)

headers := map[string]string{
"Accept": "*/*",
"Content-Type": "application/octet-stream",
}
if partialRequest {
rangeStr := fmt.Sprintf("bytes=%d-%d", start, end-1)
headers["Range"] = rangeStr
// headers["Content-Range"] = rangeStr
headers["Accept-Ranges"] = "bytes"
}
start, end, partial := getTransferRange(o.size, options...)

// TODO: set custom timeouts
opts := rest.Opts{
@@ -2144,7 +2127,10 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
"client_id": {api.OAuthClientID},
"token": {token},
},
ExtraHeaders: headers,
ExtraHeaders: map[string]string{
"Accept": "*/*",
"Range": fmt.Sprintf("bytes=%d-%d", start, end-1),
},
}

var res *http.Response
@@ -2165,36 +2151,18 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, err
}

// Server should respond with Status 206 and Content-Range header to a range
// request. Status 200 (and no Content-Range) means a full-content response.
partialResponse := res.StatusCode == 206

var (
hasher gohash.Hash
wrapStream io.ReadCloser
)
if !partialResponse {
var hasher gohash.Hash
if !partial {
// Cannot check hash of partial download
hasher = mrhash.New()
}
wrapStream = &endHandler{
wrapStream := &endHandler{
ctx: ctx,
stream: res.Body,
hasher: hasher,
o: o,
server: server,
}
if partialRequest && !partialResponse {
fs.Debugf(o, "Server returned full content instead of range")
if start > 0 {
// Discard the beginning of the data
_, err = io.CopyN(ioutil.Discard, wrapStream, start)
if err != nil {
return nil, err
}
}
wrapStream = readers.NewLimitedReadCloser(wrapStream, end-start)
}
return wrapStream, nil
}

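The Open hunk tolerates servers that answer a Range request with a 200 full-content response: it discards the first start bytes and caps the stream at end-start. A self-contained sketch of that fallback, assuming rclone's readers.NewLimitedReadCloser (rangeFallback is a hypothetical name):

// rangeFallback adapts a full-content body so the caller still sees only
// bytes [start, end) when the server ignored the Range header.
func rangeFallback(body io.ReadCloser, start, end int64) (io.ReadCloser, error) {
	if start > 0 {
		// drop the prefix the caller did not ask for
		if _, err := io.CopyN(ioutil.Discard, body, start); err != nil {
			_ = body.Close()
			return nil, err
		}
	}
	return readers.NewLimitedReadCloser(body, end-start), nil
}
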
@@ -669,13 +669,13 @@ func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}

// Purge deletes all the files in the directory
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck("", false)
}

// move a file or folder (srcFs, srcRemote, info) to (f, dstRemote)

@@ -410,28 +410,3 @@ func (i *Item) GetParentReference() *ItemReference {
func (i *Item) IsRemote() bool {
return i.RemoteItem != nil
}

// User details for each version
type User struct {
Email string `json:"email"`
ID string `json:"id"`
DisplayName string `json:"displayName"`
}

// LastModifiedBy for each version
type LastModifiedBy struct {
User User `json:"user"`
}

// Version info
type Version struct {
ID string `json:"id"`
LastModifiedDateTime time.Time `json:"lastModifiedDateTime"`
Size int `json:"size"`
LastModifiedBy LastModifiedBy `json:"lastModifiedBy"`
}

// VersionsResponse is returned from /versions
type VersionsResponse struct {
Versions []Version `json:"value"`
}

@@ -14,7 +14,6 @@ import (
"path"
"strconv"
"strings"
"sync"
"time"

"github.com/pkg/errors"
@@ -27,8 +26,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
@@ -241,7 +238,13 @@ func init() {
m.Set(configDriveType, rootItem.ParentReference.DriveType)
config.SaveConfig()
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Microsoft App Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Microsoft App Client Secret\nLeave blank normally.",
}, {
Name: "chunk_size",
Help: `Chunk size to upload files with - must be multiple of 320k (327,680 bytes).

@@ -281,23 +284,6 @@ different Onedrives. Note that this isn't enabled by default
because it isn't easy to tell if it will work between any two
configurations.`,
Advanced: true,
}, {
Name: "no_versions",
Default: false,
Help: `Remove all versions on modifying operations

Onedrive for business creates versions when rclone uploads new files
overwriting an existing one and when it sets the modification time.

These versions take up space out of the quota.

This flag checks for versions after file upload and setting
modification time and removes all but the last version.

**NB** Onedrive personal can't currently delete versions so don't use
this flag there.
`,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -344,7 +330,7 @@ this flag there.
encoder.EncodeRightSpace |
encoder.EncodeWin |
encoder.EncodeInvalidUtf8),
}}...),
}},
})
}

@@ -355,7 +341,6 @@ type Options struct {
DriveType string `config:"drive_type"`
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
NoVersions bool `config:"no_versions"`
Enc encoder.MultiEncoder `config:"encoding"`
}

@@ -1088,13 +1073,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil
}

// Purge deletes all the files in the directory
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
}

// Move src to this remote using server side move operations.
@@ -1247,10 +1232,6 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return nil, errors.Wrap(err, "about failed")
}
q := drive.Quota
// On (some?) Onedrive sharepoints these are all 0 so return unknown in that case
if q.Total == 0 && q.Used == 0 && q.Deleted == 0 && q.Remaining == 0 {
return &fs.Usage{}, nil
}
usage = &fs.Usage{
Total: fs.NewUsageValue(q.Total), // quota of bytes that can be used
Used: fs.NewUsageValue(q.Used), // bytes in use
@@ -1294,73 +1275,6 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return result.Link.WebURL, nil
}

// CleanUp deletes all the hidden files.
func (f *Fs) CleanUp(ctx context.Context) error {
token := make(chan struct{}, fs.Config.Checkers)
var wg sync.WaitGroup
err := walk.Walk(ctx, f, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
err = entries.ForObjectError(func(obj fs.Object) error {
o, ok := obj.(*Object)
if !ok {
return errors.New("internal error: not a onedrive object")
}
wg.Add(1)
token <- struct{}{}
go func() {
defer func() {
<-token
wg.Done()
}()
err := o.deleteVersions(ctx)
if err != nil {
fs.Errorf(o, "Failed to remove versions: %v", err)
}
}()
return nil
})
wg.Wait()
return err
})
return err
}

// Finds and removes any old versions for o
func (o *Object) deleteVersions(ctx context.Context) error {
opts := newOptsCall(o.id, "GET", "/versions")
var versions api.VersionsResponse
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &versions)
return shouldRetry(resp, err)
})
if err != nil {
return err
}
if len(versions.Versions) < 2 {
return nil
}
for _, version := range versions.Versions[1:] {
err = o.deleteVersion(ctx, version.ID)
if err != nil {
return err
}
}
return nil
}

// Finds and removes any old versions for o
func (o *Object) deleteVersion(ctx context.Context, ID string) error {
if operations.SkipDestructive(ctx, fmt.Sprintf("%s of %s", ID, o.remote), "delete version") {
return nil
}
fs.Infof(o, "removing version %q", ID)
opts := newOptsCall(o.id, "DELETE", "/versions/"+ID)
opts.NoResponse = true
return o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.Call(ctx, &opts)
return shouldRetry(resp, err)
})
}

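CleanUp above bounds its fan-out with a buffered channel used as a counting semaphore (capacity fs.Config.Checkers) plus a sync.WaitGroup. The same pattern in isolation, with illustrative names:

// boundedEach runs fn over items with at most limit goroutines in flight.
func boundedEach(items []string, limit int, fn func(string)) {
	sem := make(chan struct{}, limit) // counting semaphore
	var wg sync.WaitGroup
	for _, it := range items {
		wg.Add(1)
		sem <- struct{}{} // acquire a slot
		go func(it string) {
			defer func() {
				<-sem // release the slot
				wg.Done()
			}()
			fn(it)
		}(it)
	}
	wg.Wait()
}
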
// ------------------------------------------------------------

// Fs returns the parent Fs
@@ -1524,13 +1438,6 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
return shouldRetry(resp, err)
})
// Remove versions if required
if o.fs.opt.NoVersions {
err := o.deleteVersions(ctx)
if err != nil {
fs.Errorf(o, "Failed to remove versions: %v", err)
}
}
return info, err
}

@@ -1837,14 +1744,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}

// If updating the file then remove versions
if o.fs.opt.NoVersions && o.hasMetaData {
err = o.deleteVersions(ctx)
if err != nil {
fs.Errorf(o, "Failed to remove versions: %v", err)
}
}

return o.setMetaData(info)
}

@@ -1941,7 +1840,6 @@ var (
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}

@@ -506,13 +506,13 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return nil
}

// Purge deletes all the files in the directory
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
}

// Return an Object from a path
@@ -646,6 +646,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
400, // Bad request (seen in "Next token is expired")
401, // Unauthorized (seen in "Token has expired")
408, // Request Timeout
423, // Locked - get this on folders sometimes
@@ -103,7 +103,13 @@ func init() {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Pcloud App Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Pcloud App Client Secret\nLeave blank normally.",
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
@@ -122,20 +128,10 @@ func init() {
Name: "hostname",
Help: `Hostname to connect to.

This is normally set when rclone initially does the oauth connection,
however you will need to set it by hand if you are using remote config
with rclone authorize.
`,
This is normally set when rclone initially does the oauth connection.`,
Default: defaultHostname,
Advanced: true,
Examples: []fs.OptionExample{{
Value: defaultHostname,
Help: "Original/US region",
}, {
Value: "eapi.pcloud.com",
Help: "EU region",
}},
}}...),
}},
})
}

@@ -675,13 +671,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil
}

// Purge deletes all the files in the directory
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
}

// CleanUp empties the trash
@@ -824,19 +820,14 @@ func (f *Fs) linkDir(ctx context.Context, dirID string, expire fs.Duration) (str
}

func (f *Fs) linkFile(ctx context.Context, path string, expire fs.Duration) (string, error) {
obj, err := f.NewObject(ctx, path)
if err != nil {
return "", err
}
o := obj.(*Object)
opts := rest.Opts{
Method: "POST",
Path: "/getfilepublink",
Parameters: url.Values{},
}
var result api.PubLinkResult
opts.Parameters.Set("fileid", fileIDtoNumber(o.id))
err = f.pacer.Call(func() (bool, error) {
opts.Parameters.Set("path", path)
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
@@ -849,6 +840,11 @@ func (f *Fs) linkFile(ctx context.Context, path string, expire fs.Duration) (str

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
err := f.dirCache.FindRoot(ctx, false)
if err != nil {
return "", err
}

dirID, err := f.dirCache.FindDir(ctx, remote, false)
if err == fs.ErrorDirNotFound {
return f.linkFile(ctx, remote, expire)
@@ -609,13 +609,13 @@ func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}

// Purge deletes all the files in the directory
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
}

// move a file or folder

@@ -458,9 +458,10 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
return err
}

// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error) {
// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
// defer log.Trace(f, "dir=%v", dir)("err=%v", &err)

root := strings.Trim(path.Join(f.root, dir), "/")
@@ -477,20 +478,18 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
}
dirID := atoi(directoryID)

if check {
// check directory empty
var children []putio.File
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "listing files: %d", dirID)
children, _, err = f.client.Files.List(ctx, dirID)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "Rmdir")
}
if len(children) != 0 {
return errors.New("directory not empty")
}
// check directory empty
var children []putio.File
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "listing files: %d", dirID)
children, _, err = f.client.Files.List(ctx, dirID)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "Rmdir")
}
if len(children) != 0 {
return errors.New("directory not empty")
}

// remove it
@@ -503,26 +502,35 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
return err
}

// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
return f.purgeCheck(ctx, dir, true)
}

// Precision returns the precision
func (f *Fs) Precision() time.Duration {
return time.Second
}

// Purge deletes all the files in the directory
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
func (f *Fs) Purge(ctx context.Context) (err error) {
// defer log.Trace(f, "")("err=%v", &err)
return f.purgeCheck(ctx, dir, false)

if f.root == "" {
return errors.New("can't purge root directory")
}
rootIDs, err := f.dirCache.RootID(ctx, false)
if err != nil {
return err
}
rootID := atoi(rootIDs)
// Let putio delete the filesystem tree
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "deleting file: %d", rootID)
err = f.client.Files.Delete(ctx, rootID)
return shouldRetry(err)
})
f.dirCache.ResetRoot()
return err
}

// Copy src to this remote using server side copy operations.

@@ -1,7 +1,7 @@
// Package qingstor provides an interface to QingStor object storage
// Home: https://www.qingcloud.com/

// +build !plan9,!js
// +build !plan9

package qingstor

@@ -1,6 +1,6 @@
// Test QingStor filesystem interface

// +build !plan9,!js
// +build !plan9

package qingstor

@@ -1,6 +1,6 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build plan9 js
// +build plan9

package qingstor

@@ -1,6 +1,6 @@
// Upload object to QingStor

// +build !plan9,!js
// +build !plan9

package qingstor

684 backend/s3/s3.go
File diff suppressed because it is too large
@@ -46,7 +46,7 @@ type Library struct {
Encrypted bool `json:"encrypted"`
Owner string `json:"owner"`
ID string `json:"id"`
Size int64 `json:"size"`
Size int `json:"size"`
Name string `json:"name"`
Modified int64 `json:"mtime"`
}

@@ -584,38 +584,29 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return nil
}

// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
// Rmdir removes the directory or library if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
libraryName, dirPath := f.splitPath(dir)
libraryID, err := f.getLibraryID(ctx, libraryName)
if err != nil {
return err
}

if check {
directoryEntries, err := f.getDirectoryEntries(ctx, libraryID, dirPath, false)
if err != nil {
return err
}
if len(directoryEntries) > 0 {
return fs.ErrorDirectoryNotEmpty
}
directoryEntries, err := f.getDirectoryEntries(ctx, libraryID, dirPath, false)
if err != nil {
return err
}
if len(directoryEntries) > 0 {
return fs.ErrorDirectoryNotEmpty
}

if dirPath == "" || dirPath == "/" {
return f.deleteLibrary(ctx, libraryID)
}
return f.deleteDir(ctx, libraryID, dirPath)
}

// Rmdir removes the directory or library if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, true)
}

// ==================== Optional Interface fs.ListRer ====================

// ListR lists the objects and directories of the Fs starting
@@ -902,14 +893,33 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string

// ==================== Optional Interface fs.Purger ====================

// Purge all files in the directory
// Purge all files in the root and the root directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
if f.libraryName == "" {
return errors.New("Cannot delete from the root of the server. Please select a library")
}
libraryID, err := f.getLibraryID(ctx, f.libraryName)
if err != nil {
return err
}
if f.rootDirectory == "" {
// Delete library
err = f.deleteLibrary(ctx, libraryID)
if err != nil {
return err
}
return nil
}
err = f.deleteDir(ctx, libraryID, f.rootDirectory)
if err != nil {
return err
}
return nil
}

// ==================== Optional Interface fs.CleanUpper ====================
@@ -1004,7 +1014,7 @@ func (f *Fs) listLibraries(ctx context.Context) (entries fs.DirEntries, err erro

for _, library := range libraries {
d := fs.NewDir(library.Name, time.Unix(library.Modified, 0))
d.SetSize(library.Size)
d.SetSize(int64(library.Size))
entries = append(entries, d)
}

@@ -164,18 +164,6 @@ Home directory can be found in a shared folder called "home"
Default: false,
Help: "Set to skip any symlinks and any other non regular files.",
Advanced: true,
}, {
Name: "subsystem",
Default: "sftp",
Help: "Specifies the SSH2 subsystem on the remote host.",
Advanced: true,
}, {
Name: "server_command",
Default: "",
Help: `Specifies the path or command to run a sftp server on the remote host.

The subsystem option is ignored when server_command is defined.`,
Advanced: true,
}},
}
fs.Register(fsi)
@@ -199,8 +187,6 @@ type Options struct {
Md5sumCommand string `config:"md5sum_command"`
Sha1sumCommand string `config:"sha1sum_command"`
SkipLinks bool `config:"skip_links"`
Subsystem string `config:"subsystem"`
ServerCommand string `config:"server_command"`
}

// Fs stores the interface to the remote SFTP files
@@ -304,7 +290,7 @@ func (f *Fs) sftpConnection() (c *conn, err error) {
if err != nil {
return nil, errors.Wrap(err, "couldn't connect SSH")
}
c.sftpClient, err = f.newSftpClient(c.sshClient)
c.sftpClient, err = sftp.NewClient(c.sshClient)
if err != nil {
_ = c.sshClient.Close()
return nil, errors.Wrap(err, "couldn't initialise SFTP")
@@ -313,35 +299,6 @@ func (f *Fs) sftpConnection() (c *conn, err error) {
return c, nil
}

// Creates a new SFTP client on conn, using the specified subsystem
// or sftp server, and zero or more option functions
func (f *Fs) newSftpClient(conn *ssh.Client, opts ...sftp.ClientOption) (*sftp.Client, error) {
s, err := conn.NewSession()
if err != nil {
return nil, err
}
pw, err := s.StdinPipe()
if err != nil {
return nil, err
}
pr, err := s.StdoutPipe()
if err != nil {
return nil, err
}

if f.opt.ServerCommand != "" {
if err := s.Start(f.opt.ServerCommand); err != nil {
return nil, err
}
} else {
if err := s.RequestSubsystem(f.opt.Subsystem); err != nil {
return nil, err
}
}

return sftp.NewClientPipe(pr, pw, opts...)
}

// Get an SFTP connection from the pool, or open a new one
func (f *Fs) getSftpConnection() (c *conn, err error) {
f.poolMu.Lock()
@@ -1087,7 +1044,7 @@ func shellEscape(str string) string {
func parseHash(bytes []byte) string {
// For strings with backslash *sum writes a leading \
// https://unix.stackexchange.com/q/313733/94054
return strings.ToLower(strings.Split(strings.TrimLeft(string(bytes), "\\"), " ")[0]) // Split at hash / filename separator / all convert to lowercase
return strings.Split(strings.TrimLeft(string(bytes), "\\"), " ")[0] // Split at hash / filename separator
}

// Parses the byte array output from the SSH session

@@ -853,8 +853,8 @@ func (f *Fs) Precision() time.Duration {
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
}

// updateItem patches a file or folder

@@ -895,12 +895,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil
}

// Purge deletes all the files in the directory
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
func (f *Fs) Purge(ctx context.Context) error {
// Caution: Deleting a folder may orphan objects. It's important
// to remove the contents of the folder before you delete the
// folder. That's because removing a folder using DELETE does not
@@ -920,7 +920,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
if f.opt.HardDelete {
return fs.ErrorCantPurge
}
return f.purgeCheck(ctx, dir, false)
return f.purgeCheck(ctx, "", false)
}

// moveFile moves a file server side

@@ -840,21 +840,17 @@ func (f *Fs) Precision() time.Duration {
return time.Nanosecond
}

// Purge deletes all the files in the directory
// Purge deletes all the files and directories
//
// Implemented here so we can make sure we delete directory markers
func (f *Fs) Purge(ctx context.Context, dir string) error {
container, directory := f.split(dir)
if container == "" {
return fs.ErrorListBucketRequired
}
func (f *Fs) Purge(ctx context.Context) error {
// Delete all the files including the directory markers
toBeDeleted := make(chan fs.Object, fs.Config.Transfers)
delErr := make(chan error, 1)
go func() {
delErr <- operations.DeleteFiles(ctx, toBeDeleted)
}()
err := f.list(container, directory, f.rootDirectory, false, true, true, func(entry fs.DirEntry) error {
err := f.list(f.rootContainer, f.rootDirectory, f.rootDirectory, f.rootContainer == "", true, true, func(entry fs.DirEntry) error {
if o, ok := entry.(*Object); ok {
toBeDeleted <- o
}
@@ -868,7 +864,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
if err != nil {
return err
}
return f.Rmdir(ctx, dir)
return f.Rmdir(ctx, "")
}

// Copy src to this remote using server side copy operations.
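
The swift Purge above pipelines deletion: a goroutine consumes objects from a channel via operations.DeleteFiles while the listing feeds it. The producer/consumer shape in isolation (deleteAll is a hypothetical wrapper):

// deleteAll feeds every object yielded by list into a concurrent deleter
// and returns the first error from either side.
func deleteAll(ctx context.Context, list func(func(fs.Object)) error) error {
	toBeDeleted := make(chan fs.Object, fs.Config.Transfers)
	delErr := make(chan error, 1)
	go func() {
		delErr <- operations.DeleteFiles(ctx, toBeDeleted)
	}()
	err := list(func(o fs.Object) { toBeDeleted <- o })
	close(toBeDeleted) // let DeleteFiles drain and finish
	if delDone := <-delErr; err == nil {
		err = delDone
	}
	return err
}
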
@@ -1115,18 +1111,13 @@ func min(x, y int64) int64 {
//
// if except is passed in then segments with that prefix won't be deleted
func (o *Object) removeSegments(except string) error {
segmentsContainer, _, err := o.getSegmentsDlo()
if err != nil {
return err
}
except = path.Join(o.remote, except)
// fs.Debugf(o, "segmentsContainer %q prefix %q", segmentsContainer, prefix)
err = o.fs.listContainerRoot(segmentsContainer, o.remote, "", false, true, true, func(remote string, object *swift.Object, isDirectory bool) error {
segmentsContainer, prefix, err := o.getSegmentsDlo()
err = o.fs.listContainerRoot(segmentsContainer, prefix, "", false, true, true, func(remote string, object *swift.Object, isDirectory bool) error {
if isDirectory {
return nil
}
if except != "" && strings.HasPrefix(remote, except) {
// fs.Debugf(o, "Ignoring current segment file %q in container %q", remote, segmentsContainer)
// fs.Debugf(o, "Ignoring current segment file %q in container %q", segmentsRoot+remote, segmentsContainer)
return nil
}
fs.Debugf(o, "Removing segment file %q in container %q", remote, segmentsContainer)
@@ -1335,7 +1326,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// object has been safely uploaded
o.lastModified = modTime
o.size = size
o.md5 = rxHeaders["Etag"]
o.md5 = rxHeaders["ETag"]
o.contentType = contentType
o.headers = headers
if inCount != nil {

@@ -1,6 +1,7 @@
package union

import (
"bufio"
"context"
"io"
"sync"
@@ -66,9 +67,31 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
obj := entries[0].(*upstream.Object)
return obj.Update(ctx, in, src, options...)
}
// Multi-threading
readers, errChan := multiReader(len(entries), in)
// Get multiple reader
readers := make([]io.Reader, len(entries))
writers := make([]io.Writer, len(entries))
errs := Errors(make([]error, len(entries)+1))
for i := range entries {
r, w := io.Pipe()
bw := bufio.NewWriter(w)
readers[i], writers[i] = r, bw
defer func() {
err := w.Close()
if err != nil {
panic(err)
}
}()
}
go func() {
mw := io.MultiWriter(writers...)
es := make([]error, len(writers)+1)
_, es[len(es)-1] = io.Copy(mw, in)
for i, bw := range writers {
es[i] = bw.(*bufio.Writer).Flush()
}
errs[len(entries)] = Errors(es).Err()
}()
// Multi-threading
multithread(len(entries), func(i int) {
if o, ok := entries[i].(*upstream.Object); ok {
err := o.Update(ctx, readers[i], src, options...)
@@ -77,7 +100,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
errs[i] = fs.ErrorNotAFile
}
})
errs[len(entries)] = <-errChan
return errs.Err()
}

@@ -145,16 +145,11 @@ func (f *Fs) Hashes() hash.Set {
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
upstreams, err := f.create(ctx, dir)
if err == fs.ErrorObjectNotFound {
if dir != parentDir(dir) {
if err := f.Mkdir(ctx, parentDir(dir)); err != nil {
return err
}
upstreams, err = f.create(ctx, dir)
} else if dir == "" {
// If root dirs not created then create them
upstreams, err = f.upstreams, nil
if err == fs.ErrorObjectNotFound && dir != parentDir(dir) {
if err := f.Mkdir(ctx, parentDir(dir)); err != nil {
return err
}
upstreams, err = f.create(ctx, dir)
}
if err != nil {
return err
@@ -167,13 +162,13 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return errs.Err()
}

// Purge all files in the directory
// Purge all files in the root and the root directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error {
func (f *Fs) Purge(ctx context.Context) error {
for _, r := range f.upstreams {
if r.Features().Purge == nil {
return fs.ErrorCantPurge
@@ -185,10 +180,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
}
errs := Errors(make([]error, len(upstreams)))
multithread(len(upstreams), func(i int) {
err := upstreams[i].Features().Purge(ctx, dir)
if errors.Cause(err) == fs.ErrorDirNotFound {
err = nil
}
err := upstreams[i].Features().Purge(ctx)
errs[i] = errors.Wrap(err, upstreams[i].Name())
})
return errs.Err()
@@ -390,37 +382,6 @@ func (f *Fs) DirCacheFlush() {
})
}

// Tee in into n outputs
//
// When finished read the error from the channel
func multiReader(n int, in io.Reader) ([]io.Reader, <-chan error) {
readers := make([]io.Reader, n)
pipeWriters := make([]*io.PipeWriter, n)
writers := make([]io.Writer, n)
errChan := make(chan error, 1)
for i := range writers {
r, w := io.Pipe()
bw := bufio.NewWriter(w)
readers[i], pipeWriters[i], writers[i] = r, w, bw
}
go func() {
mw := io.MultiWriter(writers...)
es := make([]error, 2*n+1)
_, copyErr := io.Copy(mw, in)
es[2*n] = copyErr
// Flush the buffers
for i, bw := range writers {
es[i] = bw.(*bufio.Writer).Flush()
}
// Close the underlying pipes
for i, pw := range pipeWriters {
es[2*i] = pw.CloseWithError(copyErr)
}
errChan <- Errors(es).Err()
}()
return readers, errChan
}

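multiReader tees one input stream into n independent readers via io.Pipe and buffered writers, reporting the copy/flush/close errors on a channel once the input is drained. A hedged usage sketch (teeToSinks and its arguments are hypothetical):

// teeToSinks copies src to every sink using the multiReader helper above.
func teeToSinks(src io.Reader, sinks []io.Writer) error {
	readers, errChan := multiReader(len(sinks), src)
	var wg sync.WaitGroup
	for i := range sinks {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			_, _ = io.Copy(sinks[i], readers[i]) // each reader sees the full stream
		}(i)
	}
	wg.Wait()
	return <-errChan // copy/flush/pipe-close errors from the tee goroutine
}
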
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bool, options ...fs.OpenOption) (fs.Object, error) {
srcPath := src.Remote()
upstreams, err := f.create(ctx, srcPath)
@@ -448,9 +409,31 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
e, err := f.wrapEntries(u.WrapObject(o))
return e.(*Object), err
}
// Multi-threading
readers, errChan := multiReader(len(upstreams), in)
errs := Errors(make([]error, len(upstreams)+1))
// Get multiple reader
readers := make([]io.Reader, len(upstreams))
writers := make([]io.Writer, len(upstreams))
for i := range writers {
r, w := io.Pipe()
bw := bufio.NewWriter(w)
readers[i], writers[i] = r, bw
defer func() {
err := w.Close()
if err != nil {
panic(err)
}
}()
}
go func() {
mw := io.MultiWriter(writers...)
es := make([]error, len(writers)+1)
_, es[len(es)-1] = io.Copy(mw, in)
for i, bw := range writers {
es[i] = bw.(*bufio.Writer).Flush()
}
errs[len(upstreams)] = Errors(es).Err()
}()
// Multi-threading
objs := make([]upstream.Entry, len(upstreams))
multithread(len(upstreams), func(i int) {
u := upstreams[i]
@@ -467,7 +450,6 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
}
objs[i] = u.WrapObject(o)
})
errs[len(upstreams)] = <-errChan
err = errs.Err()
if err != nil {
return nil, err
@@ -522,9 +504,6 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
}
for _, u := range f.upstreams {
usg, err := u.About(ctx)
if errors.Cause(err) == fs.ErrorDirNotFound {
continue
}
if err != nil {
return nil, err
}
@@ -823,7 +802,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, err
}
fs.Debugf(f, "actionPolicy = %T, createPolicy = %T, searchPolicy = %T", f.actionPolicy, f.createPolicy, f.searchPolicy)
var features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: false,

@@ -153,29 +153,3 @@ func TestPolicy2(t *testing.T) {
UnimplementableObjectMethods: []string{"MimeType"},
})
}

func TestPolicy3(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-policy31")
tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-policy32")
tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-policy33")
require.NoError(t, os.MkdirAll(tempdir1, 0744))
require.NoError(t, os.MkdirAll(tempdir2, 0744))
require.NoError(t, os.MkdirAll(tempdir3, 0744))
upstreams := tempdir1 + " " + tempdir2 + " " + tempdir3
name := "TestUnionPolicy3"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "union"},
{Name: name, Key: "upstreams", Value: upstreams},
{Name: name, Key: "action_policy", Value: "all"},
{Name: name, Key: "create_policy", Value: "all"},
{Name: name, Key: "search_policy", Value: "all"},
},
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}

@@ -97,7 +97,6 @@ func New(remote, root string, cacheTime time.Duration) (*Fs, error) {
return nil, err
}
f.Fs = myFs
cache.PinUntilFinalized(f.Fs, f)
return f, err
}

@@ -336,9 +335,6 @@ func (f *Fs) updateUsageCore(lock bool) error {
usage, err := f.RootFs.Features().About(ctx)
if err != nil {
f.cacheUpdate = false
if errors.Cause(err) == fs.ErrorDirNotFound {
err = nil
}
return err
}
if lock {

@@ -686,8 +686,8 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
|
||||
|
||||
// mkParentDir makes the parent of the native path dirPath if
|
||||
// necessary and any directories above that
|
||||
func (f *Fs) mkParentDir(ctx context.Context, dirPath string) (err error) {
|
||||
// defer log.Trace(dirPath, "")("err=%v", &err)
|
||||
func (f *Fs) mkParentDir(ctx context.Context, dirPath string) error {
|
||||
// defer log.Trace(dirPath, "")("")
|
||||
// chop off trailing / if it exists
|
||||
if strings.HasSuffix(dirPath, "/") {
|
||||
dirPath = dirPath[:len(dirPath)-1]
|
||||
@@ -699,27 +699,6 @@ func (f *Fs) mkParentDir(ctx context.Context, dirPath string) (err error) {
|
||||
return f.mkdir(ctx, parent)
|
||||
}
|
||||
|
||||
// _dirExists - list dirPath to see if it exists
|
||||
//
|
||||
// dirPath should be a native path ending in a /
|
||||
func (f *Fs) _dirExists(ctx context.Context, dirPath string) (exists bool) {
|
||||
opts := rest.Opts{
|
||||
Method: "PROPFIND",
|
||||
Path: dirPath,
|
||||
ExtraHeaders: map[string]string{
|
||||
"Depth": "0",
|
||||
},
|
||||
}
|
||||
var result api.Multistatus
|
||||
var resp *http.Response
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
|
||||
return f.shouldRetry(resp, err)
|
||||
})
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// low level mkdir, only makes the directory, doesn't attempt to create parents
|
||||
func (f *Fs) _mkdir(ctx context.Context, dirPath string) error {
|
||||
// We assume the root is already created
|
||||
@@ -740,29 +719,19 @@ func (f *Fs) _mkdir(ctx context.Context, dirPath string) error {
|
||||
return f.shouldRetry(resp, err)
|
||||
})
|
||||
if apiErr, ok := err.(*api.Error); ok {
|
||||
// Check if it already exists. The response code for this isn't
|
||||
// defined in the RFC so the implementations vary wildly.
|
||||
//
|
||||
// already exists
|
||||
// owncloud returns 423/StatusLocked if the create is already in progress
|
||||
if apiErr.StatusCode == http.StatusMethodNotAllowed || apiErr.StatusCode == http.StatusNotAcceptable || apiErr.StatusCode == http.StatusLocked {
|
||||
return nil
|
||||
}
|
||||
// 4shared returns a 409/StatusConflict here which clashes
|
||||
// horribly with the intermediate paths don't exist meaning. So
|
||||
// check to see if actually exists. This will correct other
|
||||
// error codes too.
|
||||
if f._dirExists(ctx, dirPath) {
|
||||
return nil
|
||||
}
|
||||
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// mkdir makes the directory and parents using native paths
|
||||
func (f *Fs) mkdir(ctx context.Context, dirPath string) (err error) {
|
||||
// defer log.Trace(dirPath, "")("err=%v", &err)
|
||||
err = f._mkdir(ctx, dirPath)
|
||||
func (f *Fs) mkdir(ctx context.Context, dirPath string) error {
|
||||
// defer log.Trace(dirPath, "")("")
|
||||
err := f._mkdir(ctx, dirPath)
|
||||
if apiErr, ok := err.(*api.Error); ok {
|
||||
// parent does not exist so create it first then try again
|
||||
if apiErr.StatusCode == http.StatusConflict {
|
||||
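
The two `_mkdir` variants above differ in how much they trust MKCOL status codes: WebDAV servers disagree about what creating an existing collection returns, so one side adds a PROPFIND probe (`_dirExists`) for ambiguous codes such as 4shared's 409. The decision logic in isolation looks roughly like the sketch below, with `dirExists` standing in for the probe (assumes `net/http` and `fmt`; not the exact rclone code):

    // interpretMkcolError decides whether an MKCOL failure really means failure.
    func interpretMkcolError(statusCode int, dirExists func() bool) error {
        switch statusCode {
        case http.StatusMethodNotAllowed, // many servers: already exists
            http.StatusNotAcceptable, // some servers: already exists
            http.StatusLocked: // owncloud: create already in progress
            return nil
        }
        // Ambiguous codes (e.g. 409 Conflict, which normally means a missing
        // intermediate path) are settled by probing the directory directly.
        if dirExists() {
            return nil
        }
        return fmt.Errorf("mkdir failed with status %d", statusCode)
    }
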
@@ -899,13 +868,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
    return f.copyOrMove(ctx, src, remote, "COPY")
}

// Purge deletes all the files in the directory
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
    return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
    return f.purgeCheck(ctx, "", false)
}

// Move src to this remote using server side move operations.

@@ -67,7 +67,13 @@ func init() {
            return
        }
    },
    Options: append(oauthutil.SharedOptions, []fs.Option{{
    Options: []fs.Option{{
        Name: config.ConfigClientID,
        Help: "Yandex Client Id\nLeave blank normally.",
    }, {
        Name: config.ConfigClientSecret,
        Help: "Yandex Client Secret\nLeave blank normally.",
    }, {
        Name:     config.ConfigEncoding,
        Help:     config.ConfigEncodingHelp,
        Advanced: true,
@@ -75,7 +81,7 @@ func init() {
        // it doesn't seem worth making an exception for this
        Default: (encoder.Display |
            encoder.EncodeInvalidUtf8),
    }}...),
    }},
    })
}

@@ -631,13 +637,13 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    return f.purgeCheck(ctx, dir, true)
}

// Purge deletes all the files in the directory
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
    return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
    return f.purgeCheck(ctx, "", false)
}

// copyOrMoves copies or moves directories or files depending on the method parameter

@@ -45,6 +45,7 @@ var (
var osarches = []string{
    "windows/386",
    "windows/amd64",
    "darwin/386",
    "darwin/amd64",
    "linux/386",
    "linux/amd64",
@@ -66,7 +67,6 @@ var osarches = []string{
    "plan9/386",
    "plan9/amd64",
    "solaris/amd64",
    "js/wasm",
}

// Special environment flags for a given arch
@@ -280,7 +280,7 @@ func stripVersion(goarch string) string {

// build the binary in dir returning success or failure
func compileArch(version, goos, goarch, dir string) bool {
    log.Printf("Compiling %s/%s into %s", goos, goarch, dir)
    log.Printf("Compiling %s/%s", goos, goarch)
    output := filepath.Join(dir, "rclone")
    if goos == "windows" {
        output += ".exe"
@@ -298,6 +298,7 @@ func compileArch(version, goos, goarch, dir string) bool {
        "go", "build",
        "--ldflags", "-s -X github.com/rclone/rclone/fs.Version=" + version,
        "-trimpath",
        "-i",
        "-o", output,
        "-tags", *tags,
        "..",
@@ -320,16 +321,14 @@ func compileArch(version, goos, goarch, dir string) bool {
        return false
    }
    if !*compileOnly {
        if goos != "js" {
            artifacts := []string{buildZip(dir)}
            // build a .deb and .rpm if appropriate
            if goos == "linux" {
                artifacts = append(artifacts, buildDebAndRpm(dir, version, stripVersion(goarch))...)
            }
            if *copyAs != "" {
                for _, artifact := range artifacts {
                    run("ln", artifact, strings.Replace(artifact, "-"+version, "-"+*copyAs, 1))
                }
        artifacts := []string{buildZip(dir)}
        // build a .deb and .rpm if appropriate
        if goos == "linux" {
            artifacts = append(artifacts, buildDebAndRpm(dir, version, goarch)...)
        }
        if *copyAs != "" {
            for _, artifact := range artifacts {
                run("ln", artifact, strings.Replace(artifact, "-"+version, "-"+*copyAs, 1))
            }
        }
        // tidy up
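
The argument list assembled in compileArch is an ordinary Go cross-compile and can be reproduced standalone. A sketch, with the version string, tags, output name and GOOS/GOARCH as placeholder values:

    package main

    import (
        "log"
        "os"
        "os/exec"
    )

    func main() {
        // Cross-compile the current module the way the release script does.
        cmd := exec.Command(
            "go", "build",
            "--ldflags", "-s -X github.com/rclone/rclone/fs.Version=v0.0.0-test",
            "-trimpath",
            "-o", "rclone-linux-arm",
            "-tags", "cmount",
            "..",
        )
        cmd.Env = append(os.Environ(), "GOOS=linux", "GOARCH=arm")
        cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
        if err := cmd.Run(); err != nil {
            log.Fatalf("compile failed: %v", err)
        }
    }
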
@@ -1,26 +0,0 @@
#!/bin/bash
# Thrash the VFS tests

set -e

# Optionally set the iterations with the first parameter
iterations=${1:-100}

base=$(dirname $(dirname $(realpath "$0")))
echo ${base}
run=${base}/bin/test-repeat.sh
echo ${run}

testdirs="
vfs
vfs/vfscache
vfs/vfscache/writeback
vfs/vfscache/downloaders
cmd/cmount
"

for testdir in ${testdirs}; do
    echo "Testing ${testdir} with ${iterations} iterations"
    cd ${base}/${testdir}
    ${run} -i=${iterations} -race -tags=cmount
done
@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9

package cachestats

@@ -1,6 +1,6 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build plan9 js
// +build plan9

package cachestats

@@ -29,7 +29,6 @@ var (
func init() {
    cmd.Root.AddCommand(commandDefinition)
    cmdFlags := commandDefinition.Flags()
    flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by downloading rather than with hash.")
    AddFlags(cmdFlags)
}

@@ -51,7 +50,7 @@ the source match the files in the destination, not the other way
around. This means that extra files in the destination that are not in
the source will not be detected.

The |--differ|, |--missing-on-dst|, |--missing-on-src|, |--match|
The |--differ|, |--missing-on-dst|, |--missing-on-src|, |--src-only|
and |--error| flags write paths, one per line, to the file name (or
stdout if it is |-|) supplied. What they write is described in the
help below. For example |--differ| will write all paths which are

@@ -14,7 +14,7 @@ func init() {

var commandDefinition = &cobra.Command{
    Use:   "cleanup remote:path",
    Short: `Clean up the remote if possible.`,
    Short: `Clean up the remote if possible`,
    Long: `
Clean up the remote if possible. Empty the trash or delete old file
versions. Not supported by all remotes.

@@ -89,10 +89,8 @@ func NewFsFile(remote string) (fs.Fs, string) {
    f, err := cache.Get(remote)
    switch err {
    case fs.ErrorIsFile:
        cache.Pin(f) // pin indefinitely since it was on the CLI
        return f, path.Base(fsPath)
    case nil:
        cache.Pin(f) // pin indefinitely since it was on the CLI
        return f, ""
    default:
        err = fs.CountError(err)
@@ -141,7 +139,6 @@ func newFsDir(remote string) fs.Fs {
        err = fs.CountError(err)
        log.Fatalf("Failed to create file system for %q: %v", remote, err)
    }
    cache.Pin(f) // pin indefinitely since it was on the CLI
    return f
}

@@ -200,7 +197,6 @@ func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs
        _ = fs.CountError(err)
        log.Fatalf("Failed to create file system for destination %q: %v", dstRemote, err)
    }
    cache.Pin(fdst) // pin indefinitely since it was on the CLI
    return
}

128
cmd/cmount/fs.go
@@ -17,6 +17,7 @@ import (
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/log"
    "github.com/rclone/rclone/vfs"
    "github.com/rclone/rclone/vfs/vfsflags"
)

const fhUnset = ^uint64(0)
@@ -31,10 +32,10 @@ type FS struct {
}

// NewFS makes a new FS
func NewFS(VFS *vfs.VFS) *FS {
func NewFS(f fs.Fs) *FS {
    fsys := &FS{
        VFS:   VFS,
        f:     VFS.Fs(),
        VFS:   vfs.New(f, &vfsflags.Opt),
        f:     f,
        ready: make(chan (struct{})),
    }
    return fsys
@@ -217,18 +218,12 @@ func (fsys *FS) Readdir(dirPath string,
    itemsRead := -1
    defer log.Trace(dirPath, "ofst=%d, fh=0x%X", ofst, fh)("items=%d, errc=%d", &itemsRead, &errc)

    dir, errc := fsys.lookupDir(dirPath)
    node, errc := fsys.getHandle(fh)
    if errc != 0 {
        return errc
    }

    // We can't seek in directories and FUSE should know that so
    // return an error if ofst is ever set.
    if ofst > 0 {
        return -fuse.ESPIPE
    }

    nodes, err := dir.ReadDirAll()
    items, err := node.Readdir(-1)
    if err != nil {
        return translateError(err)
    }
@@ -237,7 +232,7 @@ func (fsys *FS) Readdir(dirPath string,
    // for getattr (but FUSE only looks at st_ino and the
    // file-type bits of st_mode).
    //
    // We have called host.SetCapReaddirPlus() so WinFsp will
    // FIXME If you call host.SetCapReaddirPlus() then WinFsp will
    // use the full stat information - a Useful optimization on
    // Windows.
    //
@@ -248,19 +243,25 @@ func (fsys *FS) Readdir(dirPath string,
    // directory is read in a single readdir operation.
    fill(".", nil, 0)
    fill("..", nil, 0)
    for _, node := range nodes {
        name := node.Name()
        if len(name) > mountlib.MaxLeafSize {
            fs.Errorf(dirPath, "Name too long (%d bytes) for FUSE, skipping: %s", len(name), name)
            continue
    for _, item := range items {
        node, ok := item.(vfs.Node)
        if ok {
            name := node.Name()
            if len(name) > mountlib.MaxLeafSize {
                fs.Errorf(dirPath, "Name too long (%d bytes) for FUSE, skipping: %s", len(name), name)
                continue
            }
            if usingReaddirPlus {
                // We have called host.SetCapReaddirPlus() so supply the stat information
                var stat fuse.Stat_t
                _ = fsys.stat(node, &stat) // not capable of returning an error
                fill(name, &stat, 0)
            } else {
                fill(name, nil, 0)
            }
        }
        // We have called host.SetCapReaddirPlus() so supply the stat information
        // It is very cheap at this point so supply it regardless of OS capabilities
        var stat fuse.Stat_t
        _ = fsys.stat(node, &stat) // not capable of returning an error
        fill(name, &stat, 0)
    }
    itemsRead = len(nodes)
    itemsRead = len(items)
    return 0
}
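
Both versions of Readdir above push entries through cgofuse's fill callback; the difference is whether a pre-filled Stat_t accompanies each name, which lets WinFsp (readdir-plus) skip a Getattr call per entry. The loop shape in isolation looks like the sketch below; listNames and statFor are hypothetical helpers, and the fuse package is github.com/billziss-gh/cgofuse/fuse:

    // fill has the cgofuse signature: it returns false when the kernel
    // buffer is full and no more entries should be supplied.
    func readdirSketch(path string, fill func(name string, stat *fuse.Stat_t, ofst int64) bool, plus bool) int {
        fill(".", nil, 0)
        fill("..", nil, 0)
        for _, name := range listNames(path) { // hypothetical directory listing
            if plus {
                var stat fuse.Stat_t
                statFor(name, &stat) // hypothetical stat helper
                if !fill(name, &stat, 0) {
                    break
                }
            } else if !fill(name, nil, 0) {
                break
            }
        }
        return 0
    }
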
@@ -289,65 +290,41 @@ func (fsys *FS) Statfs(path string, stat *fuse.Statfs_t) (errc int) {
    return 0
}

// OpenEx opens a file
func (fsys *FS) OpenEx(path string, fi *fuse.FileInfo_t) (errc int) {
    defer log.Trace(path, "flags=0x%X", fi.Flags)("errc=%d, fh=0x%X", &errc, &fi.Fh)
    fi.Fh = fhUnset

    // translate the fuse flags to os flags
    flags := translateOpenFlags(fi.Flags)
    handle, err := fsys.VFS.OpenFile(path, flags, 0777)
    if err != nil {
        return translateError(err)
    }

    // If size unknown then use direct io to read
    if entry := handle.Node().DirEntry(); entry != nil && entry.Size() < 0 {
        fi.DirectIo = true
    }

    fi.Fh = fsys.openHandle(handle)
    return 0
}

// Open opens a file
func (fsys *FS) Open(path string, flags int) (errc int, fh uint64) {
    var fi = fuse.FileInfo_t{
        Flags: flags,
    }
    errc = fsys.OpenEx(path, &fi)
    return errc, fi.Fh
}
    defer log.Trace(path, "flags=0x%X", flags)("errc=%d, fh=0x%X", &errc, &fh)

// CreateEx creates and opens a file.
func (fsys *FS) CreateEx(filePath string, mode uint32, fi *fuse.FileInfo_t) (errc int) {
    defer log.Trace(filePath, "flags=0x%X, mode=0%o", fi.Flags, mode)("errc=%d, fh=0x%X", &errc, &fi.Fh)
    fi.Fh = fhUnset
    leaf, parentDir, errc := fsys.lookupParentDir(filePath)
    if errc != 0 {
        return errc
    }
    file, err := parentDir.Create(leaf, fi.Flags)
    if err != nil {
        return translateError(err)
    }
    // translate the fuse flags to os flags
    flags := translateOpenFlags(fi.Flags) | os.O_CREATE
    handle, err := file.Open(flags)
    flags = translateOpenFlags(flags)
    handle, err := fsys.VFS.OpenFile(path, flags, 0777)
    if err != nil {
        return translateError(err)
        return translateError(err), fhUnset
    }
    fi.Fh = fsys.openHandle(handle)
    return 0

    // FIXME add support for unknown length files setting direct_io
    // See: https://github.com/billziss-gh/cgofuse/issues/38

    return 0, fsys.openHandle(handle)
}

// Create creates and opens a file.
func (fsys *FS) Create(filePath string, flags int, mode uint32) (errc int, fh uint64) {
    var fi = fuse.FileInfo_t{
        Flags: flags,
    defer log.Trace(filePath, "flags=0x%X, mode=0%o", flags, mode)("errc=%d, fh=0x%X", &errc, &fh)
    leaf, parentDir, errc := fsys.lookupParentDir(filePath)
    if errc != 0 {
        return errc, fhUnset
    }
    errc = fsys.CreateEx(filePath, mode, &fi)
    return errc, fi.Fh
    file, err := parentDir.Create(leaf, flags)
    if err != nil {
        return translateError(err), fhUnset
    }
    // translate the fuse flags to os flags
    flags = translateOpenFlags(flags) | os.O_CREATE
    handle, err := file.Open(flags)
    if err != nil {
        return translateError(err), fhUnset
    }
    return 0, fsys.openHandle(handle)
}

// Truncate truncates a file to size
@@ -618,12 +595,3 @@ func translateOpenFlags(inFlags int) (outFlags int) {
    // NB O_SYNC isn't defined by fuse
    return outFlags
}
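
translateOpenFlags itself sits outside the visible hunks. For context, a typical FUSE-to-os flag translation looks like the sketch below; the exact mapping rclone uses may differ slightly, and the fuse constants are assumed to come from github.com/billziss-gh/cgofuse/fuse:

    // accessModeMask picks out the read/write access mode bits.
    const accessModeMask = os.O_RDONLY | os.O_WRONLY | os.O_RDWR

    // translateOpenFlagsSketch maps cgofuse open flags onto os.OpenFile flags.
    func translateOpenFlagsSketch(inFlags int) (outFlags int) {
        switch inFlags & accessModeMask {
        case fuse.O_RDONLY:
            outFlags = os.O_RDONLY
        case fuse.O_WRONLY:
            outFlags = os.O_WRONLY
        case fuse.O_RDWR:
            outFlags = os.O_RDWR
        }
        if inFlags&fuse.O_APPEND != 0 {
            outFlags |= os.O_APPEND
        }
        if inFlags&fuse.O_CREAT != 0 {
            outFlags |= os.O_CREATE
        }
        if inFlags&fuse.O_EXCL != 0 {
            outFlags |= os.O_EXCL
        }
        if inFlags&fuse.O_TRUNC != 0 {
            outFlags |= os.O_TRUNC
        }
        // NB O_SYNC isn't defined by fuse, as the hunk above notes
        return outFlags
    }
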
// Make sure interfaces are satisfied
var (
    _ fuse.FileSystemInterface = (*FS)(nil)
    _ fuse.FileSystemOpenEx    = (*FS)(nil)
    //_ fuse.FileSystemChflags    = (*FS)(nil)
    //_ fuse.FileSystemSetcrtime  = (*FS)(nil)
    //_ fuse.FileSystemSetchgtime = (*FS)(nil)
)

@@ -11,15 +11,26 @@ package cmount

import (
    "fmt"
    "os"
    "os/signal"
    "runtime"
    "strings"
    "syscall"
    "time"

    "github.com/billziss-gh/cgofuse/fuse"
    "github.com/okzk/sdnotify"
    "github.com/pkg/errors"
    "github.com/rclone/rclone/cmd/mountlib"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/lib/atexit"
    "github.com/rclone/rclone/vfs"
    "github.com/rclone/rclone/vfs/vfsflags"
)

const (
    // SetCapReaddirPlus informs the host that the hosted file system has the readdir-plus
    // capability [Windows only]. A file system that has the readdir-plus capability can send
    // full stat information during Readdir, thus avoiding extraneous Getattr calls.
    usingReaddirPlus = runtime.GOOS == "windows"
)

func init() {
@@ -27,91 +38,78 @@ func init() {
    if runtime.GOOS == "windows" {
        name = "mount"
    }
    mountlib.NewMountCommand(name, false, mount)
    mountlib.NewMountCommand(name, false, Mount)
    // Add mount to rc
    mountlib.AddRc("cmount", mount)

}

// mountOptions configures the options from the command line flags
func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.Options) (options []string) {
func mountOptions(device string, mountpoint string) (options []string) {
    // Options
    options = []string{
        "-o", "fsname=" + device,
        "-o", "subtype=rclone",
        "-o", fmt.Sprintf("max_readahead=%d", opt.MaxReadAhead),
        "-o", fmt.Sprintf("attr_timeout=%g", opt.AttrTimeout.Seconds()),
        "-o", fmt.Sprintf("max_readahead=%d", mountlib.MaxReadAhead),
        "-o", fmt.Sprintf("attr_timeout=%g", mountlib.AttrTimeout.Seconds()),
        // This causes FUSE to supply O_TRUNC with the Open
        // call which is more efficient for cmount. However
        // it does not work with cgofuse on Windows with
        // WinFSP so cmount must work with or without it.
        "-o", "atomic_o_trunc",
    }
    if opt.DebugFUSE {
    if mountlib.DebugFUSE {
        options = append(options, "-o", "debug")
    }

    // OSX options
    if runtime.GOOS == "darwin" {
        if opt.NoAppleDouble {
        if mountlib.NoAppleDouble {
            options = append(options, "-o", "noappledouble")
        }
        if opt.NoAppleXattr {
        if mountlib.NoAppleXattr {
            options = append(options, "-o", "noapplexattr")
        }
    }

    // determine if ExtraOptions already has an opt in
    hasOption := func(optionName string) bool {
        optionName += "="
        for _, option := range opt.ExtraOptions {
            if strings.HasPrefix(option, optionName) {
                return true
            }
        }
        return false
    }

    // Windows options
    if runtime.GOOS == "windows" {
        // These cause WinFsp to mean the current user
        if !hasOption("uid") {
            options = append(options, "-o", "uid=-1")
        }
        if !hasOption("gid") {
            options = append(options, "-o", "gid=-1")
        }
        options = append(options, "-o", "uid=-1")
        options = append(options, "-o", "gid=-1")
        options = append(options, "--FileSystemName=rclone")
    }

    if runtime.GOOS == "darwin" || runtime.GOOS == "windows" {
        if opt.VolumeName != "" {
            options = append(options, "-o", "volname="+opt.VolumeName)
        if mountlib.VolumeName != "" {
            options = append(options, "-o", "volname="+mountlib.VolumeName)
        }
    }
    if opt.AllowNonEmpty {
    if mountlib.AllowNonEmpty {
        options = append(options, "-o", "nonempty")
    }
    if opt.AllowOther {
    if mountlib.AllowOther {
        options = append(options, "-o", "allow_other")
    }
    if opt.AllowRoot {
    if mountlib.AllowRoot {
        options = append(options, "-o", "allow_root")
    }
    if opt.DefaultPermissions {
    if mountlib.DefaultPermissions {
        options = append(options, "-o", "default_permissions")
    }
    if VFS.Opt.ReadOnly {
    if vfsflags.Opt.ReadOnly {
        options = append(options, "-o", "ro")
    }
    if opt.WritebackCache {
    if mountlib.WritebackCache {
        // FIXME? options = append(options, "-o", WritebackCache())
    }
    if opt.DaemonTimeout != 0 {
        options = append(options, "-o", fmt.Sprintf("daemon_timeout=%d", int(opt.DaemonTimeout.Seconds())))
    if mountlib.DaemonTimeout != 0 {
        options = append(options, "-o", fmt.Sprintf("daemon_timeout=%d", int(mountlib.DaemonTimeout.Seconds())))
    }
    for _, option := range opt.ExtraOptions {
    for _, option := range mountlib.ExtraOptions {
        options = append(options, "-o", option)
    }
    for _, option := range opt.ExtraFlags {
    for _, option := range mountlib.ExtraFlags {
        options = append(options, option)
    }
    return options
@@ -137,39 +135,35 @@ func waitFor(fn func() bool) (ok bool) {
//
// returns an error, and an error channel for the serve process to
// report an error when fusermount is called.
func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error, func() error, error) {
    f := VFS.Fs()
func mount(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, error) {
    fs.Debugf(f, "Mounting on %q", mountpoint)

    // Check the mountpoint - in Windows the mountpoint mustn't exist before the mount
    if runtime.GOOS != "windows" {
        fi, err := os.Stat(mountpoint)
        if err != nil {
            return nil, nil, errors.Wrap(err, "mountpoint")
            return nil, nil, nil, errors.Wrap(err, "mountpoint")
        }
        if !fi.IsDir() {
            return nil, nil, errors.New("mountpoint is not a directory")
            return nil, nil, nil, errors.New("mountpoint is not a directory")
        }
    }

    // Create underlying FS
    fsys := NewFS(VFS)
    fsys := NewFS(f)
    host := fuse.NewFileSystemHost(fsys)
    host.SetCapReaddirPlus(true) // only works on Windows
    if usingReaddirPlus {
        host.SetCapReaddirPlus(true)
    }
    host.SetCapCaseInsensitive(f.Features().CaseInsensitive)

    // Create options
    options := mountOptions(VFS, f.Name()+":"+f.Root(), mountpoint, opt)
    options := mountOptions(f.Name()+":"+f.Root(), mountpoint)
    fs.Debugf(f, "Mounting with options: %q", options)

    // Serve the mount point in the background returning error to errChan
    errChan := make(chan error, 1)
    go func() {
        defer func() {
            if r := recover(); r != nil {
                errChan <- errors.Errorf("mount failed: %v", r)
            }
        }()
        var err error
        ok := host.Mount(mountpoint, options)
        if !ok {
@@ -205,7 +199,7 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
    select {
    case err := <-errChan:
        err = errors.Wrap(err, "mount stopped before calling Init")
        return nil, nil, err
        return nil, nil, nil, err
    case <-fsys.ready:
    }

@@ -220,5 +214,53 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
        }
    }

    return errChan, unmount, nil
    return fsys.VFS, errChan, unmount, nil
}

// Mount mounts the remote at mountpoint.
//
// If noModTime is set then it
func Mount(f fs.Fs, mountpoint string) error {
    // Mount it
    FS, errChan, unmount, err := mount(f, mountpoint)
    if err != nil {
        return errors.Wrap(err, "failed to mount FUSE fs")
    }

    // Note cgofuse unmounts the fs on SIGINT etc

    sigHup := make(chan os.Signal, 1)
    signal.Notify(sigHup, syscall.SIGHUP)

    atexit.Register(func() {
        _ = unmount()
    })

    if err := sdnotify.Ready(); err != nil && err != sdnotify.ErrSdNotifyNoSocket {
        return errors.Wrap(err, "failed to notify systemd")
    }

waitloop:
    for {
        select {
        // umount triggered outside the app
        case err = <-errChan:
            break waitloop
        // user sent SIGHUP to clear the cache
        case <-sigHup:
            root, err := FS.Root()
            if err != nil {
                fs.Errorf(f, "Error reading root: %v", err)
            } else {
                root.ForgetAll()
            }
        }
    }

    _ = sdnotify.Stopping()
    if err != nil {
        return errors.Wrap(err, "failed to umount FUSE fs")
    }

    return nil
}

@@ -22,7 +22,7 @@ func init() {

var commandDefinition = &cobra.Command{
    Use:   "copy source:path dest:path",
    Short: `Copy files from source to dest, skipping already copied.`,
    Short: `Copy files from source to dest, skipping already copied`,
    Long: `
Copy the source to the destination. Doesn't transfer
unchanged files, testing by size and modification time or

@@ -15,7 +15,7 @@ func init() {

var commandDefinition = &cobra.Command{
    Use:   "copyto source:path dest:path",
    Short: `Copy files from source to dest, skipping already copied.`,
    Short: `Copy files from source to dest, skipping already copied`,
    Long: `
If source:path is a file or directory then it copies it to a file or
directory named dest:path.

@@ -22,32 +22,19 @@ func init() {

var commandDefinition = &cobra.Command{
    Use:   "dedupe [mode] remote:path",
    Short: `Interactively find duplicate filenames and delete/rename them.`,
    Short: `Interactively find duplicate files and delete/rename them.`,
    Long: `

By default ` + "`dedupe`" + ` interactively finds files with duplicate
names and offers to delete all but one or rename them to be
different.

This is only useful with backends like Google Drive which can have
duplicate file names. It can be run on wrapping backends (eg crypt) if
they wrap a backend which supports duplicate file names.
By default ` + "`" + `dedupe` + "`" + ` interactively finds duplicate files and offers to
delete all but one or rename them to be different. Only useful with
Google Drive which can have duplicate file names.

In the first pass it will merge directories with the same name. It
will do this iteratively until all the identically named directories
have been merged.
will do this iteratively until all the identical directories have been
merged.

In the second pass, for every group of duplicate file names, it will
delete all but one identical files it finds without confirmation.
This means that for most duplicated files the ` + "`dedupe`" + `
command will not be interactive.

` + "`dedupe`" + ` considers files to be identical if they have the
same hash. If the backend does not support hashes (eg crypt wrapping
Google Drive) then they will never be found to be identical. If you
use the ` + "`--size-only`" + ` flag then files will be considered
identical if they have the same size (any hash will be ignored). This
can be useful on crypt backends which do not support hashes.
The ` + "`" + `dedupe` + "`" + ` command will delete all but one of any identical (same
md5sum) files it finds without confirmation. This means that for most
duplicated files the ` + "`" + `dedupe` + "`" + ` command will not be interactive.

**Important**: Since this can cause data loss, test first with the
` + "`--dry-run` or the `--interactive`/`-i`" + ` flag.
@@ -65,26 +52,26 @@ Before - with duplicates
    1744073 2016-03-05 16:22:38.104000000 two.txt
    564374 2016-03-05 16:22:52.118000000 two.txt

Now the ` + "`dedupe`" + ` session
Now the ` + "`" + `dedupe` + "`" + ` session

    $ rclone dedupe drive:dupes
    2016/03/05 16:24:37 Google drive root 'dupes': Looking for duplicates using interactive mode.
    one.txt: Found 4 files with duplicate names
    one.txt: Deleting 2/3 identical duplicates (MD5 "1eedaa9fe86fd4b8632e2ac549403b36")
    one.txt: Found 4 duplicates - deleting identical copies
    one.txt: Deleting 2/3 identical duplicates (md5sum "1eedaa9fe86fd4b8632e2ac549403b36")
    one.txt: 2 duplicates remain
    1: 6048320 bytes, 2016-03-05 16:23:16.798000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
    2: 564374 bytes, 2016-03-05 16:23:06.731000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
    1: 6048320 bytes, 2016-03-05 16:23:16.798000000, md5sum 1eedaa9fe86fd4b8632e2ac549403b36
    2: 564374 bytes, 2016-03-05 16:23:06.731000000, md5sum 7594e7dc9fc28f727c42ee3e0749de81
    s) Skip and do nothing
    k) Keep just one (choose which in next step)
    r) Rename all to be different (by changing file.jpg to file-1.jpg)
    s/k/r> k
    Enter the number of the file to keep> 1
    one.txt: Deleted 1 extra copies
    two.txt: Found 3 files with duplicates names
    two.txt: Found 3 duplicates - deleting identical copies
    two.txt: 3 duplicates remain
    1: 564374 bytes, 2016-03-05 16:22:52.118000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
    2: 6048320 bytes, 2016-03-05 16:22:46.185000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
    3: 1744073 bytes, 2016-03-05 16:22:38.104000000, MD5 851957f7fb6f0bc4ce76be966d336802
    1: 564374 bytes, 2016-03-05 16:22:52.118000000, md5sum 7594e7dc9fc28f727c42ee3e0749de81
    2: 6048320 bytes, 2016-03-05 16:22:46.185000000, md5sum 1eedaa9fe86fd4b8632e2ac549403b36
    3: 1744073 bytes, 2016-03-05 16:22:38.104000000, md5sum 851957f7fb6f0bc4ce76be966d336802
    s) Skip and do nothing
    k) Keep just one (choose which in next step)
    r) Rename all to be different (by changing file.jpg to file-1.jpg)

@@ -44,7 +44,7 @@ func init() {

var commandDefinition = &cobra.Command{
    Use:   "lsf remote:path",
    Short: `List directories and objects in remote:path formatted for parsing.`,
    Short: `List directories and objects in remote:path formatted for parsing`,
    Long: `
List the contents of the source path (directories and objects) to
standard output in a form which is easy to parse by scripts. By

@@ -19,7 +19,6 @@ import (
// Dir represents a directory entry
type Dir struct {
    *vfs.Dir
    fsys *FS
}

// Check interface satisfied
@@ -28,7 +27,7 @@ var _ fusefs.Node = (*Dir)(nil)
// Attr updates the attributes of a directory
func (d *Dir) Attr(ctx context.Context, a *fuse.Attr) (err error) {
    defer log.Trace(d, "")("attr=%+v, err=%v", a, &err)
    a.Valid = d.fsys.opt.AttrTimeout
    a.Valid = mountlib.AttrTimeout
    a.Gid = d.VFS().Opt.GID
    a.Uid = d.VFS().Opt.UID
    a.Mode = os.ModeDir | d.VFS().Opt.DirPerms
@@ -76,7 +75,7 @@ func (d *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.Lo
    if err != nil {
        return nil, translateError(err)
    }
    resp.EntryValid = d.fsys.opt.AttrTimeout
    resp.EntryValid = mountlib.AttrTimeout
    // Check the mnode to see if it has a fuse Node cached
    // We must return the same fuse nodes for vfs Nodes
    node, ok := mnode.Sys().(fusefs.Node)
@@ -85,9 +84,9 @@ func (d *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.Lo
    }
    switch x := mnode.(type) {
    case *vfs.File:
        node = &File{x, d.fsys}
        node = &File{x}
    case *vfs.Dir:
        node = &Dir{x, d.fsys}
        node = &Dir{x}
    default:
        panic("bad type")
    }
@@ -140,7 +139,7 @@ func (d *Dir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.Cr
    if err != nil {
        return nil, nil, translateError(err)
    }
    node = &File{file, d.fsys}
    node = &File{file}
    file.SetSys(node) // cache the FUSE node for later
    return node, &FileHandle{fh}, err
}
@@ -154,7 +153,7 @@ func (d *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (node fusefs.No
    if err != nil {
        return nil, translateError(err)
    }
    node = &Dir{dir, d.fsys}
    node = &Dir{dir}
    dir.SetSys(node) // cache the FUSE node for later
    return node, nil
}

@@ -8,6 +8,7 @@ import (

    "bazil.org/fuse"
    fusefs "bazil.org/fuse/fs"
    "github.com/rclone/rclone/cmd/mountlib"
    "github.com/rclone/rclone/fs/log"
    "github.com/rclone/rclone/vfs"
)
@@ -15,7 +16,6 @@ import (
// File represents a file
type File struct {
    *vfs.File
    fsys *FS
}

// Check interface satisfied
@@ -24,7 +24,7 @@ var _ fusefs.Node = (*File)(nil)
// Attr fills out the attributes for the file
func (f *File) Attr(ctx context.Context, a *fuse.Attr) (err error) {
    defer log.Trace(f, "")("a=%+v, err=%v", a, &err)
    a.Valid = f.fsys.opt.AttrTimeout
    a.Valid = mountlib.AttrTimeout
    modTime := f.File.ModTime()
    Size := uint64(f.File.Size())
    Blocks := (Size + 511) / 512

@@ -15,24 +15,23 @@ import (
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/log"
    "github.com/rclone/rclone/vfs"
    "github.com/rclone/rclone/vfs/vfsflags"
)

// FS represents the top level filing system
type FS struct {
    *vfs.VFS
    f   fs.Fs
    opt *mountlib.Options
    f fs.Fs
}

// Check interface satisfied
var _ fusefs.FS = (*FS)(nil)

// NewFS makes a new FS
func NewFS(VFS *vfs.VFS, opt *mountlib.Options) *FS {
func NewFS(f fs.Fs) *FS {
    fsys := &FS{
        VFS: VFS,
        f:   VFS.Fs(),
        opt: opt,
        VFS: vfs.New(f, &vfsflags.Opt),
        f:   f,
    }
    return fsys
}
@@ -44,7 +43,7 @@ func (f *FS) Root() (node fusefs.Node, err error) {
    if err != nil {
        return nil, translateError(err)
    }
    return &Dir{root, f}, nil
    return &Dir{root}, nil
}

// Check interface satisfied

@@ -6,67 +6,74 @@ package mount

import (
    "fmt"
    "runtime"
    "os"
    "os/signal"
    "syscall"

    "bazil.org/fuse"
    fusefs "bazil.org/fuse/fs"
    "github.com/okzk/sdnotify"
    "github.com/pkg/errors"
    "github.com/rclone/rclone/cmd/mountlib"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/lib/atexit"
    "github.com/rclone/rclone/vfs"
    "github.com/rclone/rclone/vfs/vfsflags"
)

func init() {
    mountlib.NewMountCommand("mount", false, mount)
    mountlib.NewMountCommand("mount", false, Mount)
    // Add mount to rc
    mountlib.AddRc("mount", mount)
}

// mountOptions configures the options from the command line flags
func mountOptions(VFS *vfs.VFS, device string, opt *mountlib.Options) (options []fuse.MountOption) {
func mountOptions(device string) (options []fuse.MountOption) {
    options = []fuse.MountOption{
        fuse.MaxReadahead(uint32(opt.MaxReadAhead)),
        fuse.MaxReadahead(uint32(mountlib.MaxReadAhead)),
        fuse.Subtype("rclone"),
        fuse.FSName(device),
        fuse.VolumeName(opt.VolumeName),
        fuse.VolumeName(mountlib.VolumeName),

        // Options from benchmarking in the fuse module
        //fuse.MaxReadahead(64 * 1024 * 1024),
        //fuse.WritebackCache(),
    }
    if opt.AsyncRead {
    if mountlib.AsyncRead {
        options = append(options, fuse.AsyncRead())
    }
    if opt.NoAppleDouble {
    if mountlib.NoAppleDouble {
        options = append(options, fuse.NoAppleDouble())
    }
    if opt.NoAppleXattr {
    if mountlib.NoAppleXattr {
        options = append(options, fuse.NoAppleXattr())
    }
    if opt.AllowNonEmpty {
    if mountlib.AllowNonEmpty {
        options = append(options, fuse.AllowNonEmptyMount())
    }
    if opt.AllowOther {
    if mountlib.AllowOther {
        options = append(options, fuse.AllowOther())
    }
    if opt.AllowRoot {
    if mountlib.AllowRoot {
        // options = append(options, fuse.AllowRoot())
        fs.Errorf(nil, "Ignoring --allow-root. Support has been removed upstream - see https://github.com/bazil/fuse/issues/144 for more info")
    }
    if opt.DefaultPermissions {
    if mountlib.DefaultPermissions {
        options = append(options, fuse.DefaultPermissions())
    }
    if VFS.Opt.ReadOnly {
    if vfsflags.Opt.ReadOnly {
        options = append(options, fuse.ReadOnly())
    }
    if opt.WritebackCache {
    if mountlib.WritebackCache {
        options = append(options, fuse.WritebackCache())
    }
    if opt.DaemonTimeout != 0 {
        options = append(options, fuse.DaemonTimeout(fmt.Sprint(int(opt.DaemonTimeout.Seconds()))))
    if mountlib.DaemonTimeout != 0 {
        options = append(options, fuse.DaemonTimeout(fmt.Sprint(int(mountlib.DaemonTimeout.Seconds()))))
    }
    if len(opt.ExtraOptions) > 0 {
    if len(mountlib.ExtraOptions) > 0 {
        fs.Errorf(nil, "-o/--option not supported with this FUSE backend")
    }
    if len(opt.ExtraFlags) > 0 {
    if len(mountlib.ExtraFlags) > 0 {
        fs.Errorf(nil, "--fuse-flag not supported with this FUSE backend")
    }
    return options
@@ -78,25 +85,14 @@ func mountOptions(VFS *vfs.VFS, device string, opt *mountlib.Options) (options [
//
// returns an error, and an error channel for the serve process to
// report an error when fusermount is called.
func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error, func() error, error) {
    if runtime.GOOS == "darwin" {
        fs.Logf(nil, "macOS users: please try \"rclone cmount\" as it will be the default in v1.54")
    }

    if opt.DebugFUSE {
        fuse.Debug = func(msg interface{}) {
            fs.Debugf("fuse", "%v", msg)
        }
    }

    f := VFS.Fs()
func mount(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, error) {
    fs.Debugf(f, "Mounting on %q", mountpoint)
    c, err := fuse.Mount(mountpoint, mountOptions(VFS, f.Name()+":"+f.Root(), opt)...)
    c, err := fuse.Mount(mountpoint, mountOptions(f.Name()+":"+f.Root())...)
    if err != nil {
        return nil, nil, err
        return nil, nil, nil, err
    }

    filesys := NewFS(VFS, opt)
    filesys := NewFS(f)
    server := fusefs.New(c, nil)

    // Serve the mount point in the background returning error to errChan
@@ -113,7 +109,7 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
    // check if the mount process has an error to report
    <-c.Ready
    if err := c.MountError; err != nil {
        return nil, nil, err
        return nil, nil, nil, err
    }

    unmount := func() error {
@@ -122,5 +118,63 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
        return fuse.Unmount(mountpoint)
    }

    return errChan, unmount, nil
    return filesys.VFS, errChan, unmount, nil
}

// Mount mounts the remote at mountpoint.
//
// If noModTime is set then it
func Mount(f fs.Fs, mountpoint string) error {
    if mountlib.DebugFUSE {
        fuse.Debug = func(msg interface{}) {
            fs.Debugf("fuse", "%v", msg)
        }
    }

    // Mount it
    FS, errChan, unmount, err := mount(f, mountpoint)
    if err != nil {
        return errors.Wrap(err, "failed to mount FUSE fs")
    }

    sigInt := make(chan os.Signal, 1)
    signal.Notify(sigInt, syscall.SIGINT, syscall.SIGTERM)
    sigHup := make(chan os.Signal, 1)
    signal.Notify(sigHup, syscall.SIGHUP)
    atexit.IgnoreSignals()
    atexit.Register(func() {
        _ = unmount()
    })

    if err := sdnotify.Ready(); err != nil && err != sdnotify.ErrSdNotifyNoSocket {
        return errors.Wrap(err, "failed to notify systemd")
    }

waitloop:
    for {
        select {
        // umount triggered outside the app
        case err = <-errChan:
            break waitloop
        // Program abort: umount
        case <-sigInt:
            err = unmount()
            break waitloop
        // user sent SIGHUP to clear the cache
        case <-sigHup:
            root, err := FS.Root()
            if err != nil {
                fs.Errorf(f, "Error reading root: %v", err)
            } else {
                root.ForgetAll()
            }
        }
    }

    _ = sdnotify.Stopping()
    if err != nil {
        return errors.Wrap(err, "failed to umount FUSE fs")
    }

    return nil
}

@@ -33,15 +33,13 @@ import (
// FOPEN_DIRECT_IO flag from their `Open` method. See directio_test.go
// for an example.
type FileHandle struct {
    h    vfs.Handle
    fsys *FS
    h vfs.Handle
}

// Create a new FileHandle
func newFileHandle(h vfs.Handle, fsys *FS) *FileHandle {
func newFileHandle(h vfs.Handle) *FileHandle {
    return &FileHandle{
        h:    h,
        fsys: fsys,
        h: h,
    }
}

@@ -117,7 +115,7 @@ var _ fusefs.FileFsyncer = (*FileHandle)(nil)
// is assumed, and the 'blocks' field is set accordingly.
func (f *FileHandle) Getattr(ctx context.Context, out *fuse.AttrOut) (errno syscall.Errno) {
    defer log.Trace(f, "")("attr=%v, errno=%v", &out, &errno)
    f.fsys.setAttrOut(f.h.Node(), out)
    setAttrOut(f.h.Node(), out)
    return 0
}

@@ -127,7 +125,7 @@ var _ fusefs.FileGetattrer = (*FileHandle)(nil)
func (f *FileHandle) Setattr(ctx context.Context, in *fuse.SetAttrIn, out *fuse.AttrOut) (errno syscall.Errno) {
    defer log.Trace(f, "in=%v", in)("attr=%v, errno=%v", &out, &errno)
    var err error
    f.fsys.setAttrOut(f.h.Node(), out)
    setAttrOut(f.h.Node(), out)
    size, ok := in.GetSize()
    if ok {
        err = f.h.Truncate(int64(size))

@@ -14,21 +14,20 @@ import (
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/log"
    "github.com/rclone/rclone/vfs"
    "github.com/rclone/rclone/vfs/vfsflags"
)

// FS represents the top level filing system
type FS struct {
    VFS *vfs.VFS
    f   fs.Fs
    opt *mountlib.Options
}

// NewFS creates a pathfs.FileSystem from the fs.Fs passed in
func NewFS(VFS *vfs.VFS, opt *mountlib.Options) *FS {
func NewFS(f fs.Fs) *FS {
    fsys := &FS{
        VFS: VFS,
        f:   VFS.Fs(),
        opt: opt,
        VFS: vfs.New(f, &vfsflags.Opt),
        f:   f,
    }
    return fsys
}
@@ -67,8 +66,8 @@ func setAttr(node vfs.Node, attr *fuse.Attr) {
    modTime := node.ModTime()
    // set attributes
    vfs := node.VFS()
    attr.Owner.Gid = vfs.Opt.GID
    attr.Owner.Uid = vfs.Opt.UID
    attr.Owner.Gid = vfs.Opt.UID
    attr.Owner.Uid = vfs.Opt.GID
    attr.Mode = getMode(node)
    attr.Size = Size
    attr.Nlink = 1
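
The four owner assignments above are easy to skim past: the two sides of the diff differ only in which option lands in which field, one version pairing vfs.Opt.GID with Gid and vfs.Opt.UID with Uid, the other with the two swapped. The name-matching pairing is:

    attr.Owner.Gid = vfs.Opt.GID // group option -> group field
    attr.Owner.Uid = vfs.Opt.UID // user option  -> user field
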
@@ -86,16 +85,16 @@ func setAttr(node vfs.Node, attr *fuse.Attr) {
}

// fill in AttrOut from node
func (f *FS) setAttrOut(node vfs.Node, out *fuse.AttrOut) {
func setAttrOut(node vfs.Node, out *fuse.AttrOut) {
    setAttr(node, &out.Attr)
    out.SetTimeout(f.opt.AttrTimeout)
    out.SetTimeout(mountlib.AttrTimeout)
}

// fill in EntryOut from node
func (f *FS) setEntryOut(node vfs.Node, out *fuse.EntryOut) {
func setEntryOut(node vfs.Node, out *fuse.EntryOut) {
    setAttr(node, &out.Attr)
    out.SetEntryTimeout(f.opt.AttrTimeout)
    out.SetAttrTimeout(f.opt.AttrTimeout)
    out.SetEntryTimeout(mountlib.AttrTimeout)
    out.SetAttrTimeout(mountlib.AttrTimeout)
}

// Translate errors from mountlib into Syscall error numbers

@@ -7,18 +7,27 @@ package mount2
import (
    "fmt"
    "log"
    "os"
    "os/signal"
    "runtime"
    "syscall"

    fusefs "github.com/hanwen/go-fuse/v2/fs"
    "github.com/hanwen/go-fuse/v2/fuse"
    "github.com/okzk/sdnotify"
    "github.com/pkg/errors"
    "github.com/rclone/rclone/cmd/mountlib"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/lib/atexit"
    "github.com/rclone/rclone/vfs"
)

func init() {
    mountlib.NewMountCommand("mount2", true, mount)
    mountlib.NewMountCommand("mount2", true, Mount)

    // Add mount to rc
    mountlib.AddRc("mount2", mount)

}

// mountOptions configures the options from the command line flags
@@ -27,12 +36,12 @@ func init() {
func mountOptions(fsys *FS, f fs.Fs) (mountOpts *fuse.MountOptions) {
    device := f.Name() + ":" + f.Root()
    mountOpts = &fuse.MountOptions{
        AllowOther:    fsys.opt.AllowOther,
        AllowOther:    mountlib.AllowOther,
        FsName:        device,
        Name:          "rclone",
        DisableXAttrs: true,
        Debug:         fsys.opt.DebugFUSE,
        MaxReadAhead:  int(fsys.opt.MaxReadAhead),
        Debug:         mountlib.DebugFUSE,
        MaxReadAhead:  int(mountlib.MaxReadAhead),

        // RememberInodes: true,
        // SingleThreaded: true,
@@ -96,22 +105,22 @@ func mountOptions(fsys *FS, f fs.Fs) (mountOpts *fuse.MountOptions) {
    }
    var opts []string
    // FIXME doesn't work opts = append(opts, fmt.Sprintf("max_readahead=%d", maxReadAhead))
    if fsys.opt.AllowNonEmpty {
    if mountlib.AllowNonEmpty {
        opts = append(opts, "nonempty")
    }
    if fsys.opt.AllowOther {
    if mountlib.AllowOther {
        opts = append(opts, "allow_other")
    }
    if fsys.opt.AllowRoot {
    if mountlib.AllowRoot {
        opts = append(opts, "allow_root")
    }
    if fsys.opt.DefaultPermissions {
    if mountlib.DefaultPermissions {
        opts = append(opts, "default_permissions")
    }
    if fsys.VFS.Opt.ReadOnly {
        opts = append(opts, "ro")
    }
    if fsys.opt.WritebackCache {
    if mountlib.WritebackCache {
        log.Printf("FIXME --write-back-cache not supported")
        // FIXME opts = append(opts,fuse.WritebackCache())
    }
@@ -147,11 +156,10 @@ func mountOptions(fsys *FS, f fs.Fs) (mountOpts *fuse.MountOptions) {
//
// returns an error, and an error channel for the serve process to
// report an error when fusermount is called.
func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error, func() error, error) {
    f := VFS.Fs()
func mount(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, error) {
    fs.Debugf(f, "Mounting on %q", mountpoint)

    fsys := NewFS(VFS, opt)
    fsys := NewFS(f)
    // nodeFsOpts := &fusefs.PathNodeFsOptions{
    // 	ClientInodes: false,
    // 	Debug:        mountlib.DebugFUSE,
@@ -171,28 +179,28 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
    // FIXME fill out
    opts := fusefs.Options{
        MountOptions: *mountOpts,
        EntryTimeout: &opt.AttrTimeout,
        AttrTimeout:  &opt.AttrTimeout,
        EntryTimeout: &mountlib.AttrTimeout,
        AttrTimeout:  &mountlib.AttrTimeout,
        // UID
        // GID
    }

    root, err := fsys.Root()
    if err != nil {
        return nil, nil, err
        return nil, nil, nil, err
    }

    rawFS := fusefs.NewNodeFS(root, &opts)
    server, err := fuse.NewServer(rawFS, mountpoint, &opts.MountOptions)
    if err != nil {
        return nil, nil, err
        return nil, nil, nil, err
    }

    //mountOpts := &fuse.MountOptions{}
    //server, err := fusefs.Mount(mountpoint, fsys, &opts)
    // server, err := fusefs.Mount(mountpoint, root, &opts)
    // if err != nil {
    // 	return nil, nil, err
    // 	return nil, nil, nil, err
    // }

    umount := func() error {
@@ -214,9 +222,60 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
    fs.Debugf(f, "Waiting for the mount to start...")
    err = server.WaitMount()
    if err != nil {
        return nil, nil, err
        return nil, nil, nil, err
    }

    fs.Debugf(f, "Mount started")
    return errs, umount, nil
    return fsys.VFS, errs, umount, nil
}

// Mount mounts the remote at mountpoint.
//
// If noModTime is set then it
func Mount(f fs.Fs, mountpoint string) error {
    // Mount it
    vfs, errChan, unmount, err := mount(f, mountpoint)
    if err != nil {
        return errors.Wrap(err, "failed to mount FUSE fs")
    }

    sigInt := make(chan os.Signal, 1)
    signal.Notify(sigInt, syscall.SIGINT, syscall.SIGTERM)
    sigHup := make(chan os.Signal, 1)
    signal.Notify(sigHup, syscall.SIGHUP)
    atexit.Register(func() {
        _ = unmount()
    })

    if err := sdnotify.Ready(); err != nil && err != sdnotify.ErrSdNotifyNoSocket {
        return errors.Wrap(err, "failed to notify systemd")
    }

waitloop:
    for {
        select {
        // umount triggered outside the app
        case err = <-errChan:
            break waitloop
        // Program abort: umount
        case <-sigInt:
            err = unmount()
            break waitloop
        // user sent SIGHUP to clear the cache
        case <-sigHup:
            root, err := vfs.Root()
            if err != nil {
                fs.Errorf(f, "Error reading root: %v", err)
            } else {
                root.ForgetAll()
            }
        }
    }

    _ = sdnotify.Stopping()
    if err != nil {
        return errors.Wrap(err, "failed to umount FUSE fs")
    }

    return nil
}

@@ -111,7 +111,7 @@ var _ = (fusefs.NodeStatfser)((*Node)(nil))
// with the Options.NullPermissions setting. If blksize is unset, 4096
// is assumed, and the 'blocks' field is set accordingly.
func (n *Node) Getattr(ctx context.Context, f fusefs.FileHandle, out *fuse.AttrOut) syscall.Errno {
    n.fsys.setAttrOut(n.node, out)
    setAttrOut(n.node, out)
    return 0
}

@@ -121,7 +121,7 @@ var _ = (fusefs.NodeGetattrer)((*Node)(nil))
func (n *Node) Setattr(ctx context.Context, f fusefs.FileHandle, in *fuse.SetAttrIn, out *fuse.AttrOut) (errno syscall.Errno) {
    defer log.Trace(n, "in=%v", in)("out=%#v, errno=%v", &out, &errno)
    var err error
    n.fsys.setAttrOut(n.node, out)
    setAttrOut(n.node, out)
    size, ok := in.GetSize()
    if ok {
        err = n.node.Truncate(int64(size))
@@ -158,7 +158,7 @@ func (n *Node) Open(ctx context.Context, flags uint32) (fh fusefs.FileHandle, fu
    if entry := n.node.DirEntry(); entry != nil && entry.Size() < 0 {
        fuseFlags |= fuse.FOPEN_DIRECT_IO
    }
    return newFileHandle(handle, n.fsys), fuseFlags, 0
    return newFileHandle(handle), fuseFlags, 0
}

var _ = (fusefs.NodeOpener)((*Node)(nil))
@@ -197,7 +197,7 @@ func (n *Node) Lookup(ctx context.Context, name string, out *fuse.EntryOut) (ino
    // FIXME
    // out.SetEntryTimeout(dt time.Duration)
    // out.SetAttrTimeout(dt time.Duration)
    n.fsys.setEntryOut(vfsNode, out)
    setEntryOut(vfsNode, out)

    return n.NewInode(ctx, newNode, fusefs.StableAttr{Mode: out.Attr.Mode}), 0
}
@@ -306,7 +306,7 @@ func (n *Node) Mkdir(ctx context.Context, name string, mode uint32, out *fuse.En
        return nil, translateError(err)
    }
    newNode := newNode(n.fsys, newDir)
    n.fsys.setEntryOut(newNode.node, out)
    setEntryOut(newNode.node, out)
    newInode := n.NewInode(ctx, newNode, fusefs.StableAttr{Mode: out.Attr.Mode})
    return newInode, 0
}
@@ -333,7 +333,7 @@ func (n *Node) Create(ctx context.Context, name string, flags uint32, mode uint3
    if err != nil {
        return nil, nil, 0, translateError(err)
    }
    fh = newFileHandle(handle, n.fsys)
    fh = newFileHandle(handle)
    // FIXME
    // fh = &fusefs.WithFlags{
    // 	File: fh,
@@ -346,7 +346,7 @@ func (n *Node) Create(ctx context.Context, name string, flags uint32, mode uint3
    if errno != 0 {
        return nil, nil, 0, errno
    }
    n.fsys.setEntryOut(vfsNode, out)
    setEntryOut(vfsNode, out)
    newNode := newNode(n.fsys, vfsNode)
    fs.Debugf(nil, "attr=%#v", out.Attr)
    newInode := n.NewInode(ctx, newNode, fusefs.StableAttr{Mode: out.Attr.Mode})

@@ -4,61 +4,46 @@ import (
    "io"
    "log"
    "os"
    "os/signal"
    "path/filepath"
    "runtime"
    "strings"
    "syscall"
    "time"

    "github.com/okzk/sdnotify"
    "github.com/pkg/errors"
    "github.com/rclone/rclone/cmd"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config"
    "github.com/rclone/rclone/fs/config/flags"
    "github.com/rclone/rclone/fs/rc"
    "github.com/rclone/rclone/lib/atexit"
    "github.com/rclone/rclone/vfs"
    "github.com/rclone/rclone/vfs/vfsflags"
    "github.com/spf13/cobra"
    "github.com/spf13/pflag"
)

// Options for creating the mount
type Options struct {
    DebugFUSE          bool
    AllowNonEmpty      bool
    AllowRoot          bool
    AllowOther         bool
    DefaultPermissions bool
    WritebackCache     bool
    Daemon             bool
    MaxReadAhead       fs.SizeSuffix
// Options set by command line flags
var (
    DebugFUSE          = false
    AllowNonEmpty      = false
    AllowRoot          = false
    AllowOther         = false
    DefaultPermissions = false
    WritebackCache     = false
    Daemon             = false
    MaxReadAhead       fs.SizeSuffix = 128 * 1024
    ExtraOptions       []string
    ExtraFlags         []string
    AttrTimeout time.Duration // how long the kernel caches attribute for
    AttrTimeout = 1 * time.Second // how long the kernel caches attribute for
    VolumeName  string
    NoAppleDouble bool
    NoAppleXattr  bool
    NoAppleDouble = true  // use noappledouble by default
    NoAppleXattr  = false // do not use noapplexattr by default
    DaemonTimeout time.Duration // OSXFUSE only
    AsyncRead     bool
}

// DefaultOpt is the default values for creating the mount
var DefaultOpt = Options{
    MaxReadAhead:  128 * 1024,
    AttrTimeout:   1 * time.Second, // how long the kernel caches attribute for
    NoAppleDouble: true,            // use noappledouble by default
    NoAppleXattr:  false,           // do not use noapplexattr by default
    AsyncRead:     true,            // do async reads by default
}
    AsyncRead = true // do async reads by default
)

type (
    // UnmountFn is called to unmount the file system
    UnmountFn func() error
    // MountFn is called to mount the file system
    MountFn func(VFS *vfs.VFS, mountpoint string, opt *Options) (<-chan error, func() error, error)
    MountFn func(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, error)
)
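
The two MountFn signatures above capture the shape of the refactor: one side receives a ready-made *vfs.VFS plus an *Options and returns (errChan, unmount, error), the other receives a bare fs.Fs and must also hand back the *vfs.VFS it builds. A do-nothing stub satisfying the VFS-flavoured signature, for illustration only (assumes the surrounding mountlib package and its imports):

    // noopMount "mounts" instantly and reports a clean unmount by sending
    // a nil error to the channel the caller's wait loop selects on.
    func noopMount(VFS *vfs.VFS, mountpoint string, opt *Options) (<-chan error, func() error, error) {
        errChan := make(chan error, 1)
        unmount := func() error {
            errChan <- nil // wakes the wait loop with a nil error
            return nil
        }
        fs.Debugf(VFS.Fs(), "pretending to mount on %q", mountpoint)
        return errChan, unmount, nil
    }
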
 // Global constants
@@ -69,35 +54,7 @@ const (
 func init() {
 	// DaemonTimeout defaults to non zero for macOS
 	if runtime.GOOS == "darwin" {
-		DefaultOpt.DaemonTimeout = 15 * time.Minute
-	}
-}
-
-// Options set by command line flags
-var (
-	Opt = DefaultOpt
-)
-
-// AddFlags adds the non filing system specific flags to the command
-func AddFlags(flagSet *pflag.FlagSet) {
-	rc.AddOption("mount", &Opt)
-	flags.BoolVarP(flagSet, &Opt.DebugFUSE, "debug-fuse", "", Opt.DebugFUSE, "Debug the FUSE internals - needs -v.")
-	flags.BoolVarP(flagSet, &Opt.AllowNonEmpty, "allow-non-empty", "", Opt.AllowNonEmpty, "Allow mounting over a non-empty directory (not Windows).")
-	flags.BoolVarP(flagSet, &Opt.AllowRoot, "allow-root", "", Opt.AllowRoot, "Allow access to root user.")
-	flags.BoolVarP(flagSet, &Opt.AllowOther, "allow-other", "", Opt.AllowOther, "Allow access to other users.")
-	flags.BoolVarP(flagSet, &Opt.DefaultPermissions, "default-permissions", "", Opt.DefaultPermissions, "Makes kernel enforce access control based on the file mode.")
-	flags.BoolVarP(flagSet, &Opt.WritebackCache, "write-back-cache", "", Opt.WritebackCache, "Makes kernel buffer writes before sending them to rclone. Without this, writethrough caching is used.")
-	flags.FVarP(flagSet, &Opt.MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads.")
-	flags.DurationVarP(flagSet, &Opt.AttrTimeout, "attr-timeout", "", Opt.AttrTimeout, "Time for which file/directory attributes are cached.")
-	flags.StringArrayVarP(flagSet, &Opt.ExtraOptions, "option", "o", []string{}, "Option for libfuse/WinFsp. Repeat if required.")
-	flags.StringArrayVarP(flagSet, &Opt.ExtraFlags, "fuse-flag", "", []string{}, "Flags or arguments to be passed direct to libfuse/WinFsp. Repeat if required.")
-	flags.BoolVarP(flagSet, &Opt.Daemon, "daemon", "", Opt.Daemon, "Run mount as a daemon (background mode).")
-	flags.StringVarP(flagSet, &Opt.VolumeName, "volname", "", Opt.VolumeName, "Set the volume name (not supported by all OSes).")
-	flags.DurationVarP(flagSet, &Opt.DaemonTimeout, "daemon-timeout", "", Opt.DaemonTimeout, "Time limit for rclone to respond to kernel (not supported by all OSes).")
-	flags.BoolVarP(flagSet, &Opt.AsyncRead, "async-read", "", Opt.AsyncRead, "Use asynchronous reads.")
-	if runtime.GOOS == "darwin" {
-		flags.BoolVarP(flagSet, &Opt.NoAppleDouble, "noappledouble", "", Opt.NoAppleDouble, "Sets the OSXFUSE option noappledouble.")
-		flags.BoolVarP(flagSet, &Opt.NoAppleXattr, "noapplexattr", "", Opt.NoAppleXattr, "Sets the OSXFUSE option noapplexattr.")
+		DaemonTimeout = 15 * time.Minute
 	}
 }
 
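The AddFlags removed above is the v1.53.2 side of the refactor: every flag binds to a field of the Options struct, which is what lets Run take a private copy (opt := Opt) and what rc.AddOption exposes over the remote control API. A minimal sketch of that binding pattern using only the spf13/pflag API; the two fields shown are illustrative, not the full option set:

package sketch

import "github.com/spf13/pflag"

// Options groups the mount settings so a single struct copy snapshots them.
type Options struct {
	DebugFUSE  bool
	AllowOther bool
}

// Opt holds the package-wide defaults that the flags write into.
var Opt = Options{}

// AddFlags binds each command line flag to a field of Opt.
func AddFlags(flagSet *pflag.FlagSet) {
	flagSet.BoolVar(&Opt.DebugFUSE, "debug-fuse", Opt.DebugFUSE, "Debug the FUSE internals - needs -v.")
	flagSet.BoolVar(&Opt.AllowOther, "allow-other", Opt.AllowOther, "Allow access to other users.")
}

On the branch side the flags bind to package globals instead, so there is no single value to copy per command or to publish over rc, which is why the per-call mountOpt handling disappears further down.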
@@ -149,7 +106,7 @@ func checkMountpointOverlap(root, mountpoint string) error {
 }
 
 // NewMountCommand makes a mount command with the given name and Mount function
-func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Command {
+func NewMountCommand(commandName string, hidden bool, Mount func(f fs.Fs, mountpoint string) error) *cobra.Command {
 	var commandDefinition = &cobra.Command{
 		Use:    commandName + " remote:path /path/to/mountpoint",
 		Hidden: hidden,
@@ -192,9 +149,6 @@ Stopping the mount manually:
     # OS X
     umount /path/to/local/mount
 
-**Note**: As of ` + "`rclone` 1.52.2, `rclone mount`" + ` now requires Go version 1.13
-or newer on some platforms depending on the underlying FUSE library in use.
-
 ### Installing on Windows
 
 To run rclone ` + commandName + ` on Windows, you will need to
@@ -336,12 +290,14 @@ With --vfs-read-chunk-size 100M and --vfs-read-chunk-size-limit 0 the following
 parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on.
 When --vfs-read-chunk-size-limit 500M is specified, the result would be
 0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.
 
+Chunked reading will only work with --vfs-cache-mode < full, as the file will always
+be copied to the vfs cache before opening with --vfs-cache-mode full.
 ` + vfs.Help,
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(2, 2, command, args)
-		opt := Opt // make a copy of the options
 
-		if opt.Daemon {
+		if Daemon {
 			config.PassConfigKeyForDaemonization = true
 		}
 
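The help text above specifies the chunked-reading schedule precisely: each ranged request doubles in size until --vfs-read-chunk-size-limit caps it, and a limit of 0 disables growing. A small self-contained sketch of that arithmetic, reproducing the worked example from the docs (an illustration only, not rclone's code):

package main

import "fmt"

// chunkOffsets returns the start offset of each ranged read for a file of
// the given total size. The chunk doubles after every read until limit
// caps it; limit == 0 means the chunk never grows, as documented above.
func chunkOffsets(size, limit, total int64) []int64 {
	var offsets []int64
	chunk := size
	for off := int64(0); off < total; off += chunk {
		offsets = append(offsets, off)
		if limit > 0 {
			chunk *= 2
			if chunk > limit {
				chunk = limit
			}
		}
	}
	return offsets
}

func main() {
	// 100M initial size, 500M limit, 2000M file (units of M):
	// prints [0 100 300 700 1200 1700], matching the example above.
	fmt.Println(chunkOffsets(100, 500, 2000))
}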
@@ -361,37 +317,36 @@ When --vfs-read-chunk-size-limit 500M is specified, the result would be
 
 		// Skip checkMountEmpty if --allow-non-empty flag is used or if
 		// the Operating System is Windows
-		if !opt.AllowNonEmpty && runtime.GOOS != "windows" {
+		if !AllowNonEmpty && runtime.GOOS != "windows" {
 			err := checkMountEmpty(mountpoint)
 			if err != nil {
 				log.Fatalf("Fatal error: %v", err)
 			}
-		} else if opt.AllowNonEmpty && runtime.GOOS == "windows" {
+		} else if AllowNonEmpty && runtime.GOOS == "windows" {
 			fs.Logf(nil, "--allow-non-empty flag does nothing on Windows")
 		}
 
 		// Work out the volume name, removing special
 		// characters from it if necessary
-		if opt.VolumeName == "" {
-			opt.VolumeName = fdst.Name() + ":" + fdst.Root()
+		if VolumeName == "" {
+			VolumeName = fdst.Name() + ":" + fdst.Root()
 		}
-		opt.VolumeName = strings.Replace(opt.VolumeName, ":", " ", -1)
-		opt.VolumeName = strings.Replace(opt.VolumeName, "/", " ", -1)
-		opt.VolumeName = strings.TrimSpace(opt.VolumeName)
-		if runtime.GOOS == "windows" && len(opt.VolumeName) > 32 {
-			opt.VolumeName = opt.VolumeName[:32]
+		VolumeName = strings.Replace(VolumeName, ":", " ", -1)
+		VolumeName = strings.Replace(VolumeName, "/", " ", -1)
+		VolumeName = strings.TrimSpace(VolumeName)
+		if runtime.GOOS == "windows" && len(VolumeName) > 32 {
+			VolumeName = VolumeName[:32]
 		}
 
 		// Start background task if --background is specified
-		if opt.Daemon {
+		if Daemon {
 			daemonized := startBackgroundMode()
 			if daemonized {
 				return
 			}
 		}
 
-		VFS := vfs.New(fdst, &vfsflags.Opt)
-		err := Mount(VFS, mountpoint, mount, &opt)
+		err := Mount(fdst, mountpoint)
 		if err != nil {
 			log.Fatalf("Fatal error: %v", err)
 		}
@@ -403,7 +358,28 @@ When --vfs-read-chunk-size-limit 500M is specified, the result would be
 
 	// Add flags
 	cmdFlags := commandDefinition.Flags()
-	AddFlags(cmdFlags)
+	flags.BoolVarP(cmdFlags, &DebugFUSE, "debug-fuse", "", DebugFUSE, "Debug the FUSE internals - needs -v.")
+	// mount options
+	flags.BoolVarP(cmdFlags, &AllowNonEmpty, "allow-non-empty", "", AllowNonEmpty, "Allow mounting over a non-empty directory (not Windows).")
+	flags.BoolVarP(cmdFlags, &AllowRoot, "allow-root", "", AllowRoot, "Allow access to root user.")
+	flags.BoolVarP(cmdFlags, &AllowOther, "allow-other", "", AllowOther, "Allow access to other users.")
+	flags.BoolVarP(cmdFlags, &DefaultPermissions, "default-permissions", "", DefaultPermissions, "Makes kernel enforce access control based on the file mode.")
+	flags.BoolVarP(cmdFlags, &WritebackCache, "write-back-cache", "", WritebackCache, "Makes kernel buffer writes before sending them to rclone. Without this, writethrough caching is used.")
+	flags.FVarP(cmdFlags, &MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads.")
+	flags.DurationVarP(cmdFlags, &AttrTimeout, "attr-timeout", "", AttrTimeout, "Time for which file/directory attributes are cached.")
+	flags.StringArrayVarP(cmdFlags, &ExtraOptions, "option", "o", []string{}, "Option for libfuse/WinFsp. Repeat if required.")
+	flags.StringArrayVarP(cmdFlags, &ExtraFlags, "fuse-flag", "", []string{}, "Flags or arguments to be passed direct to libfuse/WinFsp. Repeat if required.")
+	flags.BoolVarP(cmdFlags, &Daemon, "daemon", "", Daemon, "Run mount as a daemon (background mode).")
+	flags.StringVarP(cmdFlags, &VolumeName, "volname", "", VolumeName, "Set the volume name (not supported by all OSes).")
+	flags.DurationVarP(cmdFlags, &DaemonTimeout, "daemon-timeout", "", DaemonTimeout, "Time limit for rclone to respond to kernel (not supported by all OSes).")
+	flags.BoolVarP(cmdFlags, &AsyncRead, "async-read", "", AsyncRead, "Use asynchronous reads.")
+
+	if runtime.GOOS == "darwin" {
+		flags.BoolVarP(cmdFlags, &NoAppleDouble, "noappledouble", "", NoAppleDouble, "Sets the OSXFUSE option noappledouble.")
+		flags.BoolVarP(cmdFlags, &NoAppleXattr, "noapplexattr", "", NoAppleXattr, "Sets the OSXFUSE option noapplexattr.")
+	}
+
 	// Add in the generic flags
 	vfsflags.AddFlags(cmdFlags)
 
 	return commandDefinition
@@ -431,60 +407,3 @@ func ClipBlocks(b *uint64) {
 		*b = max
 	}
 }
-
-// Mount mounts the remote at mountpoint.
-//
-// If noModTime is set then it
-func Mount(VFS *vfs.VFS, mountpoint string, mount MountFn, opt *Options) error {
-	if opt == nil {
-		opt = &DefaultOpt
-	}
-
-	// Mount it
-	errChan, unmount, err := mount(VFS, mountpoint, opt)
-	if err != nil {
-		return errors.Wrap(err, "failed to mount FUSE fs")
-	}
-
-	// Unmount on exit
-	fnHandle := atexit.Register(func() {
-		_ = unmount()
-		_ = sdnotify.Stopping()
-	})
-	defer atexit.Unregister(fnHandle)
-
-	// Notify systemd
-	if err := sdnotify.Ready(); err != nil && err != sdnotify.ErrSdNotifyNoSocket {
-		return errors.Wrap(err, "failed to notify systemd")
-	}
-
-	// Reload VFS cache on SIGHUP
-	sigHup := make(chan os.Signal, 1)
-	signal.Notify(sigHup, syscall.SIGHUP)
-
-waitloop:
-	for {
-		select {
-		// umount triggered outside the app
-		case err = <-errChan:
-			break waitloop
-		// user sent SIGHUP to clear the cache
-		case <-sigHup:
-			root, err := VFS.Root()
-			if err != nil {
-				fs.Errorf(VFS.Fs(), "Error reading root: %v", err)
-			} else {
-				root.ForgetAll()
-			}
-		}
-	}
-
-	_ = unmount()
-	_ = sdnotify.Stopping()
-
-	if err != nil {
-		return errors.Wrap(err, "failed to umount FUSE fs")
-	}
-
-	return nil
-}
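The pivot of this file's diff is the MountFn inversion: on v1.53.2, mountlib builds the VFS once and hands it, together with an Options value, to the mount backend; on the branch, each backend receives the bare fs.Fs and constructs (and returns) its own VFS. A hedged sketch of how the two shapes relate; the empty Options and the adaptMount helper are illustrative, not code from either side:

package sketch

import (
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/vfs"
)

// Options stands in for the mount options struct on the v1.53.2 side.
type Options struct{}

// Branch shape: the backend constructs the VFS itself.
type oldMountFn func(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, error)

// v1.53.2 shape: mountlib supplies a ready-made VFS plus the options.
type newMountFn func(VFS *vfs.VFS, mountpoint string, opt *Options) (<-chan error, func() error, error)

// adaptMount wraps an old-shape backend into the new shape. It can only
// recover the underlying Fs from the pre-built VFS and must discard the
// VFS the backend creates, which is exactly the duplication the v1.53.2
// refactor removed.
func adaptMount(old oldMountFn) newMountFn {
	return func(VFS *vfs.VFS, mountpoint string, opt *Options) (<-chan error, func() error, error) {
		_, errChan, unmount, err := old(VFS.Fs(), mountpoint)
		return errChan, unmount, err
	}
}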
@@ -10,9 +10,6 @@ import (
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/rc"
-	"github.com/rclone/rclone/vfs"
-	"github.com/rclone/rclone/vfs/vfscommon"
-	"github.com/rclone/rclone/vfs/vfsflags"
 )
 
 // MountInfo defines the configuration for a mount
@@ -21,8 +18,6 @@ type MountInfo struct {
 	MountPoint string    `json:"MountPoint"`
 	MountedOn  time.Time `json:"MountedOn"`
 	Fs         string    `json:"Fs"`
-	MountOpt   *Options
-	VFSOpt     *vfscommon.Options
 }
 
 var (
@@ -58,19 +53,11 @@ This takes the following parameters
 - fs - a remote path to be mounted (required)
 - mountPoint: valid path on the local machine (required)
 - mountType: One of the values (mount, cmount, mount2) specifies the mount implementation to use
-- mountOpt: a JSON object with Mount options in.
-- vfsOpt: a JSON object with VFS options in.
 
 Eg
 
     rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint
     rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint mountType=mount
-    rclone rc mount/mount fs=TestDrive: mountPoint=/mnt/tmp vfsOpt='{"CacheMode": 2}' mountOpt='{"AllowOther": true}'
-
-The vfsOpt are as described in options/get and can be seen in the the
-"vfs" section when running and the mountOpt can be seen in the "mount" section.
-
-    rclone rc options/get
 `,
 	})
 }
@@ -82,18 +69,6 @@ func mountRc(_ context.Context, in rc.Params) (out rc.Params, err error) {
 		return nil, err
 	}
 
-	vfsOpt := vfsflags.Opt
-	err = in.GetStructMissingOK("vfsOpt", &vfsOpt)
-	if err != nil {
-		return nil, err
-	}
-
-	mountOpt := Opt
-	err = in.GetStructMissingOK("mountOpt", &mountOpt)
-	if err != nil {
-		return nil, err
-	}
-
 	mountType, err := in.GetString("mountType")
 
 	mountMu.Lock()
@@ -116,8 +91,7 @@ func mountRc(_ context.Context, in rc.Params) (out rc.Params, err error) {
 	}
 
 	if mountFns[mountType] != nil {
-		VFS := vfs.New(fdst, &vfsOpt)
-		_, unmountFn, err := mountFns[mountType](VFS, mountPoint, &mountOpt)
+		_, _, unmountFn, err := mountFns[mountType](fdst, mountPoint)
 
 		if err != nil {
 			log.Printf("mount FAILED: %v", err)
@@ -129,8 +103,6 @@ func mountRc(_ context.Context, in rc.Params) (out rc.Params, err error) {
 			MountedOn:  time.Now(),
 			Fs:         fdst.Name(),
 			MountPoint: mountPoint,
-			VFSOpt:     &vfsOpt,
-			MountOpt:   &mountOpt,
 		}
 
 		fs.Debugf(nil, "Mount for %s created at %s using %s", fdst.String(), mountPoint, mountType)
 
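The deleted mountRc lines implement a copy-then-overlay pattern: start from the process-wide defaults (vfsflags.Opt and Opt), then let the rc request override individual fields, so each mount/mount call can carry its own options. A minimal sketch of the same pattern using encoding/json; the Options fields and the overlay helper are illustrative, not the rc package's API:

package sketch

import "encoding/json"

// Options stands in for mountlib's option struct.
type Options struct {
	AllowOther bool
	DebugFUSE  bool
}

// Opt plays the role of the package-level defaults set by the flags.
var Opt = Options{}

// overlay copies the defaults first, then unmarshals the request body over
// the copy, so fields absent from the request keep their global values.
func overlay(raw []byte) (Options, error) {
	mountOpt := Opt // copy the defaults, as mountRc did with mountOpt := Opt
	if len(raw) > 0 {
		if err := json.Unmarshal(raw, &mountOpt); err != nil {
			return Options{}, err
		}
	}
	return mountOpt, nil
}

For example, overlay([]byte(`{"AllowOther": true}`)) yields the defaults with only AllowOther flipped, mirroring the mountOpt='{"AllowOther": true}' call removed from the docs above.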
@@ -68,9 +68,6 @@ func TestRc(t *testing.T) {
 	in := rc.Params{
 		"fs":         localDir,
 		"mountPoint": mountPoint,
-		"vfsOpt": rc.Params{
-			"FilePerms": 0400,
-		},
 	}
 
 	// check file.txt is not there
@@ -89,9 +86,6 @@ func TestRc(t *testing.T) {
 	fi, err := os.Stat(filePath)
 	require.NoError(t, err)
 	assert.Equal(t, int64(5), fi.Size())
-	if runtime.GOOS == "linux" {
-		assert.Equal(t, os.FileMode(0400), fi.Mode())
-	}
 
 	// FIXME the OS sometimes appears to be using the mount
 	// immediately after it appears so wait a moment
@@ -1,6 +1,6 @@
 // Package ncdu implements a text based user interface for exploring a remote
 
-//+build !plan9,!solaris,!js
+//+build !plan9,!solaris
 
 package ncdu
 
@@ -1,6 +1,6 @@
 // Build for ncdu for unsupported platforms to stop go complaining
 // about "no buildable Go source files "
 
-// +build plan9 solaris js
+// +build plan9 solaris
 
 package ncdu
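The two ncdu hunks adjust Go's usual pairing for platform-gated packages: one file carries the real implementation behind a constraint that excludes unsupported platforms, and a stub file compiles only on those platforms so go build succeeds everywhere. A sketch of the pattern with the pre-Go-1.17 build-tag syntax used here; the file and package names are illustrative:

// feature.go: the real implementation, excluded on unsupported platforms.

//+build !plan9,!solaris,!js

package feature

// feature_unsupported.go: an empty stub so the package still compiles
// on plan9, solaris and js.

// +build plan9 solaris js

package feature

Note that the constraint line must be followed by a blank line before the package clause, or Go treats it as an ordinary comment.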
Some files were not shown because too many files have changed in this diff.