mirror of https://github.com/rclone/rclone.git synced 2026-01-25 05:43:21 +00:00

Compare commits


1 commit

Nick Craig-Wood
10dea1ca47 vfs: fix saving from chrome without --vfs-cache-mode writes
Due to Chrome's rather complicated use of file handles when saving
files from the downloads window, rclone was attempting to truncate a
closed file.

The file appeared closed due to the handling of 0 length files.

This patch removes the check for the file being closed in the
WriteFileHandle.Truncate call. This is safe because the only action
this method takes is to emit an error message if the file is the wrong
size.

See: https://forum.rclone.org/t/google-drive-cannot-save-files-directly-from-browser-to-gdrive-mounted-path/17992/
2020-07-22 10:22:40 +01:00
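
To make the fix concrete, here is a minimal, self-contained Go sketch of the logic described above. The type and names are hypothetical stand-ins (rclone's real implementation is the WriteFileHandle in the vfs package); the point is simply that Truncate no longer rejects a handle that looks closed, because its only action is to report a size mismatch:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// writeFileHandle is a pared-down, hypothetical stand-in for rclone's
// WriteFileHandle, just to illustrate the commit's logic.
type writeFileHandle struct {
	mu     sync.Mutex
	remote string
	offset int64 // bytes written through this handle so far
	closed bool  // no longer consulted by Truncate (the fix)
}

var errPerm = errors.New("operation not permitted")

// Truncate succeeds only when the requested size matches what has
// already been written. Per the commit, it deliberately does not
// check the closed flag any more: Chrome's save flow can truncate via
// a handle that appears closed because of the 0-length file handling.
func (fh *writeFileHandle) Truncate(size int64) error {
	fh.mu.Lock()
	defer fh.mu.Unlock()
	// Previously something like: if fh.closed { return ECLOSED }
	if size != fh.offset {
		fmt.Printf("%s: can't change size without --vfs-cache-mode writes\n", fh.remote)
		return errPerm
	}
	return nil
}

func main() {
	fh := &writeFileHandle{remote: "drive:file.bin", closed: true}
	// Truncating a "closed" zero-length handle to size 0 now succeeds.
	fmt.Println(fh.Truncate(0)) // <nil>
}
```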
268 changed files with 11971 additions and 29745 deletions

View File

@@ -19,23 +19,24 @@ jobs:
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'go1.11', 'go1.12', 'go1.13', 'go1.14']
job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'race', 'go1.11', 'go1.12', 'go1.13']
include:
- job_name: linux
os: ubuntu-latest
go: '1.15.x'
go: '1.14.x'
modules: 'on'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
quicktest: true
racequicktest: true
deploy: true
- job_name: mac
os: macOS-latest
go: '1.15.x'
gotags: 'cmount'
go: '1.14.x'
modules: 'on'
gotags: '' # cmount doesn't work on osx travis for some reason
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
racequicktest: true
@@ -43,77 +44,84 @@ jobs:
- job_name: windows_amd64
os: windows-latest
go: '1.15.x'
go: '1.14.x'
modules: 'on'
gotags: cmount
build_flags: '-include "^windows/amd64" -cgo'
build_args: '-buildmode exe'
quicktest: true
racequicktest: true
deploy: true
- job_name: windows_386
os: windows-latest
go: '1.15.x'
go: '1.14.x'
modules: 'on'
gotags: cmount
goarch: '386'
cgo: '1'
build_flags: '-include "^windows/386" -cgo'
build_args: '-buildmode exe'
quicktest: true
deploy: true
- job_name: other_os
os: ubuntu-latest
go: '1.15.x'
go: '1.14.x'
modules: 'on'
build_flags: '-exclude "^(windows/|darwin/amd64|linux/)"'
compile_all: true
deploy: true
- job_name: race
os: ubuntu-latest
go: '1.14.x'
modules: 'on'
quicktest: true
racequicktest: true
- job_name: go1.11
os: ubuntu-latest
go: '1.11.x'
modules: 'on'
quicktest: true
- job_name: go1.12
os: ubuntu-latest
go: '1.12.x'
modules: 'on'
quicktest: true
- job_name: go1.13
os: ubuntu-latest
go: '1.13.x'
modules: 'on'
quicktest: true
- job_name: go1.14
os: ubuntu-latest
go: '1.14.x'
quicktest: true
racequicktest: true
name: ${{ matrix.job_name }}
runs-on: ${{ matrix.os }}
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v1
with:
fetch-depth: 0
# Checkout into a fixed path to avoid import path problems on go < 1.11
path: ./src/github.com/rclone/rclone
- name: Install Go
uses: actions/setup-go@v2
uses: actions/setup-go@v1
with:
stable: 'false'
go-version: ${{ matrix.go }}
- name: Set environment variables
shell: bash
run: |
echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
echo 'BUILD_ARGS=${{ matrix.build_args }}' >> $GITHUB_ENV
if [[ "${{ matrix.goarch }}" != "" ]]; then echo 'GOARCH=${{ matrix.goarch }}' >> $GITHUB_ENV ; fi
if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi
echo '::set-env name=GOPATH::${{ runner.workspace }}'
echo '::add-path::${{ runner.workspace }}/bin'
echo '::set-env name=GO111MODULE::${{ matrix.modules }}'
echo '::set-env name=GOTAGS::${{ matrix.gotags }}'
echo '::set-env name=BUILD_FLAGS::${{ matrix.build_flags }}'
if [[ "${{ matrix.goarch }}" != "" ]]; then echo '::set-env name=GOARCH::${{ matrix.goarch }}' ; fi
if [[ "${{ matrix.cgo }}" != "" ]]; then echo '::set-env name=CGO_ENABLED::${{ matrix.cgo }}' ; fi
- name: Install Libraries on Linux
shell: bash
@@ -128,7 +136,7 @@ jobs:
shell: bash
run: |
brew update
brew install --cask osxfuse
brew cask install osxfuse
if: matrix.os == 'macOS-latest'
- name: Install Libraries on Windows
@@ -136,10 +144,10 @@ jobs:
run: |
$ProgressPreference = 'SilentlyContinue'
choco install -y winfsp zip
echo "CPATH=C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
Write-Host "::set-env name=CPATH::C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
if ($env:GOARCH -eq "386") {
choco install -y mingw --forcex86 --force
echo "C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
Write-Host "::add-path::C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
}
# Copy mingw32-make.exe to make.exe so the same command line
# can be used on Windows as on macOS and Linux
@@ -159,22 +167,10 @@ jobs:
printf "\n\nSystem environment:\n\n"
env
- name: Go module cache
uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Build rclone
shell: bash
run: |
make
- name: Run tests
shell: bash
run: |
make
make quicktest
if: matrix.quicktest
@@ -226,8 +222,8 @@ jobs:
- name: Set environment variables
shell: bash
run: |
echo 'GOPATH=${{ runner.workspace }}' >> $GITHUB_ENV
echo '${{ runner.workspace }}/bin' >> $GITHUB_PATH
echo '::set-env name=GOPATH::${{ runner.workspace }}'
echo '::add-path::${{ runner.workspace }}/bin'
- name: Cross-compile rclone
run: |
@@ -235,7 +231,7 @@ jobs:
GO111MODULE=off go get -v github.com/karalabe/xgo # don't add to go.mod
# xgo \
# -image=billziss/xgo-cgofuse \
# -targets=darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
# -targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
# -tags cmount \
# -dest build \
# .
@@ -246,9 +242,9 @@ jobs:
.
- name: Build rclone
shell: bash
run: |
make
docker pull golang
docker run --rm -v "$PWD":/usr/src/rclone -w /usr/src/rclone golang go build -mod=mod -v
- name: Upload artifacts
run: |

View File

@@ -15,7 +15,7 @@ jobs:
with:
fetch-depth: 0
- name: Build and publish image
uses: ilteoood/docker_buildx@1.1.0
uses: ilteoood/docker_buildx@439099796bfc03dd9cedeb72a0c7cb92be5cc92c
with:
tag: beta
imageName: rclone/rclone

View File

@@ -23,7 +23,7 @@ jobs:
id: actual_major_version
run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
- name: Build and publish image
uses: ilteoood/docker_buildx@1.1.0
uses: ilteoood/docker_buildx@439099796bfc03dd9cedeb72a0c7cb92be5cc92c
with:
tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
imageName: rclone/rclone

.gitignore (1 line changed)
View File

@@ -9,4 +9,3 @@ rclone.iml
*.test
*.log
*.iml
fuzz-build.zip

MANUAL.html generated (6484 lines changed)

File diff suppressed because it is too large.

MANUAL.md generated (4030 lines changed)

File diff suppressed because it is too large.

MANUAL.txt generated (8241 lines changed)

File diff suppressed because it is too large.

View File

@@ -7,29 +7,27 @@ RELEASE_TAG := $(shell git tag -l --points-at HEAD)
VERSION := $(shell cat VERSION)
# Last tag on this branch
LAST_TAG := $(shell git describe --tags --abbrev=0)
# Next version
NEXT_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2+1,0}')
NEXT_PATCH_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2,$$3+1}')
# If we are working on a release, override branch to master
ifdef RELEASE_TAG
BRANCH := master
LAST_TAG := $(shell git describe --abbrev=0 --tags $(VERSION)^)
endif
TAG_BRANCH := .$(BRANCH)
BRANCH_PATH := branch/$(BRANCH)/
TAG_BRANCH := -$(BRANCH)
BRANCH_PATH := branch/
# If building HEAD or master then unset TAG_BRANCH and BRANCH_PATH
ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
TAG_BRANCH :=
BRANCH_PATH :=
endif
# Make version suffix -beta.NNNN.CCCCCCCC (N=Commit number, C=Commit)
VERSION_SUFFIX := -beta.$(shell git rev-list --count HEAD).$(shell git show --no-patch --no-notes --pretty='%h' HEAD)
# TAG is current version + commit number + commit + branch
# Make version suffix -DDD-gCCCCCCCC (D=commits since last release, C=Commit) or blank
VERSION_SUFFIX := $(shell git describe --abbrev=8 --tags | perl -lpe 's/^v\d+\.\d+\.\d+//; s/^-(\d+)/"-".sprintf("%03d",$$1)/e;')
# TAG is current version + number of commits since last release + branch
TAG := $(VERSION)$(VERSION_SUFFIX)$(TAG_BRANCH)
ifdef RELEASE_TAG
TAG := $(RELEASE_TAG)
NEXT_VERSION := $(shell echo $(VERSION) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
ifndef RELEASE_TAG
TAG := $(TAG)-beta
endif
GO_VERSION := $(shell go version)
GO_FILES := $(shell go list ./... )
ifdef BETA_SUBDIR
BETA_SUBDIR := /$(BETA_SUBDIR)
endif
@@ -46,19 +44,20 @@ endif
.PHONY: rclone test_all vars version
rclone:
go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS)
go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
mkdir -p `go env GOPATH`/bin/
cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`
test_all:
go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) $(BUILD_ARGS) github.com/rclone/rclone/fstest/test_all
go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
vars:
@echo SHELL="'$(SHELL)'"
@echo BRANCH="'$(BRANCH)'"
@echo TAG="'$(TAG)'"
@echo VERSION="'$(VERSION)'"
@echo NEXT_VERSION="'$(NEXT_VERSION)'"
@echo GO_VERSION="'$(GO_VERSION)'"
@echo BETA_URL="'$(BETA_URL)'"
@@ -76,10 +75,10 @@ test: rclone test_all
# Quick test
quicktest:
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) ./...
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) $(GO_FILES)
racequicktest:
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race $(GO_FILES)
# Do source code quality checks
check: rclone
@@ -165,11 +164,6 @@ validate_website: website
tarball:
git archive -9 --format=tar.gz --prefix=rclone-$(TAG)/ -o build/rclone-$(TAG).tar.gz $(TAG)
vendorball:
go mod vendor
tar -zcf build/rclone-$(TAG)-vendor.tar.gz vendor
rm -rf vendor
sign_upload:
cd build && md5sum rclone-v* | gpg --clearsign > MD5SUMS
cd build && sha1sum rclone-v* | gpg --clearsign > SHA1SUMS
@@ -188,10 +182,10 @@ upload_github:
./bin/upload-github $(TAG)
cross: doc
go run bin/cross-compile.go -release current $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)
beta:
go run bin/cross-compile.go $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
go run bin/cross-compile.go $(BUILDTAGS) $(TAG)
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
@echo Beta release ready at https://pub.rclone.org/$(TAG)/
@@ -199,23 +193,23 @@ log_since_last_release:
git log $(LAST_TAG)..
compile_all:
go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG)
ci_upload:
sudo chown -R $$USER build
find build -type l -delete
gzip -r9v build
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
ifndef BRANCH_PATH
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
endif
@echo Beta release ready at $(BETA_URL)/testbuilds
ci_beta:
git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),)
ifndef BRANCH_PATH
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
endif
@echo Beta release ready at $(BETA_URL)
@@ -227,33 +221,26 @@ fetch_binaries:
serve: website
cd docs && hugo server -v -w --disableFastRender
tag: retag doc
bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new
tag: doc
@echo "Old tag is $(VERSION)"
@echo "New tag is $(NEXT_VERSION)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)\"\n" | gofmt > fs/version.go
echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_VERSION)" > VERSION
git tag -s -m "Version $(NEXT_VERSION)" $(NEXT_VERSION)
bin/make_changelog.py $(LAST_TAG) $(NEXT_VERSION) > docs/content/changelog.md.new
mv docs/content/changelog.md.new docs/content/changelog.md
@echo "Edit the new changelog in docs/content/changelog.md"
@echo "Then commit all the changes"
@echo git commit -m \"Version $(VERSION)\" -a -v
@echo git commit -m \"Version $(NEXT_VERSION)\" -a -v
@echo "And finally run make retag before make cross etc"
retag:
@echo "Version is $(VERSION)"
git tag -f -s -m "Version $(VERSION)" $(VERSION)
startdev:
@echo "Version is $(VERSION)"
@echo "Next version is $(NEXT_VERSION)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)-DEV\"\n" | gofmt > fs/version.go
echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_VERSION)" > VERSION
git commit -m "Start $(NEXT_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
startstable:
@echo "Version is $(VERSION)"
@echo "Next stable version is $(NEXT_PATCH_VERSION)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_PATCH_VERSION)-DEV\"\n" | gofmt > fs/version.go
echo -n "$(NEXT_PATCH_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_PATCH_VERSION)" > VERSION
git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(VERSION)-DEV\"\n" | gofmt > fs/version.go
git commit -m "Start $(VERSION)-DEV development" fs/version.go
winzip:
zip -9 rclone-$(TAG).zip rclone.exe

View File

@@ -64,7 +64,6 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)

View File

@@ -9,7 +9,7 @@ This file describes how to make the various kinds of releases
## Making a release
* git checkout master # see below for stable branch
* git checkout master
* git pull
* git status - make sure everything is checked in
* Check GitHub actions build for master is Green
@@ -25,13 +25,12 @@ This file describes how to make the various kinds of releases
* # Wait for the GitHub builds to complete then...
* make fetch_binaries
* make tarball
* make vendorball
* make sign_upload
* make check_sign
* make upload
* make upload_website
* make upload_github
* make startdev # make startstable for stable branch
* make startdev
* # announce with forum post, twitter post, patreon post
Early in the next release cycle update the dependencies
@@ -42,34 +41,60 @@ Early in the next release cycle update the dependencies
* git add new files
* git commit -a -v
If `make update` fails with errors like this:
```
# github.com/cpuguy83/go-md2man/md2man
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:11:16: undefined: blackfriday.EXTENSION_NO_INTRA_EMPHASIS
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:12:16: undefined: blackfriday.EXTENSION_TABLES
```
Can be fixed with
* GO111MODULE=on go get -u github.com/russross/blackfriday@v1.5.2
* GO111MODULE=on go mod tidy
## Making a point release
If rclone needs a point release due to some horrendous bug:
Set vars
* BASE_TAG=v1.XX # eg v1.52
* NEW_TAG=${BASE_TAG}.Y # eg v1.52.1
* echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1
First make the release branch. If this is a second point release then
this will be done already.
* git branch ${BASE_TAG} ${BASE_TAG}-stable
* git co ${BASE_TAG}-stable
* make startstable
Now
* git cherry-pick any fixes
* Do the steps as above
* Test (see above)
* make NEXT_VERSION=${NEW_TAG} tag
* edit docs/content/changelog.md
* make TAG=${NEW_TAG} doc
* git commit -a -v -m "Version ${NEW_TAG}"
* git tag -d ${NEW_TAG}
* git tag -s -m "Version ${NEW_TAG}" ${NEW_TAG}
* git push --tags -u origin ${BASE_TAG}-stable
* Wait for builds to complete
* make BRANCH_PATH= TAG=${NEW_TAG} fetch_binaries
* make TAG=${NEW_TAG} tarball
* make TAG=${NEW_TAG} sign_upload
* make TAG=${NEW_TAG} check_sign
* make TAG=${NEW_TAG} upload
* make TAG=${NEW_TAG} upload_website
* make TAG=${NEW_TAG} upload_github
* NB this overwrites the current beta so we need to do this
* git co master
* `#` cherry pick the changes to the changelog - check the diff to make sure it is correct
* git checkout ${BASE_TAG}-stable docs/content/changelog.md
* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
* make VERSION=${NEW_TAG} startdev
* # cherry pick the changes to the changelog and VERSION
* git checkout ${BASE_TAG}-stable VERSION docs/content/changelog.md
* git commit --amend
* git push
* Announce!
## Making a manual build of docker

View File

@@ -1 +1 @@
v1.53.4
v1.52.2

View File

@@ -5,7 +5,6 @@ import (
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
@@ -47,5 +46,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if strings.HasPrefix(opt.Remote, name+":") {
return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
}
return cache.Get(fspath.JoinRootPath(opt.Remote, root))
fsInfo, configName, fsPath, config, err := fs.ConfigFs(opt.Remote)
if err != nil {
return nil, err
}
return fsInfo.NewFs(configName, fspath.JoinRootPath(fsPath, root), config)
}

View File

@@ -76,7 +76,23 @@ func init() {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Amazon Application Client ID.",
Required: true,
}, {
Name: config.ConfigClientSecret,
Help: "Amazon Application Client Secret.",
Required: true,
}, {
Name: config.ConfigAuthURL,
Help: "Auth server URL.\nLeave blank to use Amazon's.",
Advanced: true,
}, {
Name: config.ConfigTokenURL,
Help: "Token server url.\nleave blank to use Amazon's.",
Advanced: true,
}, {
Name: "checkpoint",
Help: "Checkpoint for internal polling (debug).",
Hide: fs.OptionHideBoth,
@@ -127,7 +143,7 @@ underlying S3 storage.`,
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
Default: (encoder.Base |
encoder.EncodeInvalidUtf8),
}}...),
}},
})
}
@@ -921,8 +937,8 @@ func (f *Fs) Hashes() hash.Set {
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
}
// ------------------------------------------------------------
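
The signature pair above recurs throughout this compare: each backend's optional Purge implementation differs between the two sides only by a dir parameter. As a hedged, illustrative-only sketch (names invented here; rclone's real optional interfaces are defined in its fs package), the two shapes are:

```go
// Package sketch illustrates the two Purge signatures visible in
// these diffs; it is not rclone's actual interface definition.
package sketch

import "context"

// Newer side of the diff: Purge is told which directory to delete.
type PurgerWithDir interface {
	Purge(ctx context.Context, dir string) error
}

// Older side of the diff: Purge always operates on the root, so
// callers pass no directory and implementations purge everything.
type Purger interface {
	Purge(ctx context.Context) error
}
```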

View File

@@ -1,6 +1,6 @@
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
// +build !plan9,!solaris,!js,go1.13
// +build !plan9,!solaris,go1.13
package azureblob
@@ -967,7 +967,8 @@ func (f *Fs) Hashes() hash.Set {
}
// Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge(ctx context.Context, dir string) error {
func (f *Fs) Purge(ctx context.Context) error {
dir := "" // forward compat!
container, directory := f.split(dir)
if container == "" || directory != "" {
// Delegate to caller if not root of a container

View File

@@ -1,4 +1,4 @@
// +build !plan9,!solaris,!js,go1.13
// +build !plan9,!solaris,go1.13
package azureblob

View File

@@ -1,6 +1,6 @@
// Test AzureBlob filesystem interface
// +build !plan9,!solaris,!js,go1.13
// +build !plan9,!solaris,go1.13
package azureblob

View File

@@ -1,6 +1,6 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build plan9 solaris js !go1.13
// +build plan9 solaris !go1.13
package azureblob

View File

@@ -1143,8 +1143,7 @@ func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
// if oldOnly is true then it deletes only non current files.
//
// Implemented here so we can make sure we delete old versions.
func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
bucket, directory := f.split(dir)
func (f *Fs) purge(ctx context.Context, bucket, directory string, oldOnly bool) error {
if bucket == "" {
return errors.New("can't purge from root")
}
@@ -1219,19 +1218,19 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
wg.Wait()
if !oldOnly {
checkErr(f.Rmdir(ctx, dir))
checkErr(f.Rmdir(ctx, ""))
}
return errReturn
}
// Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purge(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purge(ctx, f.rootBucket, f.rootDirectory, false)
}
// CleanUp deletes all the hidden files.
func (f *Fs) CleanUp(ctx context.Context) error {
return f.purge(ctx, "", true)
return f.purge(ctx, f.rootBucket, f.rootDirectory, true)
}
// copy does a server side copy from dstObj <- srcObj
@@ -1673,8 +1672,8 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
opts.RootURL = o.fs.opt.DownloadURL
}
// Download by id if set and not using DownloadURL otherwise by name
if o.id != "" && o.fs.opt.DownloadURL == "" {
// Download by id if set otherwise by name
if o.id != "" {
opts.Path += "/b2api/v1/b2_download_file_by_id?fileId=" + urlEncode(o.id)
} else {
bucket, bucketPath := o.split()

View File

@@ -87,23 +87,26 @@ func init() {
Config: func(name string, m configmap.Mapper) {
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
boxAccessToken, boxAccessTokenOk := m.Get("access_token")
var err error
// If using box config.json, use JWT auth
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
err = refreshJWTToken(jsonFile, boxSubType, name, m)
if err != nil {
log.Fatalf("Failed to configure token with jwt authentication: %v", err)
}
// Else, if not using an access token, use oauth2
} else if boxAccessToken == "" || !boxAccessTokenOk {
} else {
err = oauthutil.Config("box", name, m, oauthConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token with oauth authentication: %v", err)
}
}
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Box App Client Id.\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Box App Client Secret\nLeave blank normally.",
}, {
Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "0",
@@ -111,9 +114,6 @@ func init() {
}, {
Name: "box_config_file",
Help: "Box App config.json location\nLeave blank normally." + env.ShellExpandHelp,
}, {
Name: "access_token",
Help: "Box App Primary Access Token\nLeave blank normally.",
}, {
Name: "box_sub_type",
Default: "user",
@@ -149,7 +149,7 @@ func init() {
encoder.EncodeBackSlash |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8),
}}...),
}},
})
}
@@ -247,7 +247,6 @@ type Options struct {
CommitRetries int `config:"commit_retries"`
Enc encoder.MultiEncoder `config:"encoding"`
RootFolderID string `config:"root_folder_id"`
AccessToken string `config:"access_token"`
}
// Fs represents a remote box
@@ -386,22 +385,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
root = parsePath(root)
client := fshttp.NewClient(fs.Config)
var ts *oauthutil.TokenSource
// If not using an accessToken, create an oauth client and tokensource
if opt.AccessToken == "" {
client, ts, err = oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Box")
}
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Box")
}
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(client).SetRoot(rootURL),
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
}
@@ -411,30 +404,23 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}).Fill(f)
f.srv.SetErrorHandler(errorHandler)
// If using an accessToken, set the Authorization header
if f.opt.AccessToken != "" {
f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken)
}
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
if ts != nil {
// If using box config.json and JWT, renewing should just refresh the token and
// should do so whether there are uploads pending or not.
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
err := refreshJWTToken(jsonFile, boxSubType, name, m)
return err
})
f.tokenRenewer.Start()
} else {
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath(ctx, "")
return err
})
}
// If using box config.json and JWT, renewing should just refresh the token and
// should do so whether there are uploads pending or not.
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
err := refreshJWTToken(jsonFile, boxSubType, name, m)
return err
})
f.tokenRenewer.Start()
} else {
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath(ctx, "")
return err
})
}
// Get rootFolderID
@@ -856,8 +842,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
}
// move a file or folder
@@ -1272,10 +1258,8 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
if o.fs.tokenRenewer != nil {
o.fs.tokenRenewer.Start()
defer o.fs.tokenRenewer.Stop()
}
o.fs.tokenRenewer.Start()
defer o.fs.tokenRenewer.Stop()
size := src.Size()
modTime := src.ModTime(ctx)

View File

@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9
package cache
@@ -361,10 +361,15 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
}
remotePath := fspath.JoinRootPath(opt.Remote, rootPath)
wrappedFs, wrapErr := cache.Get(remotePath)
wInfo, wName, wPath, wConfig, err := fs.ConfigFs(opt.Remote)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", opt.Remote)
}
remotePath := fspath.JoinRootPath(wPath, rootPath)
wrappedFs, wrapErr := wInfo.NewFs(wName, remotePath, wConfig)
if wrapErr != nil && wrapErr != fs.ErrorIsFile {
return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
return nil, errors.Wrapf(wrapErr, "failed to make remote %s:%s to wrap", wName, remotePath)
}
var fsErr error
fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
@@ -385,7 +390,6 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
cleanupChan: make(chan bool, 1),
notifiedRemotes: make(map[string]bool),
}
cache.PinUntilFinalized(f.Fs, f)
f.rateLimiter = rate.NewLimiter(rate.Limit(float64(opt.Rps)), opt.TotalWorkers)
f.plexConnector = &plexConnector{}
@@ -1698,20 +1702,17 @@ func (f *Fs) Hashes() hash.Set {
return f.Fs.Hashes()
}
// Purge all files in the directory
func (f *Fs) Purge(ctx context.Context, dir string) error {
if dir == "" {
// FIXME this isn't quite right as it should purge the dir prefix
fs.Infof(f, "purging cache")
f.cache.Purge()
}
// Purge all files in the root and the root directory
func (f *Fs) Purge(ctx context.Context) error {
fs.Infof(f, "purging cache")
f.cache.Purge()
do := f.Fs.Features().Purge
if do == nil {
return fs.ErrorCantPurge
return nil
}
err := do(ctx, dir)
err := do(ctx)
if err != nil {
return err
}

View File

@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9
// +build !race
package cache_test
@@ -16,6 +16,7 @@ import (
"os"
"path"
"path/filepath"
"runtime"
"runtime/debug"
"strings"
"testing"
@@ -30,10 +31,13 @@ import (
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/testy"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -49,7 +53,9 @@ const (
var (
remoteName string
mountDir string
uploadDir string
useMount bool
runInstance *run
errNotSupported = errors.New("not supported")
decryptedToEncryptedRemotes = map[string]string{
@@ -85,7 +91,9 @@ var (
func init() {
goflag.StringVar(&remoteName, "remote-internal", "TestInternalCache", "Remote to test with, defaults to local filesystem")
goflag.StringVar(&mountDir, "mount-dir-internal", "", "")
goflag.StringVar(&uploadDir, "upload-dir-internal", "", "")
goflag.BoolVar(&useMount, "cache-use-mount", false, "Test only with mount")
}
// TestMain drives the tests
@@ -93,7 +101,7 @@ func TestMain(m *testing.M) {
goflag.Parse()
var rc int
log.Printf("Running with the following params: \n remote: %v", remoteName)
log.Printf("Running with the following params: \n remote: %v, \n mount: %v", remoteName, useMount)
runInstance = newRun()
rc = m.Run()
os.Exit(rc)
@@ -266,6 +274,31 @@ func TestInternalObjNotFound(t *testing.T) {
require.Nil(t, obj)
}
func TestInternalRemoteWrittenFileFoundInMount(t *testing.T) {
if !runInstance.useMount {
t.Skip("test needs mount mode")
}
id := fmt.Sprintf("tirwffim%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
var testData []byte
if runInstance.rootIsCrypt {
testData, err = base64.StdEncoding.DecodeString(cryptedTextBase64)
require.NoError(t, err)
} else {
testData = []byte("test content")
}
runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test"), testData)
data, err := runInstance.readDataFromRemote(t, rootFs, "test", 0, int64(len([]byte("test content"))), false)
require.NoError(t, err)
require.Equal(t, "test content", string(data))
}
func TestInternalCachedWrittenContentMatches(t *testing.T) {
testy.SkipUnreliable(t)
id := fmt.Sprintf("ticwcm%v", time.Now().Unix())
@@ -661,6 +694,79 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
}
func TestInternalChangeSeenAfterRc(t *testing.T) {
cacheExpire := rc.Calls.Get("cache/expire")
assert.NotNil(t, cacheExpire)
id := fmt.Sprintf("ticsarc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
if !runInstance.useMount {
t.Skipf("needs mount")
}
if !runInstance.wrappedIsExternal {
t.Skipf("needs drive")
}
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
chunkSize := cfs.ChunkSize()
// create some rand test data
testData := randStringBytes(int(chunkSize*4 + chunkSize/2))
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
// update in the wrapped fs
o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
require.NoError(t, err)
wrappedTime := time.Now().Add(-1 * time.Hour)
err = o.SetModTime(context.Background(), wrappedTime)
require.NoError(t, err)
// get a new instance from the cache
co, err := rootFs.NewObject(context.Background(), "data.bin")
require.NoError(t, err)
require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String())
// Call the rc function
m, err := cacheExpire.Fn(context.Background(), rc.Params{"remote": "data.bin"})
require.NoError(t, err)
require.Contains(t, m, "status")
require.Contains(t, m, "message")
require.Equal(t, "ok", m["status"])
require.Contains(t, m["message"], "cached file cleared")
// get a new instance from the cache
co, err = rootFs.NewObject(context.Background(), "data.bin")
require.NoError(t, err)
require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
_, err = runInstance.list(t, rootFs, "")
require.NoError(t, err)
// create some rand test data
testData2 := randStringBytes(int(chunkSize))
runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test2"), testData2)
// list should have 1 item only
li1, err := runInstance.list(t, rootFs, "")
require.NoError(t, err)
require.Len(t, li1, 1)
// Call the rc function
m, err = cacheExpire.Fn(context.Background(), rc.Params{"remote": "/"})
require.NoError(t, err)
require.Contains(t, m, "status")
require.Contains(t, m, "message")
require.Equal(t, "ok", m["status"])
require.Contains(t, m["message"], "cached directory cleared")
// list should have 2 items now
li2, err := runInstance.list(t, rootFs, "")
require.NoError(t, err)
require.Len(t, li2, 2)
}
func TestInternalCacheWrites(t *testing.T) {
id := "ticw"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
@@ -808,9 +914,15 @@ func TestInternalBug2117(t *testing.T) {
type run struct {
okDiff time.Duration
runDefaultCfgMap configmap.Simple
mntDir string
tmpUploadDir string
useMount bool
isMounted bool
rootIsCrypt bool
wrappedIsExternal bool
unmountFn func() error
unmountRes chan error
vfs *vfs.VFS
tempFiles []*os.File
dbPath string
chunkPath string
@@ -820,7 +932,9 @@ type run struct {
func newRun() *run {
var err error
r := &run{
okDiff: time.Second * 9, // really big diff here but the build machines seem to be slow. need a different way for this
useMount: useMount,
isMounted: false,
}
// Read in all the defaults for all the options
@@ -833,6 +947,32 @@ func newRun() *run {
r.runDefaultCfgMap.Set(option.Name, fmt.Sprint(option.Default))
}
if mountDir == "" {
if runtime.GOOS != "windows" {
r.mntDir, err = ioutil.TempDir("", "rclonecache-mount")
if err != nil {
log.Fatalf("Failed to create mount dir: %v", err)
return nil
}
} else {
// Find a free drive letter
drive := ""
for letter := 'E'; letter <= 'Z'; letter++ {
drive = string(letter) + ":"
_, err := os.Stat(drive + "\\")
if os.IsNotExist(err) {
goto found
}
}
log.Print("Couldn't find free drive letter for test")
found:
r.mntDir = drive
}
} else {
r.mntDir = mountDir
}
log.Printf("Mount Dir: %v", r.mntDir)
if uploadDir == "" {
r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
if err != nil {
@@ -946,21 +1086,33 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
}
if purge {
_ = f.Features().Purge(context.Background(), "")
_ = f.Features().Purge(context.Background())
require.NoError(t, err)
}
err = f.Mkdir(context.Background(), "")
require.NoError(t, err)
if r.useMount && !r.isMounted {
r.mountFs(t, f)
}
return f, boltDb
}
func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
err := f.Features().Purge(context.Background(), "")
if r.useMount && r.isMounted {
r.unmountFs(t, f)
}
err := f.Features().Purge(context.Background())
require.NoError(t, err)
cfs, err := r.getCacheFs(f)
require.NoError(t, err)
cfs.StopBackgroundRunners()
if r.useMount && runtime.GOOS != "windows" {
err = os.RemoveAll(r.mntDir)
require.NoError(t, err)
}
err = os.RemoveAll(r.tmpUploadDir)
require.NoError(t, err)
@@ -1000,11 +1152,37 @@ func (r *run) writeObjectString(t *testing.T, f fs.Fs, remote, content string) f
}
func (r *run) writeRemoteBytes(t *testing.T, f fs.Fs, remote string, data []byte) {
r.writeObjectBytes(t, f, remote, data)
var err error
if r.useMount {
err = r.retryBlock(func() error {
return ioutil.WriteFile(path.Join(r.mntDir, remote), data, 0600)
}, 3, time.Second*3)
require.NoError(t, err)
r.vfs.WaitForWriters(10 * time.Second)
} else {
r.writeObjectBytes(t, f, remote, data)
}
}
func (r *run) writeRemoteReader(t *testing.T, f fs.Fs, remote string, in io.ReadCloser) {
r.writeObjectReader(t, f, remote, in)
defer func() {
_ = in.Close()
}()
if r.useMount {
out, err := os.Create(path.Join(r.mntDir, remote))
require.NoError(t, err)
defer func() {
_ = out.Close()
}()
_, err = io.Copy(out, in)
require.NoError(t, err)
r.vfs.WaitForWriters(10 * time.Second)
} else {
r.writeObjectReader(t, f, remote, in)
}
}
func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object {
@@ -1021,6 +1199,10 @@ func (r *run) writeObjectReader(t *testing.T, f fs.Fs, remote string, in io.Read
objInfo := object.NewStaticObjectInfo(remote, modTime, -1, true, nil, f)
obj, err := f.Put(context.Background(), in, objInfo)
require.NoError(t, err)
if r.useMount {
r.vfs.WaitForWriters(10 * time.Second)
}
return obj
}
@@ -1028,16 +1210,26 @@ func (r *run) updateObjectRemote(t *testing.T, f fs.Fs, remote string, data1 []b
var err error
var obj fs.Object
in1 := bytes.NewReader(data1)
in2 := bytes.NewReader(data2)
objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
if r.useMount {
err = ioutil.WriteFile(path.Join(r.mntDir, remote), data1, 0600)
require.NoError(t, err)
r.vfs.WaitForWriters(10 * time.Second)
err = ioutil.WriteFile(path.Join(r.mntDir, remote), data2, 0600)
require.NoError(t, err)
r.vfs.WaitForWriters(10 * time.Second)
obj, err = f.NewObject(context.Background(), remote)
} else {
in1 := bytes.NewReader(data1)
in2 := bytes.NewReader(data2)
objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
obj, err = f.Put(context.Background(), in1, objInfo1)
require.NoError(t, err)
obj, err = f.NewObject(context.Background(), remote)
require.NoError(t, err)
err = obj.Update(context.Background(), in2, objInfo2)
obj, err = f.Put(context.Background(), in1, objInfo1)
require.NoError(t, err)
obj, err = f.NewObject(context.Background(), remote)
require.NoError(t, err)
err = obj.Update(context.Background(), in2, objInfo2)
}
require.NoError(t, err)
return obj
@@ -1047,12 +1239,30 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
size := end - offset
checkSample := make([]byte, size)
co, err := f.NewObject(context.Background(), remote)
if err != nil {
return checkSample, err
if r.useMount {
f, err := os.Open(path.Join(r.mntDir, remote))
defer func() {
_ = f.Close()
}()
if err != nil {
return checkSample, err
}
_, _ = f.Seek(offset, io.SeekStart)
totalRead, err := io.ReadFull(f, checkSample)
checkSample = checkSample[:totalRead]
if err == io.EOF || err == io.ErrUnexpectedEOF {
err = nil
}
if err != nil {
return checkSample, err
}
} else {
co, err := f.NewObject(context.Background(), remote)
if err != nil {
return checkSample, err
}
checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
}
checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
if !noLengthCheck && size != int64(len(checkSample)) {
return checkSample, errors.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
}
@@ -1075,19 +1285,28 @@ func (r *run) readDataFromObj(t *testing.T, o fs.Object, offset, end int64, noLe
}
func (r *run) mkdir(t *testing.T, f fs.Fs, remote string) {
err := f.Mkdir(context.Background(), remote)
var err error
if r.useMount {
err = os.Mkdir(path.Join(r.mntDir, remote), 0700)
} else {
err = f.Mkdir(context.Background(), remote)
}
require.NoError(t, err)
}
func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
var err error
var obj fs.Object
obj, err = f.NewObject(context.Background(), remote)
if err != nil {
err = f.Rmdir(context.Background(), remote)
if r.useMount {
err = os.Remove(path.Join(r.mntDir, remote))
} else {
err = obj.Remove(context.Background())
var obj fs.Object
obj, err = f.NewObject(context.Background(), remote)
if err != nil {
err = f.Rmdir(context.Background(), remote)
} else {
err = obj.Remove(context.Background())
}
}
return err
@@ -1096,10 +1315,18 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error) {
var err error
var l []interface{}
var list fs.DirEntries
list, err = f.List(context.Background(), remote)
for _, ll := range list {
l = append(l, ll)
if r.useMount {
var list []os.FileInfo
list, err = ioutil.ReadDir(path.Join(r.mntDir, remote))
for _, ll := range list {
l = append(l, ll)
}
} else {
var list fs.DirEntries
list, err = f.List(context.Background(), remote)
for _, ll := range list {
l = append(l, ll)
}
}
return l, err
}
@@ -1128,7 +1355,13 @@ func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
var err error
if rootFs.Features().DirMove != nil {
if runInstance.useMount {
err = os.Rename(path.Join(runInstance.mntDir, src), path.Join(runInstance.mntDir, dst))
if err != nil {
return err
}
r.vfs.WaitForWriters(10 * time.Second)
} else if rootFs.Features().DirMove != nil {
err = rootFs.Features().DirMove(context.Background(), rootFs, src, dst)
if err != nil {
return err
@@ -1144,7 +1377,13 @@ func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
var err error
if rootFs.Features().Move != nil {
if runInstance.useMount {
err = os.Rename(path.Join(runInstance.mntDir, src), path.Join(runInstance.mntDir, dst))
if err != nil {
return err
}
r.vfs.WaitForWriters(10 * time.Second)
} else if rootFs.Features().Move != nil {
obj1, err := rootFs.NewObject(context.Background(), src)
if err != nil {
return err
@@ -1164,7 +1403,13 @@ func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
var err error
if rootFs.Features().Copy != nil {
if r.useMount {
err = r.copyFile(t, rootFs, path.Join(r.mntDir, src), path.Join(r.mntDir, dst))
if err != nil {
return err
}
r.vfs.WaitForWriters(10 * time.Second)
} else if rootFs.Features().Copy != nil {
obj, err := rootFs.NewObject(context.Background(), src)
if err != nil {
return err
@@ -1184,6 +1429,13 @@ func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error) {
var err error
if r.useMount {
fi, err := os.Stat(path.Join(runInstance.mntDir, src))
if err != nil {
return time.Time{}, err
}
return fi.ModTime(), nil
}
obj1, err := rootFs.NewObject(context.Background(), src)
if err != nil {
return time.Time{}, err
@@ -1194,6 +1446,13 @@ func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error)
func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
var err error
if r.useMount {
fi, err := os.Stat(path.Join(runInstance.mntDir, src))
if err != nil {
return int64(0), err
}
return fi.Size(), nil
}
obj1, err := rootFs.NewObject(context.Background(), src)
if err != nil {
return int64(0), err
@@ -1204,15 +1463,28 @@ func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) error {
var err error
var obj1 fs.Object
obj1, err = rootFs.NewObject(context.Background(), src)
if err != nil {
return err
if r.useMount {
var f *os.File
f, err = os.OpenFile(path.Join(runInstance.mntDir, src), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return err
}
defer func() {
_ = f.Close()
r.vfs.WaitForWriters(10 * time.Second)
}()
_, err = f.WriteString(data + append)
} else {
var obj1 fs.Object
obj1, err = rootFs.NewObject(context.Background(), src)
if err != nil {
return err
}
data1 := []byte(data + append)
r := bytes.NewReader(data1)
objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
err = obj1.Update(context.Background(), r, objInfo1)
}
data1 := []byte(data + append)
reader := bytes.NewReader(data1)
objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
err = obj1.Update(context.Background(), reader, objInfo1)
return err
}

backend/cache/cache_mount_other_test.go (new file, 21 lines)
View File

@@ -0,0 +1,21 @@
// +build !linux !go1.13
// +build !darwin !go1.13
// +build !freebsd !go1.13
// +build !windows
// +build !race
package cache_test
import (
"testing"
"github.com/rclone/rclone/fs"
)
func (r *run) mountFs(t *testing.T, f fs.Fs) {
panic("mountFs not defined for this platform")
}
func (r *run) unmountFs(t *testing.T, f fs.Fs) {
panic("unmountFs not defined for this platform")
}

backend/cache/cache_mount_unix_test.go (new file, 79 lines)
View File

@@ -0,0 +1,79 @@
// +build linux,go1.13 darwin,go1.13 freebsd,go1.13
// +build !race
package cache_test
import (
"os"
"testing"
"time"
"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
"github.com/rclone/rclone/cmd/mount"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/require"
)
func (r *run) mountFs(t *testing.T, f fs.Fs) {
device := f.Name() + ":" + f.Root()
var options = []fuse.MountOption{
fuse.MaxReadahead(uint32(mountlib.MaxReadAhead)),
fuse.Subtype("rclone"),
fuse.FSName(device), fuse.VolumeName(device),
fuse.NoAppleDouble(),
fuse.NoAppleXattr(),
//fuse.AllowOther(),
}
err := os.MkdirAll(r.mntDir, os.ModePerm)
require.NoError(t, err)
c, err := fuse.Mount(r.mntDir, options...)
require.NoError(t, err)
filesys := mount.NewFS(f)
server := fusefs.New(c, nil)
// Serve the mount point in the background returning error to errChan
r.unmountRes = make(chan error, 1)
go func() {
err := server.Serve(filesys)
closeErr := c.Close()
if err == nil {
err = closeErr
}
r.unmountRes <- err
}()
// check if the mount process has an error to report
<-c.Ready
require.NoError(t, c.MountError)
r.unmountFn = func() error {
// Shutdown the VFS
filesys.VFS.Shutdown()
return fuse.Unmount(r.mntDir)
}
r.vfs = filesys.VFS
r.isMounted = true
}
func (r *run) unmountFs(t *testing.T, f fs.Fs) {
var err error
for i := 0; i < 4; i++ {
err = r.unmountFn()
if err != nil {
//log.Printf("signal to umount failed - retrying: %v", err)
time.Sleep(3 * time.Second)
continue
}
break
}
require.NoError(t, err)
err = <-r.unmountRes
require.NoError(t, err)
err = r.vfs.CleanUp()
require.NoError(t, err)
r.isMounted = false
}

View File

@@ -0,0 +1,125 @@
// +build windows
// +build !race
package cache_test
import (
"fmt"
"os"
"testing"
"time"
"github.com/billziss-gh/cgofuse/fuse"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd/cmount"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/require"
)
// waitFor runs fn() until it returns true or the timeout expires
func waitFor(fn func() bool) (ok bool) {
const totalWait = 10 * time.Second
const individualWait = 10 * time.Millisecond
for i := 0; i < int(totalWait/individualWait); i++ {
ok = fn()
if ok {
return ok
}
time.Sleep(individualWait)
}
return false
}
func (r *run) mountFs(t *testing.T, f fs.Fs) {
// FIXME implement cmount
t.Skip("windows not supported yet")
device := f.Name() + ":" + f.Root()
options := []string{
"-o", "fsname=" + device,
"-o", "subtype=rclone",
"-o", fmt.Sprintf("max_readahead=%d", mountlib.MaxReadAhead),
"-o", "uid=-1",
"-o", "gid=-1",
"-o", "allow_other",
// This causes FUSE to supply O_TRUNC with the Open
// call which is more efficient for cmount. However
// it does not work with cgofuse on Windows with
// WinFSP so cmount must work with or without it.
"-o", "atomic_o_trunc",
"--FileSystemName=rclone",
}
fsys := cmount.NewFS(f)
host := fuse.NewFileSystemHost(fsys)
// Serve the mount point in the background returning error to errChan
r.unmountRes = make(chan error, 1)
go func() {
var err error
ok := host.Mount(r.mntDir, options)
if !ok {
err = errors.New("mount failed")
}
r.unmountRes <- err
}()
// unmount
r.unmountFn = func() error {
// Shutdown the VFS
fsys.VFS.Shutdown()
if host.Unmount() {
if !waitFor(func() bool {
_, err := os.Stat(r.mntDir)
return err != nil
}) {
t.Fatalf("mountpoint %q didn't disappear after unmount - continuing anyway", r.mntDir)
}
return nil
}
return errors.New("host unmount failed")
}
// Wait for the filesystem to become ready, checking the file
// system didn't blow up before starting
select {
case err := <-r.unmountRes:
require.NoError(t, err)
case <-time.After(time.Second * 3):
}
// Wait for the mount point to be available on Windows
// On Windows the Init signal comes slightly before the mount is ready
if !waitFor(func() bool {
_, err := os.Stat(r.mntDir)
return err == nil
}) {
t.Errorf("mountpoint %q didn't become available on mount", r.mntDir)
}
r.vfs = fsys.VFS
r.isMounted = true
}
func (r *run) unmountFs(t *testing.T, f fs.Fs) {
// FIXME implement cmount
t.Skip("windows not supported yet")
var err error
for i := 0; i < 4; i++ {
err = r.unmountFn()
if err != nil {
//log.Printf("signal to umount failed - retrying: %v", err)
time.Sleep(3 * time.Second)
continue
}
break
}
require.NoError(t, err)
err = <-r.unmountRes
require.NoError(t, err)
err = r.vfs.CleanUp()
require.NoError(t, err)
r.isMounted = false
}

View File

@@ -1,6 +1,6 @@
// Test Cache filesystem interface
// +build !plan9,!js
// +build !plan9
// +build !race
package cache_test

View File

@@ -1,6 +1,6 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build plan9 js
// +build plan9
package cache

View File

@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9
// +build !race
package cache_test

View File

@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9
package cache

View File

@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9
package cache

View File

@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9
package cache

View File

@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9
package cache

View File

@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9
package cache

View File

@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9
package cache

View File

@@ -24,7 +24,6 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
@@ -121,8 +120,6 @@ const maxTransactionProbes = 100
// standard chunker errors
var (
ErrChunkOverflow = errors.New("chunk number overflow")
ErrMetaTooBig = errors.New("metadata is too big")
ErrMetaUnknown = errors.New("unknown metadata, please upgrade rclone")
)
// variants of baseMove's parameter delMode
@@ -241,18 +238,15 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.New("can't point remote at itself - check the value of the remote setting")
}
baseName, basePath, err := fspath.Parse(remote)
baseInfo, baseName, basePath, baseConfig, err := fs.ConfigFs(remote)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
}
if baseName != "" {
baseName += ":"
}
// Look for a file first
remotePath := fspath.JoinRootPath(basePath, rpath)
baseFs, err := cache.Get(baseName + remotePath)
baseFs, err := baseInfo.NewFs(baseName, remotePath, baseConfig)
if err != fs.ErrorIsFile && err != nil {
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", baseName+remotePath)
return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", baseName, remotePath)
}
if !operations.CanServerSideMove(baseFs) {
return nil, errors.New("can't use chunker on a backend which doesn't support server side move or copy")
@@ -264,7 +258,6 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
root: rpath,
opt: *opt,
}
cache.PinUntilFinalized(f.base, f)
f.dirSort = true // processEntries requires that meta Objects prerun data chunks atm.
if err := f.configure(opt.NameFormat, opt.MetaFormat, opt.HashType); err != nil {
@@ -278,7 +271,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
// (yet can't satisfy fstest.CheckListing, will ignore)
if err == nil && !f.useMeta && strings.Contains(rpath, "/") {
firstChunkPath := f.makeChunkName(remotePath, 0, "", "")
_, testErr := cache.Get(baseName + firstChunkPath)
_, testErr := baseInfo.NewFs(baseName, firstChunkPath, baseConfig)
if testErr == fs.ErrorIsFile {
err = testErr
}
@@ -298,8 +291,6 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
ServerSideAcrossConfigs: true,
}).Fill(f).Mask(baseFs).WrapsFs(f, baseFs)
f.features.Disable("ListR") // Recursive listing may cause chunker skip files
return f, err
}
@@ -695,47 +686,43 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
switch entry := dirOrObject.(type) {
case fs.Object:
remote := entry.Remote()
mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(remote)
if mainRemote == "" {
// this is meta object or standalone file
object := f.newObject("", entry, nil)
byRemote[remote] = object
tempEntries = append(tempEntries, object)
break
}
// this is some kind of chunk
// meta object should have been created above if present
isSpecial := xactID != "" || ctrlType != ""
mainObject := byRemote[mainRemote]
if mainObject == nil && f.useMeta && !isSpecial {
fs.Debugf(f, "skip orphan data chunk %q", remote)
break
}
if mainObject == nil && !f.useMeta {
// this is the "nometa" case
// create dummy chunked object without metadata
mainObject = f.newObject(mainRemote, nil, nil)
byRemote[mainRemote] = mainObject
if !badEntry[mainRemote] {
tempEntries = append(tempEntries, mainObject)
if mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(remote); mainRemote != "" {
if xactID != "" {
if revealHidden {
fs.Infof(f, "ignore temporary chunk %q", remote)
}
break
}
}
if isSpecial {
if revealHidden {
fs.Infof(f, "ignore non-data chunk %q", remote)
if ctrlType != "" {
if revealHidden {
fs.Infof(f, "ignore control chunk %q", remote)
}
break
}
// need to read metadata to ensure actual object type
if f.useMeta && mainObject != nil && mainObject.size <= maxMetadataSize {
mainObject.unsure = true
mainObject := byRemote[mainRemote]
if mainObject == nil && f.useMeta {
fs.Debugf(f, "skip chunk %q without meta object", remote)
break
}
if mainObject == nil {
// useMeta is false - create chunked object without metadata
mainObject = f.newObject(mainRemote, nil, nil)
byRemote[mainRemote] = mainObject
if !badEntry[mainRemote] {
tempEntries = append(tempEntries, mainObject)
}
}
if err := mainObject.addChunk(entry, chunkNo); err != nil {
if f.opt.FailHard {
return nil, err
}
badEntry[mainRemote] = true
}
break
}
if err := mainObject.addChunk(entry, chunkNo); err != nil {
if f.opt.FailHard {
return nil, err
}
badEntry[mainRemote] = true
}
object := f.newObject("", entry, nil)
byRemote[remote] = object
tempEntries = append(tempEntries, object)
case fs.Directory:
isSubdir[entry.Remote()] = true
wrapDir := fs.NewDirCopy(ctx, entry)
@@ -790,13 +777,6 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
// but opening even a small file can be slow on some backends.
//
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.scanObject(ctx, remote, false)
}
// scanObject is like NewObject with optional quick scan mode.
// The quick mode avoids directory requests other than `List`,
// ignores non-chunked objects and skips chunk size checks.
func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.Object, error) {
if err := f.forbidChunk(false, remote); err != nil {
return nil, errors.Wrap(err, "can't access")
}
@@ -857,15 +837,8 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
continue // bypass regexp to save cpu
}
mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(entryRemote)
if mainRemote == "" || mainRemote != remote {
continue // skip non-conforming chunks
}
if ctrlType != "" || xactID != "" {
if f.useMeta {
// temporary/control chunk calls for lazy metadata read
o.unsure = true
}
continue
if mainRemote == "" || mainRemote != remote || ctrlType != "" || xactID != "" {
continue // skip non-conforming, temporary and control chunks
}
//fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
if err := o.addChunk(entry, chunkNo); err != nil {
@@ -875,7 +848,7 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
if o.main == nil && (o.chunks == nil || len(o.chunks) == 0) {
// Scanning hasn't found data chunks with conforming names.
if f.useMeta || quickScan {
if f.useMeta {
// Metadata is required but absent and there are no chunks.
return nil, fs.ErrorObjectNotFound
}
@@ -898,10 +871,8 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
// file without metadata. Validate it and update the total data size.
// As an optimization, skip metadata reading here - we will call
// readMetadata lazily when needed (reading can be expensive).
if !quickScan {
if err := o.validate(); err != nil {
return nil, err
}
if err := o.validate(); err != nil {
return nil, err
}
return o, nil
}
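For orientation, the chunk names parsed by parseChunkName above follow the chunker's configured name format. A minimal standalone sketch of that parsing, assuming the default `*.rclone_chunk.###` format and ignoring the control-type and transaction-ID suffixes the real parser also extracts:

package sketch

import (
	"regexp"
	"strconv"
)

// chunkRe matches "<main name>.rclone_chunk.<number>" (assumed default format).
var chunkRe = regexp.MustCompile(`^(.+)\.rclone_chunk\.(\d{3,})$`)

// parseChunk is a toy stand-in for parseChunkName.
func parseChunk(remote string) (mainRemote string, chunkNo int, ok bool) {
	m := chunkRe.FindStringSubmatch(remote)
	if m == nil {
		return "", 0, false
	}
	chunkNo, err := strconv.Atoi(m[2])
	if err != nil {
		return "", 0, false
	}
	return m[1], chunkNo, true
}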
@@ -910,24 +881,13 @@ func (o *Object) readMetadata(ctx context.Context) error {
if o.isFull {
return nil
}
if !o.f.useMeta || (!o.isComposite() && !o.unsure) {
if !o.isComposite() || !o.f.useMeta {
o.isFull = true
return nil
}
// validate metadata
metaObject := o.main
if metaObject.Size() > maxMetadataSize {
if o.unsure {
// this is not metadata but a foreign object
o.unsure = false
o.chunks = nil // make isComposite return false
o.isFull = true // cache results
return nil
}
return ErrMetaTooBig
}
reader, err := metaObject.Open(ctx)
if err != nil {
return err
@@ -940,22 +900,8 @@ func (o *Object) readMetadata(ctx context.Context) error {
switch o.f.opt.MetaFormat {
case "simplejson":
metaInfo, madeByChunker, err := unmarshalSimpleJSON(ctx, metaObject, metadata)
if o.unsure {
o.unsure = false
if !madeByChunker {
// this is not metadata but a foreign object
o.chunks = nil // make isComposite return false
o.isFull = true // cache results
return nil
}
}
switch err {
case nil:
// fall thru
case ErrMetaTooBig, ErrMetaUnknown:
return err // return these errors unwrapped for unit tests
default:
metaInfo, err := unmarshalSimpleJSON(ctx, metaObject, metadata, true)
if err != nil {
return errors.Wrap(err, "invalid metadata")
}
if o.size != metaInfo.Size() || len(o.chunks) != metaInfo.nChunks {
@@ -970,27 +916,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
}
// put implements Put, PutStream, PutUnchecked, Update
func (f *Fs) put(
ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options []fs.OpenOption,
basePut putFn, action string, target fs.Object) (obj fs.Object, err error) {
if err := f.forbidChunk(src, remote); err != nil {
return nil, errors.Wrap(err, action+" refused")
}
if target == nil {
// Get target object with a quick directory scan
if obj, err := f.scanObject(ctx, remote, true); err == nil {
target = obj
}
}
if target != nil {
obj := target.(*Object)
if err := obj.readMetadata(ctx); err == ErrMetaUnknown {
// refuse to update a file of unsupported format
return nil, errors.Wrap(err, "refusing to "+action)
}
}
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options []fs.OpenOption, basePut putFn) (obj fs.Object, err error) {
c := f.newChunkingReader(src)
wrapIn := c.wrapStream(ctx, in, src)
@@ -1027,8 +953,6 @@ func (f *Fs) put(
}
info := f.wrapInfo(src, chunkRemote, size)
// Refill chunkLimit and let basePut repeatedly call chunkingReader.Read()
c.chunkLimit = c.chunkSize
// TODO: handle range/limit options
chunk, errChunk := basePut(ctx, wrapIn, info, options...)
if errChunk != nil {
@@ -1080,8 +1004,8 @@ func (f *Fs) put(
// Check for input that looks like valid metadata
needMeta := len(c.chunks) > 1
if c.readCount <= maxMetadataSize && len(c.chunks) == 1 {
_, madeByChunker, _ := unmarshalSimpleJSON(ctx, c.chunks[0], c.smallHead)
needMeta = madeByChunker
_, err := unmarshalSimpleJSON(ctx, c.chunks[0], c.smallHead, false)
needMeta = err == nil
}
// Finalize small object as non-chunked.
@@ -1237,14 +1161,10 @@ func (c *chunkingReader) updateHashes() {
func (c *chunkingReader) Read(buf []byte) (bytesRead int, err error) {
if c.chunkLimit <= 0 {
// Chunk complete - switch to next one.
// Note #1:
// We might not get here because some remotes (eg. box multi-uploader)
// read the specified size exactly and skip the concluding EOF Read.
// Then a check in the put loop will kick in.
// Note #2:
// The crypt backend after receiving EOF here will call Read again
// and we must insist on returning EOF, so we postpone refilling
// chunkLimit to the main loop.
c.chunkLimit = c.chunkSize
return 0, io.EOF
}
if int64(len(buf)) > c.chunkLimit {
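The hunk above ends mid-function, but the boundary behaviour the deleted notes describe is easy to show in isolation. A minimal sketch (names invented here): once the chunk budget is spent, Read keeps answering io.EOF until the caller's put loop refills the limit, which copes both with uploaders that skip the concluding EOF read and with the crypt backend reading twice at EOF.

package sketch

import "io"

type chunkReader struct {
	src   io.Reader
	limit int64 // bytes remaining in the current chunk
}

func (c *chunkReader) Read(buf []byte) (int, error) {
	if c.limit <= 0 {
		return 0, io.EOF // repeated calls at the boundary keep returning EOF
	}
	if int64(len(buf)) > c.limit {
		buf = buf[:c.limit]
	}
	n, err := c.src.Read(buf)
	c.limit -= int64(n)
	return n, err
}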
@@ -1328,16 +1248,29 @@ func (f *Fs) removeOldChunks(ctx context.Context, remote string) {
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.put(ctx, in, src, src.Remote(), options, f.base.Put, "put", nil)
if err := f.forbidChunk(src, src.Remote()); err != nil {
return nil, errors.Wrap(err, "refusing to put")
}
return f.put(ctx, in, src, src.Remote(), options, f.base.Put)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.put(ctx, in, src, src.Remote(), options, f.base.Features().PutStream, "upload", nil)
if err := f.forbidChunk(src, src.Remote()); err != nil {
return nil, errors.Wrap(err, "refusing to upload")
}
return f.put(ctx, in, src, src.Remote(), options, f.base.Features().PutStream)
}
// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
if err := o.f.forbidChunk(o, o.Remote()); err != nil {
return errors.Wrap(err, "update refused")
}
if err := o.readMetadata(ctx); err != nil {
// refuse to update a file of unsupported format
return errors.Wrap(err, "refusing to update")
}
basePut := o.f.base.Put
if src.Size() < 0 {
basePut = o.f.base.Features().PutStream
@@ -1345,7 +1278,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return errors.New("wrapped file system does not support streaming uploads")
}
}
oNew, err := o.f.put(ctx, in, src, o.Remote(), options, basePut, "update", o)
oNew, err := o.f.put(ctx, in, src, o.Remote(), options, basePut)
if err == nil {
*o = *oNew.(*Object)
}
@@ -1400,7 +1333,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.base.Rmdir(ctx, dir)
}
// Purge all files in the directory
// Purge all files in the root and the root directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
@@ -1411,12 +1344,12 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// As a result it removes not only composite chunker files with their
// active chunks but also all hidden temporary chunks in the directory.
//
func (f *Fs) Purge(ctx context.Context, dir string) error {
func (f *Fs) Purge(ctx context.Context) error {
do := f.base.Features().Purge
if do == nil {
return fs.ErrorCantPurge
}
return do(ctx, dir)
return do(ctx)
}
// Remove an object (chunks and metadata, if any)
@@ -1459,7 +1392,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
// to corrupt file in hard mode. Hence, refuse to Remove, too.
return errors.Wrap(err, "refuse to corrupt")
}
if err := o.readMetadata(ctx); err == ErrMetaUnknown {
if err := o.readMetadata(ctx); err != nil {
// Proceed but warn user that unexpected things can happen.
fs.Errorf(o, "Removing a file with unsupported metadata: %v", err)
}
@@ -1487,11 +1420,6 @@ func (f *Fs) copyOrMove(ctx context.Context, o *Object, remote string, do copyMo
if err := f.forbidChunk(o, remote); err != nil {
return nil, errors.Wrapf(err, "can't %s", opName)
}
if err := o.readMetadata(ctx); err != nil {
// Refuse to copy/move composite files with invalid or future
// metadata format which might involve unsupported chunk types.
return nil, errors.Wrapf(err, "can't %s this file", opName)
}
if !o.isComposite() {
fs.Debugf(o, "%s non-chunked object...", opName)
oResult, err := do(ctx, o.mainChunk(), remote) // chain operation to a single wrapped chunk
@@ -1500,6 +1428,11 @@ func (f *Fs) copyOrMove(ctx context.Context, o *Object, remote string, do copyMo
}
return f.newObject("", oResult, nil), nil
}
if err := o.readMetadata(ctx); err != nil {
// Refuse to copy/move composite files with invalid or future
// metadata format which might involve unsupported chunk types.
return nil, errors.Wrapf(err, "can't %s this file", opName)
}
fs.Debugf(o, "%s %d data chunks...", opName, len(o.chunks))
mainRemote := o.remote
@@ -1590,10 +1523,6 @@ func (f *Fs) okForServerSide(ctx context.Context, src fs.Object, opName string)
return
}
if obj.unsure {
// ensure object is composite if need to re-read metadata
_ = obj.readMetadata(ctx)
}
requireMetaHash := obj.isComposite() && f.opt.MetaFormat == "simplejson"
if !requireMetaHash && !f.hashAll {
ok = true // hash is not required for metadata
@@ -1777,7 +1706,6 @@ type Object struct {
chunks []fs.Object // active data chunks if file is composite, or wrapped file as a single chunk if meta format is 'none'
size int64 // cached total size of chunks in a composite file or -1 for non-chunked files
isFull bool // true if metadata has been read
unsure bool // true if need to read metadata to detect object type
md5 string
sha1 string
f *Fs
@@ -1928,16 +1856,15 @@ func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
// on the level of wrapped remote but chunker is unaware of that.
//
func (o *Object) Hash(ctx context.Context, hashType hash.Type) (string, error) {
if err := o.readMetadata(ctx); err != nil {
return "", err // valid metadata is required to get hash, abort
}
if !o.isComposite() {
// First, chain to the wrapped non-chunked file if possible.
if value, err := o.mainChunk().Hash(ctx, hashType); err == nil && value != "" {
return value, nil
}
}
if err := o.readMetadata(ctx); err != nil {
return "", err // valid metadata is required to get hash, abort
}
// Try hash from metadata if the file is composite or if wrapped remote fails.
switch hashType {
case hash.MD5:
@@ -1962,13 +1889,13 @@ func (o *Object) UnWrap() fs.Object {
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
if !o.isComposite() {
return o.mainChunk().Open(ctx, options...) // chain to wrapped non-chunked file
}
if err := o.readMetadata(ctx); err != nil {
// refuse to open unsupported format
return nil, errors.Wrap(err, "can't open")
}
if !o.isComposite() {
return o.mainChunk().Open(ctx, options...) // chain to wrapped non-chunked file
}
var openOptions []fs.OpenOption
var offset, limit int64 = 0, -1
@@ -2241,57 +2168,57 @@ func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1 s
// handled by the current implementation.
// The version check below will then explicitly ask the user to upgrade rclone.
//
func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte) (info *ObjectInfo, madeByChunker bool, err error) {
func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte, strictChecks bool) (info *ObjectInfo, err error) {
// Be strict about JSON format
// to reduce possibility that a random small file resembles metadata.
if data != nil && len(data) > maxMetadataSize {
return nil, false, ErrMetaTooBig
return nil, errors.New("too big")
}
if data == nil || len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
return nil, false, errors.New("invalid json")
return nil, errors.New("invalid json")
}
var metadata metaSimpleJSON
err = json.Unmarshal(data, &metadata)
if err != nil {
return nil, false, err
return nil, err
}
// Basic fields are strictly required
// to reduce possibility that a random small file resembles metadata.
if metadata.Version == nil || metadata.Size == nil || metadata.ChunkNum == nil {
return nil, false, errors.New("missing required field")
return nil, errors.New("missing required field")
}
// Perform strict checks, avoid corruption of future metadata formats.
if *metadata.Version < 1 {
return nil, false, errors.New("wrong version")
return nil, errors.New("wrong version")
}
if *metadata.Size < 0 {
return nil, false, errors.New("negative file size")
return nil, errors.New("negative file size")
}
if *metadata.ChunkNum < 0 {
return nil, false, errors.New("negative number of chunks")
return nil, errors.New("negative number of chunks")
}
if *metadata.ChunkNum > maxSafeChunkNumber {
return nil, true, ErrChunkOverflow // produced by incompatible version of rclone
return nil, ErrChunkOverflow
}
if metadata.MD5 != "" {
_, err = hex.DecodeString(metadata.MD5)
if len(metadata.MD5) != 32 || err != nil {
return nil, false, errors.New("wrong md5 hash")
return nil, errors.New("wrong md5 hash")
}
}
if metadata.SHA1 != "" {
_, err = hex.DecodeString(metadata.SHA1)
if len(metadata.SHA1) != 40 || err != nil {
return nil, false, errors.New("wrong sha1 hash")
return nil, errors.New("wrong sha1 hash")
}
}
// ChunkNum is allowed to be 0 in future versions
if *metadata.ChunkNum < 1 && *metadata.Version <= metadataVersion {
return nil, false, errors.New("wrong number of chunks")
return nil, errors.New("wrong number of chunks")
}
// Non-strict mode also accepts future metadata versions
if *metadata.Version > metadataVersion {
return nil, true, ErrMetaUnknown // produced by incompatible version of rclone
if *metadata.Version > metadataVersion && strictChecks {
return nil, fmt.Errorf("version %d is not supported, please upgrade rclone", *metadata.Version)
}
var nilFs *Fs // nil object triggers appropriate type method
@@ -2299,7 +2226,7 @@ func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte)
info.nChunks = *metadata.ChunkNum
info.md5 = metadata.MD5
info.sha1 = metadata.SHA1
return info, true, nil
return info, nil
}
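For reference, a sketch of the strict shape this parser enforces: braces at both ends plus required pointer fields, so a random small file is unlikely to pass as chunker metadata. The JSON tag names below are assumptions for illustration (metaSimpleJSON holds the real ones):

package sketch

import (
	"encoding/json"
	"errors"
)

type metaSketch struct {
	Version  *int   `json:"ver"`
	Size     *int64 `json:"size"`
	ChunkNum *int   `json:"nchunks"`
}

func parseStrict(data []byte) (*metaSketch, error) {
	if len(data) < 2 || data[0] != '{' || data[len(data)-1] != '}' {
		return nil, errors.New("invalid json")
	}
	var m metaSketch
	if err := json.Unmarshal(data, &m); err != nil {
		return nil, err
	}
	if m.Version == nil || m.Size == nil || m.ChunkNum == nil {
		return nil, errors.New("missing required field")
	}
	return &m, nil
}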
func silentlyRemove(ctx context.Context, o fs.Object) {


@@ -12,7 +12,6 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
@@ -159,25 +158,24 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
}
wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
}
// Make sure to remove trailing . referring to the current dir
if path.Base(rpath) == "." {
rpath = strings.TrimSuffix(rpath, ".")
}
// Look for a file first
var wrappedFs fs.Fs
if rpath == "" {
wrappedFs, err = cache.Get(remote)
} else {
remotePath := fspath.JoinRootPath(remote, cipher.EncryptFileName(rpath))
wrappedFs, err = cache.Get(remotePath)
// if that didn't produce a file, look for a directory
if err != fs.ErrorIsFile {
remotePath = fspath.JoinRootPath(remote, cipher.EncryptDirName(rpath))
wrappedFs, err = cache.Get(remotePath)
}
remotePath := fspath.JoinRootPath(wPath, cipher.EncryptFileName(rpath))
wrappedFs, err := wInfo.NewFs(wName, remotePath, wConfig)
// if that didn't produce a file, look for a directory
if err != fs.ErrorIsFile {
remotePath = fspath.JoinRootPath(wPath, cipher.EncryptDirName(rpath))
wrappedFs, err = wInfo.NewFs(wName, remotePath, wConfig)
}
if err != fs.ErrorIsFile && err != nil {
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remote)
return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", wName, remotePath)
}
f := &Fs{
Fs: wrappedFs,
@@ -186,7 +184,6 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
opt: *opt,
cipher: cipher,
}
cache.PinUntilFinalized(f.Fs, f)
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
f.features = (&fs.Features{
@@ -430,18 +427,18 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.Fs.Rmdir(ctx, f.cipher.EncryptDirName(dir))
}
// Purge all files in the directory specified
// Purge all files in the root and the root directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error {
func (f *Fs) Purge(ctx context.Context) error {
do := f.Fs.Features().Purge
if do == nil {
return fs.ErrorCantPurge
}
return do(ctx, f.cipher.EncryptDirName(dir))
return do(ctx)
}
// Copy src to this remote using server side copy operations.


@@ -17,6 +17,7 @@ import (
"log"
"mime"
"net/http"
"net/url"
"path"
"sort"
"strconv"
@@ -36,7 +37,6 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
@@ -69,7 +69,7 @@ const (
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
minChunkSize = 256 * fs.KibiByte
defaultChunkSize = 8 * fs.MebiByte
partialFields = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks"
partialFields = "id,name,size,md5Checksum,trashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails"
listRGrouping = 50 // number of IDs to search at once when using ListR
listRInputBuffer = 1000 // size of input buffer when using ListR
)
@@ -157,17 +157,6 @@ func driveScopesContainsAppFolder(scopes []string) bool {
return false
}
func driveOAuthOptions() []fs.Option {
opts := []fs.Option{}
for _, opt := range oauthutil.SharedOptions {
if opt.Name == config.ConfigClientID {
opt.Help = "Google Application Client Id\nSetting your own is recommended.\nSee https://rclone.org/drive/#making-your-own-client-id for how to create your own.\nIf you leave this blank, it will use an internal key which is low performance."
}
opts = append(opts, opt)
}
return opts
}
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -203,7 +192,13 @@ func init() {
log.Fatalf("Failed to configure team drive: %v", err)
}
},
Options: append(driveOAuthOptions(), []fs.Option{{
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Google Application Client Id\nSetting your own is recommended.\nSee https://rclone.org/drive/#making-your-own-client-id for how to create your own.\nIf you leave this blank, it will use an internal key which is low performance.",
}, {
Name: config.ConfigClientSecret,
Help: "Google Application Client Secret\nSetting your own is recommended.",
}, {
Name: "scope",
Help: "Scope that rclone should use when requesting access from drive.",
Examples: []fs.OptionExample{{
@@ -229,6 +224,9 @@ Leave blank normally.
Fill in to access "Computers" folders (see docs), or for rclone to use
a non root folder as its starting point.
Note that if this is blank, the first time rclone runs it will fill it
in with the ID of the root folder.
`,
}, {
Name: "service_account_file",
@@ -291,11 +289,6 @@ commands (copy, sync, etc), and with all other commands too.`,
Default: false,
Help: "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.",
Advanced: true,
}, {
Name: "starred_only",
Default: false,
Help: "Only show files that are starred.",
Advanced: true,
}, {
Name: "formats",
Default: "",
@@ -357,15 +350,27 @@ date is used.`,
Help: "Size of listing chunk 100-1000. 0 to disable.",
Advanced: true,
}, {
Name: "impersonate",
Default: "",
Help: `Impersonate this user when using a service account.`,
Name: "impersonate",
Default: "",
Help: `Impersonate this user when using a service account.
Note that if this is used then "root_folder_id" will be ignored.
`,
Advanced: true,
}, {
Name: "alternate_export",
Default: false,
Help: "Deprecated: no longer needed",
Hide: fs.OptionHideBoth,
Help: `Use alternate export URLs for google documents export.
If this option is set this instructs rclone to use an alternate set of
export URLs for drive documents. Users have reported that the
official export URLs can't export large documents, whereas these
unofficial ones can.
See rclone issue [#2243](https://github.com/rclone/rclone/issues/2243) for background,
[this google drive issue](https://issuetracker.google.com/issues/36761333) and
[this helpful post](https://www.labnol.org/internet/direct-links-for-google-drive/28356/).`,
Advanced: true,
}, {
Name: "upload_cutoff",
Default: defaultChunkSize,
@@ -489,7 +494,7 @@ If this flag is set then rclone will ignore shortcut files completely.
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
// Don't encode / as it's a valid name character in drive.
Default: encoder.EncodeInvalidUtf8,
}}...),
}},
})
// register duplicate MIME types first
@@ -519,7 +524,6 @@ type Options struct {
SkipChecksumGphotos bool `config:"skip_checksum_gphotos"`
SharedWithMe bool `config:"shared_with_me"`
TrashedOnly bool `config:"trashed_only"`
StarredOnly bool `config:"starred_only"`
Extensions string `config:"formats"`
ExportExtensions string `config:"export_formats"`
ImportExtensions string `config:"import_formats"`
@@ -528,6 +532,7 @@ type Options struct {
UseSharedDate bool `config:"use_shared_date"`
ListChunk int64 `config:"list_chunk"`
Impersonate string `config:"impersonate"`
AlternateExport bool `config:"alternate_export"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
AcknowledgeAbuse bool `config:"acknowledge_abuse"`
@@ -702,7 +707,6 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
}
query = append(query, q)
}
// Search with sharedWithMe will always return things listed in "Shared With Me" (without any parents)
// We must not filter with parent when we try list "ROOT" with drive-shared-with-me
// If we need to list file inside those shared folders, we must search it without sharedWithMe
@@ -714,16 +718,8 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
if parentsQuery.Len() > 1 {
_, _ = parentsQuery.WriteString(" or ")
}
if (f.opt.SharedWithMe || f.opt.StarredOnly) && dirID == f.rootFolderID {
if f.opt.SharedWithMe {
_, _ = parentsQuery.WriteString("sharedWithMe=true")
}
if f.opt.StarredOnly {
if f.opt.SharedWithMe {
_, _ = parentsQuery.WriteString(" and ")
}
_, _ = parentsQuery.WriteString("starred=true")
}
if f.opt.SharedWithMe && dirID == f.rootFolderID {
_, _ = parentsQuery.WriteString("sharedWithMe=true")
} else {
_, _ = fmt.Fprintf(parentsQuery, "'%s' in parents", dirID)
}
@@ -933,32 +929,55 @@ func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name
if !config.Confirm(false) {
return nil
}
f, err := newFs(name, "", m)
client, err := createOAuthClient(opt, name, m)
if err != nil {
return errors.Wrap(err, "failed to make Fs to list teamdrives")
return errors.Wrap(err, "config team drive failed to create oauth client")
}
svc, err := drive.New(client)
if err != nil {
return errors.Wrap(err, "config team drive failed to make drive client")
}
fmt.Printf("Fetching team drive list...\n")
teamDrives, err := f.listTeamDrives(ctx)
if err != nil {
return err
}
if len(teamDrives) == 0 {
fmt.Printf("No team drives found in your account")
return nil
}
var driveIDs, driveNames []string
for _, teamDrive := range teamDrives {
driveIDs = append(driveIDs, teamDrive.Id)
driveNames = append(driveNames, teamDrive.Name)
listTeamDrives := svc.Teamdrives.List().PageSize(100)
listFailed := false
var defaultFs Fs // default Fs with default Options
for {
var teamDrives *drive.TeamDriveList
err = newPacer(opt).Call(func() (bool, error) {
teamDrives, err = listTeamDrives.Context(ctx).Do()
return defaultFs.shouldRetry(err)
})
if err != nil {
fmt.Printf("Listing team drives failed: %v\n", err)
listFailed = true
break
}
for _, drive := range teamDrives.TeamDrives {
driveIDs = append(driveIDs, drive.Id)
driveNames = append(driveNames, drive.Name)
}
if teamDrives.NextPageToken == "" {
break
}
listTeamDrives.PageToken(teamDrives.NextPageToken)
}
var driveID string
if !listFailed && len(driveIDs) == 0 {
fmt.Printf("No team drives found in your account")
} else {
driveID = config.Choose("Enter a Team Drive ID", driveIDs, driveNames, true)
}
driveID := config.Choose("Enter a Team Drive ID", driveIDs, driveNames, true)
m.Set("team_drive", driveID)
m.Set("root_folder_id", "")
opt.TeamDriveID = driveID
opt.RootFolderID = ""
return nil
}
// newPacer makes a pacer configured for drive
func newPacer(opt *Options) *fs.Pacer {
return fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst)))
}
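The pacer built here is used through the same idiom everywhere in this diff: wrap each API request in pacer.Call and return (retry, err) from the closure. A minimal sketch, with callAPI standing in for any remote request:

package sketch

import (
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/pacer"
)

// callAPI is a hypothetical remote request.
func callAPI() error { return nil }

func example() error {
	p := fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(100 * time.Millisecond)))
	return p.Call(func() (bool, error) {
		err := callAPI()
		// returning true asks the pacer to back off and re-invoke the closure
		return err != nil, err
	})
}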
// getClient makes an http client according to the options
func getClient(opt *Options) *http.Client {
t := fshttp.NewTransportCustom(fs.Config, func(t *http.Transport) {
@@ -1041,11 +1060,9 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
return
}
// newFs partially constructs Fs from the path
//
// It constructs a valid Fs but doesn't attempt to figure out whether
// it is a file or a directory.
func newFs(name, path string, m configmap.Mapper) (*Fs, error) {
// NewFs constructs an Fs from the path, container:path
func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -1075,7 +1092,7 @@ func newFs(name, path string, m configmap.Mapper) (*Fs, error) {
name: name,
root: root,
opt: *opt,
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst))),
pacer: newPacer(opt),
m: m,
grouping: listRGrouping,
listRmu: new(sync.Mutex),
@@ -1105,26 +1122,25 @@ func newFs(name, path string, m configmap.Mapper) (*Fs, error) {
}
}
return f, nil
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
f, err := newFs(name, path, m)
if err != nil {
return nil, err
// If impersonating, warn if root_folder_id is set, then unset it
//
// This is because rclone v1.51 and v1.52 cached root_folder_id when
// using impersonate which they shouldn't have done. It is possible
// someone is using impersonate and root_folder_id in which case this
// breaks their workflow. There isn't an easy way around that.
if opt.RootFolderID != "" && opt.RootFolderID != "appDataFolder" && opt.Impersonate != "" {
fs.Logf(f, "Ignoring cached root_folder_id when using --drive-impersonate")
opt.RootFolderID = ""
}
// Set the root folder ID
if f.opt.RootFolderID != "" {
// use root_folder ID if set
f.rootFolderID = f.opt.RootFolderID
// set root folder for a team drive or query the user root folder
if opt.RootFolderID != "" {
// override root folder if set or cached in the config and not impersonating
f.rootFolderID = opt.RootFolderID
} else if f.isTeamDrive {
// otherwise use team_drive if set
f.rootFolderID = f.opt.TeamDriveID
} else {
// otherwise look up the actual root ID
// Look up the root ID and cache it in the config
rootID, err := f.getRootID()
if err != nil {
if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
@@ -1136,24 +1152,27 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
}
}
f.rootFolderID = rootID
fs.Debugf(f, "root_folder_id = %q - save this in the config to speed up startup", rootID)
// Don't cache the root folder ID if impersonating
if opt.Impersonate == "" {
m.Set("root_folder_id", rootID)
}
}
f.dirCache = dircache.New(f.root, f.rootFolderID, f)
f.dirCache = dircache.New(root, f.rootFolderID, f)
// Parse extensions
if f.opt.Extensions != "" {
if f.opt.ExportExtensions != defaultExportExtensions {
if opt.Extensions != "" {
if opt.ExportExtensions != defaultExportExtensions {
return nil, errors.New("only one of 'formats' and 'export_formats' can be specified")
}
f.opt.Extensions, f.opt.ExportExtensions = "", f.opt.Extensions
opt.Extensions, opt.ExportExtensions = "", opt.Extensions
}
f.exportExtensions, _, err = parseExtensions(f.opt.ExportExtensions, defaultExportExtensions)
f.exportExtensions, _, err = parseExtensions(opt.ExportExtensions, defaultExportExtensions)
if err != nil {
return nil, err
}
_, f.importMimeTypes, err = parseExtensions(f.opt.ImportExtensions)
_, f.importMimeTypes, err = parseExtensions(opt.ImportExtensions)
if err != nil {
return nil, err
}
@@ -1162,7 +1181,7 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(f.root)
newRoot, remote := dircache.SplitPath(root)
tempF := *f
tempF.dirCache = dircache.New(newRoot, f.rootFolderID, &tempF)
tempF.root = newRoot
@@ -1253,7 +1272,20 @@ func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, expor
if err != nil {
return nil, err
}
url := info.ExportLinks[mediaType]
id := actualID(info.Id)
url := fmt.Sprintf("%sfiles/%s/export?mimeType=%s", f.svc.BasePath, id, url.QueryEscape(mediaType))
if f.opt.AlternateExport {
switch info.MimeType {
case "application/vnd.google-apps.drawing":
url = fmt.Sprintf("https://docs.google.com/drawings/d/%s/export/%s", id, extension[1:])
case "application/vnd.google-apps.document":
url = fmt.Sprintf("https://docs.google.com/document/d/%s/export?format=%s", id, extension[1:])
case "application/vnd.google-apps.spreadsheet":
url = fmt.Sprintf("https://docs.google.com/spreadsheets/d/%s/export?format=%s", id, extension[1:])
case "application/vnd.google-apps.presentation":
url = fmt.Sprintf("https://docs.google.com/presentation/d/%s/export/%s", id, extension[1:])
}
}
baseObject := f.newBaseObject(remote+extension, info)
baseObject.bytes = -1
baseObject.mimeType = exportMimeType
@@ -1630,7 +1662,7 @@ func (s listRSlices) Less(i, j int) bool {
// In each cycle it will read up to grouping entries from the in channel without blocking.
// If an error occurs it will be sent to the out channel and the function will return. Once the in channel is closed,
// nil is sent to the out channel and the function returns.
func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listREntry, out chan<- error, cb func(fs.DirEntry) error, sendJob func(listREntry)) {
func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listREntry, out chan<- error, cb func(fs.DirEntry) error) {
var dirs []string
var paths []string
var grouping int32
@@ -1709,19 +1741,26 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listRE
// https://issuetracker.google.com/issues/149522397
if len(dirs) > 1 && !foundItems {
if atomic.SwapInt32(&f.grouping, 1) != 1 {
fs.Debugf(f, "Disabling ListR to work around bug in drive as multi listing (%d) returned no entries", len(dirs))
fs.Logf(f, "Disabling ListR to work around bug in drive as multi listing (%d) returned no entries", len(dirs))
}
var recycled = make([]listREntry, len(dirs))
f.listRmu.Lock()
for i := range dirs {
// Requeue the jobs
job := listREntry{id: dirs[i], path: paths[i]}
sendJob(job)
recycled[i] = listREntry{id: dirs[i], path: paths[i]}
// Make a note of these dirs - if they all turn
// out to be empty then we can re-enable grouping
f.listRempties[dirs[i]] = struct{}{}
}
f.listRmu.Unlock()
fs.Debugf(f, "Recycled %d entries", len(dirs))
// recycle these in the background so we don't deadlock
// the listR runners if they all get here
wg.Add(len(recycled))
go func() {
for _, entry := range recycled {
in <- entry
}
fs.Debugf(f, "Recycled %d entries", len(recycled))
}()
}
// If using a grouping of 1 and dir was empty then check to see if it
// is part of the group that caused grouping to be disabled.
@@ -1735,7 +1774,7 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listRE
// empty so must have made a mistake
if len(f.listRempties) == 0 {
if atomic.SwapInt32(&f.grouping, listRGrouping) != listRGrouping {
fs.Debugf(f, "Re-enabling ListR as previous detection was in error")
fs.Logf(f, "Re-enabling ListR as previous detection was in error")
}
}
}
@@ -1790,33 +1829,21 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
overflow := []listREntry{}
listed := 0
// Send a job to the input channel if not closed. If the job
// won't fit then queue it in the overflow slice.
//
// This will not block if the channel is full.
sendJob := func(job listREntry) {
mu.Lock()
defer mu.Unlock()
if in == nil {
return
}
wg.Add(1)
select {
case in <- job:
default:
overflow = append(overflow, job)
wg.Add(-1)
}
}
// Send the entry to the caller, queueing any directories as new jobs
cb := func(entry fs.DirEntry) error {
if d, isDir := entry.(*fs.Dir); isDir {
job := listREntry{actualID(d.ID()), d.Remote()}
sendJob(job)
}
mu.Lock()
defer mu.Unlock()
if d, isDir := entry.(*fs.Dir); isDir && in != nil {
job := listREntry{actualID(d.ID()), d.Remote()}
select {
case in <- job:
// Adding to the wg after the item has been queued is
// safe here because the callback is only ever invoked
// while a waitgroup is already held.
wg.Add(1)
default:
overflow = append(overflow, job)
}
}
listed++
return list.Add(entry)
}
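Both variants above lean on the same trick: a non-blocking send with an overflow queue, so a runner that is also a producer can never deadlock on its own input channel. The pattern in isolation:

package sketch

type job struct{ id, path string }

// trySend queues the job in overflow instead of blocking when the channel is full.
func trySend(in chan job, overflow *[]job, j job) {
	select {
	case in <- j:
	default:
		*overflow = append(*overflow, j)
	}
}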
@@ -1825,7 +1852,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
in <- listREntry{directoryID, dir}
for i := 0; i < fs.Config.Checkers; i++ {
go f.listRRunner(ctx, &wg, in, out, cb, sendJob)
go f.listRRunner(ctx, &wg, in, out, cb)
}
go func() {
// wait until the all directories are processed
@@ -2191,9 +2218,10 @@ func (f *Fs) delete(ctx context.Context, id string, useTrash bool) error {
})
}
// purgeCheck removes the directory dir; if check is set then it
// refuses to do so if it has anything in it
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
// Rmdir deletes a directory
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
root := path.Join(f.root, dir)
dc := f.dirCache
directoryID, err := dc.FindDir(ctx, dir, false)
@@ -2206,22 +2234,20 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return f.delete(ctx, shortcutID, f.opt.UseTrash)
}
var trashedFiles = false
if check {
found, err := f.list(ctx, []string{directoryID}, "", false, false, true, func(item *drive.File) bool {
if !item.Trashed {
fs.Debugf(dir, "Rmdir: contains file: %q", item.Name)
return true
}
fs.Debugf(dir, "Rmdir: contains trashed file: %q", item.Name)
trashedFiles = true
return false
})
if err != nil {
return err
}
if found {
return errors.Errorf("directory not empty")
found, err := f.list(ctx, []string{directoryID}, "", false, false, true, func(item *drive.File) bool {
if !item.Trashed {
fs.Debugf(dir, "Rmdir: contains file: %q", item.Name)
return true
}
fs.Debugf(dir, "Rmdir: contains trashed file: %q", item.Name)
trashedFiles = true
return false
})
if err != nil {
return err
}
if found {
return errors.Errorf("directory not empty")
}
if root != "" {
// trash the directory if it had trashed files
@@ -2231,8 +2257,6 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
if err != nil {
return err
}
} else if check {
return errors.New("can't purge root directory")
}
f.dirCache.FlushDir(dir)
if err != nil {
@@ -2241,13 +2265,6 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return nil
}
// Rmdir deletes a directory
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, true)
}
// Precision of the object storage system
func (f *Fs) Precision() time.Duration {
return time.Millisecond
@@ -2265,13 +2282,13 @@ func (f *Fs) Precision() time.Duration {
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
var srcObj *baseObject
ext := ""
isDoc := false
readDescription := false
switch src := src.(type) {
case *Object:
srcObj = &src.baseObject
case *documentObject:
srcObj, ext = &src.baseObject, src.ext()
isDoc = true
readDescription = true
case *linkObject:
srcObj, ext = &src.baseObject, src.ext()
default:
@@ -2279,12 +2296,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, fs.ErrorCantCopy
}
// Look to see if there is an existing object before we remove
// the extension from the remote
existingObject, _ := f.NewObject(ctx, remote)
// Adjust the remote name to be without the extension if we
// are about to create a doc.
if ext != "" {
if !strings.HasSuffix(remote, ext) {
fs.Debugf(src, "Can't copy - not same document type")
@@ -2293,12 +2304,15 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
remote = remote[:len(remote)-len(ext)]
}
// Look to see if there is an existing object
existingObject, _ := f.NewObject(ctx, remote)
createInfo, err := f.createFileInfo(ctx, remote, src.ModTime(ctx))
if err != nil {
return nil, err
}
if isDoc {
if readDescription {
// preserve the description on copy for docs
info, err := f.getFile(actualID(srcObj.id), "description")
if err != nil {
@@ -2330,22 +2344,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
if err != nil {
return nil, err
}
// Google docs aren't preserving their mod time after copy, so set them explicitly
// See: https://github.com/rclone/rclone/issues/4517
//
// FIXME remove this when google fixes the problem!
if isDoc {
// A short sleep is needed here in order to make the
// change effective; without it, it is ignored. This is
// probably some eventual consistency nastiness.
sleepTime := 2 * time.Second
fs.Debugf(f, "Sleeping for %v before setting the modtime to work around drive bug - see #4517", sleepTime)
time.Sleep(sleepTime)
err = newObject.SetModTime(ctx, src.ModTime(ctx))
if err != nil {
return nil, err
}
}
if existingObject != nil {
err = existingObject.Remove(ctx)
if err != nil {
@@ -2360,11 +2358,23 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
func (f *Fs) Purge(ctx context.Context) error {
if f.root == "" {
return errors.New("can't purge root directory")
}
if f.opt.TrashedOnly {
return errors.New("Can't purge with --drive-trashed-only. Use delete if you want to selectively delete files")
}
return f.purgeCheck(ctx, dir, false)
rootID, err := f.dirCache.RootID(ctx, false)
if err != nil {
return err
}
err = f.delete(ctx, shortcutID(rootID), f.opt.UseTrash)
f.dirCache.ResetRoot()
if err != nil {
return err
}
return nil
}
// CleanUp empties the trash
@@ -2867,98 +2877,6 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
return dstFs.newObjectWithInfo(dstPath, info)
}
// List all team drives
func (f *Fs) listTeamDrives(ctx context.Context) (drives []*drive.TeamDrive, err error) {
drives = []*drive.TeamDrive{}
listTeamDrives := f.svc.Teamdrives.List().PageSize(100)
var defaultFs Fs // default Fs with default Options
for {
var teamDrives *drive.TeamDriveList
err = f.pacer.Call(func() (bool, error) {
teamDrives, err = listTeamDrives.Context(ctx).Do()
return defaultFs.shouldRetry(err)
})
if err != nil {
return drives, errors.Wrap(err, "listing team drives failed")
}
drives = append(drives, teamDrives.TeamDrives...)
if teamDrives.NextPageToken == "" {
break
}
listTeamDrives.PageToken(teamDrives.NextPageToken)
}
return drives, nil
}
type unTrashResult struct {
Untrashed int
Errors int
}
func (r unTrashResult) Error() string {
return fmt.Sprintf("%d errors while untrashing - see log", r.Errors)
}
// Restore the trashed files from dir, directoryID recursing if needed
func (f *Fs) unTrash(ctx context.Context, dir string, directoryID string, recurse bool) (r unTrashResult, err error) {
directoryID = actualID(directoryID)
fs.Debugf(dir, "finding trash to restore in directory %q", directoryID)
_, err = f.list(ctx, []string{directoryID}, "", false, false, true, func(item *drive.File) bool {
remote := path.Join(dir, item.Name)
if item.ExplicitlyTrashed {
fs.Infof(remote, "restoring %q", item.Id)
if operations.SkipDestructive(ctx, remote, "restore") {
return false
}
update := drive.File{
ForceSendFields: []string{"Trashed"}, // necessary to set false value
Trashed: false,
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.svc.Files.Update(item.Id, &update).
SupportsAllDrives(true).
Fields("trashed").
Do()
return f.shouldRetry(err)
})
if err != nil {
err = errors.Wrap(err, "failed to restore")
r.Errors++
fs.Errorf(remote, "%v", err)
} else {
r.Untrashed++
}
}
if recurse && item.MimeType == "application/vnd.google-apps.folder" {
if !isShortcutID(item.Id) {
rNew, _ := f.unTrash(ctx, remote, item.Id, recurse)
r.Untrashed += rNew.Untrashed
r.Errors += rNew.Errors
}
}
return false
})
if err != nil {
err = errors.Wrap(err, "failed to list directory")
r.Errors++
fs.Errorf(dir, "%v", err)
}
if r.Errors != 0 {
return r, r
}
return r, nil
}
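The core of the restore is the Files.Update call above; note ForceSendFields, without which the false value of Trashed would be dropped from the patch body. A standalone sketch against google.golang.org/api/drive/v3:

package sketch

import drive "google.golang.org/api/drive/v3"

// untrashOne clears the trashed flag on a single file.
func untrashOne(svc *drive.Service, fileID string) error {
	update := drive.File{
		ForceSendFields: []string{"Trashed"}, // needed so the false value is sent
		Trashed:         false,
	}
	_, err := svc.Files.Update(fileID, &update).
		SupportsAllDrives(true).
		Fields("trashed").
		Do()
	return err
}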
// Untrash dir
func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTrashResult, err error) {
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
r.Errors++
return r, err
}
return f.unTrash(ctx, dir, directoryID, true)
}
var commandHelp = []fs.CommandHelp{{
Name: "get",
Short: "Get command for fetching the drive config parameters",
@@ -3010,55 +2928,6 @@ authenticated with "drive2:" can't read files from "drive:".
Opts: map[string]string{
"target": "optional target remote for the shortcut destination",
},
}, {
Name: "drives",
Short: "List the shared drives available to this account",
Long: `This command lists the shared drives (teamdrives) available to this
account.
Usage:
rclone backend drives drive:
This will return a JSON list of objects like this
[
{
"id": "0ABCDEF-01234567890",
"kind": "drive#teamDrive",
"name": "My Drive"
},
{
"id": "0ABCDEFabcdefghijkl",
"kind": "drive#teamDrive",
"name": "Test Drive"
}
]
`,
}, {
Name: "untrash",
Short: "Untrash files and directories",
Long: `This command untrashes all the files and directories in the directory
passed in recursively.
Usage:
This takes an optional directory to untrash, which makes this easier to
use via the API.
rclone backend untrash drive:directory
rclone backend -i untrash drive:directory subdir
Use the -i flag to see what would be restored before restoring it.
Result:
{
"Untrashed": 17,
"Errors": 0
}
`,
}}
// Command the backend to run a named command
@@ -3122,14 +2991,6 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
}
}
return f.makeShortcut(ctx, arg[0], dstFs, arg[1])
case "drives":
return f.listTeamDrives(ctx)
case "untrash":
dir := ""
if len(arg) > 0 {
dir = arg[0]
}
return f.unTrashDir(ctx, dir, true)
default:
return nil, fs.ErrorCommandNotFound
}


@@ -10,16 +10,13 @@ import (
"path/filepath"
"strings"
"testing"
"time"
"github.com/pkg/errors"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/api/drive/v3"
@@ -364,50 +361,6 @@ func (f *Fs) InternalTestShortcuts(t *testing.T) {
})
}
// TestIntegration/FsMkdir/FsPutFiles/Internal/UnTrash
func (f *Fs) InternalTestUnTrash(t *testing.T) {
ctx := context.Background()
// Make some objects, one in a subdir
contents := random.String(100)
file1 := fstest.NewItem("trashDir/toBeTrashed", contents, time.Now())
_, obj1 := fstests.PutTestContents(ctx, t, f, &file1, contents, false)
file2 := fstest.NewItem("trashDir/subdir/toBeTrashed", contents, time.Now())
_, _ = fstests.PutTestContents(ctx, t, f, &file2, contents, false)
// Check objects
checkObjects := func() {
fstest.CheckListingWithRoot(t, f, "trashDir", []fstest.Item{
file1,
file2,
}, []string{
"trashDir/subdir",
}, f.Precision())
}
checkObjects()
// Make sure we are using the trash
require.Equal(t, true, f.opt.UseTrash)
// Remove the object and the dir
require.NoError(t, obj1.Remove(ctx))
require.NoError(t, f.Purge(ctx, "trashDir/subdir"))
// Check objects gone
fstest.CheckListingWithRoot(t, f, "trashDir", []fstest.Item{}, []string{}, f.Precision())
// Restore the object and directory
r, err := f.unTrashDir(ctx, "trashDir", true)
require.NoError(t, err)
assert.Equal(t, unTrashResult{Errors: 0, Untrashed: 2}, r)
// Check objects restored
checkObjects()
// Remove the test dir
require.NoError(t, f.Purge(ctx, "trashDir"))
}
func (f *Fs) InternalTest(t *testing.T) {
// These tests all depend on each other so run them as nested tests
t.Run("DocumentImport", func(t *testing.T) {
@@ -423,7 +376,6 @@ func (f *Fs) InternalTest(t *testing.T) {
})
})
t.Run("Shortcuts", f.InternalTestShortcuts)
t.Run("UnTrash", f.InternalTestUnTrash)
}
var _ fstests.InternalTester = (*Fs)(nil)


@@ -125,7 +125,13 @@ func init() {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Dropbox App Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Dropbox App Client Secret\nLeave blank normally.",
}, {
Name: "chunk_size",
Help: fmt.Sprintf(`Upload chunk size. (< %v).
@@ -155,7 +161,7 @@ memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
encoder.EncodeDel |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8),
}}...),
}},
})
}
@@ -605,9 +611,10 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return err
}
// purgeCheck removes the root directory; if check is set then it
// refuses to do so if it has anything in it
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error) {
// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
root := path.Join(f.slashRoot, dir)
// can't remove root
@@ -615,33 +622,31 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
return errors.New("can't remove root directory")
}
if check {
// check directory exists
_, err = f.getDirMetadata(root)
if err != nil {
return errors.Wrap(err, "Rmdir")
}
// check directory exists
_, err := f.getDirMetadata(root)
if err != nil {
return errors.Wrap(err, "Rmdir")
}
root = f.opt.Enc.FromStandardPath(root)
// check directory empty
arg := files.ListFolderArg{
Path: root,
Recursive: false,
}
if root == "/" {
arg.Path = "" // Specify root folder as empty string
}
var res *files.ListFolderResult
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.ListFolder(&arg)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "Rmdir")
}
if len(res.Entries) != 0 {
return errors.New("directory not empty")
}
root = f.opt.Enc.FromStandardPath(root)
// check directory empty
arg := files.ListFolderArg{
Path: root,
Recursive: false,
}
if root == "/" {
arg.Path = "" // Specify root folder as empty string
}
var res *files.ListFolderResult
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.ListFolder(&arg)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "Rmdir")
}
if len(res.Entries) != 0 {
return errors.New("directory not empty")
}
// remove it
@@ -652,13 +657,6 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
return err
}
// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, true)
}
// Precision returns the precision
func (f *Fs) Precision() time.Duration {
return time.Second
@@ -721,8 +719,15 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) (err error) {
// Let dropbox delete the filesystem tree
err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.DeleteV2(&files.DeleteArg{
Path: f.opt.Enc.FromStandardPath(f.slashRoot),
})
return shouldRetry(err)
})
return err
}
// Move src to this remote using server side move operations.


@@ -6,7 +6,6 @@ import (
"net/http"
"regexp"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
@@ -29,20 +28,6 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
// Detect this error which the integration tests provoke
// error HTTP error 403 (403 Forbidden) returned body: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"
//
// https://1fichier.com/api.html
//
// file/ls.cgi is limited :
//
// Warning (can be changed in case of abuses) :
// List all files of the account is limited to 1 request per hour.
// List folders is limited to 5 000 results and 1 request per folder per 30s.
if err != nil && strings.Contains(err.Error(), "Flood detected") {
fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
time.Sleep(30 * time.Second)
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
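The removed check above is a sleep-before-retry guard keyed on 1fichier's own error body. A simplified sketch of the same idea:

package sketch

import (
	"strings"
	"time"
)

// backoffOnFlood sleeps when 1fichier signals rate limiting, then
// reports that the request should be retried.
func backoffOnFlood(err error) bool {
	if err != nil && strings.Contains(err.Error(), "Flood detected") {
		time.Sleep(30 * time.Second)
		return true
	}
	return false
}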


@@ -323,7 +323,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
if size > int64(300e9) {
if size > int64(100e9) {
return nil, errors.New("File too big, cant upload")
} else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles


@@ -88,7 +88,13 @@ func init() {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Google Application Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Google Application Client Secret\nLeave blank normally.",
}, {
Name: "project_number",
Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
}, {
@@ -255,7 +261,7 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
Default: (encoder.Base |
encoder.EncodeCrLf |
encoder.EncodeInvalidUtf8),
}}...),
}},
})
}
@@ -841,27 +847,20 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
remote: remote,
}
rewriteRequest := f.svc.Objects.Rewrite(srcBucket, srcPath, dstBucket, dstPath, nil)
if !f.opt.BucketPolicyOnly {
rewriteRequest.DestinationPredefinedAcl(f.opt.ObjectACL)
}
var rewriteResponse *storage.RewriteResponse
for {
err = f.pacer.Call(func() (bool, error) {
rewriteResponse, err = rewriteRequest.Context(ctx).Do()
return shouldRetry(err)
})
if err != nil {
return nil, err
var newObject *storage.Object
err = f.pacer.Call(func() (bool, error) {
copyObject := f.svc.Objects.Copy(srcBucket, srcPath, dstBucket, dstPath, nil)
if !f.opt.BucketPolicyOnly {
copyObject.DestinationPredefinedAcl(f.opt.ObjectACL)
}
if rewriteResponse.Done {
break
}
rewriteRequest.RewriteToken(rewriteResponse.RewriteToken)
fs.Debugf(dstObj, "Continuing rewrite %d bytes done", rewriteResponse.TotalBytesRewritten)
newObject, err = copyObject.Context(ctx).Do()
return shouldRetry(err)
})
if err != nil {
return nil, err
}
// Set the metadata for the new object while we have it
dstObj.setMetaData(rewriteResponse.Resource)
dstObj.setMetaData(newObject)
return dstObj, nil
}
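The two sides above differ in which Storage API they call: Objects.Copy is single-shot, while Objects.Rewrite may stop early on large objects and has to be resumed with its token. The rewrite polling loop in isolation, assuming the google.golang.org/api/storage/v1 client:

package sketch

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

// rewriteAll keeps resuming the rewrite with the returned token until
// the service reports Done, then returns the new object.
func rewriteAll(ctx context.Context, svc *storage.Service, srcBucket, srcPath, dstBucket, dstPath string) (*storage.Object, error) {
	call := svc.Objects.Rewrite(srcBucket, srcPath, dstBucket, dstPath, nil)
	for {
		resp, err := call.Context(ctx).Do()
		if err != nil {
			return nil, err
		}
		if resp.Done {
			return resp.Resource, nil
		}
		call.RewriteToken(resp.RewriteToken)
	}
}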


@@ -21,6 +21,7 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
@@ -109,7 +110,13 @@ func init() {
`)
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Google Application Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Google Application Client Secret\nLeave blank normally.",
}, {
Name: "read_only",
Default: false,
Help: `Set to make the Google Photos backend read only.
@@ -132,7 +139,7 @@ you want to read the media.`,
Default: 2000,
Help: `Year limits the photos to be downloaded to those which are uploaded after the given year`,
Advanced: true,
}}...),
}},
})
}


@@ -20,6 +20,7 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/swift"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
@@ -62,7 +63,13 @@ func init() {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
Options: append([]fs.Option{{
Name: config.ConfigClientID,
Help: "Hubic Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Hubic Client Secret\nLeave blank normally.",
}}, swift.SharedOptions...),
})
}


@@ -353,7 +353,7 @@ func doAuthV1(ctx context.Context, srv *rest.Client, username, password string)
authCode = strings.Replace(authCode, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
_, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
resp, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
}
}
}
@@ -373,9 +373,6 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {
fmt.Printf("Login Token> ")
loginToken := config.ReadLine()
m.Set(configClientID, "jottacli")
m.Set(configClientSecret, "")
token, err := doAuthV2(ctx, srv, loginToken, m)
if err != nil {
log.Fatalf("Failed to get oauth token: %s", err)
@@ -387,6 +384,7 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
oauthConfig.ClientID = "jottacli"
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to load oAuthClient: %s", err)
@@ -553,7 +551,7 @@ func (f *Fs) setEndpointURL() {
if f.opt.Mountpoint == "" {
f.opt.Mountpoint = defaultMountpoint
}
f.endpointURL = path.Join(f.user, f.opt.Device, f.opt.Mountpoint)
f.endpointURL = urlPathEscape(path.Join(f.user, f.opt.Device, f.opt.Mountpoint))
}
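
The change above routes the joined endpoint path through urlPathEscape. A quick standard-library illustration of why a raw path.Join is not URL-safe (jottacloud's urlPathEscape helper is assumed to escape each segment rather than the whole string, since whole-string escaping would also encode the separators):

p := path.Join("user", "My Device", "files#new")
fmt.Println(p)                 // user/My Device/files#new - the space and '#' corrupt the request URL
fmt.Println(url.PathEscape(p)) // user%2FMy%20Device%2Ffiles%23new - '/' is escaped too, hence per-segment escaping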
// readMetaDataForPath reads the metadata from the path
@@ -730,9 +728,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath(ctx, "")
if err == fs.ErrorNotAFile {
err = nil
}
return err
})
@@ -1075,8 +1070,8 @@ func (f *Fs) Precision() time.Duration {
}
// Purge deletes all the files and the container
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
}
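
This is the first of many hunks in this compare where the optional Purger interface differs by a directory argument. A minimal sketch of the two shapes (interface names here are illustrative, not rclone's exact declarations):

package sketch

import "context"

// Root-only form: can only purge the root of the Fs.
type PurgerRootOnly interface {
	Purge(ctx context.Context) error
}

// Directory form: callers can purge a subdirectory without
// constructing a new Fs rooted there.
type PurgerWithDir interface {
	Purge(ctx context.Context, dir string) error
}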
// copyOrMove copies or moves directories or files depending on the method parameter
@@ -1092,7 +1087,8 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
return shouldRetry(resp, err)
retry, _ := shouldRetry(resp, err)
return (retry && resp.StatusCode != 500), err
})
if err != nil {
return nil, err
@@ -1196,6 +1192,18 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)
// surprise! jottacloud's dirmove is broken - the api spits out an error but
// the dir gets moved regardless
if apiErr, ok := err.(*api.Error); ok {
if apiErr.StatusCode == 500 {
_, err := f.NewObject(ctx, dstRemote)
if err == fs.ErrorNotAFile {
log.Printf("FIXME: ignoring DirMove error - move succeeded anyway\n")
return nil
}
return err
}
}
if err != nil {
return errors.Wrap(err, "couldn't move directory")
}
@@ -1469,8 +1477,6 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
o.fs.tokenRenewer.Start()
defer o.fs.tokenRenewer.Stop()
size := src.Size()
md5String, err := src.Hash(ctx, hash.MD5)
if err != nil || md5String == "" {

backend/local/aaaa Normal file
View File

View File

@@ -4,7 +4,6 @@ package local
import (
"context"
"os"
"syscall"
"github.com/pkg/errors"
@@ -16,9 +15,6 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var s syscall.Statfs_t
err := syscall.Statfs(f.root, &s)
if err != nil {
if os.IsNotExist(err) {
return nil, fs.ErrorDirNotFound
}
return nil, errors.Wrap(err, "failed to read disk usage")
}
bs := int64(s.Bsize) // nolint: unconvert

View File

@@ -1,4 +1,4 @@
// +build windows plan9 js
// +build windows plan9
package local

View File

@@ -1,4 +1,4 @@
// +build !windows,!plan9,!js
// +build !windows,!plan9
package local
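
Several files in this compare toggle js in their build constraints, as in the two hunks above and the qingstor and cachestats diffs below. For reference, in the old +build syntax a comma means AND and a space means OR:

// +build !plan9,!js
// -> built only when the target is neither plan9 nor js (comma = AND)

// +build plan9 js
// -> built when the target is plan9 or js (space = OR)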

View File

@@ -144,17 +144,6 @@ the OS zeros the file. However sparse files may be undesirable as they
cause disk fragmentation and can be slow to work with.`,
Default: false,
Advanced: true,
}, {
Name: "no_set_modtime",
Help: `Disable setting modtime
Normally rclone updates the modification time of files after they are done
uploading. This can cause permission issues on Linux platforms when
the user rclone is running as does not own the uploaded file, such as
when copying to a CIFS mount owned by another user. If this option is
enabled, rclone will no longer update the modtime after copying a file.`,
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -177,7 +166,6 @@ type Options struct {
CaseSensitive bool `config:"case_sensitive"`
CaseInsensitive bool `config:"case_insensitive"`
NoSparse bool `config:"no_sparse"`
NoSetModTime bool `config:"no_set_modtime"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -554,10 +542,6 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// Precision of the file system
func (f *Fs) Precision() (precision time.Duration) {
if f.opt.NoSetModTime {
return fs.ModTimeNotSupported
}
f.precisionOk.Do(func() {
f.precision = f.readPrecision()
})
@@ -616,25 +600,20 @@ func (f *Fs) readPrecision() (precision time.Duration) {
return
}
// Purge deletes all the files in the directory
// Purge deletes all the files and directories
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
dir = f.localPath(dir)
fi, err := f.lstat(dir)
func (f *Fs) Purge(ctx context.Context) error {
fi, err := f.lstat(f.root)
if err != nil {
// already purged
if os.IsNotExist(err) {
return fs.ErrorDirNotFound
}
return err
}
if !fi.Mode().IsDir() {
return errors.Errorf("can't purge non directory: %q", dir)
return errors.Errorf("can't purge non directory: %q", f.root)
}
return os.RemoveAll(dir)
return os.RemoveAll(f.root)
}
// Move src to this remote using server side move operations.
@@ -899,9 +878,6 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if o.fs.opt.NoSetModTime {
return nil
}
var err error
if o.translatedLink {
err = lChtimes(o.path, modTime, modTime)
@@ -1213,7 +1189,7 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
// Set the file to be a sparse file (important on Windows)
err = file.SetSparse(out)
if err != nil {
fs.Errorf(o, "Failed to set sparse: %v", err)
fs.Debugf(o, "Failed to set sparse: %v", err)
}
}
@@ -1231,15 +1207,6 @@ func (o *Object) setMetadata(info os.FileInfo) {
o.modTime = info.ModTime()
o.mode = info.Mode()
o.fs.objectMetaMu.Unlock()
// On Windows links read as 0 size so set the correct size here
if runtime.GOOS == "windows" && o.translatedLink {
linkdst, err := os.Readlink(o.path)
if err != nil {
fs.Errorf(o, "Failed to read link size: %v", err)
} else {
o.size = int64(len(linkdst))
}
}
}
// Stat an Object into info

View File

@@ -6,6 +6,7 @@ import (
"os"
"path"
"path/filepath"
"runtime"
"testing"
"time"
@@ -88,6 +89,9 @@ func TestSymlink(t *testing.T) {
// Object viewed as symlink
file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
if runtime.GOOS == "windows" {
file2.Size = 0 // symlinks are 0 length under Windows
}
// Object viewed as destination
file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
@@ -117,6 +121,9 @@ func TestSymlink(t *testing.T) {
// Create a symlink
modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
if runtime.GOOS == "windows" {
file3.Size = 0 // symlinks are 0 length under Windows
}
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
if haveLChtimes {
fstest.CheckItems(t, r.Flocal, file1, file2, file3)
@@ -135,7 +142,9 @@ func TestSymlink(t *testing.T) {
o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
require.NoError(t, err)
assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
assert.Equal(t, int64(8), o.Size())
if runtime.GOOS != "windows" {
assert.Equal(t, int64(8), o.Size())
}
// Check that NewObject doesn't see the non suffixed version
_, err = r.Flocal.NewObject(ctx, "symlink2.txt")

View File

@@ -117,7 +117,7 @@ type ListItem struct {
Name string `json:"name"`
Home string `json:"home"`
Size int64 `json:"size"`
Mtime uint64 `json:"mtime,omitempty"`
Mtime int64 `json:"mtime,omitempty"`
Hash string `json:"hash,omitempty"`
VirusScan string `json:"virus_scan,omitempty"`
Tree string `json:"tree,omitempty"`
@@ -159,6 +159,71 @@ type FolderInfoResponse struct {
Email string `json:"email"`
}
// ShardInfoResponse ...
type ShardInfoResponse struct {
Email string `json:"email"`
Body struct {
Video []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"video"`
ViewDirect []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"view_direct"`
WeblinkView []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_view"`
WeblinkVideo []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_video"`
WeblinkGet []struct {
Count int `json:"count"`
URL string `json:"url"`
} `json:"weblink_get"`
Stock []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"stock"`
WeblinkThumbnails []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_thumbnails"`
PublicUpload []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"public_upload"`
Auth []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"auth"`
Web []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"web"`
View []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"view"`
Upload []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"upload"`
Get []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"get"`
Thumbnails []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"thumbnails"`
} `json:"body"`
Time int64 `json:"time"`
Status int `json:"status"`
}
// CleanupResponse ...
type CleanupResponse struct {
Email string `json:"email"`

View File

@@ -37,7 +37,6 @@ import (
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
"github.com/pkg/errors"
@@ -656,14 +655,9 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
if err != nil {
return nil, -1, err
}
mTime := int64(item.Mtime)
if mTime < 0 {
fs.Debugf(f, "Fixing invalid timestamp %d on mailru file %q", mTime, remote)
mTime = 0
}
switch item.Kind {
case "folder":
dir := fs.NewDir(remote, time.Unix(mTime, 0)).SetSize(item.Size)
dir := fs.NewDir(remote, time.Unix(item.Mtime, 0)).SetSize(item.Size)
dirSize := item.Count.Files + item.Count.Folders
return dir, dirSize, nil
case "file":
@@ -677,7 +671,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
hasMetaData: true,
size: item.Size,
mrHash: binHash,
modTime: time.Unix(mTime, 0),
modTime: time.Unix(item.Mtime, 0),
}
return file, -1, nil
default:
@@ -1168,12 +1162,12 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.purgeWithCheck(ctx, dir, true, "rmdir")
}
// Purge deletes all the files in the directory
// Purge deletes all the files and the root directory
// Optional interface: Only implement this if you have a way of deleting
// all the files quicker than just running Remove() on the result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
func (f *Fs) Purge(ctx context.Context) error {
// fs.Debugf(f, ">>> Purge")
return f.purgeWithCheck(ctx, dir, false, "purge")
return f.purgeWithCheck(ctx, "", false, "purge")
}
// purgeWithCheck() removes the root directory.
@@ -1867,30 +1861,30 @@ func (f *Fs) uploadShard(ctx context.Context) (string, error) {
return f.shardURL, nil
}
opts := rest.Opts{
RootURL: api.DispatchServerURL,
Method: "GET",
Path: "/u",
}
var (
res *http.Response
url string
err error
)
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.Call(ctx, &opts)
if err == nil {
url, err = readBodyWord(res)
}
return fserrors.ShouldRetry(err), err
})
token, err := f.accessToken()
if err != nil {
closeBody(res)
return "", err
}
f.shardURL = url
opts := rest.Opts{
Method: "GET",
Path: "/api/m1/dispatcher",
Parameters: url.Values{
"client_id": {api.OAuthClientID},
"access_token": {token},
},
}
var info api.ShardInfoResponse
err = f.pacer.Call(func() (bool, error) {
res, err := f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(res, err, f, &opts)
})
if err != nil {
return "", err
}
f.shardURL = info.Body.Upload[0].URL
f.shardExpiry = time.Now().Add(shardExpirySec * time.Second)
fs.Debugf(f, "new upload shard: %s", f.shardURL)
@@ -2122,18 +2116,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, err
}
start, end, partialRequest := getTransferRange(o.size, options...)
headers := map[string]string{
"Accept": "*/*",
"Content-Type": "application/octet-stream",
}
if partialRequest {
rangeStr := fmt.Sprintf("bytes=%d-%d", start, end-1)
headers["Range"] = rangeStr
// headers["Content-Range"] = rangeStr
headers["Accept-Ranges"] = "bytes"
}
start, end, partial := getTransferRange(o.size, options...)
// TODO: set custom timeouts
opts := rest.Opts{
@@ -2144,7 +2127,10 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
"client_id": {api.OAuthClientID},
"token": {token},
},
ExtraHeaders: headers,
ExtraHeaders: map[string]string{
"Accept": "*/*",
"Range": fmt.Sprintf("bytes=%d-%d", start, end-1),
},
}
var res *http.Response
@@ -2165,36 +2151,18 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, err
}
// Server should respond with Status 206 and Content-Range header to a range
// request. Status 200 (and no Content-Range) means a full-content response.
partialResponse := res.StatusCode == 206
var (
hasher gohash.Hash
wrapStream io.ReadCloser
)
if !partialResponse {
var hasher gohash.Hash
if !partial {
// Cannot check hash of partial download
hasher = mrhash.New()
}
wrapStream = &endHandler{
wrapStream := &endHandler{
ctx: ctx,
stream: res.Body,
hasher: hasher,
o: o,
server: server,
}
if partialRequest && !partialResponse {
fs.Debugf(o, "Server returned full content instead of range")
if start > 0 {
// Discard the beginning of the data
_, err = io.CopyN(ioutil.Discard, wrapStream, start)
if err != nil {
return nil, err
}
}
wrapStream = readers.NewLimitedReadCloser(wrapStream, end-start)
}
return wrapStream, nil
}
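
The Open changes above handle servers that answer a ranged GET with a 200 full-content response instead of 206. A minimal standalone sketch of that fallback using only the standard library (in the diff, rclone's readers.NewLimitedReadCloser plays the limiting role):

package sketch

import (
	"io"
	"io/ioutil"
)

type limitedReadCloser struct {
	io.Reader
	io.Closer
}

// window discards the first start bytes of body and limits the rest
// to end-start bytes, recovering the requested byte range from a
// full-content response.
func window(body io.ReadCloser, start, end int64) (io.ReadCloser, error) {
	if start > 0 {
		if _, err := io.CopyN(ioutil.Discard, body, start); err != nil {
			_ = body.Close()
			return nil, err
		}
	}
	return &limitedReadCloser{Reader: io.LimitReader(body, end-start), Closer: body}, nil
}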

View File

@@ -669,13 +669,13 @@ func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Purge deletes all the files in the directory
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck("", false)
}
// move a file or folder (srcFs, srcRemote, info) to (f, dstRemote)

View File

@@ -410,28 +410,3 @@ func (i *Item) GetParentReference() *ItemReference {
func (i *Item) IsRemote() bool {
return i.RemoteItem != nil
}
// User details for each version
type User struct {
Email string `json:"email"`
ID string `json:"id"`
DisplayName string `json:"displayName"`
}
// LastModifiedBy for each version
type LastModifiedBy struct {
User User `json:"user"`
}
// Version info
type Version struct {
ID string `json:"id"`
LastModifiedDateTime time.Time `json:"lastModifiedDateTime"`
Size int `json:"size"`
LastModifiedBy LastModifiedBy `json:"lastModifiedBy"`
}
// VersionsResponse is returned from /versions
type VersionsResponse struct {
Versions []Version `json:"value"`
}

View File

@@ -14,7 +14,6 @@ import (
"path"
"strconv"
"strings"
"sync"
"time"
"github.com/pkg/errors"
@@ -27,8 +26,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
@@ -241,7 +238,13 @@ func init() {
m.Set(configDriveType, rootItem.ParentReference.DriveType)
config.SaveConfig()
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Microsoft App Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Microsoft App Client Secret\nLeave blank normally.",
}, {
Name: "chunk_size",
Help: `Chunk size to upload files with - must be multiple of 320k (327,680 bytes).
@@ -281,23 +284,6 @@ different Onedrives. Note that this isn't enabled by default
because it isn't easy to tell if it will work between any two
configurations.`,
Advanced: true,
}, {
Name: "no_versions",
Default: false,
Help: `Remove all versions on modifying operations
Onedrive for business creates versions when rclone uploads a new file
overwriting an existing one and when it sets the modification time.
These versions take up space out of the quota.
This flag checks for versions after file upload and setting
modification time and removes all but the last version.
**NB** Onedrive personal can't currently delete versions so don't use
this flag there.
`,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -344,7 +330,7 @@ this flag there.
encoder.EncodeRightSpace |
encoder.EncodeWin |
encoder.EncodeInvalidUtf8),
}}...),
}},
})
}
@@ -355,7 +341,6 @@ type Options struct {
DriveType string `config:"drive_type"`
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
NoVersions bool `config:"no_versions"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -1088,13 +1073,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil
}
// Purge deletes all the files in the directory
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
}
// Move src to this remote using server side move operations.
@@ -1247,10 +1232,6 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return nil, errors.Wrap(err, "about failed")
}
q := drive.Quota
// On (some?) Onedrive sharepoints these are all 0 so return unknown in that case
if q.Total == 0 && q.Used == 0 && q.Deleted == 0 && q.Remaining == 0 {
return &fs.Usage{}, nil
}
usage = &fs.Usage{
Total: fs.NewUsageValue(q.Total), // quota of bytes that can be used
Used: fs.NewUsageValue(q.Used), // bytes in use
@@ -1294,73 +1275,6 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return result.Link.WebURL, nil
}
// CleanUp deletes all the hidden files.
func (f *Fs) CleanUp(ctx context.Context) error {
token := make(chan struct{}, fs.Config.Checkers)
var wg sync.WaitGroup
err := walk.Walk(ctx, f, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
err = entries.ForObjectError(func(obj fs.Object) error {
o, ok := obj.(*Object)
if !ok {
return errors.New("internal error: not a onedrive object")
}
wg.Add(1)
token <- struct{}{}
go func() {
defer func() {
<-token
wg.Done()
}()
err := o.deleteVersions(ctx)
if err != nil {
fs.Errorf(o, "Failed to remove versions: %v", err)
}
}()
return nil
})
wg.Wait()
return err
})
return err
}
// Finds and removes any old versions for o
func (o *Object) deleteVersions(ctx context.Context) error {
opts := newOptsCall(o.id, "GET", "/versions")
var versions api.VersionsResponse
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &versions)
return shouldRetry(resp, err)
})
if err != nil {
return err
}
if len(versions.Versions) < 2 {
return nil
}
for _, version := range versions.Versions[1:] {
err = o.deleteVersion(ctx, version.ID)
if err != nil {
return err
}
}
return nil
}
// Finds and removes any old versions for o
func (o *Object) deleteVersion(ctx context.Context, ID string) error {
if operations.SkipDestructive(ctx, fmt.Sprintf("%s of %s", ID, o.remote), "delete version") {
return nil
}
fs.Infof(o, "removing version %q", ID)
opts := newOptsCall(o.id, "DELETE", "/versions/"+ID)
opts.NoResponse = true
return o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.Call(ctx, &opts)
return shouldRetry(resp, err)
})
}
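
CleanUp above caps the number of concurrent deleteVersions calls with a buffered channel. The same pattern in isolation, as a generic sketch rather than rclone API:

// forEachLimited runs do() over items with at most limit goroutines
// in flight; the buffered channel acts as a counting semaphore.
func forEachLimited(items []string, limit int, do func(string)) {
	token := make(chan struct{}, limit)
	var wg sync.WaitGroup
	for _, it := range items {
		wg.Add(1)
		token <- struct{}{} // acquire a slot
		go func(it string) {
			defer func() {
				<-token // release the slot
				wg.Done()
			}()
			do(it)
		}(it)
	}
	wg.Wait()
}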
// ------------------------------------------------------------
// Fs returns the parent Fs
@@ -1524,13 +1438,6 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
return shouldRetry(resp, err)
})
// Remove versions if required
if o.fs.opt.NoVersions {
err := o.deleteVersions(ctx)
if err != nil {
fs.Errorf(o, "Failed to remove versions: %v", err)
}
}
return info, err
}
@@ -1837,14 +1744,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
// If updating the file then remove versions
if o.fs.opt.NoVersions && o.hasMetaData {
err = o.deleteVersions(ctx)
if err != nil {
fs.Errorf(o, "Failed to remove versions: %v", err)
}
}
return o.setMetaData(info)
}
@@ -1941,7 +1840,6 @@ var (
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}

View File

@@ -506,13 +506,13 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return nil
}
// Purge deletes all the files in the directory
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
}
// Return an Object from a path
@@ -646,6 +646,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
400, // Bad request (seen in "Next token is expired")
401, // Unauthorized (seen in "Token has expired")
408, // Request Timeout
423, // Locked - get this on folders sometimes

View File

@@ -104,9 +104,8 @@ type ItemResult struct {
// Hashes contains the supported hashes
type Hashes struct {
SHA1 string `json:"sha1"`
MD5 string `json:"md5"`
SHA256 string `json:"sha256"`
SHA1 string `json:"sha1"`
MD5 string `json:"md5"`
}
// UploadFileResponse is the response from /uploadfile

View File

@@ -103,7 +103,13 @@ func init() {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Pcloud App Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Pcloud App Client Secret\nLeave blank normally.",
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
@@ -122,20 +128,10 @@ func init() {
Name: "hostname",
Help: `Hostname to connect to.
This is normally set when rclone initially does the oauth connection,
however you will need to set it by hand if you are using remote config
with rclone authorize.
`,
This is normally set when rclone initially does the oauth connection.`,
Default: defaultHostname,
Advanced: true,
Examples: []fs.OptionExample{{
Value: defaultHostname,
Help: "Original/US region",
}, {
Value: "eapi.pcloud.com",
Help: "EU region",
}},
}}...),
}},
})
}
@@ -675,13 +671,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil
}
// Purge deletes all the files in the directory
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
}
// CleanUp empties the trash
@@ -824,19 +820,14 @@ func (f *Fs) linkDir(ctx context.Context, dirID string, expire fs.Duration) (str
}
func (f *Fs) linkFile(ctx context.Context, path string, expire fs.Duration) (string, error) {
obj, err := f.NewObject(ctx, path)
if err != nil {
return "", err
}
o := obj.(*Object)
opts := rest.Opts{
Method: "POST",
Path: "/getfilepublink",
Parameters: url.Values{},
}
var result api.PubLinkResult
opts.Parameters.Set("fileid", fileIDtoNumber(o.id))
err = f.pacer.Call(func() (bool, error) {
opts.Parameters.Set("path", path)
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
@@ -849,6 +840,11 @@ func (f *Fs) linkFile(ctx context.Context, path string, expire fs.Duration) (str
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
err := f.dirCache.FindRoot(ctx, false)
if err != nil {
return "", err
}
dirID, err := f.dirCache.FindDir(ctx, remote, false)
if err == fs.ErrorDirNotFound {
return f.linkFile(ctx, remote, expire)
@@ -885,13 +881,6 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
// EU region supports SHA1 and SHA256 (but rclone doesn't
// support SHA256 yet).
//
// https://forum.rclone.org/t/pcloud-to-local-no-hashes-in-common/19440
if f.opt.Hostname == "eapi.pcloud.com" {
return hash.Set(hash.SHA1)
}
return hash.Set(hash.MD5 | hash.SHA1)
}

View File

@@ -609,13 +609,13 @@ func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Purge deletes all the files in the directory
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
}
// move a file or folder

View File

@@ -458,9 +458,10 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
return err
}
// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error) {
// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
// defer log.Trace(f, "dir=%v", dir)("err=%v", &err)
root := strings.Trim(path.Join(f.root, dir), "/")
@@ -477,20 +478,18 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
}
dirID := atoi(directoryID)
if check {
// check directory empty
var children []putio.File
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "listing files: %d", dirID)
children, _, err = f.client.Files.List(ctx, dirID)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "Rmdir")
}
if len(children) != 0 {
return errors.New("directory not empty")
}
// check directory empty
var children []putio.File
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "listing files: %d", dirID)
children, _, err = f.client.Files.List(ctx, dirID)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "Rmdir")
}
if len(children) != 0 {
return errors.New("directory not empty")
}
// remove it
@@ -503,26 +502,35 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
return err
}
// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
return f.purgeCheck(ctx, dir, true)
}
// Precision returns the precision
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Purge deletes all the files in the directory
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
func (f *Fs) Purge(ctx context.Context) (err error) {
// defer log.Trace(f, "")("err=%v", &err)
return f.purgeCheck(ctx, dir, false)
if f.root == "" {
return errors.New("can't purge root directory")
}
rootIDs, err := f.dirCache.RootID(ctx, false)
if err != nil {
return err
}
rootID := atoi(rootIDs)
// Let putio delete the filesystem tree
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "deleting file: %d", rootID)
err = f.client.Files.Delete(ctx, rootID)
return shouldRetry(err)
})
f.dirCache.ResetRoot()
return err
}
// Copy src to this remote using server side copy operations.

View File

@@ -1,7 +1,7 @@
// Package qingstor provides an interface to QingStor object storage
// Home: https://www.qingcloud.com/
// +build !plan9,!js
// +build !plan9
package qingstor

View File

@@ -1,6 +1,6 @@
// Test QingStor filesystem interface
// +build !plan9,!js
// +build !plan9
package qingstor

View File

@@ -1,6 +1,6 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build plan9 js
// +build plan9
package qingstor

View File

@@ -1,6 +1,6 @@
// Upload object to QingStor
// +build !plan9,!js
// +build !plan9
package qingstor

File diff suppressed because it is too large

View File

@@ -46,7 +46,7 @@ type Library struct {
Encrypted bool `json:"encrypted"`
Owner string `json:"owner"`
ID string `json:"id"`
Size int64 `json:"size"`
Size int `json:"size"`
Name string `json:"name"`
Modified int64 `json:"mtime"`
}

View File

@@ -584,38 +584,29 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return nil
}
// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
// Rmdir removes the directory or library if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
libraryName, dirPath := f.splitPath(dir)
libraryID, err := f.getLibraryID(ctx, libraryName)
if err != nil {
return err
}
if check {
directoryEntries, err := f.getDirectoryEntries(ctx, libraryID, dirPath, false)
if err != nil {
return err
}
if len(directoryEntries) > 0 {
return fs.ErrorDirectoryNotEmpty
}
directoryEntries, err := f.getDirectoryEntries(ctx, libraryID, dirPath, false)
if err != nil {
return err
}
if len(directoryEntries) > 0 {
return fs.ErrorDirectoryNotEmpty
}
if dirPath == "" || dirPath == "/" {
return f.deleteLibrary(ctx, libraryID)
}
return f.deleteDir(ctx, libraryID, dirPath)
}
// Rmdir removes the directory or library if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, true)
}
// ==================== Optional Interface fs.ListRer ====================
// ListR lists the objects and directories of the Fs starting
@@ -902,14 +893,33 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// ==================== Optional Interface fs.Purger ====================
// Purge all files in the directory
// Purge all files in the root and the root directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
if f.libraryName == "" {
return errors.New("Cannot delete from the root of the server. Please select a library")
}
libraryID, err := f.getLibraryID(ctx, f.libraryName)
if err != nil {
return err
}
if f.rootDirectory == "" {
// Delete library
err = f.deleteLibrary(ctx, libraryID)
if err != nil {
return err
}
return nil
}
err = f.deleteDir(ctx, libraryID, f.rootDirectory)
if err != nil {
return err
}
return nil
}
// ==================== Optional Interface fs.CleanUpper ====================
@@ -1004,7 +1014,7 @@ func (f *Fs) listLibraries(ctx context.Context) (entries fs.DirEntries, err erro
for _, library := range libraries {
d := fs.NewDir(library.Name, time.Unix(library.Modified, 0))
d.SetSize(library.Size)
d.SetSize(int64(library.Size))
entries = append(entries, d)
}

View File

@@ -164,18 +164,6 @@ Home directory can be found in a shared folder called "home"
Default: false,
Help: "Set to skip any symlinks and any other non regular files.",
Advanced: true,
}, {
Name: "subsystem",
Default: "sftp",
Help: "Specifies the SSH2 subsystem on the remote host.",
Advanced: true,
}, {
Name: "server_command",
Default: "",
Help: `Specifies the path or command to run a sftp server on the remote host.
The subsystem option is ignored when server_command is defined.`,
Advanced: true,
}},
}
fs.Register(fsi)
@@ -199,8 +187,6 @@ type Options struct {
Md5sumCommand string `config:"md5sum_command"`
Sha1sumCommand string `config:"sha1sum_command"`
SkipLinks bool `config:"skip_links"`
Subsystem string `config:"subsystem"`
ServerCommand string `config:"server_command"`
}
// Fs stores the interface to the remote SFTP files
@@ -304,7 +290,7 @@ func (f *Fs) sftpConnection() (c *conn, err error) {
if err != nil {
return nil, errors.Wrap(err, "couldn't connect SSH")
}
c.sftpClient, err = f.newSftpClient(c.sshClient)
c.sftpClient, err = sftp.NewClient(c.sshClient)
if err != nil {
_ = c.sshClient.Close()
return nil, errors.Wrap(err, "couldn't initialise SFTP")
@@ -313,35 +299,6 @@ func (f *Fs) sftpConnection() (c *conn, err error) {
return c, nil
}
// Creates a new SFTP client on conn, using the specified subsystem
// or sftp server, and zero or more option functions
func (f *Fs) newSftpClient(conn *ssh.Client, opts ...sftp.ClientOption) (*sftp.Client, error) {
s, err := conn.NewSession()
if err != nil {
return nil, err
}
pw, err := s.StdinPipe()
if err != nil {
return nil, err
}
pr, err := s.StdoutPipe()
if err != nil {
return nil, err
}
if f.opt.ServerCommand != "" {
if err := s.Start(f.opt.ServerCommand); err != nil {
return nil, err
}
} else {
if err := s.RequestSubsystem(f.opt.Subsystem); err != nil {
return nil, err
}
}
return sftp.NewClientPipe(pr, pw, opts...)
}
// Get an SFTP connection from the pool, or open a new one
func (f *Fs) getSftpConnection() (c *conn, err error) {
f.poolMu.Lock()
@@ -1087,7 +1044,7 @@ func shellEscape(str string) string {
func parseHash(bytes []byte) string {
// For strings with backslash *sum writes a leading \
// https://unix.stackexchange.com/q/313733/94054
return strings.ToLower(strings.Split(strings.TrimLeft(string(bytes), "\\"), " ")[0]) // Split at hash / filename separator / all convert to lowercase
return strings.Split(strings.TrimLeft(string(bytes), "\\"), " ")[0] // Split at hash / filename separator
}
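
A worked example of the parsing above, using the backslash-prefixed form the *sum tools emit when a filename needs escaping (values are illustrative; the added strings.ToLower also normalizes tools that print uppercase digests):

in := []byte(`\d41d8cd98f00b204e9800998ecf8427e  dir\\file.txt`)
fmt.Println(parseHash(in)) // d41d8cd98f00b204e9800998ecf8427e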
// Parses the byte array output from the SSH session

View File

@@ -853,8 +853,8 @@ func (f *Fs) Precision() time.Duration {
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
}
// updateItem patches a file or folder

View File

@@ -895,12 +895,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil
}
// Purge deletes all the files in the directory
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
func (f *Fs) Purge(ctx context.Context) error {
// Caution: Deleting a folder may orphan objects. It's important
// to remove the contents of the folder before you delete the
// folder. That's because removing a folder using DELETE does not
@@ -920,7 +920,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
if f.opt.HardDelete {
return fs.ErrorCantPurge
}
return f.purgeCheck(ctx, dir, false)
return f.purgeCheck(ctx, "", false)
}
// moveFile moves a file server side

View File

@@ -840,21 +840,17 @@ func (f *Fs) Precision() time.Duration {
return time.Nanosecond
}
// Purge deletes all the files in the directory
// Purge deletes all the files and directories
//
// Implemented here so we can make sure we delete directory markers
func (f *Fs) Purge(ctx context.Context, dir string) error {
container, directory := f.split(dir)
if container == "" {
return fs.ErrorListBucketRequired
}
func (f *Fs) Purge(ctx context.Context) error {
// Delete all the files including the directory markers
toBeDeleted := make(chan fs.Object, fs.Config.Transfers)
delErr := make(chan error, 1)
go func() {
delErr <- operations.DeleteFiles(ctx, toBeDeleted)
}()
err := f.list(container, directory, f.rootDirectory, false, true, true, func(entry fs.DirEntry) error {
err := f.list(f.rootContainer, f.rootDirectory, f.rootDirectory, f.rootContainer == "", true, true, func(entry fs.DirEntry) error {
if o, ok := entry.(*Object); ok {
toBeDeleted <- o
}
@@ -868,7 +864,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
if err != nil {
return err
}
return f.Rmdir(ctx, dir)
return f.Rmdir(ctx, "")
}
// Copy src to this remote using server side copy operations.
@@ -1115,18 +1111,13 @@ func min(x, y int64) int64 {
//
// if except is passed in then segments with that prefix won't be deleted
func (o *Object) removeSegments(except string) error {
segmentsContainer, _, err := o.getSegmentsDlo()
if err != nil {
return err
}
except = path.Join(o.remote, except)
// fs.Debugf(o, "segmentsContainer %q prefix %q", segmentsContainer, prefix)
err = o.fs.listContainerRoot(segmentsContainer, o.remote, "", false, true, true, func(remote string, object *swift.Object, isDirectory bool) error {
segmentsContainer, prefix, err := o.getSegmentsDlo()
err = o.fs.listContainerRoot(segmentsContainer, prefix, "", false, true, true, func(remote string, object *swift.Object, isDirectory bool) error {
if isDirectory {
return nil
}
if except != "" && strings.HasPrefix(remote, except) {
// fs.Debugf(o, "Ignoring current segment file %q in container %q", remote, segmentsContainer)
// fs.Debugf(o, "Ignoring current segment file %q in container %q", segmentsRoot+remote, segmentsContainer)
return nil
}
fs.Debugf(o, "Removing segment file %q in container %q", remote, segmentsContainer)
@@ -1335,7 +1326,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// object has been safely uploaded
o.lastModified = modTime
o.size = size
o.md5 = rxHeaders["Etag"]
o.md5 = rxHeaders["ETag"]
o.contentType = contentType
o.headers = headers
if inCount != nil {

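The Etag/ETag one-liner in the swift hunk above is a header-key casing fix. For background, net/http canonicalizes MIME header keys, so direct map indexing must use the canonical form (the swift library's rxHeaders map may follow different rules, so treat this as an illustration only):

h := http.Header{}
h.Set("ETag", `"abc123"`)
fmt.Println(h.Get("etag")) // "abc123" - Get canonicalizes its argument
fmt.Println(h["Etag"])     // ["abc123"] - stored under the canonical key "Etag"
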
View File

@@ -1,6 +1,7 @@
package union
import (
"bufio"
"context"
"io"
"sync"
@@ -66,9 +67,31 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
obj := entries[0].(*upstream.Object)
return obj.Update(ctx, in, src, options...)
}
// Multi-threading
readers, errChan := multiReader(len(entries), in)
// Get multiple reader
readers := make([]io.Reader, len(entries))
writers := make([]io.Writer, len(entries))
errs := Errors(make([]error, len(entries)+1))
for i := range entries {
r, w := io.Pipe()
bw := bufio.NewWriter(w)
readers[i], writers[i] = r, bw
defer func() {
err := w.Close()
if err != nil {
panic(err)
}
}()
}
go func() {
mw := io.MultiWriter(writers...)
es := make([]error, len(writers)+1)
_, es[len(es)-1] = io.Copy(mw, in)
for i, bw := range writers {
es[i] = bw.(*bufio.Writer).Flush()
}
errs[len(entries)] = Errors(es).Err()
}()
// Multi-threading
multithread(len(entries), func(i int) {
if o, ok := entries[i].(*upstream.Object); ok {
err := o.Update(ctx, readers[i], src, options...)
@@ -77,7 +100,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
errs[i] = fs.ErrorNotAFile
}
})
errs[len(entries)] = <-errChan
return errs.Err()
}

View File

@@ -3,6 +3,7 @@ package policy
import (
"context"
"math/rand"
"time"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
@@ -19,10 +20,12 @@ type EpRand struct {
}
func (p *EpRand) rand(upstreams []*upstream.Fs) *upstream.Fs {
rand.Seed(time.Now().Unix())
return upstreams[rand.Intn(len(upstreams))]
}
func (p *EpRand) randEntries(entries []upstream.Entry) upstream.Entry {
rand.Seed(time.Now().Unix())
return entries[rand.Intn(len(entries))]
}
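
The hunk above toggles per-call rand.Seed(time.Now().Unix()); reseeding with one-second resolution repeats the same sequence when called twice within a second. A sketch of seeding math/rand once from crypto/rand instead, which is plausibly what rclone's random.Seed (seen in the cmd diff further down) does:

import (
	crand "crypto/rand"
	"encoding/binary"
	mrand "math/rand"
)

// seedOnce seeds math/rand a single time with 8 crypto-quality bytes.
func seedOnce() error {
	var b [8]byte
	if _, err := crand.Read(b[:]); err != nil {
		return err
	}
	mrand.Seed(int64(binary.LittleEndian.Uint64(b[:])))
	return nil
}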

View File

@@ -145,16 +145,11 @@ func (f *Fs) Hashes() hash.Set {
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
upstreams, err := f.create(ctx, dir)
if err == fs.ErrorObjectNotFound {
if dir != parentDir(dir) {
if err := f.Mkdir(ctx, parentDir(dir)); err != nil {
return err
}
upstreams, err = f.create(ctx, dir)
} else if dir == "" {
// If root dirs not created then create them
upstreams, err = f.upstreams, nil
if err == fs.ErrorObjectNotFound && dir != parentDir(dir) {
if err := f.Mkdir(ctx, parentDir(dir)); err != nil {
return err
}
upstreams, err = f.create(ctx, dir)
}
if err != nil {
return err
@@ -167,13 +162,13 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return errs.Err()
}
// Purge all files in the directory
// Purge all files in the root and the root directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error {
func (f *Fs) Purge(ctx context.Context) error {
for _, r := range f.upstreams {
if r.Features().Purge == nil {
return fs.ErrorCantPurge
@@ -185,10 +180,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
}
errs := Errors(make([]error, len(upstreams)))
multithread(len(upstreams), func(i int) {
err := upstreams[i].Features().Purge(ctx, dir)
if errors.Cause(err) == fs.ErrorDirNotFound {
err = nil
}
err := upstreams[i].Features().Purge(ctx)
errs[i] = errors.Wrap(err, upstreams[i].Name())
})
return errs.Err()
@@ -390,37 +382,6 @@ func (f *Fs) DirCacheFlush() {
})
}
// Tee in into n outputs
//
// When finished read the error from the channel
func multiReader(n int, in io.Reader) ([]io.Reader, <-chan error) {
readers := make([]io.Reader, n)
pipeWriters := make([]*io.PipeWriter, n)
writers := make([]io.Writer, n)
errChan := make(chan error, 1)
for i := range writers {
r, w := io.Pipe()
bw := bufio.NewWriter(w)
readers[i], pipeWriters[i], writers[i] = r, w, bw
}
go func() {
mw := io.MultiWriter(writers...)
es := make([]error, 2*n+1)
_, copyErr := io.Copy(mw, in)
es[2*n] = copyErr
// Flush the buffers
for i, bw := range writers {
es[i] = bw.(*bufio.Writer).Flush()
}
// Close the underlying pipes
for i, pw := range pipeWriters {
es[2*i] = pw.CloseWithError(copyErr)
}
errChan <- Errors(es).Err()
}()
return readers, errChan
}
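
A hedged usage sketch for the multiReader helper above: every returned reader must be drained concurrently, otherwise the internal io.Copy stalls on the pipes; the aggregated copy/flush/close error arrives on the channel afterwards.

// fanOut copies src to n concurrent consumers via multiReader.
func fanOut(src io.Reader, n int) error {
	readers, errChan := multiReader(n, src)
	var wg sync.WaitGroup
	for _, r := range readers {
		wg.Add(1)
		go func(r io.Reader) {
			defer wg.Done()
			_, _ = io.Copy(ioutil.Discard, r) // each consumer drains its copy
		}(r)
	}
	wg.Wait()
	return <-errChan // copy, flush and pipe-close errors, aggregated
}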
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bool, options ...fs.OpenOption) (fs.Object, error) {
srcPath := src.Remote()
upstreams, err := f.create(ctx, srcPath)
@@ -448,9 +409,31 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
e, err := f.wrapEntries(u.WrapObject(o))
return e.(*Object), err
}
// Multi-threading
readers, errChan := multiReader(len(upstreams), in)
errs := Errors(make([]error, len(upstreams)+1))
// Get multiple reader
readers := make([]io.Reader, len(upstreams))
writers := make([]io.Writer, len(upstreams))
for i := range writers {
r, w := io.Pipe()
bw := bufio.NewWriter(w)
readers[i], writers[i] = r, bw
defer func() {
err := w.Close()
if err != nil {
panic(err)
}
}()
}
go func() {
mw := io.MultiWriter(writers...)
es := make([]error, len(writers)+1)
_, es[len(es)-1] = io.Copy(mw, in)
for i, bw := range writers {
es[i] = bw.(*bufio.Writer).Flush()
}
errs[len(upstreams)] = Errors(es).Err()
}()
// Multi-threading
objs := make([]upstream.Entry, len(upstreams))
multithread(len(upstreams), func(i int) {
u := upstreams[i]
@@ -467,7 +450,6 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
}
objs[i] = u.WrapObject(o)
})
errs[len(upstreams)] = <-errChan
err = errs.Err()
if err != nil {
return nil, err
@@ -522,9 +504,6 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
}
for _, u := range f.upstreams {
usg, err := u.About(ctx)
if errors.Cause(err) == fs.ErrorDirNotFound {
continue
}
if err != nil {
return nil, err
}
@@ -823,7 +802,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, err
}
fs.Debugf(f, "actionPolicy = %T, createPolicy = %T, searchPolicy = %T", f.actionPolicy, f.createPolicy, f.searchPolicy)
var features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: false,

View File

@@ -153,29 +153,3 @@ func TestPolicy2(t *testing.T) {
UnimplementableObjectMethods: []string{"MimeType"},
})
}
func TestPolicy3(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-policy31")
tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-policy32")
tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-policy33")
require.NoError(t, os.MkdirAll(tempdir1, 0744))
require.NoError(t, os.MkdirAll(tempdir2, 0744))
require.NoError(t, os.MkdirAll(tempdir3, 0744))
upstreams := tempdir1 + " " + tempdir2 + " " + tempdir3
name := "TestUnionPolicy3"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "union"},
{Name: name, Key: "upstreams", Value: upstreams},
{Name: name, Key: "action_policy", Value: "all"},
{Name: name, Key: "create_policy", Value: "all"},
{Name: name, Key: "search_policy", Value: "all"},
},
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}

View File

@@ -97,7 +97,6 @@ func New(remote, root string, cacheTime time.Duration) (*Fs, error) {
return nil, err
}
f.Fs = myFs
cache.PinUntilFinalized(f.Fs, f)
return f, err
}
@@ -336,9 +335,6 @@ func (f *Fs) updateUsageCore(lock bool) error {
usage, err := f.RootFs.Features().About(ctx)
if err != nil {
f.cacheUpdate = false
if errors.Cause(err) == fs.ErrorDirNotFound {
err = nil
}
return err
}
if lock {

View File

@@ -686,8 +686,8 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// mkParentDir makes the parent of the native path dirPath if
// necessary and any directories above that
func (f *Fs) mkParentDir(ctx context.Context, dirPath string) (err error) {
// defer log.Trace(dirPath, "")("err=%v", &err)
func (f *Fs) mkParentDir(ctx context.Context, dirPath string) error {
// defer log.Trace(dirPath, "")("")
// chop off trailing / if it exists
if strings.HasSuffix(dirPath, "/") {
dirPath = dirPath[:len(dirPath)-1]
@@ -699,27 +699,6 @@ func (f *Fs) mkParentDir(ctx context.Context, dirPath string) (err error) {
return f.mkdir(ctx, parent)
}
// _dirExists - list dirPath to see if it exists
//
// dirPath should be a native path ending in a /
func (f *Fs) _dirExists(ctx context.Context, dirPath string) (exists bool) {
opts := rest.Opts{
Method: "PROPFIND",
Path: dirPath,
ExtraHeaders: map[string]string{
"Depth": "0",
},
}
var result api.Multistatus
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
return f.shouldRetry(resp, err)
})
return err == nil
}
// low level mkdir, only makes the directory, doesn't attempt to create parents
func (f *Fs) _mkdir(ctx context.Context, dirPath string) error {
// We assume the root is already created
@@ -740,29 +719,19 @@ func (f *Fs) _mkdir(ctx context.Context, dirPath string) error {
return f.shouldRetry(resp, err)
})
if apiErr, ok := err.(*api.Error); ok {
// Check if it already exists. The response code for this isn't
// defined in the RFC so the implementations vary wildly.
//
// already exists
// owncloud returns 423/StatusLocked if the create is already in progress
if apiErr.StatusCode == http.StatusMethodNotAllowed || apiErr.StatusCode == http.StatusNotAcceptable || apiErr.StatusCode == http.StatusLocked {
return nil
}
// 4shared returns a 409/StatusConflict here which clashes
// horribly with the intermediate paths don't exist meaning. So
// check to see if actually exists. This will correct other
// error codes too.
if f._dirExists(ctx, dirPath) {
return nil
}
}
return err
}
// mkdir makes the directory and parents using native paths
func (f *Fs) mkdir(ctx context.Context, dirPath string) (err error) {
// defer log.Trace(dirPath, "")("err=%v", &err)
err = f._mkdir(ctx, dirPath)
func (f *Fs) mkdir(ctx context.Context, dirPath string) error {
// defer log.Trace(dirPath, "")("")
err := f._mkdir(ctx, dirPath)
if apiErr, ok := err.(*api.Error); ok {
// parent does not exist so create it first then try again
if apiErr.StatusCode == http.StatusConflict {
@@ -899,13 +868,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return f.copyOrMove(ctx, src, remote, "COPY")
}
// Purge deletes all the files in the directory
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
}
// Move src to this remote using server side move operations.
@@ -1129,14 +1098,10 @@ func (o *Object) Storable() bool {
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
var resp *http.Response
fs.FixRangeOption(options, o.size)
opts := rest.Opts{
Method: "GET",
Path: o.filePath(),
Options: options,
ExtraHeaders: map[string]string{
"Depth": "0",
},
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)

View File

@@ -67,7 +67,13 @@ func init() {
return
}
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Yandex Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Yandex Client Secret\nLeave blank normally.",
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
@@ -75,7 +81,7 @@ func init() {
// it doesn't seem worth making an exception for this
Default: (encoder.Display |
encoder.EncodeInvalidUtf8),
}}...),
}},
})
}
@@ -631,13 +637,13 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, true)
}
// Purge deletes all the files in the directory
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
}
// copyOrMove copies or moves directories or files depending on the method parameter

View File

@@ -36,7 +36,6 @@ var (
cgo = flag.Bool("cgo", false, "Use cgo for the build")
noClean = flag.Bool("no-clean", false, "Don't clean the build directory before running.")
tags = flag.String("tags", "", "Space separated list of build tags")
buildmode = flag.String("buildmode", "", "Passed to go build -buildmode flag")
compileOnly = flag.Bool("compile-only", false, "Just build the binary, not the zip.")
)
@@ -46,6 +45,7 @@ var (
var osarches = []string{
"windows/386",
"windows/amd64",
"darwin/386",
"darwin/amd64",
"linux/386",
"linux/amd64",
@@ -67,7 +67,6 @@ var osarches = []string{
"plan9/386",
"plan9/amd64",
"solaris/amd64",
"js/wasm",
}
// Special environment flags for a given arch
@@ -281,7 +280,7 @@ func stripVersion(goarch string) string {
// build the binary in dir returning success or failure
func compileArch(version, goos, goarch, dir string) bool {
log.Printf("Compiling %s/%s into %s", goos, goarch, dir)
log.Printf("Compiling %s/%s", goos, goarch)
output := filepath.Join(dir, "rclone")
if goos == "windows" {
output += ".exe"
@@ -299,17 +298,11 @@ func compileArch(version, goos, goarch, dir string) bool {
"go", "build",
"--ldflags", "-s -X github.com/rclone/rclone/fs.Version=" + version,
"-trimpath",
"-i",
"-o", output,
"-tags", *tags,
}
if *buildmode != "" {
args = append(args,
"-buildmode", *buildmode,
)
}
args = append(args,
"..",
)
}
env := []string{
"GOOS=" + goos,
"GOARCH=" + stripVersion(goarch),
@@ -328,16 +321,14 @@ func compileArch(version, goos, goarch, dir string) bool {
return false
}
if !*compileOnly {
if goos != "js" {
artifacts := []string{buildZip(dir)}
// build a .deb and .rpm if appropriate
if goos == "linux" {
artifacts = append(artifacts, buildDebAndRpm(dir, version, stripVersion(goarch))...)
}
if *copyAs != "" {
for _, artifact := range artifacts {
run("ln", artifact, strings.Replace(artifact, "-"+version, "-"+*copyAs, 1))
}
artifacts := []string{buildZip(dir)}
// build a .deb and .rpm if appropriate
if goos == "linux" {
artifacts = append(artifacts, buildDebAndRpm(dir, version, goarch)...)
}
if *copyAs != "" {
for _, artifact := range artifacts {
run("ln", artifact, strings.Replace(artifact, "-"+version, "-"+*copyAs, 1))
}
}
// tidy up

View File

@@ -15,12 +15,10 @@ description: |
vendor: "rclone"
homepage: "https://rclone.org"
license: "MIT"
contents:
- src: ./rclone
dst: /usr/bin/rclone
- src: ./README.html
dst: /usr/share/doc/rclone/README.html
- src: ./README.txt
dst: /usr/share/doc/rclone/README.txt
- src: ./rclone.1
dst: /usr/share/man/man1/rclone.1
# No longer supported? See https://github.com/goreleaser/nfpm/issues/144
# bindir: "/usr/bin"
files:
./rclone: "/usr/bin/rclone"
./README.html: "/usr/share/doc/rclone/README.html"
./README.txt: "/usr/share/doc/rclone/README.txt"
./rclone.1: "/usr/share/man/man1/rclone.1"

View File

@@ -1,26 +0,0 @@
#!/bin/bash
# Thrash the VFS tests
set -e
# Optionally set the iterations with the first parameter
iterations=${1:-100}
base=$(dirname $(dirname $(realpath "$0")))
echo ${base}
run=${base}/bin/test-repeat.sh
echo ${run}
testdirs="
vfs
vfs/vfscache
vfs/vfscache/writeback
vfs/vfscache/downloaders
cmd/cmount
"
for testdir in ${testdirs}; do
echo "Testing ${testdir} with ${iterations} iterations"
cd ${base}/${testdir}
${run} -i=${iterations} -race -tags=cmount
done

View File

@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9
package cachestats

View File

@@ -1,6 +1,6 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build plan9 js
// +build plan9
package cachestats

View File

@@ -29,7 +29,6 @@ var (
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by downloading rather than with hash.")
AddFlags(cmdFlags)
}
@@ -51,7 +50,7 @@ the source match the files in the destination, not the other way
around. This means that extra files in the destination that are not in
the source will not be detected.
The |--differ|, |--missing-on-dst|, |--missing-on-src|, |--match|
The |--differ|, |--missing-on-dst|, |--missing-on-src|, |--src-only|
and |--error| flags write paths, one per line, to the file name (or
stdout if it is |-|) supplied. What they write is described in the
help below. For example |--differ| will write all paths which are

View File

@@ -14,7 +14,7 @@ func init() {
var commandDefinition = &cobra.Command{
Use: "cleanup remote:path",
Short: `Clean up the remote if possible.`,
Short: `Clean up the remote if possible`,
Long: `
Clean up the remote if possible. Empty the trash or delete old file
versions. Not supported by all remotes.

View File

@@ -9,6 +9,7 @@ package cmd
import (
"fmt"
"log"
"math/rand"
"os"
"os/exec"
"path"
@@ -34,7 +35,6 @@ import (
"github.com/rclone/rclone/fs/rc/rcflags"
"github.com/rclone/rclone/fs/rc/rcserver"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/random"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
@@ -89,10 +89,8 @@ func NewFsFile(remote string) (fs.Fs, string) {
f, err := cache.Get(remote)
switch err {
case fs.ErrorIsFile:
cache.Pin(f) // pin indefinitely since it was on the CLI
return f, path.Base(fsPath)
case nil:
cache.Pin(f) // pin indefinitely since it was on the CLI
return f, ""
default:
err = fs.CountError(err)
@@ -141,7 +139,6 @@ func newFsDir(remote string) fs.Fs {
err = fs.CountError(err)
log.Fatalf("Failed to create file system for %q: %v", remote, err)
}
cache.Pin(f) // pin indefinitely since it was on the CLI
return f
}
@@ -200,7 +197,6 @@ func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs
_ = fs.CountError(err)
log.Fatalf("Failed to create file system for destination %q: %v", dstRemote, err)
}
cache.Pin(fdst) // pin indefinitely since it was on the CLI
return
}
@@ -512,9 +508,7 @@ func AddBackendFlags() {
// Main runs rclone interpreting flags and commands out of os.Args
func Main() {
if err := random.Seed(); err != nil {
log.Fatalf("Fatal error: %v", err)
}
rand.Seed(time.Now().Unix())
setupRootCommand(Root)
AddBackendFlags()
if err := Root.Execute(); err != nil {

View File

@@ -17,6 +17,7 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
)
const fhUnset = ^uint64(0)
@@ -31,10 +32,10 @@ type FS struct {
}
// NewFS makes a new FS
func NewFS(VFS *vfs.VFS) *FS {
func NewFS(f fs.Fs) *FS {
fsys := &FS{
VFS: VFS,
f: VFS.Fs(),
VFS: vfs.New(f, &vfsflags.Opt),
f: f,
ready: make(chan (struct{})),
}
return fsys
@@ -217,18 +218,12 @@ func (fsys *FS) Readdir(dirPath string,
itemsRead := -1
defer log.Trace(dirPath, "ofst=%d, fh=0x%X", ofst, fh)("items=%d, errc=%d", &itemsRead, &errc)
dir, errc := fsys.lookupDir(dirPath)
node, errc := fsys.getHandle(fh)
if errc != 0 {
return errc
}
// We can't seek in directories and FUSE should know that so
// return an error if ofst is ever set.
if ofst > 0 {
return -fuse.ESPIPE
}
nodes, err := dir.ReadDirAll()
items, err := node.Readdir(-1)
if err != nil {
return translateError(err)
}
@@ -237,7 +232,7 @@ func (fsys *FS) Readdir(dirPath string,
// for getattr (but FUSE only looks at st_ino and the
// file-type bits of st_mode).
//
// We have called host.SetCapReaddirPlus() so WinFsp will
// FIXME If you call host.SetCapReaddirPlus() then WinFsp will
use the full stat information - a useful optimization on
// Windows.
//
@@ -248,19 +243,25 @@ func (fsys *FS) Readdir(dirPath string,
// directory is read in a single readdir operation.
fill(".", nil, 0)
fill("..", nil, 0)
for _, node := range nodes {
name := node.Name()
if len(name) > mountlib.MaxLeafSize {
fs.Errorf(dirPath, "Name too long (%d bytes) for FUSE, skipping: %s", len(name), name)
continue
for _, item := range items {
node, ok := item.(vfs.Node)
if ok {
name := node.Name()
if len(name) > mountlib.MaxLeafSize {
fs.Errorf(dirPath, "Name too long (%d bytes) for FUSE, skipping: %s", len(name), name)
continue
}
if usingReaddirPlus {
// We have called host.SetCapReaddirPlus() so supply the stat information
var stat fuse.Stat_t
_ = fsys.stat(node, &stat) // not capable of returning an error
fill(name, &stat, 0)
} else {
fill(name, nil, 0)
}
}
// We have called host.SetCapReaddirPlus() so supply the stat information
// It is very cheap at this point so supply it regardless of OS capabilities
var stat fuse.Stat_t
_ = fsys.stat(node, &stat) // not capable of returning an error
fill(name, &stat, 0)
}
itemsRead = len(nodes)
itemsRead = len(items)
return 0
}
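Reduced to its essentials, the directory-listing contract in the hunk above looks like this. The fill callback has cgofuse's signature, but names is an assumed plain slice standing in for the VFS nodes:

package sketch

import "github.com/billziss-gh/cgofuse/fuse"

// readdirSketch emits the "." and ".." entries FUSE expects, refuses to
// seek (directories are not seekable, hence ESPIPE), then fills in each
// child name.
func readdirSketch(fill func(name string, stat *fuse.Stat_t, ofst int64) bool,
	ofst int64, names []string) int {
	if ofst > 0 {
		return -fuse.ESPIPE
	}
	fill(".", nil, 0)
	fill("..", nil, 0)
	for _, name := range names {
		fill(name, nil, 0) // pass a *fuse.Stat_t instead for readdir-plus
	}
	return 0
}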
@@ -289,65 +290,41 @@ func (fsys *FS) Statfs(path string, stat *fuse.Statfs_t) (errc int) {
return 0
}
// OpenEx opens a file
func (fsys *FS) OpenEx(path string, fi *fuse.FileInfo_t) (errc int) {
defer log.Trace(path, "flags=0x%X", fi.Flags)("errc=%d, fh=0x%X", &errc, &fi.Fh)
fi.Fh = fhUnset
// translate the fuse flags to os flags
flags := translateOpenFlags(fi.Flags)
handle, err := fsys.VFS.OpenFile(path, flags, 0777)
if err != nil {
return translateError(err)
}
// If size unknown then use direct io to read
if entry := handle.Node().DirEntry(); entry != nil && entry.Size() < 0 {
fi.DirectIo = true
}
fi.Fh = fsys.openHandle(handle)
return 0
}
// Open opens a file
func (fsys *FS) Open(path string, flags int) (errc int, fh uint64) {
var fi = fuse.FileInfo_t{
Flags: flags,
}
errc = fsys.OpenEx(path, &fi)
return errc, fi.Fh
}
defer log.Trace(path, "flags=0x%X", flags)("errc=%d, fh=0x%X", &errc, &fh)
// CreateEx creates and opens a file.
func (fsys *FS) CreateEx(filePath string, mode uint32, fi *fuse.FileInfo_t) (errc int) {
defer log.Trace(filePath, "flags=0x%X, mode=0%o", fi.Flags, mode)("errc=%d, fh=0x%X", &errc, &fi.Fh)
fi.Fh = fhUnset
leaf, parentDir, errc := fsys.lookupParentDir(filePath)
if errc != 0 {
return errc
}
file, err := parentDir.Create(leaf, fi.Flags)
if err != nil {
return translateError(err)
}
// translate the fuse flags to os flags
flags := translateOpenFlags(fi.Flags) | os.O_CREATE
handle, err := file.Open(flags)
flags = translateOpenFlags(flags)
handle, err := fsys.VFS.OpenFile(path, flags, 0777)
if err != nil {
return translateError(err)
return translateError(err), fhUnset
}
fi.Fh = fsys.openHandle(handle)
return 0
// FIXME add support for unknown length files setting direct_io
// See: https://github.com/billziss-gh/cgofuse/issues/38
return 0, fsys.openHandle(handle)
}
// Create creates and opens a file.
func (fsys *FS) Create(filePath string, flags int, mode uint32) (errc int, fh uint64) {
var fi = fuse.FileInfo_t{
Flags: flags,
defer log.Trace(filePath, "flags=0x%X, mode=0%o", flags, mode)("errc=%d, fh=0x%X", &errc, &fh)
leaf, parentDir, errc := fsys.lookupParentDir(filePath)
if errc != 0 {
return errc, fhUnset
}
errc = fsys.CreateEx(filePath, mode, &fi)
return errc, fi.Fh
file, err := parentDir.Create(leaf, flags)
if err != nil {
return translateError(err), fhUnset
}
// translate the fuse flags to os flags
flags = translateOpenFlags(flags) | os.O_CREATE
handle, err := file.Open(flags)
if err != nil {
return translateError(err), fhUnset
}
return 0, fsys.openHandle(handle)
}
// Truncate truncates a file to size
@@ -618,12 +595,3 @@ func translateOpenFlags(inFlags int) (outFlags int) {
// NB O_SYNC isn't defined by fuse
return outFlags
}
// Make sure interfaces are satisfied
var (
_ fuse.FileSystemInterface = (*FS)(nil)
_ fuse.FileSystemOpenEx = (*FS)(nil)
//_ fuse.FileSystemChflags = (*FS)(nil)
//_ fuse.FileSystemSetcrtime = (*FS)(nil)
//_ fuse.FileSystemSetchgtime = (*FS)(nil)
)
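The flag translation used by OpenEx and CreateEx above maps FUSE open flags onto os package flags. A trimmed sketch of that mapping, not the full table (which also handles bits such as O_EXCL and O_TRUNC):

package sketch

import (
	"os"

	"github.com/billziss-gh/cgofuse/fuse"
)

// translateOpenFlagsSketch converts the access mode and two common
// modifier bits from fuse constants to os constants.
func translateOpenFlagsSketch(inFlags int) (outFlags int) {
	switch inFlags & fuse.O_ACCMODE {
	case fuse.O_RDONLY:
		outFlags = os.O_RDONLY
	case fuse.O_WRONLY:
		outFlags = os.O_WRONLY
	case fuse.O_RDWR:
		outFlags = os.O_RDWR
	}
	if inFlags&fuse.O_APPEND != 0 {
		outFlags |= os.O_APPEND
	}
	if inFlags&fuse.O_CREAT != 0 {
		outFlags |= os.O_CREATE
	}
	return outFlags
}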

View File

@@ -11,15 +11,26 @@ package cmount
import (
"fmt"
"os"
"os/signal"
"runtime"
"strings"
"syscall"
"time"
"github.com/billziss-gh/cgofuse/fuse"
"github.com/okzk/sdnotify"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
)
const (
// SetCapReaddirPlus informs the host that the hosted file system has the readdir-plus
// capability [Windows only]. A file system that has the readdir-plus capability can send
// full stat information during Readdir, thus avoiding extraneous Getattr calls.
usingReaddirPlus = runtime.GOOS == "windows"
)
func init() {
@@ -27,91 +38,78 @@ func init() {
if runtime.GOOS == "windows" {
name = "mount"
}
mountlib.NewMountCommand(name, false, mount)
mountlib.NewMountCommand(name, false, Mount)
// Add mount to rc
mountlib.AddRc("cmount", mount)
}
// mountOptions configures the options from the command line flags
func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.Options) (options []string) {
func mountOptions(device string, mountpoint string) (options []string) {
// Options
options = []string{
"-o", "fsname=" + device,
"-o", "subtype=rclone",
"-o", fmt.Sprintf("max_readahead=%d", opt.MaxReadAhead),
"-o", fmt.Sprintf("attr_timeout=%g", opt.AttrTimeout.Seconds()),
"-o", fmt.Sprintf("max_readahead=%d", mountlib.MaxReadAhead),
"-o", fmt.Sprintf("attr_timeout=%g", mountlib.AttrTimeout.Seconds()),
// This causes FUSE to supply O_TRUNC with the Open
// call which is more efficient for cmount. However
// it does not work with cgofuse on Windows with
// WinFSP so cmount must work with or without it.
"-o", "atomic_o_trunc",
}
if opt.DebugFUSE {
if mountlib.DebugFUSE {
options = append(options, "-o", "debug")
}
// OSX options
if runtime.GOOS == "darwin" {
if opt.NoAppleDouble {
if mountlib.NoAppleDouble {
options = append(options, "-o", "noappledouble")
}
if opt.NoAppleXattr {
if mountlib.NoAppleXattr {
options = append(options, "-o", "noapplexattr")
}
}
// determine if ExtraOptions already has an opt in
hasOption := func(optionName string) bool {
optionName += "="
for _, option := range opt.ExtraOptions {
if strings.HasPrefix(option, optionName) {
return true
}
}
return false
}
// Windows options
if runtime.GOOS == "windows" {
// These cause WinFsp to use the current user
if !hasOption("uid") {
options = append(options, "-o", "uid=-1")
}
if !hasOption("gid") {
options = append(options, "-o", "gid=-1")
}
options = append(options, "-o", "uid=-1")
options = append(options, "-o", "gid=-1")
options = append(options, "--FileSystemName=rclone")
}
if runtime.GOOS == "darwin" || runtime.GOOS == "windows" {
if opt.VolumeName != "" {
options = append(options, "-o", "volname="+opt.VolumeName)
if mountlib.VolumeName != "" {
options = append(options, "-o", "volname="+mountlib.VolumeName)
}
}
if opt.AllowNonEmpty {
if mountlib.AllowNonEmpty {
options = append(options, "-o", "nonempty")
}
if opt.AllowOther {
if mountlib.AllowOther {
options = append(options, "-o", "allow_other")
}
if opt.AllowRoot {
if mountlib.AllowRoot {
options = append(options, "-o", "allow_root")
}
if opt.DefaultPermissions {
if mountlib.DefaultPermissions {
options = append(options, "-o", "default_permissions")
}
if VFS.Opt.ReadOnly {
if vfsflags.Opt.ReadOnly {
options = append(options, "-o", "ro")
}
if opt.WritebackCache {
if mountlib.WritebackCache {
// FIXME? options = append(options, "-o", WritebackCache())
}
if opt.DaemonTimeout != 0 {
options = append(options, "-o", fmt.Sprintf("daemon_timeout=%d", int(opt.DaemonTimeout.Seconds())))
if mountlib.DaemonTimeout != 0 {
options = append(options, "-o", fmt.Sprintf("daemon_timeout=%d", int(mountlib.DaemonTimeout.Seconds())))
}
for _, option := range opt.ExtraOptions {
for _, option := range mountlib.ExtraOptions {
options = append(options, "-o", option)
}
for _, option := range opt.ExtraFlags {
for _, option := range mountlib.ExtraFlags {
options = append(options, option)
}
return options
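The hasOption helper added above is a plain prefix scan over the -o options. In isolation, with assumed values:

package main

import (
	"fmt"
	"strings"
)

func main() {
	extraOptions := []string{"uid=1000", "volname=data"} // assumed -o values
	hasOption := func(optionName string) bool {
		optionName += "="
		for _, option := range extraOptions {
			if strings.HasPrefix(option, optionName) {
				return true
			}
		}
		return false
	}
	fmt.Println(hasOption("uid")) // true - the user supplied a uid
	fmt.Println(hasOption("gid")) // false - fall back to gid=-1
}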
@@ -137,39 +135,35 @@ func waitFor(fn func() bool) (ok bool) {
//
// returns an error, and an error channel for the serve process to
// report an error when fusermount is called.
func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error, func() error, error) {
f := VFS.Fs()
func mount(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, error) {
fs.Debugf(f, "Mounting on %q", mountpoint)
// Check the mountpoint - in Windows the mountpoint mustn't exist before the mount
if runtime.GOOS != "windows" {
fi, err := os.Stat(mountpoint)
if err != nil {
return nil, nil, errors.Wrap(err, "mountpoint")
return nil, nil, nil, errors.Wrap(err, "mountpoint")
}
if !fi.IsDir() {
return nil, nil, errors.New("mountpoint is not a directory")
return nil, nil, nil, errors.New("mountpoint is not a directory")
}
}
// Create underlying FS
fsys := NewFS(VFS)
fsys := NewFS(f)
host := fuse.NewFileSystemHost(fsys)
host.SetCapReaddirPlus(true) // only works on Windows
if usingReaddirPlus {
host.SetCapReaddirPlus(true)
}
host.SetCapCaseInsensitive(f.Features().CaseInsensitive)
// Create options
options := mountOptions(VFS, f.Name()+":"+f.Root(), mountpoint, opt)
options := mountOptions(f.Name()+":"+f.Root(), mountpoint)
fs.Debugf(f, "Mounting with options: %q", options)
// Serve the mount point in the background returning error to errChan
errChan := make(chan error, 1)
go func() {
defer func() {
if r := recover(); r != nil {
errChan <- errors.Errorf("mount failed: %v", r)
}
}()
var err error
ok := host.Mount(mountpoint, options)
if !ok {
@@ -205,7 +199,7 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
select {
case err := <-errChan:
err = errors.Wrap(err, "mount stopped before calling Init")
return nil, nil, err
return nil, nil, nil, err
case <-fsys.ready:
}
@@ -220,5 +214,53 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
}
}
return errChan, unmount, nil
return fsys.VFS, errChan, unmount, nil
}
// Mount mounts the remote at mountpoint.
func Mount(f fs.Fs, mountpoint string) error {
// Mount it
FS, errChan, unmount, err := mount(f, mountpoint)
if err != nil {
return errors.Wrap(err, "failed to mount FUSE fs")
}
// Note cgofuse unmounts the fs on SIGINT etc
sigHup := make(chan os.Signal, 1)
signal.Notify(sigHup, syscall.SIGHUP)
atexit.Register(func() {
_ = unmount()
})
if err := sdnotify.Ready(); err != nil && err != sdnotify.ErrSdNotifyNoSocket {
return errors.Wrap(err, "failed to notify systemd")
}
waitloop:
for {
select {
// umount triggered outside the app
case err = <-errChan:
break waitloop
// user sent SIGHUP to clear the cache
case <-sigHup:
root, err := FS.Root()
if err != nil {
fs.Errorf(f, "Error reading root: %v", err)
} else {
root.ForgetAll()
}
}
}
_ = sdnotify.Stopping()
if err != nil {
return errors.Wrap(err, "failed to umount FUSE fs")
}
return nil
}
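The systemd notification dance in Mount above, reduced to a runnable skeleton using the same okzk/sdnotify package; the served mount is simulated with a sleep:

package main

import (
	"log"
	"time"

	"github.com/okzk/sdnotify"
)

func main() {
	// Tell systemd (Type=notify units) the service is ready; outside
	// systemd there is no notify socket, which is not an error here.
	if err := sdnotify.Ready(); err != nil && err != sdnotify.ErrSdNotifyNoSocket {
		log.Fatalf("failed to notify systemd: %v", err)
	}
	time.Sleep(time.Second) // stand-in for serving the mount
	_ = sdnotify.Stopping() // best-effort shutdown notification
}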

View File

@@ -1,32 +0,0 @@
// Build for macos with the brew tag to handle the absence
// of fuse and print an appropriate error message
// +build brew
// +build darwin
package cmount
import (
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/vfs"
)
func init() {
name := "mount"
cmd := mountlib.NewMountCommand(name, false, mount)
cmd.Aliases = append(cmd.Aliases, "cmount")
mountlib.AddRc("cmount", mount)
}
// mount the file system
//
// The mount point will be ready when this returns.
//
// returns an error, and an error channel for the serve process to
// report an error when fusermount is called.
func mount(_ *vfs.VFS, _ string, _ *mountlib.Options) (<-chan error, func() error, error) {
return nil, nil, errors.New("mount is not supported on MacOS when installed via Homebrew. " +
"Please install the binaries available at https://rclone." +
"org/downloads/ instead if you want to use the mount command")
}

View File

@@ -1,6 +1,6 @@
// Build for cmount for unsupported platforms to stop go complaining
// about "no buildable Go source files"
// +build !linux,!darwin,!freebsd,!windows !brew !cgo !cmount
// +build !linux,!darwin,!freebsd,!windows !cgo !cmount
package cmount

View File

@@ -22,7 +22,7 @@ func init() {
var commandDefinition = &cobra.Command{
Use: "copy source:path dest:path",
Short: `Copy files from source to dest, skipping already copied.`,
Short: `Copy files from source to dest, skipping already copied`,
Long: `
Copy the source to the destination. Doesn't transfer
unchanged files, testing by size and modification time or

View File

@@ -15,7 +15,7 @@ func init() {
var commandDefinition = &cobra.Command{
Use: "copyto source:path dest:path",
Short: `Copy files from source to dest, skipping already copied.`,
Short: `Copy files from source to dest, skipping already copied`,
Long: `
If source:path is a file or directory then it copies it to a file or
directory named dest:path.

View File

@@ -22,32 +22,19 @@ func init() {
var commandDefinition = &cobra.Command{
Use: "dedupe [mode] remote:path",
Short: `Interactively find duplicate filenames and delete/rename them.`,
Short: `Interactively find duplicate files and delete/rename them.`,
Long: `
By default ` + "`dedupe`" + ` interactively finds files with duplicate
names and offers to delete all but one or rename them to be
different.
This is only useful with backends like Google Drive which can have
duplicate file names. It can be run on wrapping backends (eg crypt) if
they wrap a backend which supports duplicate file names.
By default ` + "`" + `dedupe` + "`" + ` interactively finds duplicate files and offers to
delete all but one or rename them to be different. Only useful with
Google Drive which can have duplicate file names.
In the first pass it will merge directories with the same name. It
will do this iteratively until all the identically named directories
have been merged.
will do this iteratively until all the identical directories have been
merged.
In the second pass, for every group of duplicate file names, it will
delete all but one of the identical files it finds without confirmation.
This means that for most duplicated files the ` + "`dedupe`" + `
command will not be interactive.
` + "`dedupe`" + ` considers files to be identical if they have the
same hash. If the backend does not support hashes (eg crypt wrapping
Google Drive) then they will never be found to be identical. If you
use the ` + "`--size-only`" + ` flag then files will be considered
identical if they have the same size (any hash will be ignored). This
can be useful on crypt backends which do not support hashes.
The ` + "`" + `dedupe` + "`" + ` command will delete all but one of any identical (same
md5sum) files it finds without confirmation. This means that for most
duplicated files the ` + "`" + `dedupe` + "`" + ` command will not be interactive.
**Important**: Since this can cause data loss, test first with the
` + "`--dry-run` or the `--interactive`/`-i`" + ` flag.
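A toy model of the "identical means same hash" rule described above, using plain structs rather than rclone's types (the hashes are made up, and the first-pass directory merge is omitted):

package main

import "fmt"

type file struct{ name, hash string }

func main() {
	files := []file{
		{"one.txt", "1eedaa9f"},
		{"one.txt", "1eedaa9f"}, // identical duplicate - deletable without asking
		{"one.txt", "7594e7dc"}, // same name, different content - interactive step
	}
	// Group by name, then keep one file per distinct hash within a group.
	byName := map[string][]file{}
	for _, f := range files {
		byName[f.name] = append(byName[f.name], f)
	}
	for name, group := range byName {
		seen := map[string]bool{}
		for _, f := range group {
			if seen[f.hash] {
				fmt.Printf("%s: would delete identical duplicate (hash %s)\n", name, f.hash)
				continue
			}
			seen[f.hash] = true
		}
	}
}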
@@ -65,26 +52,26 @@ Before - with duplicates
1744073 2016-03-05 16:22:38.104000000 two.txt
564374 2016-03-05 16:22:52.118000000 two.txt
Now the ` + "`dedupe`" + ` session
Now the ` + "`" + `dedupe` + "`" + ` session
$ rclone dedupe drive:dupes
2016/03/05 16:24:37 Google drive root 'dupes': Looking for duplicates using interactive mode.
one.txt: Found 4 files with duplicate names
one.txt: Deleting 2/3 identical duplicates (MD5 "1eedaa9fe86fd4b8632e2ac549403b36")
one.txt: Found 4 duplicates - deleting identical copies
one.txt: Deleting 2/3 identical duplicates (md5sum "1eedaa9fe86fd4b8632e2ac549403b36")
one.txt: 2 duplicates remain
1: 6048320 bytes, 2016-03-05 16:23:16.798000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
2: 564374 bytes, 2016-03-05 16:23:06.731000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
1: 6048320 bytes, 2016-03-05 16:23:16.798000000, md5sum 1eedaa9fe86fd4b8632e2ac549403b36
2: 564374 bytes, 2016-03-05 16:23:06.731000000, md5sum 7594e7dc9fc28f727c42ee3e0749de81
s) Skip and do nothing
k) Keep just one (choose which in next step)
r) Rename all to be different (by changing file.jpg to file-1.jpg)
s/k/r> k
Enter the number of the file to keep> 1
one.txt: Deleted 1 extra copies
two.txt: Found 3 files with duplicate names
two.txt: Found 3 duplicates - deleting identical copies
two.txt: 3 duplicates remain
1: 564374 bytes, 2016-03-05 16:22:52.118000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
2: 6048320 bytes, 2016-03-05 16:22:46.185000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
3: 1744073 bytes, 2016-03-05 16:22:38.104000000, MD5 851957f7fb6f0bc4ce76be966d336802
1: 564374 bytes, 2016-03-05 16:22:52.118000000, md5sum 7594e7dc9fc28f727c42ee3e0749de81
2: 6048320 bytes, 2016-03-05 16:22:46.185000000, md5sum 1eedaa9fe86fd4b8632e2ac549403b36
3: 1744073 bytes, 2016-03-05 16:22:38.104000000, md5sum 851957f7fb6f0bc4ce76be966d336802
s) Skip and do nothing
k) Keep just one (choose which in next step)
r) Rename all to be different (by changing file.jpg to file-1.jpg)

View File

@@ -44,7 +44,7 @@ func init() {
var commandDefinition = &cobra.Command{
Use: "lsf remote:path",
Short: `List directories and objects in remote:path formatted for parsing.`,
Short: `List directories and objects in remote:path formatted for parsing`,
Long: `
List the contents of the source path (directories and objects) to
standard output in a form which is easy to parse by scripts. By

View File

@@ -19,7 +19,6 @@ import (
// Dir represents a directory entry
type Dir struct {
*vfs.Dir
fsys *FS
}
// Check interface satisfied
@@ -28,7 +27,7 @@ var _ fusefs.Node = (*Dir)(nil)
// Attr updates the attributes of a directory
func (d *Dir) Attr(ctx context.Context, a *fuse.Attr) (err error) {
defer log.Trace(d, "")("attr=%+v, err=%v", a, &err)
a.Valid = d.fsys.opt.AttrTimeout
a.Valid = mountlib.AttrTimeout
a.Gid = d.VFS().Opt.GID
a.Uid = d.VFS().Opt.UID
a.Mode = os.ModeDir | d.VFS().Opt.DirPerms
@@ -76,7 +75,7 @@ func (d *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.Lo
if err != nil {
return nil, translateError(err)
}
resp.EntryValid = d.fsys.opt.AttrTimeout
resp.EntryValid = mountlib.AttrTimeout
// Check the mnode to see if it has a fuse Node cached
// We must return the same fuse nodes for vfs Nodes
node, ok := mnode.Sys().(fusefs.Node)
@@ -85,9 +84,9 @@ func (d *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.Lo
}
switch x := mnode.(type) {
case *vfs.File:
node = &File{x, d.fsys}
node = &File{x}
case *vfs.Dir:
node = &Dir{x, d.fsys}
node = &Dir{x}
default:
panic("bad type")
}
@@ -107,13 +106,6 @@ func (d *Dir) ReadDirAll(ctx context.Context) (dirents []fuse.Dirent, err error)
if err != nil {
return nil, translateError(err)
}
dirents = append(dirents, fuse.Dirent{
Type: fuse.DT_Dir,
Name: ".",
}, fuse.Dirent{
Type: fuse.DT_Dir,
Name: "..",
})
for _, node := range items {
name := node.Name()
if len(name) > mountlib.MaxLeafSize {
@@ -147,7 +139,7 @@ func (d *Dir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.Cr
if err != nil {
return nil, nil, translateError(err)
}
node = &File{file, d.fsys}
node = &File{file}
file.SetSys(node) // cache the FUSE node for later
return node, &FileHandle{fh}, err
}
@@ -161,7 +153,7 @@ func (d *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (node fusefs.No
if err != nil {
return nil, translateError(err)
}
node = &Dir{dir, d.fsys}
node = &Dir{dir}
dir.SetSys(node) // cache the FUSE node for later
return node, nil
}

View File

@@ -8,6 +8,7 @@ import (
"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/vfs"
)
@@ -15,7 +16,6 @@ import (
// File represents a file
type File struct {
*vfs.File
fsys *FS
}
// Check interface satisfied
@@ -24,7 +24,7 @@ var _ fusefs.Node = (*File)(nil)
// Attr fills out the attributes for the file
func (f *File) Attr(ctx context.Context, a *fuse.Attr) (err error) {
defer log.Trace(f, "")("a=%+v, err=%v", a, &err)
a.Valid = f.fsys.opt.AttrTimeout
a.Valid = mountlib.AttrTimeout
modTime := f.File.ModTime()
Size := uint64(f.File.Size())
Blocks := (Size + 511) / 512

View File

@@ -15,24 +15,23 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
)
// FS represents the top level filing system
type FS struct {
*vfs.VFS
f fs.Fs
opt *mountlib.Options
f fs.Fs
}
// Check interface satisfied
var _ fusefs.FS = (*FS)(nil)
// NewFS makes a new FS
func NewFS(VFS *vfs.VFS, opt *mountlib.Options) *FS {
func NewFS(f fs.Fs) *FS {
fsys := &FS{
VFS: VFS,
f: VFS.Fs(),
opt: opt,
VFS: vfs.New(f, &vfsflags.Opt),
f: f,
}
return fsys
}
@@ -44,7 +43,7 @@ func (f *FS) Root() (node fusefs.Node, err error) {
if err != nil {
return nil, translateError(err)
}
return &Dir{root, f}, nil
return &Dir{root}, nil
}
// Check interface satisfied

View File

@@ -6,67 +6,74 @@ package mount
import (
"fmt"
"runtime"
"os"
"os/signal"
"syscall"
"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
"github.com/okzk/sdnotify"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
)
func init() {
mountlib.NewMountCommand("mount", false, mount)
mountlib.NewMountCommand("mount", false, Mount)
// Add mount to rc
mountlib.AddRc("mount", mount)
}
// mountOptions configures the options from the command line flags
func mountOptions(VFS *vfs.VFS, device string, opt *mountlib.Options) (options []fuse.MountOption) {
func mountOptions(device string) (options []fuse.MountOption) {
options = []fuse.MountOption{
fuse.MaxReadahead(uint32(opt.MaxReadAhead)),
fuse.MaxReadahead(uint32(mountlib.MaxReadAhead)),
fuse.Subtype("rclone"),
fuse.FSName(device),
fuse.VolumeName(opt.VolumeName),
fuse.VolumeName(mountlib.VolumeName),
// Options from benchmarking in the fuse module
//fuse.MaxReadahead(64 * 1024 * 1024),
//fuse.WritebackCache(),
}
if opt.AsyncRead {
if mountlib.AsyncRead {
options = append(options, fuse.AsyncRead())
}
if opt.NoAppleDouble {
if mountlib.NoAppleDouble {
options = append(options, fuse.NoAppleDouble())
}
if opt.NoAppleXattr {
if mountlib.NoAppleXattr {
options = append(options, fuse.NoAppleXattr())
}
if opt.AllowNonEmpty {
if mountlib.AllowNonEmpty {
options = append(options, fuse.AllowNonEmptyMount())
}
if opt.AllowOther {
if mountlib.AllowOther {
options = append(options, fuse.AllowOther())
}
if opt.AllowRoot {
if mountlib.AllowRoot {
// options = append(options, fuse.AllowRoot())
fs.Errorf(nil, "Ignoring --allow-root. Support has been removed upstream - see https://github.com/bazil/fuse/issues/144 for more info")
}
if opt.DefaultPermissions {
if mountlib.DefaultPermissions {
options = append(options, fuse.DefaultPermissions())
}
if VFS.Opt.ReadOnly {
if vfsflags.Opt.ReadOnly {
options = append(options, fuse.ReadOnly())
}
if opt.WritebackCache {
if mountlib.WritebackCache {
options = append(options, fuse.WritebackCache())
}
if opt.DaemonTimeout != 0 {
options = append(options, fuse.DaemonTimeout(fmt.Sprint(int(opt.DaemonTimeout.Seconds()))))
if mountlib.DaemonTimeout != 0 {
options = append(options, fuse.DaemonTimeout(fmt.Sprint(int(mountlib.DaemonTimeout.Seconds()))))
}
if len(opt.ExtraOptions) > 0 {
if len(mountlib.ExtraOptions) > 0 {
fs.Errorf(nil, "-o/--option not supported with this FUSE backend")
}
if len(opt.ExtraFlags) > 0 {
if len(mountlib.ExtraFlags) > 0 {
fs.Errorf(nil, "--fuse-flag not supported with this FUSE backend")
}
return options
@@ -78,25 +85,14 @@ func mountOptions(VFS *vfs.VFS, device string, opt *mountlib.Options) (options [
//
// returns an error, and an error channel for the serve process to
// report an error when fusermount is called.
func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error, func() error, error) {
if runtime.GOOS == "darwin" {
fs.Logf(nil, "macOS users: please try \"rclone cmount\" as it will be the default in v1.54")
}
if opt.DebugFUSE {
fuse.Debug = func(msg interface{}) {
fs.Debugf("fuse", "%v", msg)
}
}
f := VFS.Fs()
func mount(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, error) {
fs.Debugf(f, "Mounting on %q", mountpoint)
c, err := fuse.Mount(mountpoint, mountOptions(VFS, f.Name()+":"+f.Root(), opt)...)
c, err := fuse.Mount(mountpoint, mountOptions(f.Name()+":"+f.Root())...)
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
filesys := NewFS(VFS, opt)
filesys := NewFS(f)
server := fusefs.New(c, nil)
// Serve the mount point in the background returning error to errChan
@@ -113,7 +109,7 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
// check if the mount process has an error to report
<-c.Ready
if err := c.MountError; err != nil {
return nil, nil, err
return nil, nil, nil, err
}
unmount := func() error {
@@ -122,5 +118,63 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
return fuse.Unmount(mountpoint)
}
return errChan, unmount, nil
return filesys.VFS, errChan, unmount, nil
}
// Mount mounts the remote at mountpoint.
func Mount(f fs.Fs, mountpoint string) error {
if mountlib.DebugFUSE {
fuse.Debug = func(msg interface{}) {
fs.Debugf("fuse", "%v", msg)
}
}
// Mount it
FS, errChan, unmount, err := mount(f, mountpoint)
if err != nil {
return errors.Wrap(err, "failed to mount FUSE fs")
}
sigInt := make(chan os.Signal, 1)
signal.Notify(sigInt, syscall.SIGINT, syscall.SIGTERM)
sigHup := make(chan os.Signal, 1)
signal.Notify(sigHup, syscall.SIGHUP)
atexit.IgnoreSignals()
atexit.Register(func() {
_ = unmount()
})
if err := sdnotify.Ready(); err != nil && err != sdnotify.ErrSdNotifyNoSocket {
return errors.Wrap(err, "failed to notify systemd")
}
waitloop:
for {
select {
// umount triggered outside the app
case err = <-errChan:
break waitloop
// Program abort: umount
case <-sigInt:
err = unmount()
break waitloop
// user sent SIGHUP to clear the cache
case <-sigHup:
root, err := FS.Root()
if err != nil {
fs.Errorf(f, "Error reading root: %v", err)
} else {
root.ForgetAll()
}
}
}
_ = sdnotify.Stopping()
if err != nil {
return errors.Wrap(err, "failed to umount FUSE fs")
}
return nil
}

View File

@@ -1,7 +1,7 @@
// Build for mount for unsupported platforms to stop go complaining
// about "no buildable Go source files"
// Invert the build constraint: linux,go1.13 freebsd,go1.13
// Invert the build constraint: linux,go1.13 darwin,go1.13 freebsd,go1.13
//
// !((linux&&go1.13) || (darwin&&go1.13) || (freebsd&&go1.13))
// == !(linux&&go1.13) && !(darwin&&go1.13) && !(freebsd&&go1.13)
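Written out in build-tag syntax, where a space means OR, a comma means AND, and separate +build lines AND together, that inverted constraint becomes the following (darwin kept here to match the expansion above):

// +build !linux !go1.13
// +build !darwin !go1.13
// +build !freebsd !go1.13

package mount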

View File

@@ -33,15 +33,13 @@ import (
// FOPEN_DIRECT_IO flag from their `Open` method. See directio_test.go
// for an example.
type FileHandle struct {
h vfs.Handle
fsys *FS
h vfs.Handle
}
// Create a new FileHandle
func newFileHandle(h vfs.Handle, fsys *FS) *FileHandle {
func newFileHandle(h vfs.Handle) *FileHandle {
return &FileHandle{
h: h,
fsys: fsys,
h: h,
}
}
@@ -117,7 +115,7 @@ var _ fusefs.FileFsyncer = (*FileHandle)(nil)
// is assumed, and the 'blocks' field is set accordingly.
func (f *FileHandle) Getattr(ctx context.Context, out *fuse.AttrOut) (errno syscall.Errno) {
defer log.Trace(f, "")("attr=%v, errno=%v", &out, &errno)
f.fsys.setAttrOut(f.h.Node(), out)
setAttrOut(f.h.Node(), out)
return 0
}
@@ -127,7 +125,7 @@ var _ fusefs.FileGetattrer = (*FileHandle)(nil)
func (f *FileHandle) Setattr(ctx context.Context, in *fuse.SetAttrIn, out *fuse.AttrOut) (errno syscall.Errno) {
defer log.Trace(f, "in=%v", in)("attr=%v, errno=%v", &out, &errno)
var err error
f.fsys.setAttrOut(f.h.Node(), out)
setAttrOut(f.h.Node(), out)
size, ok := in.GetSize()
if ok {
err = f.h.Truncate(int64(size))

Some files were not shown because too many files have changed in this diff.