mirror of https://github.com/rclone/rclone.git synced 2026-01-04 17:43:50 +00:00

Compare commits


2 Commits

Author SHA1 Message Date

Nick Craig-Wood
cbe049b520 cmount: support --mount-tpslimit flag
See: https://forum.rclone.org/t/mac-os-big-sur-mount-error/21726
2021-02-07 13:01:07 +00:00

Nick Craig-Wood
a662d17105 mountlib: add --mount-tpslimit flag
2021-02-07 12:57:21 +00:00
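
Both commits plumb the same new flag through mountlib and expose it in cmount. A minimal usage sketch, assuming --mount-tpslimit throttles the mount's transactions per second the way the global --tpslimit throttles API calls (the remote name, mount point and rate below are illustrative):

```
# Limit the mounted remote to 10 transactions per second
rclone mount remote: /mnt/remote --mount-tpslimit 10
```
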
257 changed files with 4865 additions and 17611 deletions


@@ -19,12 +19,12 @@ jobs:
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.13', 'go1.14', 'go1.15']
job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'go1.13', 'go1.14', 'go1.15']
include:
- job_name: linux
os: ubuntu-latest
go: '1.16.x'
go: '1.16.0-rc1'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
@@ -32,25 +32,18 @@ jobs:
racequicktest: true
deploy: true
- job_name: mac_amd64
- job_name: mac
os: macOS-latest
go: '1.16.x'
go: '1.16.0-rc1'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
racequicktest: true
deploy: true
- job_name: mac_arm64
os: macOS-latest
go: '1.16.x'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -macos-sdk macosx11.1 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows_amd64
os: windows-latest
go: '1.16.x'
go: '1.16.0-rc1'
gotags: cmount
build_flags: '-include "^windows/amd64" -cgo'
build_args: '-buildmode exe'
@@ -60,7 +53,7 @@ jobs:
- job_name: windows_386
os: windows-latest
go: '1.16.x'
go: '1.16.0-rc1'
gotags: cmount
goarch: '386'
cgo: '1'
@@ -71,8 +64,8 @@ jobs:
- job_name: other_os
os: ubuntu-latest
go: '1.16.x'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
go: '1.16.0-rc1'
build_flags: '-exclude "^(windows/|darwin/amd64|linux/)"'
compile_all: true
deploy: true
@@ -131,7 +124,7 @@ jobs:
shell: bash
run: |
brew update
brew install --cask macfuse
brew install --cask osxfuse
if: matrix.os == 'macOS-latest'
- name: Install Libraries on Windows
@@ -213,98 +206,50 @@ jobs:
# Deploy binaries if enabled in config && not a PR && not a fork
if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
android:
timeout-minutes: 30
name: "android-all"
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
xgo:
timeout-minutes: 60
name: "xgo cross compile"
runs-on: ubuntu-latest
# Upgrade together with NDK version
- name: Set up Go 1.14
uses: actions/setup-go@v1
with:
go-version: 1.14
steps:
# Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
- name: Force NDK version
run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;21.4.7075529" | grep -v = || true
- name: Checkout
uses: actions/checkout@v1
with:
# Checkout into a fixed path to avoid import path problems on go < 1.11
path: ./src/github.com/rclone/rclone
- name: Go module cache
uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Set environment variables
shell: bash
run: |
echo 'GOPATH=${{ runner.workspace }}' >> $GITHUB_ENV
echo '${{ runner.workspace }}/bin' >> $GITHUB_PATH
- name: Set global environment variables
shell: bash
run: |
echo "VERSION=$(make version)" >> $GITHUB_ENV
- name: Cross-compile rclone
run: |
docker pull billziss/xgo-cgofuse
GO111MODULE=off go get -v github.com/karalabe/xgo # don't add to go.mod
# xgo \
# -image=billziss/xgo-cgofuse \
# -targets=darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
# -tags cmount \
# -dest build \
# .
xgo \
-image=billziss/xgo-cgofuse \
-targets=android/*,ios/* \
-dest build \
.
- name: build native rclone
run: |
make
- name: Build rclone
shell: bash
run: |
make
- name: arm-v7a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm' >> $GITHUB_ENV
echo 'GOARM=7' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm-v7a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-armv7a .
- name: arm64-v8a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm64-v8a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-armv8a .
- name: x86 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=386' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x86 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-x86 .
- name: x64 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_HOME/ndk/21.4.7075529/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=amd64' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x64 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-x64 .
- name: Upload artifacts
run: |
make ci_upload
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# Upload artifacts if not a PR && not a fork
if: github.head_ref == '' && github.repository == 'rclone/rclone'
- name: Upload artifacts
run: |
make ci_upload
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# Upload artifacts if not a PR && not a fork
if: github.head_ref == '' && github.repository == 'rclone/rclone'

.gitignore vendored (1 change)

@@ -1,7 +1,6 @@
*~
_junk/
rclone
rclone.exe
build
docs/public
rclone.iml


@@ -72,7 +72,7 @@ Make sure you
When you are done with that
git push -u origin my-new-feature
git push origin my-new-feature
Go to the GitHub website and click [Create pull
request](https://help.github.com/articles/creating-a-pull-request/).
@@ -99,7 +99,7 @@ rclone's tests are run from the go testing framework, so at the top
level you can run this to run all the tests.
go test -v ./...
rclone contains a mixture of unit tests and integration tests.
Because it is difficult (and in some respects pointless) to test cloud
storage systems by mocking all their interfaces, rclone unit tests can
@@ -115,8 +115,8 @@ are skipped if `TestDrive:` isn't defined.
cd backend/drive
go test -v
You can then run the integration tests which test all of rclone's
operations. Normally these get run against the local file system,
You can then run the integration tests which tests all of rclone's
operations. Normally these get run against the local filing system,
but they can be run against any of the remotes.
cd fs/sync
@@ -127,7 +127,7 @@ but they can be run against any of the remotes.
go test -v -remote TestDrive:
If you want to use the integration test framework to run these tests
altogether with an HTML report and test retries then from the
all together with an HTML report and test retries then from the
project root:
go install github.com/rclone/rclone/fstest/test_all
@@ -202,7 +202,7 @@ for the flag help, the remainder is shown to the user in `rclone
config` and is added to the docs with `make backenddocs`.
The only documentation you need to edit are the `docs/content/*.md`
files. The `MANUAL.*`, `rclone.1`, web site, etc. are all auto generated
files. The MANUAL.*, rclone.1, web site, etc. are all auto generated
from those during the release process. See the `make doc` and `make
website` targets in the Makefile if you are interested in how. You
don't need to run these when adding a feature.
@@ -265,7 +265,7 @@ rclone uses the [go
modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
support in go1.11 and later to manage its dependencies.
rclone can be built with modules outside of the `GOPATH`.
rclone can be built with modules outside of the GOPATH
To add a dependency `github.com/ncw/new_dependency` see the
instructions below. These will fetch the dependency and add it to
@@ -333,8 +333,8 @@ Getting going
* Try to implement as many optional methods as possible as it makes the remote more usable.
* Use lib/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
* `rclone purge -v TestRemote:rclone-info`
* `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
* `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
* `rclone info --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
* `go run cmd/info/internal/build_csv/main.go -o remote.csv remote.json`
* open `remote.csv` in a spreadsheet and examine
Unit tests
@@ -400,7 +400,7 @@ Usage
- If this variable doesn't exist, plugin support is disabled.
- Plugins must be compiled against the exact version of rclone to work.
(The rclone used during building the plugin must be the same as the source of rclone)
Building
To turn your existing additions into a Go plugin, move them to an external repository
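
A sketch of the build step, assuming the standard Go plugin toolchain (the output file name is illustrative; rclone may expect a specific naming scheme not shown in this diff):

```
go build -buildmode=plugin -o librcloneplugin_mybackend.so .
```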


@@ -16,8 +16,6 @@ RUN apk --no-cache add ca-certificates fuse tzdata && \
COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
RUN addgroup -g 1009 rclone && adduser -u 1009 -Ds /bin/sh -G rclone rclone
ENTRYPOINT [ "rclone" ]
WORKDIR /data

MANUAL.html generated (1291 changes): file diff suppressed because it is too large

MANUAL.md generated (1787 changes): file diff suppressed because it is too large

MANUAL.txt generated (1867 changes): file diff suppressed because it is too large


@@ -93,7 +93,7 @@ build_dep:
# Get the release dependencies we only install on linux
release_dep_linux:
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64\.tar\.gz'
cd /tmp && go get github.com/goreleaser/nfpm/v2/...
# Get the release dependencies we only install on Windows
release_dep_windows:
@@ -119,7 +119,7 @@ doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
rclone.1: MANUAL.md
pandoc -s --from markdown-smart --to man MANUAL.md -o rclone.1
MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs rcdocs
MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs
./bin/make_manual.py
MANUAL.html: MANUAL.md
@@ -187,10 +187,10 @@ upload_github:
./bin/upload-github $(TAG)
cross: doc
go run bin/cross-compile.go -release current $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
go run bin/cross-compile.go -release current $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
beta:
go run bin/cross-compile.go $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
go run bin/cross-compile.go $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
@echo Beta release ready at https://pub.rclone.org/$(TAG)/
@@ -198,7 +198,7 @@ log_since_last_release:
git log $(LAST_TAG)..
compile_all:
go run bin/cross-compile.go -compile-only $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(BUILD_ARGS) $(TAG)
ci_upload:
sudo chown -R $$USER build


@@ -76,24 +76,6 @@ Now
The rclone docker image should autobuild via GitHub actions. If it doesn't
or needs to be updated then rebuild like this.
See: https://github.com/ilteoood/docker_buildx/issues/19
See: https://github.com/ilteoood/docker_buildx/blob/master/scripts/install_buildx.sh
```
git co v1.54.1
docker pull golang
export DOCKER_CLI_EXPERIMENTAL=enabled
docker buildx create --name actions_builder --use
docker run --rm --privileged docker/binfmt:820fdd95a9972a5308930a2bdfb8573dd4447ad3
docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
SUPPORTED_PLATFORMS=$(docker buildx inspect --bootstrap | grep 'Platforms:*.*' | cut -d : -f2,3)
echo "Supported platforms: $SUPPORTED_PLATFORMS"
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
docker buildx stop actions_builder
```
### Old build for linux/amd64 only
```
docker pull golang
docker build --rm --ulimit memlock=67108864 -t rclone/rclone:1.52.0 -t rclone/rclone:1.52 -t rclone/rclone:1 -t rclone/rclone:latest .


@@ -11,7 +11,6 @@ import (
_ "github.com/rclone/rclone/backend/local" // pull in test backend
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/stretchr/testify/require"
)
@@ -20,7 +19,7 @@ var (
)
func prepare(t *testing.T, root string) {
configfile.LoadConfig(context.Background())
config.LoadConfig(context.Background())
// Configure the remote
config.FileSet(remoteName, "type", "alias")


@@ -205,10 +205,7 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
if resp != nil {
if resp.StatusCode == 401 {
f.tokenRenewer.Invalidate()
@@ -283,7 +280,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.getRootInfo(ctx)
_, err := f.getRootInfo()
return err
})
@@ -291,14 +288,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
_, resp, err = f.c.Account.GetEndpoints()
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get endpoints")
}
// Get rootID
rootInfo, err := f.getRootInfo(ctx)
rootInfo, err := f.getRootInfo()
if err != nil || rootInfo.Id == nil {
return nil, errors.Wrap(err, "failed to get root")
}
@@ -340,11 +337,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
// getRootInfo gets the root folder info
func (f *Fs) getRootInfo(ctx context.Context) (rootInfo *acd.Folder, err error) {
func (f *Fs) getRootInfo() (rootInfo *acd.Folder, err error) {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
rootInfo, resp, err = f.c.Nodes.GetRoot()
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
return rootInfo, err
}
@@ -383,7 +380,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
var subFolder *acd.Folder
err = f.pacer.Call(func() (bool, error) {
subFolder, resp, err = folder.GetFolder(f.opt.Enc.FromStandardName(leaf))
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
if err == acd.ErrorNodeNotFound {
@@ -410,7 +407,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
var info *acd.Folder
err = f.pacer.Call(func() (bool, error) {
info, resp, err = folder.CreateFolder(f.opt.Enc.FromStandardName(leaf))
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
//fmt.Printf("...Error %v\n", err)
@@ -431,7 +428,7 @@ type listAllFn func(*acd.Node) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
query := "parents:" + dirID
if directoriesOnly {
query += " AND kind:" + folderKind
@@ -452,7 +449,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, title string, directorie
var resp *http.Response
err = f.pacer.CallNoRetry(func() (bool, error) {
nodes, resp, err = f.c.Nodes.GetNodes(&opts)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
return false, err
@@ -511,7 +508,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
var iErr error
for tries := 1; tries <= maxTries; tries++ {
entries = nil
_, err = f.listAll(ctx, directoryID, "", false, false, func(node *acd.Node) bool {
_, err = f.listAll(directoryID, "", false, false, func(node *acd.Node) bool {
remote := path.Join(dir, *node.Name)
switch *node.Kind {
case folderKind:
@@ -670,7 +667,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
if ok {
return false, nil
}
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -711,7 +708,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if err != nil {
return nil, err
}
err = f.moveNode(ctx, srcObj.remote, dstLeaf, dstDirectoryID, srcObj.info, srcLeaf, srcDirectoryID, false)
err = f.moveNode(srcObj.remote, dstLeaf, dstDirectoryID, srcObj.info, srcLeaf, srcDirectoryID, false)
if err != nil {
return nil, err
}
@@ -806,7 +803,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
var jsonStr string
err = srcFs.pacer.Call(func() (bool, error) {
jsonStr, err = srcInfo.GetMetadata()
return srcFs.shouldRetry(ctx, nil, err)
return srcFs.shouldRetry(nil, err)
})
if err != nil {
fs.Debugf(src, "DirMove error: error reading src metadata: %v", err)
@@ -818,7 +815,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return err
}
err = f.moveNode(ctx, srcPath, dstLeaf, dstDirectoryID, srcInfo, srcLeaf, srcDirectoryID, true)
err = f.moveNode(srcPath, dstLeaf, dstDirectoryID, srcInfo, srcLeaf, srcDirectoryID, true)
if err != nil {
return err
}
@@ -843,7 +840,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
if check {
// check directory is empty
empty := true
_, err = f.listAll(ctx, rootID, "", false, false, func(node *acd.Node) bool {
_, err = f.listAll(rootID, "", false, false, func(node *acd.Node) bool {
switch *node.Kind {
case folderKind:
empty = false
@@ -868,7 +865,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = node.Trash()
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
return err
@@ -990,7 +987,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
var info *acd.File
err = o.fs.pacer.Call(func() (bool, error) {
info, resp, err = folder.GetFile(o.fs.opt.Enc.FromStandardName(leaf))
return o.fs.shouldRetry(ctx, resp, err)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
if err == acd.ErrorNodeNotFound {
@@ -1047,7 +1044,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
} else {
in, resp, err = file.OpenTempURLHeaders(o.fs.noAuthClient, headers)
}
return o.fs.shouldRetry(ctx, resp, err)
return o.fs.shouldRetry(resp, err)
})
return in, err
}
@@ -1070,7 +1067,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if ok {
return false, nil
}
return o.fs.shouldRetry(ctx, resp, err)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
return err
@@ -1080,70 +1077,70 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// Remove a node
func (f *Fs) removeNode(ctx context.Context, info *acd.Node) error {
func (f *Fs) removeNode(info *acd.Node) error {
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = info.Trash()
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
return err
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return o.fs.removeNode(ctx, o.info)
return o.fs.removeNode(o.info)
}
// Restore a node
func (f *Fs) restoreNode(ctx context.Context, info *acd.Node) (newInfo *acd.Node, err error) {
func (f *Fs) restoreNode(info *acd.Node) (newInfo *acd.Node, err error) {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
newInfo, resp, err = info.Restore()
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
return newInfo, err
}
// Changes name of given node
func (f *Fs) renameNode(ctx context.Context, info *acd.Node, newName string) (newInfo *acd.Node, err error) {
func (f *Fs) renameNode(info *acd.Node, newName string) (newInfo *acd.Node, err error) {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
newInfo, resp, err = info.Rename(f.opt.Enc.FromStandardName(newName))
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
return newInfo, err
}
// Replaces one parent with another, effectively moving the file. Leaves other
// parents untouched. ReplaceParent cannot be used when the file is trashed.
func (f *Fs) replaceParent(ctx context.Context, info *acd.Node, oldParentID string, newParentID string) error {
func (f *Fs) replaceParent(info *acd.Node, oldParentID string, newParentID string) error {
return f.pacer.Call(func() (bool, error) {
resp, err := info.ReplaceParent(oldParentID, newParentID)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
}
// Adds one additional parent to object.
func (f *Fs) addParent(ctx context.Context, info *acd.Node, newParentID string) error {
func (f *Fs) addParent(info *acd.Node, newParentID string) error {
return f.pacer.Call(func() (bool, error) {
resp, err := info.AddParent(newParentID)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
}
// Remove given parent from object, leaving the other possible
// parents untouched. Object can end up having no parents.
func (f *Fs) removeParent(ctx context.Context, info *acd.Node, parentID string) error {
func (f *Fs) removeParent(info *acd.Node, parentID string) error {
return f.pacer.Call(func() (bool, error) {
resp, err := info.RemoveParent(parentID)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
}
// moveNode moves the node given from the srcLeaf,srcDirectoryID to
// the dstLeaf,dstDirectoryID
func (f *Fs) moveNode(ctx context.Context, name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, srcLeaf, srcDirectoryID string, useDirErrorMsgs bool) (err error) {
func (f *Fs) moveNode(name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, srcLeaf, srcDirectoryID string, useDirErrorMsgs bool) (err error) {
// fs.Debugf(name, "moveNode dst(%q,%s) <- src(%q,%s)", dstLeaf, dstDirectoryID, srcLeaf, srcDirectoryID)
cantMove := fs.ErrorCantMove
if useDirErrorMsgs {
@@ -1157,7 +1154,7 @@ func (f *Fs) moveNode(ctx context.Context, name, dstLeaf, dstDirectoryID string,
if srcLeaf != dstLeaf {
// fs.Debugf(name, "renaming")
_, err = f.renameNode(ctx, srcInfo, dstLeaf)
_, err = f.renameNode(srcInfo, dstLeaf)
if err != nil {
fs.Debugf(name, "Move: quick path rename failed: %v", err)
goto OnConflict
@@ -1165,7 +1162,7 @@ func (f *Fs) moveNode(ctx context.Context, name, dstLeaf, dstDirectoryID string,
}
if srcDirectoryID != dstDirectoryID {
// fs.Debugf(name, "trying parent replace: %s -> %s", oldParentID, newParentID)
err = f.replaceParent(ctx, srcInfo, srcDirectoryID, dstDirectoryID)
err = f.replaceParent(srcInfo, srcDirectoryID, dstDirectoryID)
if err != nil {
fs.Debugf(name, "Move: quick path parent replace failed: %v", err)
return err
@@ -1178,13 +1175,13 @@ OnConflict:
fs.Debugf(name, "Could not directly rename file, presumably because there was a file with the same name already. Instead, the file will now be trashed where such operations do not cause errors. It will be restored to the correct parent after. If any of the subsequent calls fails, the rename/move will be in an invalid state.")
// fs.Debugf(name, "Trashing file")
err = f.removeNode(ctx, srcInfo)
err = f.removeNode(srcInfo)
if err != nil {
fs.Debugf(name, "Move: remove node failed: %v", err)
return err
}
// fs.Debugf(name, "Renaming file")
_, err = f.renameNode(ctx, srcInfo, dstLeaf)
_, err = f.renameNode(srcInfo, dstLeaf)
if err != nil {
fs.Debugf(name, "Move: rename node failed: %v", err)
return err
@@ -1192,19 +1189,19 @@ OnConflict:
// note: replacing parent is forbidden by API, modifying them individually is
// okay though
// fs.Debugf(name, "Adding target parent")
err = f.addParent(ctx, srcInfo, dstDirectoryID)
err = f.addParent(srcInfo, dstDirectoryID)
if err != nil {
fs.Debugf(name, "Move: addParent failed: %v", err)
return err
}
// fs.Debugf(name, "removing original parent")
err = f.removeParent(ctx, srcInfo, srcDirectoryID)
err = f.removeParent(srcInfo, srcDirectoryID)
if err != nil {
fs.Debugf(name, "Move: removeParent failed: %v", err)
return err
}
// fs.Debugf(name, "Restoring")
_, err = f.restoreNode(ctx, srcInfo)
_, err = f.restoreNode(srcInfo)
if err != nil {
fs.Debugf(name, "Move: restoreNode node failed: %v", err)
return err
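
The repeated edits in this backend all serve one pattern: shouldRetry gains or loses a ctx parameter, and the ctx-aware form refuses to retry once the context is cancelled, stopping the pacer's retry loop early. A self-contained sketch of that pattern, with a local contextError helper standing in for fserrors.ContextError from the hunks above and the status-code check reduced to the common cases (all names here are illustrative):

```
package retrypattern

import (
	"context"
	"net/http"
)

// contextError reports whether the context has been cancelled and, if so,
// replaces err with the context's error (a stand-in for fserrors.ContextError).
func contextError(ctx context.Context, err *error) bool {
	if ctxErr := ctx.Err(); ctxErr != nil {
		*err = ctxErr
		return true
	}
	return false
}

// shouldRetry mirrors the context-aware form in the hunks above: a cancelled
// context is never retryable; otherwise retry on rate-limit and server errors.
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if contextError(ctx, &err) {
		return false, err
	}
	if resp != nil && (resp.StatusCode == 429 || resp.StatusCode >= 500) {
		return true, err
	}
	return false, err
}
```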


@@ -217,23 +217,6 @@ This option controls how often unused buffers will be removed from the pool.`,
encoder.EncodeDel |
encoder.EncodeBackSlash |
encoder.EncodeRightPeriod),
}, {
Name: "public_access",
Help: "Public access level of a container: blob, container.",
Default: string(azblob.PublicAccessNone),
Examples: []fs.OptionExample{
{
Value: string(azblob.PublicAccessNone),
Help: "The container and its blobs can be accessed only with an authorized request. It's a default value",
}, {
Value: string(azblob.PublicAccessBlob),
Help: "Blob data within this container can be read via anonymous request.",
}, {
Value: string(azblob.PublicAccessContainer),
Help: "Allow full public read access for container and blob data.",
},
},
Advanced: true,
}},
})
}
@@ -258,7 +241,6 @@ type Options struct {
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
Enc encoder.MultiEncoder `config:"encoding"`
PublicAccess string `config:"public_access"`
}
// Fs represents a remote azure server
@@ -280,7 +262,6 @@ type Fs struct {
imdsPacer *fs.Pacer // Same but for IMDS
uploadToken *pacer.TokenDispenser // control concurrency
pool *pool.Pool // memory pool
publicAccess azblob.PublicAccessType // Container Public Access Level
}
// Object describes an azure object
@@ -354,19 +335,6 @@ func validateAccessTier(tier string) bool {
}
}
// validatePublicAccess checks if azureblob supports use supplied public access level
func validatePublicAccess(publicAccess string) bool {
switch publicAccess {
case string(azblob.PublicAccessNone),
string(azblob.PublicAccessBlob),
string(azblob.PublicAccessContainer):
// valid cases
return true
default:
return false
}
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
401, // Unauthorized (e.g. "Token has expired")
@@ -379,10 +347,7 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func (f *Fs) shouldRetry(err error) (bool, error) {
// FIXME interpret special errors - more to do here
if storageErr, ok := err.(azblob.StorageError); ok {
switch storageErr.ServiceCode() {
@@ -534,11 +499,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive))
}
if !validatePublicAccess((opt.PublicAccess)) {
return nil, errors.Errorf("Azure Blob: Supported public access level are %s and %s",
string(azblob.PublicAccessBlob), string(azblob.PublicAccessContainer))
}
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
@@ -557,7 +517,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
opt.MemoryPoolUseMmap,
),
}
f.publicAccess = azblob.PublicAccessType(opt.PublicAccess)
f.imdsPacer.SetRetries(5) // per IMDS documentation
f.setRoot(root)
f.features = (&fs.Features{
@@ -619,7 +578,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Retry as specified by the documentation:
// https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token#retry-guidance
token, err = GetMSIToken(ctx, userMSI)
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
@@ -635,7 +594,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
var refreshedToken adal.Token
err := f.imdsPacer.Call(func() (bool, error) {
refreshedToken, err = GetMSIToken(ctx, userMSI)
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
// Failed to refresh.
@@ -844,7 +803,7 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
err := f.pacer.Call(func() (bool, error) {
var err error
response, err = f.cntURL(container).ListBlobsHierarchySegment(ctx, marker, delimiter, options)
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
@@ -1070,7 +1029,7 @@ func (f *Fs) listContainersToFn(fn listContainerFn) error {
err := f.pacer.Call(func() (bool, error) {
var err error
response, err = f.svcURL.ListContainersSegment(ctx, marker, params)
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
return err
@@ -1122,7 +1081,7 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
}
// now try to create the container
return f.pacer.Call(func() (bool, error) {
_, err := f.cntURL(container).Create(ctx, azblob.Metadata{}, f.publicAccess)
_, err := f.cntURL(container).Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
if err != nil {
if storageErr, ok := err.(azblob.StorageError); ok {
switch storageErr.ServiceCode() {
@@ -1139,7 +1098,7 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
}
}
}
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
}, nil)
}
@@ -1177,10 +1136,10 @@ func (f *Fs) deleteContainer(ctx context.Context, container string) error {
return false, fs.ErrorDirNotFound
}
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
}
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
})
}
@@ -1253,7 +1212,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
err = f.pacer.Call(func() (bool, error) {
startCopy, err = dstBlobURL.StartCopyFromURL(ctx, *source, nil, azblob.ModifiedAccessConditions{}, options, azblob.AccessTierType(f.opt.AccessTier), nil)
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
return nil, err
@@ -1414,7 +1373,7 @@ func (o *Object) readMetaData() (err error) {
var blobProperties *azblob.BlobGetPropertiesResponse
err = o.fs.pacer.Call(func() (bool, error) {
blobProperties, err = blob.GetProperties(ctx, options, azblob.ClientProvidedKeyOptions{})
return o.fs.shouldRetry(ctx, err)
return o.fs.shouldRetry(err)
})
if err != nil {
// On directories - GetProperties does not work and current SDK does not populate service code correctly hence check regular http response as well
@@ -1449,7 +1408,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
blob := o.getBlobReference()
err := o.fs.pacer.Call(func() (bool, error) {
_, err := blob.SetMetadata(ctx, o.meta, azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{})
return o.fs.shouldRetry(ctx, err)
return o.fs.shouldRetry(err)
})
if err != nil {
return err
@@ -1492,7 +1451,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
var downloadResponse *azblob.DownloadResponse
err = o.fs.pacer.Call(func() (bool, error) {
downloadResponse, err = blob.Download(ctx, offset, count, ac, false, azblob.ClientProvidedKeyOptions{})
return o.fs.shouldRetry(ctx, err)
return o.fs.shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to open for download")
@@ -1633,7 +1592,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Stream contents of the reader object to the given blob URL
blockBlobURL := blob.ToBlockBlobURL()
_, err = azblob.UploadStreamToBlockBlob(ctx, in, blockBlobURL, putBlobOptions)
return o.fs.shouldRetry(ctx, err)
return o.fs.shouldRetry(err)
})
if err != nil {
return err
@@ -1661,7 +1620,7 @@ func (o *Object) Remove(ctx context.Context) error {
ac := azblob.BlobAccessConditions{}
return o.fs.pacer.Call(func() (bool, error) {
_, err := blob.Delete(ctx, snapShotOptions, ac)
return o.fs.shouldRetry(ctx, err)
return o.fs.shouldRetry(err)
})
}
@@ -1690,7 +1649,7 @@ func (o *Object) SetTier(tier string) error {
ctx := context.Background()
err := o.fs.pacer.Call(func() (bool, error) {
_, err := blob.SetTier(ctx, desiredAccessTier, azblob.LeaseAccessConditions{})
return o.fs.shouldRetry(ctx, err)
return o.fs.shouldRetry(err)
})
if err != nil {
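
The other half of this diff adds or removes the container public_access option, which feeds the ACL argument of cntURL(container).Create above. Assuming rclone's usual mapping of backend options to command-line flags (the flag spelling below is inferred from the option name, not shown in this diff), creating a container readable by anonymous blob requests would look like:

```
rclone mkdir --azureblob-public-access blob azureblob:my-public-container
```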


@@ -305,10 +305,7 @@ var retryErrorCodes = []int{
// shouldRetryNoReauth returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetryNoReauth(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func (f *Fs) shouldRetryNoReauth(resp *http.Response, err error) (bool, error) {
// For 429 or 503 errors look at the Retry-After: header and
// set the retry appropriately, starting with a minimum of 1
// second if it isn't set.
@@ -339,7 +336,7 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
}
return true, err
}
return f.shouldRetryNoReauth(ctx, resp, err)
return f.shouldRetryNoReauth(resp, err)
}
// errorHandler parses a non 2xx error response into an error
@@ -482,9 +479,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.setRoot(newRoot)
_, err := f.NewObject(ctx, leaf)
if err != nil {
// File doesn't exist so return old f
f.setRoot(oldRoot)
return f, nil
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
f.setRoot(oldRoot)
return f, nil
}
return nil, err
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
@@ -507,7 +507,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
}
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &f.info)
return f.shouldRetryNoReauth(ctx, resp, err)
return f.shouldRetryNoReauth(resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to authenticate")
@@ -1744,13 +1744,6 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
ContentType: resp.Header.Get("Content-Type"),
Info: Info,
}
// When reading files from B2 via cloudflare using
// --b2-download-url cloudflare strips the Content-Length
// headers (presumably so it can inject stuff) so use the old
// length read from the listing.
if info.Size < 0 {
info.Size = o.size
}
return resp, info, nil
}


@@ -317,13 +317,10 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func shouldRetry(resp *http.Response, err error) (bool, error) {
authRetry := false
if resp != nil && resp.StatusCode == 401 && strings.Contains(resp.Header.Get("Www-Authenticate"), "expired_token") {
if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
authRetry = true
fs.Debugf(nil, "Should retry: %v", err)
}
@@ -551,7 +548,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
//fmt.Printf("...Error %v\n", err)
@@ -588,7 +585,7 @@ OUTER:
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return found, errors.Wrap(err, "couldn't list files")
@@ -743,7 +740,7 @@ func (f *Fs) deleteObject(ctx context.Context, id string) error {
}
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
}
@@ -770,7 +767,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "rmdir failed")
@@ -842,7 +839,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var info *api.Item
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &copyFile, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -880,7 +877,7 @@ func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -898,7 +895,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &user)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to read user info")
@@ -1011,7 +1008,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &shareLink, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
return info.SharedLink.URL, err
}
@@ -1029,7 +1026,7 @@ func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
}
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
}
@@ -1051,7 +1048,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't list trash")
@@ -1185,7 +1182,7 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
var info *api.Item
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
return info, err
}
@@ -1218,7 +1215,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -1258,7 +1255,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &upload, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return err


@@ -44,7 +44,7 @@ func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID stri
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, &response)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
return
}
@@ -74,7 +74,7 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
err = o.fs.pacer.Call(func() (bool, error) {
opts.Body = wrap(bytes.NewReader(chunk))
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &response)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -109,10 +109,10 @@ outer:
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
if err != nil {
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
}
body, err = rest.ReadBody(resp)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
delay := defaultDelay
var why string
@@ -167,7 +167,7 @@ func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error)
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
return err
}


@@ -892,7 +892,7 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
m.Set("type", "cache")
m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
} else {
remoteType := config.FileGet(remote, "type")
remoteType := config.FileGet(remote, "type", "")
if remoteType == "" {
t.Skipf("skipped due to invalid remote type for %v", remote)
return nil, nil
@@ -903,14 +903,14 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
m.Set("password", cryptPassword1)
m.Set("password2", cryptPassword2)
}
remoteRemote := config.FileGet(remote, "remote")
remoteRemote := config.FileGet(remote, "remote", "")
if remoteRemote == "" {
t.Skipf("skipped due to invalid remote wrapper for %v", remote)
return nil, nil
}
remoteRemoteParts := strings.Split(remoteRemote, ":")
remoteWrapping := remoteRemoteParts[0]
remoteType := config.FileGet(remoteWrapping, "type")
remoteType := config.FileGet(remoteWrapping, "type", "")
if remoteType != "cache" {
t.Skipf("skipped due to invalid remote type for %v: '%v'", remoteWrapping, remoteType)
return nil, nil
@@ -1034,7 +1034,7 @@ func (r *run) updateObjectRemote(t *testing.T, f fs.Fs, remote string, data1 []b
objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
_, err = f.Put(context.Background(), in1, objInfo1)
obj, err = f.Put(context.Background(), in1, objInfo1)
require.NoError(t, err)
obj, err = f.NewObject(context.Background(), remote)
require.NoError(t, err)


@@ -47,8 +47,7 @@ import (
// The following types of chunks are supported:
// data and control, active and temporary.
// Chunk type is identified by matching chunk file name
// based on the chunk name format configured by user and transaction
// style being used.
// based on the chunk name format configured by user.
//
// Both data and control chunks can be either temporary (aka hidden)
// or active (non-temporary aka normal aka permanent).
@@ -64,12 +63,6 @@ import (
// which is transparently converted to the new format. In its maximum
// length of 13 decimals it makes a 7-digit base-36 number.
//
// When transactions is set to the norename style, data chunks will
// keep their temporary chunk names (with the transaction identifier
// suffix). To distinguish them from temporary chunks, the txn field
// of the metadata file is set to match the transaction identifier of
// the data chunks.
//
// Chunker can tell data chunks from control chunks by the characters
// located in the "hash placeholder" position of configured format.
// Data chunks have decimal digits there.
@@ -108,7 +101,7 @@ const maxMetadataSize = 1023
const maxMetadataSizeWritten = 255
// Current/highest supported metadata format.
const metadataVersion = 2
const metadataVersion = 1
// optimizeFirstChunk enables the following optimization in the Put:
// If a single chunk is expected, put the first chunk using the
@@ -231,31 +224,6 @@ It has the following fields: ver, size, nchunks, md5, sha1.`,
Help: "Warn user, skip incomplete file and proceed.",
},
},
}, {
Name: "transactions",
Advanced: true,
Default: "rename",
Help: `Choose how chunker should handle temporary files during transactions.`,
Hide: fs.OptionHideCommandLine,
Examples: []fs.OptionExample{
{
Value: "rename",
Help: "Rename temporary files after a successful transaction.",
}, {
Value: "norename",
Help: `Leave temporary file names and write transaction ID to metadata file.
Metadata is required for no rename transactions (meta format cannot be "none").
If you are using norename transactions you should be careful not to downgrade Rclone
as older versions of Rclone don't support this transaction style and will misinterpret
files manipulated by norename transactions.
This method is EXPERIMENTAL, don't use on production systems.`,
}, {
Value: "auto",
Help: `Rename or norename will be used depending on capabilities of the backend.
If meta format is set to "none", rename transactions will always be used.
This method is EXPERIMENTAL, don't use on production systems.`,
},
},
}},
})
}
@@ -277,10 +245,13 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
return nil, errors.New("can't point remote at itself - check the value of the remote setting")
}
baseName, basePath, err := fspath.SplitFs(remote)
baseName, basePath, err := fspath.Parse(remote)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
}
if baseName != "" {
baseName += ":"
}
// Look for a file first
remotePath := fspath.JoinRootPath(basePath, rpath)
baseFs, err := cache.Get(ctx, baseName+remotePath)
@@ -300,7 +271,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
cache.PinUntilFinalized(f.base, f)
f.dirSort = true // processEntries requires that meta Objects prerun data chunks atm.
if err := f.configure(opt.NameFormat, opt.MetaFormat, opt.HashType, opt.Transactions); err != nil {
if err := f.configure(opt.NameFormat, opt.MetaFormat, opt.HashType); err != nil {
return nil, err
}
@@ -338,14 +309,13 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
// Options defines the configuration for this backend
type Options struct {
Remote string `config:"remote"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
NameFormat string `config:"name_format"`
StartFrom int `config:"start_from"`
MetaFormat string `config:"meta_format"`
HashType string `config:"hash_type"`
FailHard bool `config:"fail_hard"`
Transactions string `config:"transactions"`
Remote string `config:"remote"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
NameFormat string `config:"name_format"`
StartFrom int `config:"start_from"`
MetaFormat string `config:"meta_format"`
HashType string `config:"hash_type"`
FailHard bool `config:"fail_hard"`
}
// Fs represents a wrapped fs.Fs
@@ -367,13 +337,12 @@ type Fs struct {
opt Options // copy of Options
features *fs.Features // optional features
dirSort bool // reserved for future, ignored
useNoRename bool // can be set with the transactions option
}
// configure sets up chunker for given name format, meta format and hash type.
// It also seeds the source of random transaction identifiers.
// configure must be called only from NewFs or by unit tests.
func (f *Fs) configure(nameFormat, metaFormat, hashType, transactionMode string) error {
func (f *Fs) configure(nameFormat, metaFormat, hashType string) error {
if err := f.setChunkNameFormat(nameFormat); err != nil {
return errors.Wrapf(err, "invalid name format '%s'", nameFormat)
}
@@ -383,9 +352,6 @@ func (f *Fs) configure(nameFormat, metaFormat, hashType, transactionMode string)
if err := f.setHashType(hashType); err != nil {
return err
}
if err := f.setTransactionMode(transactionMode); err != nil {
return err
}
randomSeed := time.Now().UnixNano()
f.xactIDRand = rand.New(rand.NewSource(randomSeed))
@@ -445,27 +411,6 @@ func (f *Fs) setHashType(hashType string) error {
return nil
}
func (f *Fs) setTransactionMode(transactionMode string) error {
switch transactionMode {
case "rename":
f.useNoRename = false
case "norename":
if !f.useMeta {
return errors.New("incompatible transaction options")
}
f.useNoRename = true
case "auto":
f.useNoRename = !f.CanQuickRename()
if f.useNoRename && !f.useMeta {
f.useNoRename = false
return errors.New("using norename transactions requires metadata")
}
default:
return fmt.Errorf("unsupported transaction mode '%s'", transactionMode)
}
return nil
}
// setChunkNameFormat converts pattern based chunk name format
// into Printf format and Regular expressions for data and
// control chunks.
@@ -748,7 +693,6 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
byRemote := make(map[string]*Object)
badEntry := make(map[string]bool)
isSubdir := make(map[string]bool)
txnByRemote := map[string]string{}
var tempEntries fs.DirEntries
for _, dirOrObject := range sortedEntries {
@@ -761,18 +705,12 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
object := f.newObject("", entry, nil)
byRemote[remote] = object
tempEntries = append(tempEntries, object)
if f.useNoRename {
txnByRemote[remote], err = object.readXactID(ctx)
if err != nil {
return nil, err
}
}
break
}
// this is some kind of chunk
// metobject should have been created above if present
isSpecial := xactID != "" || ctrlType != ""
mainObject := byRemote[mainRemote]
isSpecial := xactID != txnByRemote[mainRemote] || ctrlType != ""
if mainObject == nil && f.useMeta && !isSpecial {
fs.Debugf(f, "skip orphan data chunk %q", remote)
break
@@ -871,11 +809,10 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
}
var (
o *Object
baseObj fs.Object
currentXactID string
err error
sameMain bool
o *Object
baseObj fs.Object
err error
sameMain bool
)
if f.useMeta {
@@ -919,14 +856,7 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
return nil, errors.Wrap(err, "can't detect composite file")
}
if f.useNoRename {
currentXactID, err = o.readXactID(ctx)
if err != nil {
return nil, err
}
}
caseInsensitive := f.features.CaseInsensitive
for _, dirOrObject := range entries {
entry, ok := dirOrObject.(fs.Object)
if !ok {
@@ -948,7 +878,7 @@ func (f *Fs) scanObject(ctx context.Context, remote string, quickScan bool) (fs.
if !sameMain {
continue // skip alien chunks
}
if ctrlType != "" || xactID != currentXactID {
if ctrlType != "" || xactID != "" {
if f.useMeta {
// temporary/control chunk calls for lazy metadata read
o.unsure = true
@@ -1063,57 +993,12 @@ func (o *Object) readMetadata(ctx context.Context) error {
}
o.md5 = metaInfo.md5
o.sha1 = metaInfo.sha1
o.xactID = metaInfo.xactID
}
o.isFull = true // cache results
o.xIDCached = true
return nil
}
// readXactID returns the transaction ID stored in the passed metadata object
func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
// if xactID has already been read and cached return it now
if o.xIDCached {
return o.xactID, nil
}
// Avoid reading metadata for backends that don't use xactID to identify permanent chunks
if !o.f.useNoRename {
return "", errors.New("readXactID requires norename transactions")
}
if o.main == nil {
return "", errors.New("readXactID requires valid metaobject")
}
if o.main.Size() > maxMetadataSize {
return "", nil // this was likely not a metadata object, return empty xactID but don't throw error
}
reader, err := o.main.Open(ctx)
if err != nil {
return "", err
}
data, err := ioutil.ReadAll(reader)
_ = reader.Close() // ensure file handle is freed on windows
if err != nil {
return "", err
}
switch o.f.opt.MetaFormat {
case "simplejson":
if data != nil && len(data) > maxMetadataSizeWritten {
return "", nil // this was likely not a metadata object, return empty xactID but don't throw error
}
var metadata metaSimpleJSON
err = json.Unmarshal(data, &metadata)
if err != nil {
return "", nil // this was likely not a metadata object, return empty xactID but don't throw error
}
xactID = metadata.XactID
}
o.xactID = xactID
o.xIDCached = true
return xactID, nil
}
// put implements Put, PutStream, PutUnchecked, Update
func (f *Fs) put(
ctx context.Context, in io.Reader, src fs.ObjectInfo, remote string, options []fs.OpenOption,
@@ -1266,17 +1151,14 @@ func (f *Fs) put(
// If previous object was chunked, remove its chunks
f.removeOldChunks(ctx, baseRemote)
if !f.useNoRename {
// The transaction suffix will be removed for backends with quick rename operations
for chunkNo, chunk := range c.chunks {
chunkRemote := f.makeChunkName(baseRemote, chunkNo, "", "")
chunkMoved, errMove := f.baseMove(ctx, chunk, chunkRemote, delFailed)
if errMove != nil {
return nil, errMove
}
c.chunks[chunkNo] = chunkMoved
// Rename data chunks from temporary to final names
for chunkNo, chunk := range c.chunks {
chunkRemote := f.makeChunkName(baseRemote, chunkNo, "", "")
chunkMoved, errMove := f.baseMove(ctx, chunk, chunkRemote, delFailed)
if errMove != nil {
return nil, errMove
}
xactID = ""
c.chunks[chunkNo] = chunkMoved
}
if !f.useMeta {
@@ -1296,7 +1178,7 @@ func (f *Fs) put(
switch f.opt.MetaFormat {
case "simplejson":
c.updateHashes()
metadata, err = marshalSimpleJSON(ctx, sizeTotal, len(c.chunks), c.md5, c.sha1, xactID)
metadata, err = marshalSimpleJSON(ctx, sizeTotal, len(c.chunks), c.md5, c.sha1)
}
if err == nil {
metaInfo := f.wrapInfo(src, baseRemote, int64(len(metadata)))
@@ -1308,7 +1190,6 @@ func (f *Fs) put(
o := f.newObject("", metaObject, c.chunks)
o.size = sizeTotal
o.xactID = xactID
return o, nil
}
@@ -1712,7 +1593,7 @@ func (f *Fs) copyOrMove(ctx context.Context, o *Object, remote string, do copyMo
var metadata []byte
switch f.opt.MetaFormat {
case "simplejson":
metadata, err = marshalSimpleJSON(ctx, newObj.size, len(newChunks), md5, sha1, o.xactID)
metadata, err = marshalSimpleJSON(ctx, newObj.size, len(newChunks), md5, sha1)
if err == nil {
metaInfo := f.wrapInfo(metaObject, "", int64(len(metadata)))
err = newObj.main.Update(ctx, bytes.NewReader(metadata), metaInfo)
@@ -1928,13 +1809,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
//fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
if entryType == fs.EntryObject {
mainPath, _, _, xactID := f.parseChunkName(path)
metaXactID := ""
if f.useNoRename {
metaObject, _ := f.base.NewObject(ctx, mainPath)
dummyObject := f.newObject("", metaObject, nil)
metaXactID, _ = dummyObject.readXactID(ctx)
}
if mainPath != "" && xactID == metaXactID {
if mainPath != "" && xactID == "" {
path = mainPath
}
}
@@ -1955,17 +1830,15 @@ func (f *Fs) Shutdown(ctx context.Context) error {
// Object represents a composite file wrapping one or more data chunks
type Object struct {
remote string
main fs.Object // meta object if file is composite, or wrapped non-chunked file, nil if meta format is 'none'
chunks []fs.Object // active data chunks if file is composite, or wrapped file as a single chunk if meta format is 'none'
size int64 // cached total size of chunks in a composite file or -1 for non-chunked files
isFull bool // true if metadata has been read
xIDCached bool // true if xactID has been read
unsure bool // true if need to read metadata to detect object type
xactID string // transaction ID for "norename" or empty string for "renamed" chunks
md5 string
sha1 string
f *Fs
remote string
main fs.Object // meta object if file is composite, or wrapped non-chunked file, nil if meta format is 'none'
chunks []fs.Object // active data chunks if file is composite, or wrapped file as a single chunk if meta format is 'none'
size int64 // cached total size of chunks in a composite file or -1 for non-chunked files
isFull bool // true if metadata has been read
unsure bool // true if need to read metadata to detect object type
md5 string
sha1 string
f *Fs
}
func (o *Object) addChunk(chunk fs.Object, chunkNo int) error {
@@ -2293,7 +2166,6 @@ type ObjectInfo struct {
src fs.ObjectInfo
fs *Fs
nChunks int // number of data chunks
xactID string // transaction ID for "norename" or empty string for "renamed" chunks
size int64 // overrides source size by the total size of data chunks
remote string // overrides remote name
md5 string // overrides MD5 checksum
@@ -2392,9 +2264,8 @@ type metaSimpleJSON struct {
Size *int64 `json:"size"` // total size of data chunks
ChunkNum *int `json:"nchunks"` // number of data chunks
// optional extra fields
MD5 string `json:"md5,omitempty"`
SHA1 string `json:"sha1,omitempty"`
XactID string `json:"txn,omitempty"` // transaction ID for norename transactions
MD5 string `json:"md5,omitempty"`
SHA1 string `json:"sha1,omitempty"`
}
// marshalSimpleJSON
@@ -2404,20 +2275,16 @@ type metaSimpleJSON struct {
// - if file contents can be mistaken for a meta object
// - if consistent hashing is On but wrapped remote can't provide given hash
//
func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1, xactID string) ([]byte, error) {
func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1 string) ([]byte, error) {
version := metadataVersion
if xactID == "" && version == 2 {
version = 1
}
metadata := metaSimpleJSON{
// required core fields
Version: &version,
Size: &size,
ChunkNum: &nChunks,
// optional extra fields
MD5: md5,
SHA1: sha1,
XactID: xactID,
MD5: md5,
SHA1: sha1,
}
data, err := json.Marshal(&metadata)
if err == nil && data != nil && len(data) >= maxMetadataSizeWritten {
@@ -2495,7 +2362,6 @@ func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte)
info.nChunks = *metadata.ChunkNum
info.md5 = metadata.MD5
info.sha1 = metadata.SHA1
info.xactID = metadata.XactID
return info, true, nil
}
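
As an aside on the versioning logic above, here is a minimal sketch (not part of the diff, reusing the pre-revert signature) of how an empty transaction ID degrades the metadata to version 1, while a norename transaction produces version 2 carrying the "txn" field:

// sketchWriteMeta is a hypothetical helper assuming metadataVersion == 2,
// as on the feature branch; the argument values mirror the test call
// further down in this diff.
func sketchWriteMeta(ctx context.Context, withTxn bool) ([]byte, error) {
	xactID := ""
	if withTxn {
		xactID = "0123456789" // made-up transaction ID
	}
	// size 3, one chunk, no hashes -> version 1 without txn, version 2 with it
	return marshalSimpleJSON(ctx, 3, 1, "", "", xactID)
}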
@@ -2528,11 +2394,6 @@ func (f *Fs) Precision() time.Duration {
return f.base.Precision()
}
// CanQuickRename returns true if the Fs supports a quick rename operation
func (f *Fs) CanQuickRename() bool {
return f.base.Features().Move != nil
}
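
Since CanQuickRename is an optional extension rather than part of fs.Fs, a caller would probe for it with a type assertion; a hedged sketch of that wiring (the interface literal below is an assumption, not code from this diff):

if qr, ok := anyFs.(interface{ CanQuickRename() bool }); ok && qr.CanQuickRename() {
	// cheap server-side Move available: chunks can be renamed from
	// temporary to final names instead of using norename transactions
}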
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)


@@ -468,15 +468,9 @@ func testPreventCorruption(t *testing.T, f *Fs) {
return obj
}
billyObj := newFile("billy")
billyTxn := billyObj.(*Object).xactID
if f.useNoRename {
require.True(t, billyTxn != "")
} else {
require.True(t, billyTxn == "")
}
billyChunkName := func(chunkNo int) string {
return f.makeChunkName(billyObj.Remote(), chunkNo, "", billyTxn)
return f.makeChunkName(billyObj.Remote(), chunkNo, "", "")
}
err := f.Mkdir(ctx, billyChunkName(1))
@@ -493,13 +487,11 @@ func testPreventCorruption(t *testing.T, f *Fs) {
// accessing chunks in strict mode is prohibited
f.opt.FailHard = true
billyChunk4Name := billyChunkName(4)
_, err = f.base.NewObject(ctx, billyChunk4Name)
require.NoError(t, err)
_, err = f.NewObject(ctx, billyChunk4Name)
billyChunk4, err := f.NewObject(ctx, billyChunk4Name)
assertOverlapError(err)
f.opt.FailHard = false
billyChunk4, err := f.NewObject(ctx, billyChunk4Name)
billyChunk4, err = f.NewObject(ctx, billyChunk4Name)
assert.NoError(t, err)
require.NotNil(t, billyChunk4)
@@ -528,8 +520,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
// recreate billy in case it was anyhow corrupted
willyObj := newFile("willy")
willyTxn := willyObj.(*Object).xactID
willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", willyTxn)
willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", "")
f.opt.FailHard = false
willyChunk, err := f.NewObject(ctx, willyChunkName)
f.opt.FailHard = true
@@ -570,20 +561,17 @@ func testChunkNumberOverflow(t *testing.T, f *Fs) {
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
contents := random.String(100)
newFile := func(f fs.Fs, name string) (obj fs.Object, filename string, txnID string) {
filename = path.Join(dir, name)
newFile := func(f fs.Fs, name string) (fs.Object, string) {
filename := path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
_, obj = fstests.PutTestContents(ctx, t, f, &item, contents, true)
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
if chunkObj, isChunkObj := obj.(*Object); isChunkObj {
txnID = chunkObj.xactID
}
return
return obj, filename
}
f.opt.FailHard = false
file, fileName, fileTxn := newFile(f, "wreaker")
wreak, _, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", fileTxn))
file, fileName := newFile(f, "wreaker")
wreak, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", ""))
f.opt.FailHard = false
fstest.CheckListingWithRoot(t, f, dir, nil, nil, f.Precision())
@@ -662,7 +650,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
}
}
metaData, err := marshalSimpleJSON(ctx, 3, 1, "", "", "")
metaData, err := marshalSimpleJSON(ctx, 3, 1, "", "")
require.NoError(t, err)
todaysMeta := string(metaData)
runSubtest(todaysMeta, "today")
@@ -676,7 +664,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
runSubtest(futureMeta, "future")
}
// Test that chunker refuses to change on objects with future/unknown metadata
// test that chunker refuses to change on objects with future/unknown metadata
func testFutureProof(t *testing.T, f *Fs) {
if f.opt.MetaFormat == "none" {
t.Skip("this test requires metadata support")
@@ -750,100 +738,6 @@ func testFutureProof(t *testing.T, f *Fs) {
}
}
// The newer method of doing transactions without renaming should still be able to correctly process chunks that were created with renaming
// If you attempt to do the inverse, however, the data chunks will be ignored causing commands to perform incorrectly
func testBackwardsCompatibility(t *testing.T, f *Fs) {
if !f.useMeta {
t.Skip("Can't do norename transactions without metadata")
}
const dir = "backcomp"
ctx := context.Background()
saveOpt := f.opt
saveUseNoRename := f.useNoRename
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
f.useNoRename = saveUseNoRename
}()
f.opt.ChunkSize = fs.SizeSuffix(10)
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
contents := random.String(250)
newFile := func(f fs.Fs, name string) (fs.Object, string) {
filename := path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
return obj, filename
}
f.opt.FailHard = false
f.useNoRename = false
file, fileName := newFile(f, "renamefile")
f.opt.FailHard = false
item := fstest.NewItem(fileName, contents, modTime)
var items []fstest.Item
items = append(items, item)
f.useNoRename = true
fstest.CheckListingWithRoot(t, f, dir, items, nil, f.Precision())
_, err := f.NewObject(ctx, fileName)
assert.NoError(t, err)
f.opt.FailHard = true
_, err = f.List(ctx, dir)
assert.NoError(t, err)
f.opt.FailHard = false
_ = file.Remove(ctx)
}
func testChunkerServerSideMove(t *testing.T, f *Fs) {
if !f.useMeta {
t.Skip("Can't test norename transactions without metadata")
}
ctx := context.Background()
const dir = "servermovetest"
subRemote := fmt.Sprintf("%s:%s/%s", f.Name(), f.Root(), dir)
subFs1, err := fs.NewFs(ctx, subRemote+"/subdir1")
assert.NoError(t, err)
fs1, isChunkerFs := subFs1.(*Fs)
assert.True(t, isChunkerFs)
fs1.useNoRename = false
fs1.opt.ChunkSize = fs.SizeSuffix(3)
subFs2, err := fs.NewFs(ctx, subRemote+"/subdir2")
assert.NoError(t, err)
fs2, isChunkerFs := subFs2.(*Fs)
assert.True(t, isChunkerFs)
fs2.useNoRename = true
fs2.opt.ChunkSize = fs.SizeSuffix(3)
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
item := fstest.Item{Path: "movefile", ModTime: modTime}
contents := "abcdef"
_, file := fstests.PutTestContents(ctx, t, fs1, &item, contents, true)
dstOverwritten, _ := fs2.NewObject(ctx, "movefile")
dstFile, err := operations.Move(ctx, fs2, dstOverwritten, "movefile", file)
assert.NoError(t, err)
assert.Equal(t, int64(len(contents)), dstFile.Size())
r, err := dstFile.Open(ctx)
assert.NoError(t, err)
assert.NotNil(t, r)
data, err := ioutil.ReadAll(r)
assert.NoError(t, err)
assert.Equal(t, contents, string(data))
_ = r.Close()
_ = operations.Purge(ctx, f.base, dir)
}
// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
t.Run("PutLarge", func(t *testing.T) {
@@ -870,12 +764,6 @@ func (f *Fs) InternalTest(t *testing.T) {
t.Run("FutureProof", func(t *testing.T) {
testFutureProof(t, f)
})
t.Run("BackwardsCompatibility", func(t *testing.T) {
testBackwardsCompatibility(t, f)
})
t.Run("ChunkerServerSideMove", func(t *testing.T) {
testChunkerServerSideMove(t, f)
})
}
var _ fstests.InternalTester = (*Fs)(nil)


@@ -101,21 +101,6 @@ names, or for debugging purposes.`,
Default: false,
Hide: fs.OptionHideConfigurator,
Advanced: true,
}, {
Name: "no_data_encryption",
Help: "Option to either encrypt file data or leave it unencrypted.",
Default: false,
Advanced: true,
Examples: []fs.OptionExample{
{
Value: "true",
Help: "Don't encrypt file data, leave it unencrypted.",
},
{
Value: "false",
Help: "Encrypt file data.",
},
},
}},
})
}
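
For orientation, the option removed above was driven from ordinary remote config; a hypothetical rclone.conf fragment matching the keys registered here and the test config further down (remote name and path are made up, password omitted for brevity):

[cryptdemo]
type = crypt
remote = /tmp/rclone-crypt-test
filename_encryption = obfuscate
no_data_encryption = true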
@@ -224,7 +209,6 @@ type Options struct {
Remote string `config:"remote"`
FilenameEncryption string `config:"filename_encryption"`
DirectoryNameEncryption bool `config:"directory_name_encryption"`
NoDataEncryption bool `config:"no_data_encryption"`
Password string `config:"password"`
Password2 string `config:"password2"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
@@ -362,10 +346,6 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
// put implements Put or PutStream
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
if f.opt.NoDataEncryption {
return put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
}
// Encrypt the data into wrappedIn
wrappedIn, encrypter, err := f.cipher.encryptData(in)
if err != nil {
@@ -404,16 +384,13 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
if err != nil {
return nil, errors.Wrap(err, "failed to read destination hash")
}
if srcHash != "" && dstHash != "" {
if srcHash != dstHash {
// remove object
err = o.Remove(ctx)
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
}
fs.Debugf(src, "%v = %s OK", ht, srcHash)
}
if srcHash != "" && dstHash != "" && srcHash != dstHash {
// remove object
err = o.Remove(ctx)
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
}
@@ -640,10 +617,6 @@ func (f *Fs) computeHashWithNonce(ctx context.Context, nonce nonce, src fs.Objec
//
// Note that we break lots of encapsulation in this function.
func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
if f.opt.NoDataEncryption {
return src.Hash(ctx, hashType)
}
// Read the nonce - opening the file is sufficient to read the nonce in
// use a limited read so we only read the header
in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
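
The limited read above is the whole trick: only the encrypted file header is fetched, which is enough to recover the nonce without downloading the body. A hedged sketch as a standalone helper (hypothetical name, error handling trimmed):

func readHeader(ctx context.Context, o *Object) ([]byte, error) {
	// ranged open: at most fileHeaderSize bytes travel over the wire
	in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
	if err != nil {
		return nil, err
	}
	defer func() { _ = in.Close() }()
	return ioutil.ReadAll(in)
}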
@@ -849,13 +822,9 @@ func (o *Object) Remote() string {
// Size returns the size of the file
func (o *Object) Size() int64 {
size := o.Object.Size()
if !o.f.opt.NoDataEncryption {
var err error
size, err = o.f.cipher.DecryptedSize(size)
if err != nil {
fs.Debugf(o, "Bad size for decrypt: %v", err)
}
}
size, err := o.f.cipher.DecryptedSize(o.Object.Size())
if err != nil {
fs.Debugf(o, "Bad size for decrypt: %v", err)
}
return size
}
@@ -873,10 +842,6 @@ func (o *Object) UnWrap() fs.Object {
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
if o.f.opt.NoDataEncryption {
return o.Object.Open(ctx, options...)
}
var openOptions []fs.OpenOption
var offset, limit int64 = 0, -1
for _, option := range options {


@@ -91,26 +91,3 @@ func TestObfuscate(t *testing.T) {
UnimplementableObjectMethods: []string{"MimeType"},
})
}
// TestNoDataObfuscate runs integration tests against the remote
func TestNoDataObfuscate(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
name := "TestCrypt4"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*crypt.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "crypt"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
{Name: name, Key: "filename_encryption", Value: "obfuscate"},
{Name: name, Key: "no_data_encryption", Value: "true"},
},
SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}


@@ -590,13 +590,13 @@ type Fs struct {
}
type baseObject struct {
fs *Fs // what this object is part of
remote string // The remote path
id string // Drive Id of this object
modifiedDate string // RFC3339 time it was last modified
mimeType string // The object MIME type
bytes int64 // size of the object
parents []string // IDs of the parent directories
fs *Fs // what this object is part of
remote string // The remote path
id string // Drive Id of this object
modifiedDate string // RFC3339 time it was last modified
mimeType string // The object MIME type
bytes int64 // size of the object
parents int // number of parents
}
type documentObject struct {
baseObject
@@ -641,10 +641,7 @@ func (f *Fs) Features() *fs.Features {
}
// shouldRetry determines whether a given err rates being retried
func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func (f *Fs) shouldRetry(err error) (bool, error) {
if err == nil {
return false, nil
}
@@ -698,20 +695,20 @@ func containsString(slice []string, s string) bool {
}
// getFile returns drive.File for the ID passed and fields passed in
func (f *Fs) getFile(ctx context.Context, ID string, fields googleapi.Field) (info *drive.File, err error) {
func (f *Fs) getFile(ID string, fields googleapi.Field) (info *drive.File, err error) {
err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Get(ID).
Fields(fields).
SupportsAllDrives(true).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
Do()
return f.shouldRetry(err)
})
return info, err
}
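
The pattern above repeats for every drive API call in this file, so it is worth spelling out once: a sketch of the pacer wrapper after this change, where a cancelled context stops retries immediately instead of being treated like a transient server error (hypothetical standalone example):

var info *drive.File
err := f.pacer.Call(func() (bool, error) {
	var callErr error
	info, callErr = f.svc.Files.Get("root").
		Fields("id").
		SupportsAllDrives(true).
		Context(ctx).Do()
	return f.shouldRetry(ctx, callErr) // returns false immediately on ctx cancellation
})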
// getRootID returns the canonical ID for the "root" ID
func (f *Fs) getRootID(ctx context.Context) (string, error) {
info, err := f.getFile(ctx, "root", "id")
func (f *Fs) getRootID() (string, error) {
info, err := f.getFile("root", "id")
if err != nil {
return "", errors.Wrap(err, "couldn't find root directory ID")
}
@@ -817,7 +814,7 @@ OUTER:
var files *drive.FileList
err = f.pacer.Call(func() (bool, error) {
files, err = list.Fields(googleapi.Field(fields)).Context(ctx).Do()
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
return false, errors.Wrap(err, "couldn't list directory")
@@ -840,7 +837,7 @@ OUTER:
if filesOnly && item.ShortcutDetails.TargetMimeType == driveFolderType {
continue
}
item, err = f.resolveShortcut(ctx, item)
item, err = f.resolveShortcut(item)
if err != nil {
return false, errors.Wrap(err, "list")
}
@@ -858,7 +855,7 @@ OUTER:
if !found {
continue
}
_, exportName, _, _ := f.findExportFormat(ctx, item)
_, exportName, _, _ := f.findExportFormat(item)
if exportName == "" || exportName != title {
continue
}
@@ -1158,7 +1155,7 @@ func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, e
f.rootFolderID = f.opt.TeamDriveID
} else {
// otherwise look up the actual root ID
rootID, err := f.getRootID(ctx)
rootID, err := f.getRootID()
if err != nil {
if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
// 404 means that this scope does not have permission to get the
@@ -1239,7 +1236,7 @@ func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
modifiedDate: modifiedDate,
mimeType: info.MimeType,
bytes: size,
parents: info.Parents,
parents: len(info.Parents),
}
}
@@ -1331,26 +1328,26 @@ func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMim
// newObjectWithInfo creates an fs.Object for any drive.File
//
// When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.File) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(remote string, info *drive.File) (fs.Object, error) {
// If item has MD5 sum or a length it is a file stored on drive
if info.Md5Checksum != "" || info.Size > 0 {
return f.newRegularObject(remote, info), nil
}
extension, exportName, exportMimeType, isDocument := f.findExportFormat(ctx, info)
return f.newObjectWithExportInfo(ctx, remote, info, extension, exportName, exportMimeType, isDocument)
extension, exportName, exportMimeType, isDocument := f.findExportFormat(info)
return f.newObjectWithExportInfo(remote, info, extension, exportName, exportMimeType, isDocument)
}
// newObjectWithExportInfo creates an fs.Object for any drive.File and the result of findExportFormat
//
// When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithExportInfo(
ctx context.Context, remote string, info *drive.File,
remote string, info *drive.File,
extension, exportName, exportMimeType string, isDocument bool) (o fs.Object, err error) {
// Note that resolveShortcut will have been called already if
// we are being called from a listing. However the drive.Item
// will have been resolved so this will do nothing.
info, err = f.resolveShortcut(ctx, info)
info, err = f.resolveShortcut(info)
if err != nil {
return nil, errors.Wrap(err, "new object")
}
@@ -1398,7 +1395,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
}
remote = remote[:len(remote)-len(extension)]
obj, err := f.newObjectWithExportInfo(ctx, remote, info, extension, exportName, exportMimeType, isDocument)
obj, err := f.newObjectWithExportInfo(remote, info, extension, exportName, exportMimeType, isDocument)
switch {
case err != nil:
return nil, err
@@ -1415,7 +1412,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
pathID = actualID(pathID)
found, err = f.list(ctx, []string{pathID}, leaf, true, false, f.opt.TrashedOnly, false, func(item *drive.File) bool {
if !f.opt.SkipGdocs {
_, exportName, _, isDocument := f.findExportFormat(ctx, item)
_, exportName, _, isDocument := f.findExportFormat(item)
if exportName == leaf {
pathIDOut = item.Id
return true
@@ -1450,8 +1447,8 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
info, err = f.svc.Files.Create(createInfo).
Fields("id").
SupportsAllDrives(true).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
Do()
return f.shouldRetry(err)
})
if err != nil {
return "", err
@@ -1486,15 +1483,15 @@ func linkTemplate(mt string) *template.Template {
})
return _linkTemplates[mt]
}
func (f *Fs) fetchFormats(ctx context.Context) {
func (f *Fs) fetchFormats() {
fetchFormatsOnce.Do(func() {
var about *drive.About
var err error
err = f.pacer.Call(func() (bool, error) {
about, err = f.svc.About.Get().
Fields("exportFormats,importFormats").
Context(ctx).Do()
return f.shouldRetry(ctx, err)
Do()
return f.shouldRetry(err)
})
if err != nil {
fs.Errorf(f, "Failed to get Drive exportFormats and importFormats: %v", err)
@@ -1511,8 +1508,8 @@ func (f *Fs) fetchFormats(ctx context.Context) {
// if necessary.
//
// if the fetch fails then it will not export any drive formats
func (f *Fs) exportFormats(ctx context.Context) map[string][]string {
f.fetchFormats(ctx)
func (f *Fs) exportFormats() map[string][]string {
f.fetchFormats()
return _exportFormats
}
@@ -1520,8 +1517,8 @@ func (f *Fs) exportFormats(ctx context.Context) map[string][]string {
// if necessary.
//
// if the fetch fails then it will not import any drive formats
func (f *Fs) importFormats(ctx context.Context) map[string][]string {
f.fetchFormats(ctx)
func (f *Fs) importFormats() map[string][]string {
f.fetchFormats()
return _importFormats
}
@@ -1530,9 +1527,9 @@ func (f *Fs) importFormats(ctx context.Context) map[string][]string {
//
// Look through the exportExtensions and find the first format that can be
// converted. If none found then return ("", "", false)
func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string) (
func (f *Fs) findExportFormatByMimeType(itemMimeType string) (
extension, mimeType string, isDocument bool) {
exportMimeTypes, isDocument := f.exportFormats(ctx)[itemMimeType]
exportMimeTypes, isDocument := f.exportFormats()[itemMimeType]
if isDocument {
for _, _extension := range f.exportExtensions {
_mimeType := mime.TypeByExtension(_extension)
@@ -1559,8 +1556,8 @@ func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string
//
// Look through the exportExtensions and find the first format that can be
// converted. If none found then return ("", "", "", false)
func (f *Fs) findExportFormat(ctx context.Context, item *drive.File) (extension, filename, mimeType string, isDocument bool) {
extension, mimeType, isDocument = f.findExportFormatByMimeType(ctx, item.MimeType)
func (f *Fs) findExportFormat(item *drive.File) (extension, filename, mimeType string, isDocument bool) {
extension, mimeType, isDocument = f.findExportFormatByMimeType(item.MimeType)
if extension != "" {
filename = item.Name + extension
}
@@ -1572,9 +1569,9 @@ func (f *Fs) findExportFormat(ctx context.Context, item *drive.File) (extension,
// MIME type is returned
//
// When no match is found "" is returned.
func (f *Fs) findImportFormat(ctx context.Context, mimeType string) string {
func (f *Fs) findImportFormat(mimeType string) string {
mimeType = fixMimeType(mimeType)
ifs := f.importFormats(ctx)
ifs := f.importFormats()
for _, mt := range f.importMimeTypes {
if mt == mimeType {
importMimeTypes := ifs[mimeType]
@@ -1607,7 +1604,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
var iErr error
_, err = f.list(ctx, []string{directoryID}, "", false, false, f.opt.TrashedOnly, false, func(item *drive.File) bool {
entry, err := f.itemToDirEntry(ctx, path.Join(dir, item.Name), item)
entry, err := f.itemToDirEntry(path.Join(dir, item.Name), item)
if err != nil {
iErr = err
return true
@@ -1720,7 +1717,7 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listRE
}
}
remote := path.Join(paths[i], item.Name)
entry, err := f.itemToDirEntry(ctx, remote, item)
entry, err := f.itemToDirEntry(remote, item)
if err != nil {
iErr = err
return true
@@ -1985,7 +1982,7 @@ func isShortcut(item *drive.File) bool {
// Note that we assume shortcuts can't point to shortcuts. Google
// drive web interface doesn't offer the option to create a shortcut
// to a shortcut. The documentation is silent on the issue.
func (f *Fs) resolveShortcut(ctx context.Context, item *drive.File) (newItem *drive.File, err error) {
func (f *Fs) resolveShortcut(item *drive.File) (newItem *drive.File, err error) {
if f.opt.SkipShortcuts || item.MimeType != shortcutMimeType {
return item, nil
}
@@ -1993,7 +1990,7 @@ func (f *Fs) resolveShortcut(ctx context.Context, item *drive.File) (newItem *dr
fs.Errorf(nil, "Expecting shortcutDetails in %v", item)
return item, nil
}
newItem, err = f.getFile(ctx, item.ShortcutDetails.TargetId, f.fileFields)
newItem, err = f.getFile(item.ShortcutDetails.TargetId, f.fileFields)
if err != nil {
if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
// 404 means dangling shortcut, so just return the shortcut with the mime type mangled
@@ -2015,21 +2012,18 @@ func (f *Fs) resolveShortcut(ctx context.Context, item *drive.File) (newItem *dr
// itemToDirEntry converts a drive.File to an fs.DirEntry.
// When the drive.File cannot be represented as an fs.DirEntry
// (nil, nil) is returned.
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, item *drive.File) (entry fs.DirEntry, err error) {
func (f *Fs) itemToDirEntry(remote string, item *drive.File) (entry fs.DirEntry, err error) {
switch {
case item.MimeType == driveFolderType:
// cache the directory ID for later lookups
f.dirCache.Put(remote, item.Id)
when, _ := time.Parse(timeFormatIn, item.ModifiedTime)
d := fs.NewDir(remote, when).SetID(item.Id)
if len(item.Parents) > 0 {
d.SetParentID(item.Parents[0])
}
return d, nil
case f.opt.AuthOwnerOnly && !isAuthOwned(item):
// ignore object
default:
entry, err = f.newObjectWithInfo(ctx, remote, item)
entry, err = f.newObjectWithInfo(remote, item)
if err == fs.ErrorObjectNotFound {
return nil, nil
}
@@ -2096,12 +2090,12 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
importMimeType := ""
if f.importMimeTypes != nil && !f.opt.SkipGdocs {
importMimeType = f.findImportFormat(ctx, srcMimeType)
importMimeType = f.findImportFormat(srcMimeType)
if isInternalMimeType(importMimeType) {
remote = remote[:len(remote)-len(srcExt)]
exportExt, _, _ = f.findExportFormatByMimeType(ctx, importMimeType)
exportExt, _, _ = f.findExportFormatByMimeType(importMimeType)
if exportExt == "" {
return nil, errors.Errorf("No export format found for %q", importMimeType)
}
@@ -2131,8 +2125,8 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
Fields(partialFields).
SupportsAllDrives(true).
KeepRevisionForever(f.opt.KeepRevisionForever).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
Do()
return f.shouldRetry(err)
})
if err != nil {
return nil, err
@@ -2144,7 +2138,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
return nil, err
}
}
return f.newObjectWithInfo(ctx, remote, info)
return f.newObjectWithInfo(remote, info)
}
// MergeDirs merges the contents of all the directories passed
@@ -2186,8 +2180,8 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
AddParents(dstDir.ID()).
Fields("").
SupportsAllDrives(true).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
Do()
return f.shouldRetry(err)
})
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.Name, srcDir)
@@ -2220,14 +2214,14 @@ func (f *Fs) delete(ctx context.Context, id string, useTrash bool) error {
_, err = f.svc.Files.Update(id, &info).
Fields("").
SupportsAllDrives(true).
Context(ctx).Do()
Do()
} else {
err = f.svc.Files.Delete(id).
Fields("").
SupportsAllDrives(true).
Context(ctx).Do()
Do()
}
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
}
@@ -2340,12 +2334,11 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
if isDoc {
// preserve the description on copy for docs
info, err := f.getFile(ctx, actualID(srcObj.id), "description")
info, err := f.getFile(actualID(srcObj.id), "description")
if err != nil {
fs.Errorf(srcObj, "Failed to read description for Google Doc: %v", err)
} else {
createInfo.Description = info.Description
return nil, errors.Wrap(err, "failed to read description for Google Doc")
}
createInfo.Description = info.Description
} else {
// don't overwrite the description on copy for files
// this should work for docs but it doesn't - it is probably a bug in Google Drive
@@ -2361,13 +2354,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
Fields(partialFields).
SupportsAllDrives(true).
KeepRevisionForever(f.opt.KeepRevisionForever).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
Do()
return f.shouldRetry(err)
})
if err != nil {
return nil, err
}
newObject, err := f.newObjectWithInfo(ctx, remote, info)
newObject, err := f.newObjectWithInfo(remote, info)
if err != nil {
return nil, err
}
@@ -2461,7 +2454,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
}
err := f.pacer.Call(func() (bool, error) {
err := f.svc.Files.EmptyTrash().Context(ctx).Do()
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
@@ -2479,7 +2472,7 @@ func (f *Fs) teamDriveOK(ctx context.Context) (err error) {
var td *drive.Drive
err = f.pacer.Call(func() (bool, error) {
td, err = f.svc.Drives.Get(f.opt.TeamDriveID).Fields("name,id,capabilities,createdTime,restrictions").Context(ctx).Do()
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "failed to get Shared Drive info")
@@ -2502,7 +2495,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var err error
err = f.pacer.Call(func() (bool, error) {
about, err = f.svc.About.Get().Fields("storageQuota").Context(ctx).Do()
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get Drive storageQuota")
@@ -2574,14 +2567,14 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
AddParents(dstParents).
Fields(partialFields).
SupportsAllDrives(true).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
Do()
return f.shouldRetry(err)
})
if err != nil {
return nil, err
}
return f.newObjectWithInfo(ctx, remote, info)
return f.newObjectWithInfo(remote, info)
}
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
@@ -2611,8 +2604,8 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
_, err = f.svc.Permissions.Create(id, permission).
Fields("").
SupportsAllDrives(true).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
Do()
return f.shouldRetry(err)
})
if err != nil {
return "", err
@@ -2654,8 +2647,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
AddParents(dstDirectoryID).
Fields("").
SupportsAllDrives(true).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
Do()
return f.shouldRetry(err)
})
if err != nil {
return err
@@ -2673,7 +2666,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
go func() {
// get the StartPageToken early so all changes from now on get processed
startPageToken, err := f.changeNotifyStartPageToken(ctx)
startPageToken, err := f.changeNotifyStartPageToken()
if err != nil {
fs.Infof(f, "Failed to get StartPageToken: %s", err)
}
@@ -2698,7 +2691,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
}
case <-tickerC:
if startPageToken == "" {
startPageToken, err = f.changeNotifyStartPageToken(ctx)
startPageToken, err = f.changeNotifyStartPageToken()
if err != nil {
fs.Infof(f, "Failed to get StartPageToken: %s", err)
continue
@@ -2713,15 +2706,15 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
}
}()
}
func (f *Fs) changeNotifyStartPageToken(ctx context.Context) (pageToken string, err error) {
func (f *Fs) changeNotifyStartPageToken() (pageToken string, err error) {
var startPageToken *drive.StartPageToken
err = f.pacer.Call(func() (bool, error) {
changes := f.svc.Changes.GetStartPageToken().SupportsAllDrives(true)
if f.isTeamDrive {
changes.DriveId(f.opt.TeamDriveID)
}
startPageToken, err = changes.Context(ctx).Do()
return f.shouldRetry(ctx, err)
startPageToken, err = changes.Do()
return f.shouldRetry(err)
})
if err != nil {
return
@@ -2750,7 +2743,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
changesCall.Spaces("appDataFolder")
}
changeList, err = changesCall.Context(ctx).Do()
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
return
@@ -2946,8 +2939,8 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
Fields(partialFields).
SupportsAllDrives(true).
KeepRevisionForever(dstFs.opt.KeepRevisionForever).
Context(ctx).Do()
return dstFs.shouldRetry(ctx, err)
Do()
return dstFs.shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "shortcut creation failed")
@@ -2955,7 +2948,7 @@ func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPat
if isDir {
return nil, nil
}
return dstFs.newObjectWithInfo(ctx, dstPath, info)
return dstFs.newObjectWithInfo(dstPath, info)
}
// List all team drives
@@ -2967,7 +2960,7 @@ func (f *Fs) listTeamDrives(ctx context.Context) (drives []*drive.TeamDrive, err
var teamDrives *drive.TeamDriveList
err = f.pacer.Call(func() (bool, error) {
teamDrives, err = listTeamDrives.Context(ctx).Do()
return defaultFs.shouldRetry(ctx, err)
return defaultFs.shouldRetry(err)
})
if err != nil {
return drives, errors.Wrap(err, "listing Team Drives failed")
@@ -3009,8 +3002,8 @@ func (f *Fs) unTrash(ctx context.Context, dir string, directoryID string, recurs
_, err := f.svc.Files.Update(item.Id, &update).
SupportsAllDrives(true).
Fields("trashed").
Context(ctx).Do()
return f.shouldRetry(ctx, err)
Do()
return f.shouldRetry(err)
})
if err != nil {
err = errors.Wrap(err, "failed to restore")
@@ -3052,7 +3045,7 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras
// copy file with id to dest
func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
info, err := f.getFile(ctx, id, f.fileFields)
info, err := f.getFile(id, f.fileFields)
if err != nil {
return errors.Wrap(err, "couldn't find id")
}
@@ -3060,7 +3053,7 @@ func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
return errors.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
}
info.Name = f.opt.Enc.ToStandardName(info.Name)
o, err := f.newObjectWithInfo(ctx, info.Name, info)
o, err := f.newObjectWithInfo(info.Name, info)
if err != nil {
return err
}
@@ -3361,7 +3354,7 @@ func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
found, err := f.list(ctx, []string{directoryID}, leaf, false, false, f.opt.TrashedOnly, false, func(item *drive.File) bool {
if !f.opt.SkipGdocs {
extension, exportName, exportMimeType, isDocument = f.findExportFormat(ctx, item)
extension, exportName, exportMimeType, isDocument = f.findExportFormat(item)
if exportName == leaf {
info = item
return true
@@ -3412,8 +3405,8 @@ func (o *baseObject) SetModTime(ctx context.Context, modTime time.Time) error {
info, err = o.fs.svc.Files.Update(actualID(o.id), updateInfo).
Fields(partialFields).
SupportsAllDrives(true).
Context(ctx).Do()
return o.fs.shouldRetry(ctx, err)
Do()
return o.fs.shouldRetry(err)
})
if err != nil {
return err
@@ -3451,7 +3444,7 @@ func (o *baseObject) httpResponse(ctx context.Context, url, method string, optio
_ = res.Body.Close() // ignore error
}
}
return o.fs.shouldRetry(ctx, err)
return o.fs.shouldRetry(err)
})
if err != nil {
return req, nil, err
@@ -3543,8 +3536,8 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
v2File, err = o.fs.v2Svc.Files.Get(actualID(o.id)).
Fields("downloadUrl").
SupportsAllDrives(true).
Context(ctx).Do()
return o.fs.shouldRetry(ctx, err)
Do()
return o.fs.shouldRetry(err)
})
if err == nil {
fs.Debugf(o, "Using v2 download: %v", v2File.DownloadUrl)
@@ -3624,8 +3617,8 @@ func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadM
Fields(partialFields).
SupportsAllDrives(true).
KeepRevisionForever(o.fs.opt.KeepRevisionForever).
Context(ctx).Do()
return o.fs.shouldRetry(ctx, err)
Do()
return o.fs.shouldRetry(err)
})
return
}
@@ -3668,7 +3661,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return err
}
newO, err := o.fs.newObjectWithInfo(ctx, src.Remote(), info)
newO, err := o.fs.newObjectWithInfo(src.Remote(), info)
if err != nil {
return err
}
@@ -3692,7 +3685,7 @@ func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.Object
if o.fs.importMimeTypes == nil || o.fs.opt.SkipGdocs {
return errors.Errorf("can't update google document type without --drive-import-formats")
}
importMimeType = o.fs.findImportFormat(ctx, updateInfo.MimeType)
importMimeType = o.fs.findImportFormat(updateInfo.MimeType)
if importMimeType == "" {
return errors.Errorf("no import format found for %q", srcMimeType)
}
@@ -3709,7 +3702,7 @@ func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.Object
remote := src.Remote()
remote = remote[:len(remote)-o.extLen]
newO, err := o.fs.newObjectWithInfo(ctx, remote, info)
newO, err := o.fs.newObjectWithInfo(remote, info)
if err != nil {
return err
}
@@ -3729,7 +3722,7 @@ func (o *linkObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo
// Remove an object
func (o *baseObject) Remove(ctx context.Context) error {
if len(o.parents) > 1 {
if o.parents > 1 {
return errors.New("can't delete safely - has multiple parents")
}
return o.fs.delete(ctx, shortcutID(o.id), o.fs.opt.UseTrash)
@@ -3745,14 +3738,6 @@ func (o *baseObject) ID() string {
return o.id
}
// ParentID returns the ID of the Object parent if known, or "" if not
func (o *baseObject) ParentID() string {
if len(o.parents) > 0 {
return o.parents[0]
}
return ""
}
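
For context, ParentID is consumed through the optional fs.ParentIDer interface asserted at the bottom of this file; a short sketch of the caller side (variable names are illustrative):

parentID := ""
if p, ok := entry.(fs.ParentIDer); ok {
	parentID = p.ParentID() // "" when the parent is unknown
}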
func (o *documentObject) ext() string {
return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
}
@@ -3813,13 +3798,10 @@ var (
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
_ fs.ParentIDer = (*Object)(nil)
_ fs.Object = (*documentObject)(nil)
_ fs.MimeTyper = (*documentObject)(nil)
_ fs.IDer = (*documentObject)(nil)
_ fs.ParentIDer = (*documentObject)(nil)
_ fs.Object = (*linkObject)(nil)
_ fs.MimeTyper = (*linkObject)(nil)
_ fs.IDer = (*linkObject)(nil)
_ fs.ParentIDer = (*linkObject)(nil)
)


@@ -111,7 +111,6 @@ func TestInternalParseExtensions(t *testing.T) {
}
func TestInternalFindExportFormat(t *testing.T) {
ctx := context.Background()
item := &drive.File{
Name: "file",
MimeType: "application/vnd.google-apps.document",
@@ -129,7 +128,7 @@ func TestInternalFindExportFormat(t *testing.T) {
} {
f := new(Fs)
f.exportExtensions = test.extensions
gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(ctx, item)
gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(item)
assert.Equal(t, test.wantExtension, gotExtension)
if test.wantExtension != "" {
assert.Equal(t, item.Name+gotExtension, gotFilename)


@@ -94,7 +94,7 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
defer googleapi.CloseBody(res)
err = googleapi.CheckResponse(res)
}
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
return nil, err
@@ -202,7 +202,7 @@ func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
err = rx.f.pacer.Call(func() (bool, error) {
fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
StatusCode, err = rx.transferChunk(ctx, start, chunk, reqSize)
again, err := rx.f.shouldRetry(ctx, err)
again, err := rx.f.shouldRetry(err)
if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
again = false
err = nil


@@ -94,14 +94,7 @@ const (
var (
// Description of how to auth for this app
dropboxConfig = &oauth2.Config{
Scopes: []string{
"files.metadata.write",
"files.content.write",
"files.content.read",
"sharing.write",
// "file_requests.write",
// "members.read", // needed for impersonate - but causes app to need to be approved by Dropbox Team Admin during the flow
},
Scopes: []string{},
// Endpoint: oauth2.Endpoint{
// AuthURL: "https://www.dropbox.com/1/oauth2/authorize",
// TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
@@ -122,19 +115,6 @@ var (
errNotSupportedInSharedMode = fserrors.NoRetryError(errors.New("not supported in shared files mode"))
)
// Gets an oauth config with the right scopes
func getOauthConfig(m configmap.Mapper) *oauth2.Config {
// If not impersonating, use standard scopes
if impersonate, _ := m.Get("impersonate"); impersonate == "" {
return dropboxConfig
}
// Make a copy of the config
config := *dropboxConfig
// Make a copy of the scopes with "members.read" appended
config.Scopes = append(config.Scopes, "members.read")
return &config
}
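
The struct copy above is doing real work; a self-contained sketch (hypothetical scope values) of why the package-level config stays untouched:

base := &oauth2.Config{Scopes: []string{"files.content.read"}}
cfg := *base                                    // shallow copy of the struct
cfg.Scopes = append(cfg.Scopes, "members.read") // reallocates: the slice is at capacity
// base.Scopes still holds one scope; cfg.Scopes holds two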
// Register with Fs
func init() {
DbHashType = hash.RegisterHash("DropboxHash", 64, dbhash.New)
@@ -149,7 +129,7 @@ func init() {
oauth2.SetAuthURLParam("token_access_type", "offline"),
},
}
err := oauthutil.Config(ctx, "dropbox", name, m, getOauthConfig(m), &opt)
err := oauthutil.Config(ctx, "dropbox", name, m, dropboxConfig, &opt)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
@@ -167,23 +147,8 @@ memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "impersonate",
Help: `Impersonate this user when using a business account.
Note that if you want to use impersonate, you should make sure this
flag is set when running "rclone config" as this will cause rclone to
request the "members.read" scope which it won't normally. This is
needed to look up a member's email address and translate it into the
internal ID that dropbox uses in the API.
Using the "members.read" scope will require a Dropbox Team Admin
to approve during the OAuth flow.
You will have to use your own App (setting your own client_id and
client_secret) to use this option as currently rclone's default set of
permissions doesn't include "members.read". This can be added once
v1.55 or later is in use everywhere.
`,
Name: "impersonate",
Help: "Impersonate this user when using a business account.",
Default: "",
Advanced: true,
}, {
@@ -219,11 +184,11 @@ shared folder.`,
// as invalid characters.
// Testing revealed names with trailing spaces and the DEL character don't work.
// Also encode invalid UTF-8 bytes as json doesn't handle them properly.
Default: encoder.Base |
Default: (encoder.Base |
encoder.EncodeBackSlash |
encoder.EncodeDel |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8,
encoder.EncodeInvalidUtf8),
}}...),
})
}
@@ -242,10 +207,8 @@ type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
ci *fs.ConfigInfo // global config
features *fs.Features // optional features
srv files.Client // the connection to the dropbox server
svc files.Client // the connection to the dropbox server (unauthorized)
sharing sharing.Client // as above, but for generating sharing links
users users.Client // as above, but for accessing user information
team team.Client // for the Teams API
@@ -292,10 +255,7 @@ func (f *Fs) Features() *fs.Features {
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func shouldRetry(err error) (bool, error) {
if err == nil {
return false, err
}
@@ -310,7 +270,7 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
switch e := err.(type) {
case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 {
fs.Logf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
}
return true, err
@@ -367,34 +327,27 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
}
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, getOauthConfig(m))
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, dropboxConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure dropbox")
}
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
opt: *opt,
ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
cfg := dropbox.Config{
config := dropbox.Config{
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
Client: oAuthClient, // maybe???
HeaderGenerator: f.headerGenerator,
}
// unauthorized config for endpoints that fail with auth
ucfg := dropbox.Config{
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
}
// NOTE: needs to be created pre-impersonation so we can look up the impersonated user
f.team = team.New(cfg)
f.team = team.New(config)
if opt.Impersonate != "" {
user := team.UserSelectorArg{
Email: opt.Impersonate,
}
@@ -409,13 +362,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, errors.Wrapf(err, "invalid dropbox team member: %q", opt.Impersonate)
}
cfg.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
config.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
}
f.srv = files.New(cfg)
f.svc = files.New(ucfg)
f.sharing = sharing.New(cfg)
f.users = users.New(cfg)
f.srv = files.New(config)
f.sharing = sharing.New(config)
f.users = users.New(config)
f.features = (&fs.Features{
CaseInsensitive: true,
ReadMimeType: false,
@@ -428,7 +380,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if f.root == "" {
return f, nil
}
_, err := f.findSharedFile(ctx, f.root)
_, err := f.findSharedFile(f.root)
f.root = ""
if err == nil {
return f, fs.ErrorIsFile
@@ -448,7 +400,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
// root is not empty so we have find the right shared folder if it exists
id, err := f.findSharedFolder(ctx, dir)
id, err := f.findSharedFolder(dir)
if err != nil {
// if we didn't find the specified shared folder we have to bail out here
return nil, err
@@ -456,7 +408,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// we found the specified shared folder so let's mount it
// this will add it to the users normal root namespace and allows us
// to actually perform operations on it using the normal api endpoints.
err = f.mountSharedFolder(ctx, id)
err = f.mountSharedFolder(id)
if err != nil {
switch e := err.(type) {
case sharing.MountFolderAPIError:
@@ -480,7 +432,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
var acc *users.FullAccount
err = f.pacer.Call(func() (bool, error) {
acc, err = f.users.GetCurrentAccount()
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "get current account failed")
@@ -498,7 +450,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.setRoot(root)
// See if the root is actually an object
_, err = f.getFileMetadata(ctx, f.slashRoot)
_, err = f.getFileMetadata(f.slashRoot)
if err == nil {
newRoot := path.Dir(f.root)
if newRoot == "." {
@@ -532,12 +484,12 @@ func (f *Fs) setRoot(root string) {
}
// getMetadata gets the metadata for a file or directory
func (f *Fs) getMetadata(ctx context.Context, objPath string) (entry files.IsMetadata, notFound bool, err error) {
func (f *Fs) getMetadata(objPath string) (entry files.IsMetadata, notFound bool, err error) {
err = f.pacer.Call(func() (bool, error) {
entry, err = f.srv.GetMetadata(&files.GetMetadataArg{
Path: f.opt.Enc.FromStandardPath(objPath),
})
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
switch e := err.(type) {
@@ -552,8 +504,8 @@ func (f *Fs) getMetadata(ctx context.Context, objPath string) (entry files.IsMet
}
// getFileMetadata gets the metadata for a file
func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (fileInfo *files.FileMetadata, err error) {
entry, notFound, err := f.getMetadata(ctx, filePath)
func (f *Fs) getFileMetadata(filePath string) (fileInfo *files.FileMetadata, err error) {
entry, notFound, err := f.getMetadata(filePath)
if err != nil {
return nil, err
}
@@ -568,8 +520,8 @@ func (f *Fs) getFileMetadata(ctx context.Context, filePath string) (fileInfo *fi
}
// getDirMetadata gets the metadata for a directory
func (f *Fs) getDirMetadata(ctx context.Context, dirPath string) (dirInfo *files.FolderMetadata, err error) {
entry, notFound, err := f.getMetadata(ctx, dirPath)
func (f *Fs) getDirMetadata(dirPath string) (dirInfo *files.FolderMetadata, err error) {
entry, notFound, err := f.getMetadata(dirPath)
if err != nil {
return nil, err
}
@@ -586,7 +538,7 @@ func (f *Fs) getDirMetadata(ctx context.Context, dirPath string) (dirInfo *files
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *files.FileMetadata) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(remote string, info *files.FileMetadata) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -595,7 +547,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *files.F
if info != nil {
err = o.setMetadataFromEntry(info)
} else {
err = o.readEntryAndSetMetadata(ctx)
err = o.readEntryAndSetMetadata()
}
if err != nil {
return nil, err
@@ -607,14 +559,14 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *files.F
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if f.opt.SharedFiles {
return f.findSharedFile(ctx, remote)
return f.findSharedFile(remote)
}
return f.newObjectWithInfo(ctx, remote, nil)
return f.newObjectWithInfo(remote, nil)
}
// listSharedFoldersApi lists all available shared folders mounted and not mounted
// we'll need the id later so we have to return them in original format
func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err error) {
func (f *Fs) listSharedFolders() (entries fs.DirEntries, err error) {
started := false
var res *sharing.ListFoldersResult
for {
@@ -624,7 +576,7 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
}
err := f.pacer.Call(func() (bool, error) {
res, err = f.sharing.ListFolders(&arg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, err
@@ -636,7 +588,7 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
}
err := f.pacer.Call(func() (bool, error) {
res, err = f.sharing.ListFoldersContinue(&arg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "list continue")
@@ -661,8 +613,8 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
// findSharedFolder finds the id for a given shared folder name
// somewhat annoyingly there is no endpoint to query a shared folder by its name
// so our only option is to iterate over all shared folders
func (f *Fs) findSharedFolder(ctx context.Context, name string) (id string, err error) {
entries, err := f.listSharedFolders(ctx)
func (f *Fs) findSharedFolder(name string) (id string, err error) {
entries, err := f.listSharedFolders()
if err != nil {
return "", err
}
@@ -674,21 +626,21 @@ func (f *Fs) findSharedFolder(ctx context.Context, name string) (id string, err
return "", fs.ErrorDirNotFound
}
// mountSharedFolder mount a shared folder to the root namespace
func (f *Fs) mountSharedFolder(ctx context.Context, id string) error {
// mountSharedFolders mount a shared folder to the root namespace
func (f *Fs) mountSharedFolder(id string) error {
arg := sharing.MountFolderArg{
SharedFolderId: id,
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.sharing.MountFolder(&arg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
return err
}
// listReceivedFiles lists shared files the user has access to (note this means individual
// listSharedFolders lists shared files the user has access to (note this means individual
// files not files contained in shared folders)
func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err error) {
func (f *Fs) listReceivedFiles() (entries fs.DirEntries, err error) {
started := false
var res *sharing.ListFilesResult
for {
@@ -698,7 +650,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
}
err := f.pacer.Call(func() (bool, error) {
res, err = f.sharing.ListReceivedFiles(&arg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, err
@@ -710,7 +662,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
}
err := f.pacer.Call(func() (bool, error) {
res, err = f.sharing.ListReceivedFilesContinue(&arg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "list continue")
@@ -737,8 +689,8 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
return entries, nil
}
func (f *Fs) findSharedFile(ctx context.Context, name string) (o *Object, err error) {
files, err := f.listReceivedFiles(ctx)
func (f *Fs) findSharedFile(name string) (o *Object, err error) {
files, err := f.listReceivedFiles()
if err != nil {
return nil, err
}
@@ -761,10 +713,10 @@ func (f *Fs) findSharedFile(ctx context.Context, name string) (o *Object, err er
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.opt.SharedFiles {
return f.listReceivedFiles(ctx)
return f.listReceivedFiles()
}
if f.opt.SharedFolders {
return f.listSharedFolders(ctx)
return f.listSharedFolders()
}
root := f.slashRoot
@@ -785,7 +737,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.ListFolder(&arg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
switch e := err.(type) {
@@ -803,7 +755,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.ListFolderContinue(&arg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "list continue")
@@ -833,7 +785,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
d := fs.NewDir(remote, time.Now()).SetID(folderInfo.Id)
entries = append(entries, d)
} else if fileInfo != nil {
o, err := f.newObjectWithInfo(ctx, remote, fileInfo)
o, err := f.newObjectWithInfo(remote, fileInfo)
if err != nil {
return nil, err
}
@@ -882,7 +834,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
}
// check directory doesn't exist
_, err := f.getDirMetadata(ctx, root)
_, err := f.getDirMetadata(root)
if err == nil {
return nil // directory exists already
} else if err != fs.ErrorDirNotFound {
@@ -899,7 +851,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.CreateFolderV2(&arg2)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
return err
}
@@ -916,7 +868,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
if check {
// check directory exists
_, err = f.getDirMetadata(ctx, root)
_, err = f.getDirMetadata(root)
if err != nil {
return errors.Wrap(err, "Rmdir")
}
@@ -933,7 +885,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
var res *files.ListFolderResult
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.ListFolder(&arg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "Rmdir")
@@ -946,7 +898,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
// remove it
err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.DeleteV2(&files.DeleteArg{Path: root})
return shouldRetry(ctx, err)
return shouldRetry(err)
})
return err
}
@@ -999,7 +951,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var result *files.RelocationResult
err = f.pacer.Call(func() (bool, error) {
result, err = f.srv.CopyV2(&arg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "copy failed")
@@ -1060,7 +1012,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
var result *files.RelocationResult
err = f.pacer.Call(func() (bool, error) {
result, err = f.srv.MoveV2(&arg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "move failed")
@@ -1094,7 +1046,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var linkRes sharing.IsSharedLinkMetadata
err = f.pacer.Call(func() (bool, error) {
linkRes, err = f.sharing.CreateSharedLinkWithSettings(&createArg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil && strings.Contains(err.Error(),
@@ -1107,7 +1059,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var listRes *sharing.ListSharedLinksResult
err = f.pacer.Call(func() (bool, error) {
listRes, err = f.sharing.ListSharedLinks(&listArg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return
@@ -1149,7 +1101,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
dstPath := path.Join(f.slashRoot, dstRemote)
// Check if destination exists
_, err := f.getDirMetadata(ctx, dstPath)
_, err := f.getDirMetadata(dstPath)
if err == nil {
return fs.ErrorDirExists
} else if err != fs.ErrorDirNotFound {
@@ -1168,7 +1120,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.MoveV2(&arg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "MoveDir failed")
@@ -1182,7 +1134,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
var q *users.SpaceUsage
err = f.pacer.Call(func() (bool, error) {
q, err = f.users.GetSpaceUsage()
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "about failed")
@@ -1204,159 +1156,6 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return usage, nil
}
// ChangeNotify calls the passed function with a path that has had changes.
// If the implementation uses polling, it should adhere to the given interval.
//
// Automatically restarts itself in case of unexpected behavior of the remote.
//
// Close the poll interval channel to stop being notified.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
go func() {
// get the StartCursor early so all changes from now on get processed
startCursor, err := f.changeNotifyCursor(ctx)
if err != nil {
fs.Infof(f, "Failed to get StartCursor: %s", err)
}
var ticker *time.Ticker
var tickerC <-chan time.Time
for {
select {
case pollInterval, ok := <-pollIntervalChan:
if !ok {
if ticker != nil {
ticker.Stop()
}
return
}
if ticker != nil {
ticker.Stop()
ticker, tickerC = nil, nil
}
if pollInterval != 0 {
ticker = time.NewTicker(pollInterval)
tickerC = ticker.C
}
case <-tickerC:
if startCursor == "" {
startCursor, err = f.changeNotifyCursor(ctx)
if err != nil {
fs.Infof(f, "Failed to get StartCursor: %s", err)
continue
}
}
fs.Debugf(f, "Checking for changes on remote")
startCursor, err = f.changeNotifyRunner(ctx, notifyFunc, startCursor)
if err != nil {
fs.Infof(f, "Change notify listener failure: %s", err)
}
}
}
}()
}
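// Hedged usage sketch (names taken from the signature above): a caller
// starts polling by sending an interval on the channel and stops by
// closing it.
pollChan := make(chan time.Duration)
f.ChangeNotify(ctx, func(path string, entryType fs.EntryType) {
	fs.Infof(nil, "change at %q (%v)", path, entryType)
}, pollChan)
pollChan <- 30 * time.Second // begin checking every 30 seconds
// ... later ...
close(pollChan) // stops the ticker and the notify goroutine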
func (f *Fs) changeNotifyCursor(ctx context.Context) (cursor string, err error) {
var startCursor *files.ListFolderGetLatestCursorResult
err = f.pacer.Call(func() (bool, error) {
arg := files.ListFolderArg{
Path: f.opt.Enc.FromStandardPath(f.slashRoot),
Recursive: true,
}
if arg.Path == "/" {
arg.Path = ""
}
startCursor, err = f.srv.ListFolderGetLatestCursor(&arg)
return shouldRetry(ctx, err)
})
if err != nil {
return
}
return startCursor.Cursor, nil
}
func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), startCursor string) (newCursor string, err error) {
cursor := startCursor
var res *files.ListFolderLongpollResult
// Dropbox sets a timeout range of 30 - 480 seconds
timeout := uint64(f.ci.TimeoutOrInfinite() / time.Second)
if timeout < 30 {
timeout = 30
}
if timeout > 480 {
timeout = 480
}
err = f.pacer.Call(func() (bool, error) {
args := files.ListFolderLongpollArg{
Cursor: cursor,
Timeout: timeout,
}
res, err = f.svc.ListFolderLongpoll(&args)
return shouldRetry(ctx, err)
})
if err != nil {
return
}
if !res.Changes {
return cursor, nil
}
if res.Backoff != 0 {
fs.Debugf(f, "Waiting to poll for %d seconds", res.Backoff)
time.Sleep(time.Duration(res.Backoff) * time.Second)
}
for {
var changeList *files.ListFolderResult
arg := files.ListFolderContinueArg{
Cursor: cursor,
}
err = f.pacer.Call(func() (bool, error) {
changeList, err = f.srv.ListFolderContinue(&arg)
return shouldRetry(ctx, err)
})
if err != nil {
return "", errors.Wrap(err, "list continue")
}
cursor = changeList.Cursor
var entryType fs.EntryType
for _, entry := range changeList.Entries {
entryPath := ""
switch info := entry.(type) {
case *files.FolderMetadata:
entryType = fs.EntryDirectory
entryPath = strings.TrimLeft(info.PathDisplay, f.slashRootSlash)
case *files.FileMetadata:
entryType = fs.EntryObject
entryPath = strings.TrimLeft(info.PathDisplay, f.slashRootSlash)
case *files.DeletedMetadata:
entryType = fs.EntryObject
entryPath = strings.TrimLeft(info.PathDisplay, f.slashRootSlash)
default:
fs.Errorf(entry, "dropbox ChangeNotify: ignoring unknown EntryType %T", entry)
continue
}
if entryPath != "" {
notifyFunc(entryPath, entryType)
}
}
if !changeList.HasMore {
break
}
}
return cursor, nil
}
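// Sketch of the clamp at the top of changeNotifyRunner (the helper name is
// illustrative): Dropbox longpoll only accepts 30-480 seconds, so rclone's
// default 5m timeout passes through as 300 while e.g. 10m becomes 480.
func clampLongpollTimeout(d time.Duration) uint64 {
	t := uint64(d / time.Second)
	if t < 30 {
		t = 30
	}
	if t > 480 {
		t = 480
	}
	return t
}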
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(DbHashType)
@@ -1395,7 +1194,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != DbHashType {
return "", hash.ErrUnsupported
}
err := o.readMetaData(ctx)
err := o.readMetaData()
if err != nil {
return "", errors.Wrap(err, "failed to read hash from metadata")
}
@@ -1419,17 +1218,17 @@ func (o *Object) setMetadataFromEntry(info *files.FileMetadata) error {
}
// Reads the entry for a file from dropbox
func (o *Object) readEntry(ctx context.Context) (*files.FileMetadata, error) {
return o.fs.getFileMetadata(ctx, o.remotePath())
func (o *Object) readEntry() (*files.FileMetadata, error) {
return o.fs.getFileMetadata(o.remotePath())
}
// Read entry if not set and set metadata from it
func (o *Object) readEntryAndSetMetadata(ctx context.Context) error {
func (o *Object) readEntryAndSetMetadata() error {
// Last resort set time from client
if !o.modTime.IsZero() {
return nil
}
entry, err := o.readEntry(ctx)
entry, err := o.readEntry()
if err != nil {
return err
}
@@ -1442,12 +1241,12 @@ func (o *Object) remotePath() string {
}
// readMetaData gets the info if it hasn't already been fetched
func (o *Object) readMetaData(ctx context.Context) (err error) {
func (o *Object) readMetaData() (err error) {
if !o.modTime.IsZero() {
return nil
}
// Last resort
return o.readEntryAndSetMetadata(ctx)
return o.readEntryAndSetMetadata()
}
// ModTime returns the modification time of the object
@@ -1455,7 +1254,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData(ctx)
err := o.readMetaData()
if err != nil {
fs.Debugf(o, "Failed to read metadata: %v", err)
return time.Now()
@@ -1489,7 +1288,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
err = o.fs.pacer.Call(func() (bool, error) {
_, in, err = o.fs.sharing.GetSharedLinkFile(&arg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, err
@@ -1505,7 +1304,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
err = o.fs.pacer.Call(func() (bool, error) {
_, in, err = o.fs.srv.Download(&arg)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
switch e := err.(type) {
@@ -1524,7 +1323,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Will work optimally if size is >= uploadChunkSize. If the size is either
// unknown (i.e. -1) or smaller than uploadChunkSize, the method incurs an
// avoidable request to the Dropbox API that does not carry payload.
func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *files.CommitInfo, size int64) (entry *files.FileMetadata, err error) {
func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size int64) (entry *files.FileMetadata, err error) {
chunkSize := int64(o.fs.opt.ChunkSize)
chunks := 0
if size != -1 {
@@ -1553,7 +1352,7 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
return false, nil
}
res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, chunk)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, err
@@ -1679,11 +1478,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var err error
var entry *files.FileMetadata
if size > int64(o.fs.opt.ChunkSize) || size == -1 {
entry, err = o.uploadChunked(ctx, in, commitInfo, size)
entry, err = o.uploadChunked(in, commitInfo, size)
} else {
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
entry, err = o.fs.srv.Upload(commitInfo, in)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
}
if err != nil {
@@ -1701,7 +1500,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
_, err = o.fs.srv.DeleteV2(&files.DeleteArg{
Path: o.fs.opt.Enc.FromStandardPath(o.remotePath()),
})
return shouldRetry(ctx, err)
return shouldRetry(err)
})
return err
}


@@ -28,10 +28,7 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func shouldRetry(resp *http.Response, err error) (bool, error) {
// Detect this error which the integration tests provoke
// error HTTP error 403 (403 Forbidden) returned body: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"
//
@@ -51,41 +48,6 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
var isAlphaNumeric = regexp.MustCompile(`^[a-zA-Z0-9]+$`).MatchString
func (f *Fs) createObject(ctx context.Context, remote string) (o *Object, leaf string, directoryID string, err error) {
// Create the directory for the object if it doesn't exist
leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true)
if err != nil {
return
}
// Temporary Object under construction
o = &Object{
fs: f,
remote: remote,
}
return o, leaf, directoryID, nil
}
func (f *Fs) readFileInfo(ctx context.Context, url string) (*File, error) {
request := FileInfoRequest{
URL: url,
}
opts := rest.Opts{
Method: "POST",
Path: "/file/info.cgi",
}
var file File
err := f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, &file)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't read file info")
}
return &file, err
}
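// Illustrative note on the rest helper used above: CallJSON marshals the
// request value into a JSON body, issues opts.Method against opts.Path,
// and decodes the reply into the result pointer, returning the raw
// *http.Response so shouldRetry can classify it.
resp, err := f.rest.CallJSON(ctx, &opts, &request, &file)
_ = resp // inspected by shouldRetry in the real code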
func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
request := DownloadRequest{
URL: url,
@@ -99,7 +61,7 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
var token GetTokenResponse
err := f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, &token)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
@@ -127,7 +89,7 @@ func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntr
var sharedFiles SharedFolderResponse
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, &sharedFiles)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
@@ -156,7 +118,7 @@ func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesLi
filesList = &FilesList{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, filesList)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
@@ -184,7 +146,7 @@ func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *Fol
foldersList = &FoldersList{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, foldersList)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list folders")
@@ -278,7 +240,7 @@ func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (respons
response = &MakeFolderResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, response)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create folder")
@@ -305,7 +267,7 @@ func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (respo
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't remove folder")
@@ -334,7 +296,7 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes
response = &GenericOKResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
@@ -346,56 +308,6 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes
return response, nil
}
func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename string) (response *MoveFileResponse, err error) {
request := &MoveFileRequest{
URLs: []string{url},
FolderID: folderID,
Rename: rename,
}
opts := rest.Opts{
Method: "POST",
Path: "/file/mv.cgi",
}
response = &MoveFileResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't copy file")
}
return response, nil
}
func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename string) (response *CopyFileResponse, err error) {
request := &CopyFileRequest{
URLs: []string{url},
FolderID: folderID,
Rename: rename,
}
opts := rest.Opts{
Method: "POST",
Path: "/file/cp.cgi",
}
response = &CopyFileResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't copy file")
}
return response, nil
}
func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse, err error) {
// fs.Debugf(f, "Requesting Upload node")
@@ -408,7 +320,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
response = &GetUploadNodeResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "didnt got an upload node")
@@ -451,7 +363,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
err = f.pacer.CallNoRetry(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, nil)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
@@ -485,7 +397,7 @@ func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (re
response = &EndFileUploadResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {


@@ -363,6 +363,7 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
fs: f,
remote: remote,
file: File{
ACL: 0,
CDN: 0,
Checksum: link.Whirlpool,
ContentType: "",
@@ -415,89 +416,9 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return nil
}
// Move src to this remote using server side move operations.
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
// Create temporary object
dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
if err != nil {
return nil, err
}
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
}
if resp.Status != "OK" {
return nil, errors.New("couldn't move file")
}
file, err := f.readFileInfo(ctx, resp.URLs[0])
if err != nil {
return nil, errors.New("couldn't read file data")
}
dstObj.setMetaData(*file)
return dstObj, nil
}
// Copy src to this remote using server side copy operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
// Create temporary object
dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
if err != nil {
return nil, err
}
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
resp, err := f.copyFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
}
if resp.Status != "OK" {
return nil, errors.New("couldn't move file")
}
file, err := f.readFileInfo(ctx, resp.URLs[0].ToURL)
if err != nil {
return nil, errors.New("couldn't read file data")
}
dstObj.setMetaData(*file)
return dstObj, nil
}
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
o, err := f.NewObject(ctx, remote)
if err != nil {
return "", err
}
return o.(*Object).file.URL, nil
}
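// Design note (editor's, not from the diff): every 1Fichier file already
// carries a public download URL, so PublicLink above simply returns the
// stored URL — this is what `rclone link remote:path` surfaces here.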
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ dircache.DirCacher = (*Fs)(nil)
)


@@ -72,10 +72,6 @@ func (o *Object) SetModTime(context.Context, time.Time) error {
//return errors.New("setting modtime is not supported for 1fichier remotes")
}
func (o *Object) setMetaData(file File) {
o.file = file
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
fs.FixRangeOption(options, o.file.Size)
@@ -94,7 +90,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.rest.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {


@@ -1,10 +1,5 @@
package fichier
// FileInfoRequest is the request structure of the corresponding request
type FileInfoRequest struct {
URL string `json:"url"`
}
// ListFolderRequest is the request structure of the corresponding request
type ListFolderRequest struct {
FolderID int `json:"folder_id"`
@@ -54,39 +49,6 @@ type MakeFolderResponse struct {
FolderID int `json:"folder_id"`
}
// MoveFileRequest is the request structure of the corresponding request
type MoveFileRequest struct {
URLs []string `json:"urls"`
FolderID int `json:"destination_folder_id"`
Rename string `json:"rename,omitempty"`
}
// MoveFileResponse is the response structure of the corresponding request
type MoveFileResponse struct {
Status string `json:"status"`
URLs []string `json:"urls"`
}
// CopyFileRequest is the request structure of the corresponding request
type CopyFileRequest struct {
URLs []string `json:"urls"`
FolderID int `json:"folder_id"`
Rename string `json:"rename,omitempty"`
}
// CopyFileResponse is the response structure of the corresponding request
type CopyFileResponse struct {
Status string `json:"status"`
Copied int `json:"copied"`
URLs []FileCopy `json:"urls"`
}
// FileCopy is used in the CopyFileResponse
type FileCopy struct {
FromURL string `json:"from_url"`
ToURL string `json:"to_url"`
}
// GetUploadNodeResponse is the response structure of the corresponding request
type GetUploadNodeResponse struct {
ID string `json:"id"`
@@ -124,6 +86,7 @@ type EndFileUploadResponse struct {
// File is the structure how 1Fichier returns a File
type File struct {
ACL int `json:"acl"`
CDN int `json:"cdn"`
Checksum string `json:"checksum"`
ContentType string `json:"content-type"`


@@ -228,10 +228,7 @@ var retryStatusCodes = []struct {
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, status api.OKError) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func (f *Fs) shouldRetry(resp *http.Response, err error, status api.OKError) (bool, error) {
if err != nil {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
@@ -404,7 +401,7 @@ func (f *Fs) rpc(ctx context.Context, function string, p params, result api.OKEr
// Refresh the body each retry
opts.Body = strings.NewReader(data.Encode())
resp, err = f.srv.CallJSON(ctx, &opts, nil, result)
return f.shouldRetry(ctx, resp, err, result)
return f.shouldRetry(resp, err, result)
})
if err != nil {
return resp, err
@@ -1280,7 +1277,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &uploader)
return o.fs.shouldRetry(ctx, resp, err, nil)
return o.fs.shouldRetry(resp, err, nil)
})
if err != nil {
return errors.Wrap(err, "failed to upload")


@@ -5,7 +5,6 @@ import (
"context"
"crypto/tls"
"io"
"net"
"net/textproto"
"path"
"runtime"
@@ -21,8 +20,6 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
@@ -34,12 +31,6 @@ var (
currentUser = env.CurrentUser()
)
const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -100,22 +91,6 @@ to an encrypted one. Cannot be used in combination with implicit FTP.`,
Help: "Disable using MLSD even if server advertises support",
Default: false,
Advanced: true,
}, {
Name: "idle_timeout",
Default: fs.Duration(60 * time.Second),
Help: `Max time before closing idle connections
If no connections have been returned to the connection pool in the time
given, rclone will empty the connection pool.
Set to 0 to keep connections indefinitely.
`,
Advanced: true,
}, {
Name: "close_timeout",
Help: "Maximum time to wait for a response to close.",
Default: fs.Duration(60 * time.Second),
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -143,8 +118,6 @@ type Options struct {
SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"`
DisableMLSD bool `config:"disable_mlsd"`
IdleTimeout fs.Duration `config:"idle_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -161,10 +134,7 @@ type Fs struct {
dialAddr string
poolMu sync.Mutex
pool []*ftp.ServerConn
drain *time.Timer // used to drain the pool when we stop using the connections
tokens *pacer.TokenDispenser
tlsConf *tls.Config
pacer *fs.Pacer // pacer for FTP connections
}
// Object describes an FTP file
@@ -241,52 +211,25 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
return len(p), nil
}
type dialCtx struct {
f *Fs
ctx context.Context
}
// dial a new connection with fshttp dialer
func (d *dialCtx) dial(network, address string) (net.Conn, error) {
conn, err := fshttp.NewDialer(d.ctx).Dial(network, address)
if err != nil {
return nil, err
}
if d.f.tlsConf != nil {
conn = tls.Client(conn, d.f.tlsConf)
}
return conn, err
}
// shouldRetry returns a boolean as to whether this err deserve to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusNotAvailable:
return true, err
}
}
return fserrors.ShouldRetry(err), err
}
// Open a new connection to the FTP server.
func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
func (f *Fs) ftpConnection(ctx context.Context) (*ftp.ServerConn, error) {
fs.Debugf(f, "Connecting to FTP server")
dCtx := dialCtx{f, ctx}
ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dCtx.dial)}
if f.opt.ExplicitTLS {
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
// Initial connection needs to be cleartext for explicit TLS
conn, err := fshttp.NewDialer(ctx).Dial("tcp", f.dialAddr)
if err != nil {
return nil, err
ftpConfig := []ftp.DialOption{ftp.DialWithTimeout(f.ci.ConnectTimeout)}
if f.opt.TLS && f.opt.ExplicitTLS {
fs.Errorf(f, "Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
return nil, errors.New("Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
} else if f.opt.TLS {
tlsConfig := &tls.Config{
ServerName: f.opt.Host,
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
}
ftpConfig = append(ftpConfig, ftp.DialWithNetConn(conn))
ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
} else if f.opt.ExplicitTLS {
tlsConfig := &tls.Config{
ServerName: f.opt.Host,
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
}
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(tlsConfig))
}
if f.opt.DisableEPSV {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
@@ -297,22 +240,18 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
}
err = f.pacer.Call(func() (bool, error) {
c, err = ftp.Dial(f.dialAddr, ftpConfig...)
if err != nil {
return shouldRetry(ctx, err)
}
err = c.Login(f.user, f.pass)
if err != nil {
_ = c.Quit()
return shouldRetry(ctx, err)
}
return false, nil
})
c, err := ftp.Dial(f.dialAddr, ftpConfig...)
if err != nil {
err = errors.Wrapf(err, "failed to make FTP connection to %q", f.dialAddr)
fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
return nil, errors.Wrap(err, "ftpConnection Dial")
}
return c, err
err = c.Login(f.user, f.pass)
if err != nil {
_ = c.Quit()
fs.Errorf(f, "Error while Logging in into %s: %s", f.dialAddr, err)
return nil, errors.Wrap(err, "ftpConnection Login")
}
return c, nil
}
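// Sketch of the two TLS modes handled above (dial options from
// jlaffaye/ftp as used in this file): implicit TLS wraps the socket in
// TLS before the FTP greeting, while explicit TLS upgrades the cleartext
// session via AUTH TLS after connecting.
tlsConfig := &tls.Config{
	ServerName:         f.opt.Host,
	InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
}
implicit := ftp.DialWithTLS(tlsConfig)         // ftps://, whole session encrypted
explicit := ftp.DialWithExplicitTLS(tlsConfig) // starts plain, upgrades with AUTH TLS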
// Get an FTP connection from the pool, or open a new one
@@ -369,32 +308,9 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
}
f.poolMu.Lock()
f.pool = append(f.pool, c)
if f.opt.IdleTimeout > 0 {
f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer
}
f.poolMu.Unlock()
}
// Drain the pool of any connections
func (f *Fs) drainPool(ctx context.Context) (err error) {
f.poolMu.Lock()
defer f.poolMu.Unlock()
if f.opt.IdleTimeout > 0 {
f.drain.Stop()
}
if len(f.pool) != 0 {
fs.Debugf(f, "closing %d unused connections", len(f.pool))
}
for i, c := range f.pool {
if cErr := c.Quit(); cErr != nil {
err = cErr
}
f.pool[i] = nil
}
f.pool = nil
return err
}
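// Sketch of the idle-drain mechanism deleted above, reassembled from the
// removed lines: a time.AfterFunc timer empties the pool after IdleTimeout
// of inactivity, and each connection returned to the pool pushes the
// deadline back.
f.drain = time.AfterFunc(time.Duration(f.opt.IdleTimeout), func() { _ = f.drainPool(ctx) })
// ...and in putFtpConnection, after appending to f.pool:
f.drain.Reset(time.Duration(f.opt.IdleTimeout))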
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
@@ -422,16 +338,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
if opt.TLS {
protocol = "ftps://"
}
if opt.TLS && opt.ExplicitTLS {
return nil, errors.New("Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
}
var tlsConfig *tls.Config
if opt.TLS || opt.ExplicitTLS {
tlsConfig = &tls.Config{
ServerName: opt.Host,
InsecureSkipVerify: opt.SkipVerifyTLSCert,
}
}
u := protocol + path.Join(dialAddr+"/", root)
ci := fs.GetConfig(ctx)
f := &Fs{
@@ -444,16 +350,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
pass: pass,
dialAddr: dialAddr,
tokens: pacer.NewTokenDispenser(opt.Concurrency),
tlsConf: tlsConfig,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
// set the pool drainer timer going
if f.opt.IdleTimeout > 0 {
f.drain = time.AfterFunc(time.Duration(opt.IdleTimeout), func() { _ = f.drainPool(ctx) })
}
// Make a connection and pool it to return errors early
c, err := f.getFtpConnection(ctx)
if err != nil {
@@ -482,12 +382,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
return f, err
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
return f.drainPool(ctx)
}
// translateErrorFile turns FTP errors into rclone errors if possible for a file
func translateErrorFile(err error) error {
switch errX := err.(type) {
@@ -633,7 +527,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}()
// Wait for List for up to Timeout seconds
timer := time.NewTimer(f.ci.TimeoutOrInfinite())
timer := time.NewTimer(f.ci.Timeout)
select {
case listErr = <-errchan:
timer.Stop()
@@ -966,8 +860,8 @@ func (f *ftpReadCloser) Close() error {
go func() {
errchan <- f.rc.Close()
}()
// Wait for Close for up to 60 seconds by default
timer := time.NewTimer(time.Duration(f.f.opt.CloseTimeout))
// Wait for Close for up to 60 seconds
timer := time.NewTimer(60 * time.Second)
select {
case err = <-errchan:
timer.Stop()
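// Generic shape of the bounded-wait idiom used by List and Close in this
// file (blocking() is illustrative): run the call in a goroutine and race
// it against a timer so a wedged server cannot hang rclone indefinitely.
errchan := make(chan error, 1)
go func() { errchan <- blocking() }()
timer := time.NewTimer(timeout)
var err error
select {
case err = <-errchan:
	timer.Stop()
case <-timer.C:
	err = errors.New("timed out waiting for server")
}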
@@ -1096,6 +990,5 @@ var (
_ fs.Mover = &Fs{}
_ fs.DirMover = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Shutdowner = &Fs{}
_ fs.Object = &Object{}
)

View File

@@ -329,10 +329,7 @@ func (f *Fs) Features() *fs.Features {
}
// shouldRetry determines whether a given err rates being retried
func shouldRetry(ctx context.Context, err error) (again bool, errOut error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func shouldRetry(err error) (again bool, errOut error) {
again = false
if err != nil {
if fserrors.ShouldRetry(err) {
@@ -458,7 +455,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err == nil {
newRoot := path.Dir(f.root)
@@ -524,7 +521,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
var objects *storage.Objects
err = f.pacer.Call(func() (bool, error) {
objects, err = list.Context(ctx).Do()
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
if gErr, ok := err.(*googleapi.Error); ok {
@@ -627,7 +624,7 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
var buckets *storage.Buckets
err = f.pacer.Call(func() (bool, error) {
buckets, err = listBuckets.Context(ctx).Do()
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, err
@@ -753,7 +750,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
// service account that only has the "Storage Object Admin" role. See #2193 for details.
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err == nil {
// Bucket already exists
@@ -788,7 +785,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
insertBucket.PredefinedAcl(f.opt.BucketACL)
}
_, err = insertBucket.Context(ctx).Do()
return shouldRetry(ctx, err)
return shouldRetry(err)
})
}, nil)
}
@@ -805,7 +802,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
return f.cache.Remove(bucket, func() error {
return f.pacer.Call(func() (bool, error) {
err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
return shouldRetry(ctx, err)
return shouldRetry(err)
})
})
}
@@ -851,7 +848,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
for {
err = f.pacer.Call(func() (bool, error) {
rewriteResponse, err = rewriteRequest.Context(ctx).Do()
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, err
@@ -944,7 +941,7 @@ func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, er
bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
if gErr, ok := err.(*googleapi.Error); ok {
@@ -1015,7 +1012,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = copyObject.Context(ctx).Do()
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return err
@@ -1046,7 +1043,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
_ = res.Body.Close() // ignore error
}
}
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, err
@@ -1112,7 +1109,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = insertObject.Context(ctx).Do()
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return err
@@ -1127,7 +1124,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
return shouldRetry(ctx, err)
return shouldRetry(err)
})
return err
}


@@ -240,10 +240,7 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
@@ -332,7 +329,7 @@ func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, e
var openIDconfig map[string]interface{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.unAuth.CallJSON(ctx, &opts, nil, &openIDconfig)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return "", errors.Wrap(err, "couldn't read openID config")
@@ -361,7 +358,7 @@ func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err erro
}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &userInfo)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't read user info")
@@ -392,7 +389,7 @@ func (f *Fs) Disconnect(ctx context.Context) (err error) {
var res interface{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &res)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't revoke token")
@@ -479,7 +476,7 @@ func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err erro
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list albums")
@@ -534,7 +531,7 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &filter, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't list files")
@@ -678,7 +675,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, request, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create album")
@@ -813,7 +810,7 @@ func (o *Object) Size() int64 {
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
fs.Debugf(o, "Reading size failed: %v", err)
@@ -864,7 +861,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &item)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't get media item")
@@ -941,7 +938,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -996,10 +993,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
if err != nil {
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
}
token, err = rest.ReadBody(resp)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't upload file")
@@ -1027,7 +1024,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var result api.BatchCreateResponse
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, request, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to create media item")
@@ -1072,7 +1069,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't delete item from album")


@@ -109,7 +109,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
dirname := path.Dir(realpath)
fs.Debugf(o.fs, "update [%s]", realpath)
err := o.fs.client.MkdirAll(dirname, 0755)
err := o.fs.client.MkdirAll(dirname, 755)
if err != nil {
return err
}
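// Aside on the literals above (plain Go semantics): 0755 is octal
// rwxr-xr-x, whereas a bare 755 is decimal and corresponds to octal
// 1363 — an unlikely mode for a directory.
fmt.Printf("%o %o\n", 0755, 755) // prints: 755 1363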


@@ -15,7 +15,7 @@ import (
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/rest"
@@ -47,7 +47,7 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
ts := httptest.NewServer(handler)
// Configure the remote
configfile.LoadConfig(context.Background())
config.LoadConfig(context.Background())
// fs.Config.LogLevel = fs.LogLevelDebug
// fs.Config.DumpHeaders = true
// fs.Config.DumpBodies = true


@@ -235,10 +235,7 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
@@ -618,7 +615,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Jo
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if apiErr, ok := err.(*api.Error); ok {
@@ -857,7 +854,7 @@ func (f *Fs) CreateDir(ctx context.Context, path string) (jf *api.JottaFolder, e
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &jf)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
//fmt.Printf("...Error %v\n", err)
@@ -886,7 +883,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
var result api.JottaFolder
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
@@ -998,7 +995,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
var result api.JottaFolder // Could be JottaFileDirList, but JottaFolder is close enough
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
if apiErr, ok := err.(*api.Error); ok {
@@ -1104,7 +1101,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't purge directory")
@@ -1143,7 +1140,7 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -1271,7 +1268,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var result api.JottaFile
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if apiErr, ok := err.(*api.Error); ok {
@@ -1449,7 +1446,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -1562,7 +1559,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var response api.AllocateFileResponse
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.apiSrv.CallJSON(ctx, &opts, &request, &response)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return err
@@ -1627,7 +1624,7 @@ func (o *Object) Remove(ctx context.Context) error {
return o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallXML(ctx, &opts, nil, nil)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
}


@@ -42,9 +42,8 @@ func init() {
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "nounc",
Help: "Disable UNC (long path names) conversion on Windows",
Advanced: runtime.GOOS != "windows",
Name: "nounc",
Help: "Disable UNC (long path names) conversion on Windows",
Examples: []fs.OptionExample{{
Value: "true",
Help: "Disables long file names",
@@ -149,17 +148,6 @@ Windows/macOS and case sensitive for everything else. Use this flag
to override the default choice.`,
Default: false,
Advanced: true,
}, {
Name: "no_preallocate",
Help: `Disable preallocation of disk space for transferred files
Preallocation of disk space helps prevent filesystem fragmentation.
However, some virtual filesystem layers (such as Google Drive File
Stream) may incorrectly set the actual file size equal to the
preallocated space, causing checksum and file size checks to fail.
Use this flag to disable preallocation.`,
Default: false,
Advanced: true,
}, {
Name: "no_sparse",
Help: `Disable sparse files for multi-thread downloads
@@ -203,7 +191,6 @@ type Options struct {
OneFileSystem bool `config:"one_file_system"`
CaseSensitive bool `config:"case_sensitive"`
CaseInsensitive bool `config:"case_insensitive"`
NoPreAllocate bool `config:"no_preallocate"`
NoSparse bool `config:"no_sparse"`
NoSetModTime bool `config:"no_set_modtime"`
Enc encoder.MultiEncoder `config:"encoding"`
@@ -1140,16 +1127,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
}
if !o.fs.opt.NoPreAllocate {
// Pre-allocate the file for performance reasons
err = file.PreAllocate(src.Size(), f)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
if err == file.ErrDiskFull {
_ = f.Close()
return err
}
}
// Pre-allocate the file for performance reasons
err = file.PreAllocate(src.Size(), f)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
}
out = f
} else {
@@ -1236,11 +1217,9 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
return nil, err
}
// Pre-allocate the file for performance reasons
if !f.opt.NoPreAllocate {
err = file.PreAllocate(size, out)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
}
err = file.PreAllocate(size, out)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
}
if !f.opt.NoSparse && file.SetSparseImplemented {
sparseWarning.Do(func() {


@@ -234,10 +234,7 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this response and err
// deserve to be retried. It returns the err as a convenience.
// Retries password authorization (once) in a special case of access denied.
func shouldRetry(ctx context.Context, res *http.Response, err error, f *Fs, opts *rest.Opts) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func shouldRetry(res *http.Response, err error, f *Fs, opts *rest.Opts) (bool, error) {
if res != nil && res.StatusCode == 403 && f.opt.Password != "" && !f.passFailed {
reAuthErr := f.reAuthorize(opts, err)
return reAuthErr == nil, err // return an original error
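// The 403 branch above implements retry-after-reauth: when access is
// denied and a password is configured, reAuthorize refreshes the
// credentials and the request is retried once if that succeeded, while
// f.passFailed guards against looping on a genuinely bad password.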
@@ -603,7 +600,7 @@ func (f *Fs) readItemMetaData(ctx context.Context, path string) (entry fs.DirEnt
var info api.ItemInfoResponse
err = f.pacer.Call(func() (bool, error) {
res, err := f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(ctx, res, err, f, &opts)
return shouldRetry(res, err, f, &opts)
})
if err != nil {
@@ -739,7 +736,7 @@ func (f *Fs) listM1(ctx context.Context, dirPath string, offset int, limit int)
)
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(ctx, res, err, f, &opts)
return shouldRetry(res, err, f, &opts)
})
if err != nil {
@@ -803,7 +800,7 @@ func (f *Fs) listBin(ctx context.Context, dirPath string, depth int) (entries fs
var res *http.Response
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.Call(ctx, &opts)
return shouldRetry(ctx, res, err, f, &opts)
return shouldRetry(res, err, f, &opts)
})
if err != nil {
closeBody(res)
@@ -1076,7 +1073,7 @@ func (f *Fs) CreateDir(ctx context.Context, path string) error {
var res *http.Response
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.Call(ctx, &opts)
return shouldRetry(ctx, res, err, f, &opts)
return shouldRetry(res, err, f, &opts)
})
if err != nil {
closeBody(res)
@@ -1219,7 +1216,7 @@ func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) error {
var response api.GenericResponse
err = f.pacer.Call(func() (bool, error) {
res, err := f.srv.CallJSON(ctx, &opts, nil, &response)
return shouldRetry(ctx, res, err, f, &opts)
return shouldRetry(res, err, f, &opts)
})
switch {
@@ -1291,7 +1288,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var response api.GenericBodyResponse
err = f.pacer.Call(func() (bool, error) {
res, err := f.srv.CallJSON(ctx, &opts, nil, &response)
return shouldRetry(ctx, res, err, f, &opts)
return shouldRetry(res, err, f, &opts)
})
if err != nil {
@@ -1395,7 +1392,7 @@ func (f *Fs) moveItemBin(ctx context.Context, srcPath, dstPath, opName string) e
var res *http.Response
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.Call(ctx, &opts)
return shouldRetry(ctx, res, err, f, &opts)
return shouldRetry(res, err, f, &opts)
})
if err != nil {
closeBody(res)
@@ -1486,7 +1483,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var response api.GenericBodyResponse
err = f.pacer.Call(func() (bool, error) {
res, err := f.srv.CallJSON(ctx, &opts, nil, &response)
return shouldRetry(ctx, res, err, f, &opts)
return shouldRetry(res, err, f, &opts)
})
if err == nil && response.Body != "" {
@@ -1527,7 +1524,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
var response api.CleanupResponse
err = f.pacer.Call(func() (bool, error) {
res, err := f.srv.CallJSON(ctx, &opts, nil, &response)
return shouldRetry(ctx, res, err, f, &opts)
return shouldRetry(res, err, f, &opts)
})
if err != nil {
return err
@@ -1560,7 +1557,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var info api.UserInfoResponse
err = f.pacer.Call(func() (bool, error) {
res, err := f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(ctx, res, err, f, &opts)
return shouldRetry(res, err, f, &opts)
})
if err != nil {
return nil, err
@@ -2079,7 +2076,7 @@ func (o *Object) addFileMetaData(ctx context.Context, overwrite bool) error {
var res *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, res, err, o.fs, &opts)
return shouldRetry(res, err, o.fs, &opts)
})
if err != nil {
closeBody(res)
@@ -2175,7 +2172,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
opts.RootURL = server
res, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, res, err, o.fs, &opts)
return shouldRetry(res, err, o.fs, &opts)
})
if err != nil {
if res != nil && res.Body != nil {


@@ -30,7 +30,6 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
@@ -159,10 +158,7 @@ func parsePath(path string) (root string) {
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func shouldRetry(err error) (bool, error) {
// Let the mega library handle the low level retries
return false, err
/*
@@ -175,8 +171,8 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
}
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, remote string) (info *mega.Node, err error) {
rootNode, err := f.findRoot(ctx, false)
func (f *Fs) readMetaDataForPath(remote string) (info *mega.Node, err error) {
rootNode, err := f.findRoot(false)
if err != nil {
return nil, err
}
@@ -241,7 +237,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}).Fill(ctx, f)
// Find the root node and check if it is a file or not
_, err = f.findRoot(ctx, false)
_, err = f.findRoot(false)
switch err {
case nil:
// root node found and is a directory
@@ -311,8 +307,8 @@ func (f *Fs) findObject(rootNode *mega.Node, file string) (node *mega.Node, err
// lookupDir looks up the node for the directory of the name given
//
// if create is true it tries to create the root directory if not found
func (f *Fs) lookupDir(ctx context.Context, dir string) (*mega.Node, error) {
rootNode, err := f.findRoot(ctx, false)
func (f *Fs) lookupDir(dir string) (*mega.Node, error) {
rootNode, err := f.findRoot(false)
if err != nil {
return nil, err
}
@@ -320,15 +316,15 @@ func (f *Fs) lookupDir(ctx context.Context, dir string) (*mega.Node, error) {
}
// lookupParentDir finds the parent node for the remote passed in
func (f *Fs) lookupParentDir(ctx context.Context, remote string) (dirNode *mega.Node, leaf string, err error) {
func (f *Fs) lookupParentDir(remote string) (dirNode *mega.Node, leaf string, err error) {
parent, leaf := path.Split(remote)
dirNode, err = f.lookupDir(ctx, parent)
dirNode, err = f.lookupDir(parent)
return dirNode, leaf, err
}
// mkdir makes the directory and any parent directories for the
// directory of the name given
func (f *Fs) mkdir(ctx context.Context, rootNode *mega.Node, dir string) (node *mega.Node, err error) {
func (f *Fs) mkdir(rootNode *mega.Node, dir string) (node *mega.Node, err error) {
f.mkdirMu.Lock()
defer f.mkdirMu.Unlock()
@@ -362,7 +358,7 @@ func (f *Fs) mkdir(ctx context.Context, rootNode *mega.Node, dir string) (node *
// create directory called name in node
err = f.pacer.Call(func() (bool, error) {
node, err = f.srv.CreateDir(name, node)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "mkdir create node failed")
@@ -372,20 +368,20 @@ func (f *Fs) mkdir(ctx context.Context, rootNode *mega.Node, dir string) (node *
}
// mkdirParent creates the parent directory of remote
func (f *Fs) mkdirParent(ctx context.Context, remote string) (dirNode *mega.Node, leaf string, err error) {
rootNode, err := f.findRoot(ctx, true)
func (f *Fs) mkdirParent(remote string) (dirNode *mega.Node, leaf string, err error) {
rootNode, err := f.findRoot(true)
if err != nil {
return nil, "", err
}
parent, leaf := path.Split(remote)
dirNode, err = f.mkdir(ctx, rootNode, parent)
dirNode, err = f.mkdir(rootNode, parent)
return dirNode, leaf, err
}
// findRoot looks up the root directory node and returns it.
//
// if create is true it tries to create the root directory if not found
func (f *Fs) findRoot(ctx context.Context, create bool) (*mega.Node, error) {
func (f *Fs) findRoot(create bool) (*mega.Node, error) {
f.rootNodeMu.Lock()
defer f.rootNodeMu.Unlock()
@@ -407,7 +403,7 @@ func (f *Fs) findRoot(ctx context.Context, create bool) (*mega.Node, error) {
}
//..not found so create the root directory
f._rootNode, err = f.mkdir(ctx, absRoot, f.root)
f._rootNode, err = f.mkdir(absRoot, f.root)
return f._rootNode, err
}
@@ -437,7 +433,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
fs.Debugf(f, "Deleting trash %q", f.opt.Enc.ToStandardName(item.GetName()))
deleteErr := f.pacer.Call(func() (bool, error) {
err := f.srv.Delete(item, true)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if deleteErr != nil {
err = deleteErr
@@ -451,7 +447,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *mega.Node) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(remote string, info *mega.Node) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -461,7 +457,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *mega.No
// Set info
err = o.setMetaData(info)
} else {
err = o.readMetaData(ctx) // reads info and meta, returning an error
err = o.readMetaData() // reads info and meta, returning an error
}
if err != nil {
return nil, err
@@ -472,7 +468,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *mega.No
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
return f.newObjectWithInfo(remote, nil)
}
// list the objects into the function supplied
@@ -510,7 +506,7 @@ func (f *Fs) list(ctx context.Context, dir *mega.Node, fn listFn) (found bool, e
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
dirNode, err := f.lookupDir(ctx, dir)
dirNode, err := f.lookupDir(dir)
if err != nil {
return nil, err
}
@@ -522,7 +518,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
d := fs.NewDir(remote, info.GetTimeStamp()).SetID(info.GetHash())
entries = append(entries, d)
case mega.FILE:
o, err := f.newObjectWithInfo(ctx, remote, info)
o, err := f.newObjectWithInfo(remote, info)
if err != nil {
iErr = err
return true
@@ -546,8 +542,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Returns the dirNode, object, leaf and error
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, dirNode *mega.Node, leaf string, err error) {
dirNode, leaf, err = f.mkdirParent(ctx, remote)
func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, dirNode *mega.Node, leaf string, err error) {
dirNode, leaf, err = f.mkdirParent(remote)
if err != nil {
return nil, nil, leaf, err
}
@@ -569,7 +565,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil)
existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
switch err {
case nil:
return existingObj, existingObj.Update(ctx, in, src, options...)
@@ -595,7 +591,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
size := src.Size()
modTime := src.ModTime(ctx)
o, _, _, err := f.createObject(ctx, remote, modTime, size)
o, _, _, err := f.createObject(remote, modTime, size)
if err != nil {
return nil, err
}
@@ -604,30 +600,30 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
rootNode, err := f.findRoot(ctx, true)
rootNode, err := f.findRoot(true)
if err != nil {
return err
}
_, err = f.mkdir(ctx, rootNode, dir)
_, err = f.mkdir(rootNode, dir)
return errors.Wrap(err, "Mkdir failed")
}
// deleteNode removes a file or directory, observing useTrash
func (f *Fs) deleteNode(ctx context.Context, node *mega.Node) (err error) {
func (f *Fs) deleteNode(node *mega.Node) (err error) {
err = f.pacer.Call(func() (bool, error) {
err = f.srv.Delete(node, f.opt.HardDelete)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
return err
}
// purgeCheck removes the directory dir, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
func (f *Fs) purgeCheck(dir string, check bool) error {
f.mkdirMu.Lock()
defer f.mkdirMu.Unlock()
rootNode, err := f.findRoot(ctx, false)
rootNode, err := f.findRoot(false)
if err != nil {
return err
}
@@ -648,7 +644,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
waitEvent := f.srv.WaitEventsStart()
err = f.deleteNode(ctx, dirNode)
err = f.deleteNode(dirNode)
if err != nil {
return errors.Wrap(err, "delete directory node failed")
}
@@ -666,7 +662,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, true)
return f.purgeCheck(dir, true)
}
// Precision returns the precision of this Fs
@@ -680,13 +676,13 @@ func (f *Fs) Precision() time.Duration {
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
return f.purgeCheck(dir, false)
}
// move a file or folder (srcFs, srcRemote, info) to (f, dstRemote)
//
// info will be updated
func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node) (err error) {
func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node) (err error) {
var (
dstFs = f
srcDirNode, dstDirNode *mega.Node
@@ -696,12 +692,12 @@ func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote st
if dstRemote != "" {
// lookup or create the destination parent directory
dstDirNode, dstLeaf, err = dstFs.mkdirParent(ctx, dstRemote)
dstDirNode, dstLeaf, err = dstFs.mkdirParent(dstRemote)
} else {
// find or create the parent of the root directory
absRoot := dstFs.srv.FS.GetRoot()
dstParent, dstLeaf = path.Split(dstFs.root)
dstDirNode, err = dstFs.mkdir(ctx, absRoot, dstParent)
dstDirNode, err = dstFs.mkdir(absRoot, dstParent)
}
if err != nil {
return errors.Wrap(err, "server-side move failed to make dst parent dir")
@@ -709,7 +705,7 @@ func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote st
if srcRemote != "" {
// lookup the existing parent directory
srcDirNode, srcLeaf, err = srcFs.lookupParentDir(ctx, srcRemote)
srcDirNode, srcLeaf, err = srcFs.lookupParentDir(srcRemote)
} else {
// lookup the existing root parent
absRoot := srcFs.srv.FS.GetRoot()
@@ -725,7 +721,7 @@ func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote st
//log.Printf("move src %p %q dst %p %q", srcDirNode, srcDirNode.GetName(), dstDirNode, dstDirNode.GetName())
err = f.pacer.Call(func() (bool, error) {
err = f.srv.Move(info, dstDirNode)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "server-side move failed")
@@ -739,7 +735,7 @@ func (f *Fs) move(ctx context.Context, dstRemote string, srcFs *Fs, srcRemote st
//log.Printf("rename %q to %q", srcLeaf, dstLeaf)
err = f.pacer.Call(func() (bool, error) {
err = f.srv.Rename(info, f.opt.Enc.FromStandardName(dstLeaf))
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "server-side rename failed")
@@ -771,7 +767,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// Do the move
err := f.move(ctx, remote, srcObj.fs, srcObj.remote, srcObj.info)
err := f.move(remote, srcObj.fs, srcObj.remote, srcObj.info)
if err != nil {
return nil, err
}
@@ -802,13 +798,13 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// find the source
info, err := srcFs.lookupDir(ctx, srcRemote)
info, err := srcFs.lookupDir(srcRemote)
if err != nil {
return err
}
// check the destination doesn't exist
_, err = dstFs.lookupDir(ctx, dstRemote)
_, err = dstFs.lookupDir(dstRemote)
if err == nil {
return fs.ErrorDirExists
} else if err != fs.ErrorDirNotFound {
@@ -816,7 +812,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// Do the move
err = f.move(ctx, dstRemote, srcFs, srcRemote, info)
err = f.move(dstRemote, srcFs, srcRemote, info)
if err != nil {
return err
}
@@ -842,7 +838,7 @@ func (f *Fs) Hashes() hash.Set {
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
root, err := f.findRoot(ctx, false)
root, err := f.findRoot(false)
if err != nil {
return "", errors.Wrap(err, "PublicLink failed to find root node")
}
@@ -890,7 +886,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
fs.Infof(srcDir, "merging %q", f.opt.Enc.ToStandardName(info.GetName()))
err = f.pacer.Call(func() (bool, error) {
err = f.srv.Move(info, dstDirNode)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", f.opt.Enc.ToStandardName(info.GetName()), srcDir)
@@ -898,7 +894,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
}
// rmdir (into trash) the now empty source directory
fs.Infof(srcDir, "removing empty directory")
err = f.deleteNode(ctx, srcDirNode)
err = f.deleteNode(srcDirNode)
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
}
@@ -912,7 +908,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var err error
err = f.pacer.Call(func() (bool, error) {
q, err = f.srv.GetQuota()
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get Mega Quota")
@@ -967,11 +963,11 @@ func (o *Object) setMetaData(info *mega.Node) (err error) {
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
func (o *Object) readMetaData() (err error) {
if o.info != nil {
return nil
}
info, err := o.fs.readMetaDataForPath(ctx, o.remote)
info, err := o.fs.readMetaDataForPath(o.remote)
if err != nil {
if err == fs.ErrorDirNotFound {
err = fs.ErrorObjectNotFound
@@ -1002,7 +998,6 @@ func (o *Object) Storable() bool {
// openObject represents a download in progress
type openObject struct {
ctx context.Context
mu sync.Mutex
o *Object
d *mega.Download
@@ -1013,14 +1008,14 @@ type openObject struct {
}
// get the next chunk
func (oo *openObject) getChunk(ctx context.Context) (err error) {
func (oo *openObject) getChunk() (err error) {
if oo.id >= oo.d.Chunks() {
return io.EOF
}
var chunk []byte
err = oo.o.fs.pacer.Call(func() (bool, error) {
chunk, err = oo.d.DownloadChunk(oo.id)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return err
@@ -1050,7 +1045,7 @@ func (oo *openObject) Read(p []byte) (n int, err error) {
oo.skip -= int64(size)
}
if len(oo.chunk) == 0 {
err = oo.getChunk(oo.ctx)
err = oo.getChunk()
if err != nil {
return 0, err
}
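`openObject.Read` drains a leftover `chunk` buffer and calls `getChunk` to refill it when empty. Here is a self-contained `io.Reader` with the same refill shape; the `chunkReader` type is illustrative, not the mega downloader:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// chunkReader sketches openObject.Read: keep a leftover buffer and
// refill it one chunk at a time from a backing source.
type chunkReader struct {
	src   io.Reader
	chunk []byte
	size  int
}

func (cr *chunkReader) getChunk() error {
	buf := make([]byte, cr.size)
	n, err := cr.src.Read(buf)
	if n > 0 {
		cr.chunk = buf[:n]
		return nil
	}
	if err == nil {
		err = io.EOF
	}
	return err
}

func (cr *chunkReader) Read(p []byte) (int, error) {
	if len(cr.chunk) == 0 {
		if err := cr.getChunk(); err != nil {
			return 0, err
		}
	}
	n := copy(p, cr.chunk)
	cr.chunk = cr.chunk[n:] // keep whatever didn't fit for the next Read
	return n, nil
}

func main() {
	cr := &chunkReader{src: strings.NewReader("hello, chunked world"), size: 8}
	out, _ := io.ReadAll(cr)
	fmt.Println(string(out))
}
```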
@@ -1073,7 +1068,7 @@ func (oo *openObject) Close() (err error) {
}
err = oo.o.fs.pacer.Call(func() (bool, error) {
err = oo.d.Finish()
return shouldRetry(oo.ctx, err)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "failed to finish download")
@@ -1101,14 +1096,13 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
var d *mega.Download
err = o.fs.pacer.Call(func() (bool, error) {
d, err = o.fs.srv.NewDownload(o.info)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "open download file failed")
}
oo := &openObject{
ctx: ctx,
o: o,
d: d,
skip: offset,
@@ -1131,7 +1125,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
remote := o.Remote()
// Create the parent directory
dirNode, leaf, err := o.fs.mkdirParent(ctx, remote)
dirNode, leaf, err := o.fs.mkdirParent(remote)
if err != nil {
return errors.Wrap(err, "update make parent dir failed")
}
@@ -1139,7 +1133,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var u *mega.Upload
err = o.fs.pacer.Call(func() (bool, error) {
u, err = o.fs.srv.NewUpload(dirNode, o.fs.opt.Enc.FromStandardName(leaf), size)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "upload file failed to create session")
@@ -1160,7 +1154,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
err = o.fs.pacer.Call(func() (bool, error) {
err = u.UploadChunk(id, chunk)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "upload file failed to upload chunk")
@@ -1171,7 +1165,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var info *mega.Node
err = o.fs.pacer.Call(func() (bool, error) {
info, err = u.Finish()
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "failed to finish upload")
@@ -1179,7 +1173,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// If the upload succeeded and the original object existed, then delete it
if o.info != nil {
err = o.fs.deleteNode(ctx, o.info)
err = o.fs.deleteNode(o.info)
if err != nil {
return errors.Wrap(err, "upload failed to remove old version")
}
@@ -1191,7 +1185,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
err := o.fs.deleteNode(ctx, o.info)
err := o.fs.deleteNode(o.info)
if err != nil {
return errors.Wrap(err, "Remove object failed")
}

View File

@@ -302,6 +302,7 @@ func init() {
m.Set(configDriveID, finalDriveID)
m.Set(configDriveType, rootItem.ParentReference.DriveType)
config.SaveConfig()
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "region",
@@ -549,10 +550,7 @@ var errAsyncJobAccessDenied = errors.New("async job failed - access denied")
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func shouldRetry(resp *http.Response, err error) (bool, error) {
retry := false
if resp != nil {
switch resp.StatusCode {
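The removed `fserrors.ContextError` check is what stops a retry loop once the caller's context is cancelled: a dead context must never be retried, whatever the HTTP status says. A standard-library-only sketch of why that check comes first (`shouldRetrySketch` and `retryStatus` are illustrative names, not rclone's API):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"net/http"
)

var retryStatus = map[int]bool{429: true, 500: true, 502: true, 503: true, 504: true}

// shouldRetrySketch: check the context before the status code, so a
// cancelled or timed-out request gives up immediately.
func shouldRetrySketch(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if ctxErr := ctx.Err(); ctxErr != nil {
		if err == nil {
			err = ctxErr
		}
		return false, err // context done: no retry
	}
	if resp != nil && retryStatus[resp.StatusCode] {
		return true, err
	}
	return false, err
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	retry, err := shouldRetrySketch(ctx, &http.Response{StatusCode: 503}, errors.New("boom"))
	fmt.Println(retry, err) // false boom — no retry once the context is done
}
```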
@@ -599,7 +597,7 @@ func (f *Fs) readMetaDataForPathRelativeToID(ctx context.Context, normalizedID s
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
return info, resp, err
@@ -615,7 +613,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
opts.Path = strings.TrimSuffix(opts.Path, ":")
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
return info, resp, err
}
@@ -871,7 +869,7 @@ func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, e
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
//fmt.Printf("...Error %v\n", err)
@@ -903,7 +901,7 @@ OUTER:
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return found, errors.Wrap(err, "couldn't list files")
@@ -1040,7 +1038,7 @@ func (f *Fs) deleteObject(ctx context.Context, id string) error {
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
}
@@ -1090,7 +1088,7 @@ func (f *Fs) Precision() time.Duration {
// waitForJob waits for the job with status in url to complete
func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
deadline := time.Now().Add(f.ci.TimeoutOrInfinite())
deadline := time.Now().Add(f.ci.Timeout)
for time.Now().Before(deadline) {
var resp *http.Response
var err error
@@ -1128,7 +1126,7 @@ func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
time.Sleep(1 * time.Second)
}
return errors.Errorf("async operation didn't complete after %v", f.ci.TimeoutOrInfinite())
return errors.Errorf("async operation didn't complete after %v", f.ci.Timeout)
}
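`waitForJob` is a plain poll-until-deadline loop over the async job's status URL. The same shape in miniature — `pollUntil` is an illustrative helper, and the sleep is shortened from the real code's one second:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// pollUntil sketches waitForJob: call check repeatedly until it reports
// done, fails, or the deadline passes.
func pollUntil(timeout time.Duration, check func() (done bool, err error)) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		done, err := check()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		time.Sleep(10 * time.Millisecond) // 1s in the real code
	}
	return errors.New("async operation didn't complete in time")
}

func main() {
	tries := 0
	err := pollUntil(time.Second, func() (bool, error) {
		tries++
		return tries >= 3, nil
	})
	fmt.Println("tries:", tries, "err:", err)
}
```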
// Copy src to this remote using server-side copy operations.
@@ -1196,7 +1194,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &copyReq, nil)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -1289,7 +1287,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
var info api.Item
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -1356,7 +1354,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
var info api.Item
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return err
@@ -1382,7 +1380,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &drive)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "about failed")
@@ -1432,7 +1430,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var result api.CreateShareLinkResponse
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &share, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
fmt.Println(err)
@@ -1477,7 +1475,7 @@ func (o *Object) deleteVersions(ctx context.Context) error {
var versions api.VersionsResponse
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &versions)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return err
@@ -1504,7 +1502,7 @@ func (o *Object) deleteVersion(ctx context.Context, ID string) error {
opts.NoResponse = true
return o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
}
@@ -1655,7 +1653,7 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
var info *api.Item
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
// Remove versions if required
if o.fs.opt.NoVersions {
@@ -1697,7 +1695,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -1725,7 +1723,7 @@ func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (re
err = errors.New(err.Error() + " (is it a OneNote file?)")
}
}
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
return response, err
}
@@ -1740,7 +1738,7 @@ func (o *Object) getPosition(ctx context.Context, url string) (pos int64, err er
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return 0, err
@@ -1800,11 +1798,11 @@ func (o *Object) uploadFragment(ctx context.Context, url string, start int64, to
return true, errors.Wrapf(err, "retry this chunk skipping %d bytes", skip)
}
if err != nil {
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
}
body, err = rest.ReadBody(resp)
if err != nil {
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
}
if resp.StatusCode == 200 || resp.StatusCode == 201 {
// we are done :)
@@ -1827,7 +1825,7 @@ func (o *Object) cancelUploadSession(ctx context.Context, url string) (err error
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
return
}
@@ -1898,7 +1896,7 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
err = errors.New(err.Error() + " (is it a OneNote file?)")
}
}
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err

View File

@@ -119,7 +119,6 @@ type Object struct {
fs *Fs // what this object is part of
remote string // The remote path
id string // ID of the file
parent string // ID of the parent directory
modTime time.Time // The modified time of the object if known
md5 string // MD5 hash if known
size int64 // Size of the object
@@ -207,7 +206,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
Path: "/session/login.json",
}
resp, err = f.srv.CallJSON(ctx, &opts, &account, &f.session)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to create session")
@@ -234,7 +233,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// No root so return old f
return f, nil
}
_, err := tempF.newObjectWithInfo(ctx, remote, nil, "")
_, err := tempF.newObjectWithInfo(ctx, remote, nil)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
@@ -294,7 +293,7 @@ func (f *Fs) deleteObject(ctx context.Context, id string) error {
Path: "/folder/remove.json",
}
resp, err := f.srv.CallJSON(ctx, &opts, &removeDirData, nil)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
}
@@ -389,7 +388,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
Path: "/file/move_copy.json",
}
resp, err = f.srv.CallJSON(ctx, &opts, &copyFileData, &response)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -446,7 +445,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
Path: "/file/move_copy.json",
}
resp, err = f.srv.CallJSON(ctx, &opts, &copyFileData, &response)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -495,7 +494,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
Path: "/folder/move_copy.json",
}
resp, err = f.srv.CallJSON(ctx, &opts, &moveFolderData, &response)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
fs.Debugf(src, "DirMove error %v", err)
@@ -518,7 +517,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, file *File, parent string) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, file *File) (fs.Object, error) {
// fs.Debugf(nil, "newObjectWithInfo(%s, %v)", remote, file)
var o *Object
@@ -527,7 +526,6 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, file *File, p
fs: f,
remote: remote,
id: file.FileID,
parent: parent,
modTime: time.Unix(file.DateModified, 0),
size: file.Size,
md5: file.FileHash,
@@ -550,7 +548,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, file *File, p
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// fs.Debugf(nil, "NewObject(\"%s\")", remote)
return f.newObjectWithInfo(ctx, remote, nil, "")
return f.newObjectWithInfo(ctx, remote, nil)
}
// Creates, from the parameters passed in, a half-finished Object which
@@ -583,7 +581,7 @@ func (f *Fs) readMetaDataForFolderID(ctx context.Context, id string) (info *Fold
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -633,7 +631,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
Path: "/upload/create_file.json",
}
resp, err = o.fs.srv.CallJSON(ctx, &opts, &createFileData, &response)
return o.fs.shouldRetry(ctx, resp, err)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to create file")
@@ -659,10 +657,7 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
@@ -688,7 +683,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
Path: "/folder.json",
}
resp, err = f.srv.CallJSON(ctx, &opts, &createDirData, &response)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
return "", err
@@ -716,7 +711,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
Path: "/folder/list.json/" + f.session.SessionID + "/" + pathID,
}
resp, err = f.srv.CallJSON(ctx, &opts, nil, &folderList)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
return "", false, errors.Wrap(err, "failed to get folder list")
@@ -759,7 +754,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
folderList := FolderList{}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &folderList)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get folder list")
@@ -773,7 +768,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
f.dirCache.Put(remote, folder.FolderID)
d := fs.NewDir(remote, time.Unix(folder.DateModified, 0)).SetID(folder.FolderID)
d.SetItems(int64(folder.ChildFolders))
d.SetParentID(directoryID)
entries = append(entries, d)
}
@@ -781,7 +775,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
file.Name = f.opt.Enc.ToStandardName(file.Name)
// fs.Debugf(nil, "File: %s (%s)", file.Name, file.FileID)
remote := path.Join(dir, file.Name)
o, err := f.newObjectWithInfo(ctx, remote, &file, directoryID)
o, err := f.newObjectWithInfo(ctx, remote, &file)
if err != nil {
return nil, err
}
@@ -848,7 +842,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
}
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, nil)
return o.fs.shouldRetry(ctx, resp, err)
return o.fs.shouldRetry(resp, err)
})
o.modTime = modTime
@@ -868,7 +862,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return o.fs.shouldRetry(ctx, resp, err)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to open file)")
@@ -887,7 +881,7 @@ func (o *Object) Remove(ctx context.Context) error {
Path: "/file.json/" + o.fs.session.SessionID + "/" + o.id,
}
resp, err := o.fs.srv.Call(ctx, &opts)
return o.fs.shouldRetry(ctx, resp, err)
return o.fs.shouldRetry(resp, err)
})
}
@@ -916,7 +910,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Path: "/upload/open_file_upload.json",
}
resp, err := o.fs.srv.CallJSON(ctx, &opts, &openUploadData, &openResponse)
return o.fs.shouldRetry(ctx, resp, err)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to create file")
@@ -960,7 +954,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &reply)
return o.fs.shouldRetry(ctx, resp, err)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to create file")
@@ -983,7 +977,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Path: "/upload/close_file_upload.json",
}
resp, err = o.fs.srv.CallJSON(ctx, &opts, &closeUploadData, &closeResponse)
return o.fs.shouldRetry(ctx, resp, err)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to create file")
@@ -1009,7 +1003,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Path: "/file/access.json",
}
resp, err = o.fs.srv.CallJSON(ctx, &opts, &update, nil)
return o.fs.shouldRetry(ctx, resp, err)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
return err
@@ -1035,7 +1029,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
o.fs.session.SessionID, directoryID, url.QueryEscape(o.fs.opt.Enc.FromStandardName(leaf))),
}
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &folderList)
return o.fs.shouldRetry(ctx, resp, err)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to get folder list")
@@ -1059,11 +1053,6 @@ func (o *Object) ID() string {
return o.id
}
// ParentID returns the ID of the Object parent directory if known, or "" if not
func (o *Object) ParentID() string {
return o.parent
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
@@ -1074,5 +1063,4 @@ var (
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
_ fs.ParentIDer = (*Object)(nil)
)
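The `var _ fs.ParentIDer = (*Object)(nil)` line being deleted above is a compile-time interface assertion: the build breaks if the type stops satisfying the interface. A tiny standalone demonstration of the pattern, with a cut-down `IDer` standing in for rclone's optional interfaces:

```go
package main

import "fmt"

// IDer is a cut-down stand-in for rclone's fs.IDer optional interface.
type IDer interface{ ID() string }

type Object struct{ id string }

func (o *Object) ID() string { return o.id }

// The blank-identifier assignment is the same compile-time assertion
// the diff uses: it fails to build if *Object stops satisfying IDer.
var _ IDer = (*Object)(nil)

func main() {
	var x interface{} = &Object{id: "42"}
	if ider, ok := x.(IDer); ok { // optional-interface probe at runtime
		fmt.Println("object id:", ider.ID())
	}
}
```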

View File

@@ -213,10 +213,7 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func shouldRetry(resp *http.Response, err error) (bool, error) {
doRetry := false
// Check if it is an api.Error
@@ -408,7 +405,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
//fmt.Printf("...Error %v\n", err)
@@ -463,7 +460,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return found, errors.Wrap(err, "couldn't list files")
@@ -600,7 +597,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "rmdir failed")
@@ -665,7 +662,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -703,7 +700,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
return f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
err = result.Update(err)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
}
@@ -743,7 +740,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -790,7 +787,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return err
@@ -817,7 +814,7 @@ func (f *Fs) linkDir(ctx context.Context, dirID string, expire fs.Duration) (str
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return "", err
@@ -841,7 +838,7 @@ func (f *Fs) linkFile(ctx context.Context, path string, expire fs.Duration) (str
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return "", err
@@ -872,7 +869,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &q)
err = q.Error.Update(err)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "about failed")
@@ -930,7 +927,7 @@ func (o *Object) getHashes(ctx context.Context) (err error) {
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return err
@@ -1049,7 +1046,7 @@ func (o *Object) downloadURL(ctx context.Context) (URL string, err error) {
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return "", err
@@ -1075,7 +1072,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -1137,7 +1134,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// opts.Body=0), so upload it as a multipart form POST with
// Content-Length set.
if size == 0 {
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, opts.Parameters, "content", leaf)
formReader, contentType, overhead, err := rest.MultipartUpload(in, opts.Parameters, "content", leaf)
if err != nil {
return errors.Wrap(err, "failed to make multipart upload for 0 length file")
}
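`rest.MultipartUpload` wraps the body in a multipart/form-data POST so that even a zero-length file produces a well-formed, non-empty request body with a Content-Length. A sketch of the equivalent using only `mime/multipart` — `buildMultipart` is an illustrative helper, not rclone's `lib/rest` API:

```go
package main

import (
	"bytes"
	"fmt"
	"mime/multipart"
)

// buildMultipart builds a multipart/form-data body with one file part,
// so a 0-byte upload still has boundary headers and a positive length.
func buildMultipart(field, filename string, content []byte) (*bytes.Buffer, string, error) {
	var buf bytes.Buffer
	w := multipart.NewWriter(&buf)
	part, err := w.CreateFormFile(field, filename)
	if err != nil {
		return nil, "", err
	}
	if _, err := part.Write(content); err != nil {
		return nil, "", err
	}
	if err := w.Close(); err != nil {
		return nil, "", err
	}
	return &buf, w.FormDataContentType(), nil
}

func main() {
	body, contentType, err := buildMultipart("content", "empty.txt", nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("Content-Type:", contentType)
	fmt.Println("body bytes:", body.Len()) // non-zero even for an empty file
}
```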
@@ -1154,7 +1151,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
// sometimes pcloud leaves a half complete file on
@@ -1184,7 +1181,7 @@ func (o *Object) Remove(ctx context.Context) error {
return o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
}

View File

@@ -176,10 +176,7 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
@@ -373,7 +370,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
//fmt.Printf("...Error %v\n", err)
@@ -410,7 +407,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return found, errors.Wrap(err, "couldn't list files")
@@ -584,7 +581,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
var result api.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "rmdir failed")
@@ -663,7 +660,7 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
var result api.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "Move http")
@@ -772,7 +769,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "CreateDir http")
@@ -899,7 +896,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -937,7 +934,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &info)
if err != nil {
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
}
// Just check the download URL resolves - sometimes
// the URLs returned by premiumize.me don't resolve so
@@ -996,7 +993,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var result api.Response
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "upload file http")
@@ -1038,7 +1035,7 @@ func (f *Fs) renameLeaf(ctx context.Context, isFile bool, id string, newLeaf str
var result api.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "rename http")
@@ -1063,7 +1060,7 @@ func (f *Fs) remove(ctx context.Context, id string) (err error) {
var result api.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "remove http")

View File

@@ -1,7 +1,6 @@
package putio
import (
"context"
"fmt"
"net/http"
@@ -30,10 +29,7 @@ func (e *statusCodeError) Temporary() bool {
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func shouldRetry(err error) (bool, error) {
if err == nil {
return false, nil
}

View File

@@ -147,7 +147,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "creating folder. part: %s, parentID: %d", leaf, parentID)
entry, err = f.client.Files.CreateFolder(ctx, f.opt.Enc.FromStandardName(leaf), parentID)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
return itoa(entry.ID), err
}
@@ -164,7 +164,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "listing file: %d", fileID)
children, _, err = f.client.Files.List(ctx, fileID)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode == 404 {
@@ -205,7 +205,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "listing files inside List: %d", parentID)
children, _, err = f.client.Files.List(ctx, parentID)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return
@@ -271,7 +271,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "getting file: %d", fileID)
entry, err = f.client.Files.Get(ctx, fileID)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, err
@@ -295,7 +295,7 @@ func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID
req.Header.Set("upload-metadata", fmt.Sprintf("name %s,no-torrent %s,parent_id %s,updated-at %s", b64name, b64true, b64parentID, b64modifiedAt))
fs.OpenOptionAddHTTPHeaders(req.Header, options)
resp, err := f.oAuthClient.Do(req)
retry, err := shouldRetry(ctx, err)
retry, err := shouldRetry(err)
if retry {
return true, err
}
@@ -320,7 +320,7 @@ func (f *Fs) sendUpload(ctx context.Context, location string, size int64, in io.
err = f.pacer.Call(func() (bool, error) {
fs.Debugf(f, "Sending zero length chunk")
_, fileID, err = f.transferChunk(ctx, location, 0, bytes.NewReader([]byte{}), 0)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
return
}
@@ -344,13 +344,13 @@ func (f *Fs) sendUpload(ctx context.Context, location string, size int64, in io.
// Get file offset and seek to the position
offset, err := f.getServerOffset(ctx, location)
if err != nil {
return shouldRetry(ctx, err)
return shouldRetry(err)
}
sentBytes := offset - chunkStart
fs.Debugf(f, "sentBytes: %d", sentBytes)
_, err = chunk.Seek(sentBytes, io.SeekStart)
if err != nil {
return shouldRetry(ctx, err)
return shouldRetry(err)
}
transferOffset = offset
reqSize = chunkSize - sentBytes
@@ -367,7 +367,7 @@ func (f *Fs) sendUpload(ctx context.Context, location string, size int64, in io.
offsetMismatch = true
return true, errors.New("connection broken")
}
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return
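The `sendUpload` hunk above resumes a broken chunk tus-style: ask the server how far it got, seek the chunk past the bytes already received, and send only the remainder. The arithmetic in isolation (`resumeChunk` is an illustrative helper, not put.io's client):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// resumeChunk: given the server's confirmed offset, skip the bytes it
// already has and report how many are left to send from this chunk.
func resumeChunk(chunk io.ReadSeeker, chunkStart, chunkSize, serverOffset int64) (io.Reader, int64, error) {
	sentBytes := serverOffset - chunkStart
	if _, err := chunk.Seek(sentBytes, io.SeekStart); err != nil {
		return nil, 0, err
	}
	return chunk, chunkSize - sentBytes, nil
}

func main() {
	chunk := strings.NewReader("0123456789") // a 10-byte chunk starting at offset 100
	rest, n, err := resumeChunk(chunk, 100, 10, 104)
	if err != nil {
		panic(err)
	}
	data, _ := io.ReadAll(rest)
	fmt.Printf("resend %d bytes: %q\n", n, data) // resend 6 bytes: "456789"
}
```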
@@ -479,7 +479,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "listing files: %d", dirID)
children, _, err = f.client.Files.List(ctx, dirID)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "Rmdir")
@@ -493,7 +493,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "deleting file: %d", dirID)
err = f.client.Files.Delete(ctx, dirID)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
f.dirCache.FlushDir(dir)
return err
@@ -552,7 +552,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Objec
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
// fs.Debugf(f, "copying file (%d) to parent_id: %s", srcObj.file.ID, directoryID)
_, err = f.client.Do(req, nil)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, err
@@ -591,7 +591,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (o fs.Objec
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
// fs.Debugf(f, "moving file (%d) to parent_id: %s", srcObj.file.ID, directoryID)
_, err = f.client.Do(req, nil)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, err
@@ -631,7 +631,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
// fs.Debugf(f, "moving file (%s) to parent_id: %s", srcID, dstDirectoryID)
_, err = f.client.Do(req, nil)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
srcFs.dirCache.FlushDir(srcRemote)
return err
@@ -644,7 +644,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "getting account info")
ai, err = f.client.Account.Info(ctx)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "about failed")
@@ -678,6 +678,6 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
}
// fs.Debugf(f, "emptying trash")
_, err = f.client.Do(req, nil)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
}

View File

@@ -145,7 +145,7 @@ func (o *Object) readEntry(ctx context.Context) (f *putio.File, err error) {
if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode == 404 {
return false, fs.ErrorObjectNotFound
}
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, err
@@ -220,7 +220,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
var storageURL string
err = o.fs.pacer.Call(func() (bool, error) {
storageURL, err = o.fs.client.Files.URL(ctx, o.file.ID, true)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return
@@ -231,7 +231,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
err = o.fs.pacer.Call(func() (bool, error) {
req, err := http.NewRequestWithContext(ctx, http.MethodGet, storageURL, nil)
if err != nil {
return shouldRetry(ctx, err)
return shouldRetry(err)
}
req.Header.Set("User-Agent", o.fs.client.UserAgent)
@@ -241,7 +241,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
// fs.Debugf(o, "opening file: id=%d", o.file.ID)
resp, err = o.fs.httpClient.Do(req)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode >= 400 && perr.Response.StatusCode <= 499 {
_ = resp.Body.Close()
@@ -283,6 +283,6 @@ func (o *Object) Remove(ctx context.Context) (err error) {
return o.fs.pacer.Call(func() (bool, error) {
// fs.Debugf(o, "removing file: id=%d", o.file.ID)
err = o.fs.client.Files.Delete(ctx, o.file.ID)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
}

View File

@@ -1399,10 +1399,7 @@ var retryErrorCodes = []int{
// S3 is pretty resilient, and the built-in retry handling is probably sufficient
// as it should notice closed connections and timeouts, which are the most likely
// sorts of failure modes
func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func (f *Fs) shouldRetry(err error) (bool, error) {
// If this is an awserr object, try and extract more useful information to determine if we should retry
if awsError, ok := err.(awserr.Error); ok {
// Simple case, check the original embedded error in case it's generically retryable
@@ -1414,7 +1411,7 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
// 301 if wrong region for bucket - can only update if running from a bucket
if f.rootBucket != "" {
if reqErr.StatusCode() == http.StatusMovedPermanently {
urfbErr := f.updateRegionForBucket(ctx, f.rootBucket)
urfbErr := f.updateRegionForBucket(f.rootBucket)
if urfbErr != nil {
fs.Errorf(f, "Failed to update region for bucket: %v", urfbErr)
return false, err
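`f.shouldRetry` peels the AWS error apart and treats a 301 as "repair the client (update the bucket region), then retry". A standard-library-only sketch of that decision shape — `statusError` and `retryDecision` are stand-ins, not the real `awserr` types:

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

// statusError is a stand-in for awserr.RequestFailure: an error that
// carries the HTTP status of the failed request.
type statusError struct{ code int }

func (e *statusError) Error() string   { return fmt.Sprintf("request failed: %d", e.code) }
func (e *statusError) StatusCode() int { return e.code }

// retryDecision: unwrap the error and map status codes to actions,
// treating a permanent redirect as "fix the region, then retry".
func retryDecision(err error) (repairRegion, retry bool) {
	var se *statusError
	if errors.As(err, &se) {
		switch se.StatusCode() {
		case http.StatusMovedPermanently:
			return true, true
		case http.StatusTooManyRequests, http.StatusInternalServerError:
			return false, true
		}
	}
	return false, false
}

func main() {
	repair, retry := retryDecision(&statusError{code: 301})
	fmt.Println("repair:", repair, "retry:", retry)
}
```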
@@ -1465,7 +1462,7 @@ func getClient(ctx context.Context, opt *Options) *http.Client {
}
// s3Connection makes a connection to s3
func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S3, *session.Session, error) {
func s3Connection(ctx context.Context, opt *Options) (*s3.S3, *session.Session, error) {
// Make the auth
v := credentials.Value{
AccessKeyID: opt.AccessKeyID,
@@ -1543,7 +1540,7 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
awsConfig := aws.NewConfig().
WithMaxRetries(0). // Rely on rclone's retry logic
WithCredentials(cred).
WithHTTPClient(client).
WithHTTPClient(getClient(ctx, opt)).
WithS3ForcePathStyle(opt.ForcePathStyle).
WithS3UseAccelerate(opt.UseAccelerateEndpoint).
WithS3UsEast1RegionalEndpoint(endpoints.RegionalS3UsEast1Endpoint)
@@ -1562,8 +1559,9 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
if opt.EnvAuth && opt.AccessKeyID == "" && opt.SecretAccessKey == "" {
// Enable loading config options from ~/.aws/config (selected by AWS_PROFILE env)
awsSessionOpts.SharedConfigState = session.SharedConfigEnable
// Set the name of the profile if supplied
awsSessionOpts.Profile = opt.Profile
// The session constructor (aws/session/mergeConfigSrcs) will only use the user's preferred credential source
// (from the shared config file) if the passed-in Options.Config.Credentials is nil.
awsSessionOpts.Config.Credentials = nil
}
ses, err := session.NewSessionWithOptions(awsSessionOpts)
if err != nil {
@@ -1649,8 +1647,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
md5sumBinary := md5.Sum([]byte(opt.SSECustomerKey))
opt.SSECustomerKeyMD5 = base64.StdEncoding.EncodeToString(md5sumBinary[:])
}
srv := getClient(ctx, opt)
c, ses, err := s3Connection(ctx, opt, srv)
c, ses, err := s3Connection(ctx, opt)
if err != nil {
return nil, err
}
@@ -1665,7 +1662,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ses: ses,
pacer: fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep))),
cache: bucket.NewCache(),
srv: srv,
srv: getClient(ctx, opt),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
@@ -1700,9 +1697,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.setRoot(newRoot)
_, err := f.NewObject(ctx, leaf)
if err != nil {
// File doesn't exist or is a directory so return old f
f.setRoot(oldRoot)
return f, nil
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
// File doesn't exist or is a directory so return old f
f.setRoot(oldRoot)
return f, nil
}
return nil, err
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
@@ -1746,7 +1746,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
}
// Gets the bucket location
func (f *Fs) getBucketLocation(ctx context.Context, bucket string) (string, error) {
func (f *Fs) getBucketLocation(bucket string) (string, error) {
req := s3.GetBucketLocationInput{
Bucket: &bucket,
}
@@ -1754,7 +1754,7 @@ func (f *Fs) getBucketLocation(ctx context.Context, bucket string) (string, erro
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.c.GetBucketLocation(&req)
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
return "", err
@@ -1764,8 +1764,8 @@ func (f *Fs) getBucketLocation(ctx context.Context, bucket string) (string, erro
// Updates the region for the bucket by reading the region from the
// bucket then updating the session.
func (f *Fs) updateRegionForBucket(ctx context.Context, bucket string) error {
region, err := f.getBucketLocation(ctx, bucket)
func (f *Fs) updateRegionForBucket(bucket string) error {
region, err := f.getBucketLocation(bucket)
if err != nil {
return errors.Wrap(err, "reading bucket location failed")
}
@@ -1779,7 +1779,7 @@ func (f *Fs) updateRegionForBucket(ctx context.Context, bucket string) error {
// Make a new session with the new region
oldRegion := f.opt.Region
f.opt.Region = region
c, ses, err := s3Connection(f.ctx, &f.opt, f.srv)
c, ses, err := s3Connection(f.ctx, &f.opt)
if err != nil {
return errors.Wrap(err, "creating new session failed")
}
@@ -1859,7 +1859,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
}
}
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
if awsErr, ok := err.(awserr.RequestFailure); ok {
@@ -2006,7 +2006,7 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
var resp *s3.ListBucketsOutput
err = f.pacer.Call(func() (bool, error) {
resp, err = f.c.ListBucketsWithContext(ctx, &req)
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
return nil, err
@@ -2121,7 +2121,7 @@ func (f *Fs) bucketExists(ctx context.Context, bucket string) (bool, error) {
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.HeadBucketWithContext(ctx, &req)
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err == nil {
return true, nil
@@ -2157,7 +2157,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.CreateBucketWithContext(ctx, &req)
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err == nil {
fs.Infof(f, "Bucket %q created with ACL %q", bucket, f.opt.BucketACL)
@@ -2187,7 +2187,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.DeleteBucketWithContext(ctx, &req)
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err == nil {
fs.Infof(f, "Bucket %q deleted", bucket)
@@ -2247,7 +2247,7 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa
}
return f.pacer.Call(func() (bool, error) {
_, err := f.c.CopyObjectWithContext(ctx, req)
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
}
@@ -2291,7 +2291,7 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
if err := f.pacer.Call(func() (bool, error) {
var err error
cout, err = f.c.CreateMultipartUploadWithContext(ctx, req)
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
}); err != nil {
return err
}
@@ -2307,7 +2307,7 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
UploadId: uid,
RequestPayer: req.RequestPayer,
})
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
})()
@@ -2330,7 +2330,7 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
uploadPartReq.CopySourceRange = aws.String(calculateRange(partSize, partNum-1, numParts, srcSize))
uout, err := f.c.UploadPartCopyWithContext(ctx, uploadPartReq)
if err != nil {
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
}
parts = append(parts, &s3.CompletedPart{
PartNumber: &partNum,
@@ -2352,7 +2352,7 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
RequestPayer: req.RequestPayer,
UploadId: uid,
})
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
}
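`calculateRange` (whose body isn't shown in this diff) presumably maps a part number to the `bytes=start-end` header that `UploadPartCopy` expects, with the last part taking the remainder. A sketch under that assumption — `calcRange` is an illustrative reimplementation, not rclone's function:

```go
package main

import "fmt"

// calcRange: part i of n covers [i*partSize, min((i+1)*partSize, size)-1],
// rendered as an HTTP range header for a server-side multipart copy.
func calcRange(partSize, partIndex, numParts, size int64) string {
	start := partIndex * partSize
	end := start + partSize - 1
	if partIndex == numParts-1 { // last part takes the remainder
		end = size - 1
	}
	return fmt.Sprintf("bytes=%d-%d", start, end)
}

func main() {
	var size, partSize int64 = 10_500_000, 5_000_000
	numParts := (size + partSize - 1) / partSize
	for i := int64(0); i < numParts; i++ {
		fmt.Println(calcRange(partSize, i, numParts, size))
	}
	// bytes=0-4999999
	// bytes=5000000-9999999
	// bytes=10000000-10499999
}
```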
@@ -2583,7 +2583,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
reqCopy.Key = &bucketPath
err = f.pacer.Call(func() (bool, error) {
_, err = f.c.RestoreObject(&reqCopy)
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
st.Status = err.Error()
@@ -2631,7 +2631,7 @@ func (f *Fs) listMultipartUploads(ctx context.Context, bucket, key string) (uplo
var resp *s3.ListMultipartUploadsOutput
err = f.pacer.Call(func() (bool, error) {
resp, err = f.c.ListMultipartUploads(&req)
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
return nil, errors.Wrapf(err, "list multipart uploads bucket %q key %q", bucket, key)
@@ -2806,7 +2806,7 @@ func (o *Object) headObject(ctx context.Context) (resp *s3.HeadObjectOutput, err
err = o.fs.pacer.Call(func() (bool, error) {
var err error
resp, err = o.fs.c.HeadObjectWithContext(ctx, &req)
return o.fs.shouldRetry(ctx, err)
return o.fs.shouldRetry(err)
})
if err != nil {
if awsErr, ok := err.(awserr.RequestFailure); ok {
@@ -2962,7 +2962,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
var err error
httpReq.HTTPRequest = httpReq.HTTPRequest.WithContext(ctx)
err = httpReq.Send()
return o.fs.shouldRetry(ctx, err)
return o.fs.shouldRetry(err)
})
if err, ok := err.(awserr.RequestFailure); ok {
if err.Code() == "InvalidObjectState" {
@@ -3021,7 +3021,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
err = f.pacer.Call(func() (bool, error) {
var err error
cout, err = f.c.CreateMultipartUploadWithContext(ctx, &mReq)
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "multipart upload failed to initialise")
@@ -3040,7 +3040,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
UploadId: uid,
RequestPayer: req.RequestPayer,
})
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if errCancel != nil {
fs.Debugf(o, "Failed to cancel multipart upload: %v", errCancel)
@@ -3116,7 +3116,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
uout, err := f.c.UploadPartWithContext(gCtx, uploadPartReq)
if err != nil {
if partNum <= int64(concurrency) {
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
}
// retry all chunks once have done the first batch
return true, err
@@ -3156,7 +3156,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
RequestPayer: req.RequestPayer,
UploadId: uid,
})
return f.shouldRetry(ctx, err)
return f.shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "multipart upload failed to finalise")
@@ -3311,11 +3311,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var err error
resp, err = o.fs.srv.Do(httpReq)
if err != nil {
return o.fs.shouldRetry(ctx, err)
return o.fs.shouldRetry(err)
}
body, err := rest.ReadBody(resp)
if err != nil {
return o.fs.shouldRetry(ctx, err)
return o.fs.shouldRetry(err)
}
if resp.StatusCode >= 200 && resp.StatusCode < 299 {
return false, nil
@@ -3366,7 +3366,7 @@ func (o *Object) Remove(ctx context.Context) error {
}
err := o.fs.pacer.Call(func() (bool, error) {
_, err := o.fs.c.DeleteObjectWithContext(ctx, &req)
return o.fs.shouldRetry(ctx, err)
return o.fs.shouldRetry(err)
})
return err
}

View File

@@ -372,6 +372,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper) {
m.Set(configAuthToken, token)
// And delete any previous entry for password
m.Set(configPassword, "")
config.SaveConfig()
// And we're done here
break
}
@@ -407,10 +408,7 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
// For 429 errors look at the Retry-After: header and
// set the retry appropriately, starting with a minimum of 1
// second if it isn't set.
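The body of that Retry-After handling is cut off by the hunk. A minimal sketch of the behaviour it describes, assuming rclone's pacer.RetryAfterError helper plus the standard net/http, strconv and time packages (handle429 is an illustrative name, not the real function):

// handle429 turns a 429 response into a timed retry, defaulting the
// delay to the 1 second minimum when Retry-After is absent or unparseable.
func handle429(resp *http.Response, err error) (bool, error) {
	if resp == nil || resp.StatusCode != 429 {
		return false, err
	}
	delay := time.Second
	if v := resp.Header.Get("Retry-After"); v != "" {
		if secs, parseErr := strconv.Atoi(v); parseErr == nil && secs > 0 {
			delay = time.Duration(secs) * time.Second
		}
	}
	return true, pacer.RetryAfterError(err, delay)
}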

View File

@@ -86,7 +86,7 @@ func (f *Fs) getServerInfo(ctx context.Context) (account *api.ServerInfo, err er
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
if resp != nil {
@@ -112,7 +112,7 @@ func (f *Fs) getUserAccountInfo(ctx context.Context) (account *api.AccountInfo,
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
if resp != nil {
@@ -139,7 +139,7 @@ func (f *Fs) getLibraries(ctx context.Context) ([]api.Library, error) {
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
if resp != nil {
@@ -170,7 +170,7 @@ func (f *Fs) createLibrary(ctx context.Context, libraryName, password string) (l
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &request, &result)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
if resp != nil {
@@ -197,7 +197,7 @@ func (f *Fs) deleteLibrary(ctx context.Context, libraryID string) error {
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
if resp != nil {
@@ -228,7 +228,7 @@ func (f *Fs) decryptLibrary(ctx context.Context, libraryID, password string) err
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
if resp != nil {
@@ -271,7 +271,7 @@ func (f *Fs) getDirectoryEntriesAPIv21(ctx context.Context, libraryID, dirPath s
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
if resp != nil {
@@ -316,7 +316,7 @@ func (f *Fs) getDirectoryDetails(ctx context.Context, libraryID, dirPath string)
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
if resp != nil {
@@ -358,7 +358,7 @@ func (f *Fs) createDir(ctx context.Context, libraryID, dirPath string) error {
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
if resp != nil {
@@ -398,7 +398,7 @@ func (f *Fs) renameDir(ctx context.Context, libraryID, dirPath, newName string)
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
if resp != nil {
@@ -438,7 +438,7 @@ func (f *Fs) moveDir(ctx context.Context, srcLibraryID, srcDir, srcName, dstLibr
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &request, nil)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
if resp != nil {
@@ -474,7 +474,7 @@ func (f *Fs) deleteDir(ctx context.Context, libraryID, filePath string) error {
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, nil)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
if resp != nil {
@@ -505,7 +505,7 @@ func (f *Fs) getFileDetails(ctx context.Context, libraryID, filePath string) (*a
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
if resp != nil {
@@ -539,7 +539,7 @@ func (f *Fs) deleteFile(ctx context.Context, libraryID, filePath string) error {
}
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, nil)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to delete file")
@@ -565,7 +565,7 @@ func (f *Fs) getDownloadLink(ctx context.Context, libraryID, filePath string) (s
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
if resp != nil {
@@ -614,7 +614,7 @@ func (f *Fs) download(ctx context.Context, url string, size int64, options ...fs
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
if resp != nil {
@@ -659,7 +659,7 @@ func (f *Fs) getUploadLink(ctx context.Context, libraryID string) (string, error
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
if resp != nil {
@@ -682,7 +682,7 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, uploadLink, filePath stri
"need_idx_progress": {"true"},
"replace": {"1"},
}
formReader, contentType, _, err := rest.MultipartUpload(ctx, in, parameters, "file", f.opt.Enc.FromStandardName(filename))
formReader, contentType, _, err := rest.MultipartUpload(in, parameters, "file", f.opt.Enc.FromStandardName(filename))
if err != nil {
return nil, errors.Wrap(err, "failed to make multipart upload")
}
@@ -739,7 +739,7 @@ func (f *Fs) listShareLinks(ctx context.Context, libraryID, remote string) ([]ap
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
if resp != nil {
@@ -777,7 +777,7 @@ func (f *Fs) createShareLink(ctx context.Context, libraryID, remote string) (*ap
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &request, &result)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
if resp != nil {
@@ -818,7 +818,7 @@ func (f *Fs) copyFile(ctx context.Context, srcLibraryID, srcPath, dstLibraryID,
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &request, &result)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
if resp != nil {
@@ -860,7 +860,7 @@ func (f *Fs) moveFile(ctx context.Context, srcLibraryID, srcPath, dstLibraryID,
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &request, &result)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
if resp != nil {
@@ -900,7 +900,7 @@ func (f *Fs) renameFile(ctx context.Context, libraryID, filePath, newname string
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &request, &result)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
if resp != nil {
@@ -938,7 +938,7 @@ func (f *Fs) emptyLibraryTrash(ctx context.Context, libraryID string) error {
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, nil)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
if resp != nil {
@@ -976,7 +976,7 @@ func (f *Fs) getDirectoryEntriesAPIv2(ctx context.Context, libraryID, dirPath st
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
if resp != nil {
@@ -1030,7 +1030,7 @@ func (f *Fs) copyFileAPIv2(ctx context.Context, srcLibraryID, srcPath, dstLibrar
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
if resp != nil {
@@ -1075,7 +1075,7 @@ func (f *Fs) renameFileAPIv2(ctx context.Context, libraryID, filePath, newname s
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
if resp != nil {

View File

@@ -204,36 +204,6 @@ Fstat instead of Stat which is called on an already open file handle.
It has been found that this helps with IBM Sterling SFTP servers which have
the "extractability" level set to 1, meaning only one file can be opened at
any given time.
`,
Advanced: true,
}, {
Name: "disable_concurrent_reads",
Default: false,
Help: `If set don't use concurrent reads
Normally concurrent reads are safe to use and not using them will
degrade performance, so this option is disabled by default.
Some servers limit the number of times a file can be
downloaded. Using concurrent reads can trigger this limit, so if you
have a server which returns
Failed to copy: file does not exist
Then you may need to enable this flag.
If concurrent reads are disabled, the use_fstat option is ignored.
`,
Advanced: true,
}, {
Name: "idle_timeout",
Default: fs.Duration(60 * time.Second),
Help: `Max time before closing idle connections
If no connections have been returned to the connection pool in the time
given, rclone will empty the connection pool.
Set to 0 to keep connections indefinitely.
`,
Advanced: true,
}},
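The idle_timeout help above describes a pool drain driven by a single timer that is re-armed whenever a connection is returned. As a standalone sketch of that pattern (type and field names are illustrative, not rclone's; sync, io and time imports assumed):

type connPool struct {
	mu    sync.Mutex
	conns []io.Closer
	drain *time.Timer // fires after idleTimeout of inactivity
}

func newConnPool(idleTimeout time.Duration) *connPool {
	p := &connPool{}
	p.drain = time.AfterFunc(idleTimeout, p.empty)
	return p
}

// put returns a connection to the pool and nudges the drain timer, so
// the pool is only emptied after a full idleTimeout with no activity.
func (p *connPool) put(c io.Closer, idleTimeout time.Duration) {
	p.mu.Lock()
	p.conns = append(p.conns, c)
	p.drain.Reset(idleTimeout)
	p.mu.Unlock()
}

// empty closes and forgets every pooled connection.
func (p *connPool) empty() {
	p.mu.Lock()
	for _, c := range p.conns {
		_ = c.Close()
	}
	p.conns = nil
	p.mu.Unlock()
}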
@@ -243,29 +213,27 @@ Set to 0 to keep connections indefinitely.
// Options defines the configuration for this backend
type Options struct {
Host string `config:"host"`
User string `config:"user"`
Port string `config:"port"`
Pass string `config:"pass"`
KeyPem string `config:"key_pem"`
KeyFile string `config:"key_file"`
KeyFilePass string `config:"key_file_pass"`
PubKeyFile string `config:"pubkey_file"`
KnownHostsFile string `config:"known_hosts_file"`
KeyUseAgent bool `config:"key_use_agent"`
UseInsecureCipher bool `config:"use_insecure_cipher"`
DisableHashCheck bool `config:"disable_hashcheck"`
AskPassword bool `config:"ask_password"`
PathOverride string `config:"path_override"`
SetModTime bool `config:"set_modtime"`
Md5sumCommand string `config:"md5sum_command"`
Sha1sumCommand string `config:"sha1sum_command"`
SkipLinks bool `config:"skip_links"`
Subsystem string `config:"subsystem"`
ServerCommand string `config:"server_command"`
UseFstat bool `config:"use_fstat"`
DisableConcurrentReads bool `config:"disable_concurrent_reads"`
IdleTimeout fs.Duration `config:"idle_timeout"`
Host string `config:"host"`
User string `config:"user"`
Port string `config:"port"`
Pass string `config:"pass"`
KeyPem string `config:"key_pem"`
KeyFile string `config:"key_file"`
KeyFilePass string `config:"key_file_pass"`
PubKeyFile string `config:"pubkey_file"`
KnownHostsFile string `config:"known_hosts_file"`
KeyUseAgent bool `config:"key_use_agent"`
UseInsecureCipher bool `config:"use_insecure_cipher"`
DisableHashCheck bool `config:"disable_hashcheck"`
AskPassword bool `config:"ask_password"`
PathOverride string `config:"path_override"`
SetModTime bool `config:"set_modtime"`
Md5sumCommand string `config:"md5sum_command"`
Sha1sumCommand string `config:"sha1sum_command"`
SkipLinks bool `config:"skip_links"`
Subsystem string `config:"subsystem"`
ServerCommand string `config:"server_command"`
UseFstat bool `config:"use_fstat"`
}
// Fs stores the interface to the remote SFTP files
@@ -283,8 +251,7 @@ type Fs struct {
cachedHashes *hash.Set
poolMu sync.Mutex
pool []*conn
drain *time.Timer // used to drain the pool when we stop using the connections
pacer *fs.Pacer // pacer for operations
pacer *fs.Pacer // pacer for operations
savedpswd string
}
@@ -393,10 +360,7 @@ func (f *Fs) newSftpClient(conn *ssh.Client, opts ...sftp.ClientOption) (*sftp.C
}
}
opts = opts[:len(opts):len(opts)] // make sure we don't overwrite the callers opts
opts = append(opts,
sftp.UseFstat(f.opt.UseFstat),
sftp.UseConcurrentReads(!f.opt.DisableConcurrentReads),
)
opts = append(opts, sftp.UseFstat(f.opt.UseFstat))
return sftp.NewClientPipe(pr, pw, opts...)
}
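The three-index slice expression above is the standard Go idiom for not clobbering a caller's slice: capping the capacity at the length forces the next append to allocate a fresh backing array. A minimal illustration:

caller := make([]int, 2, 4)                // len 2, cap 4: spare room
capped := caller[:len(caller):len(caller)] // len 2, cap 2
grown := append(capped, 99)                // must reallocate
_ = grown
// Without the cap, append(caller, 99) would have written 99 into the
// caller's shared backing array at index 2.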
@@ -464,9 +428,6 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
}
f.poolMu.Lock()
f.pool = append(f.pool, c)
if f.opt.IdleTimeout > 0 {
f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer
}
f.poolMu.Unlock()
}
@@ -474,12 +435,6 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
func (f *Fs) drainPool(ctx context.Context) (err error) {
f.poolMu.Lock()
defer f.poolMu.Unlock()
if f.opt.IdleTimeout > 0 {
f.drain.Stop()
}
if len(f.pool) != 0 {
fs.Debugf(f, "closing %d unused connections", len(f.pool))
}
for i, c := range f.pool {
if cErr := c.closed(); cErr == nil {
cErr = c.close()
@@ -712,10 +667,6 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
f.mkdirLock = newStringLock()
f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
f.savedpswd = ""
// set the pool drainer timer going
if f.opt.IdleTimeout > 0 {
f.drain = time.AfterFunc(time.Duration(opt.IdleTimeout), func() { _ = f.drainPool(ctx) })
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
@@ -1354,19 +1305,18 @@ func (o *Object) stat(ctx context.Context) error {
//
// it also updates the info field
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if !o.fs.opt.SetModTime {
return nil
if o.fs.opt.SetModTime {
c, err := o.fs.getSftpConnection(ctx)
if err != nil {
return errors.Wrap(err, "SetModTime")
}
err = c.sftpClient.Chtimes(o.path(), modTime, modTime)
o.fs.putSftpConnection(&c, err)
if err != nil {
return errors.Wrap(err, "SetModTime failed")
}
}
c, err := o.fs.getSftpConnection(ctx)
if err != nil {
return errors.Wrap(err, "SetModTime")
}
err = c.sftpClient.Chtimes(o.path(), modTime, modTime)
o.fs.putSftpConnection(&c, err)
if err != nil {
return errors.Wrap(err, "SetModTime failed")
}
err = o.stat(ctx)
err := o.stat(ctx)
if err != nil {
return errors.Wrap(err, "SetModTime stat failed")
}
@@ -1497,28 +1447,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
remove()
return errors.Wrap(err, "Update Close failed")
}
// Set the mod time - this stats the object if o.fs.opt.SetModTime == true
err = o.SetModTime(ctx, src.ModTime(ctx))
if err != nil {
return errors.Wrap(err, "Update SetModTime failed")
}
// Stat the file after the upload to read its stats back if o.fs.opt.SetModTime == false
if !o.fs.opt.SetModTime {
err = o.stat(ctx)
if err == fs.ErrorObjectNotFound {
// In the specific case of o.fs.opt.SetModTime == false
// if the object wasn't found then don't return an error
fs.Debugf(o, "Not found after upload with set_modtime=false so returning best guess")
o.modTime = src.ModTime(ctx)
o.size = src.Size()
o.mode = os.FileMode(0666) // regular file
} else if err != nil {
return errors.Wrap(err, "Update stat failed")
}
}
return nil
}

View File

@@ -299,10 +299,7 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
@@ -327,7 +324,7 @@ func (f *Fs) readMetaDataForIDPath(ctx context.Context, id, path string, directo
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &item)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
if resp != nil && resp.StatusCode == http.StatusNotFound {
@@ -634,7 +631,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &req, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return "", errors.Wrap(err, "CreateDir")
@@ -666,7 +663,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return found, errors.Wrap(err, "couldn't list files")
@@ -915,7 +912,7 @@ func (f *Fs) updateItem(ctx context.Context, id, leaf, directoryID string, modTi
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &update, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -1136,7 +1133,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
var info *api.Item
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -1297,7 +1294,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
var dl api.DownloadSpecification
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &dl)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "open: fetch download specification")
@@ -1312,7 +1309,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "open")
@@ -1368,7 +1365,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &req, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "upload get specification")
@@ -1393,7 +1390,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var finish api.UploadFinishResponse
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &finish)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "upload file")
@@ -1429,7 +1426,7 @@ func (f *Fs) remove(ctx context.Context, id string) (err error) {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "remove")

View File

@@ -155,7 +155,7 @@ func (up *largeUpload) finish(ctx context.Context) error {
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.Call(ctx, &opts)
if err != nil {
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
}
respBody, err = rest.ReadBody(resp)
// retry all errors now that the multipart upload has started

View File

@@ -111,7 +111,7 @@ func init() {
// FIXME
//err = f.pacer.Call(func() (bool, error) {
resp, err = srv.CallXML(context.Background(), &opts, &authRequest, nil)
// return shouldRetry(ctx, resp, err)
// return shouldRetry(resp, err)
//})
if err != nil {
log.Fatalf("Failed to get token: %v", err)
@@ -248,10 +248,7 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
@@ -291,7 +288,7 @@ func (f *Fs) readMetaDataForID(ctx context.Context, ID string) (info *api.File,
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
if resp != nil && resp.StatusCode == http.StatusNotFound {
@@ -328,7 +325,7 @@ func (f *Fs) getAuthToken(ctx context.Context) error {
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, &authRequest, &authResponse)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to get authorization")
@@ -376,7 +373,7 @@ func (f *Fs) getUser(ctx context.Context) (user *api.User, err error) {
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &user)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get user")
@@ -570,7 +567,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, mkdir, nil)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return "", err
@@ -621,7 +618,7 @@ OUTER:
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return found, errors.Wrap(err, "couldn't list files")
@@ -777,7 +774,7 @@ func (f *Fs) delete(ctx context.Context, isFile bool, id string, remote string,
}
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
}
// Move file/dir to deleted files if not hard delete
@@ -883,7 +880,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, &copyFile, nil)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -937,7 +934,7 @@ func (f *Fs) moveFile(ctx context.Context, id, leaf, directoryID string) (info *
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, &move, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -967,7 +964,7 @@ func (f *Fs) moveDir(ctx context.Context, id, leaf, directoryID string) (err err
var resp *http.Response
return f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, &move, nil)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
}
@@ -1056,7 +1053,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var info *api.File
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, &linkFile, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return "", err
@@ -1185,7 +1182,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -1207,7 +1204,7 @@ func (f *Fs) createFile(ctx context.Context, pathID, leaf, mimeType string) (new
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, &mkdir, nil)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return "", err
@@ -1265,7 +1262,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to upload file")

View File

@@ -13,7 +13,6 @@ import (
"strings"
"time"
"github.com/google/uuid"
"github.com/ncw/swift/v2"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
@@ -292,10 +291,7 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func shouldRetry(err error) (bool, error) {
// If this is a swift.Error object extract the HTTP error code
if swiftError, ok := err.(*swift.Error); ok {
for _, e := range retryErrorCodes {
@@ -311,7 +307,7 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
// shouldRetryHeaders returns a boolean as to whether this err
// deserves to be retried. It reads the headers passed in looking for
// `Retry-After`. It returns the err as a convenience
func shouldRetryHeaders(ctx context.Context, headers swift.Headers, err error) (bool, error) {
func shouldRetryHeaders(headers swift.Headers, err error) (bool, error) {
if swiftError, ok := err.(*swift.Error); ok && swiftError.StatusCode == 429 {
if value := headers["Retry-After"]; value != "" {
retryAfter, parseErr := strconv.Atoi(value)
@@ -330,7 +326,7 @@ func shouldRetryHeaders(ctx context.Context, headers swift.Headers, err error) (
}
}
}
return shouldRetry(ctx, err)
return shouldRetry(err)
}
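The remainder of the function is cut off by the hunk. Judging from the internal test further down (a short Retry-After is slept off inline, a long one returns without retrying), the missing part plausibly looks like this sketch, where the 60 second threshold is illustrative and fserrors.NewErrorRetryAfter is assumed:

if parseErr == nil && retryAfter > 0 {
	d := time.Duration(retryAfter) * time.Second
	if d <= 60*time.Second {
		time.Sleep(d) // short enough: wait it out here and retry
		return true, err
	}
	// too long to block a worker: surface the delay to the caller
	return false, fserrors.NewErrorRetryAfter(d)
}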
// parsePath parses a remote 'url'
@@ -472,7 +468,7 @@ func NewFsWithConnection(ctx context.Context, opt *Options, name, root string, c
err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
info, rxHeaders, err = f.c.Object(ctx, f.rootContainer, encodedDirectory)
return shouldRetryHeaders(ctx, rxHeaders, err)
return shouldRetryHeaders(rxHeaders, err)
})
if err == nil && info.ContentType != directoryMarkerContentType {
newRoot := path.Dir(f.root)
@@ -580,7 +576,7 @@ func (f *Fs) listContainerRoot(ctx context.Context, container, directory, prefix
var err error
err = f.pacer.Call(func() (bool, error) {
objects, err = f.c.Objects(ctx, container, opts)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err == nil {
for i := range objects {
@@ -665,7 +661,7 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
var containers []swift.Container
err = f.pacer.Call(func() (bool, error) {
containers, err = f.c.ContainersAll(ctx, nil)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "container listing failed")
@@ -757,7 +753,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var err error
err = f.pacer.Call(func() (bool, error) {
containers, err = f.c.ContainersAll(ctx, nil)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "container listing failed")
@@ -809,7 +805,7 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
_, rxHeaders, err = f.c.Container(ctx, container)
return shouldRetryHeaders(ctx, rxHeaders, err)
return shouldRetryHeaders(rxHeaders, err)
})
}
if err == swift.ContainerNotFound {
@@ -819,7 +815,7 @@ func (f *Fs) makeContainer(ctx context.Context, container string) error {
}
err = f.pacer.Call(func() (bool, error) {
err = f.c.ContainerCreate(ctx, container, headers)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err == nil {
fs.Infof(f, "Container %q created", container)
@@ -840,7 +836,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
err := f.cache.Remove(container, func() error {
err := f.pacer.Call(func() (bool, error) {
err := f.c.ContainerDelete(ctx, container)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err == nil {
fs.Infof(f, "Container %q removed", container)
@@ -906,125 +902,18 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
isLargeObject, err := srcObj.isLargeObject(ctx)
if err != nil {
return nil, err
}
if isLargeObject {
/*handle large object*/
err = copyLargeObject(ctx, f, srcObj, dstContainer, dstPath)
} else {
srcContainer, srcPath := srcObj.split()
err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
rxHeaders, err = f.c.ObjectCopy(ctx, srcContainer, srcPath, dstContainer, dstPath, nil)
return shouldRetryHeaders(ctx, rxHeaders, err)
})
}
srcContainer, srcPath := srcObj.split()
err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
rxHeaders, err = f.c.ObjectCopy(ctx, srcContainer, srcPath, dstContainer, dstPath, nil)
return shouldRetryHeaders(rxHeaders, err)
})
if err != nil {
return nil, err
}
return f.NewObject(ctx, remote)
}
func copyLargeObject(ctx context.Context, f *Fs, src *Object, dstContainer string, dstPath string) error {
segmentsContainer := dstContainer + "_segments"
err := f.makeContainer(ctx, segmentsContainer)
if err != nil {
return err
}
segments, err := src.getSegmentsLargeObject(ctx)
if err != nil {
return err
}
if len(segments) == 0 {
return errors.New("could not copy object, segment list is empty")
}
nanoSeconds := time.Now().Nanosecond()
prefixSegment := fmt.Sprintf("%v/%v/%s", nanoSeconds, src.size, strings.ReplaceAll(uuid.New().String(), "-", ""))
copiedSegmentsLen := 10
for _, value := range segments {
if len(value) <= 0 {
continue
}
fragment := value[0]
if len(fragment) <= 0 {
continue
}
copiedSegmentsLen = len(value)
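// Derive the shared prefix for the copied segments from an existing
// segment path (roughly the part between its first and last slash).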
firstIndex := strings.Index(fragment, "/")
if firstIndex < 0 {
firstIndex = 0
} else {
firstIndex = firstIndex + 1
}
lastIndex := strings.LastIndex(fragment, "/")
if lastIndex < 0 {
lastIndex = len(fragment)
} else {
lastIndex = lastIndex - 1
}
prefixSegment = fragment[firstIndex:lastIndex]
break
}
copiedSegments := make([]string, 0, copiedSegmentsLen) // capacity only: entries are appended below
defer handleCopyFail(ctx, f, segmentsContainer, copiedSegments, err)
for c, ss := range segments {
if len(ss) <= 0 {
continue
}
for _, s := range ss {
lastIndex := strings.LastIndex(s, "/")
if lastIndex <= 0 {
lastIndex = 0
} else {
lastIndex = lastIndex + 1
}
segmentName := dstPath + "/" + prefixSegment + "/" + s[lastIndex:]
err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
rxHeaders, err = f.c.ObjectCopy(ctx, c, s, segmentsContainer, segmentName, nil)
copiedSegments = append(copiedSegments, segmentName)
return shouldRetryHeaders(ctx, rxHeaders, err)
})
if err != nil {
return err
}
}
}
m := swift.Metadata{}
headers := m.ObjectHeaders()
headers["X-Object-Manifest"] = urlEncode(fmt.Sprintf("%s/%s/%s", segmentsContainer, dstPath, prefixSegment))
headers["Content-Length"] = "0"
emptyReader := bytes.NewReader(nil)
err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
rxHeaders, err = f.c.ObjectPut(ctx, dstContainer, dstPath, emptyReader, true, "", src.contentType, headers)
return shouldRetryHeaders(ctx, rxHeaders, err)
})
return err
}
// remove copied segments if the copy process failed
func handleCopyFail(ctx context.Context, f *Fs, segmentsContainer string, segments []string, err error) {
fs.Debugf(f, "handle copy segment fail")
if err == nil {
return
}
if len(segmentsContainer) == 0 {
fs.Debugf(f, "invalid segments container")
return
}
if len(segments) == 0 {
fs.Debugf(f, "segments is empty")
return
}
fs.Debugf(f, "action delete segments what copied")
for _, v := range segments {
_ = f.c.ObjectDelete(ctx, segmentsContainer, v)
}
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
@@ -1152,7 +1041,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
container, containerPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
info, h, err = o.fs.c.Object(ctx, container, containerPath)
return shouldRetryHeaders(ctx, h, err)
return shouldRetryHeaders(h, err)
})
if err != nil {
if err == swift.ObjectNotFound {
@@ -1211,7 +1100,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
container, containerPath := o.split()
return o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ObjectUpdate(ctx, container, containerPath, newHeaders)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
}
@@ -1232,7 +1121,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
err = o.fs.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
in, rxHeaders, err = o.fs.c.ObjectOpen(ctx, container, containerPath, !isRanging, headers)
return shouldRetryHeaders(ctx, rxHeaders, err)
return shouldRetryHeaders(rxHeaders, err)
})
return
}
@@ -1322,7 +1211,7 @@ func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.
err = o.fs.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
_, rxHeaders, err = o.fs.c.Container(ctx, segmentsContainer)
return shouldRetryHeaders(ctx, rxHeaders, err)
return shouldRetryHeaders(rxHeaders, err)
})
if err == swift.ContainerNotFound {
headers := swift.Headers{}
@@ -1331,7 +1220,7 @@ func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.
}
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ContainerCreate(ctx, segmentsContainer, headers)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
}
if err != nil {
@@ -1352,8 +1241,7 @@ func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.
if len(segmentInfos) == 0 {
return
}
_ctx := context.Background()
deleteChunks(_ctx, o, segmentsContainer, segmentInfos)
deleteChunks(ctx, o, segmentsContainer, segmentInfos)
})()
for {
// can we read at least one byte?
@@ -1379,7 +1267,7 @@ func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.
if err == nil {
segmentInfos = append(segmentInfos, segmentPath)
}
return shouldRetryHeaders(ctx, rxHeaders, err)
return shouldRetryHeaders(rxHeaders, err)
})
if err != nil {
return "", err
@@ -1393,7 +1281,7 @@ func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.
err = o.fs.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
rxHeaders, err = o.fs.c.ObjectPut(ctx, container, containerPath, emptyReader, true, "", contentType, headers)
return shouldRetryHeaders(ctx, rxHeaders, err)
return shouldRetryHeaders(rxHeaders, err)
})
if err == nil {
@@ -1468,7 +1356,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var rxHeaders swift.Headers
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
rxHeaders, err = o.fs.c.ObjectPut(ctx, container, containerPath, in, true, "", contentType, headers)
return shouldRetryHeaders(ctx, rxHeaders, err)
return shouldRetryHeaders(rxHeaders, err)
})
if err != nil {
return err
@@ -1526,7 +1414,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
// Remove file/manifest first
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ObjectDelete(ctx, container, containerPath)
return shouldRetry(ctx, err)
return shouldRetry(err)
})
if err != nil {
return err

View File

@@ -1,7 +1,6 @@
package swift
import (
"context"
"testing"
"time"
@@ -33,7 +32,6 @@ func TestInternalUrlEncode(t *testing.T) {
}
func TestInternalShouldRetryHeaders(t *testing.T) {
ctx := context.Background()
headers := swift.Headers{
"Content-Length": "64",
"Content-Type": "text/html; charset=UTF-8",
@@ -47,7 +45,7 @@ func TestInternalShouldRetryHeaders(t *testing.T) {
// Short sleep should just do the sleep
start := time.Now()
retry, gotErr := shouldRetryHeaders(ctx, headers, err)
retry, gotErr := shouldRetryHeaders(headers, err)
dt := time.Since(start)
assert.True(t, retry)
assert.Equal(t, err, gotErr)
@@ -56,7 +54,7 @@ func TestInternalShouldRetryHeaders(t *testing.T) {
// Long sleep should return RetryError
headers["Retry-After"] = "3600"
start = time.Now()
retry, gotErr = shouldRetryHeaders(ctx, headers, err)
retry, gotErr = shouldRetryHeaders(headers, err)
dt = time.Since(start)
assert.True(t, dt < time.Second)
assert.False(t, retry)

View File

@@ -80,7 +80,6 @@ func (f *Fs) InternalTest(t *testing.T) {
t.Run("NoChunk", f.testNoChunk)
t.Run("WithChunk", f.testWithChunk)
t.Run("WithChunkFail", f.testWithChunkFail)
t.Run("CopyLargeObject", f.testCopyLargeObject)
}
func (f *Fs) testWithChunk(t *testing.T) {
@@ -155,39 +154,4 @@ func (f *Fs) testWithChunkFail(t *testing.T) {
require.Empty(t, objs)
}
func (f *Fs) testCopyLargeObject(t *testing.T) {
preConfChunkSize := f.opt.ChunkSize
preConfChunk := f.opt.NoChunk
f.opt.NoChunk = false
f.opt.ChunkSize = 1024 * fs.Byte
defer func() {
//restore old config after test
f.opt.ChunkSize = preConfChunkSize
f.opt.NoChunk = preConfChunk
}()
file := fstest.Item{
ModTime: fstest.Time("2020-12-31T04:05:06.499999999Z"),
Path: "large.txt",
Size: -1, // use unknown size during upload
}
const contentSize = 2048
contents := random.String(contentSize)
buf := bytes.NewBufferString(contents)
uploadHash := hash.NewMultiHasher()
in := io.TeeReader(buf, uploadHash)
file.Size = -1
obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
ctx := context.TODO()
obj, err := f.Features().PutStream(ctx, in, obji)
require.NoError(t, err)
require.NotEmpty(t, obj)
remoteTarget := "large.txt (copy)"
objTarget, err := f.Features().Copy(ctx, obj, remoteTarget)
require.NoError(t, err)
require.NotEmpty(t, objTarget)
require.Equal(t, obj.Size(), objTarget.Size())
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -59,17 +59,7 @@ func (d *Directory) candidates() []upstream.Entry {
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
entries, err := o.fs.actionEntries(o.candidates()...)
if err == fs.ErrorPermissionDenied {
// There are no candidates in this object which can be written to
// So attempt to create a new object instead
newO, err := o.fs.put(ctx, in, src, false, options...)
if err != nil {
return err
}
// Update current object
*o = *newO.(*Object)
return nil
} else if err != nil {
if err != nil {
return err
}
if len(entries) == 1 {

View File

@@ -17,9 +17,7 @@ func init() {
type EpFF struct{}
func (p *EpFF) epff(ctx context.Context, upstreams []*upstream.Fs, filePath string) (*upstream.Fs, error) {
ch := make(chan *upstream.Fs, len(upstreams))
ctx, cancel := context.WithCancel(ctx)
defer cancel()
ch := make(chan *upstream.Fs)
for _, u := range upstreams {
u := u // Closure
go func() {
@@ -32,10 +30,16 @@ func (p *EpFF) epff(ctx context.Context, upstreams []*upstream.Fs, filePath stri
}()
}
var u *upstream.Fs
for range upstreams {
for i := 0; i < len(upstreams); i++ {
u = <-ch
if u != nil {
break
// close remaining goroutines
go func(num int) {
defer close(ch)
for i := 0; i < num; i++ {
<-ch
}
}(len(upstreams) - 1 - i)
}
}
if u == nil {
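Both variants of this first-found race appear interleaved above. Restated on its own, the cancellable version looks like the sketch below (the goroutine body is abridged; using u.NewObject as the probe is an assumption). The buffered channel lets losing goroutines send and exit even after the winner has been taken, and the deferred cancel stops any probe still in flight:

func epff(ctx context.Context, upstreams []*upstream.Fs, filePath string) (*upstream.Fs, error) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel() // stops the losers as soon as we have a winner
	ch := make(chan *upstream.Fs, len(upstreams)) // buffered: losers never block
	for _, u := range upstreams {
		u := u // closure over the loop variable
		go func() {
			if _, err := u.NewObject(ctx, filePath); err != nil {
				ch <- nil
				return
			}
			ch <- u
		}()
	}
	for range upstreams {
		if u := <-ch; u != nil {
			return u, nil
		}
	}
	return nil, fs.ErrorObjectNotFound
}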

View File

@@ -1,67 +0,0 @@
package union
import (
"bytes"
"context"
"testing"
"time"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func (f *Fs) TestInternalReadOnly(t *testing.T) {
if f.name != "TestUnionRO" {
t.Skip("Only on RO union")
}
dir := "TestInternalReadOnly"
ctx := context.Background()
rofs := f.upstreams[len(f.upstreams)-1]
assert.False(t, rofs.IsWritable())
// Put a file onto the read only fs
contents := random.String(50)
file1 := fstest.NewItem(dir+"/file.txt", contents, time.Now())
_, obj1 := fstests.PutTestContents(ctx, t, rofs, &file1, contents, true)
// Check read from readonly fs via union
o, err := f.NewObject(ctx, file1.Path)
require.NoError(t, err)
assert.Equal(t, int64(50), o.Size())
// Now call Update on the union Object with new data
contents2 := random.String(100)
file2 := fstest.NewItem(dir+"/file.txt", contents2, time.Now())
in := bytes.NewBufferString(contents2)
src := object.NewStaticObjectInfo(file2.Path, file2.ModTime, file2.Size, true, nil, nil)
err = o.Update(ctx, in, src)
require.NoError(t, err)
assert.Equal(t, int64(100), o.Size())
// Check we read the new object via the union
o, err = f.NewObject(ctx, file1.Path)
require.NoError(t, err)
assert.Equal(t, int64(100), o.Size())
// Remove the object
assert.NoError(t, o.Remove(ctx))
// Check we read the old object in the read only layer now
o, err = f.NewObject(ctx, file1.Path)
require.NoError(t, err)
assert.Equal(t, int64(50), o.Size())
// Remove file and dir from read only fs
assert.NoError(t, obj1.Remove(ctx))
assert.NoError(t, rofs.Rmdir(ctx, dir))
}
func (f *Fs) InternalTest(t *testing.T) {
t.Run("ReadOnly", f.TestInternalReadOnly)
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -2,15 +2,13 @@
package union_test
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -26,28 +24,17 @@ func TestIntegration(t *testing.T) {
})
}
func makeTestDirs(t *testing.T, n int) (dirs []string, clean func()) {
for i := 1; i <= n; i++ {
dir, err := ioutil.TempDir("", fmt.Sprintf("rclone-union-test-%d", n))
require.NoError(t, err)
dirs = append(dirs, dir)
}
clean = func() {
for _, dir := range dirs {
err := os.RemoveAll(dir)
assert.NoError(t, err)
}
}
return dirs, clean
}
func TestStandard(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs, clean := makeTestDirs(t, 3)
defer clean()
upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-standard1")
tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-standard2")
tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-standard3")
require.NoError(t, os.MkdirAll(tempdir1, 0744))
require.NoError(t, os.MkdirAll(tempdir2, 0744))
require.NoError(t, os.MkdirAll(tempdir3, 0744))
upstreams := tempdir1 + " " + tempdir2 + " " + tempdir3
name := "TestUnion"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
@@ -67,9 +54,13 @@ func TestRO(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs, clean := makeTestDirs(t, 3)
defer clean()
upstreams := dirs[0] + " " + dirs[1] + ":ro " + dirs[2] + ":ro"
tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-ro1")
tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-ro2")
tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-ro3")
require.NoError(t, os.MkdirAll(tempdir1, 0744))
require.NoError(t, os.MkdirAll(tempdir2, 0744))
require.NoError(t, os.MkdirAll(tempdir3, 0744))
upstreams := tempdir1 + " " + tempdir2 + ":ro " + tempdir3 + ":ro"
name := "TestUnionRO"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
@@ -89,9 +80,13 @@ func TestNC(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs, clean := makeTestDirs(t, 3)
defer clean()
upstreams := dirs[0] + " " + dirs[1] + ":nc " + dirs[2] + ":nc"
tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-nc1")
tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-nc2")
tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-nc3")
require.NoError(t, os.MkdirAll(tempdir1, 0744))
require.NoError(t, os.MkdirAll(tempdir2, 0744))
require.NoError(t, os.MkdirAll(tempdir3, 0744))
upstreams := tempdir1 + " " + tempdir2 + ":nc " + tempdir3 + ":nc"
name := "TestUnionNC"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
@@ -111,9 +106,13 @@ func TestPolicy1(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs, clean := makeTestDirs(t, 3)
defer clean()
upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-policy11")
tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-policy12")
tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-policy13")
require.NoError(t, os.MkdirAll(tempdir1, 0744))
require.NoError(t, os.MkdirAll(tempdir2, 0744))
require.NoError(t, os.MkdirAll(tempdir3, 0744))
upstreams := tempdir1 + " " + tempdir2 + " " + tempdir3
name := "TestUnionPolicy1"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
@@ -133,9 +132,13 @@ func TestPolicy2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs, clean := makeTestDirs(t, 3)
defer clean()
upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-policy21")
tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-policy22")
tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-policy23")
require.NoError(t, os.MkdirAll(tempdir1, 0744))
require.NoError(t, os.MkdirAll(tempdir2, 0744))
require.NoError(t, os.MkdirAll(tempdir3, 0744))
upstreams := tempdir1 + " " + tempdir2 + " " + tempdir3
name := "TestUnionPolicy2"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
@@ -155,9 +158,13 @@ func TestPolicy3(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs, clean := makeTestDirs(t, 3)
defer clean()
upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-policy31")
tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-policy32")
tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-policy33")
require.NoError(t, os.MkdirAll(tempdir1, 0744))
require.NoError(t, os.MkdirAll(tempdir2, 0744))
require.NoError(t, os.MkdirAll(tempdir3, 0744))
upstreams := tempdir1 + " " + tempdir2 + " " + tempdir3
name := "TestUnionPolicy3"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",

View File

@@ -14,7 +14,6 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/fspath"
)
var (
@@ -63,12 +62,12 @@ type Entry interface {
// New creates a new Fs based on a string
// formatted as `type:root_path(:ro/:nc)`
func New(ctx context.Context, remote, root string, cacheTime time.Duration) (*Fs, error) {
configName, fsPath, err := fspath.SplitFs(remote)
_, configName, fsPath, err := fs.ParseRemote(remote)
if err != nil {
return nil, err
}
f := &Fs{
RootPath: strings.TrimRight(root, "/"),
RootPath: root,
writable: true,
creatable: true,
cacheExpiry: time.Now().Unix(),
@@ -84,13 +83,15 @@ func New(ctx context.Context, remote, root string, cacheTime time.Duration) (*Fs
f.creatable = false
fsPath = fsPath[0 : len(fsPath)-3]
}
remote = configName + fsPath
rFs, err := cache.Get(ctx, remote)
if configName != "local" {
fsPath = configName + ":" + fsPath
}
rFs, err := cache.Get(ctx, fsPath)
if err != nil && err != fs.ErrorIsFile {
return nil, err
}
f.RootFs = rFs
rootString := path.Join(remote, filepath.ToSlash(root))
rootString := path.Join(fsPath, filepath.ToSlash(root))
myFs, err := cache.Get(ctx, rootString)
if err != nil && err != fs.ErrorIsFile {
return nil, err

View File

@@ -10,7 +10,6 @@ package webdav
import (
"bytes"
"context"
"crypto/tls"
"encoding/xml"
"fmt"
"io"
@@ -20,25 +19,20 @@ import (
"path"
"strconv"
"strings"
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/webdav/api"
"github.com/rclone/rclone/backend/webdav/odrvcookie"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
ntlmssp "github.com/Azure/go-ntlmssp"
)
const (
@@ -48,22 +42,8 @@ const (
defaultDepth = "1" // depth for PROPFIND
)
const defaultEncodingSharepointNTLM = (encoder.EncodeWin |
encoder.EncodeHashPercent | // required by IIS/8.5 in contrast with onedrive which doesn't need it
(encoder.Display &^ encoder.EncodeDot) | // test with IIS/8.5 shows that EncodeDot is not needed
encoder.EncodeBackSlash |
encoder.EncodeLeftSpace |
encoder.EncodeLeftTilde |
encoder.EncodeRightPeriod |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8)
// Register with Fs
func init() {
configEncodingHelp := fmt.Sprintf(
"%s\n\nDefault encoding is %s for sharepoint-ntlm or identity otherwise.",
config.ConfigEncodingHelp, defaultEncodingSharepointNTLM)
fs.Register(&fs.RegInfo{
Name: "webdav",
Description: "Webdav",
@@ -87,17 +67,14 @@ func init() {
Help: "Owncloud",
}, {
Value: "sharepoint",
Help: "Sharepoint Online, authenticated by Microsoft account.",
}, {
Value: "sharepoint-ntlm",
Help: "Sharepoint with NTLM authentication. Usually self-hosted or on-premises.",
Help: "Sharepoint",
}, {
Value: "other",
Help: "Other site/service or software",
}},
}, {
Name: "user",
Help: "User name. In case NTLM authentication is used, the username should be in the format 'Domain\\User'.",
Help: "User name",
}, {
Name: "pass",
Help: "Password.",
@@ -109,23 +86,18 @@ func init() {
Name: "bearer_token_command",
Help: "Command to run to get a bearer token",
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: configEncodingHelp,
Advanced: true,
}},
})
}
// Options defines the configuration for this backend
type Options struct {
URL string `config:"url"`
Vendor string `config:"vendor"`
User string `config:"user"`
Pass string `config:"pass"`
BearerToken string `config:"bearer_token"`
BearerTokenCommand string `config:"bearer_token_command"`
Enc encoder.MultiEncoder `config:"encoding"`
URL string `config:"url"`
Vendor string `config:"vendor"`
User string `config:"user"`
Pass string `config:"pass"`
BearerToken string `config:"bearer_token"`
BearerTokenCommand string `config:"bearer_token_command"`
}
// Fs represents a remote webdav
@@ -142,10 +114,8 @@ type Fs struct {
canStream bool // set if can stream
useOCMtime bool // set if can use X-OC-Mtime
retryWithZeroDepth bool // some vendors (sharepoint) won't list files when Depth is 1 (our default)
checkBeforePurge bool // enables extra check that directory to purge really exists
hasMD5 bool // set if can use owncloud style checksums for MD5
hasSHA1 bool // set if can use owncloud style checksums for SHA1
ntlmAuthMu sync.Mutex // mutex to serialize NTLM auth roundtrips
}
// Object describes a webdav object
@@ -196,10 +166,7 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
// If we have a bearer token command and it has expired then refresh it
if f.opt.BearerTokenCommand != "" && resp != nil && resp.StatusCode == 401 {
fs.Debugf(f, "Bearer token expired: %v", err)
@@ -212,22 +179,6 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// safeRoundTripper is a wrapper for http.RoundTripper that serializes
// http roundtrips. The NTLM authentication sequence can involve up to four
// rounds of negotiation and can fail under concurrency.
// This wrapper makes it safe to use ntlmssp.Negotiator from multiple goroutines.
type safeRoundTripper struct {
fs *Fs
rt http.RoundTripper
}
// RoundTrip guards wrapped RoundTripper by a mutex.
func (srt *safeRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
srt.fs.ntlmAuthMu.Lock()
defer srt.fs.ntlmAuthMu.Unlock()
return srt.rt.RoundTrip(req)
}
// itemIsDir returns true if the item is a directory
//
// When a client sees a resourcetype it doesn't recognize it should
@@ -273,7 +224,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, depth string)
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if apiErr, ok := err.(*api.Error); ok {
// does not exist
@@ -334,11 +285,7 @@ func addSlash(s string) string {
// filePath returns a file path (f.root, file)
func (f *Fs) filePath(file string) string {
subPath := path.Join(f.root, file)
if f.opt.Enc != encoder.EncodeZero {
subPath = f.opt.Enc.FromStandardPath(subPath)
}
return rest.URLPathEscape(subPath)
return rest.URLPathEscape(path.Join(f.root, file))
}
// dirPath returns a directory path (f.root, dir)
@@ -377,10 +324,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
root = strings.Trim(root, "/")
if opt.Enc == encoder.EncodeZero && opt.Vendor == "sharepoint-ntlm" {
opt.Enc = defaultEncodingSharepointNTLM
}
// Parse the endpoint
u, err := url.Parse(opt.URL)
if err != nil {
@@ -393,28 +336,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
opt: *opt,
endpoint: u,
endpointURL: u.String(),
srv: rest.NewClient(fshttp.NewClient(ctx)).SetRoot(u.String()),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
precision: fs.ModTimeNotSupported,
}
client := fshttp.NewClient(ctx)
if opt.Vendor == "sharepoint-ntlm" {
// Disable transparent HTTP/2 support as per https://golang.org/pkg/net/http/ ,
// otherwise any connection to IIS 10.0 fails with 'stream error: stream ID 39; HTTP_1_1_REQUIRED'
// https://docs.microsoft.com/en-us/iis/get-started/whats-new-in-iis-10/http2-on-iis says:
// 'Windows authentication (NTLM/Kerberos/Negotiate) is not supported with HTTP/2.'
t := fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
})
// Add NTLM layer
client.Transport = &safeRoundTripper{
fs: f,
rt: ntlmssp.Negotiator{RoundTripper: t},
}
}
f.srv = rest.NewClient(client).SetRoot(u.String())
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
@@ -540,16 +465,6 @@ func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
// to determine if we may have found a file, the request has to be resent
// with the depth set to 0
f.retryWithZeroDepth = true
case "sharepoint-ntlm":
// Sharepoint with NTLM authentication
// See comment above
f.retryWithZeroDepth = true
// Sharepoint 2016 returns status 204 to the purge request
// even if the directory to purge does not really exist
// so we must perform an extra check to detect this
// condition and return a proper error code.
f.checkBeforePurge = true
case "other":
default:
fs.Debugf(f, "Unknown vendor %q", vendor)
@@ -631,7 +546,7 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
if apiErr, ok := err.(*api.Error); ok {
@@ -668,11 +583,7 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
fs.Debugf(nil, "Item with unknown path received: %q, %q", u.Path, baseURL.Path)
continue
}
subPath := u.Path[len(baseURL.Path):]
if f.opt.Enc != encoder.EncodeZero {
subPath = f.opt.Enc.ToStandardPath(subPath)
}
remote := path.Join(dir, subPath)
remote := path.Join(dir, u.Path[len(baseURL.Path):])
if strings.HasSuffix(remote, "/") {
remote = remote[:len(remote)-1]
}
@@ -803,7 +714,7 @@ func (f *Fs) _dirExists(ctx context.Context, dirPath string) (exists bool) {
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
return err == nil
}
@@ -825,7 +736,7 @@ func (f *Fs) _mkdir(ctx context.Context, dirPath string) error {
}
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(ctx, &opts)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if apiErr, ok := err.(*api.Error); ok {
// Check if it already exists. The response code for this isn't
@@ -889,21 +800,6 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
if notEmpty {
return fs.ErrorDirectoryNotEmpty
}
} else if f.checkBeforePurge {
// We are doing purge as the `check` argument is unset.
// The quirk says that we are working with Sharepoint 2016.
// This provider returns status 204 even if the purged directory
// does not really exist so we perform an extra check here.
// Only the existence is checked, all other errors must be
// ignored here to make the rclone test suite pass.
depth := defaultDepth
if f.retryWithZeroDepth {
depth = "0"
}
_, err := f.readMetaDataForPath(ctx, dir, depth)
if err == fs.ErrorObjectNotFound {
return fs.ErrorDirNotFound
}
}
opts := rest.Opts{
Method: "DELETE",
@@ -914,7 +810,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, nil)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "rmdir failed")
@@ -977,7 +873,7 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "Copy call failed")
@@ -1073,7 +969,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "DirMove MOVE call failed")
@@ -1115,7 +1011,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &q)
return f.shouldRetry(ctx, resp, err)
return f.shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "about call failed")
@@ -1243,7 +1139,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return o.fs.shouldRetry(ctx, resp, err)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -1294,7 +1190,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return o.fs.shouldRetry(ctx, resp, err)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
// Give the WebDAV server a chance to get its internal state in order after the
@@ -1321,7 +1217,7 @@ func (o *Object) Remove(ctx context.Context) error {
}
return o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.Call(ctx, &opts)
return o.fs.shouldRetry(ctx, resp, err)
return o.fs.shouldRetry(resp, err)
})
}

View File

@@ -38,14 +38,3 @@ func TestIntegration3(t *testing.T) {
NilObject: (*webdav.Object)(nil),
})
}
// TestIntegration runs integration tests against the remote
func TestIntegration4(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: "TestWebdavNTLM:",
NilObject: (*webdav.Object)(nil),
})
}

View File

@@ -153,10 +153,7 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
@@ -229,7 +226,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, options *api.
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
@@ -471,7 +468,7 @@ func (f *Fs) CreateDir(ctx context.Context, path string) (err error) {
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
// fmt.Printf("CreateDir %q Error: %s\n", path, err.Error())
@@ -540,15 +537,12 @@ func (f *Fs) waitForJob(ctx context.Context, location string) (err error) {
RootURL: location,
Method: "GET",
}
deadline := time.Now().Add(f.ci.TimeoutOrInfinite())
deadline := time.Now().Add(f.ci.Timeout)
for time.Now().Before(deadline) {
var resp *http.Response
var body []byte
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
if fserrors.ContextError(ctx, &err) {
return false, err
}
if err != nil {
return fserrors.ShouldRetry(err), err
}
@@ -574,7 +568,7 @@ func (f *Fs) waitForJob(ctx context.Context, location string) (err error) {
time.Sleep(1 * time.Second)
}
return errors.Errorf("async operation didn't complete after %v", f.ci.TimeoutOrInfinite())
return errors.Errorf("async operation didn't complete after %v", f.ci.Timeout)
}
func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) (err error) {
@@ -591,9 +585,6 @@ func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) (err erro
var body []byte
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
if fserrors.ContextError(ctx, &err) {
return false, err
}
if err != nil {
return fserrors.ShouldRetry(err), err
}
@@ -667,9 +658,6 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dst string, overwrite
var body []byte
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
if fserrors.ContextError(ctx, &err) {
return false, err
}
if err != nil {
return fserrors.ShouldRetry(err), err
}
@@ -822,7 +810,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if apiErr, ok := err.(*api.ErrorResponse); ok {
@@ -860,7 +848,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
return err
}
@@ -877,7 +865,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1011,7 +999,7 @@ func (o *Object) setCustomProperty(ctx context.Context, property string, value s
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &cpr, nil)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
return err
}
@@ -1044,7 +1032,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &dl)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1059,7 +1047,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -1083,7 +1071,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, mimeT
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &ur)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1101,7 +1089,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, mimeT
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
return err

View File

@@ -36,8 +36,8 @@ import (
)
const (
rcloneClientID = "1000.46MXF275FM2XV7QCHX5A7K3LGME66B"
rcloneEncryptedClientSecret = "U-2gxclZQBcOG9NPhjiXAhj-f0uQ137D0zar8YyNHXHkQZlTeSpIOQfmCb4oSpvosJp_SJLXmLLeUA"
rcloneClientID = "1000.OZNFWW075EKDSIE1R42HI9I2SUPC9A"
rcloneEncryptedClientSecret = "rn7myzbsYK3WlqO2EU6jU8wmj0ylsx7_1B5wvSaVncYbu1Wt0QxPW9FFbidjqAZtyxnBenYIWq1pcA"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
@@ -100,7 +100,7 @@ func init() {
log.Fatalf("Failed to configure root directory: %v", err)
}
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Options: []fs.Option{{
Name: "region",
Help: "Zoho region to connect to. You'll have to use the region you organization is registered in.",
Examples: []fs.OptionExample{{
@@ -123,7 +123,7 @@ func init() {
encoder.EncodeCtl |
encoder.EncodeDel |
encoder.EncodeInvalidUtf8),
}}...),
}},
})
}
@@ -257,10 +257,7 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
func shouldRetry(resp *http.Response, err error) (bool, error) {
authRetry := false
if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
@@ -357,7 +354,7 @@ func (f *Fs) readMetaDataForID(ctx context.Context, id string) (*api.Item, error
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -372,7 +369,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err := configstruct.Set(m, opt); err != nil {
return nil, err
}
setupRegion(m)
root = parsePath(root)
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
@@ -454,7 +450,7 @@ OUTER:
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return found, errors.Wrap(err, "couldn't list files")
@@ -559,7 +555,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
//fmt.Printf("...Error %v\n", err)
@@ -647,7 +643,7 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64,
params.Set("filename", name)
params.Set("parent_id", parent)
params.Set("override-name-exist", strconv.FormatBool(true))
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name)
formReader, contentType, overhead, err := rest.MultipartUpload(in, nil, "content", name)
if err != nil {
return nil, errors.Wrap(err, "failed to make multipart upload")
}
@@ -668,7 +664,7 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64,
var uploadResponse *api.UploadResponse
err = f.pacer.CallNoRetry(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &uploadResponse)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "upload error")
@@ -750,7 +746,7 @@ func (f *Fs) deleteObject(ctx context.Context, id string) (err error) {
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &delete, nil)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "delete object failed")
@@ -820,7 +816,7 @@ func (f *Fs) rename(ctx context.Context, id, name string) (item *api.Item, err e
var result *api.ItemInfo
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &rename, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "rename failed")
@@ -873,7 +869,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var result *api.ItemList
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &copyFile, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't copy file")
@@ -918,7 +914,7 @@ func (f *Fs) move(ctx context.Context, srcID, parentID string) (item *api.Item,
var result *api.ItemList
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &moveFile, &result)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "move failed")
@@ -1185,7 +1181,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, resp, err)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err

View File

@@ -27,22 +27,17 @@ import (
var (
// Flags
debug = flag.Bool("d", false, "Print commands instead of running them.")
parallel = flag.Int("parallel", runtime.NumCPU(), "Number of commands to run in parallel.")
copyAs = flag.String("release", "", "Make copies of the releases with this name")
gitLog = flag.String("git-log", "", "git log to include as well")
include = flag.String("include", "^.*$", "os/arch regexp to include")
exclude = flag.String("exclude", "^$", "os/arch regexp to exclude")
cgo = flag.Bool("cgo", false, "Use cgo for the build")
noClean = flag.Bool("no-clean", false, "Don't clean the build directory before running.")
tags = flag.String("tags", "", "Space separated list of build tags")
buildmode = flag.String("buildmode", "", "Passed to go build -buildmode flag")
compileOnly = flag.Bool("compile-only", false, "Just build the binary, not the zip.")
extraEnv = flag.String("env", "", "comma separated list of VAR=VALUE env vars to set")
macOSSDK = flag.String("macos-sdk", "", "macOS SDK to use")
macOSArch = flag.String("macos-arch", "", "macOS arch to use")
extraCgoCFlags = flag.String("cgo-cflags", "", "extra CGO_CFLAGS")
extraCgoLdFlags = flag.String("cgo-ldflags", "", "extra CGO_LDFLAGS")
debug = flag.Bool("d", false, "Print commands instead of running them.")
parallel = flag.Int("parallel", runtime.NumCPU(), "Number of commands to run in parallel.")
copyAs = flag.String("release", "", "Make copies of the releases with this name")
gitLog = flag.String("git-log", "", "git log to include as well")
include = flag.String("include", "^.*$", "os/arch regexp to include")
exclude = flag.String("exclude", "^$", "os/arch regexp to exclude")
cgo = flag.Bool("cgo", false, "Use cgo for the build")
noClean = flag.Bool("no-clean", false, "Don't clean the build directory before running.")
tags = flag.String("tags", "", "Space separated list of build tags")
buildmode = flag.String("buildmode", "", "Passed to go build -buildmode flag")
compileOnly = flag.Bool("compile-only", false, "Just build the binary, not the zip.")
)
// GOOS/GOARCH pairs we build for
@@ -52,7 +47,6 @@ var osarches = []string{
"windows/386",
"windows/amd64",
"darwin/amd64",
"darwin/arm64",
"linux/386",
"linux/amd64",
"linux/arm",
@@ -285,15 +279,6 @@ func stripVersion(goarch string) string {
return goarch[:i]
}
// run the command returning trimmed output
func runOut(command ...string) string {
out, err := exec.Command(command[0], command[1:]...).Output()
if err != nil {
log.Fatalf("Failed to run %q: %v", command, err)
}
return strings.TrimSpace(string(out))
}
// build the binary in dir returning success or failure
func compileArch(version, goos, goarch, dir string) bool {
log.Printf("Compiling %s/%s into %s", goos, goarch, dir)
@@ -329,35 +314,6 @@ func compileArch(version, goos, goarch, dir string) bool {
"GOOS=" + goos,
"GOARCH=" + stripVersion(goarch),
}
if *extraEnv != "" {
env = append(env, strings.Split(*extraEnv, ",")...)
}
var (
cgoCFlags []string
cgoLdFlags []string
)
if *macOSSDK != "" {
flag := "-isysroot " + runOut("xcrun", "--sdk", *macOSSDK, "--show-sdk-path")
cgoCFlags = append(cgoCFlags, flag)
cgoLdFlags = append(cgoLdFlags, flag)
}
if *macOSArch != "" {
flag := "-arch " + *macOSArch
cgoCFlags = append(cgoCFlags, flag)
cgoLdFlags = append(cgoLdFlags, flag)
}
if *extraCgoCFlags != "" {
cgoCFlags = append(cgoCFlags, *extraCgoCFlags)
}
if *extraCgoLdFlags != "" {
cgoLdFlags = append(cgoLdFlags, *extraCgoLdFlags)
}
if len(cgoCFlags) > 0 {
env = append(env, "CGO_CFLAGS="+strings.Join(cgoCFlags, " "))
}
if len(cgoLdFlags) > 0 {
env = append(env, "CGO_LDFLAGS="+strings.Join(cgoLdFlags, " "))
}
if !*cgo {
env = append(env, "CGO_ENABLED=0")
} else {
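Taken together with the darwin/arm64 addition above, these flags enable Apple Silicon cross builds. An invocation might look like this (the script path and trailing version argument are assumed; the flag values mirror the CI config):

go run bin/cross-compile.go -include "^darwin/arm64" -cgo -macos-arch arm64 -macos-sdk macosx11.1 $(make version)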

146
bin/make_test_files.go Normal file
View File

@@ -0,0 +1,146 @@
// +build ignore
// Build a directory structure with the required number of files in it.
//
// Run with go run make_test_files.go [flag] <directory>
package main
import (
cryptrand "crypto/rand"
"flag"
"io"
"log"
"math/rand"
"os"
"path/filepath"
)
var (
// Flags
numberOfFiles = flag.Int("n", 1000, "Number of files to create")
averageFilesPerDirectory = flag.Int("files-per-directory", 10, "Average number of files per directory")
maxDepth = flag.Int("max-depth", 10, "Maximum depth of directory hierarchy")
minFileSize = flag.Int64("min-size", 0, "Minimum size of files to create")
maxFileSize = flag.Int64("max-size", 100, "Maximum size of files to create")
minFileNameLength = flag.Int("min-name-length", 4, "Minimum length of file names to create")
maxFileNameLength = flag.Int("max-name-length", 12, "Maximum length of file names to create")
directoriesToCreate int
totalDirectories int
fileNames = map[string]struct{}{} // keep a note of which file name we've used already
)
// randomString create a random string for test purposes
func randomString(n int) string {
const (
vowel = "aeiou"
consonant = "bcdfghjklmnpqrstvwxyz"
digit = "0123456789"
)
pattern := []string{consonant, vowel, consonant, vowel, consonant, vowel, consonant, digit}
out := make([]byte, n)
p := 0
for i := range out {
source := pattern[p]
p = (p + 1) % len(pattern)
out[i] = source[rand.Intn(len(source))]
}
return string(out)
}
// fileName creates a unique random file or directory name
func fileName() (name string) {
for {
length := rand.Intn(*maxFileNameLength-*minFileNameLength) + *minFileNameLength
name = randomString(length)
if _, found := fileNames[name]; !found {
break
}
}
fileNames[name] = struct{}{}
return name
}
// dir is a directory in the directory hierarchy being built up
type dir struct {
name string
depth int
children []*dir
parent *dir
}
// Create a random directory hierarchy under d
func (d *dir) createDirectories() {
for totalDirectories < directoriesToCreate {
newDir := &dir{
name: fileName(),
depth: d.depth + 1,
parent: d,
}
d.children = append(d.children, newDir)
totalDirectories++
switch rand.Intn(4) {
case 0:
if d.depth < *maxDepth {
newDir.createDirectories()
}
case 1:
return
}
}
return
}
// list the directory hierarchy
func (d *dir) list(path string, output []string) []string {
dirPath := filepath.Join(path, d.name)
output = append(output, dirPath)
for _, subDir := range d.children {
output = subDir.list(dirPath, output)
}
return output
}
// writeFile writes a random file at dir/name
func writeFile(dir, name string) {
err := os.MkdirAll(dir, 0777)
if err != nil {
log.Fatalf("Failed to make directory %q: %v", dir, err)
}
path := filepath.Join(dir, name)
fd, err := os.Create(path)
if err != nil {
log.Fatalf("Failed to open file %q: %v", path, err)
}
size := rand.Int63n(*maxFileSize-*minFileSize) + *minFileSize
_, err = io.CopyN(fd, cryptrand.Reader, size)
if err != nil {
log.Fatalf("Failed to write %v bytes to file %q: %v", size, path, err)
}
err = fd.Close()
if err != nil {
log.Fatalf("Failed to close file %q: %v", path, err)
}
}
func main() {
flag.Parse()
args := flag.Args()
if len(args) != 1 {
log.Fatalf("Require 1 directory argument")
}
outputDirectory := args[0]
log.Printf("Output dir %q", outputDirectory)
directoriesToCreate = *numberOfFiles / *averageFilesPerDirectory
log.Printf("directoriesToCreate %v", directoriesToCreate)
root := &dir{name: outputDirectory, depth: 1}
for totalDirectories < directoriesToCreate {
root.createDirectories()
}
dirs := root.list("", []string{})
for i := 0; i < *numberOfFiles; i++ {
dir := dirs[rand.Intn(len(dirs))]
writeFile(dir, fileName())
}
}
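A quick usage sketch of the new helper (flag values arbitrary):

go run bin/make_test_files.go -n 500 -files-per-directory 20 -max-depth 5 /tmp/test-tree

With those values it creates roughly 500 files spread over about 25 directories, each file holding between -min-size and -max-size bytes of random data.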

View File

@@ -25,6 +25,7 @@ import (
_ "github.com/rclone/rclone/cmd/genautocomplete"
_ "github.com/rclone/rclone/cmd/gendocs"
_ "github.com/rclone/rclone/cmd/hashsum"
_ "github.com/rclone/rclone/cmd/info"
_ "github.com/rclone/rclone/cmd/link"
_ "github.com/rclone/rclone/cmd/listremotes"
_ "github.com/rclone/rclone/cmd/ls"
@@ -33,6 +34,7 @@ import (
_ "github.com/rclone/rclone/cmd/lsjson"
_ "github.com/rclone/rclone/cmd/lsl"
_ "github.com/rclone/rclone/cmd/md5sum"
_ "github.com/rclone/rclone/cmd/memtest"
_ "github.com/rclone/rclone/cmd/mkdir"
_ "github.com/rclone/rclone/cmd/mount"
_ "github.com/rclone/rclone/cmd/mount2"
@@ -47,17 +49,11 @@ import (
_ "github.com/rclone/rclone/cmd/reveal"
_ "github.com/rclone/rclone/cmd/rmdir"
_ "github.com/rclone/rclone/cmd/rmdirs"
_ "github.com/rclone/rclone/cmd/selfupdate"
_ "github.com/rclone/rclone/cmd/serve"
_ "github.com/rclone/rclone/cmd/settier"
_ "github.com/rclone/rclone/cmd/sha1sum"
_ "github.com/rclone/rclone/cmd/size"
_ "github.com/rclone/rclone/cmd/sync"
_ "github.com/rclone/rclone/cmd/test"
_ "github.com/rclone/rclone/cmd/test/histogram"
_ "github.com/rclone/rclone/cmd/test/info"
_ "github.com/rclone/rclone/cmd/test/makefiles"
_ "github.com/rclone/rclone/cmd/test/memory"
_ "github.com/rclone/rclone/cmd/touch"
_ "github.com/rclone/rclone/cmd/tree"
_ "github.com/rclone/rclone/cmd/version"

View File

@@ -29,8 +29,8 @@ rclone config.
Use the --auth-no-open-browser flag to prevent rclone from opening the
auth link in the default browser automatically.`,
RunE: func(command *cobra.Command, args []string) error {
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 3, command, args)
return config.Authorize(context.Background(), args, noAutoBrowser)
config.Authorize(context.Background(), args, noAutoBrowser)
},
}

View File

@@ -25,7 +25,6 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/fs/config/configflags"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/filter"
@@ -36,7 +35,6 @@ import (
"github.com/rclone/rclone/fs/rc/rcflags"
"github.com/rclone/rclone/fs/rc/rcserver"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/buildinfo"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/terminal"
"github.com/spf13/cobra"
@@ -75,13 +73,9 @@ const (
// ShowVersion prints the version to stdout
func ShowVersion() {
linking, tagString := buildinfo.GetLinkingAndTags()
fmt.Printf("rclone %s\n", fs.Version)
fmt.Printf("- os/type: %s\n", runtime.GOOS)
fmt.Printf("- os/arch: %s\n", runtime.GOARCH)
fmt.Printf("- go/version: %s\n", runtime.Version())
fmt.Printf("- go/linking: %s\n", linking)
fmt.Printf("- go/tags: %s\n", tagString)
fmt.Printf("- os/arch: %s/%s\n", runtime.GOOS, runtime.GOARCH)
fmt.Printf("- go version: %s\n", runtime.Version())
}
// NewFsFile creates an Fs from a name but may point to a file.
@@ -89,7 +83,7 @@ func ShowVersion() {
// It returns a string with the file name if it points to a file
// otherwise "".
func NewFsFile(remote string) (fs.Fs, string) {
_, fsPath, err := fspath.SplitFs(remote)
_, _, fsPath, err := fs.ParseRemote(remote)
if err != nil {
err = fs.CountError(err)
log.Fatalf("Failed to create file system for %q: %v", remote, err)
@@ -388,12 +382,6 @@ func initConfig() {
// Finish parsing any command line flags
configflags.SetFlags(ci)
// Load the config
configfile.LoadConfig(ctx)
// Start accounting
accounting.Start(ctx)
// Hide console window
if ci.NoConsole {
terminal.HideConsole()
@@ -553,9 +541,6 @@ func Main() {
setupRootCommand(Root)
AddBackendFlags()
if err := Root.Execute(); err != nil {
if strings.HasPrefix(err.Error(), "unknown command") {
Root.PrintErrf("You could use '%s selfupdate' to get latest features.\n\n", Root.CommandPath())
}
log.Fatalf("Fatal error: %v", err)
}
}

View File

@@ -1,7 +0,0 @@
package cmount
// ProvidedBy returns true if the rclone build for the given OS
// provides support for lib/cgo-fuse
func ProvidedBy(osName string) bool {
return osName == "windows" || osName == "darwin"
}

View File

@@ -5,6 +5,7 @@
package cmount
import (
"context"
"io"
"os"
"path"
@@ -18,9 +19,11 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/vfs"
"golang.org/x/time/rate"
)
const fhUnset = ^uint64(0)
const tpsBurst = 100
// FS represents the top level filing system
type FS struct {
@@ -29,19 +32,31 @@ type FS struct {
ready chan (struct{})
mu sync.Mutex // to protect the below
handles []vfs.Handle
destroyed int32 // read/write with sync/atomic
destroyed int32 // read/write with sync/atomic
tps *rate.Limiter // for limiting number of transactions per second
}
// NewFS makes a new FS
func NewFS(VFS *vfs.VFS) *FS {
func NewFS(VFS *vfs.VFS, opt *mountlib.Options) *FS {
fsys := &FS{
VFS: VFS,
f: VFS.Fs(),
ready: make(chan (struct{})),
}
if opt.TPSLimit > 0 {
fsys.tps = rate.NewLimiter(rate.Limit(opt.TPSLimit), tpsBurst)
fs.Infof(nil, "Starting mount transaction limiter: max %g transactions/s with burst %d", opt.TPSLimit, tpsBurst)
}
return fsys
}
// Limit the number of transactions per second
func (fsys *FS) rateLimit() {
if fsys.tps != nil {
_ = fsys.tps.Wait(context.Background())
}
}
// Open a handle returning an integer file handle
func (fsys *FS) openHandle(handle vfs.Handle) (fh uint64) {
fsys.mu.Lock()
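The tps limiter wired in above is plain golang.org/x/time/rate; reduced to its essentials (the numbers are illustrative):

package main

import (
	"context"

	"golang.org/x/time/rate"
)

func main() {
	// 10 transactions/s steady state, bursts of up to 100 (cf. tpsBurst).
	tps := rate.NewLimiter(rate.Limit(10), 100)
	for i := 0; i < 3; i++ {
		_ = tps.Wait(context.Background()) // blocks until a token is available
		// ... perform one FUSE transaction ...
	}
}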
@@ -195,6 +210,7 @@ func (fsys *FS) Destroy() {
// Getattr reads the attributes for path
func (fsys *FS) Getattr(path string, stat *fuse.Stat_t, fh uint64) (errc int) {
defer log.Trace(path, "fh=0x%X", fh)("errc=%v", &errc)
fsys.rateLimit()
node, _, errc := fsys.getNode(path, fh)
if errc == 0 {
errc = fsys.stat(node, stat)
@@ -205,6 +221,7 @@ func (fsys *FS) Getattr(path string, stat *fuse.Stat_t, fh uint64) (errc int) {
// Opendir opens path as a directory
func (fsys *FS) Opendir(path string) (errc int, fh uint64) {
defer log.Trace(path, "")("errc=%d, fh=0x%X", &errc, &fh)
fsys.rateLimit()
handle, err := fsys.VFS.OpenFile(path, os.O_RDONLY, 0777)
if err != nil {
return translateError(err), fhUnset
@@ -219,6 +236,7 @@ func (fsys *FS) Readdir(dirPath string,
fh uint64) (errc int) {
itemsRead := -1
defer log.Trace(dirPath, "ofst=%d, fh=0x%X", ofst, fh)("items=%d, errc=%d", &itemsRead, &errc)
fsys.rateLimit()
dir, errc := fsys.lookupDir(dirPath)
if errc != 0 {
@@ -270,12 +288,14 @@ func (fsys *FS) Readdir(dirPath string,
// Releasedir finished reading the directory
func (fsys *FS) Releasedir(path string, fh uint64) (errc int) {
defer log.Trace(path, "fh=0x%X", fh)("errc=%d", &errc)
fsys.rateLimit()
return fsys.closeHandle(fh)
}
// Statfs reads overall stats on the filesystem
func (fsys *FS) Statfs(path string, stat *fuse.Statfs_t) (errc int) {
defer log.Trace(path, "")("stat=%+v, errc=%d", stat, &errc)
fsys.rateLimit()
const blockSize = 4096
total, _, free := fsys.VFS.Statfs()
stat.Blocks = uint64(total) / blockSize // Total data blocks in file system.
@@ -295,6 +315,7 @@ func (fsys *FS) Statfs(path string, stat *fuse.Statfs_t) (errc int) {
// OpenEx opens a file
func (fsys *FS) OpenEx(path string, fi *fuse.FileInfo_t) (errc int) {
defer log.Trace(path, "flags=0x%X", fi.Flags)("errc=%d, fh=0x%X", &errc, &fi.Fh)
fsys.rateLimit()
fi.Fh = fhUnset
// translate the fuse flags to os flags
@@ -325,6 +346,7 @@ func (fsys *FS) Open(path string, flags int) (errc int, fh uint64) {
// CreateEx creates and opens a file.
func (fsys *FS) CreateEx(filePath string, mode uint32, fi *fuse.FileInfo_t) (errc int) {
defer log.Trace(filePath, "flags=0x%X, mode=0%o", fi.Flags, mode)("errc=%d, fh=0x%X", &errc, &fi.Fh)
fsys.rateLimit()
fi.Fh = fhUnset
leaf, parentDir, errc := fsys.lookupParentDir(filePath)
if errc != 0 {
@@ -356,6 +378,7 @@ func (fsys *FS) Create(filePath string, flags int, mode uint32) (errc int, fh ui
// Truncate truncates a file to size
func (fsys *FS) Truncate(path string, size int64, fh uint64) (errc int) {
defer log.Trace(path, "size=%d, fh=0x%X", size, fh)("errc=%d", &errc)
fsys.rateLimit()
node, handle, errc := fsys.getNode(path, fh)
if errc != 0 {
return errc
@@ -375,6 +398,7 @@ func (fsys *FS) Truncate(path string, size int64, fh uint64) (errc int) {
// Read data from file handle
func (fsys *FS) Read(path string, buff []byte, ofst int64, fh uint64) (n int) {
defer log.Trace(path, "ofst=%d, fh=0x%X", ofst, fh)("n=%d", &n)
fsys.rateLimit()
handle, errc := fsys.getHandle(fh)
if errc != 0 {
return errc
@@ -390,6 +414,7 @@ func (fsys *FS) Read(path string, buff []byte, ofst int64, fh uint64) (n int) {
// Write data to file handle
func (fsys *FS) Write(path string, buff []byte, ofst int64, fh uint64) (n int) {
defer log.Trace(path, "ofst=%d, fh=0x%X", ofst, fh)("n=%d", &n)
fsys.rateLimit()
handle, errc := fsys.getHandle(fh)
if errc != 0 {
return errc
@@ -404,6 +429,7 @@ func (fsys *FS) Write(path string, buff []byte, ofst int64, fh uint64) (n int) {
// Flush flushes an open file descriptor or path
func (fsys *FS) Flush(path string, fh uint64) (errc int) {
defer log.Trace(path, "fh=0x%X", fh)("errc=%d", &errc)
fsys.rateLimit()
handle, errc := fsys.getHandle(fh)
if errc != 0 {
return errc
@@ -414,6 +440,7 @@ func (fsys *FS) Flush(path string, fh uint64) (errc int) {
// Release closes the file if still open
func (fsys *FS) Release(path string, fh uint64) (errc int) {
defer log.Trace(path, "fh=0x%X", fh)("errc=%d", &errc)
fsys.rateLimit()
handle, errc := fsys.getHandle(fh)
if errc != 0 {
return errc
@@ -425,6 +452,7 @@ func (fsys *FS) Release(path string, fh uint64) (errc int) {
// Unlink removes a file.
func (fsys *FS) Unlink(filePath string) (errc int) {
defer log.Trace(filePath, "")("errc=%d", &errc)
fsys.rateLimit()
leaf, parentDir, errc := fsys.lookupParentDir(filePath)
if errc != 0 {
return errc
@@ -435,6 +463,7 @@ func (fsys *FS) Unlink(filePath string) (errc int) {
// Mkdir creates a directory.
func (fsys *FS) Mkdir(dirPath string, mode uint32) (errc int) {
defer log.Trace(dirPath, "mode=0%o", mode)("errc=%d", &errc)
fsys.rateLimit()
leaf, parentDir, errc := fsys.lookupParentDir(dirPath)
if errc != 0 {
return errc
@@ -446,6 +475,7 @@ func (fsys *FS) Mkdir(dirPath string, mode uint32) (errc int) {
// Rmdir removes a directory
func (fsys *FS) Rmdir(dirPath string) (errc int) {
defer log.Trace(dirPath, "")("errc=%d", &errc)
fsys.rateLimit()
leaf, parentDir, errc := fsys.lookupParentDir(dirPath)
if errc != 0 {
return errc
@@ -456,6 +486,7 @@ func (fsys *FS) Rmdir(dirPath string) (errc int) {
// Rename renames a file.
func (fsys *FS) Rename(oldPath string, newPath string) (errc int) {
defer log.Trace(oldPath, "newPath=%q", newPath)("errc=%d", &errc)
fsys.rateLimit()
return translateError(fsys.VFS.Rename(oldPath, newPath))
}
@@ -467,6 +498,7 @@ var invalidDateCutoff = time.Date(1601, 1, 2, 0, 0, 0, 0, time.UTC)
// Utimens changes the access and modification times of a file.
func (fsys *FS) Utimens(path string, tmsp []fuse.Timespec) (errc int) {
defer log.Trace(path, "tmsp=%+v", tmsp)("errc=%d", &errc)
fsys.rateLimit()
node, errc := fsys.lookupNode(path)
if errc != 0 {
return errc
@@ -487,12 +519,14 @@ func (fsys *FS) Utimens(path string, tmsp []fuse.Timespec) (errc int) {
// Mknod creates a file node.
func (fsys *FS) Mknod(path string, mode uint32, dev uint64) (errc int) {
defer log.Trace(path, "mode=0x%X, dev=0x%X", mode, dev)("errc=%d", &errc)
fsys.rateLimit()
return -fuse.ENOSYS
}
// Fsync synchronizes file contents.
func (fsys *FS) Fsync(path string, datasync bool, fh uint64) (errc int) {
defer log.Trace(path, "datasync=%v, fh=0x%X", datasync, fh)("errc=%d", &errc)
fsys.rateLimit()
// This is a no-op for rclone
return 0
}
@@ -500,24 +534,28 @@ func (fsys *FS) Fsync(path string, datasync bool, fh uint64) (errc int) {
// Link creates a hard link to a file.
func (fsys *FS) Link(oldpath string, newpath string) (errc int) {
defer log.Trace(oldpath, "newpath=%q", newpath)("errc=%d", &errc)
fsys.rateLimit()
return -fuse.ENOSYS
}
// Symlink creates a symbolic link.
func (fsys *FS) Symlink(target string, newpath string) (errc int) {
defer log.Trace(target, "newpath=%q", newpath)("errc=%d", &errc)
fsys.rateLimit()
return -fuse.ENOSYS
}
// Readlink reads the target of a symbolic link.
func (fsys *FS) Readlink(path string) (errc int, linkPath string) {
defer log.Trace(path, "")("linkPath=%q, errc=%d", &linkPath, &errc)
fsys.rateLimit()
return -fuse.ENOSYS, ""
}
// Chmod changes the permission bits of a file.
func (fsys *FS) Chmod(path string, mode uint32) (errc int) {
defer log.Trace(path, "mode=0%o", mode)("errc=%d", &errc)
fsys.rateLimit()
// This is a no-op for rclone
return 0
}
@@ -525,6 +563,7 @@ func (fsys *FS) Chmod(path string, mode uint32) (errc int) {
// Chown changes the owner and group of a file.
func (fsys *FS) Chown(path string, uid uint32, gid uint32) (errc int) {
defer log.Trace(path, "uid=%d, gid=%d", uid, gid)("errc=%d", &errc)
fsys.rateLimit()
// This is a no-op for rclone
return 0
}
@@ -532,6 +571,7 @@ func (fsys *FS) Chown(path string, uid uint32, gid uint32) (errc int) {
// Access checks file access permissions.
func (fsys *FS) Access(path string, mask uint32) (errc int) {
defer log.Trace(path, "mask=0%o", mask)("errc=%d", &errc)
fsys.rateLimit()
// This is a no-op for rclone
return 0
}
@@ -539,27 +579,32 @@ func (fsys *FS) Access(path string, mask uint32) (errc int) {
// Fsyncdir synchronizes directory contents.
func (fsys *FS) Fsyncdir(path string, datasync bool, fh uint64) (errc int) {
defer log.Trace(path, "datasync=%v, fh=0x%X", datasync, fh)("errc=%d", &errc)
fsys.rateLimit()
// This is a no-op for rclone
return 0
}
// Setxattr sets extended attributes.
func (fsys *FS) Setxattr(path string, name string, value []byte, flags int) (errc int) {
fsys.rateLimit()
return -fuse.ENOSYS
}
// Getxattr gets extended attributes.
func (fsys *FS) Getxattr(path string, name string) (errc int, value []byte) {
fsys.rateLimit()
return -fuse.ENOSYS, nil
}
// Removexattr removes extended attributes.
func (fsys *FS) Removexattr(path string, name string) (errc int) {
fsys.rateLimit()
return -fuse.ENOSYS
}
// Listxattr lists extended attributes.
func (fsys *FS) Listxattr(path string, fill func(name string) bool) (errc int) {
fsys.rateLimit()
return -fuse.ENOSYS
}

View File

@@ -12,7 +12,6 @@ import (
"fmt"
"os"
"runtime"
"strings"
"sync/atomic"
"time"
@@ -26,7 +25,7 @@ import (
func init() {
name := "cmount"
cmountOnly := ProvidedBy(runtime.GOOS)
cmountOnly := runtime.GOOS == "windows" || runtime.GOOS == "darwin"
if cmountOnly {
name = "mount"
}
@@ -37,19 +36,6 @@ func init() {
mountlib.AddRc("cmount", mount)
}
// Find the option string in the current options
func findOption(name string, options []string) (found bool) {
for _, option := range options {
if option == "-o" {
continue
}
if strings.Contains(option, name) {
return true
}
}
return false
}
// mountOptions configures the options from the command line flags
func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.Options) (options []string) {
// Options
@@ -119,13 +105,6 @@ func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.
for _, option := range opt.ExtraFlags {
options = append(options, option)
}
if runtime.GOOS == "darwin" {
if !findOption("modules=iconv", options) {
iconv := "modules=iconv,from_code=UTF-8,to_code=UTF-8-MAC"
options = append(options, "-o", iconv)
fs.Debugf(nil, "Adding \"-o %s\" for macOS", iconv)
}
}
return options
}
@@ -159,7 +138,7 @@ func mount(VFS *vfs.VFS, mountPath string, opt *mountlib.Options) (<-chan error,
// Create underlying FS
f := VFS.Fs()
fsys := NewFS(VFS)
fsys := NewFS(VFS, opt)
host := fuse.NewFileSystemHost(fsys)
host.SetCapReaddirPlus(true) // only works on Windows
host.SetCapCaseInsensitive(f.Features().CaseInsensitive)

View File

@@ -103,9 +103,8 @@ func handleLocalMountpath(mountpath string, opt *mountlib.Options) (string, erro
} else if !os.IsNotExist(err) {
return "", errors.Wrap(err, "failed to retrieve mountpoint path information")
}
if isDriveRootPath(mountpath) { // Assume intention with "X:\" was "X:"
mountpath = mountpath[:len(mountpath)-1] // WinFsp needs drive mountpoints without trailing path separator
}
//if isDriveRootPath(mountpath) { // Assume intention with "X:\" was "X:"
// mountpoint = mountpath[:len(mountpath)-1] // WinFsp needs drive mountpoints without trailing path separator
if !isDrive(mountpath) {
// Assuming directory path, since it is not a pure drive letter string such as "X:".
// Drive letter string can be used as is, since we have already checked it does not exist,
@@ -114,12 +113,14 @@ func handleLocalMountpath(mountpath string, opt *mountlib.Options) (string, erro
fs.Errorf(nil, "Ignoring --network-mode as it is not supported with directory mountpoint")
opt.NetworkMode = false
}
var err error
if mountpath, err = filepath.Abs(mountpath); err != nil { // Ensures parent is found but also more informative log messages
return "", errors.Wrap(err, "mountpoint path is not valid: "+mountpath)
}
parent := filepath.Join(mountpath, "..")
if _, err = os.Stat(parent); err != nil {
if parent == "" || parent == "." {
return "", errors.New("mountpoint directory is not valid: " + parent)
}
if os.IsPathSeparator(parent[len(parent)-1]) { // Ends in a separator only if it is the root directory
return "", errors.New("mountpoint directory is at root: " + parent)
}
if _, err := os.Stat(parent); err != nil {
if os.IsNotExist(err) {
return "", errors.New("parent of mountpoint directory does not exist: " + parent)
}

View File

@@ -3,7 +3,6 @@ package copyurl
import (
"context"
"errors"
"fmt"
"os"
"github.com/rclone/rclone/cmd"
@@ -14,17 +13,15 @@ import (
)
var (
autoFilename = false
printFilename = false
stdout = false
noClobber = false
autoFilename = false
stdout = false
noClobber = false
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &autoFilename, "auto-filename", "a", autoFilename, "Get the file name from the URL and use it for destination file path")
flags.BoolVarP(cmdFlags, &printFilename, "print-filename", "p", printFilename, "Print the resulting name from --auto-filename")
flags.BoolVarP(cmdFlags, &noClobber, "no-clobber", "", noClobber, "Prevent overwriting file with same name")
flags.BoolVarP(cmdFlags, &stdout, "stdout", "", stdout, "Write the output to stdout rather than a file")
}
@@ -36,16 +33,15 @@ var commandDefinition = &cobra.Command{
Download a URL's content and copy it to the destination without saving
it in temporary storage.
Setting ` + "`--auto-filename`" + ` will cause the file name to be retrieved from
Setting --auto-filename will cause the file name to be retrieved from
the from URL (after any redirections) and used in the destination
path. With ` + "`--print-filename`" + ` in addition, the resulting file name will
be printed.
path.
Setting ` + "`--no-clobber`" + ` will prevent overwriting a file on the
Setting --no-clobber will prevent overwriting a file on the
destination if there is one with the same name.
Setting ` + "`--stdout`" + ` or making the output file name ` + "`-`" + `
will cause the output to be written to standard output.
Setting --stdout or making the output file name "-" will cause the
output to be written to standard output.
`,
RunE: func(command *cobra.Command, args []string) (err error) {
cmd.CheckArgs(1, 2, command, args)
@@ -65,14 +61,10 @@ will cause the output to be written to standard output.
}
}
cmd.Run(true, true, command, func() error {
var dst fs.Object
if stdout {
err = operations.CopyURLToWriter(context.Background(), args[0], os.Stdout)
} else {
dst, err = operations.CopyURL(context.Background(), fsdst, dstFileName, args[0], autoFilename, noClobber)
if printFilename && err == nil && dst != nil {
fmt.Println(dst.Remote())
}
_, err = operations.CopyURL(context.Background(), fsdst, dstFileName, args[0], autoFilename, noClobber)
}
return err
})

View File

@@ -32,8 +32,8 @@ By default ` + "`dedupe`" + ` interactively finds files with duplicate
names and offers to delete all but one or rename them to be
different. This is known as deduping by name.
Deduping by name is only useful with a small group of backends (e.g. Google Drive,
Opendrive) that can have duplicate file names. It can be run on wrapping backends
Deduping by name is only useful with backends like Google Drive which
can have duplicate file names. It can be run on wrapping backends
(e.g. crypt) if they wrap a backend which supports duplicate file
names.

View File

@@ -1,7 +1,7 @@
package info
// FIXME once translations are implemented will need a no-escape
// option for Put so we can make these tests work again
// option for Put so we can make these tests work agaig
import (
"bytes"
@@ -9,7 +9,6 @@ import (
"encoding/json"
"fmt"
"io"
"log"
"os"
"path"
"regexp"
@@ -21,8 +20,7 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/test"
"github.com/rclone/rclone/cmd/test/info/internal"
"github.com/rclone/rclone/cmd/info/internal"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/hash"
@@ -37,7 +35,6 @@ var (
checkControl bool
checkLength bool
checkStreaming bool
all bool
uploadWait time.Duration
positionLeftRe = regexp.MustCompile(`(?s)^(.*)-position-left-([[:xdigit:]]+)$`)
positionMiddleRe = regexp.MustCompile(`(?s)^position-middle-([[:xdigit:]]+)-(.*)-$`)
@@ -45,15 +42,14 @@ var (
)
func init() {
test.Command.AddCommand(commandDefinition)
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.StringVarP(cmdFlags, &writeJSON, "write-json", "", "", "Write results to file.")
flags.BoolVarP(cmdFlags, &checkNormalization, "check-normalization", "", false, "Check UTF-8 Normalization.")
flags.BoolVarP(cmdFlags, &checkControl, "check-control", "", false, "Check control characters.")
flags.BoolVarP(cmdFlags, &checkNormalization, "check-normalization", "", true, "Check UTF-8 Normalization.")
flags.BoolVarP(cmdFlags, &checkControl, "check-control", "", true, "Check control characters.")
flags.DurationVarP(cmdFlags, &uploadWait, "upload-wait", "", 0, "Wait after writing a file.")
flags.BoolVarP(cmdFlags, &checkLength, "check-length", "", false, "Check max filename length.")
flags.BoolVarP(cmdFlags, &checkStreaming, "check-streaming", "", false, "Check uploads with indeterminate file size.")
flags.BoolVarP(cmdFlags, &all, "all", "", false, "Run all tests.")
flags.BoolVarP(cmdFlags, &checkLength, "check-length", "", true, "Check max filename length.")
flags.BoolVarP(cmdFlags, &checkStreaming, "check-streaming", "", true, "Check uploads with indeterminate file size.")
}
var commandDefinition = &cobra.Command{
@@ -63,20 +59,10 @@ var commandDefinition = &cobra.Command{
to write to the paths passed in and how long they can be. It can take some
time. It will write test files into the remote:path passed in. It outputs
a bit of go code for each one.
**NB** this can create undeletable files and other hazards - use with care
`,
Hidden: true,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1e6, command, args)
if !checkNormalization && !checkControl && !checkLength && !checkStreaming && !all {
log.Fatalf("no tests selected - select a test or use -all")
}
if all {
checkNormalization = true
checkControl = true
checkLength = true
checkStreaming = true
}
for i := range args {
f := cmd.NewFsDir(args[i : i+1])
cmd.Run(false, false, command, func() error {

View File

@@ -11,7 +11,7 @@ import (
"sort"
"strconv"
"github.com/rclone/rclone/cmd/test/info/internal"
"github.com/rclone/rclone/cmd/info/internal"
)
func main() {

View File

@@ -41,7 +41,7 @@ When used with the -l flag it lists the types too.
}
for _, remote := range remotes {
if listLong {
remoteType := config.FileGet(remote, "type")
remoteType := config.FileGet(remote, "type", "UNKNOWN")
fmt.Printf("%-*s %s\n", maxlen+1, remote+":", remoteType)
} else {
fmt.Printf("%s:\n", remote)

View File

@@ -1,4 +1,4 @@
package memory
package memtest
import (
"context"
@@ -6,19 +6,19 @@ import (
"sync"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/test"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
func init() {
test.Command.AddCommand(commandDefinition)
cmd.Root.AddCommand(commandDefinition)
}
var commandDefinition = &cobra.Command{
Use: "memory remote:path",
Short: `Load all the objects at remote:path into memory and report memory stats.`,
Use: "memtest remote:path",
Short: `Load all the objects at remote:path and report memory stats.`,
Hidden: true,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)

View File

@@ -181,15 +181,6 @@ func (d *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) (err error) {
return nil
}
// Invalidate a leaf in a directory
func (d *Dir) invalidateEntry(dirNode fusefs.Node, leaf string) {
fs.Debugf(dirNode, "Invalidating %q", leaf)
err := d.fsys.server.InvalidateEntry(dirNode, leaf)
if err != nil {
fs.Debugf(dirNode, "Failed to invalidate %q: %v", leaf, err)
}
}
// Check interface satisfied
var _ fusefs.NodeRenamer = (*Dir)(nil)
@@ -206,13 +197,6 @@ func (d *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fusefs
return translateError(err)
}
// Invalidate the new directory entry so it gets re-read (in
// the background otherwise we cause a deadlock)
//
// See https://github.com/rclone/rclone/issues/4977 for why
go d.invalidateEntry(newDir, req.NewName)
//go d.invalidateEntry(d, req.OldName)
return nil
}

View File

@@ -20,9 +20,8 @@ import (
// FS represents the top level filing system
type FS struct {
*vfs.VFS
f fs.Fs
opt *mountlib.Options
server *fusefs.Server
f fs.Fs
opt *mountlib.Options
}
// Check interface satisfied

View File

@@ -91,12 +91,12 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
}
filesys := NewFS(VFS, opt)
filesys.server = fusefs.New(c, nil)
server := fusefs.New(c, nil)
// Serve the mount point in the background returning error to errChan
errChan := make(chan error, 1)
go func() {
err := filesys.server.Serve(filesys)
err := server.Serve(filesys)
closeErr := c.Close()
if err == nil {
err = closeErr

View File

@@ -45,6 +45,7 @@ type Options struct {
DaemonTimeout time.Duration // OSXFUSE only
AsyncRead bool
NetworkMode bool // Windows only
TPSLimit float64
}
// DefaultOpt is the default values for creating the mount
@@ -71,7 +72,7 @@ const (
func init() {
// DaemonTimeout defaults to non zero for macOS
if runtime.GOOS == "darwin" {
DefaultOpt.DaemonTimeout = 10 * time.Minute
DefaultOpt.DaemonTimeout = 15 * time.Minute
}
}
@@ -97,6 +98,7 @@ func AddFlags(flagSet *pflag.FlagSet) {
flags.BoolVarP(flagSet, &Opt.AsyncRead, "async-read", "", Opt.AsyncRead, "Use asynchronous reads. Not supported on Windows.")
flags.FVarP(flagSet, &Opt.MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads. Not supported on Windows.")
flags.BoolVarP(flagSet, &Opt.WritebackCache, "write-back-cache", "", Opt.WritebackCache, "Makes kernel buffer writes before sending them to rclone. Without this, writethrough caching is used. Not supported on Windows.")
flags.Float64VarP(flagSet, &Opt.TPSLimit, "mount-tpslimit", "", 0, "Limit mount transactions per second to this. (0 is off).")
// Windows and OSX
flags.StringVarP(flagSet, &Opt.VolumeName, "volname", "", Opt.VolumeName, "Set the volume name. Supported on Windows and OSX only.")
// OSX only
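Once plumbed through, a mount can be throttled from the command line like so (values illustrative):

rclone mount remote:path /mnt/point --mount-tpslimit 10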
@@ -179,15 +181,15 @@ is an **empty** **existing** directory:
On Windows you can start a mount in different ways. See [below](#mounting-modes-on-windows)
for details. The following examples will mount to an automatically assigned drive,
to specific drive letter |X:|, to path |C:\path\parent\mount|
(where parent directory or drive must exist, and mount must **not** exist,
to specific drive letter |X:|, to path |C:\path\to\nonexistent\directory|
(which must be **non-existent** subdirectory of an **existing** parent directory or drive,
and is not supported when [mounting as a network drive](#mounting-modes-on-windows)), and
the last example will mount as network share |\\cloud\remote| and map it to an
automatically assigned drive:
rclone @ remote:path/to/files *
rclone @ remote:path/to/files X:
rclone @ remote:path/to/files C:\path\parent\mount
rclone @ remote:path/to/files C:\path\to\nonexistent\directory
rclone @ remote:path/to/files \\cloud\remote
When the program ends while in foreground mode, either via Ctrl+C or receiving
@@ -241,14 +243,14 @@ and experience unexpected program errors, freezes or other issues, consider moun
as a network drive instead.
When mounting as a fixed disk drive you can either mount to an unused drive letter,
or to a path representing a **non-existent** subdirectory of an **existing** parent
or to a path - which must be **non-existent** subdirectory of an **existing** parent
directory or drive. Using the special value |*| will tell rclone to
automatically assign the next available drive letter, starting with Z: and moving backward.
Examples:
rclone @ remote:path/to/files *
rclone @ remote:path/to/files X:
rclone @ remote:path/to/files C:\path\parent\mount
rclone @ remote:path/to/files C:\path\to\nonexistent\directory
rclone @ remote:path/to/files X:
Option |--volname| can be used to set a custom volume name for the mounted
@@ -321,24 +323,10 @@ Note that the mapping of permissions is not always trivial, and the result
you see in Windows Explorer may not be exactly like you expected.
For example, when setting a value that includes write access, this will be
mapped to individual permissions "write attributes", "write data" and "append data",
but not "write extended attributes". Windows will then show this as basic
permission "Special" instead of "Write", because "Write" includes the
"write extended attributes" permission.
If you set POSIX permissions for only allowing access to the owner, using
|--file-perms 0600 --dir-perms 0700|, the user group and the built-in "Everyone"
group will still be given some special permissions, such as "read attributes"
and "read permissions", in Windows. This is done for compatibility reasons,
e.g. to allow users without additional permissions to be able to read basic
metadata about files like in UNIX. One case that may arise is that other programs
(incorrectly) interprets this as the file being accessible by everyone. For example
an SSH client may warn about "unprotected private key file".
WinFsp 2021 (version 1.9, still in beta) introduces a new FUSE option "FileSecurity",
that allows the complete specification of file security descriptors using
[SDDL](https://docs.microsoft.com/en-us/windows/win32/secauthz/security-descriptor-string-format).
With this you can work around issues such as the mentioned "unprotected private key file"
by specifying |-o FileSecurity="D:P(A;;FA;;;OW)"|, for file all access (FA) to the owner (OW).
but not "write extended attributes" (WinFsp does not support extended attributes,
see [this](https://github.com/billziss-gh/winfsp/wiki/NTFS-Compatibility)).
Windows will then show this as basic permission "Special" instead of "Write",
because "Write" includes the "write extended attributes" permission.
#### Windows caveats
@@ -362,7 +350,7 @@ Without the use of |--vfs-cache-mode| this can only write files
sequentially, it can only seek when reading. This means that many
applications won't work with their files on an rclone mount without
|--vfs-cache-mode writes| or |--vfs-cache-mode full|.
See the [VFS File Caching](#vfs-file-caching) section for more info.
See the [File Caching](#file-caching) section for more info.
The bucket based remotes (e.g. Swift, S3, Google Compute Storage, B2,
Hubic) do not support the concept of empty directories, so empty
@@ -377,7 +365,7 @@ File systems expect things to be 100% reliable, whereas cloud storage
systems are a long way from 100% reliable. The rclone sync/copy
commands cope with this with lots of retries. However rclone @
can't use retries in the same way without making local copies of the
uploads. Look at the [VFS File Caching](#vfs-file-caching)
uploads. Look at the [file caching](#file-caching)
for solutions to make @ more reliable.
### Attribute caching

View File

@@ -13,7 +13,6 @@ import (
_ "github.com/rclone/rclone/cmd/cmount"
_ "github.com/rclone/rclone/cmd/mount"
_ "github.com/rclone/rclone/cmd/mount2"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/fs/rc"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -21,7 +20,6 @@ import (
func TestRc(t *testing.T) {
ctx := context.Background()
configfile.LoadConfig(ctx)
mount := rc.Calls.Get("mount/mount")
assert.NotNil(t, mount)
unmount := rc.Calls.Get("mount/unmount")

View File

@@ -16,7 +16,6 @@ import (
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fs/rc/jobs"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
@@ -165,7 +164,7 @@ func doCall(ctx context.Context, path string, in rc.Params) (out rc.Params, err
if call == nil {
return nil, errors.Errorf("method %q not found", path)
}
_, out, err := jobs.NewJob(ctx, call.Fn, in)
out, err = call.Fn(context.Background(), in)
if err != nil {
return nil, errors.Wrap(err, "loopback call failed")
}

View File

@@ -3,13 +3,10 @@ package rcd
import (
"context"
"log"
"sync"
sysdnotify "github.com/iguanesolutions/go-systemd/v5/notify"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs/rc/rcflags"
"github.com/rclone/rclone/fs/rc/rcserver"
"github.com/rclone/rclone/lib/atexit"
"github.com/spf13/cobra"
)
@@ -51,22 +48,6 @@ See the [rc documentation](/rc/) for more info on the rc flags.
log.Fatal("rc server not configured")
}
// Notify stopping on exit
var finaliseOnce sync.Once
finalise := func() {
finaliseOnce.Do(func() {
_ = sysdnotify.Stopping()
})
}
fnHandle := atexit.Register(finalise)
defer atexit.Unregister(fnHandle)
// Notify ready to systemd
if err := sysdnotify.Ready(); err != nil {
log.Fatalf("failed to notify ready to systemd: %v", err)
}
s.Wait()
finalise()
},
}

View File

@@ -1,51 +0,0 @@
package selfupdate
// Note: "|" will be replaced by backticks in the help string below
var selfUpdateHelp string = `
This command downloads the latest release of rclone and replaces
the currently running binary. The download is verified with a hashsum
and cryptographically signed signature.
If used without flags (or with implied |--stable| flag), this command
will install the latest stable release. However, some issues may be fixed
(or features added) only in the latest beta release. In such cases you should
run the command with the |--beta| flag, i.e. |rclone selfupdate --beta|.
You can check in advance what version would be installed by adding the
|--check| flag, then repeat the command without it when you are satisfied.
Sometimes the rclone team may recommend a specific beta or stable
rclone release to troubleshoot your issue or to add a bleeding edge feature.
The |--version VER| flag, if given, will update to that specific version
instead of the latest one. If you omit the micro version from |VER| (for
example |1.53|), the latest matching micro version will be used.
Upon successful update rclone will print a message that contains the previous
version number. Note it down, as you will need it if you later decide to
revert your update by running |rclone selfupdate [--beta] OLDVER|.
If the old version contains only dots and digits (for example |v1.54.0|)
then it's a stable release so you won't need the |--beta| flag. Beta releases
have an additional information similar to |v1.54.0-beta.5111.06f1c0c61|.
(if you are a developer and use a locally built rclone, the version number
will end with |-DEV|, you will have to rebuild it as it obvisously can't
be distributed).
If you previously installed rclone via a package manager, the package may
include local documentation or configure services. You may wish to update
with the flag |--package deb| or |--package rpm| (whichever is correct for
your OS) to update these too. With the default |--package zip| this command
will update only the rclone executable, so the local manual may become
out of date after the update.
The |rclone mount| command (https://rclone.org/commands/rclone_mount/) may
or may not support extended FUSE options depending on the build and OS.
|selfupdate| will refuse to update if the capability would be discarded.
Note: Windows forbids deletion of a currently running executable so this
command will rename the old executable to 'rclone.old.exe' upon success.
Please note that this command was not available before rclone version 1.55.
If it fails for you with the message |unknown command "selfupdate"| then
you will need to update manually following the install instructions located
at https://rclone.org/install/
`
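Typical invocations, assembled from the flags described above: `rclone selfupdate --check` previews the version that would be installed, `rclone selfupdate --beta` moves to the latest beta, and `rclone selfupdate --version 1.53` pins to the newest matching 1.53.x stable release.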

View File

@@ -1,480 +0,0 @@
package selfupdate
import (
"archive/zip"
"bufio"
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strings"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/cmount"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/lib/buildinfo"
"github.com/rclone/rclone/lib/random"
"github.com/spf13/cobra"
versionCmd "github.com/rclone/rclone/cmd/version"
)
// Options contains options for the self-update command
type Options struct {
Check bool
Output string // output path
Beta bool // mutually exclusive with Stable (false means "stable")
Stable bool // mutually exclusive with Beta
Version string
Package string // package format: zip, deb, rpm (empty string means "zip")
}
// Opt is options set via command line
var Opt = Options{}
func init() {
cmd.Root.AddCommand(cmdSelfUpdate)
cmdFlags := cmdSelfUpdate.Flags()
flags.BoolVarP(cmdFlags, &Opt.Check, "check", "", Opt.Check, "Check for latest release, do not download.")
flags.StringVarP(cmdFlags, &Opt.Output, "output", "", Opt.Output, "Save the downloaded binary at a given path (default: replace running binary)")
flags.BoolVarP(cmdFlags, &Opt.Stable, "stable", "", Opt.Stable, "Install stable release (this is the default)")
flags.BoolVarP(cmdFlags, &Opt.Beta, "beta", "", Opt.Beta, "Install beta release.")
flags.StringVarP(cmdFlags, &Opt.Version, "version", "", Opt.Version, "Install the given rclone version (default: latest)")
flags.StringVarP(cmdFlags, &Opt.Package, "package", "", Opt.Package, "Package format: zip|deb|rpm (default: zip)")
}
var cmdSelfUpdate = &cobra.Command{
Use: "selfupdate",
Aliases: []string{"self-update"},
Short: `Update the rclone binary.`,
Long: strings.ReplaceAll(selfUpdateHelp, "|", "`"),
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 0, command, args)
if Opt.Package == "" {
Opt.Package = "zip"
}
gotActionFlags := Opt.Stable || Opt.Beta || Opt.Output != "" || Opt.Version != "" || Opt.Package != "zip"
if Opt.Check && !gotActionFlags {
versionCmd.CheckVersion()
return
}
if Opt.Package != "zip" {
if Opt.Package != "deb" && Opt.Package != "rpm" {
log.Fatalf("--package should be one of zip|deb|rpm")
}
if runtime.GOOS != "linux" {
log.Fatalf(".deb and .rpm packages are supported only on Linux")
} else if os.Geteuid() != 0 && !Opt.Check {
log.Fatalf(".deb and .rpm must be installed by root")
}
if Opt.Output != "" && !Opt.Check {
fmt.Println("Warning: --output is ignored with --package deb|rpm")
}
}
if err := InstallUpdate(context.Background(), &Opt); err != nil {
log.Fatalf("Error: %v", err)
}
},
}
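As the validation above implies, the non-zip package formats are Linux-only and require root, so a typical invocation on a Debian or Red Hat style system would be `sudo rclone selfupdate --package deb` or `sudo rclone selfupdate --package rpm`.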
// GetVersion can get the latest release number from the download site
// or massage a stable release number - prepend semantic "v" prefix
// or find the latest micro release for a given major.minor release.
// Note: this will not be applied to beta releases.
func GetVersion(ctx context.Context, beta bool, version string) (newVersion, siteURL string, err error) {
siteURL = "https://downloads.rclone.org"
if beta {
siteURL = "https://beta.rclone.org"
}
if version == "" {
// Request the latest release number from the download site
_, newVersion, _, err = versionCmd.GetVersion(siteURL + "/version.txt")
return
}
newVersion = version
if version[0] != 'v' {
newVersion = "v" + version
}
if beta {
return
}
if valid, _ := regexp.MatchString(`^v\d+\.\d+(\.\d+)?$`, newVersion); !valid {
return "", siteURL, errors.New("invalid semantic version")
}
// Find the latest stable micro release
if strings.Count(newVersion, ".") == 1 {
html, err := downloadFile(ctx, siteURL)
if err != nil {
return "", siteURL, errors.Wrap(err, "failed to get list of releases")
}
reSubver := fmt.Sprintf(`href="\./%s\.\d+/"`, regexp.QuoteMeta(newVersion))
allSubvers := regexp.MustCompile(reSubver).FindAllString(string(html), -1)
if allSubvers == nil {
return "", siteURL, errors.New("could not find the minor release")
}
// Use the fact that releases in the index are sorted by date
lastSubver := allSubvers[len(allSubvers)-1]
newVersion = lastSubver[8 : len(lastSubver)-2]
}
return
}
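A minimal sketch of driving GetVersion from within this package (the helper function and the printed output are illustrative, not part of the source):

func exampleResolveVersion(ctx context.Context) {
	// "1.53" gains the "v" prefix and is expanded to the newest
	// matching micro release listed in the download index.
	newVersion, siteURL, err := GetVersion(ctx, false, "1.53")
	if err != nil {
		log.Fatalf("version lookup failed: %v", err)
	}
	// Prints something like: would fetch v1.53.4 from https://downloads.rclone.org
	fmt.Printf("would fetch %s from %s\n", newVersion, siteURL)
}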
// InstallUpdate performs rclone self-update
func InstallUpdate(ctx context.Context, opt *Options) error {
// Find the latest release number
if opt.Stable && opt.Beta {
return errors.New("--stable and --beta are mutually exclusive")
}
gotCmount := false
for _, tag := range buildinfo.Tags {
if tag == "cmount" {
gotCmount = true
break
}
}
if gotCmount && !cmount.ProvidedBy(runtime.GOOS) {
return errors.New("updating would discard the mount FUSE capability, aborting")
}
newVersion, siteURL, err := GetVersion(ctx, opt.Beta, opt.Version)
if err != nil {
return errors.Wrap(err, "unable to detect new version")
}
oldVersion := fs.Version
if newVersion == oldVersion {
fmt.Println("rclone is up to date")
return nil
}
// Install .deb/.rpm package if requested by user
if opt.Package == "deb" || opt.Package == "rpm" {
if opt.Check {
fmt.Println("Warning: --package flag is ignored in --check mode")
} else {
err := installPackage(ctx, opt.Beta, newVersion, siteURL, opt.Package)
if err == nil {
fmt.Printf("Successfully updated rclone package from version %s to version %s\n", oldVersion, newVersion)
}
return err
}
}
// Get the current executable path
executable, err := os.Executable()
if err != nil {
return errors.Wrap(err, "unable to find executable")
}
targetFile := opt.Output
if targetFile == "" {
targetFile = executable
}
if opt.Check {
fmt.Printf("Without --check this would install rclone version %s at %s\n", newVersion, targetFile)
return nil
}
// Make temporary file names and check for possible access errors in advance
var newFile string
if newFile, err = makeRandomExeName(targetFile, "new"); err != nil {
return err
}
savedFile := ""
if runtime.GOOS == "windows" {
savedFile = targetFile
if strings.HasSuffix(savedFile, ".exe") {
savedFile = savedFile[:len(savedFile)-4]
}
savedFile += ".old.exe"
}
if savedFile == executable || newFile == executable {
return fmt.Errorf("%s: a temporary file would overwrite the executable, specify a different --output path", targetFile)
}
if err := verifyAccess(targetFile); err != nil {
return err
}
// Download the update as a temporary file
err = downloadUpdate(ctx, opt.Beta, newVersion, siteURL, newFile, "zip")
if err != nil {
return errors.Wrap(err, "failed to update rclone")
}
err = replaceExecutable(targetFile, newFile, savedFile)
if err == nil {
fmt.Printf("Successfully updated rclone from version %s to version %s\n", oldVersion, newVersion)
}
return err
}
func installPackage(ctx context.Context, beta bool, version, siteURL, packageFormat string) error {
tempFile, err := ioutil.TempFile("", "rclone.*."+packageFormat)
if err != nil {
return errors.Wrap(err, "unable to write temporary package")
}
packageFile := tempFile.Name()
_ = tempFile.Close()
defer func() {
if rmErr := os.Remove(packageFile); rmErr != nil {
fs.Errorf(nil, "%s: could not remove temporary package: %v", packageFile, rmErr)
}
}()
if err := downloadUpdate(ctx, beta, version, siteURL, packageFile, packageFormat); err != nil {
return err
}
packageCommand := "dpkg"
if packageFormat == "rpm" {
packageCommand = "rpm"
}
cmd := exec.Command(packageCommand, "-i", packageFile)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
return fmt.Errorf("failed to run %s: %v", packageCommand, err)
}
return nil
}
func replaceExecutable(targetFile, newFile, savedFile string) error {
// Copy permission bits from the old executable
// (it was extracted with mode 0755)
fileInfo, err := os.Lstat(targetFile)
if err == nil {
if err = os.Chmod(newFile, fileInfo.Mode()); err != nil {
return errors.Wrap(err, "failed to set permission")
}
}
if err = os.Remove(targetFile); os.IsNotExist(err) {
err = nil
}
if err != nil && savedFile != "" {
// Windows forbids removal of a running executable so we rename it.
// First, rename the original executable with ".old.exe" appended.
var saveErr error
if saveErr = os.Remove(savedFile); os.IsNotExist(saveErr) {
saveErr = nil
}
if saveErr == nil {
saveErr = os.Rename(targetFile, savedFile)
}
if saveErr != nil {
// The ".old" file cannot be removed or cannot be renamed to.
// This usually means that the running executable has a name with ".old".
// This can happen in very rare cases, but we ought to handle it.
// Try inserting a randomness in the name to mitigate it.
fs.Debugf(nil, "%s: cannot replace old file, randomizing name", savedFile)
savedFile, saveErr = makeRandomExeName(targetFile, "old")
if saveErr == nil {
if saveErr = os.Remove(savedFile); os.IsNotExist(saveErr) {
saveErr = nil
}
}
if saveErr == nil {
saveErr = os.Rename(targetFile, savedFile)
}
}
if saveErr == nil {
fmt.Printf("The old executable was saved as %s\n", savedFile)
err = nil
}
}
if err == nil {
err = os.Rename(newFile, targetFile)
}
if err != nil {
if rmErr := os.Remove(newFile); rmErr != nil {
fs.Errorf(nil, "%s: could not remove temporary file: %v", newFile, rmErr)
}
return err
}
return nil
}
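To make the rename dance concrete (paths illustrative): updating a running C:\bin\rclone.exe first renames it to C:\bin\rclone.old.exe, then renames the freshly downloaded temporary (named like rclone.ab12.new.exe by makeRandomExeName below) into place; if rclone.old.exe is itself still running, a randomized fallback such as rclone.ab12.old.exe is used for the saved copy.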
func makeRandomExeName(baseName, extension string) (string, error) {
const maxAttempts = 5
if runtime.GOOS == "windows" {
if strings.HasSuffix(baseName, ".exe") {
baseName = baseName[:len(baseName)-4]
}
extension += ".exe"
}
for attempt := 0; attempt < maxAttempts; attempt++ {
filename := fmt.Sprintf("%s.%s.%s", baseName, random.String(4), extension)
if _, err := os.Stat(filename); os.IsNotExist(err) {
return filename, nil
}
}
return "", fmt.Errorf("cannot find a file name like %s.xxxx.%s", baseName, extension)
}
func downloadUpdate(ctx context.Context, beta bool, version, siteURL, newFile, packageFormat string) error {
osName := runtime.GOOS
arch := runtime.GOARCH
if osName == "darwin" {
osName = "osx" // release archives use "osx", not "darwin", in their names
}
archiveFilename := fmt.Sprintf("rclone-%s-%s-%s.%s", version, osName, arch, packageFormat)
archiveURL := fmt.Sprintf("%s/%s/%s", siteURL, version, archiveFilename)
archiveBuf, err := downloadFile(ctx, archiveURL)
if err != nil {
return err
}
gotHash := sha256.Sum256(archiveBuf)
strHash := hex.EncodeToString(gotHash[:])
fs.Debugf(nil, "downloaded release archive with hashsum %s from %s", strHash, archiveURL)
// CI/CD does not provide hashsums for beta releases
if !beta {
if err := verifyHashsum(ctx, siteURL, version, archiveFilename, gotHash[:]); err != nil {
return err
}
}
if packageFormat == "deb" || packageFormat == "rpm" {
if err := ioutil.WriteFile(newFile, archiveBuf, 0644); err != nil {
return errors.Wrap(err, "cannot write temporary ."+packageFormat)
}
return nil
}
entryName := fmt.Sprintf("rclone-%s-%s-%s/rclone", version, osName, arch)
if runtime.GOOS == "windows" {
entryName += ".exe"
}
// Extract executable to a temporary file, then replace it by an instant rename
err = extractZipToFile(archiveBuf, entryName, newFile)
if err != nil {
return err
}
fs.Debugf(nil, "extracted %s to %s", entryName, newFile)
return nil
}
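For example, a stable update to v1.55.0 on linux/amd64 downloads https://downloads.rclone.org/v1.55.0/rclone-v1.55.0-linux-amd64.zip, verifies the archive digest against the signed SHA256SUMS file, and extracts the entry rclone-v1.55.0-linux-amd64/rclone into the temporary file.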
func verifyAccess(file string) error {
admin := "root"
if runtime.GOOS == "windows" {
admin = "Administrator"
}
fileInfo, fileErr := os.Lstat(file)
if fileErr != nil {
dir := filepath.Dir(file)
dirInfo, dirErr := os.Lstat(dir)
if dirErr != nil {
return dirErr
}
if !dirInfo.Mode().IsDir() {
return fmt.Errorf("%s: parent path is not a directory, specify a different path using --output", dir)
}
if !writable(dir) {
return fmt.Errorf("%s: directory is not writable, please run self-update as %s", dir, admin)
}
}
if fileErr == nil && !fileInfo.Mode().IsRegular() {
return fmt.Errorf("%s: path is not a normal file, specify a different path using --output", file)
}
if fileErr == nil && !writable(file) {
return fmt.Errorf("%s: file is not writable, run self-update as %s", file, admin)
}
return nil
}
func findFileHash(buf []byte, filename string) (hash []byte, err error) {
lines := bufio.NewScanner(bytes.NewReader(buf))
for lines.Scan() {
tokens := strings.Split(lines.Text(), "  ") // digest and filename are separated by two spaces
if len(tokens) == 2 && tokens[1] == filename {
if hash, err := hex.DecodeString(tokens[0]); err == nil {
return hash, nil
}
}
}
return nil, fmt.Errorf("%s: unable to find hash", filename)
}
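Each SHA256SUMS line follows the usual sha256sum layout: a 64-character hex digest, two spaces, then the filename, e.g. (digest shortened here for illustration) b1946ac9...  rclone-v1.55.0-linux-amd64.zip. findFileHash scans for the matching filename and decodes its digest.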
func extractZipToFile(buf []byte, entryName, newFile string) error {
zipReader, err := zip.NewReader(bytes.NewReader(buf), int64(len(buf)))
if err != nil {
return err
}
var reader io.ReadCloser
for _, entry := range zipReader.File {
if entry.Name == entryName {
reader, err = entry.Open()
break
}
}
if reader == nil || err != nil {
return fmt.Errorf("%s: file not found in archive", entryName)
}
defer func() {
_ = reader.Close()
}()
err = os.Remove(newFile)
if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("%s: unable to create new file: %v", newFile, err)
}
writer, err := os.OpenFile(newFile, os.O_CREATE|os.O_EXCL|os.O_WRONLY, os.FileMode(0755))
if err != nil {
return err
}
_, err = io.Copy(writer, reader)
_ = writer.Close()
if err != nil {
if rmErr := os.Remove(newFile); rmErr != nil {
fs.Errorf(nil, "%s: could not remove temporary file: %v", newFile, rmErr)
}
}
return err
}
func downloadFile(ctx context.Context, url string) ([]byte, error) {
resp, err := fshttp.NewClient(ctx).Get(url)
if err != nil {
return nil, err
}
defer fs.CheckClose(resp.Body, &err)
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("failed with %s downloading %s", resp.Status, url)
}
return ioutil.ReadAll(resp.Body)
}

View File

@@ -1,198 +0,0 @@
package selfupdate
import (
"context"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/testy"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
)
func TestGetVersion(t *testing.T) {
testy.SkipUnreliable(t)
ctx := context.Background()
// a beta version can only have "v" prepended
resultVer, _, err := GetVersion(ctx, true, "1.2.3.4")
assert.NoError(t, err)
assert.Equal(t, "v1.2.3.4", resultVer)
// but a stable version syntax should be checked
_, _, err = GetVersion(ctx, false, "1")
assert.Error(t, err)
_, _, err = GetVersion(ctx, false, "1.")
assert.Error(t, err)
_, _, err = GetVersion(ctx, false, "1.2.")
assert.Error(t, err)
_, _, err = GetVersion(ctx, false, "1.2.3.4")
assert.Error(t, err)
// incomplete stable version should have micro release added
resultVer, _, err = GetVersion(ctx, false, "1.52")
assert.NoError(t, err)
assert.Equal(t, "v1.52.3", resultVer)
}
func makeTestDir() (testDir string, err error) {
const maxAttempts = 5
testDirBase := filepath.Join(os.TempDir(), "rclone-test-selfupdate.")
for attempt := 0; attempt < maxAttempts; attempt++ {
testDir = testDirBase + random.String(4)
err = os.MkdirAll(testDir, os.ModePerm)
if err == nil {
break
}
}
return
}
func TestInstallOnLinux(t *testing.T) {
testy.SkipUnreliable(t)
if runtime.GOOS != "linux" {
t.Skip("this is a Linux only test")
}
// Prepare for test
ctx := context.Background()
testDir, err := makeTestDir()
assert.NoError(t, err)
path := filepath.Join(testDir, "rclone")
defer func() {
_ = os.Chmod(path, 0644)
_ = os.RemoveAll(testDir)
}()
regexVer := regexp.MustCompile(`v[0-9]\S+`)
betaVer, _, err := GetVersion(ctx, true, "")
assert.NoError(t, err)
// Must do nothing if version isn't changing
assert.NoError(t, InstallUpdate(ctx, &Options{Beta: true, Output: path, Version: fs.Version}))
// Must fail on non-writable file
assert.NoError(t, ioutil.WriteFile(path, []byte("test"), 0644))
assert.NoError(t, os.Chmod(path, 0000))
err = (InstallUpdate(ctx, &Options{Beta: true, Output: path}))
assert.Error(t, err)
assert.Contains(t, err.Error(), "run self-update as root")
// Must keep non-standard permissions
assert.NoError(t, os.Chmod(path, 0644))
assert.NoError(t, InstallUpdate(ctx, &Options{Beta: true, Output: path}))
info, err := os.Stat(path)
assert.NoError(t, err)
assert.Equal(t, os.FileMode(0644), info.Mode().Perm())
// Must remove temporary files
files, err := ioutil.ReadDir(testDir)
assert.NoError(t, err)
assert.Equal(t, 1, len(files))
// Must contain valid executable
assert.NoError(t, os.Chmod(path, 0755))
cmd := exec.Command(path, "version")
output, err := cmd.CombinedOutput()
assert.NoError(t, err)
assert.True(t, cmd.ProcessState.Success())
assert.Equal(t, betaVer, regexVer.FindString(string(output)))
}
func TestRenameOnWindows(t *testing.T) {
testy.SkipUnreliable(t)
if runtime.GOOS != "windows" {
t.Skip("this is a Windows only test")
}
// Prepare for test
ctx := context.Background()
testDir, err := makeTestDir()
assert.NoError(t, err)
defer func() {
_ = os.RemoveAll(testDir)
}()
path := filepath.Join(testDir, "rclone.exe")
regexVer := regexp.MustCompile(`v[0-9]\S+`)
stableVer, _, err := GetVersion(ctx, false, "")
assert.NoError(t, err)
betaVer, _, err := GetVersion(ctx, true, "")
assert.NoError(t, err)
// Must not create temporary files when target doesn't exist
assert.NoError(t, InstallUpdate(ctx, &Options{Beta: true, Output: path}))
files, err := ioutil.ReadDir(testDir)
assert.NoError(t, err)
assert.Equal(t, 1, len(files))
// Must save running executable as the "old" file
cmdWait := exec.Command(path, "config")
stdinWait, err := cmdWait.StdinPipe() // Make it run waiting for input
assert.NoError(t, err)
assert.NoError(t, cmdWait.Start())
assert.NoError(t, InstallUpdate(ctx, &Options{Beta: false, Output: path}))
files, err = ioutil.ReadDir(testDir)
assert.NoError(t, err)
assert.Equal(t, 2, len(files))
pathOld := filepath.Join(testDir, "rclone.old.exe")
_, err = os.Stat(pathOld)
assert.NoError(t, err)
cmd := exec.Command(path, "version")
output, err := cmd.CombinedOutput()
assert.NoError(t, err)
assert.True(t, cmd.ProcessState.Success())
assert.Equal(t, stableVer, regexVer.FindString(string(output)))
cmdOld := exec.Command(pathOld, "version")
output, err = cmdOld.CombinedOutput()
assert.NoError(t, err)
assert.True(t, cmdOld.ProcessState.Success())
assert.Equal(t, betaVer, regexVer.FindString(string(output)))
// Stop previous waiting executable, run new and saved executables
_ = stdinWait.Close()
_ = cmdWait.Wait()
time.Sleep(100 * time.Millisecond)
cmdWait = exec.Command(path, "config")
stdinWait, err = cmdWait.StdinPipe()
assert.NoError(t, err)
assert.NoError(t, cmdWait.Start())
cmdWaitOld := exec.Command(pathOld, "config")
stdinWaitOld, err := cmdWaitOld.StdinPipe()
assert.NoError(t, err)
assert.NoError(t, cmdWaitOld.Start())
// Updating when the "old" executable is running must produce a random "old" file
assert.NoError(t, InstallUpdate(ctx, &Options{Beta: true, Output: path}))
files, err = ioutil.ReadDir(testDir)
assert.NoError(t, err)
assert.Equal(t, 3, len(files))
// Stop all waiting executables
_ = stdinWait.Close()
_ = cmdWait.Wait()
_ = stdinWaitOld.Close()
_ = cmdWaitOld.Wait()
time.Sleep(100 * time.Millisecond)
}

View File

@@ -1,72 +0,0 @@
package selfupdate
import (
"bytes"
"context"
"fmt"
"strings"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"golang.org/x/crypto/openpgp"
"golang.org/x/crypto/openpgp/clearsign"
)
var ncwPublicKeyPGP = `-----BEGIN PGP PUBLIC KEY BLOCK-----
mQGiBDuy3V0RBADVQOAF5aFiCxD3t2h6iAF2WMiaMlgZ6kX2i/u7addNkzX71VU9
7NpI0SnsP5YWt+gEedST6OmFbtLfZWCR4KWn5XnNdjCMNhxaH6WccVqNm4ALPIqT
59uVjkgf8RISmmoNJ1d+2wMWjQTUfwOEmoIgH6n+2MYNUKuctBrwAACflwCg1I1Q
O/prv/5hczdpQCs+fL87DxsD/Rt7pIXvsIOZyQWbIhSvNpGalJuMkW5Jx92UjsE9
1Ipo3Xr6SGRPgW9+NxAZAsiZfCX/19knAyNrN9blwL0rcPDnkhdGwK69kfjF+wq+
QbogRGodbKhqY4v+cMNkKiemBuTQiWPkpKjifwNsD1fNjNKfDP3pJ64Yz7a4fuzV
X1YwBACpKVuEen34lmcX6ziY4jq8rKibKBs4JjQCRO24kYoHDULVe+RS9krQWY5b
e0foDhru4dsKccefK099G+WEzKVCKxupstWkTT/iJwajR8mIqd4AhD0wO9W3MCfV
Ov8ykMDZ7qBWk1DHc87Ep3W1o8t8wq74ifV+HjhhWg8QAylXg7QlTmljayBDcmFp
Zy1Xb29kIDxuaWNrQGNyYWlnLXdvb2QuY29tPohxBBMRCAAxBQsHCgMEAxUDAgMW
AgECF4AWIQT79zfs6firGGBL0qyTk14C/ztU+gUCXjg2UgIZAQAKCRCTk14C/ztU
+lmmAJ4jH5FyULzStjisuTvHLTVz6G44eQCfaR5QGZFPseenE5ic2WeQcBcmtoG5
Ag0EO7LdgRAIAI6QdFBg3/xa1gFKPYy1ihV9eSdGqwWZGJvokWsfCvHy5180tj/v
UNOLAJrdqglMSvevNTXe8bT65D6423AAsLhch9wq/aNqrHolTYABzxRigjcS1//T
yln5naGUzlVQXDVfrDk3Md/NrkdOFj7r/YyMF0+iWwpFz2qAjL95i5wfVZ1kWGrT
2AmivE1wD1sWT/Ja3FDI0NRkU0Nbz/a0TKe4ml8iLVtZXpTRbxxCCPdkHXXgSyu1
eZ4NrF/wTJuvwGn12TJ1EF95aVkHxAUw0+KmLGdcyBG+IKuHamrsjWIAXGXV///K
AxPgUthccQ03HMjltFsrdmen5Q034YM3eOsAAwUH/jAKiIAA8LpZmZPnt9GZ4+Ol
Zp22VAfyfDOFl4Ol+cWjkLAgjAFsm5gnOKcRSE/9XPxnQqkhw7+ZygYuUMgTDJ99
/5IM1UQL3ooS+oFrDaE99S8bLeOe17skcdXcA/K83VqD9m93rQRnbtD+75zqKkZn
9WNFyKCXg5P6PFPdNYRtlQKOcwFR9mHRLUmapQSAM8Y2pCgALZ7GViKQca8/TT1T
gZk9fJMZYGez+IlOPxTJxjn80+vywk4/wdIWSiQj+8u5RzT9sjmm77wbMVNGRqYd
W/EemW9Zz9vi0CIvJGgbPMqcuxw8e/5lnuQ6Mi3uDR0P2RNIAhFrdZpVSME8xQaI
RgQYEQIABgUCO7LdgQAKCRCTk14C/ztU+mLBAKC2cdFy7eLaQAvyzcE2VK6HVIjn
JACguA00bxLQuJ4+RCJrLFZP8ZlN2sc=
=TtR5
-----END PGP PUBLIC KEY BLOCK-----`
func verifyHashsum(ctx context.Context, siteURL, version, archive string, hash []byte) error {
sumsURL := fmt.Sprintf("%s/%s/SHA256SUMS", siteURL, version)
sumsBuf, err := downloadFile(ctx, sumsURL)
if err != nil {
return err
}
fs.Debugf(nil, "downloaded hashsum list: %s", sumsURL)
keyRing, err := openpgp.ReadArmoredKeyRing(strings.NewReader(ncwPublicKeyPGP))
if err != nil {
return errors.New("unsupported signing key")
}
block, rest := clearsign.Decode(sumsBuf)
// block.Bytes = block.Bytes[1:] // uncomment to test invalid signature
_, err = openpgp.CheckDetachedSignature(keyRing, bytes.NewReader(block.Bytes), block.ArmoredSignature.Body)
if err != nil || len(rest) > 0 {
return errors.New("invalid hashsum signature")
}
wantHash, err := findFileHash(sumsBuf, archive)
if err != nil {
return err
}
if !bytes.Equal(hash, wantHash) {
return fmt.Errorf("archive hash mismatch: want %02x vs got %02x", wantHash, hash)
}
return nil
}
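In effect this is the programmatic equivalent of fetching SHA256SUMS, verifying its clearsigned PGP signature against the embedded public key (as gpg --verify SHA256SUMS would), and comparing the listed digest with sha256sum output for the downloaded archive.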

View File

@@ -1,11 +0,0 @@
// +build !windows,!plan9,!js
package selfupdate
import (
"golang.org/x/sys/unix"
)
func writable(path string) bool {
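// unix.Access follows access(2) semantics: the check is made against the
// real (not effective) UID/GID of the invoking user.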
return unix.Access(path, unix.W_OK) == nil
}

View File

@@ -1,7 +0,0 @@
// +build plan9 js
package selfupdate
func writable(path string) bool {
return true
}

View File

@@ -1,16 +0,0 @@
// +build windows
package selfupdate
import (
"os"
)
func writable(path string) bool {
info, err := os.Stat(path)
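// 128 == 0o200, the owner-write bit in the Unix-style permission bits
// that os.Stat synthesizes on Windows.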
const UserWritableBit = 128
if err == nil {
return info.Mode().Perm()&UserWritableBit != 0
}
return false
}

View File

@@ -13,12 +13,12 @@ import (
"github.com/anacrolix/dms/soap"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/vfs"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/dlna/dlnaflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -41,7 +41,7 @@ func startServer(t *testing.T, f fs.Fs) {
}
func TestInit(t *testing.T) {
configfile.LoadConfig(context.Background())
config.LoadConfig(context.Background())
f, err := fs.NewFs(context.Background(), "testdata/files")
l, _ := f.List(context.Background(), "")

View File

@@ -12,7 +12,7 @@ import (
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/httplib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/filter"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -61,7 +61,7 @@ var (
func TestInit(t *testing.T) {
ctx := context.Background()
// Configure the remote
configfile.LoadConfig(context.Background())
config.LoadConfig(context.Background())
// fs.Config.LogLevel = fs.LogLevelDebug
// fs.Config.DumpHeaders = true
// fs.Config.DumpBodies = true

View File

@@ -1,7 +1,6 @@
package restic
import (
"context"
"crypto/rand"
"encoding/hex"
"io"
@@ -13,7 +12,6 @@ import (
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve/httplib/httpflags"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/stretchr/testify/require"
)
@@ -65,8 +63,6 @@ func createOverwriteDeleteSeq(t testing.TB, path string) []TestRequest {
// TestResticHandler runs tests on the restic handler code, especially in append-only mode.
func TestResticHandler(t *testing.T) {
ctx := context.Background()
configfile.LoadConfig(ctx)
buf := make([]byte, 32)
_, err := io.ReadFull(rand.Reader, buf)
require.NoError(t, err)

Some files were not shown because too many files have changed in this diff